Commit 7726c953 authored by Matthias Braun

move op_estimated_cost callback to isa_if; remove arch_irn_ops

parent 2109a91a
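
For backend maintainers, the change amounts to this: the per-opcode arch_irn_ops_t table, which every ir_op carried in ops.be_ops and which was queried through arch_get_op_estimated_cost(), is removed. Its single callback, get_op_estimated_cost, now lives in arch_isa_if_t (returning unsigned instead of int), and callers such as the spiller and the block-cost estimation call isa_if->get_op_estimated_cost() directly, without a NULL check, so every backend has to provide it. The generated <arch>_create_opcodes() functions consequently lose their be_ops parameter. A minimal sketch of the new wiring, using hypothetical "mybackend" names that are not part of this commit:

static unsigned mybackend_get_op_estimated_cost(const ir_node *node)
{
	/* backend-specific heuristic: memory operations tend to be the
	 * expensive ones, everything else defaults to one cycle */
	if (is_mybackend_Load(node)) /* hypothetical node predicate */
		return 5;
	return 1;
}

static arch_isa_if_t const mybackend_isa_if = {
	/* ... remaining callbacks as before ... */
	.get_op_estimated_cost = mybackend_get_op_estimated_cost,
};

Backends without a useful estimate yet simply return a constant 1 (as amd64 and arm do below) rather than leaving the field unset.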
@@ -85,7 +85,7 @@ static void TEMPLATE_generate_code(FILE *output, const char *cup_name)
 static void TEMPLATE_init(void)
 {
 	TEMPLATE_register_init();
-	TEMPLATE_create_opcodes(&be_null_ops);
+	TEMPLATE_create_opcodes();
 }

 static void TEMPLATE_finish(void)
@@ -141,6 +141,15 @@ static int TEMPLATE_is_valid_clobber(const char *clobber)
 	return false;
 }

+static unsigned TEMPLATE_get_op_estimated_cost(const ir_node *node)
+{
+	if (is_TEMPLATE_Load(node))
+		return 5;
+	if (is_TEMPLATE_Store(node))
+		return 7;
+	return 1;
+}
+
 static arch_isa_if_t const TEMPLATE_isa_if = {
 	.n_registers = N_TEMPLATE_REGISTERS,
 	.registers = TEMPLATE_registers,
@@ -152,6 +161,7 @@ static arch_isa_if_t const TEMPLATE_isa_if = {
 	.generate_code = TEMPLATE_generate_code,
 	.lower_for_target = TEMPLATE_lower_for_target,
 	.is_valid_clobber = TEMPLATE_is_valid_clobber,
+	.get_op_estimated_cost = TEMPLATE_get_op_estimated_cost,
 };

 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_TEMPLATE)
...
@@ -779,11 +779,17 @@ static void amd64_init(void)
 {
 	amd64_init_types();
 	amd64_register_init();
-	amd64_create_opcodes(&be_null_ops);
+	amd64_create_opcodes();
 	amd64_cconv_init();
 	x86_set_be_asm_constraint_support(&amd64_asm_constraints);
 }

+static unsigned amd64_get_op_estimated_cost(const ir_node *node)
+{
+	(void)node; /* TODO */
+	return 1;
+}
+
 static arch_isa_if_t const amd64_isa_if = {
 	.n_registers = N_AMD64_REGISTERS,
 	.registers = amd64_registers,
@@ -796,6 +802,7 @@ static arch_isa_if_t const amd64_isa_if = {
 	.lower_for_target = amd64_lower_for_target,
 	.is_valid_clobber = amd64_is_valid_clobber,
 	.handle_intrinsics = amd64_handle_intrinsics,
+	.get_op_estimated_cost = amd64_get_op_estimated_cost,
 };

 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
...
@@ -392,7 +392,7 @@ void arm_lower_64bit(void)
 	create_divmod_intrinsics(word_unsigned, word_signed);

 	/* make sure opcodes are initialized */
-	arm_create_opcodes(&be_null_ops);
+	arm_create_opcodes();
 	ir_prepare_dw_lowering(&lower_dw_params);
 	ir_register_dw_lower_function(op_Add, lower64_add);
...
@@ -298,7 +298,7 @@ static void arm_init(void)
 	arm_mode_flags = new_non_arithmetic_mode("arm_flags", 32);

 	arm_register_init();
-	arm_create_opcodes(&be_null_ops);
+	arm_create_opcodes();
 	arm_init_backend_params();
 }
@@ -307,6 +307,12 @@ static void arm_finish(void)
 	arm_free_opcodes();
 }

+static unsigned arm_get_op_estimated_cost(const ir_node *node)
+{
+	(void)node; /* TODO */
+	return 1;
+}
+
 static arch_isa_if_t const arm_isa_if = {
 	.n_registers = N_ARM_REGISTERS,
 	.registers = arm_registers,
@@ -319,6 +325,7 @@ static arch_isa_if_t const arm_isa_if = {
 	.lower_for_target = arm_lower_for_target,
 	.is_valid_clobber = arm_is_valid_clobber,
 	.handle_intrinsics = arm_handle_intrinsics,
+	.get_op_estimated_cost = arm_get_op_estimated_cost,
 };

 static const lc_opt_enum_int_items_t arm_fpu_items[] = {
...
@@ -26,30 +26,6 @@ arch_register_req_t const arch_no_requirement = {
 	.width = 0,
 };

-/**
- * Get the isa responsible for a node.
- * @param irn The node to get the responsible isa for.
- * @return The irn operations given by the responsible isa.
- */
-static const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
-{
-	ir_op const *const op = get_irn_op(irn);
-	arch_irn_ops_t const *const be_ops = get_op_ops(op)->be_ops;
-	assert(be_ops);
-	return be_ops;
-}
-
-int arch_get_op_estimated_cost(const ir_node *irn)
-{
-	const arch_irn_ops_t *ops = get_irn_ops(irn);
-	if (ops->get_op_estimated_cost) {
-		return ops->get_op_estimated_cost(irn);
-	} else {
-		return 1;
-	}
-}
-
 static reg_out_info_t *get_out_info_n(const ir_node *node, unsigned pos)
 {
 	const backend_info_t *info = be_get_info(node);
...
@@ -62,8 +62,6 @@ ENUM_BITSET(arch_register_req_type_t)
 extern arch_register_req_t const arch_no_requirement;
 #define arch_no_register_req (&arch_no_requirement)

-int arch_get_op_estimated_cost(const ir_node *irn);
-
 /**
  * Get the register allocated for a value.
  */
@@ -277,16 +275,6 @@ static inline bool reg_reqs_equal(const arch_register_req_t *req1,
 	return true;
 }

-struct arch_irn_ops_t {
-	/**
-	 * Get the estimated cycle count for @p irn.
-	 *
-	 * @param irn The node.
-	 * @return The estimated cycle count for this operation
-	 */
-	int (*get_op_estimated_cost)(const ir_node *irn);
-};
-
 /**
  * Architecture interface.
  */
@@ -334,6 +322,12 @@ struct arch_isa_if_t {
 	 * intrinsics here.
 	 */
 	void (*handle_intrinsics)(ir_graph *irg);
+
+	/**
+	 * Get a cost estimation for node @p irn. The cost should be similar to the
+	 * number of cycles necessary to execute the instruction.
+	 */
+	unsigned (*get_op_estimated_cost)(const ir_node *irn);
 };

 static inline bool arch_irn_is_ignore(const ir_node *irn)
...
@@ -594,17 +594,12 @@ bool is_be_node(const ir_node *irn)
 	return get_op_tag(get_irn_op(irn)) == be_op_tag;
 }

-arch_irn_ops_t const be_null_ops = {
-	.get_op_estimated_cost = NULL,
-};
-
 static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p,
                         irop_flags flags, op_arity opar, size_t attr_size)
 {
 	ir_op *res = new_ir_op(code, name, p, flags, opar, 0, attr_size);
 	set_op_dump(res, dump_node);
 	set_op_copy_attr(res, copy_attr);
-	res->ops.be_ops = &be_null_ops;
 	set_op_tag(res, be_op_tag);
 	return res;
 }
@@ -632,13 +627,6 @@ void be_init_op(void)
 	set_op_attrs_equal(op_be_Keep, attrs_equal_be_node);
 	set_op_attrs_equal(op_be_MemPerm, attrs_equal_be_node);
 	set_op_attrs_equal(op_be_Perm, attrs_equal_be_node);
-
-	/* attach out dummy_ops to middle end nodes */
-	for (unsigned opc = iro_first; opc <= iro_last; ++opc) {
-		ir_op *op = ir_get_opcode(opc);
-		assert(op->ops.be_ops == NULL);
-		op->ops.be_ops = &be_null_ops;
-	}
 }

 void be_finish_op(void)
...
@@ -51,8 +51,6 @@ extern ir_op *op_be_Keep;
 extern ir_op *op_be_MemPerm;
 extern ir_op *op_be_Perm;

-extern arch_irn_ops_t const be_null_ops;
-
 /**
  * Determines if irn is a be_node.
  */
...
@@ -369,7 +369,7 @@ static int check_remat_conditions_costs(spill_env_t *env,
 	if (!arch_irn_is(insn, rematerializable))
 		return REMAT_COST_INFINITE;

-	int costs = arch_get_op_estimated_cost(insn);
+	int costs = isa_if->get_op_estimated_cost(insn);
 	int spillcosts = env->regif.reload_cost + env->regif.spill_cost;
 	if (parentcosts + costs >= spillcosts)
 		return REMAT_COST_INFINITE;
...
@@ -102,7 +102,7 @@ static void estimate_block_costs(ir_node *block, void *data)
 	double costs = 0.0;
 	sched_foreach(block, node) {
-		costs += arch_get_op_estimated_cost(node);
+		costs += isa_if->get_op_estimated_cost(node);
 	}
 	env->costs += costs * get_block_execfreq(block);
...
@@ -243,8 +243,11 @@ ir_entity *ia32_get_frame_address_entity(ir_graph *irg)
  *
  * @return The estimated cycle count for this operation
  */
-static int ia32_get_op_estimated_cost(ir_node const *const irn)
+static unsigned ia32_get_op_estimated_cost(ir_node const *const irn)
 {
+	if (!is_ia32_irn(irn))
+		return 1;
+
 	if (is_ia32_CopyB_i(irn)) {
 		unsigned const size = get_ia32_copyb_size(irn);
 		return 20 + size * 4 / 3;
@@ -387,11 +390,6 @@ static void ia32_perform_memory_operand(ir_node *irn, unsigned int i)
 	kill_node(load);
 }

-/* register allocator interface */
-static const arch_irn_ops_t ia32_irn_ops = {
-	.get_op_estimated_cost = ia32_get_op_estimated_cost,
-};
-
 static bool gprof;

 static ir_node *ia32_turn_back_dest_am(ir_node *node)
@@ -1419,7 +1417,7 @@ static void ia32_init(void)
 	ia32_register_init();
 	obstack_init(&opcodes_obst);
-	ia32_create_opcodes(&ia32_irn_ops);
+	ia32_create_opcodes();
 	ia32_cconv_init();
 }
@@ -1586,6 +1584,7 @@ static arch_isa_if_t const ia32_isa_if = {
 	.generate_code = ia32_generate_code,
 	.lower_for_target = ia32_lower_for_target,
 	.is_valid_clobber = ia32_is_valid_clobber,
+	.get_op_estimated_cost = ia32_get_op_estimated_cost,
 };

 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32)
...
@@ -95,7 +95,7 @@ foreach my $class_name (sort(keys(%reg_classes))) {
 }

-$obst_header .= "void ${arch}_create_opcodes(const arch_irn_ops_t *be_ops);\n";
+$obst_header .= "void ${arch}_create_opcodes(void);\n";
 $obst_header .= "void ${arch}_free_opcodes(void);\n";

 sub create_constructor {
@@ -585,7 +585,6 @@ EOF
 	$temp = "\top = new_ir_op(cur_opcode + iro_$op, \"$op\", op_pin_state_".$n{"state"}.", $op_flags";
 	$temp .= ", ".translate_arity($arity).", -1, ${attr_size});\n";
 	$obst_new_irop .= $temp;
-	$obst_new_irop .= "\top->ops.be_ops = be_ops;\n";
 	$obst_new_irop .= "\tset_op_dump(op, ${dump_func});\n";
 	if (defined($attrs_equal_func)) {
 		$obst_new_irop .= "\tset_op_attrs_equal(op, ${attrs_equal_func});\n";
@@ -695,7 +694,7 @@ $obst_constructor
  * Creates the $arch specific Firm machine operations
  * needed for the assembler irgs.
  */
-void $arch\_create_opcodes(const arch_irn_ops_t *be_ops)
+void $arch\_create_opcodes(void)
 {
 	ir_op *op;
 	int cur_opcode = get_next_ir_opcodes(iro_$arch\_last);
...
@@ -357,7 +357,7 @@ static void sparc_init(void)
 {
 	sparc_init_asm_constraints();
 	sparc_register_init();
-	sparc_create_opcodes(&be_null_ops);
+	sparc_create_opcodes();
 	sparc_cconv_init();
 	sparc_setup_cg_config();
 }
@@ -551,6 +551,14 @@ static const backend_params *sparc_get_backend_params(void)
 	return &p;
 }

+static unsigned sparc_get_op_estimated_cost(const ir_node *node)
+{
+	/* TODO: refine */
+	if (sparc_has_load_store_attr(node))
+		return 5;
+	return 1;
+}
+
 static arch_isa_if_t const sparc_isa_if = {
 	.n_registers = N_SPARC_REGISTERS,
 	.registers = sparc_registers,
@@ -563,6 +571,7 @@ static arch_isa_if_t const sparc_isa_if = {
 	.lower_for_target = sparc_lower_for_target,
 	.is_valid_clobber = sparc_is_valid_clobber,
 	.handle_intrinsics = sparc_handle_intrinsics,
+	.get_op_estimated_cost = sparc_get_op_estimated_cost,
 };

 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc)
...
@@ -138,7 +138,7 @@ void sparc_lower_64bit(void)
 	};

 	/* make sure opcodes are initialized */
-	sparc_create_opcodes(&be_null_ops);
+	sparc_create_opcodes();
 	ir_prepare_dw_lowering(&lower_dw_params);
 	ir_register_dw_lower_function(op_Add, lower64_add);
...
@@ -34,8 +34,6 @@ struct ir_nodemap {
 	void **data; /**< maps node indices to void* */
 };

-typedef struct arch_irn_ops_t arch_irn_ops_t;
-
 /**
  * Operation specific callbacks.
  */
@@ -58,7 +56,6 @@ typedef struct {
 	op_func generic; /**< A generic function pointer. */
 	op_func generic1; /**< A generic function pointer. */
 	op_func generic2; /**< A generic function pointer. */
-	const arch_irn_ops_t *be_ops; /**< callbacks used by the backend. */
 } ir_op_ops;

 /** The type of an ir_op. */
...