Commit 2109a91a authored by Matthias Braun's avatar Matthias Braun
Browse files

move perform_memory_operand callback from arch_irn_ops to regalloc_if

parent f1d0154c
......@@ -39,13 +39,6 @@ static const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
return be_ops;
}
/**
 * Dispatch to the backend's perform_memory_operand callback for @p irn,
 * if the backend provides one; otherwise do nothing.
 *
 * @param irn  The node whose operand should be folded into a memory operand.
 * @param i    The operand position.
 */
void arch_perform_memory_operand(ir_node *irn, unsigned int i)
{
	const arch_irn_ops_t *const ops = get_irn_ops(irn);
	if (ops->perform_memory_operand == NULL)
		return;
	ops->perform_memory_operand(irn, i);
}
int arch_get_op_estimated_cost(const ir_node *irn)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
......
......@@ -64,8 +64,6 @@ extern arch_register_req_t const arch_no_requirement;
int arch_get_op_estimated_cost(const ir_node *irn);
void arch_perform_memory_operand(ir_node *irn, unsigned i);
/**
* Get the register allocated for a value.
*/
......@@ -287,14 +285,6 @@ struct arch_irn_ops_t {
* @return The estimated cycle count for this operation
*/
int (*get_op_estimated_cost)(const ir_node *irn);
/**
* Ask the backend to assimilate @p reload of operand @p i into @p irn.
*
* @param irn The node.
* @param i The position of the reload.
*/
void (*perform_memory_operand)(ir_node *irn, unsigned i);
};
/**
......
......@@ -141,7 +141,7 @@ static void dump(unsigned mask, ir_graph *irg,
*/
static void memory_operand_walker(ir_node *irn, void *env)
{
(void)env;
const regalloc_if_t *regif = (const regalloc_if_t*)env;
foreach_irn_in(irn, i, in) {
if (!arch_irn_is(skip_Proj(in), reload))
continue;
......@@ -150,16 +150,18 @@ static void memory_operand_walker(ir_node *irn, void *env)
/* only use memory operands, if the reload is only used by 1 node */
if (get_irn_n_edges(in) > 1)
continue;
arch_perform_memory_operand(irn, i);
regif->perform_memory_operand(irn, i);
}
}
/**
* Starts a walk for memory operands if supported by the backend.
*/
void check_for_memory_operands(ir_graph *irg)
void check_for_memory_operands(ir_graph *irg, const regalloc_if_t *regif)
{
irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
if (regif->perform_memory_operand == NULL)
return;
irg_walk_graph(irg, NULL, memory_operand_walker, (void*)regif);
}
static be_node_stats_t last_node_stats;
......@@ -182,13 +184,14 @@ static void pre_spill(be_chordal_env_t *const chordal_env,
/**
* Perform things which need to be done per register class after spilling.
*/
static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg)
static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg,
const regalloc_if_t *regif)
{
/* If we have a backend provided spiller, post spill is
* called in a loop after spilling for each register class.
* But we only need to fix stack nodes once in this case. */
be_timer_push(T_RA_SPILL_APPLY);
check_for_memory_operands(irg);
check_for_memory_operands(irg, regif);
be_timer_pop(T_RA_SPILL_APPLY);
/* verify schedule and register pressure */
......@@ -305,7 +308,7 @@ static void be_ra_chordal_main(ir_graph *irg, const regalloc_if_t *regif)
dump(BE_CH_DUMP_SPILL, irg, cls, "spill");
stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg) - pre_spill_cost);
post_spill(&chordal_env, irg);
post_spill(&chordal_env, irg, regif);
if (stat_ev_enabled) {
be_node_stats_t node_stats;
......
......@@ -81,6 +81,6 @@ struct be_ra_chordal_opts_t {
int lower_perm_opt;
};
void check_for_memory_operands(ir_graph *irg);
void check_for_memory_operands(ir_graph *irg, const regalloc_if_t *regif);
#endif
......@@ -596,7 +596,6 @@ bool is_be_node(const ir_node *irn)
/* Empty callback table: every optional arch_irn_ops_t hook is left NULL,
 * so generic queries fall back to their default behavior for nodes using
 * these ops. NOTE(review): presumably attached to generic backend (be_)
 * nodes given the surrounding is_be_node() code — confirm at the users. */
arch_irn_ops_t const be_null_ops = {
.get_op_estimated_cost = NULL,
.perform_memory_operand = NULL,
};
static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p,
......
......@@ -1802,7 +1802,7 @@ static void spill(const regalloc_if_t *regif)
be_timer_pop(T_RA_SPILL);
be_timer_push(T_RA_SPILL_APPLY);
check_for_memory_operands(irg);
check_for_memory_operands(irg, regif);
be_timer_pop(T_RA_SPILL_APPLY);
be_dump(DUMP_RA, irg, "spill");
......
......@@ -41,6 +41,12 @@ struct regalloc_if_t {
*/
ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
ir_node *before);
/**
* Ask the backend to fold a reload at operand @p i of @p irn. This can
* be done by targets that support memory addressing modes.
*/
void (*perform_memory_operand)(ir_node *irn, unsigned i);
};
/**
......
......@@ -390,7 +390,6 @@ static void ia32_perform_memory_operand(ir_node *irn, unsigned int i)
/* Per-node callback table of the ia32 backend, queried through the
 * generic arch_irn_ops_t interface (cost estimation and folding of
 * reloads into memory operands). */
static const arch_irn_ops_t ia32_irn_ops = {
.get_op_estimated_cost = ia32_get_op_estimated_cost,
.perform_memory_operand = ia32_perform_memory_operand,
};
static bool gprof;
......@@ -1440,14 +1439,13 @@ static void ia32_mark_remat(ir_node *node)
set_ia32_is_remat(node);
}
/* Register-allocator interface of the ia32 backend: spill/reload
 * construction callbacks plus relative cost weights used by the spiller.
 * NOTE(review): 7/5 appear to be abstract relative costs (spill more
 * expensive than reload), not exact cycle counts — confirm against the
 * spill-cost heuristic that consumes them. */
static const regalloc_if_t ia32_regalloc_if = {
.spill_cost = 7,
.reload_cost = 5,
.mark_remat = ia32_mark_remat,
.new_spill = ia32_new_spill,
.new_reload = ia32_new_reload,
/* ia32 supports memory addressing modes, so reloads can be folded
 * directly into a using instruction's operand. */
.perform_memory_operand = ia32_perform_memory_operand,
};
static void ia32_generate_code(FILE *output, const char *cup_name)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment