Commit f1d0154c authored by Matthias Braun

break arch_isa_if_t further apart by moving spilling-related callbacks into their own struct

parent 89a5cac6
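In short: the spill/reload callbacks and their cost fields leave arch_isa_if_t and move into a new regalloc_if_t (declared in bera.h), which each backend now builds itself and passes explicitly to be_step_regalloc(). A condensed sketch of the new interface, assembled from the hunks below (doc comments shortened):

struct regalloc_if_t {
	unsigned spill_cost;    /* cost of a spill node */
	unsigned reload_cost;   /* cost of a reload node */
	/* optional (may be NULL): mark a node as rematerialized */
	void (*mark_remat)(ir_node *node);
	/* construct a spill after @p after; returns a mode_M value */
	ir_node *(*new_spill)(ir_node *value, ir_node *after);
	/* construct a reload before @p before; returns the restored value */
	ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
	                       ir_node *before);
};

/* each backend provides its own instance, e.g. for amd64: */
static const regalloc_if_t amd64_regalloc_if = {
	.spill_cost  = 7,
	.reload_cost = 5,
	.new_spill   = amd64_new_spill,
	.new_reload  = amd64_new_reload,
};

/* ... and hands it to the register-allocation step instead of the allocator
 * reading the callbacks from the global isa_if: */
be_step_regalloc(irg, &amd64_regalloc_if);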
......@@ -13,6 +13,7 @@
#include "be_t.h"
#include "bemodule.h"
#include "benode.h"
#include "bera.h"
#include "bestack.h"
#include "gen_TEMPLATE_regalloc_if.h"
#include "irprog_t.h"
......@@ -34,6 +35,29 @@ static void TEMPLATE_select_instructions(ir_graph *irg)
be_dump(DUMP_BE, irg, "code-selection");
}
static ir_node *TEMPLATE_new_spill(ir_node *value, ir_node *after)
{
(void)value;
(void)after;
panic("spilling not implemented yet");
}
static ir_node *TEMPLATE_new_reload(ir_node *value, ir_node *spill,
ir_node *before)
{
(void)value;
(void)spill;
(void)before;
panic("reload not implemented yet");
}
static const regalloc_if_t TEMPLATE_regalloc_if = {
.spill_cost = 7,
.reload_cost = 5,
.new_spill = TEMPLATE_new_spill,
.new_reload = TEMPLATE_new_reload,
};
static void TEMPLATE_generate_code(FILE *output, const char *cup_name)
{
be_begin(output, cup_name);
......@@ -46,7 +70,7 @@ static void TEMPLATE_generate_code(FILE *output, const char *cup_name)
be_step_schedule(irg);
be_step_regalloc(irg);
be_step_regalloc(irg, &TEMPLATE_regalloc_if);
be_fix_stack_nodes(irg, &TEMPLATE_registers[REG_SP]);
......@@ -117,37 +141,17 @@ static int TEMPLATE_is_valid_clobber(const char *clobber)
return false;
}
static ir_node *TEMPLATE_new_spill(ir_node *value, ir_node *after)
{
(void)value;
(void)after;
panic("spilling not implemented yet");
}
static ir_node *TEMPLATE_new_reload(ir_node *value, ir_node *spill,
ir_node *before)
{
(void)value;
(void)spill;
(void)before;
panic("reload not implemented yet");
}
static arch_isa_if_t const TEMPLATE_isa_if = {
.n_registers = N_TEMPLATE_REGISTERS,
.registers = TEMPLATE_registers,
.n_register_classes = N_TEMPLATE_CLASSES,
.register_classes = TEMPLATE_reg_classes,
.spill_cost = 7,
.reload_cost = 5,
.init = TEMPLATE_init,
.finish = TEMPLATE_finish,
.get_params = TEMPLATE_get_backend_params,
.generate_code = TEMPLATE_generate_code,
.lower_for_target = TEMPLATE_lower_for_target,
.is_valid_clobber = TEMPLATE_is_valid_clobber,
.new_spill = TEMPLATE_new_spill,
.new_reload = TEMPLATE_new_reload,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_TEMPLATE)
......
......@@ -15,6 +15,7 @@
#include "beflags.h"
#include "beirg.h"
#include "bemodule.h"
#include "bera.h"
#include "besched.h"
#include "bespillslots.h"
#include "bestack.h"
......@@ -642,6 +643,13 @@ static void amd64_finish(void)
amd64_free_opcodes();
}
static const regalloc_if_t amd64_regalloc_if = {
.spill_cost = 7,
.reload_cost = 5,
.new_spill = amd64_new_spill,
.new_reload = amd64_new_reload,
};
static void amd64_generate_code(FILE *output, const char *cup_name)
{
amd64_constants = pmap_create();
......@@ -660,7 +668,7 @@ static void amd64_generate_code(FILE *output, const char *cup_name)
NULL, NULL);
be_timer_pop(T_RA_PREPARATION);
be_step_regalloc(irg);
be_step_regalloc(irg, &amd64_regalloc_if);
amd64_finish_and_emit(irg);
......@@ -781,16 +789,12 @@ static arch_isa_if_t const amd64_isa_if = {
.registers = amd64_registers,
.n_register_classes = N_AMD64_CLASSES,
.register_classes = amd64_reg_classes,
.spill_cost = 7,
.reload_cost = 5,
.init = amd64_init,
.finish = amd64_finish,
.get_params = amd64_get_backend_params,
.generate_code = amd64_generate_code,
.lower_for_target = amd64_lower_for_target,
.is_valid_clobber = amd64_is_valid_clobber,
.new_spill = amd64_new_spill,
.new_reload = amd64_new_reload,
.handle_intrinsics = amd64_handle_intrinsics,
};
......
......@@ -17,6 +17,7 @@
#include "begnuas.h"
#include "bemodule.h"
#include "benode.h"
#include "bera.h"
#include "besched.h"
#include "betranshlp.h"
#include "gen_arm_regalloc_if.h"
......@@ -162,6 +163,13 @@ static void arm_handle_intrinsics(ir_graph *irg)
irg_walk_graph(irg, handle_intrinsic, NULL, NULL);
}
static const regalloc_if_t arm_regalloc_if = {
.spill_cost = 7,
.reload_cost = 5,
.new_spill = arm_new_spill,
.new_reload = arm_new_reload,
};
static void arm_generate_code(FILE *output, const char *cup_name)
{
be_gas_emit_types = false;
......@@ -183,7 +191,7 @@ static void arm_generate_code(FILE *output, const char *cup_name)
be_sched_fix_flags(irg, &arm_reg_classes[CLASS_arm_flags], NULL, NULL, NULL);
be_timer_pop(T_RA_PREPARATION);
be_step_regalloc(irg);
be_step_regalloc(irg, &arm_regalloc_if);
be_timer_push(T_EMIT);
arm_finish_graph(irg);
......@@ -304,16 +312,12 @@ static arch_isa_if_t const arm_isa_if = {
.registers = arm_registers,
.n_register_classes = N_ARM_CLASSES,
.register_classes = arm_reg_classes,
.spill_cost = 7,
.reload_cost = 5,
.init = arm_init,
.finish = arm_finish,
.get_params = arm_get_libfirm_params,
.generate_code = arm_generate_code,
.lower_for_target = arm_lower_for_target,
.is_valid_clobber = arm_is_valid_clobber,
.new_spill = arm_new_spill,
.new_reload = arm_new_reload,
.handle_intrinsics = arm_handle_intrinsics,
};
......
......@@ -144,7 +144,7 @@ void be_begin(FILE *output, const char *cup_name);
void be_finish(void);
bool be_step_first(ir_graph *irg);
void be_step_regalloc(ir_graph *irg);
void be_step_regalloc(ir_graph *irg, const regalloc_if_t *regif);
void be_step_schedule(ir_graph *irg);
void be_step_last(ir_graph *irg);
/** @} */
......
......@@ -56,6 +56,7 @@ typedef struct be_ifg_t be_ifg_t;
typedef struct copy_opt_t copy_opt_t;
typedef struct be_main_env_t be_main_env_t;
typedef struct be_options_t be_options_t;
typedef struct regalloc_if_t regalloc_if_t;
typedef ir_entity *(*get_frame_entity_func)(const ir_node *node);
......
......@@ -305,8 +305,6 @@ struct arch_isa_if_t {
arch_register_t const *registers; /**< register array */
unsigned n_register_classes; /**< number of register classes */
arch_register_class_t const *register_classes; /**< register classes */
unsigned spill_cost; /**< cost for a spill node */
unsigned reload_cost; /**< cost for a reload node */
/**
* Initializes the isa interface. This is necessary before calling any
......@@ -341,31 +339,6 @@ struct arch_isa_if_t {
*/
int (*is_valid_clobber)(const char *clobber);
/**
* mark node as rematerialized
*/
void (*mark_remat)(ir_node *node);
/**
* Create a spill instruction. We assume that spill instructions do not need
* any additional registers and do not affect cpu-flags in any way.
* Construct a sequence of instructions after @p after (the resulting nodes
* are already scheduled).
* Returns a mode_M value which is used as input for a reload instruction.
*/
ir_node *(*new_spill)(ir_node *value, ir_node *after);
/**
* Create a reload instruction. We assume that reload instructions do not
* need any additional registers and do not affect cpu-flags in any way.
* Constructs a sequence of instruction before @p before (the resulting
* nodes are already scheduled). A rewiring of users is not performed in
* this function.
* Returns a value representing the restored value.
*/
ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
ir_node *before);
/**
* Called directly after initialization. Backend should handle all
* intrinsics here.
......
......@@ -263,7 +263,7 @@ static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg)
* @return Structure containing timer for the single phases or NULL if no
* timing requested.
*/
static void be_ra_chordal_main(ir_graph *irg)
static void be_ra_chordal_main(ir_graph *irg, const regalloc_if_t *regif)
{
be_timer_push(T_RA_OTHER);
......@@ -300,7 +300,7 @@ static void be_ra_chordal_main(ir_graph *irg)
pre_spill(&chordal_env, cls, irg);
be_timer_push(T_RA_SPILL);
be_do_spill(irg, cls);
be_do_spill(irg, cls, regif);
be_timer_pop(T_RA_SPILL);
dump(BE_CH_DUMP_SPILL, irg, cls, "spill");
stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg) - pre_spill_cost);
......@@ -335,10 +335,7 @@ void be_init_chordal_main(void)
lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
static be_ra_t be_ra_chordal_allocator = {
be_ra_chordal_main,
};
be_register_allocator("chordal", &be_ra_chordal_allocator);
be_register_allocator("chordal", be_ra_chordal_main);
lc_opt_add_table(chordal_grp, be_chordal_options);
be_add_module_list_opt(chordal_grp, "coloring", "select coloring method",
......
......@@ -559,7 +559,7 @@ void be_step_schedule(ir_graph *irg)
be_sched_verify(irg);
}
void be_step_regalloc(ir_graph *irg)
void be_step_regalloc(ir_graph *irg, const regalloc_if_t *regif)
{
if (stat_ev_enabled) {
stat_ev_dbl("bemain_costs_before_ra", be_estimate_irg_costs(irg));
......@@ -569,7 +569,7 @@ void be_step_regalloc(ir_graph *irg)
}
/* Do register allocation */
be_allocate_registers(irg);
be_allocate_registers(irg, regif);
be_regalloc_verify(irg, true);
if (stat_ev_enabled) {
......
......@@ -1794,11 +1794,11 @@ static void be_pref_alloc_cls(void)
/**
* Run the spiller on the current graph.
*/
static void spill(void)
static void spill(const regalloc_if_t *regif)
{
/* spill */
be_timer_push(T_RA_SPILL);
be_do_spill(irg, cls);
be_do_spill(irg, cls, regif);
be_timer_pop(T_RA_SPILL);
be_timer_push(T_RA_SPILL_APPLY);
......@@ -1811,7 +1811,7 @@ static void spill(void)
/**
* The pref register allocator for a whole procedure.
*/
static void be_pref_alloc(ir_graph *new_irg)
static void be_pref_alloc(ir_graph *new_irg, const regalloc_if_t *regif)
{
/* disable optimization callbacks as we cannot deal with same-input phis
* getting optimized away. */
......@@ -1838,7 +1838,7 @@ static void be_pref_alloc(ir_graph *new_irg)
normal_regs = rbitset_malloc(n_regs);
be_get_allocatable_regs(irg, cls, normal_regs);
spill();
spill(regif);
/* verify schedule and register pressure */
if (be_options.do_verify) {
......@@ -1871,7 +1871,6 @@ static void be_pref_alloc(ir_graph *new_irg)
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc)
void be_init_pref_alloc(void)
{
static be_ra_t be_ra_pref = { be_pref_alloc };
be_register_allocator("pref", &be_ra_pref);
be_register_allocator("pref", be_pref_alloc);
FIRM_DBG_REGISTER(dbg, "firm.be.prefalloc");
}
......@@ -28,21 +28,18 @@
/** The list of register allocators */
static be_module_list_entry_t *register_allocators;
static be_ra_t *selected_allocator;
static allocate_func selected_allocator;
void be_register_allocator(const char *name, be_ra_t *allocator)
void be_register_allocator(const char *name, allocate_func allocator)
{
if (selected_allocator == NULL)
selected_allocator = allocator;
be_add_module_to_list(&register_allocators, name, allocator);
}
void be_allocate_registers(ir_graph *irg)
void be_allocate_registers(ir_graph *irg, const regalloc_if_t *regif)
{
assert(selected_allocator != NULL);
if (selected_allocator != NULL) {
selected_allocator->allocate(irg);
}
selected_allocator(irg, regif);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_ra)
......
......@@ -13,16 +13,43 @@
#define FIRM_BE_BERA_H
#include "firm_types.h"
#include "be_types.h"
typedef struct be_ra_t {
void (*allocate)(ir_graph *irg); /**< allocate registers on a graph */
} be_ra_t;
struct regalloc_if_t {
unsigned spill_cost; /**< cost for a spill node */
unsigned reload_cost; /**< cost for a reload node */
void be_register_allocator(const char *name, be_ra_t *allocator);
/** mark node as rematerialized */
void (*mark_remat)(ir_node *node);
/**
* Create a spill instruction. We assume that spill instructions do not need
* any additional registers and do not affect cpu-flags in any way.
* Construct a sequence of instructions after @p after (the resulting nodes
* are already scheduled).
* Returns a mode_M value which is used as input for a reload instruction.
*/
ir_node *(*new_spill)(ir_node *value, ir_node *after);
/**
* Create a reload instruction. We assume that reload instructions do not
* need any additional registers and do not affect cpu-flags in any way.
* Constructs a sequence of instruction before @p before (the resulting
* nodes are already scheduled). A rewiring of users is not performed in
* this function.
* Returns a value representing the restored value.
*/
ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
ir_node *before);
};
/**
* Do register allocation with currently selected register allocator
*/
void be_allocate_registers(ir_graph *irg);
void be_allocate_registers(ir_graph *irg, const regalloc_if_t *regif);
typedef void (*allocate_func)(ir_graph *irg, const regalloc_if_t *regif);
void be_register_allocator(const char *name, allocate_func allocator);
#endif
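For reference, the plumbing that carries the new struct from the backend down to the spiller, condensed from this commit's hunks: allocators are now registered as plain function pointers instead of be_ra_t wrappers, and the regalloc_if_t is threaded through be_allocate_registers(), the allocator, be_do_spill() and finally the spill environment:

typedef void (*allocate_func)(ir_graph *irg, const regalloc_if_t *regif);
void be_register_allocator(const char *name, allocate_func allocator);

/* registration, e.g. in bechordal_main.c and beprefalloc.c: */
be_register_allocator("chordal", be_ra_chordal_main);
be_register_allocator("pref",    be_pref_alloc);

/* the selected allocator forwards regif to the spiller ... */
be_do_spill(irg, cls, regif);                 /* -> selected_spiller(irg, cls, regif) */

/* ... which stores a copy in the spill environment: */
spill_env_t *senv = be_new_spill_env(irg, regif);   /* env->regif = *regif */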
......@@ -48,10 +48,10 @@ void be_register_spiller(const char *name, be_spill_func spiller)
be_add_module_to_list(&spillers, name, spiller);
}
void be_do_spill(ir_graph *irg, const arch_register_class_t *cls)
void be_do_spill(ir_graph *irg, const arch_register_class_t *cls,
const regalloc_if_t *regif)
{
assert(selected_spiller != NULL);
selected_spiller(irg, cls);
selected_spiller(irg, cls, regif);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilloptions)
......
......@@ -13,11 +13,13 @@
#define FIRM_BE_BESPILL_H
#include "bearch.h"
#include "bera.h"
extern bool be_coalesce_spill_slots;
extern bool be_do_remats;
typedef void (*be_spill_func)(ir_graph *irg, const arch_register_class_t *cls);
typedef void (*be_spill_func)(ir_graph *irg, const arch_register_class_t *cls,
const regalloc_if_t *regif);
/**
* Register a new spill algorithm.
......@@ -34,6 +36,7 @@ void be_register_spiller(const char *name, be_spill_func spiller);
* @param irg the graph to spill on
* @param cls the register class to spill
*/
void be_do_spill(ir_graph *irg, const arch_register_class_t *cls);
void be_do_spill(ir_graph *irg, const arch_register_class_t *cls,
const regalloc_if_t *regif);
#endif
......@@ -832,7 +832,8 @@ static void fix_block_borders(ir_node *block, void *data)
}
}
static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls)
static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls,
const regalloc_if_t *regif)
{
be_assure_live_sets(irg);
......@@ -854,7 +855,7 @@ static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls)
ws = new_workset();
uses = be_begin_uses(irg, lv);
loop_ana = be_new_loop_pressure(irg, cls);
senv = be_new_spill_env(irg);
senv = be_new_spill_env(irg, regif);
blocklist = be_get_cfgpostorder(irg);
temp_workset = new_workset();
stat_ev_tim_pop("belady_time_init");
......
......@@ -306,13 +306,14 @@ static void spill_block(ir_node *block, void *data)
ir_nodeset_destroy(&live_nodes);
}
static void be_spill_daemel(ir_graph *irg, const arch_register_class_t *new_cls)
static void be_spill_daemel(ir_graph *irg, const arch_register_class_t *new_cls,
const regalloc_if_t *regif)
{
n_regs = be_get_n_allocatable_regs(irg, new_cls);
be_assure_live_sets(irg);
spill_env = be_new_spill_env(irg);
spill_env = be_new_spill_env(irg, regif);
cls = new_cls;
lv = be_get_irg_liveness(irg);
spilled_nodes = bitset_malloc(get_irg_last_idx(irg));
......@@ -330,6 +331,6 @@ static void be_spill_daemel(ir_graph *irg, const arch_register_class_t *new_cls)
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_daemelspill)
void be_init_daemelspill(void)
{
be_register_spiller("daemel", &be_spill_daemel);
be_register_spiller("daemel", be_spill_daemel);
FIRM_DBG_REGISTER(dbg, "firm.be.spilldaemel");
}
......@@ -81,8 +81,7 @@ struct spill_env_t {
spill_info_t *spills;
spill_info_t *mem_phis;
struct obstack obst;
int spill_cost; /**< the cost of a single spill node */
int reload_cost; /**< the cost of a reload node */
regalloc_if_t regif;
unsigned spill_count;
unsigned reload_count;
unsigned remat_count;
......@@ -109,12 +108,11 @@ static spill_info_t *get_spillinfo(spill_env_t *env, ir_node *value)
return info;
}
spill_env_t *be_new_spill_env(ir_graph *irg)
spill_env_t *be_new_spill_env(ir_graph *irg, const regalloc_if_t *regif)
{
spill_env_t *env = XMALLOCZ(spill_env_t);
env->irg = irg;
env->spill_cost = isa_if->spill_cost;
env->reload_cost = isa_if->reload_cost;
env->regif = *regif;
ir_nodehashmap_init(&env->spillmap);
obstack_init(&env->obst);
return env;
......@@ -258,7 +256,7 @@ static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
for (spill_t *spill = spillinfo->spills; spill != NULL;
spill = spill->next) {
ir_node *const after = be_move_after_schedule_first(spill->after);
spill->spill = isa_if->new_spill(to_spill, after);
spill->spill = env->regif.new_spill(to_spill, after);
DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
env->spill_count++;
}
......@@ -372,7 +370,8 @@ static int check_remat_conditions_costs(spill_env_t *env,
return REMAT_COST_INFINITE;
int costs = arch_get_op_estimated_cost(insn);
if (parentcosts + costs >= env->reload_cost + env->spill_cost)
int spillcosts = env->regif.reload_cost + env->regif.spill_cost;
if (parentcosts + costs >= spillcosts)
return REMAT_COST_INFINITE;
/* never rematerialize a node which modifies the flags.
......@@ -397,7 +396,7 @@ static int check_remat_conditions_costs(spill_env_t *env,
costs += check_remat_conditions_costs(env, arg, reloader,
parentcosts + costs);
if (parentcosts + costs >= env->reload_cost + env->spill_cost)
if (parentcosts + costs >= spillcosts)
return REMAT_COST_INFINITE;
}
......@@ -425,8 +424,8 @@ static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
/* create a copy of the node */
ir_node *const bl = get_nodes_block(reloader);
ir_node *const res = new_similar_node(spilled, bl, ins);
if (isa_if->mark_remat)
isa_if->mark_remat(res);
if (env->regif.mark_remat)
env->regif.mark_remat(res);
DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res,
spilled, reloader));
......@@ -442,7 +441,7 @@ double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
(void)to_spill;
ir_node *block = get_nodes_block(before);
double freq = get_block_execfreq(block);
return env->spill_cost * freq;
return env->regif.spill_cost * freq;
}
unsigned be_get_reload_costs_no_weight(spill_env_t *env,
......@@ -452,11 +451,11 @@ unsigned be_get_reload_costs_no_weight(spill_env_t *env,
if (be_do_remats) {
/* is the node rematerializable? */
unsigned costs = check_remat_conditions_costs(env, to_spill, before, 0);
if (costs < (unsigned) env->reload_cost)
if (costs < (unsigned) env->regif.reload_cost)
return costs;
}
return env->reload_cost;
return env->regif.reload_cost;
}
double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
......@@ -467,11 +466,11 @@ double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
if (be_do_remats) {
/* is the node rematerializable? */
int costs = check_remat_conditions_costs(env, to_spill, before, 0);
if (costs < env->reload_cost)
if (costs < (int)env->regif.reload_cost)
return costs * freq;
}
return env->reload_cost * freq;
return env->regif.reload_cost * freq;
}
double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
......@@ -501,7 +500,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
if (spillinfo->spilled_phi) {
/* TODO calculate correct costs...
* (though we can't remat this node anyway so no big problem) */
spillinfo->spill_costs = env->spill_cost * spill_execfreq;
spillinfo->spill_costs = env->regif.spill_cost * spill_execfreq;
return;
}
......@@ -516,13 +515,13 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
}
DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
spills_execfreq * env->spill_cost,
spill_execfreq * env->spill_cost));
spills_execfreq * env->regif.spill_cost,
spill_execfreq * env->regif.spill_cost));
/* multi-/latespill is advantageous -> return*/
if (spills_execfreq < spill_execfreq) {
DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
spillinfo->spill_costs = spills_execfreq * env->spill_cost;
spillinfo->spill_costs = spills_execfreq * env->regif.spill_cost;
return;
}
......@@ -533,7 +532,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
spill->spill = NULL;
spillinfo->spills = spill;
spillinfo->spill_costs = spill_execfreq * env->spill_cost;
spillinfo->spill_costs = spill_execfreq * env->regif.spill_cost;
DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
}
......@@ -583,7 +582,7 @@ void be_insert_spills_reloads(spill_env_t *env)
continue;
}
int remat_cost_delta = remat_cost - env->reload_cost;
int remat_cost_delta = remat_cost - env->regif.reload_cost;
rld->remat_cost_delta = remat_cost_delta;
ir_node *block = get_block(reloader);
double freq = get_block_execfreq(block);
......@@ -597,7 +596,7 @@ void be_insert_spills_reloads(spill_env_t *env)