Commit 036ede1b authored by Matthias Braun

add callback for constructing spill/reload

So we are not forced to use the generic be_Spill/be_Reload functions,
which don't work correctly with double-width register requirements.
This should really fix sparc float spill/reload this time.
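
For readers skimming the diff: the change boils down to two new callbacks on
arch_isa_if_t. A minimal restatement of their shape (taken from the bearch.h
hunk below; all types are libfirm's existing ones):

    /* The two callbacks this commit adds to arch_isa_if_t; this merely
     * restates the bearch.h hunk below for readability. */
    ir_node *(*new_spill)(ir_node *value, ir_node *after);
        /* builds a spill after "after"; returns a mode_M value */
    ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
                           ir_node *before);
        /* builds a reload before "before"; returns the restored value;
         * rewiring users of "value" is left to the caller */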
parent 2fc9b7da
......@@ -44,6 +44,7 @@
#include "../begnuas.h"
#include "../belistsched.h"
#include "../bestack.h"
#include "../bespillutil.h"
#include "bearch_TEMPLATE_t.h"
......@@ -412,6 +413,8 @@ const arch_isa_if_t TEMPLATE_isa_if = {
TEMPLATE_finish_irg,
TEMPLATE_emit_routine,
TEMPLATE_register_saved_by,
be_new_spill,
be_new_reload,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_TEMPLATE)
......
......@@ -46,6 +46,7 @@
#include "../belistsched.h"
#include "../beflags.h"
#include "../bespillslots.h"
#include "../bespillutil.h"
#include "../bestack.h"
#include "bearch_amd64_t.h"
......@@ -588,6 +589,8 @@ const arch_isa_if_t amd64_isa_if = {
amd64_finish_irg,
amd64_gen_routine,
amd64_register_saved_by,
be_new_spill,
be_new_reload
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
......
......@@ -53,6 +53,7 @@
#include "../bemodule.h"
#include "../beirg.h"
#include "../bespillslots.h"
#include "../bespillutil.h"
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
......@@ -621,6 +622,8 @@ const arch_isa_if_t arm_isa_if = {
arm_finish_irg,
arm_gen_routine,
NULL, /* register_saved_by */
be_new_spill,
be_new_reload,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_arm)
......
......@@ -593,6 +593,27 @@ struct arch_isa_if_t {
* @deprecated, only necessary if backend still uses beabi functions
*/
int (*register_saved_by)(const arch_register_t *reg, int callee);
/**
* Create a spill instruction. We assume that spill instructions
* do not need any additional registers and do not affect cpu-flags in any
* way.
* Constructs a sequence of instructions after @p after (the resulting nodes
* are already scheduled).
* Returns a mode_M value which is used as input for a reload instruction.
*/
ir_node *(*new_spill)(ir_node *value, ir_node *after);
/**
* Create a reload instruction. We assume that reload instructions do not
* need any additional registers and do not affect cpu-flags in any way.
* Constructs a sequence of instructions before @p before (the resulting
* nodes are already scheduled). A rewiring of users is not performed in
* this function.
* Returns a node representing the restored value.
*/
ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
ir_node *before);
};
#define arch_env_done(env) ((env)->impl->done(env))
......@@ -610,6 +631,9 @@ struct arch_isa_if_t {
#define arch_env_mark_remat(env,node) \
do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((node)); } while(0)
#define arch_env_new_spill(env,value,after) ((env)->impl->new_spill(value, after))
#define arch_env_new_reload(env,value,spilled,before) ((env)->impl->new_reload(value, spilled, before))
/**
* ISA base class.
*/
......
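
A hedged sketch of how the generic spiller drives these macros (the helper
function and its name are hypothetical; the call sites mirror the
bespillutil.c hunks below):

    /* Hypothetical helper, assuming libfirm's spill_env_t as used below:
     * spill "to_spill" after "after" and reload it before "before" via
     * the new backend callbacks. */
    static ir_node *example_spill_reload(spill_env_t *env, ir_node *to_spill,
                                         ir_node *after, ir_node *before)
    {
        /* the spill yields a mode_M value ... */
        ir_node *spill = arch_env_new_spill(env->arch_env, to_spill, after);
        /* ... which feeds the reload; the caller must still rewire users
         * of to_spill to the returned value */
        return arch_env_new_reload(env->arch_env, to_spill, spill, before);
    }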
......@@ -954,44 +954,6 @@ int be_get_IncSP_align(const ir_node *irn)
return a->align;
}
ir_node *be_spill(ir_node *block, ir_node *irn)
{
ir_graph *irg = get_Block_irg(block);
ir_node *frame = get_irg_frame(irg);
const arch_register_class_t *cls = arch_get_irn_reg_class(irn);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
ir_node *spill;
spill = be_new_Spill(cls, cls_frame, block, frame, irn);
return spill;
}
ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert,
ir_mode *mode, ir_node *spill)
{
ir_node *reload;
ir_node *bl = is_Block(insert) ? insert : get_nodes_block(insert);
ir_graph *irg = get_Block_irg(bl);
ir_node *frame = get_irg_frame(irg);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
assert(be_is_Spill(spill) || (is_Phi(spill) && get_irn_mode(spill) == mode_M));
reload = be_new_Reload(cls, cls_frame, bl, frame, spill, mode);
if (is_Block(insert)) {
do {
insert = sched_prev(insert);
} while (is_cfop(insert));
sched_add_after(insert, reload);
} else {
sched_add_before(insert, reload);
}
return reload;
}
static arch_irn_class_t be_node_classify(const ir_node *irn)
{
switch (get_irn_opcode(irn)) {
......
......@@ -363,28 +363,6 @@ int be_Return_append_node(ir_node *ret, ir_node *node);
ir_node *be_new_Start(dbg_info *dbgi, ir_node *block, int n_out);
/**
* Make a spill node.
*
* @param irn The node to be spilled.
* @param block the block where the spill should be placed
* @return The new spill node.
*/
ir_node *be_spill(ir_node *block, ir_node *irn);
/**
* Make a reload and insert it into the schedule.
*
* @param cls The register class of the reloaded value.
* @param insert The node in the schedule in front of which the reload is
* inserted.
* @param mode The mode of the original (spilled) value.
* @param spill The spill node corresponding to this reload.
* @return A freshly made reload.
*/
ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert,
ir_mode *mode, ir_node *spill);
enum {
n_be_CopyKeep_op = 0
};
......
......@@ -416,12 +416,9 @@ static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
spill = spillinfo->spills;
for ( ; spill != NULL; spill = spill->next) {
ir_node *after = spill->after;
ir_node *block = get_block(after);
after = determine_spill_point(after);
spill->spill = be_spill(block, to_spill);
sched_add_after(skip_Proj(after), spill->spill);
spill->spill = arch_env_new_spill(env->arch_env, to_spill, after);
DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
env->spill_count++;
}
......@@ -713,6 +710,39 @@ double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
return be_get_reload_costs(env, to_spill, before);
}
ir_node *be_new_spill(ir_node *value, ir_node *after)
{
ir_graph *irg = get_irn_irg(value);
ir_node *frame = get_irg_frame(irg);
const arch_register_class_t *cls = arch_get_irn_reg_class(value);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
ir_node *block = get_block(after);
ir_node *spill
= be_new_Spill(cls, cls_frame, block, frame, value);
sched_add_after(after, spill);
return spill;
}
ir_node *be_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
ir_graph *irg = get_irn_irg(value);
ir_node *frame = get_irg_frame(irg);
ir_node *block = get_block(before);
const arch_register_class_t *cls = arch_get_irn_reg_class(value);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(frame);
ir_mode *mode = get_irn_mode(value);
ir_node *reload;
assert(be_is_Spill(spill) || is_Phi(spill));
assert(get_irn_mode(spill) == mode_M);
reload = be_new_Reload(cls, cls_frame, block, frame, spill, mode);
sched_add_before(before, reload);
return reload;
}
/*
* ___ _ ____ _ _
* |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
......@@ -859,12 +889,11 @@ void be_insert_spills_reloads(spill_env_t *env)
/* process each spilled node */
foreach_set(env->spills, spill_info_t*, si) {
reloader_t *rld;
ir_node *to_spill = si->to_spill;
ir_mode *mode = get_irn_mode(to_spill);
ir_node **copies = NEW_ARR_F(ir_node*, 0);
double all_remat_costs = 0; /** costs when we would remat all nodes */
int force_remat = 0;
bool force_remat = false;
reloader_t *rld;
DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));
......@@ -924,7 +953,7 @@ void be_insert_spills_reloads(spill_env_t *env)
if (all_remat_costs < 0) {
DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
all_remat_costs));
force_remat = 1;
force_remat = true;
}
}
......@@ -945,8 +974,8 @@ void be_insert_spills_reloads(spill_env_t *env)
/* create a reload, use the first spill for now; SSA
* reconstruction for memory comes below */
assert(si->spills != NULL);
copy = be_reload(si->reload_cls, rld->reloader, mode,
si->spills->spill);
copy = arch_env_new_reload(env->arch_env, si->to_spill,
si->spills->spill, rld->reloader);
env->reload_count++;
}
......
......@@ -166,4 +166,16 @@ void be_get_total_spill_costs(ir_graph *irg, be_total_spill_costs_t *costs);
*/
int be_is_rematerializable(spill_env_t *env, const ir_node *to_remat, const ir_node *before);
/**
* Create a be_Spill node. This function is compatible with the
* arch_env->new_spill callback.
*/
ir_node *be_new_spill(ir_node *value, ir_node *after);
/**
* Create a be_Reload node. This function is compatible with the
* arch_env->new_reload callback.
*/
ir_node *be_new_reload(ir_node *value, ir_node *spilled, ir_node *before);
#endif
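
With these defaults declared, a backend without special spill constraints
only has to wire them into its interface table, as the TEMPLATE, amd64, arm
and ia32 hunks do. A minimal sketch using C99 designated initializers (the
real tables in this diff use positional initialization; example_isa_if is
hypothetical):

    #include "../bespillutil.h"  /* declares be_new_spill/be_new_reload */

    static const arch_isa_if_t example_isa_if = {
        /* all other callbacks omitted in this sketch */
        .new_spill  = be_new_spill,   /* generic be_Spill construction */
        .new_reload = be_new_reload,  /* generic be_Reload construction */
    };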
......@@ -66,6 +66,7 @@
#include "../be_dbgout.h"
#include "../beblocksched.h"
#include "../bemachine.h"
#include "../bespillutil.h"
#include "../bespillslots.h"
#include "../bemodule.h"
#include "../begnuas.h"
......@@ -2235,6 +2236,8 @@ const arch_isa_if_t ia32_isa_if = {
ia32_finish, /* called before codegen */
ia32_emit, /* emit && done */
ia32_register_saved_by,
be_new_spill,
be_new_reload
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32)
......
......@@ -59,6 +59,7 @@
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
#include "../beutil.h"
#include "bearch_sparc_t.h"
......@@ -536,6 +537,48 @@ static const lc_opt_table_entry_t sparc_options[] = {
LC_OPT_LAST
};
static ir_node *sparc_new_spill(ir_node *value, ir_node *after)
{
ir_node *block = get_block(after);
ir_graph *irg = get_irn_irg(value);
ir_node *frame = get_irg_frame(irg);
ir_node *mem = get_irg_no_mem(irg);
ir_mode *mode = get_irn_mode(value);
ir_node *store;
if (mode_is_float(mode)) {
store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true);
} else {
store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL,
0, true);
}
sched_add_after(after, store);
return store;
}
static ir_node *sparc_new_reload(ir_node *value, ir_node *spill,
ir_node *before)
{
ir_node *block = get_block(before);
ir_graph *irg = get_irn_irg(value);
ir_node *frame = get_irg_frame(irg);
ir_mode *mode = get_irn_mode(value);
ir_node *load;
ir_node *res;
if (mode_is_float(mode)) {
load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true);
} else {
load = new_bd_sparc_Ld_imm(NULL, block, frame, spill, mode, NULL, 0,
true);
}
sched_add_before(before, load);
assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res);
res = new_r_Proj(load, mode, pn_sparc_Ld_res);
return res;
}
const arch_isa_if_t sparc_isa_if = {
sparc_init,
sparc_lower_for_target,
......@@ -558,6 +601,8 @@ const arch_isa_if_t sparc_isa_if = {
sparc_finish,
sparc_emit_routine,
NULL, /* register_saved_by */
sparc_new_spill,
sparc_new_reload
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_sparc)
......
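
The sparc backend overrides the callbacks rather than using the generic
helpers: the store/load opcode is chosen from the value's mode at the moment
the spill is constructed, which is what lets double-width float spills come
out right. The core dispatch, restated from the hunk above:

    /* restated from sparc_new_spill above: pick Stf for float modes,
     * St for everything else, at spill-construction time */
    if (mode_is_float(mode)) {
        store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true);
    } else {
        store = new_bd_sparc_St_imm(NULL, block, value, frame, mem, mode, NULL,
                                    0, true);
    }

As a consequence, the after-RA walker that rewrote generic be_Spill/be_Reload
nodes into sparc stores and loads becomes dead code and is deleted in the
next hunk.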
......@@ -350,89 +350,6 @@ static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
op->ops.generic = (op_func) func;
}
/**
* transform reload node => load
*/
static void transform_Reload(ir_node *node)
{
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
ir_node *mem = get_irn_n(node, n_be_Reload_mem);
ir_mode *mode = get_irn_mode(node);
ir_entity *entity = be_get_frame_entity(node);
const arch_register_t *reg;
ir_node *proj;
ir_node *load;
ir_node *sched_point = sched_prev(node);
if (mode_is_float(mode)) {
load = create_ldf(dbgi, block, ptr, mem, mode, entity, 0, true);
} else {
load = new_bd_sparc_Ld_imm(dbgi, block, ptr, mem, mode, entity, 0,
true);
}
sched_add_after(sched_point, load);
sched_remove(node);
assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res);
proj = new_rd_Proj(dbgi, load, mode, pn_sparc_Ld_res);
reg = arch_get_irn_register(node);
arch_set_irn_register(proj, reg);
exchange(node, proj);
}
/**
* transform spill node => store
*/
static void transform_Spill(ir_node *node)
{
ir_node *block = get_nodes_block(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *ptr = get_irn_n(node, n_be_Spill_frame);
ir_graph *irg = get_irn_irg(node);
ir_node *mem = get_irg_no_mem(irg);
ir_node *val = get_irn_n(node, n_be_Spill_val);
ir_mode *mode = get_irn_mode(val);
ir_entity *entity = be_get_frame_entity(node);
ir_node *sched_point;
ir_node *store;
sched_point = sched_prev(node);
if (mode_is_float(mode)) {
store = create_stf(dbgi, block, val, ptr, mem, mode, entity, 0, true);
} else {
store = new_bd_sparc_St_imm(dbgi, block, val, ptr, mem, mode, entity, 0,
true);
}
sched_remove(node);
sched_add_after(sched_point, store);
exchange(node, store);
}
/**
* walker to transform be_Spill and be_Reload nodes
*/
static void sparc_after_ra_walker(ir_node *block, void *data)
{
ir_node *node, *prev;
(void) data;
for (node = sched_last(block); !sched_is_begin(node); node = prev) {
prev = sched_prev(node);
if (be_is_Reload(node)) {
transform_Reload(node);
} else if (be_is_Spill(node)) {
transform_Spill(node);
}
}
}
static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
{
be_fec_env_t *env = (be_fec_env_t*)data;
......@@ -488,8 +405,6 @@ void sparc_finish(ir_graph *irg)
be_assign_entities(fec_env, sparc_set_frame_entity, at_begin);
be_free_frame_entity_coalescer(fec_env);
irg_block_walk_graph(irg, NULL, sparc_after_ra_walker, NULL);
sparc_introduce_prolog_epilog(irg);
/* fix stack entity offsets */
......