Commit 927eac90 authored by Matthias Braun

Rewrote spillslot handling:

- No need to set strange spill contexts on spills anymore
- Spill slots are now assigned and coalesced in a separate pass, after spilling/register allocation of all register classes
- There may be cases where the new code inserts memcopies; these are not implemented in the ia32 backend yet
parent 53091a3e
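In short, the backend API changes like this. The sketch below uses only names that appear in this diff; the wrapper function, its arguments, and the header names are illustrative assumptions, not the real call sites:

#include "benode_t.h"      /* assumed header for be_spill() and the be_* nodes  */
#include "bespillslots.h"  /* be_coalesce_spillslots(), added by this commit    */

/* Hypothetical driver fragment, not actual backend code. */
static void spill_flow_sketch(const arch_env_t *arch_env,
                              be_chordal_env_t *chordal_env,
                              ir_node *irn)
{
	/* be_spill() lost its third argument: no spill context is set anymore. */
	ir_node *spill = be_spill(arch_env, irn);
	(void) spill;

	/* A single separate pass, run after spilling/register allocation of all
	 * register classes, assigns and coalesces the spill slots. It may insert
	 * be_MemPerm nodes (memcopies) that the ia32 backend cannot lower yet. */
	be_coalesce_spillslots(chordal_env);
}

Per-slot frame entities of a be_MemPerm are attached with the new be_set_MemPerm_in_entity()/be_set_MemPerm_out_entity() accessors introduced further down in this diff.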
@@ -56,6 +56,7 @@
#include "bespillbelady.h"
#include "bespillmorgan.h"
#include "bespillslots.h"
#include "belower.h"
#ifdef WITH_ILP
@@ -481,8 +482,6 @@ static be_ra_timer_t *be_ra_chordal_main(const be_irg_t *bi)
);
dump(BE_CH_DUMP_SPILL, irg, chordal_env.cls, "-spill", dump_ir_block_graph_sched);
be_compute_spill_offsets(&chordal_env);
//be_coalesce_spillslots(&chordal_env);
check_for_memory_operands(&chordal_env);
be_abi_fix_stack_nodes(bi->abi, chordal_env.lv);
@@ -578,6 +577,8 @@ static be_ra_timer_t *be_ra_chordal_main(const be_irg_t *bi)
bitset_free(chordal_env.ignore_colors);
}
be_coalesce_spillslots(&chordal_env);
BE_TIMER_PUSH(ra_timer.t_epilog);
dump(BE_CH_DUMP_LOWER, irg, NULL, "-spilloff", dump_ir_block_graph_sched);
@@ -116,7 +116,6 @@ static int start_vm(jni_env_t *env, int argc, char *argv[])
JavaVMInitArgs args;
JavaVMOption *opts;
int result = 0;
long (JNICALL * create_func)(JavaVM **, void **, void *) = find_jvm_symbol(jvm_lib, "JNI_CreateJavaVM");
if(!create_func) {
@@ -139,7 +138,7 @@ static int start_vm(jni_env_t *env, int argc, char *argv[])
ret = create_func(&env->jvm, (void **) &env->jni, &args);
free(opts);
if(ret != JNI_OK) {
fprintf(stderr, "JNI_CreateJavaVM returned errrocode %d\n" , ret);
fprintf(stderr, "JNI_CreateJavaVM returned errrocode %ld\n" , ret);
return 0;
}
@@ -195,7 +194,7 @@ static jni_env_t *get_jvm(void)
snprintf(cp_param, sizeof(cp_param), "-Djava.class.path=%s", jar_file);
args[0] = cp_param;
if(!start_vm(&env, sizeof(args) / sizeof(args[0], args), args)) {
if(!start_vm(&env, sizeof(args) / sizeof(args[0]), args)) {
fprintf(stderr, "Couldn't initialize java VM\n");
abort();
}
@@ -48,17 +48,6 @@
static unsigned be_node_tag = FOURCC('B', 'E', 'N', 'O');
#if 0
typedef enum _node_kind_t {
node_kind_spill,
node_kind_reload,
node_kind_perm,
node_kind_copy,
node_kind_kill,
node_kind_last
} node_kind_t;
#endif
typedef enum {
be_req_kind_old_limited,
be_req_kind_negate_old_limited,
@@ -118,15 +107,16 @@ typedef struct {
ir_type *call_tp; /**< The call type, copied from the original Call node. */
} be_call_attr_t;
/** The be_Spill attribute type. */
typedef struct {
be_frame_attr_t frame_attr;
ir_node *spill_ctx; /**< The node in whose context this spill was introduced. */
} be_spill_attr_t;
typedef struct {
be_node_attr_t node_attr;
entity **in_entities;
entity **out_entities;
} be_memperm_attr_t;
ir_op *op_be_Spill;
ir_op *op_be_Reload;
ir_op *op_be_Perm;
ir_op *op_be_MemPerm;
ir_op *op_be_Copy;
ir_op *op_be_Keep;
ir_op *op_be_CopyKeep;
@@ -204,27 +194,29 @@ void be_node_init(void) {
/* Acquire all needed opcodes. */
beo_base = get_next_ir_opcodes(beo_Last - 1);
op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_spill_attr_t), &be_node_op_ops);
op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Spill = new_ir_op(beo_base + beo_Spill, "be_Spill", op_pin_state_mem_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Reload = new_ir_op(beo_base + beo_Reload, "be_Reload", op_pin_state_mem_pinned, N, oparity_zero, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Perm = new_ir_op(beo_base + beo_Perm, "be_Perm", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_MemPerm = new_ir_op(beo_base + beo_MemPerm, "be_MemPerm", op_pin_state_mem_pinned, N, oparity_variable, 0, sizeof(be_memperm_attr_t), &be_node_op_ops);
op_be_Copy = new_ir_op(beo_base + beo_Copy, "be_Copy", op_pin_state_floats, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Keep = new_ir_op(beo_base + beo_Keep, "be_Keep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_CopyKeep = new_ir_op(beo_base + beo_CopyKeep, "be_CopyKeep", op_pin_state_pinned, K, oparity_variable, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_Call = new_ir_op(beo_base + beo_Call, "be_Call", op_pin_state_pinned, N, oparity_variable, 0, sizeof(be_call_attr_t), &be_node_op_ops);
op_be_Return = new_ir_op(beo_base + beo_Return, "be_Return", op_pin_state_pinned, X, oparity_variable, 0, sizeof(be_return_attr_t), &be_node_op_ops);
op_be_AddSP = new_ir_op(beo_base + beo_AddSP, "be_AddSP", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_SetSP = new_ir_op(beo_base + beo_SetSP, "be_SetSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_be_IncSP = new_ir_op(beo_base + beo_IncSP, "be_IncSP", op_pin_state_pinned, N, oparity_binary, 0, sizeof(be_stack_attr_t), &be_node_op_ops);
op_be_RegParams = new_ir_op(beo_base + beo_RegParams, "be_RegParams", op_pin_state_pinned, N, oparity_zero, 0, sizeof(be_node_attr_t), &be_node_op_ops);
op_be_StackParam = new_ir_op(beo_base + beo_StackParam, "be_StackParam", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameAddr = new_ir_op(beo_base + beo_FrameAddr, "be_FrameAddr", op_pin_state_pinned, N, oparity_unary, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameLoad = new_ir_op(beo_base + beo_FrameLoad, "be_FrameLoad", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_FrameStore = new_ir_op(beo_base + beo_FrameStore, "be_FrameStore", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_frame_attr_t), &be_node_op_ops);
op_be_Barrier = new_ir_op(beo_base + beo_Barrier, "be_Barrier", op_pin_state_pinned, N, oparity_any, 0, sizeof(be_node_attr_t), &be_node_op_ops);
set_op_tag(op_be_Spill, &be_node_tag);
set_op_tag(op_be_Reload, &be_node_tag);
set_op_tag(op_be_Perm, &be_node_tag);
set_op_tag(op_be_MemPerm, &be_node_tag);
set_op_tag(op_be_Copy, &be_node_tag);
set_op_tag(op_be_Keep, &be_node_tag);
set_op_tag(op_be_CopyKeep, &be_node_tag);
@@ -313,9 +305,9 @@ be_node_set_irn_reg(const void *_self, ir_node *irn, const arch_register_t *reg)
}
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill, ir_node *ctx)
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *to_spill)
{
be_spill_attr_t *a;
be_frame_attr_t *a;
ir_node *in[2];
ir_node *res;
@@ -323,9 +315,8 @@ ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_clas
in[1] = to_spill;
res = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
a = init_node_attr(res, 2);
a->frame_attr.ent = NULL;
a->frame_attr.offset = 0;
a->spill_ctx = ctx;
a->ent = NULL;
a->offset = 0;
be_node_set_reg_class(res, 0, cls_frame);
be_node_set_reg_class(res, 1, cls);
@@ -371,6 +362,31 @@ ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *b
return irn;
}
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[])
{
int i;
ir_node *frame = get_irg_frame(irg);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
ir_node *irn = new_ir_node(NULL, irg, bl, op_be_MemPerm, mode_T, n, in);
be_memperm_attr_t *attr;
init_node_attr(irn, n);
for(i = 0; i < n; ++i) {
be_node_set_reg_class(irn, i, cls_frame);
be_node_set_reg_class(irn, OUT_POS(i), cls_frame);
}
attr = get_irn_attr(irn);
attr->in_entities = obstack_alloc(irg->obst, n*sizeof(attr->in_entities[0]));
memset(attr->in_entities, 0, n*sizeof(attr->in_entities[0]));
attr->out_entities = obstack_alloc(irg->obst, n*sizeof(attr->out_entities[0]));
memset(attr->out_entities, 0, n*sizeof(attr->out_entities[0]));
return irn;
}
ir_node *be_new_Copy(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, ir_node *op)
{
ir_node *in[1];
@@ -665,6 +681,7 @@ int be_is_Reload (const ir_node *irn) { return be_get_irn_opcode(irn) ==
int be_is_Copy (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Copy ; }
int be_is_CopyKeep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_CopyKeep ; }
int be_is_Perm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Perm ; }
int be_is_MemPerm (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_MemPerm ; }
int be_is_Keep (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Keep ; }
int be_is_Call (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Call ; }
int be_is_Return (const ir_node *irn) { return be_get_irn_opcode(irn) == beo_Return ; }
@@ -702,6 +719,56 @@ entity *be_get_frame_entity(const ir_node *irn)
return NULL;
}
void be_set_frame_entity(const ir_node *irn, entity* ent)
{
be_frame_attr_t *a;
assert(be_has_frame_entity(irn));
a = get_irn_attr(irn);
a->ent = ent;
}
void be_set_MemPerm_in_entity(const ir_node *irn, int n, entity *ent)
{
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
assert(n < get_irn_arity(irn));
attr->in_entities[n] = ent;
}
entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
{
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
assert(n < get_irn_arity(irn));
return attr->in_entities[n];
}
void be_set_MemPerm_out_entity(const ir_node *irn, int n, entity *ent)
{
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
assert(n < get_irn_arity(irn));
attr->out_entities[n] = ent;
}
entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
{
be_memperm_attr_t *attr = get_irn_attr(irn);
assert(be_is_MemPerm(irn));
assert(n < get_irn_arity(irn));
return attr->out_entities[n];
}
static void be_limited(void *data, bitset_t *bs)
{
be_req_t *req = data;
@@ -825,145 +892,18 @@ be_stack_dir_t be_get_IncSP_direction(const ir_node *irn)
return a->dir;
}
void be_set_Spill_entity(ir_node *irn, entity *ent)
{
be_spill_attr_t *a = get_irn_attr(irn);
assert(be_is_Spill(irn));
a->frame_attr.ent = ent;
}
void be_set_Spill_context(ir_node *irn, ir_node *ctx)
{
be_spill_attr_t *a = get_irn_attr(irn);
assert(be_is_Spill(irn));
a->spill_ctx = ctx;
}
static ir_node *find_a_spill_walker(ir_node *irn, unsigned visited_nr)
{
unsigned nr = get_irn_visited(irn);
set_irn_visited(irn, visited_nr);
if(is_Phi(irn)) {
int i, n;
if(nr < visited_nr) {
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *n = find_a_spill_walker(get_irn_n(irn, i), visited_nr);
if(n != NULL)
return n;
}
}
}
else if(be_get_irn_opcode(irn) == beo_Spill)
return irn;
return NULL;
}
ir_node *be_get_Spill_context(const ir_node *irn) {
const be_spill_attr_t *a = get_irn_attr(irn);
assert(be_is_Spill(irn));
return a->spill_ctx;
}
/**
* Finds a spill for a reload.
* If the reload is directly using the spill, this is simple,
* else we perform DFS from the reload (over all PhiMs) and return
* the first spill node we find.
*/
static INLINE ir_node *find_a_spill(const ir_node *irn)
{
ir_graph *irg = get_irn_irg(irn);
unsigned visited_nr = get_irg_visited(irg) + 1;
assert(be_is_Reload(irn));
set_irg_visited(irg, visited_nr);
return find_a_spill_walker(be_get_Reload_mem(irn), visited_nr);
}
entity *be_get_spill_entity(const ir_node *irn)
{
switch(be_get_irn_opcode(irn)) {
case beo_Reload:
{
ir_node *spill = find_a_spill(irn);
return be_get_spill_entity(spill);
}
case beo_Spill:
{
be_spill_attr_t *a = get_irn_attr(irn);
return a->frame_attr.ent;
}
default:
assert(0 && "Must give spill/reload node");
break;
}
return NULL;
}
static void link_reload_walker(ir_node *irn, void *data)
{
ir_node **root = (ir_node **) data;
if(be_is_Reload(irn)) {
set_irn_link(irn, *root);
*root = irn;
}
}
void be_copy_entities_to_reloads(ir_graph *irg)
{
ir_node *irn = NULL;
irg_walk_graph(irg, link_reload_walker, NULL, (void *) &irn);
while(irn) {
be_frame_attr_t *a = get_irn_attr(irn);
entity *ent = be_get_spill_entity(irn);
a->ent = ent;
irn = get_irn_link(irn);
}
}
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *ctx)
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
{
ir_node *bl = get_nodes_block(irn);
ir_graph *irg = get_irn_irg(bl);
ir_node *frame = get_irg_frame(irg);
ir_node *insert = bl;
ir_node *spill;
const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, irn, -1);
const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
return spill;
#if 0
/*
* search the right insertion point. a spill of a phi cannot be put
* directly after the phi, if there are some phis behind the one which
* is spilled. Also, a spill of a Proj must be after all Projs of the
* same tuple node.
*
* Here's one special case:
* If the spill is in the start block, the spill must be after the frame
* pointer is set up. This is done by setting insert to the end of the block
* which is its default initialization (see above).
*/
insert = sched_next(irn);
if(insert != bl && bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
insert = sched_next(frame);
while((is_Phi(insert) || is_Proj(insert)) && !sched_is_end(insert))
insert = sched_next(insert);
sched_add_before(insert, spill);
spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
return spill;
#endif
}
ir_node *be_reload(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *insert, ir_mode *mode, ir_node *spill)
@@ -1303,7 +1243,6 @@ void be_phi_handler_reset(arch_irn_handler_t *handler)
h->regs = pmap_create();
}
/*
_ _ _ ____ _
| \ | | ___ __| | ___ | _ \ _ _ _ __ ___ _ __ (_)_ __ __ _
@@ -1409,13 +1348,6 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
}
switch(be_get_irn_opcode(irn)) {
case beo_Spill:
{
be_spill_attr_t *a = (be_spill_attr_t *) at;
ir_fprintf(f, "spill context: %+F\n", a->spill_ctx);
}
break;
case beo_IncSP:
{
be_stack_attr_t *a = (be_stack_attr_t *) at;
@@ -29,6 +29,7 @@
extern ir_op *op_be_Spill;
extern ir_op *op_be_Reload;
extern ir_op *op_be_Perm;
extern ir_op *op_be_MemPerm;
extern ir_op *op_be_Copy;
extern ir_op *op_be_Keep;
extern ir_op *op_be_CopyKeep;
@@ -49,6 +50,7 @@ typedef enum {
beo_Spill,
beo_Reload,
beo_Perm,
beo_MemPerm,
beo_Copy,
beo_Keep,
beo_CopyKeep,
@@ -106,7 +108,7 @@ enum {
/**
* Make a new Spill node.
*/
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *node_to_spill, ir_node *ctx);
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame, ir_graph *irg, ir_node *bl, ir_node *frame, ir_node *node_to_spill);
/**
* Position numbers for the be_Reload inputs.
@@ -141,6 +143,7 @@ void be_set_Copy_op(ir_node *cpy, ir_node *op);
* Make a new Perm node.
*/
ir_node *be_new_Perm(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
ir_node *be_new_MemPerm(const arch_env_t *arch_env, ir_graph *irg, ir_node *bl, int n, ir_node *in[]);
ir_node *be_new_Keep(const arch_register_class_t *cls, ir_graph *irg, ir_node *bl, int arity, ir_node *in[]);
ir_node *be_new_FrameLoad(const arch_register_class_t *cls_frame, const arch_register_class_t *cls_data,
@@ -280,7 +283,7 @@ ir_node *be_new_Barrier(ir_graph *irg, ir_node *bl, int n, ir_node *in[]);
* @param spill_ctx The context in which the spill is introduced (This is mostly == irn up to the case of Phis).
* @return The new spill node.
*/
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn, ir_node *spill_ctx);
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn);
/**
* Make a reload and insert it into the schedule.
@@ -313,6 +316,7 @@ int be_is_Spill(const ir_node *irn);
int be_is_Reload(const ir_node *irn);
int be_is_Copy(const ir_node *irn);
int be_is_Perm(const ir_node *irn);
int be_is_MemPerm(const ir_node *irn);
int be_is_Keep(const ir_node *irn);
int be_is_CopyKeep(const ir_node *irn);
int be_is_Call(const ir_node *irn);
@@ -336,21 +340,16 @@ int be_is_Barrier(const ir_node *irn);
*/
entity *be_get_frame_entity(const ir_node *irn);
void be_set_Spill_entity(ir_node *irn, entity *ent);
entity *be_get_spill_entity(const ir_node *irn);
void be_set_Spill_context(ir_node *irn, ir_node *ctx);
ir_node *be_get_Spill_context(const ir_node *irn);
void be_set_frame_entity(const ir_node *irn, entity* ent);
ir_node* be_get_Reload_mem(const ir_node *irn);
ir_node* be_get_Reload_frame(const ir_node* irn);
/**
* Set the entities of a Reload to the ones of the Spill it is pointing to.
* @param irg The graph.
*/
void be_copy_entities_to_reloads(ir_graph *irg);
void be_set_MemPerm_in_entity(const ir_node *irn, int n, entity* ent);
entity *be_get_MemPerm_in_entity(const ir_node *irn, int n);
void be_set_MemPerm_out_entity(const ir_node *irn, int n, entity* ent);
entity *be_get_MemPerm_out_entity(const ir_node *irn, int n);
/**
* Impose a register constraint on a backend node.
@@ -528,7 +528,7 @@ static INLINE void var_add_spills_and_reloads(be_raext_env_t *raenv, int var_nr)
/* all ordinary nodes must be spilled */
DBG((raenv->dbg, LEVEL_2, " spilling %+F\n", irn));
spill = be_spill(raenv->aenv, irn, ctx);
spill = be_spill(raenv->aenv, irn);
/* remember the spill */
pset_insert_ptr(spills, spill);
/**
/*
* Author: Daniel Grund, Sebastian Hack
* Date: 29.09.2005
* Copyright: (c) Universitaet Karlsruhe
* Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
*/
#ifndef BESPILL_H_
#define BESPILL_H_
@@ -45,18 +44,6 @@ void be_insert_spills_reloads(spill_env_t *senv);
*/
void be_spill_phi(spill_env_t *env, ir_node *node);
/**
* Places the necessary copies for the spilled phis in the graph
* This has to be done once before be_insert_spill_reloads, after
* all phis to spill have been marked with be_spill_phi.
*/
void be_place_copies(spill_env_t *env);
/**
* Computes the spill offsets for all spill nodes in the irg
*/
void be_compute_spill_offsets(be_chordal_env_t *cenv);
/**
* Sets the debug module of a spill environment.
*/
@@ -314,10 +314,10 @@ static void displace(belady_env_t *env, workset_t *new_vals, int is_usage) {
static void belady(ir_node *blk, void *env);
/*
* Computes set of live-ins for each block with multiple predecessors and
* places copies in the predecessors when phis get spilled
* Computes set of live-ins for each block with multiple predecessors
* and notifies spill algorithm which phis need to be spilled
*/
static void place_copy_walker(ir_node *block, void *data) {
static void spill_phi_walker(ir_node *block, void *data) {
belady_env_t *env = data;
block_info_t *block_info;
ir_node *first, *irn;
@@ -580,8 +580,7 @@ void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t
be_clear_links(chordal_env->irg);
/* Decide which phi nodes will be spilled and place copies for them into the graph */
irg_block_walk_graph(chordal_env->irg, place_copy_walker, NULL, &env);
be_place_copies(env.senv);
irg_block_walk_graph(chordal_env->irg, spill_phi_walker, NULL, &env);
/* Fix high register pressure with belady algorithm */
irg_block_walk_graph(chordal_env->irg, NULL, belady, &env);
/* belady was block-local, fix the global flow by adding reloads on the edges */
@@ -502,7 +502,7 @@ static int reduce_register_pressure_in_loop(morgan_env_t *env, const ir_loop *lo
return outer_spills_needed;
}
void be_spill_morgan(const be_chordal_env_t *chordal_env) {
void be_spill_morgan(be_chordal_env_t *chordal_env) {
morgan_env_t env;
FIRM_DBG_REGISTER(dbg, "ir.be.spillmorgan");
@@ -528,6 +528,9 @@ void be_spill_morgan(const be_chordal_env_t *chordal_env) {
/* construct control flow loop tree */
construct_cf_backedges(chordal_env->irg);
//dump_looptree(0, get_irg_loop(env.irg));
//dump_execfreqs(env.irg);
/* construct loop out edges and livethrough_unused sets for loops and blocks */
irg_block_walk_graph(chordal_env->irg, NULL, construct_loop_edges, &env);
construct_loop_livethrough_unused(&env, get_irg_loop(env.irg));
@@ -539,8 +542,6 @@
*/
reduce_register_pressure_in_loop(&env, get_irg_loop(env.irg), 0);
/* Place copies for spilled phis */
be_place_copies(env.senv);
/* Insert real spill/reload nodes and fix usages */
be_insert_spills_reloads(env.senv);
@@ -10,6 +10,6 @@
#include "be_t.h"
#include "bechordal.h"
void be_spill_morgan(const be_chordal_env_t *env);
void be_spill_morgan(be_chordal_env_t *env);
#endif
@@ -3053,7 +3053,7 @@ connect_all_spills_with_keep(spill_ilp_t * si)
}