Commit 5057c4b3 authored by Michael Beck

removed SCHEDULE_PROJS ifdefs

[r15032]
parent 638190a7
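Every hunk below follows the same pattern: code guarded by #ifdef SCHEDULE_PROJS (the configuration in which Proj nodes were placed in the schedule) is deleted, and code guarded by #ifndef SCHEDULE_PROJS becomes unconditional. As an illustrative sketch only (not part of the commit), the surviving idiom looks roughly like this; it assumes libfirm's node and edge API exactly as it appears in the hunks (get_irn_mode, mode_T, foreach_out_edge, get_edge_src_irn, is_Proj) and the usual libfirm headers, and the helper name visit_node_defs is hypothetical:

#include "irnode.h"
#include "irmode.h"
#include "iredges.h"

/* Since Projs are no longer scheduled, a scheduled mode_T node is treated
 * as the definition point of all its values, which are reached through
 * its Proj out-edges instead of through neighbouring schedule entries. */
static void visit_node_defs(ir_node *irn)
{
	if (get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			/* dependency edges may also leave a mode_T node */
			if (!is_Proj(proj))
				continue;
			/* ... treat proj as a value defined at irn ... */
			(void)proj; /* placeholder for the real handling */
		}
	} else {
		/* ... treat irn itself as the defined value ... */
	}
}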
@@ -817,7 +817,6 @@ static void pressure(ir_node *block, void *env_ptr)
DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
#ifndef SCHEDULE_PROJS
if (get_irn_mode(irn) == mode_T) {
const ir_edge_t *edge;
@@ -836,7 +835,7 @@ static void pressure(ir_node *block, void *env_ptr)
}
}
}
#endif
/*
* If the node defines some value, which can be put into a
* register of the current class, make a border for it.
@@ -121,14 +121,6 @@ be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn)
pre_colored += arch_get_irn_register(arch_env, p) != NULL;
}
}
#ifdef SCHEDULE_PROJS
/* When Projs are scheduled, we need to find the first
non-Proj instruction in the schedule */
for (p = sched_next(irn); is_Proj(p); p = sched_next(p));
insn->next_insn = p;
#endif
} else if (arch_irn_consider_in_reg_alloc(arch_env, env->cls, irn)) {
/* only one def, create one operand */
o.req = arch_get_register_req(arch_env, irn, -1);
@@ -130,9 +130,6 @@ ir_node *insert_Perm_after(be_irg_t *birg,
ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
arch_set_irn_register(arch_env, proj, reg);
#ifdef SCHEDULE_PROJS
sched_add_after(curr, proj);
#endif
curr = proj;
be_ssa_construction_init(&senv, birg);
@@ -410,48 +410,6 @@ static void add_to_sched(block_sched_env_t *env, ir_node *irn)
make_users_ready(env, irn);
}
#ifdef SCHEDULE_PROJS
/**
* Add the proj nodes of a tuple-mode irn to the schedule immediately
* after the tuple-moded irn. By pinning the projs after the irn, no
* other nodes can create a new lifetime between the tuple-moded irn and
* one of its projs. This should render a realistic image of a
* tuple-moded irn, which in fact models a node which defines multiple
* values.
*
* @param irn The tuple-moded irn.
*/
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
const ir_edge_t *edge;
assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
if (is_Bad(irn))
return;
/* non-proj nodes can have dependency edges to tuple nodes. */
foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
ir_node *out = get_edge_src_irn(edge);
make_ready(env, irn, out);
}
/* schedule the normal projs */
foreach_out_edge(irn, edge) {
ir_node *out = get_edge_src_irn(edge);
assert(is_Proj(out) && "successor of a modeT node must be a proj");
if (get_irn_mode(out) == mode_T) {
add_tuple_projs(env, out);
} else {
add_to_sched(env, out);
}
}
}
#endif
/**
* Perform list scheduling on a block.
*
@@ -564,11 +522,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
/* Add the node to the schedule. */
add_to_sched(&be, irn);
#ifdef SCHEDULE_PROJS
if (get_irn_mode(irn) == mode_T)
add_tuple_projs(&be, irn);
#endif
/* remove the scheduled node from the ready list. */
ir_nodeset_remove(&be.cands, irn);
}
@@ -769,7 +769,6 @@ void be_liveness_transfer(const arch_env_t *arch_env,
* function. */
assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
#ifndef SCHEDULE_PROJS
if (get_irn_mode(node) == mode_T) {
const ir_edge_t *edge;
@@ -781,7 +780,6 @@ void be_liveness_transfer(const arch_env_t *arch_env,
}
}
}
#endif
if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
ir_nodeset_remove(nodeset, node);
@@ -350,11 +350,6 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
set_Proj_proj(pairs[i].out_node, get_Proj_proj(pairs[i].in_node));
}
#ifdef SCHEDULE_PROJS
/* remove the proj from the schedule */
sched_remove(pairs[i].out_node);
#endif
/* reroute the edges from the proj to the argument */
exchange(pairs[i].out_node, pairs[i].in_node);
//edges_reroute(pairs[i].out_node, pairs[i].in_node, env->birg->irg);
@@ -466,25 +461,12 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
/* set as in for next Perm */
pairs[pidx].in_node = res1;
}
else {
#ifdef SCHEDULE_PROJS
sched_remove(res1);
#endif
}
#ifdef SCHEDULE_PROJS
sched_remove(res2);
#endif
set_Proj_pred(res2, cpyxchg);
set_Proj_proj(res2, 0);
set_Proj_pred(res1, cpyxchg);
set_Proj_proj(res1, 1);
#ifdef SCHEDULE_PROJS
sched_add_after(sched_point, res1);
sched_add_after(sched_point, res2);
#endif
arch_set_irn_register(arch_env, res2, cycle->elems[i + 1]);
arch_set_irn_register(arch_env, res1, cycle->elems[i]);
@@ -504,10 +486,6 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
arch_set_irn_register(arch_env, cpyxchg, cycle->elems[i + 1]);
n_ops++;
#ifdef SCHEDULE_PROJS
/* remove the proj from the schedule */
sched_remove(res2);
#endif
/* exchange copy node and proj */
exchange(res2, cpyxchg);
@@ -1006,11 +984,6 @@ found_front:
/* reroute all users of the proj to the moved node. */
edges_reroute(proj, move, irg);
#ifdef SCHEDULE_PROJS
/* remove the proj from the schedule. */
sched_remove(proj);
#endif
/* and link it to bad so it is no longer in the use array of the perm */
set_Proj_pred(proj, get_irg_bad(irg));
@@ -54,11 +54,7 @@ typedef struct _sched_info_t {
#define _sched_entry(list_head) (list_entry(list_head, sched_info_t, list))
#ifndef SCHEDULE_PROJS
#define get_irn_sched_info(irn) get_irn_data(skip_Proj_const(irn), sched_info_t, sched_irn_data_offset)
#else
#define get_irn_sched_info(irn) get_irn_data(irn, sched_info_t, sched_irn_data_offset)
#endif
#define get_sched_info_irn(sched_info) get_irn_data_base(sched_info, sched_irn_data_offset)
@@ -96,10 +92,8 @@ static INLINE int to_appear_in_schedule(const ir_node *irn)
case iro_Jmp:
case iro_Break:
return 1;
#ifndef SCHEDULE_PROJS
case iro_Proj:
return 0;
#endif
default:
return is_data_node(irn);
}
@@ -222,9 +216,7 @@ static INLINE void _sched_add_before(ir_node *before, ir_node *irn)
sched_info_t *info = get_irn_sched_info(irn);
assert(_sched_is_scheduled(before));
assert(!_sched_is_scheduled(irn));
#ifndef SCHEDULE_PROJS
assert(!is_Proj(irn));
#endif
list_add_tail(&info->list, &get_irn_sched_info(before)->list);
_sched_set_time_stamp(irn);
info->scheduled = 1;
@@ -241,9 +233,7 @@ static INLINE void _sched_add_after(ir_node *after, ir_node *irn)
sched_info_t *info = get_irn_sched_info(irn);
assert(_sched_is_scheduled(after));
assert(!_sched_is_scheduled(irn));
#ifndef SCHEDULE_PROJS
assert(!is_Proj(irn));
#endif
list_add(&info->list, &get_irn_sched_info(after)->list);
_sched_set_time_stamp(irn);
info->scheduled = 1;
@@ -692,15 +692,6 @@ ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res, spilled, reloader));
#ifdef SCHEDULE_PROJS
/* insert in schedule */
sched_reset(res);
sched_add_before(reloader, res);
#ifdef FIRM_STATISTICS
if (! is_Proj(res))
env->remat_count++;
#endif
#else
if (! is_Proj(res)) {
/* insert in schedule */
sched_reset(res);
@@ -709,7 +700,6 @@ ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
env->remat_count++;
#endif
}
#endif /* SCHEDULE_PROJS */
return res;
}
@@ -189,9 +189,6 @@ static void insert_all_perms_walker(ir_node *bl, void *data) {
pp->proj = proj;
assert(get_reg(pp->arg));
set_reg(proj, get_reg(pp->arg));
#ifdef SCHEDULE_PROJS
sched_add_after(insert_after, proj);
#endif
insert_after = proj;
DBG((dbg, LEVEL_2, "Copy register assignment %s from %+F to %+F\n", get_reg(pp->arg)->name, pp->arg, pp->proj));
}
@@ -128,11 +128,9 @@ void dump_allocated_irg(arch_env_t *arch_env, ir_graph *irg, char *suffix)
*/
static int sched_edge_hook(FILE *F, ir_node *irn)
{
#ifndef SCHEDULE_PROJS
if (is_Proj(irn))
return 1;
#endif
if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
if (sched_is_scheduled(irn) && sched_has_prev(irn)) {
ir_node *prev = sched_prev(irn);
fprintf(F, "edge:{sourcename:\"");
PRINT_NODEID(irn);
@@ -252,19 +252,6 @@ static void verify_schedule_walker(ir_node *block, void *data) {
env->problem_found = 1;
}
#ifdef SCHEDULE_PROJS
/* check that all projs/keeps are behind their nodes */
if(is_Proj(node)) {
ir_node *prev = sched_prev(node);
while(is_Proj(prev))
prev = sched_prev(prev);
if(get_Proj_pred(node) != prev) {
ir_fprintf(stderr, "%+F not scheduled after its pred node in block %+F (%s)\n",
node, block, get_irg_dump_name(env->irg));
env->problem_found = 1;
}
}
#endif
if(be_is_Keep(node)) {
/* at least 1 of the keep arguments has to be its schedule
* predecessor */
@@ -998,9 +998,6 @@ static void transform_to_Load(ia32_code_gen_t *cg, ir_node *node) {
if (sched_point) {
sched_add_after(sched_point, new_op);
#ifdef SCHEDULE_PROJS
sched_add_after(new_op, proj);
#endif
sched_remove(node);
}
@@ -119,9 +119,6 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
sched_add_before(before, load);
load_res = new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
#ifdef SCHEDULE_PROJS
sched_add_before(before, load_res);
#endif
/* TODO: make the actual mode configurable in ChangeCW... */
or = new_rd_ia32_Or(NULL, irg, block, noreg, noreg, load_res, noreg,
@@ -186,9 +186,7 @@ static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
// create stackpointer proj
curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
arch_set_irn_register(cg->arch_env, curr_sp, spreg);
#ifdef SCHEDULE_PROJS
sched_add_before(irn, curr_sp);
#endif
// create memory proj
mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
@@ -1500,13 +1498,6 @@ static void optimize_am(ir_node *irn, void *env) {
set_Proj_pred(mem_proj, irn);
set_Proj_proj(mem_proj, 1);
#ifdef SCHEDULE_PROJS
if(sched_is_scheduled(irn)) {
sched_add_after(irn, res_proj);
sched_add_after(irn, mem_proj);
}
#endif
}
try_kill(load);
@@ -730,7 +730,6 @@ static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_vfp];
const arch_env_t *arch_env = sim->arch_env;
#ifndef SCHEDULE_PROJS
if (get_irn_mode(irn) == mode_T) {
const ir_edge_t *edge;
@@ -743,7 +742,6 @@ static vfp_liveness vfp_liveness_transfer(x87_simulator *sim, ir_node *irn, vfp_
}
}
}
#endif
if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
const arch_register_t *reg = x87_get_irn_register(sim, irn);