Commit 2f0eeca3 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

be: Add and use macros to iterate only/all nodes except Phis in the schedule.

parent 605f090a
...@@ -199,15 +199,9 @@ static void fix_flags_walker(ir_node *block, void *env) ...@@ -199,15 +199,9 @@ static void fix_flags_walker(ir_node *block, void *env)
ir_node *flags_needed = NULL; ir_node *flags_needed = NULL;
ir_node *flag_consumers = NULL; ir_node *flag_consumers = NULL;
unsigned pn = (unsigned)-1; unsigned pn = (unsigned)-1;
ir_node *place = block; sched_foreach_non_phi_reverse(block, node) {
sched_foreach_reverse(block, node) {
mark_irn_visited(node); mark_irn_visited(node);
if (is_Phi(node)) {
place = node;
break;
}
if (node == flags_needed) { if (node == flags_needed) {
/* all ok */ /* all ok */
flags_needed = NULL; flags_needed = NULL;
...@@ -274,6 +268,7 @@ static void fix_flags_walker(ir_node *block, void *env) ...@@ -274,6 +268,7 @@ static void fix_flags_walker(ir_node *block, void *env)
if (flags_needed != NULL) { if (flags_needed != NULL) {
assert(get_nodes_block(flags_needed) != block); assert(get_nodes_block(flags_needed) != block);
ir_node *const place = be_move_after_schedule_first(block);
rematerialize_or_move(flags_needed, place, flag_consumers, pn, NULL); rematerialize_or_move(flags_needed, place, flag_consumers, pn, NULL);
flags_needed = NULL; flags_needed = NULL;
flag_consumers = NULL; flag_consumers = NULL;
......
...@@ -62,10 +62,7 @@ static unsigned be_compute_block_pressure(ir_node *const block, arch_register_cl ...@@ -62,10 +62,7 @@ static unsigned be_compute_block_pressure(ir_node *const block, arch_register_cl
be_liveness_end_of_block(lv, cls, block, &live_nodes); be_liveness_end_of_block(lv, cls, block, &live_nodes);
unsigned max_live = (unsigned)ir_nodeset_size(&live_nodes); unsigned max_live = (unsigned)ir_nodeset_size(&live_nodes);
sched_foreach_reverse(block, irn) { sched_foreach_non_phi_reverse(block, irn) {
if (is_Phi(irn))
break;
be_liveness_transfer(cls, irn, &live_nodes); be_liveness_transfer(cls, irn, &live_nodes);
unsigned cnt = (unsigned)ir_nodeset_size(&live_nodes); unsigned cnt = (unsigned)ir_nodeset_size(&live_nodes);
max_live = MAX(cnt, max_live); max_live = MAX(cnt, max_live);
......
...@@ -106,10 +106,7 @@ static arch_register_t const *get_free_register(ir_node *const perm, lower_env_t ...@@ -106,10 +106,7 @@ static arch_register_t const *get_free_register(ir_node *const perm, lower_env_t
arch_register_class_t const *cls = arch_get_irn_register(get_irn_n(perm, 0))->cls; arch_register_class_t const *cls = arch_get_irn_register(get_irn_n(perm, 0))->cls;
unsigned *free_regs = (unsigned*)ir_nodehashmap_get(arch_register_t const, &env->live_regs, perm); unsigned *free_regs = (unsigned*)ir_nodehashmap_get(arch_register_t const, &env->live_regs, perm);
sched_foreach_reverse(block, node) { sched_foreach_non_phi_reverse(block, node) {
if (is_Phi(node))
break;
/* If we later implement first the chains and then the cycles /* If we later implement first the chains and then the cycles
of the Perm, we *cannot* regard the Perm's own outputs as of the Perm, we *cannot* regard the Perm's own outputs as
free registers. */ free registers. */
......
...@@ -315,9 +315,7 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node, ...@@ -315,9 +315,7 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
return false; return false;
} }
} }
sched_foreach(succ, phi) { sched_foreach_phi(succ, phi) {
if (!is_Phi(phi))
break;
const arch_register_t *reg = arch_get_irn_register(phi); const arch_register_t *reg = arch_get_irn_register(phi);
const arch_register_req_t *req = arch_get_irn_register_req(phi); const arch_register_req_t *req = arch_get_irn_register_req(phi);
be_foreach_out(node, o) { be_foreach_out(node, o) {
......
...@@ -280,10 +280,7 @@ static void analyze_block(ir_node *block, void *data) ...@@ -280,10 +280,7 @@ static void analyze_block(ir_node *block, void *data)
ir_nodeset_init(&live_nodes); ir_nodeset_init(&live_nodes);
be_liveness_end_of_block(lv, cls, block, &live_nodes); be_liveness_end_of_block(lv, cls, block, &live_nodes);
sched_foreach_reverse(block, node) { sched_foreach_non_phi_reverse(block, node) {
if (is_Phi(node))
break;
be_foreach_definition(node, cls, value, req, be_foreach_definition(node, cls, value, req,
check_defs(&live_nodes, weight, value, req); check_defs(&live_nodes, weight, value, req);
); );
...@@ -370,10 +367,7 @@ static void create_congruence_class(ir_node *block, void *data) ...@@ -370,10 +367,7 @@ static void create_congruence_class(ir_node *block, void *data)
be_liveness_end_of_block(lv, cls, block, &live_nodes); be_liveness_end_of_block(lv, cls, block, &live_nodes);
/* check should be same constraints */ /* check should be same constraints */
sched_foreach_reverse(block, node) { sched_foreach_non_phi_reverse(block, node) {
if (is_Phi(node))
break;
be_foreach_definition(node, cls, value, req, be_foreach_definition(node, cls, value, req,
congruence_def(&live_nodes, value, req); congruence_def(&live_nodes, value, req);
); );
...@@ -381,10 +375,7 @@ static void create_congruence_class(ir_node *block, void *data) ...@@ -381,10 +375,7 @@ static void create_congruence_class(ir_node *block, void *data)
} }
/* check phi congruence classes */ /* check phi congruence classes */
sched_foreach(block, phi) { sched_foreach_phi(block, phi) {
if (!is_Phi(phi))
break;
if (!arch_irn_consider_in_reg_alloc(cls, phi)) if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue; continue;
...@@ -411,11 +402,9 @@ static void create_congruence_class(ir_node *block, void *data) ...@@ -411,11 +402,9 @@ static void create_congruence_class(ir_node *block, void *data)
if (interferes) if (interferes)
continue; continue;
/* any other phi has the same input? */ /* any other phi has the same input? */
sched_foreach(block, phi) { sched_foreach_phi(block, phi) {
ir_node *oop; ir_node *oop;
int oop_idx; int oop_idx;
if (!is_Phi(phi))
break;
if (!arch_irn_consider_in_reg_alloc(cls, phi)) if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue; continue;
oop = get_Phi_pred(phi, i); oop = get_Phi_pred(phi, i);
...@@ -1272,9 +1261,8 @@ static void add_phi_permutations(ir_node *block, int p) ...@@ -1272,9 +1261,8 @@ static void add_phi_permutations(ir_node *block, int p)
} }
/* check phi nodes */ /* check phi nodes */
bool need_permutation = false; bool need_permutation = false;
ir_node *phi = sched_first(block); sched_foreach_phi(block, phi) {
for ( ; is_Phi(phi); phi = sched_next(phi)) {
if (!arch_irn_consider_in_reg_alloc(cls, phi)) if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue; continue;
...@@ -1308,8 +1296,7 @@ static void add_phi_permutations(ir_node *block, int p) ...@@ -1308,8 +1296,7 @@ static void add_phi_permutations(ir_node *block, int p)
} }
/* change phi nodes to use the copied values */ /* change phi nodes to use the copied values */
phi = sched_first(block); sched_foreach_phi(block, phi) {
for ( ; is_Phi(phi); phi = sched_next(phi)) {
if (!arch_irn_consider_in_reg_alloc(cls, phi)) if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue; continue;
...@@ -1386,9 +1373,7 @@ static void assign_phi_registers(ir_node *block) ...@@ -1386,9 +1373,7 @@ static void assign_phi_registers(ir_node *block)
{ {
/* count phi nodes */ /* count phi nodes */
int n_phis = 0; int n_phis = 0;
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (!arch_irn_consider_in_reg_alloc(cls, node)) if (!arch_irn_consider_in_reg_alloc(cls, node))
continue; continue;
++n_phis; ++n_phis;
...@@ -1401,9 +1386,7 @@ static void assign_phi_registers(ir_node *block) ...@@ -1401,9 +1386,7 @@ static void assign_phi_registers(ir_node *block)
hungarian_problem_t *bp hungarian_problem_t *bp
= hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT); = hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT);
int n = 0; int n = 0;
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (!arch_irn_consider_in_reg_alloc(cls, node)) if (!arch_irn_consider_in_reg_alloc(cls, node))
continue; continue;
...@@ -1438,9 +1421,7 @@ static void assign_phi_registers(ir_node *block) ...@@ -1438,9 +1421,7 @@ static void assign_phi_registers(ir_node *block)
/* apply results */ /* apply results */
n = 0; n = 0;
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (!arch_irn_consider_in_reg_alloc(cls, node)) if (!arch_irn_consider_in_reg_alloc(cls, node))
continue; continue;
const arch_register_req_t *req const arch_register_req_t *req
...@@ -1589,12 +1570,8 @@ static void allocate_coalesce_block(ir_node *block, void *data) ...@@ -1589,12 +1570,8 @@ static void allocate_coalesce_block(ir_node *block, void *data)
} }
#endif #endif
/* assign instructions in the block */ /* assign instructions in the block, phis are already assigned */
sched_foreach(block, node) { sched_foreach_non_phi(block, node) {
/* phis are already assigned */
if (is_Phi(node))
continue;
rewire_inputs(node); rewire_inputs(node);
/* enforce use constraints */ /* enforce use constraints */
......
...@@ -178,6 +178,10 @@ static inline bool value_strictly_dominates(const ir_node *a, ...@@ -178,6 +178,10 @@ static inline bool value_strictly_dominates(const ir_node *a,
#define sched_foreach_reverse_before(before, irn) \ #define sched_foreach_reverse_before(before, irn) \
for (ir_node *irn = (before); !sched_is_begin(irn = sched_prev(irn));) for (ir_node *irn = (before); !sched_is_begin(irn = sched_prev(irn));)
#define sched_foreach_non_phi_reverse_before(before, irn) \
for (ir_node *irn = (before); !sched_is_begin(irn = sched_prev(irn));) \
if (is_Phi(irn)) break; else
/** /**
* A shorthand macro for iterating over a schedule. * A shorthand macro for iterating over a schedule.
* @param block The block. * @param block The block.
...@@ -186,6 +190,14 @@ static inline bool value_strictly_dominates(const ir_node *a, ...@@ -186,6 +190,14 @@ static inline bool value_strictly_dominates(const ir_node *a,
#define sched_foreach(block,irn) \ #define sched_foreach(block,irn) \
sched_foreach_after((assert(is_Block(block)), block), irn) sched_foreach_after((assert(is_Block(block)), block), irn)
#define sched_foreach_phi(block, irn) \
sched_foreach(block, irn) \
if (!is_Phi(irn)) break; else
#define sched_foreach_non_phi(block, irn) \
sched_foreach(block, irn) \
if (is_Phi(irn)) continue; else
/** /**
* A shorthand macro for reversely iterating over a schedule. * A shorthand macro for reversely iterating over a schedule.
* @param block The block. * @param block The block.
...@@ -194,6 +206,9 @@ static inline bool value_strictly_dominates(const ir_node *a, ...@@ -194,6 +206,9 @@ static inline bool value_strictly_dominates(const ir_node *a,
#define sched_foreach_reverse(block,irn) \ #define sched_foreach_reverse(block,irn) \
sched_foreach_reverse_before((assert(is_Block(block)), block), irn) sched_foreach_reverse_before((assert(is_Block(block)), block), irn)
#define sched_foreach_non_phi_reverse(block, irn) \
sched_foreach_non_phi_reverse_before((assert(is_Block(block)), block), irn)
/** /**
* A shorthand macro for iterating over a schedule while the current node may be * A shorthand macro for iterating over a schedule while the current node may be
* removed or replaced. * removed or replaced.
......
...@@ -507,9 +507,7 @@ static void decide_start_workset(ir_node *const block) ...@@ -507,9 +507,7 @@ static void decide_start_workset(ir_node *const block)
/* check all Phis first */ /* check all Phis first */
ir_loop *loop = get_irn_loop(block); ir_loop *loop = get_irn_loop(block);
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (!arch_irn_consider_in_reg_alloc(cls, node)) if (!arch_irn_consider_in_reg_alloc(cls, node))
continue; continue;
...@@ -706,12 +704,10 @@ static void process_block(ir_node *block) ...@@ -706,12 +704,10 @@ static void process_block(ir_node *block)
DB((dbg, DBG_WSETS, "Processing...\n")); DB((dbg, DBG_WSETS, "Processing...\n"));
workset_t *new_vals = temp_workset; workset_t *new_vals = temp_workset;
sched_foreach(block, irn) { sched_foreach_non_phi(block, irn) {
assert(workset_get_length(ws) <= n_regs);
/* Phis are no real instr (see insert_starters()) */ /* Phis are no real instr (see insert_starters()) */
if (is_Phi(irn))
continue; assert(workset_get_length(ws) <= n_regs);
DB((dbg, DBG_DECIDE, " ...%+F\n", irn)); DB((dbg, DBG_DECIDE, " ...%+F\n", irn));
/* allocate all values _used_ by this instruction */ /* allocate all values _used_ by this instruction */
......
...@@ -254,10 +254,7 @@ static void spill_block(ir_node *block, void *data) ...@@ -254,10 +254,7 @@ static void spill_block(ir_node *block, void *data)
/* walk schedule backwards and spill until register pressure is fine at /* walk schedule backwards and spill until register pressure is fine at
* each node */ * each node */
sched_foreach_reverse(block, node) { sched_foreach_non_phi_reverse(block, node) {
if (is_Phi(node))
break;
remove_defs(node, &live_nodes); remove_defs(node, &live_nodes);
do_spilling(&live_nodes, node); do_spilling(&live_nodes, node);
add_uses(node, &live_nodes); add_uses(node, &live_nodes);
...@@ -267,10 +264,7 @@ static void spill_block(ir_node *block, void *data) ...@@ -267,10 +264,7 @@ static void spill_block(ir_node *block, void *data)
* are still there and occupy registers, so we need to count them and might * are still there and occupy registers, so we need to count them and might
* have to spill some of them. */ * have to spill some of them. */
int n_phi_values_spilled = 0; int n_phi_values_spilled = 0;
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (bitset_is_set(spilled_nodes, get_irn_idx(node))) if (bitset_is_set(spilled_nodes, get_irn_idx(node)))
n_phi_values_spilled += get_value_width(node); n_phi_values_spilled += get_value_width(node);
} }
...@@ -289,9 +283,7 @@ static void spill_block(ir_node *block, void *data) ...@@ -289,9 +283,7 @@ static void spill_block(ir_node *block, void *data)
/* spill as many phis as needed */ /* spill as many phis as needed */
/* TODO: we should really estimate costs of the phi spill as well... /* TODO: we should really estimate costs of the phi spill as well...
* and preferably spill phis with lower costs... */ * and preferably spill phis with lower costs... */
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (phi_spills_needed <= 0) if (phi_spills_needed <= 0)
break; break;
......
...@@ -184,9 +184,7 @@ static void insert_shuffle_code_walker(ir_node *block, void *data) ...@@ -184,9 +184,7 @@ static void insert_shuffle_code_walker(ir_node *block, void *data)
} }
bool need_perm = false; bool need_perm = false;
for (ir_node *phi = sched_first(block); is_Phi(phi); sched_foreach_phi(block, phi) {
phi = sched_next(phi)) {
if (!arch_irn_consider_in_reg_alloc(cls, phi)) if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue; continue;
......
...@@ -46,14 +46,9 @@ static void check_reg_pressure_class(pressure_walker_env_t *env, ...@@ -46,14 +46,9 @@ static void check_reg_pressure_class(pressure_walker_env_t *env,
unsigned max_live = ir_nodeset_size(&live_nodes); unsigned max_live = ir_nodeset_size(&live_nodes);
env->regpressure += max_live; env->regpressure += max_live;
sched_foreach_reverse(block, irn) { sched_foreach_non_phi_reverse(block, irn) {
size_t cnt;
if (is_Phi(irn))
break;
be_liveness_transfer(cls, irn, &live_nodes); be_liveness_transfer(cls, irn, &live_nodes);
cnt = ir_nodeset_size(&live_nodes); size_t const cnt = ir_nodeset_size(&live_nodes);
max_live = MAX(max_live, cnt); max_live = MAX(max_live, cnt);
env->regpressure += cnt; env->regpressure += cnt;
env->insn_count++; env->insn_count++;
......
...@@ -212,9 +212,7 @@ static block_info_t *compute_block_start_state(minibelady_env_t *env, ...@@ -212,9 +212,7 @@ static block_info_t *compute_block_start_state(minibelady_env_t *env,
bool outer_loop_allowed = true; bool outer_loop_allowed = true;
/* check all Phis first */ /* check all Phis first */
sched_foreach(block, node) { sched_foreach_phi(block, node) {
if (!is_Phi(node))
break;
if (arch_get_irn_register(node) != env->reg) if (arch_get_irn_register(node) != env->reg)
continue; continue;
...@@ -327,10 +325,8 @@ static void belady(minibelady_env_t *env, ir_node *block) ...@@ -327,10 +325,8 @@ static void belady(minibelady_env_t *env, ir_node *block)
/* process the block from start to end */ /* process the block from start to end */
DBG((dbg, LEVEL_3, "Processing...\n")); DBG((dbg, LEVEL_3, "Processing...\n"));
sched_foreach(block, node) { sched_foreach_non_phi(block, node) {
/* Phis are no real instr (see insert_starters()) */ /* Phis are no real instr (see insert_starters()) */
if (is_Phi(node))
continue;
/* check which state is desired for the node */ /* check which state is desired for the node */
ir_node *need_val = NULL; ir_node *need_val = NULL;
......
...@@ -134,11 +134,7 @@ static bool be_is_phi_argument(const ir_node *block, const ir_node *def) ...@@ -134,11 +134,7 @@ static bool be_is_phi_argument(const ir_node *block, const ir_node *def)
/* iterate over the Phi nodes in the successor and check if def is /* iterate over the Phi nodes in the successor and check if def is
* one of its arguments */ * one of its arguments */
const int i = get_edge_src_pos(edge); const int i = get_edge_src_pos(edge);
sched_foreach(succ_block, node) { sched_foreach_phi(succ_block, node) {
/* we can stop the search on the first non-phi node */
if (!is_Phi(node))
break;
const ir_node *arg = get_irn_n(node, i); const ir_node *arg = get_irn_n(node, i);
if (arg == def) if (arg == def)
return true; return true;
......
...@@ -86,10 +86,7 @@ static void verify_liveness_walker(ir_node *block, void *data) ...@@ -86,10 +86,7 @@ static void verify_liveness_walker(ir_node *block, void *data)
env->problem_found = true; env->problem_found = true;
} }
sched_foreach_reverse(block, irn) { sched_foreach_non_phi_reverse(block, irn) {
if (is_Phi(irn))
break;
// print_living_values(stderr, &live_nodes); // print_living_values(stderr, &live_nodes);
be_liveness_transfer(env->cls, irn, &live_nodes); be_liveness_transfer(env->cls, irn, &live_nodes);
......
...@@ -327,8 +327,8 @@ static void peephole_ia32_Return(ir_node *node) ...@@ -327,8 +327,8 @@ static void peephole_ia32_Return(ir_node *node)
return; return;
/* check if this return is the first on the block */ /* check if this return is the first on the block */
sched_foreach_reverse_before(node, irn) { sched_foreach_non_phi_reverse_before(node, irn) {
if (is_Phi(irn) || is_ia32_Start(irn)) if (is_ia32_Start(irn))
continue; continue;
/* arg, IncSP 0 nodes might occur, ignore these */ /* arg, IncSP 0 nodes might occur, ignore these */
if (be_is_IncSP(irn) && be_get_IncSP_offset(irn) == 0) if (be_is_IncSP(irn) && be_get_IncSP_offset(irn) == 0)
......
...@@ -585,12 +585,9 @@ static fp_liveness fp_live_args_after(x87_simulator *sim, const ir_node *pos, ...@@ -585,12 +585,9 @@ static fp_liveness fp_live_args_after(x87_simulator *sim, const ir_node *pos,
static void update_liveness(x87_simulator *sim, ir_node *block) static void update_liveness(x87_simulator *sim, ir_node *block)
{ {
fp_liveness live = fp_liveness_end_of_block(sim, block); fp_liveness live = fp_liveness_end_of_block(sim, block);
/* now iterate through the block backward and cache the results */ /* Now iterate through the block backward and cache the results.
sched_foreach_reverse(block, irn) { * Stop at the first Phi: this produces the live-in. */
/* stop at the first Phi: this produces the live-in */ sched_foreach_non_phi_reverse(block, irn) {
if (is_Phi(irn))
break;
unsigned idx = get_irn_idx(irn); unsigned idx = get_irn_idx(irn);
sim->live[idx] = live; sim->live[idx] = live;
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment