Commit 2f0eeca3 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

be: Add and use macros to iterate only/all nodes except Phis in the schedule.

parent 605f090a
......@@ -199,15 +199,9 @@ static void fix_flags_walker(ir_node *block, void *env)
ir_node *flags_needed = NULL;
ir_node *flag_consumers = NULL;
unsigned pn = (unsigned)-1;
ir_node *place = block;
sched_foreach_reverse(block, node) {
sched_foreach_non_phi_reverse(block, node) {
mark_irn_visited(node);
if (is_Phi(node)) {
place = node;
break;
}
if (node == flags_needed) {
/* all ok */
flags_needed = NULL;
......@@ -274,6 +268,7 @@ static void fix_flags_walker(ir_node *block, void *env)
if (flags_needed != NULL) {
assert(get_nodes_block(flags_needed) != block);
ir_node *const place = be_move_after_schedule_first(block);
rematerialize_or_move(flags_needed, place, flag_consumers, pn, NULL);
flags_needed = NULL;
flag_consumers = NULL;
......
......@@ -62,10 +62,7 @@ static unsigned be_compute_block_pressure(ir_node *const block, arch_register_cl
be_liveness_end_of_block(lv, cls, block, &live_nodes);
unsigned max_live = (unsigned)ir_nodeset_size(&live_nodes);
sched_foreach_reverse(block, irn) {
if (is_Phi(irn))
break;
sched_foreach_non_phi_reverse(block, irn) {
be_liveness_transfer(cls, irn, &live_nodes);
unsigned cnt = (unsigned)ir_nodeset_size(&live_nodes);
max_live = MAX(cnt, max_live);
......
......@@ -106,10 +106,7 @@ static arch_register_t const *get_free_register(ir_node *const perm, lower_env_t
arch_register_class_t const *cls = arch_get_irn_register(get_irn_n(perm, 0))->cls;
unsigned *free_regs = (unsigned*)ir_nodehashmap_get(arch_register_t const, &env->live_regs, perm);
sched_foreach_reverse(block, node) {
if (is_Phi(node))
break;
sched_foreach_non_phi_reverse(block, node) {
/* If we later implement first the chains and then the cycles
of the Perm, we *cannot* regard the Perm's own outputs as
free registers. */
......
......@@ -315,9 +315,7 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
return false;
}
}
sched_foreach(succ, phi) {
if (!is_Phi(phi))
break;
sched_foreach_phi(succ, phi) {
const arch_register_t *reg = arch_get_irn_register(phi);
const arch_register_req_t *req = arch_get_irn_register_req(phi);
be_foreach_out(node, o) {
......
......@@ -280,10 +280,7 @@ static void analyze_block(ir_node *block, void *data)
ir_nodeset_init(&live_nodes);
be_liveness_end_of_block(lv, cls, block, &live_nodes);
sched_foreach_reverse(block, node) {
if (is_Phi(node))
break;
sched_foreach_non_phi_reverse(block, node) {
be_foreach_definition(node, cls, value, req,
check_defs(&live_nodes, weight, value, req);
);
......@@ -370,10 +367,7 @@ static void create_congruence_class(ir_node *block, void *data)
be_liveness_end_of_block(lv, cls, block, &live_nodes);
/* check should be same constraints */
sched_foreach_reverse(block, node) {
if (is_Phi(node))
break;
sched_foreach_non_phi_reverse(block, node) {
be_foreach_definition(node, cls, value, req,
congruence_def(&live_nodes, value, req);
);
......@@ -381,10 +375,7 @@ static void create_congruence_class(ir_node *block, void *data)
}
/* check phi congruence classes */
sched_foreach(block, phi) {
if (!is_Phi(phi))
break;
sched_foreach_phi(block, phi) {
if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue;
......@@ -411,11 +402,9 @@ static void create_congruence_class(ir_node *block, void *data)
if (interferes)
continue;
/* any other phi has the same input? */
sched_foreach(block, phi) {
sched_foreach_phi(block, phi) {
ir_node *oop;
int oop_idx;
if (!is_Phi(phi))
break;
if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue;
oop = get_Phi_pred(phi, i);
......@@ -1273,8 +1262,7 @@ static void add_phi_permutations(ir_node *block, int p)
/* check phi nodes */
bool need_permutation = false;
ir_node *phi = sched_first(block);
for ( ; is_Phi(phi); phi = sched_next(phi)) {
sched_foreach_phi(block, phi) {
if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue;
......@@ -1308,8 +1296,7 @@ static void add_phi_permutations(ir_node *block, int p)
}
/* change phi nodes to use the copied values */
phi = sched_first(block);
for ( ; is_Phi(phi); phi = sched_next(phi)) {
sched_foreach_phi(block, phi) {
if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue;
......@@ -1386,9 +1373,7 @@ static void assign_phi_registers(ir_node *block)
{
/* count phi nodes */
int n_phis = 0;
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
++n_phis;
......@@ -1401,9 +1386,7 @@ static void assign_phi_registers(ir_node *block)
hungarian_problem_t *bp
= hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT);
int n = 0;
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
......@@ -1438,9 +1421,7 @@ static void assign_phi_registers(ir_node *block)
/* apply results */
n = 0;
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
const arch_register_req_t *req
......@@ -1589,12 +1570,8 @@ static void allocate_coalesce_block(ir_node *block, void *data)
}
#endif
/* assign instructions in the block */
sched_foreach(block, node) {
/* phis are already assigned */
if (is_Phi(node))
continue;
/* assign instructions in the block, phis are already assigned */
sched_foreach_non_phi(block, node) {
rewire_inputs(node);
/* enforce use constraints */
......
......@@ -178,6 +178,10 @@ static inline bool value_strictly_dominates(const ir_node *a,
#define sched_foreach_reverse_before(before, irn) \
for (ir_node *irn = (before); !sched_is_begin(irn = sched_prev(irn));)
/**
 * Iterate a schedule backwards, starting directly before @p before, visiting
 * only non-Phi nodes: the walk stops ('break') at the first Phi encountered,
 * since Phis are scheduled at the beginning of a block (see e.g. the
 * sched_first()/is_Phi() loops this commit replaces), so a backwards walk
 * reaching a Phi has already seen every ordinary node.
 * Note the deliberate dangling 'else': the user-supplied loop body becomes
 * the else-branch of the 'if', keeping the macro usable as a plain
 * statement-head like a for loop.
 */
#define sched_foreach_non_phi_reverse_before(before, irn) \
for (ir_node *irn = (before); !sched_is_begin(irn = sched_prev(irn));) \
if (is_Phi(irn)) break; else
/**
* A shorthand macro for iterating over a schedule.
* @param block The block.
......@@ -186,6 +190,14 @@ static inline bool value_strictly_dominates(const ir_node *a,
#define sched_foreach(block,irn) \
sched_foreach_after((assert(is_Block(block)), block), irn)
/**
 * Iterate over only the Phi nodes of a block's schedule.
 * Walks forward from the start of the schedule and stops ('break') at the
 * first non-Phi node, relying on all Phis being scheduled before any other
 * node of the block. The trailing dangling 'else' binds the user's loop body.
 */
#define sched_foreach_phi(block, irn) \
sched_foreach(block, irn) \
if (!is_Phi(irn)) break; else
/**
 * Iterate over all non-Phi nodes of a block's schedule.
 * A forward walk meets the Phis first, so they are skipped with 'continue'
 * (unlike the reverse variant, which can 'break' at the first Phi). The
 * trailing dangling 'else' binds the user's loop body.
 */
#define sched_foreach_non_phi(block, irn) \
sched_foreach(block, irn) \
if (is_Phi(irn)) continue; else
/**
* A shorthand macro for reversely iterating over a schedule.
* @param block The block.
......@@ -194,6 +206,9 @@ static inline bool value_strictly_dominates(const ir_node *a,
#define sched_foreach_reverse(block,irn) \
sched_foreach_reverse_before((assert(is_Block(block)), block), irn)
/**
 * Reversely iterate over all non-Phi nodes of a block's schedule; the walk
 * ends at the first Phi (see sched_foreach_non_phi_reverse_before).
 * The comma expression asserts that @p block really is a Block before use.
 */
#define sched_foreach_non_phi_reverse(block, irn) \
sched_foreach_non_phi_reverse_before((assert(is_Block(block)), block), irn)
/**
* A shorthand macro for iterating over a schedule while the current node may be
* removed or replaced.
......
......@@ -507,9 +507,7 @@ static void decide_start_workset(ir_node *const block)
/* check all Phis first */
ir_loop *loop = get_irn_loop(block);
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (!arch_irn_consider_in_reg_alloc(cls, node))
continue;
......@@ -706,12 +704,10 @@ static void process_block(ir_node *block)
DB((dbg, DBG_WSETS, "Processing...\n"));
workset_t *new_vals = temp_workset;
sched_foreach(block, irn) {
assert(workset_get_length(ws) <= n_regs);
sched_foreach_non_phi(block, irn) {
/* Phis are no real instr (see insert_starters()) */
if (is_Phi(irn))
continue;
assert(workset_get_length(ws) <= n_regs);
DB((dbg, DBG_DECIDE, " ...%+F\n", irn));
/* allocate all values _used_ by this instruction */
......
......@@ -254,10 +254,7 @@ static void spill_block(ir_node *block, void *data)
/* walk schedule backwards and spill until register pressure is fine at
* each node */
sched_foreach_reverse(block, node) {
if (is_Phi(node))
break;
sched_foreach_non_phi_reverse(block, node) {
remove_defs(node, &live_nodes);
do_spilling(&live_nodes, node);
add_uses(node, &live_nodes);
......@@ -267,10 +264,7 @@ static void spill_block(ir_node *block, void *data)
* are still there and occupy registers, so we need to count them and might
* have to spill some of them. */
int n_phi_values_spilled = 0;
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (bitset_is_set(spilled_nodes, get_irn_idx(node)))
n_phi_values_spilled += get_value_width(node);
}
......@@ -289,9 +283,7 @@ static void spill_block(ir_node *block, void *data)
/* spill as many phis as needed */
/* TODO: we should really estimate costs of the phi spill as well...
* and preferably spill phis with lower costs... */
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (phi_spills_needed <= 0)
break;
......
......@@ -184,9 +184,7 @@ static void insert_shuffle_code_walker(ir_node *block, void *data)
}
bool need_perm = false;
for (ir_node *phi = sched_first(block); is_Phi(phi);
phi = sched_next(phi)) {
sched_foreach_phi(block, phi) {
if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue;
......
......@@ -46,14 +46,9 @@ static void check_reg_pressure_class(pressure_walker_env_t *env,
unsigned max_live = ir_nodeset_size(&live_nodes);
env->regpressure += max_live;
sched_foreach_reverse(block, irn) {
size_t cnt;
if (is_Phi(irn))
break;
sched_foreach_non_phi_reverse(block, irn) {
be_liveness_transfer(cls, irn, &live_nodes);
cnt = ir_nodeset_size(&live_nodes);
size_t const cnt = ir_nodeset_size(&live_nodes);
max_live = MAX(max_live, cnt);
env->regpressure += cnt;
env->insn_count++;
......
......@@ -212,9 +212,7 @@ static block_info_t *compute_block_start_state(minibelady_env_t *env,
bool outer_loop_allowed = true;
/* check all Phis first */
sched_foreach(block, node) {
if (!is_Phi(node))
break;
sched_foreach_phi(block, node) {
if (arch_get_irn_register(node) != env->reg)
continue;
......@@ -327,10 +325,8 @@ static void belady(minibelady_env_t *env, ir_node *block)
/* process the block from start to end */
DBG((dbg, LEVEL_3, "Processing...\n"));
sched_foreach(block, node) {
sched_foreach_non_phi(block, node) {
/* Phis are no real instr (see insert_starters()) */
if (is_Phi(node))
continue;
/* check which state is desired for the node */
ir_node *need_val = NULL;
......
......@@ -134,11 +134,7 @@ static bool be_is_phi_argument(const ir_node *block, const ir_node *def)
/* iterate over the Phi nodes in the successor and check if def is
* one of its arguments */
const int i = get_edge_src_pos(edge);
sched_foreach(succ_block, node) {
/* we can stop the search on the first non-phi node */
if (!is_Phi(node))
break;
sched_foreach_phi(succ_block, node) {
const ir_node *arg = get_irn_n(node, i);
if (arg == def)
return true;
......
......@@ -86,10 +86,7 @@ static void verify_liveness_walker(ir_node *block, void *data)
env->problem_found = true;
}
sched_foreach_reverse(block, irn) {
if (is_Phi(irn))
break;
sched_foreach_non_phi_reverse(block, irn) {
// print_living_values(stderr, &live_nodes);
be_liveness_transfer(env->cls, irn, &live_nodes);
......
......@@ -327,8 +327,8 @@ static void peephole_ia32_Return(ir_node *node)
return;
/* check if this return is the first on the block */
sched_foreach_reverse_before(node, irn) {
if (is_Phi(irn) || is_ia32_Start(irn))
sched_foreach_non_phi_reverse_before(node, irn) {
if (is_ia32_Start(irn))
continue;
/* arg, IncSP 0 nodes might occur, ignore these */
if (be_is_IncSP(irn) && be_get_IncSP_offset(irn) == 0)
......
......@@ -585,12 +585,9 @@ static fp_liveness fp_live_args_after(x87_simulator *sim, const ir_node *pos,
static void update_liveness(x87_simulator *sim, ir_node *block)
{
fp_liveness live = fp_liveness_end_of_block(sim, block);
/* now iterate through the block backward and cache the results */
sched_foreach_reverse(block, irn) {
/* stop at the first Phi: this produces the live-in */
if (is_Phi(irn))
break;
/* Now iterate through the block backward and cache the results.
* Stop at the first Phi: this produces the live-in. */
sched_foreach_non_phi_reverse(block, irn) {
unsigned idx = get_irn_idx(irn);
sim->live[idx] = live;
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment