Commit 0ea8b727 authored by Christoph Mallon

Add and use foreach_irn_in{,_r}().

parent cc281f88
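The hunks below repeatedly replace hand-written loops over get_irn_arity()/get_irn_n() with the new iteration macros. Their actual definitions live in irnode_t.h and are not part of this excerpt; the following is only a sketch, using hypothetical helper names such as pred##_once, of how foreach_irn_in(irn, idx, pred) (forward) and foreach_irn_in_r(irn, idx, pred) (reverse) could be written so that break and continue inside the body behave exactly as in the plain for loops they replace:

/* Sketch only: plausible definitions of the iteration macros used in this
 * commit. The real definitions in irnode_t.h may differ. Assumes <stdbool.h>
 * and the ir_node API (get_irn_arity(), get_irn_n()) from irnode.h. */
#define foreach_irn_in(irn, idx, pred) \
	for (bool pred##_once = true; pred##_once;) \
		for (ir_node const *const pred##_irn = (irn); pred##_once; pred##_once = false) \
			for (int idx = 0, pred##_n = get_irn_arity(pred##_irn); \
			     pred##_once && idx != pred##_n; ++idx) \
				for (ir_node *const pred = (pred##_once = false, get_irn_n(pred##_irn, idx)); \
				     !pred##_once; pred##_once = true)

#define foreach_irn_in_r(irn, idx, pred) \
	for (bool pred##_once = true; pred##_once;) \
		for (ir_node const *const pred##_irn = (irn); pred##_once; pred##_once = false) \
			for (int idx = get_irn_arity(pred##_irn); pred##_once && idx-- != 0;) \
				for (ir_node *const pred = (pred##_once = false, get_irn_n(pred##_irn, idx)); \
				     !pred##_once; pred##_once = true)

In this sketch the node expression is evaluated once, and the index and predecessor are declared in one place and scoped to the loop body, which is what lets the hunks below drop their separate int i/arity declarations.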
@@ -56,11 +56,9 @@ static void *irg_cfg_pred_get_end(void *self)
 static void irg_cfg_pred_grow_succs(void *self, void *node, struct obstack *obst)
 {
-	int i, n;
 	(void) self;
-	for (i = 0, n = get_irn_arity((ir_node*) node); i < n; ++i) {
-		obstack_ptr_grow(obst, get_irn_n((ir_node*) node, i));
+	foreach_irn_in((ir_node*)node, i, pred) {
+		obstack_ptr_grow(obst, pred);
 	}
 }
......
@@ -367,8 +367,7 @@ static void free_ana_walker(ir_node *node, void *env)
 	/* other nodes: we assume all other nodes are traitors, until
 	 * somebody implements the contrary. */
 	set_irn_link(node, MARK);
-	for (int i = get_irn_arity(node) - 1; i >= 0; --i) {
-		ir_node *pred = get_irn_n(node, i);
+	foreach_irn_in_r(node, i, pred) {
 		if (mode_is_reference(get_irn_mode(pred))) {
 			free_mark(pred, set);
 		}
......
@@ -267,12 +267,8 @@ undefined:
 			}
 		}
 	} else {
-		int const arity = get_irn_arity(irn);
-		int i;
 		/* Undefined if any input is undefined. */
-		for (i = 0; i != arity; ++i) {
-			ir_node* const pred = get_irn_n(irn, i);
+		foreach_irn_in(irn, i, pred) {
 			bitinfo* const pred_b = get_bitinfo(pred);
 			if (pred_b != NULL && is_undefined(pred_b))
 				goto undefined;
......
@@ -20,6 +20,7 @@
  * be useful when revising this code.
  */
 #include "debug.h"
+#include "irnode_t.h"
 #include "tv.h"
 #include "irtypes.h"
 #include "pdeq.h"
@@ -98,8 +99,9 @@ static void dca_transfer(ir_node *irn, pdeq *q)
 		return;
 	case iro_Jmp:
 	default:
-		for (int i = 0; i < get_irn_arity(irn); i++)
-			care_for(get_irn_n(irn, i), 0, q);
+		foreach_irn_in(irn, i, pred) {
+			care_for(pred, 0, q);
+		}
 		care_for(get_nodes_block(irn), 0, q);
 		return;
@@ -259,14 +261,16 @@ static void dca_transfer(ir_node *irn, pdeq *q)
 	}
 	if (mode == mode_M || mode == mode_T) {
-		for (int i = 0; i < get_irn_arity(irn); i++)
-			care_for(get_irn_n(irn, i), care, q);
+		foreach_irn_in(irn, i, pred) {
+			care_for(pred, care, q);
+		}
 		return;
 	}
 	/* Assume worst case on other nodes */
-	for (int i = 0; i < get_irn_arity(irn); i++)
-		care_for(get_irn_n(irn, i), 0, q);
+	foreach_irn_in(irn, i, pred) {
+		care_for(pred, 0, q);
+	}
 }
 static void dca_init_node(ir_node *n, void *data)
@@ -318,8 +322,7 @@ void dca_add_fuzz(ir_node *node, void *data)
 	if (is_Eor(node)) return;
-	for (int i = 0; i < get_irn_arity(node); i++) {
-		ir_node *pred = get_irn_n(node, i);
+	foreach_irn_in(node, i, pred) {
 		ir_mode *pred_mode = get_irn_mode(pred);
 		ir_tarval *dc = get_irn_link(pred);
......
@@ -438,10 +438,7 @@ static void init_tmp_pdom_info(ir_node *block, tmp_dom_info *parent,
 	   are really edges to endless loops. */
 	const ir_graph *irg = get_irn_irg(block);
 	if (block == get_irg_end_block(irg)) {
-		const ir_node *end = get_irg_end(irg);
-		for (int i = get_irn_arity(end) - 1; i >= 0; --i) {
-			ir_node *pred = get_irn_n(end, i);
+		foreach_irn_in_r(get_irg_end(irg), i, pred) {
 			if (is_Block(pred))
 				init_tmp_pdom_info(pred, tdi, tdi_list, used, n_blocks);
 		}
@@ -537,9 +534,7 @@ void compute_doms(ir_graph *irg)
 		/* handle keep-alives if we are at the end block */
 		if (block == get_irg_end_block(irg)) {
-			const ir_node *end = get_irg_end(irg);
-			for (int j = 0, arity = get_irn_arity(end); j < arity; j++) {
-				const ir_node *pred = get_irn_n(end, j);
+			foreach_irn_in(get_irg_end(irg), j, pred) {
 				if (!is_Block(pred) || get_Block_dom_pre_num(pred) == -1)
 					continue; /* unreachable */
......
@@ -1064,10 +1064,7 @@ static void check_initializer_value(ir_node *value)
 		set_entity_usage(ent, ir_usage_unknown);
 	}
-	int arity = get_irn_arity(value);
-	for (int i = 0; i < arity; i++) {
-		ir_node *op = get_irn_n(value, i);
+	foreach_irn_in(value, i, op) {
 		check_initializer_value(op);
 	}
 }
......
@@ -216,19 +216,17 @@ static void arch_dump_register_req(FILE *const F, arch_register_req_t const *con
 void arch_dump_reqs_and_registers(FILE *F, const ir_node *node)
 {
-	backend_info_t *const info  = be_get_info(node);
-	int             const n_ins = get_irn_arity(node);
+	backend_info_t *const info = be_get_info(node);
 	/* don't fail on invalid graphs */
-	if (!info || (!info->in_reqs && n_ins != 0) || !info->out_infos) {
+	if (!info || (!info->in_reqs && get_irn_arity(node) != 0) || !info->out_infos) {
 		fprintf(F, "invalid register requirements!!!\n");
 		return;
 	}
-	for (int i = 0; i < n_ins; ++i) {
+	foreach_irn_in(node, i, op) {
 		const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
 		fprintf(F, "inreq #%d = ", i);
 		arch_dump_register_req(F, req);
-		ir_node *const op = get_irn_n(node, i);
 		arch_register_t const *const reg = be_get_info(skip_Proj_const(op))->out_infos ? arch_get_irn_register(op) : NULL;
 		fprintf(F, " [%s]\n", reg ? reg->name : "n/a");
 	}
......
@@ -147,8 +147,7 @@ ir_node *pre_process_constraints(be_chordal_env_t *env, be_insn_t **the_insn)
 	/* Copy the input constraints of the irn to the Perm as output
 	 * constraints. Succeeding phases (coalescing) will need that. */
-	for (int i = 0, n = get_irn_arity(irn); i != n; ++i) {
-		ir_node *const proj = get_irn_n(irn, i);
+	foreach_irn_in(irn, i, proj) {
 		/* Note that the predecessor is not necessarily a Proj of the Perm,
 		 * since ignore-nodes are not Perm'ed. */
 		if (!is_Proj(proj) || get_Proj_pred(proj) != perm)
......
@@ -389,18 +389,15 @@ static void co_collect_units(ir_node *irn, void *env)
 	be_lv_t *const lv = be_get_irg_liveness(co->irg);
 	/* Phi with some/all of its arguments */
 	if (is_Phi(irn)) {
-		int i, arity;
 		/* init */
-		arity = get_irn_arity(irn);
+		int const arity = get_irn_arity(irn);
 		unit->nodes = XMALLOCN(ir_node*, arity + 1);
 		unit->costs = XMALLOCN(int, arity + 1);
 		unit->nodes[0] = irn;
 		/* fill */
-		for (i=0; i<arity; ++i) {
+		foreach_irn_in(irn, i, arg) {
 			int o, arg_pos;
-			ir_node *arg = get_irn_n(irn, i);
 			assert(arch_get_irn_reg_class(arg) == co->cls && "Argument not in same register class.");
 			if (arg == irn)
@@ -762,7 +759,6 @@ static void build_graph_walker(ir_node *irn, void *env)
 {
 	const arch_register_req_t *req;
 	copy_opt_t *co = (copy_opt_t*)env;
-	int pos, max;
 	if (get_irn_mode(irn) == mode_T)
 		return;
@@ -771,8 +767,7 @@ static void build_graph_walker(ir_node *irn, void *env)
 		return;
 	if (is_Phi(irn)) { /* Phis */
-		for (pos=0, max=get_irn_arity(irn); pos<max; ++pos) {
-			ir_node *arg = get_irn_n(irn, pos);
+		foreach_irn_in(irn, pos, arg) {
 			add_edges(co, irn, arg, co->get_costs(irn, pos));
 		}
 	} else if (is_Perm_Proj(irn)) { /* Perms */
......
@@ -148,12 +148,8 @@ static void rematerialize_or_move(ir_node *flags_needed, ir_node *node,
 	n = flag_consumers;
 	do {
-		int i;
-		int arity = get_irn_arity(n);
-		for (i = 0; i < arity; ++i) {
-			ir_node *in = get_irn_n(n, i);
-			in = skip_Proj(in);
-			if (in == flags_needed) {
+		foreach_irn_in(n, i, in) {
+			if (skip_Proj(in) == flags_needed) {
 				set_irn_n(n, i, value);
 				break;
 			}
@@ -167,11 +163,9 @@ static void rematerialize_or_move(ir_node *flags_needed, ir_node *node,
 	    get_nodes_block(node) != get_nodes_block(flags_needed)) {
 		ir_graph *irg = get_irn_irg(node);
 		be_lv_t *lv = be_get_irg_liveness(irg);
-		int i;
 		if (lv != NULL) {
-			for (i = get_irn_arity(copy) - 1; i >= 0; --i) {
-				be_liveness_update(lv, get_irn_n(copy, i));
+			foreach_irn_in_r(copy, i, pred) {
+				be_liveness_update(lv, pred);
 			}
 		}
 	}
@@ -193,7 +187,6 @@ static void fix_flags_walker(ir_node *block, void *env)
 	ir_node *place = block;
 	sched_foreach_reverse(block, node) {
-		int i, arity;
 		ir_node *new_flags_needed = NULL;
 		ir_node *test;
@@ -221,13 +214,11 @@ static void fix_flags_walker(ir_node *block, void *env)
 		}
 		/* test whether the current node needs flags */
-		arity = get_irn_arity(node);
-		for (i = 0; i < arity; ++i) {
-			const arch_register_req_t *req
-				= arch_get_irn_register_req_in(node, i);
+		foreach_irn_in(node, i, pred) {
+			const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
 			if (req->cls == flag_class) {
 				assert(new_flags_needed == NULL);
-				new_flags_needed = get_irn_n(node, i);
+				new_flags_needed = pred;
 			}
 		}
......
@@ -229,18 +229,12 @@ check_preds:
 /* removes basic blocks that just contain a jump instruction */
 int be_remove_empty_blocks(ir_graph *irg)
 {
-	ir_node *end;
-	int i, arity;
 	blocks_removed = 0;
 	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
 	inc_irg_visited(irg);
 	remove_empty_block(get_irg_end_block(irg));
-	end = get_irg_end(irg);
-	arity = get_irn_arity(end);
-	for (i = 0; i < arity; ++i) {
-		ir_node *pred = get_irn_n(end, i);
+	foreach_irn_in(get_irg_end(irg), i, pred) {
 		if (!is_Block(pred))
 			continue;
 		remove_empty_block(pred);
......
@@ -144,8 +144,7 @@ static arch_register_t const *get_free_register(ir_node *const perm, lower_env_t
 			set_reg_free(free_regs, node, reg_is_free);
 		}
-		for (int i = 0, max = get_irn_arity(node); i < max; ++i) {
-			ir_node *const in = get_irn_n(node, i);
+		foreach_irn_in(node, i, in) {
 			set_reg_free(free_regs, in, false);
 		}
......
@@ -238,13 +238,9 @@ static void create_affinity_edges(ir_node *irn, void *env)
 	be_pbqp_alloc_env_t *pbqp_alloc_env = (be_pbqp_alloc_env_t*)env;
 	const arch_register_class_t *cls = pbqp_alloc_env->cls;
 	const arch_register_req_t *req = arch_get_irn_register_req(irn);
-	unsigned pos;
-	unsigned max;
 	if (is_Phi(irn)) { /* Phis */
-		for (pos = 0, max = get_irn_arity(irn); pos < max; ++pos) {
-			ir_node *arg = get_irn_n(irn, pos);
+		foreach_irn_in(irn, pos, arg) {
 			if (!arch_irn_consider_in_reg_alloc(cls, arg))
 				continue;
......
@@ -83,12 +83,8 @@ static void clear_defs(ir_node *node)
 static void set_uses(ir_node *node)
 {
-	int i, arity;
 	/* set values used */
-	arity = get_irn_arity(node);
-	for (i = 0; i < arity; ++i) {
-		ir_node *in = get_irn_n(node, i);
+	foreach_irn_in(node, i, in) {
 		set_reg_value(in);
 	}
 }
@@ -233,7 +229,6 @@ bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
 	assert(get_nodes_block(node) == get_nodes_block(before));
 	assert(sched_get_time_step(node) < sched_get_time_step(before));
-	int node_arity = get_irn_arity(node);
 	ir_node *schedpoint = sched_next(node);
 	while (schedpoint != before) {
@@ -242,9 +237,8 @@ bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
 			return false;
 		/* schedpoint must not overwrite registers of our inputs */
-		for (int i = 0; i < node_arity; ++i) {
-			ir_node *in = get_irn_n(node, i);
-			const arch_register_t *reg = arch_get_irn_register(in);
+		foreach_irn_in(node, i, in) {
+			const arch_register_t *reg = arch_get_irn_register(in);
 			if (reg == NULL)
 				continue;
 			const arch_register_req_t *in_req
......
@@ -249,12 +249,10 @@ static void check_defs(ir_nodeset_t const *const live_nodes, float const weight,
 	int arity = get_irn_arity(insn);
 	float factor = 1.0f / rbitset_popcount(&req->other_same, arity);
-	for (int i = 0; i < arity; ++i) {
+	foreach_irn_in(insn, i, op) {
 		if (!rbitset_is_set(&req->other_same, i))
 			continue;
-		ir_node *op = get_irn_n(insn, i);
 		/* if we the value at the should_be_same input doesn't die at the
 		 * node, then it is no use to propagate the constraints (since a
 		 * copy will emerge anyway) */
@@ -290,20 +288,17 @@ static void analyze_block(ir_node *block, void *data)
 			check_defs(&live_nodes, weight, value, req);
 		);
-		/* mark last uses */
-		int arity = get_irn_arity(node);
 		/* the allocation info node currently only uses 1 unsigned value
 		   to mark last used inputs. So we will fail for a node with more than
 		   32 inputs. */
 		allocation_info_t *info = get_allocation_info(node);
-		if (arity >= (int) sizeof(info->last_uses) * 8) {
+		if (get_irn_arity(node) >= (int)sizeof(info->last_uses) * 8) {
 			panic("Node with more than %d inputs not supported yet",
 			      (int) sizeof(info->last_uses) * 8);
 		}
-		for (int i = 0; i < arity; ++i) {
-			ir_node *op = get_irn_n(node, i);
+		/* mark last uses */
+		foreach_irn_in(node, i, op) {
 			const arch_register_req_t *req = arch_get_irn_register_req(op);
 			if (req->cls != cls)
 				continue;
@@ -333,16 +328,14 @@ static void congruence_def(ir_nodeset_t *const live_nodes, ir_node const *const
 	/* should be same constraint? */
 	if (arch_register_req_is(req, should_be_same)) {
 		const ir_node *insn = skip_Proj_const(node);
-		int arity = get_irn_arity(insn);
 		unsigned node_idx = get_irn_idx(node);
 		node_idx = uf_find(congruence_classes, node_idx);
-		for (int i = 0; i < arity; ++i) {
+		foreach_irn_in(insn, i, op) {
 			if (!rbitset_is_set(&req->other_same, i))
 				continue;
-			ir_node *op     = get_irn_n(insn, i);
-			int      op_idx = get_irn_idx(op);
+			int op_idx = get_irn_idx(op);
 			op_idx = uf_find(congruence_classes, op_idx);
 			/* do we interfere with the value */
@@ -666,14 +659,12 @@ static void assign_reg(ir_node const *const block, ir_node *const node, arch_reg
 	ir_node *in_node = skip_Proj(node);
 	if (arch_register_req_is(req, should_be_same)) {
 		float weight = (float)get_block_execfreq(block);
-		int arity = get_irn_arity(in_node);
-		assert(arity <= (int) sizeof(req->other_same) * 8);
-		for (int i = 0; i < arity; ++i) {
+		assert(get_irn_arity(in_node) <= (int)sizeof(req->other_same) * 8);
+		foreach_irn_in(in_node, i, in) {
 			if (!rbitset_is_set(&req->other_same, i))
 				continue;
-			ir_node *in = get_irn_n(in_node, i);
 			const arch_register_t *reg = arch_get_irn_register(in);
 			unsigned reg_index = reg->index;
@@ -922,14 +913,12 @@ static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
 {
 	allocation_info_t *info = get_allocation_info(node);
 	const unsigned *last_uses = info->last_uses;
-	int arity = get_irn_arity(node);
-	for (int i = 0; i < arity; ++i) {
+	foreach_irn_in(node, i, op) {
 		/* check if one operand is the last use */
 		if (!rbitset_is_set(last_uses, i))
 			continue;
-		ir_node *op = get_irn_n(node, i);
 		free_reg_of_value(op);
 		ir_nodeset_remove(live_nodes, op);
 	}
@@ -940,9 +929,7 @@ static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
  */
 static void rewire_inputs(ir_node *node)
 {
-	int arity = get_irn_arity(node);
-	for (int i = 0; i < arity; ++i) {
-		ir_node *op = get_irn_n(node, i);
+	foreach_irn_in(node, i, op) {
 		allocation_info_t *info = try_get_allocation_info(op);
 		if (info == NULL)
@@ -974,12 +961,10 @@ static void determine_live_through_regs(unsigned *bitset, ir_node *node)
 	}
 	/* remove registers of value dying at the instruction */
-	int arity = get_irn_arity(node);
-	for (int i = 0; i < arity; ++i) {
+	foreach_irn_in(node, i, op) {
 		if (!rbitset_is_set(info->last_uses, i))
 			continue;
-		ir_node *op = get_irn_n(node, i);
 		const arch_register_t *reg = arch_get_irn_register(op);
 		rbitset_clear(bitset, reg->index);
 	}
@@ -1341,9 +1326,7 @@ static void adapt_phi_prefs(ir_node *phi)
 	ir_node *block = get_nodes_block(phi);
 	allocation_info_t *info = get_allocation_info(phi);
-	int arity = get_irn_arity(phi);
-	for (int i = 0; i < arity; ++i) {
-		ir_node *op = get_irn_n(phi, i);
+	foreach_irn_in(phi, i, op) {
 		const arch_register_t *reg = arch_get_irn_register(op);
 		if (reg == NULL)
......
@@ -119,7 +119,6 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
 	int n_res;
 	int cost;
 	int n_op_res = 0;
-	int i;
 	if (be_is_Keep(irn))
 		return 0;
@@ -139,9 +138,7 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
 	fc->no_root = 0;
 	costs = fc->costs;
-	for (i = 0; i < arity; ++i) {
-		ir_node* pred = get_irn_n(irn, i);
+	foreach_irn_in(irn, i, pred) {
 		if (is_Phi(irn) || get_irn_mode(pred) == mode_M) {
 			cost = 0;
 		} else if (get_nodes_block(pred) != block) {
@@ -171,7 +168,7 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
 	cost = 0;
 	last = 0;
-	for (i = 0; i < arity; ++i) {
+	for (int i = 0; i < arity; ++i) {
 		ir_node* op = fc->costs[i].irn;
 		ir_mode* mode;
 		if (op == last)
......
@@ -173,12 +173,9 @@ static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
 static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
 {
-	int i, n;
 	int sum = 0;
-	for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
-		ir_node *op = get_irn_n(irn, i);
+	foreach_irn_in(irn, i, op) {
 		if (is_Proj(op)
 		    || (arch_get_irn_flags(op) & arch_irn_flag_not_scheduled))
 			continue;
......
@@ -268,7 +268,6 @@ static int get_reg_difference(trace_env_t *env, ir_node *irn)
 {
 	int num_out = 0;
 	int num_in = 0;
-	int i;
 	ir_node *block = get_nodes_block(irn);
 	if (be_is_Call(irn)) {
@@ -288,9 +287,7 @@ static int get_reg_difference(trace_env_t *env, ir_node *irn)
 		num_out = 1;
 	/* num in regs: number of ins with mode data and not ignore */
-	for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
-		ir_node *in = get_irn_n(irn, i);
+	foreach_irn_in_r(irn, i, in) {
 		if (!mode_is_data(get_irn_mode(in)))
 			continue;
@@ -311,8 +308,6 @@
  */
 static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
 {
-	int i;
 	if (! is_Phi(root)) {
 		path_len += exectime(env, root);
 		if (get_irn_critical_path_len(env, root) < path_len) {
@@ -325,9 +320,7 @@ static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *
 	set_irn_reg_diff(env, root, get_reg_difference(env, root));
 	/* Phi nodes always leave the block */
-	for (i = get_irn_arity(root) - 1; i >= 0; --i) {
-		ir_node *pred = get_irn_n(root, i);
+	foreach_irn_in_r(root, i, pred) {
 		DBG((env->dbg, LEVEL_3, " node %+F\n", pred));
 		/* Blocks may happen as predecessors of End nodes */
......
@@ -51,13 +51,11 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
 	 * (typical example: stack pointer as input to copyb)
 	 * TODO: This really just checks precolored registers at the moment and
 	 * ignores the general case of not matching in/out constraints */
-	int const arity = get_irn_arity(node);
-	for (int i = 0; i < arity; ++i) {
+	foreach_irn_in(node, i, op) {
 		const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
 		if (req->cls != cls)
 			continue;
-		ir_node *op = get_irn_n(node, i);
 		const arch_register_t *reg = arch_get_irn_register(op);
 		if (reg == NULL)
 			continue;
@@ -81,6 +79,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
 	}
 	/* insert copies for nodes that occur constrained more than once. */
+	int const arity = get_irn_arity(node);
 	be_foreach_use(node, cls, req, in, in_req_,
 		if (!arch_register_req_is(req, limited))
 			continue;
......
@@ -188,9 +188,7 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
 		/* make sure the node is not an argument of the instruction */
 		bool is_use = false;
-		int arity = get_irn_arity(node);
-		for (int i = 0; i < arity; ++i) {
-			ir_node *in = get_irn_n(node, i);
+		foreach_irn_in(node, i, in) {
 			if (in == cand_node) {
 				is_use = true;
 				break;
@@ -220,10 +218,7 @@ static void remove_defs(ir_node *node, ir_nodeset_t *nodeset)
 static void add_uses(ir_node *node, ir_nodeset_t *nodeset)
 {
-	int arity = get_irn_arity(node);
-	for (int i = 0; i < arity; ++i) {
-		ir_node *op = get_irn_n(node, i);
+	foreach_irn_in(node, i, op) {
 		if (arch_irn_consider_in_reg_alloc(cls, op) &&
 		    !bitset_is_set(spilled_nodes, get_irn_idx(op))) {
 			ir_nodeset_insert(nodeset, op);
......