Commit 1376e7ac authored by Matthias Braun's avatar Matthias Braun
Browse files

introduce be_foreach_use and use it

parent ae1da439
......@@ -600,6 +600,20 @@ static inline bool arch_irn_consider_in_reg_alloc(
code \
)
/**
 * be_foreach_use(node, ccls, in_req, value, value_req, code)
 *
 * Iterate over all operands (uses) of @p node whose input register
 * requirement belongs to register class @p ccls and execute @p code once
 * for each matching operand.  Operands whose own register requirement
 * carries the arch_register_req_type_ignore flag are skipped.
 *
 * Names visible inside @p code:
 *   - in_req    : the input register requirement of @p node at the
 *                 current operand position
 *   - value     : the operand node itself (get_irn_n(node, i_))
 *   - value_req : the operand node's own register requirement
 *   - i_        : the current operand index (some call sites use this,
 *                 e.g. for debug output or to continue scanning from
 *                 the next position)
 *
 * Since @p code is spliced into the body of the internal for loop,
 * `continue` advances to the next operand and `break` leaves the loop.
 * The do { ... } while (0) wrapper makes the macro behave like a single
 * statement, so call sites terminate it with `);`.
 */
#define be_foreach_use(node, ccls, in_req, value, value_req, code) \
	do { \
	for (int i_ = 0, n_ = get_irn_arity(node); i_ < n_; ++i_) { \
		const arch_register_req_t *in_req = arch_get_irn_register_req_in(node, i_); \
		if (in_req->cls != ccls) \
			continue; \
		ir_node *value = get_irn_n(node, i_); \
		const arch_register_req_t *value_req = arch_get_irn_register_req(value); \
		if (value_req->type & arch_register_req_type_ignore) \
			continue; \
		code \
	} \
	} while (0)
static inline const arch_register_class_t *arch_get_irn_reg_class(
const ir_node *node)
{
......
......@@ -157,22 +157,18 @@ void create_borders(ir_node *block, void *env_ptr)
* If the node is no phi node we can examine the uses.
*/
if (!is_Phi(irn)) {
for (int i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
int nr = get_irn_idx(op);
const char *msg = "-";
if (!bitset_is_set(live, nr)) {
border_use(op, step, 1);
bitset_set(live, nr);
msg = "X";
}
DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
be_foreach_use(irn, env->cls, in_req_, op, op_req_,
unsigned idx = get_irn_idx(op);
const char *msg = "-";
if (!bitset_is_set(live, idx)) {
border_use(op, step, 1);
bitset_set(live, idx);
msg = "X";
}
}
DB((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i_, op));
);
}
++step;
}
......
......@@ -39,7 +39,6 @@ be_insn_t *be_scan_insn(be_chordal_env_t *const env, ir_node *const irn)
{
struct obstack *const obst = &env->obst;
be_operand_t o;
int i, n;
be_insn_t *insn = OALLOCZ(obst, be_insn_t);
......@@ -66,24 +65,19 @@ be_insn_t *be_scan_insn(be_chordal_env_t *const env, ir_node *const irn)
insn->use_start = insn->n_ops;
/* now collect the uses for this node */
for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
if (arch_irn_consider_in_reg_alloc(env->cls, op)) {
/* found a register use, create an operand */
arch_register_req_t const *const req = arch_get_irn_register_req_in(irn, i);
if (arch_register_req_is(req, limited)) {
o.regs = req->limited;
has_constraints = true;
} else {
o.regs = env->allocatable_regs->data;
}
o.carrier = op;
o.partner = NULL;
obstack_grow(obst, &o, sizeof(o));
insn->n_ops++;
be_foreach_use(irn, cls, in_req, op, op_req,
/* found a register use, create an operand */
if (arch_register_req_is(in_req, limited)) {
o.regs = in_req->limited;
has_constraints = true;
} else {
o.regs = env->allocatable_regs->data;
}
}
o.carrier = op;
o.partner = NULL;
obstack_grow(obst, &o, sizeof(o));
insn->n_ops++;
);
if (!has_constraints)
return NULL;
......
......@@ -850,6 +850,7 @@ static int push_through_perm(ir_node *perm)
int new_size;
ir_node *frontier = bl;
int i, n;
be_lv_t *lv = be_get_irg_liveness(irg);
/* get some Proj and find out the register class of that Proj. */
ir_node *one_proj = get_edge_src_irn(get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL));
......@@ -867,15 +868,12 @@ static int push_through_perm(ir_node *perm)
* the Perm, increasing the register pressure by one.
*/
sched_foreach_reverse_from(sched_prev(perm), irn) {
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
ir_node *op = get_irn_n(irn, i);
be_lv_t *lv = be_get_irg_liveness(irg);
if (arch_irn_consider_in_reg_alloc(cls, op) &&
!be_values_interfere(lv, op, one_proj)) {
be_foreach_use(irn, cls, in_req_, op, op_req_,
if (!be_values_interfere(lv, op, one_proj)) {
frontier = irn;
goto found_front;
}
}
);
}
found_front:
......
......@@ -63,15 +63,13 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
unsigned *def_constr = NULL;
int arity = get_irn_arity(node);
int i, i2;
/* Insert a copy for constraint inputs attached to a value which can't
* fulfill the constraint
* (typical example: stack pointer as input to copyb)
* TODO: This really just checks precolored registers at the moment and
* ignores the general case of not matching in/out constraints
*/
for (i = 0; i < arity; ++i) {
for (int i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
const arch_register_t *reg;
......@@ -102,23 +100,11 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
}
/* insert copies for nodes that occur constrained more than once. */
for (i = 0; i < arity; ++i) {
ir_node *in;
ir_node *copy;
const arch_register_req_t *req;
req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
be_foreach_use(node, cls, req, in, in_req_,
if (!arch_register_req_is(req, limited))
continue;
in = get_irn_n(node, i);
if (!arch_irn_consider_in_reg_alloc(cls, in))
continue;
for (i2 = i + 1; i2 < arity; ++i2) {
for (int i2 = i_ + 1; i2 < arity; ++i2) {
ir_node *in2;
const arch_register_req_t *req2;
......@@ -137,7 +123,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
continue;
copy = be_new_Copy(block, in);
ir_node *copy = be_new_Copy(block, in);
stat_ev_int("constr_copy", 1);
sched_add_before(node, copy);
......@@ -146,7 +132,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
"inserting multiple constr copy %+F for %+F pos %d\n",
copy, node, i2));
}
}
);
/* collect all registers occurring in out constraints. */
be_foreach_definition(node, cls, def,
......@@ -169,26 +155,14 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
* and being constrained to a register which also occurs in out constraints.
*/
unsigned *const tmp = rbitset_alloca(cls->n_regs);
for (i = 0; i < arity; ++i) {
const arch_register_req_t *req;
ir_node *in;
ir_node *copy;
/*
* Check, if
be_foreach_use(node, cls, req, in, in_req_,
/* Check, if
* 1) the operand is constrained.
* 2) lives through the node.
* 3) is constrained to a register occurring in out constraints.
*/
req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
if (!arch_register_req_is(req, limited))
continue;
in = get_irn_n(node, i);
if (!arch_irn_consider_in_reg_alloc(cls, in))
continue;
if (!be_values_interfere(lv, node, in))
continue;
......@@ -207,13 +181,13 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
if (be_is_Copy(in))
continue;
copy = be_new_Copy(block, in);
ir_node *copy = be_new_Copy(block, in);
sched_add_before(node, copy);
set_irn_n(node, i, copy);
set_irn_n(node, i_, copy);
DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
copy, node, i));
copy, node, i_));
be_liveness_update(lv, in);
}
);
}
static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
......
......@@ -803,7 +803,6 @@ static void process_block(ir_node *block)
new_vals = new_workset();
sched_foreach(block, irn) {
int i, arity;
assert(workset_get_length(ws) <= n_regs);
/* Phis are no real instr (see insert_starters()) */
......@@ -817,14 +816,10 @@ static void process_block(ir_node *block)
/* allocate all values _used_ by this instruction */
workset_clear(new_vals);
for (i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
ir_node *in = get_irn_n(irn, i);
if (!arch_irn_consider_in_reg_alloc(cls, in))
continue;
be_foreach_use(irn, cls, in_req_, in, in_req,
/* (note that "spilled" is irrelevant here) */
workset_insert(new_vals, in, false);
}
);
displace(new_vals, 1);
/* allocate all values _defined_ by this instruction */
......
......@@ -148,14 +148,11 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
/* we need registers for the non-live argument values */
size_t free_regs_needed = 0;
int arity = get_irn_arity(node);
for (int i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
if (arch_irn_consider_in_reg_alloc(cls, pred)
&& !ir_nodeset_contains(live_nodes, pred)) {
free_regs_needed += get_value_width(pred);
be_foreach_use(node, cls, in_req_, use, pred_req_,
if (!ir_nodeset_contains(live_nodes, use)) {
free_regs_needed += get_value_width(use);
}
}
);
/* we can reuse all reloaded values for the defined values, but we might
* need even more registers */
......@@ -203,6 +200,7 @@ static void do_spilling(ir_nodeset_t *live_nodes, ir_node *node)
/* make sure the node is not an argument of the instruction */
bool is_use = false;
int arity = get_irn_arity(node);
for (int i = 0; i < arity; ++i) {
ir_node *in = get_irn_n(node, i);
if (in == cand_node) {
......
......@@ -564,23 +564,16 @@ static ir_node *x87_create_fpop(x87_state *const state, ir_node *const n, int co
*/
static fp_liveness fp_liveness_transfer(ir_node *irn, fp_liveness live)
{
int i, n;
const arch_register_class_t *cls = &ia32_reg_classes[CLASS_ia32_fp];
be_foreach_definition(irn, cls, def,
const arch_register_t *reg = x87_get_irn_register(def);
live &= ~(1 << reg->index);
);
for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
if (mode_is_float(get_irn_mode(op)) &&
arch_irn_consider_in_reg_alloc(cls, op)) {
const arch_register_t *reg = x87_get_irn_register(op);
live |= 1 << reg->index;
}
}
be_foreach_use(irn, cls, in_req_, op, op_req_,
const arch_register_t *reg = x87_get_irn_register(op);
live |= 1 << reg->index;
);
return live;
}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment