Commit e89eea0f authored by Christoph Mallon's avatar Christoph Mallon
Browse files

bearch: Introduce be_foreach_out().

parent 1dbd32d4
......@@ -381,13 +381,10 @@ static void init_arm_CopyB_attributes(ir_node *res, unsigned size)
/**
 * Initialize the attributes of an arm SwitchJmp node.
 *
 * Stores the switch table in the node's attributes and clears every out
 * register requirement (a SwitchJmp only produces control-flow outputs,
 * so no out needs a register).
 *
 * @param res    the arm SwitchJmp node to initialize
 * @param table  the switch table to attach (ownership stays with caller —
 *               NOTE(review): presumed; confirm against other init_* users)
 */
static void init_arm_SwitchJmp_attributes(ir_node *res,
                                          const ir_switch_table *table)
{
	arm_SwitchJmp_attr_t *attr = get_arm_SwitchJmp_attr(res);
	attr->table = table;
	/* be_foreach_out() iterates all out indices of the node. */
	be_foreach_out(res, o) {
		arch_set_irn_register_req_out(res, o, arch_no_register_req);
	}
}
......
......@@ -1491,10 +1491,7 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node)
*/
static int find_out_for_reg(ir_node *node, const arch_register_t *reg)
{
int n_outs = arch_get_irn_n_outs(node);
int o;
for (o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req == reg->single_req)
return o;
......
......@@ -273,8 +273,7 @@ void arch_dump_reqs_and_registers(FILE *F, const ir_node *node)
arch_dump_register_req(F, req, node);
fputs("\n", F);
}
unsigned n_outs = arch_get_irn_n_outs(node);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
fprintf(F, "outreq #%u = ", o);
arch_dump_register_req(F, req, node);
......
......@@ -205,13 +205,13 @@ void arch_add_irn_flags(ir_node *node, arch_irn_flags_t flags);
/**
 * Return the number of output values of @p node.
 *
 * NOTE(review): this version (post commit introducing be_foreach_out) drops
 * the old NULL check on info->out_infos and assumes out_infos is always an
 * allocated array — confirm that every backend node gets out_infos set up
 * before this is called.
 */
static inline unsigned arch_get_irn_n_outs(const ir_node *node)
{
	backend_info_t *const info = be_get_info(node);
	return (unsigned)ARR_LEN(info->out_infos);
}

/**
 * Iterate over all out indices of @p node.
 * Declares the loop variable @p i (unsigned) in loop scope; the out count is
 * evaluated once before the loop via the pasted helper variable i##__n.
 */
#define be_foreach_out(node, i) \
	for (unsigned i = 0, i##__n = arch_get_irn_n_outs(node); i != i##__n; ++i)
/**
* Start codegeneration
*/
......
......@@ -973,11 +973,9 @@ static const arch_irn_ops_t be_node_irn_ops = {
static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
{
ir_node *start = get_irg_start(irg);
unsigned n_outs = arch_get_irn_n_outs(start);
int i;
/* do a naive linear search... */
for (i = 0; i < (int)n_outs; ++i) {
be_foreach_out(start, i) {
arch_register_req_t const *const out_req = arch_get_irn_register_req_out(start, i);
if (!arch_register_req_is(out_req, limited))
continue;
......
......@@ -269,7 +269,6 @@ bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
return false;
/* schedpoint must not overwrite registers of our inputs */
unsigned n_outs = arch_get_irn_n_outs(schedpoint);
for (int i = 0; i < node_arity; ++i) {
ir_node *in = get_irn_n(node, i);
const arch_register_t *reg = arch_get_irn_register(in);
......@@ -277,7 +276,7 @@ bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
continue;
const arch_register_req_t *in_req
= arch_get_irn_register_req_in(node, i);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(schedpoint, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(schedpoint, o);
const arch_register_req_t *outreq
......@@ -295,7 +294,6 @@ bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
const ir_node *after)
{
unsigned n_outs = arch_get_irn_n_outs(node);
const ir_node *node_block = get_nodes_block(node);
const ir_node *after_block = get_block_const(after);
const ir_node *schedpoint;
......@@ -326,7 +324,7 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
be_lv_foreach(lv, succ, be_lv_state_in, live_node) {
const arch_register_t *reg = arch_get_irn_register(live_node);
const arch_register_req_t *req = arch_get_irn_register_req(live_node);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(node, o);
const arch_register_req_t *outreq
......@@ -340,7 +338,7 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
break;
const arch_register_t *reg = arch_get_irn_register(phi);
const arch_register_req_t *req = arch_get_irn_register_req(phi);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(node, o);
const arch_register_req_t *outreq
......@@ -374,7 +372,7 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
continue;
const arch_register_req_t *in_req
= arch_get_irn_register_req_in(schedpoint, i);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_t *outreg
= arch_get_irn_register_out(node, o);
const arch_register_req_t *outreq
......
......@@ -245,14 +245,12 @@ static int get_first_same(const arch_register_req_t* req)
static void assure_should_be_same_requirements(ir_node *node)
{
const arch_register_t *out_reg, *in_reg;
int n_res, i;
ir_node *in_node, *block;
n_res = arch_get_irn_n_outs(node);
block = get_nodes_block(node);
/* check all OUT requirements, if there is a should_be_same */
for (i = 0; i < n_res; i++) {
be_foreach_out(node, i) {
int i2, arity;
int same_pos;
ir_node *uses_out_reg;
......@@ -345,8 +343,6 @@ static void assure_should_be_same_requirements(ir_node *node)
*/
static void fix_am_source(ir_node *irn)
{
int n_res, i;
/* check only ia32 nodes with source address mode */
if (!is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
return;
......@@ -354,9 +350,7 @@ static void fix_am_source(ir_node *irn)
if (get_ia32_am_support(irn) != ia32_am_binary)
return;
n_res = arch_get_irn_n_outs(irn);
for (i = 0; i < n_res; i++) {
be_foreach_out(irn, i) {
const arch_register_req_t *req = arch_get_irn_register_req_out(irn, i);
const arch_register_t *out_reg;
int same_pos;
......
......@@ -878,16 +878,13 @@ static void init_ia32_climbframe_attributes(ir_node *res, unsigned count)
/**
 * Initialize the attributes of an ia32 switch node.
 *
 * Tags the attribute type in debug builds, stores the switch table and
 * clears every out register requirement (switch outputs are control flow,
 * not register values).
 *
 * @param node   the ia32 switch node to initialize
 * @param table  the switch table to attach
 */
static void init_ia32_switch_attributes(ir_node *node,
                                        const ir_switch_table *table)
{
	ia32_switch_attr_t *attr = (ia32_switch_attr_t*) get_irn_generic_attr(node);
#ifndef NDEBUG
	/* Record the concrete attribute type so debug checks can verify casts. */
	attr->attr.attr_type |= IA32_ATTR_ia32_switch_attr_t;
#endif
	attr->table = table;
	/* be_foreach_out() iterates all out indices of the node. */
	be_foreach_out(node, o) {
		arch_set_irn_register_req_out(node, o, arch_no_register_req);
	}
}
......
......@@ -5512,13 +5512,11 @@ static ir_node *gen_Proj_be_Call(ir_node *node)
proj = pn_ia32_Call_X_regular;
} else {
arch_register_req_t const *const req = arch_get_irn_register_req(node);
int const n_outs = arch_get_irn_n_outs(new_call);
int i;
assert(proj >= pn_be_Call_first_res);
assert(arch_register_req_is(req, limited));
for (i = 0; i < n_outs; ++i) {
be_foreach_out(new_call, i) {
arch_register_req_t const *const new_req = arch_get_irn_register_req_out(new_call, i);
if (!arch_register_req_is(new_req, limited) ||
new_req->cls != req->cls ||
......@@ -5526,9 +5524,10 @@ static ir_node *gen_Proj_be_Call(ir_node *node)
continue;
proj = i;
break;
goto found;
}
assert(i < n_outs);
panic("no matching out requirement found");
found:;
}
res = new_rd_Proj(dbgi, new_call, mode, proj);
......
......@@ -1418,7 +1418,7 @@ static int sim_Asm(x87_state *const state, ir_node *const n)
panic("cannot handle %+F with x87 constraints", n);
}
for (size_t i = arch_get_irn_n_outs(n); i-- != 0;) {
be_foreach_out(n, i) {
arch_register_req_t const *const req = arch_get_irn_register_req_out(n, i);
if (req->cls == &ia32_reg_classes[CLASS_ia32_fp])
panic("cannot handle %+F with x87 constraints", n);
......
......@@ -139,8 +139,7 @@ static void sparc_prepare_graph(ir_graph *irg)
static bool sparc_modifies_flags(const ir_node *node)
{
unsigned n_outs = arch_get_irn_n_outs(node);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req->cls == &sparc_reg_classes[CLASS_sparc_flags_class])
return true;
......@@ -150,8 +149,7 @@ static bool sparc_modifies_flags(const ir_node *node)
static bool sparc_modifies_fp_flags(const ir_node *node)
{
unsigned n_outs = arch_get_irn_n_outs(node);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
if (req->cls == &sparc_reg_classes[CLASS_sparc_fpflags_class])
return true;
......
......@@ -320,8 +320,7 @@ static bool uses_reg(const ir_node *node, unsigned reg_index, unsigned width)
static bool writes_reg(const ir_node *node, unsigned reg_index, unsigned width)
{
unsigned n_outs = arch_get_irn_n_outs(node);
for (unsigned o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
const arch_register_t *out_reg = arch_get_irn_register_out(node, o);
if (out_reg == NULL)
continue;
......@@ -423,8 +422,7 @@ static bool can_move_up_into_delayslot(const ir_node *node, const ir_node *to)
}
/* node must not write to one of the call outputs */
unsigned n_call_outs = arch_get_irn_n_outs(to);
for (unsigned o = 0; o < n_call_outs; ++o) {
be_foreach_out(to, o) {
const arch_register_t *reg = arch_get_irn_register_out(to, o);
if (reg == NULL)
continue;
......
......@@ -269,14 +269,12 @@ static void finish_sparc_Ld(ir_node *node)
ir_node *constant = create_constant_from_immediate(node, offset);
ir_node *new_load = new_bd_sparc_Ld_reg(dbgi, block, ptr, constant, mem, load_store_mode);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
unsigned n_outs = arch_get_irn_n_outs(node);
unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
for (i = 0; i < n_outs; i++) {
be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
......@@ -333,14 +331,12 @@ static void finish_sparc_Ldf(ir_node *node)
ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
ir_node *new_load = new_bd_sparc_Ldf_s(dbgi, block, new_ptr, mem, load_store_mode, NULL, 0, true);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
unsigned n_outs = arch_get_irn_n_outs(node);
unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
for (i = 0; i < n_outs; i++) {
be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
......@@ -367,14 +363,12 @@ static void finish_sparc_St(ir_node *node)
ir_node *constant = create_constant_from_immediate(node, offset);
ir_node *new_load = new_bd_sparc_St_reg(dbgi, block, value, ptr, constant, mem, load_store_mode);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
unsigned n_outs = arch_get_irn_n_outs(node);
unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
for (i = 0; i < n_outs; i++) {
be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
......@@ -402,14 +396,12 @@ static void finish_sparc_Stf(ir_node *node)
ir_node *new_ptr = new_bd_sparc_Add_reg(dbgi, block, ptr, constant);
ir_node *new_load = new_bd_sparc_Stf_s(dbgi, block, value, new_ptr, mem, load_store_mode, NULL, 0, true);
sparc_load_store_attr_t *new_load_attr = get_sparc_load_store_attr(new_load);
unsigned n_outs = arch_get_irn_n_outs(node);
unsigned i;
new_load_attr->is_frame_entity = load_store_attr->is_frame_entity;
new_load_attr->is_reg_reg = load_store_attr->is_reg_reg;
sched_add_before(node, new_load);
for (i = 0; i < n_outs; i++) {
be_foreach_out(node, i) {
arch_set_irn_register_out(new_load, i, arch_get_irn_register_out(node, i));
}
be_peephole_exchange(node, new_load);
......
......@@ -270,14 +270,11 @@ static void init_sparc_switch_jmp_attributes(ir_node *node,
const ir_switch_table *table,
ir_entity *table_entity)
{
unsigned n_outs = arch_get_irn_n_outs(node);
unsigned o;
sparc_switch_jmp_attr_t *attr = get_sparc_switch_jmp_attr(node);
attr->table = table;
attr->table_entity = table_entity;
for (o = 0; o < n_outs; ++o) {
be_foreach_out(node, o) {
arch_set_irn_register_req_out(node, o, arch_no_register_req);
}
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment