Commit feeff92a — authored and committed by Christoph Mallon
Browse files

be: Add and use be_new_Proj_reg(), to create a Proj using the given register.

parent 7ceddc8e
......@@ -105,10 +105,8 @@ static void transform_sub_to_neg_add(ir_node *node,
ir_node *xor = new_bd_amd64_xorp(dbgi, block, ARRAY_SIZE(xor_in),
xor_in, &xor_attr);
arch_set_irn_register_reqs_in(xor, amd64_xmm_reqs);
ir_node *const neg = be_new_Proj(xor, pn_amd64_xorp_res);
sched_add_before(node, xor);
arch_set_irn_register(neg, in2_reg);
ir_node *const neg = be_new_Proj_reg(xor, pn_amd64_xorp_res, in2_reg);
ir_node *in[] = { neg, in1 };
add = new_bd_amd64_adds(dbgi, block, ARRAY_SIZE(in), in, attr);
......@@ -116,9 +114,8 @@ static void transform_sub_to_neg_add(ir_node *node,
} else {
assert(is_amd64_sub(node));
ir_node *neg = new_bd_amd64_neg(dbgi, block, in2, attr->base.insn_mode);
arch_set_irn_register_out(neg, pn_amd64_neg_res, out_reg);
sched_add_before(node, neg);
ir_node *const neg_res = be_new_Proj(neg, pn_amd64_neg_res);
ir_node *const neg_res = be_new_Proj_reg(neg, pn_amd64_neg_res, out_reg);
ir_node *in[] = { neg_res, in1 };
add = new_bd_amd64_add(dbgi, block, ARRAY_SIZE(in), in, attr);
......
......@@ -2371,10 +2371,8 @@ static ir_node *gen_Alloc(ir_node *node)
subsp = new_bd_amd64_sub_sp(dbgi, new_block, arity, in, &attr);
arch_set_irn_register_reqs_in(subsp, reqs);
arch_set_irn_register_out(subsp, pn_amd64_sub_sp_stack,
&amd64_registers[REG_RSP]);
ir_node *const stack_proj = be_new_Proj(subsp, pn_amd64_sub_sp_stack);
ir_node *const stack_proj = be_new_Proj_reg(subsp, pn_amd64_sub_sp_stack, &amd64_registers[REG_RSP]);
keep_alive(stack_proj);
pmap_insert(node_to_stack, node, stack_proj);
......
......@@ -169,9 +169,7 @@ static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp,
static ir_node* create_spproj(ir_node *pred, int pos)
{
ir_node *const sp = be_new_Proj(pred, pos);
arch_set_irn_register(sp, &amd64_registers[REG_RSP]);
return sp;
return be_new_Proj_reg(pred, pos, &amd64_registers[REG_RSP]);
}
/**
......@@ -516,15 +514,13 @@ static void amd64_select_instructions(ir_graph *irg)
static void introduce_epilogue(ir_node *ret)
{
const arch_register_t *sp = &amd64_registers[REG_RSP];
const arch_register_t *bp = &amd64_registers[REG_RBP];
ir_graph *irg = get_irn_irg(ret);
ir_node *block = get_nodes_block(ret);
ir_type *frame_type = get_irg_frame_type(irg);
unsigned frame_size = get_type_size_bytes(frame_type);
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ir_node *first_sp = get_irn_n(ret, n_amd64_ret_stack);
ir_node *curr_sp = first_sp;
ir_graph *irg = get_irn_irg(ret);
ir_node *block = get_nodes_block(ret);
ir_type *frame_type = get_irg_frame_type(irg);
unsigned frame_size = get_type_size_bytes(frame_type);
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ir_node *first_sp = get_irn_n(ret, n_amd64_ret_stack);
ir_node *curr_sp = first_sp;
if (!layout->sp_relative) {
int const n_rbp = determine_rbp_input(ret);
......@@ -532,10 +528,8 @@ static void introduce_epilogue(ir_node *ret)
ir_node *curr_mem = get_irn_n(ret, n_amd64_ret_mem);
ir_node *const leave = new_bd_amd64_leave(NULL, block, curr_bp, curr_mem);
curr_mem = be_new_Proj(leave, pn_amd64_leave_M);
curr_bp = be_new_Proj(leave, pn_amd64_leave_frame);
curr_sp = be_new_Proj(leave, pn_amd64_leave_stack);
arch_set_irn_register(curr_bp, bp);
arch_set_irn_register(curr_sp, sp);
curr_bp = be_new_Proj_reg(leave, pn_amd64_leave_frame, &amd64_registers[REG_RBP]);
curr_sp = be_new_Proj_reg(leave, pn_amd64_leave_stack, &amd64_registers[REG_RSP]);
sched_add_before(ret, leave);
set_irn_n(ret, n_amd64_ret_mem, curr_mem);
......@@ -575,8 +569,7 @@ static void introduce_prologue(ir_graph *const irg)
sched_add_after(start, push);
ir_node *const curr_mem = be_new_Proj(push, pn_amd64_push_reg_M);
edges_reroute_except(mem, curr_mem, push);
ir_node *const curr_sp = be_new_Proj(push, pn_amd64_push_reg_stack);
arch_set_irn_register(curr_sp, sp);
ir_node *const curr_sp = be_new_Proj_reg(push, pn_amd64_push_reg_stack, sp);
/* move rsp to rbp */
ir_node *const curr_bp = be_new_Copy(block, curr_sp);
......
......@@ -315,10 +315,8 @@ static void lower_perm_node(ir_node *const perm, arch_register_class_t const *co
ir_node *const in[] = { p->in_node, q->in_node };
ir_node *const xchg = be_new_Perm(cls, block, ARRAY_SIZE(in), in);
DBG((dbg, LEVEL_2, "%+F: inserting %+F for %+F (%s) and %+F (%s)\n", perm, xchg, in[0], arch_get_irn_register(in[0]), in[1], arch_get_irn_register(in[1])));
new_p = be_new_Proj(xchg, 0);
new_q = be_new_Proj(xchg, 1);
arch_set_irn_register_out(xchg, 0, q->in_reg);
arch_set_irn_register_out(xchg, 1, q->out_reg);
new_p = be_new_Proj_reg(xchg, 0, q->in_reg);
new_q = be_new_Proj_reg(xchg, 1, q->out_reg);
sched_add_before(perm, xchg);
/* Prevent that the broken down Perm is visited by the walker. */
mark_irn_visited(xchg);
......
......@@ -557,6 +557,12 @@ ir_node *be_new_Proj(ir_node *const pred, unsigned const pos)
return new_r_Proj(pred, req->cls->mode, pos);
}
ir_node *be_new_Proj_reg(ir_node *const pred, unsigned const pos, arch_register_t const *const reg)
{
arch_set_irn_register_out(pred, pos, reg);
return be_new_Proj(pred, pos);
}
ir_node *be_get_or_make_Proj_for_pn(ir_node *const irn, unsigned const pn)
{
ir_node *const proj = get_Proj_for_pn(irn, pn);
......
......@@ -216,6 +216,8 @@ ir_node *be_get_Start_proj(ir_graph *irg, arch_register_t const *reg);
*/
ir_node *be_new_Proj(ir_node *pred, unsigned pos);
ir_node *be_new_Proj_reg(ir_node *pred, unsigned pos, arch_register_t const *reg);
/**
* Gets the Proj with number pn from irn.
* Creates the Proj, if it does not exist, yet.
......
......@@ -118,9 +118,8 @@ static void impl_parcopy(const arch_register_class_t *cls,
if (src == n_regs || src == dst)
continue;
ir_node *const proj = be_new_Proj(perm, i);
arch_register_t const *const reg = arch_register_for_index(cls, dst);
arch_set_irn_register(proj, reg);
ir_node *const proj = be_new_Proj_reg(perm, i, reg);
ir_node *phi = phis[dst];
set_irn_n(phi, pred_nr, proj);
......
......@@ -569,17 +569,14 @@ static bool ia32_try_replace_flags(ir_node *consumers, ir_node *flags, ir_node *
if (flags_left == avail_right && avail_left == flags_right) {
/* We can use available if we reverse the
* consumers' condition codes. */
ir_mode *flag_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
const arch_register_t *flag_reg = &ia32_reg_classes[CLASS_ia32_flags].regs[0];
ir_mode *const flag_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
for (ir_node *c = consumers; c != NULL; c = get_irn_link(c)) {
x86_condition_code_t cc = get_ia32_condcode(c);
set_ia32_condcode(c, x86_invert_condition_code(cc));
foreach_irn_in(c, i, in) {
if (get_irn_mode(in) == flag_mode) {
ir_node *const proj = be_new_Proj(available, pn);
arch_set_irn_register(proj, flag_reg);
ir_node *const proj = be_new_Proj_reg(available, pn, &ia32_registers[REG_EFLAGS]);
set_irn_n(c, i, proj);
}
}
......@@ -808,9 +805,7 @@ static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp,
static ir_node *create_spproj(ir_node *const pred, unsigned const pos)
{
ir_node *const sp = be_new_Proj(pred, pos);
arch_set_irn_register(sp, &ia32_registers[REG_ESP]);
return sp;
return be_new_Proj_reg(pred, pos, &ia32_registers[REG_ESP]);
}
/**
......@@ -1017,8 +1012,8 @@ static void introduce_epilogue(ir_node *const ret)
ir_node *curr_mem = get_irn_n(ret, n_ia32_Return_mem);
if (ia32_cg_config.use_leave) {
restore = new_bd_ia32_Leave(NULL, block, curr_mem, curr_bp);
curr_bp = be_new_Proj(restore, pn_ia32_Leave_frame);
curr_sp = be_new_Proj(restore, pn_ia32_Leave_stack);
curr_bp = be_new_Proj_reg(restore, pn_ia32_Leave_frame, bp);
curr_sp = be_new_Proj_reg(restore, pn_ia32_Leave_stack, sp);
curr_mem = be_new_Proj(restore, pn_ia32_Leave_M);
} else {
/* Copy ebp to esp. */
......@@ -1028,13 +1023,11 @@ static void introduce_epilogue(ir_node *const ret)
/* Pop ebp. */
restore = new_bd_ia32_Pop_ebp(NULL, block, curr_mem, curr_sp);
curr_bp = be_new_Proj(restore, pn_ia32_Pop_res);
curr_sp = be_new_Proj(restore, pn_ia32_Pop_stack);
curr_bp = be_new_Proj_reg(restore, pn_ia32_Pop_res, bp);
curr_sp = be_new_Proj_reg(restore, pn_ia32_Pop_stack, sp);
curr_mem = be_new_Proj(restore, pn_ia32_Pop_M);
}
sched_add_before(ret, restore);
arch_set_irn_register(curr_bp, bp);
arch_set_irn_register(curr_sp, sp);
set_irn_n(ret, n_ia32_Return_mem, curr_mem);
set_irn_n(ret, n_ebp, curr_bp);
} else {
......@@ -1070,8 +1063,7 @@ static void introduce_prologue(ir_graph *const irg)
sched_add_after(start, push);
ir_node *const curr_mem = be_new_Proj(push, pn_ia32_Push_M);
edges_reroute_except(mem, curr_mem, push);
ir_node *const curr_sp = be_new_Proj(push, pn_ia32_Push_stack);
arch_set_irn_register(curr_sp, sp);
ir_node *const curr_sp = be_new_Proj_reg(push, pn_ia32_Push_stack, sp);
/* move esp to ebp */
ir_node *const curr_bp = be_new_Copy(block, curr_sp);
......
......@@ -141,11 +141,11 @@ carry:;
if (flags_proj != NULL) {
set_irn_mode(adc, mode_T);
ir_node *const adc_flags = be_new_Proj(adc, pn_ia32_Adc_flags);
arch_set_irn_register(adc_flags, &ia32_registers[REG_EFLAGS]);
arch_register_t const *const reg_flags = &ia32_registers[REG_EFLAGS];
ir_node *const adc_flags = be_new_Proj_reg(adc, pn_ia32_Adc_flags, reg_flags);
ir_node *cmc = new_bd_ia32_Cmc(dbgi, block, adc_flags);
arch_set_irn_register(cmc, &ia32_registers[REG_EFLAGS]);
arch_set_irn_register(cmc, reg_flags);
sched_add_after(irn, cmc);
exchange(flags_proj, cmc);
}
......
......@@ -266,8 +266,7 @@ static void peephole_ia32_Test(ir_node *node)
kill_node(left);
}
ir_node *const flags_proj = be_new_Proj(op, pn_ia32_flags);
arch_set_irn_register(flags_proj, &ia32_registers[REG_EFLAGS]);
ir_node *const flags_proj = be_new_Proj_reg(op, pn_ia32_flags, &ia32_registers[REG_EFLAGS]);
be_peephole_exchange(node, flags_proj);
} else if (is_ia32_Immediate(right)) {
ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right);
......@@ -415,17 +414,16 @@ static void peephole_IncSP_Store_to_push(ir_node *irn)
}
/* walk through the Stores and create Pushs for them */
ir_node *block = get_nodes_block(irn);
ir_graph *irg = get_irn_irg(irn);
ir_node *first_push = NULL;
ir_node *const block = get_nodes_block(irn);
ir_graph *const irg = get_irn_irg(irn);
ir_node *const noreg = ia32_new_NoReg_gp(irg);
ir_node *first_push = NULL;
for (; i >= 0; --i) {
ir_node *store = stores[i];
ir_node *noreg = ia32_new_NoReg_gp(irg);
ir_node *val = get_irn_n(store, n_ia32_unary_op);
ir_node *mem = get_irn_n(store, n_ia32_mem);
const arch_register_t *spreg = arch_get_irn_register(curr_sp);
ir_node *const push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, mem, val, curr_sp, ia32_mode_gp);
ir_node *const store = stores[i];
dbg_info *const dbgi = get_irn_dbg_info(store);
ir_node *const mem = get_irn_n(store, n_ia32_mem);
ir_node *const val = get_irn_n(store, n_ia32_unary_op);
ir_node *const push = new_bd_ia32_Push(dbgi, block, noreg, noreg, mem, val, curr_sp, ia32_mode_gp);
copy_mark(store, push);
if (first_push == NULL)
......@@ -434,8 +432,7 @@ static void peephole_IncSP_Store_to_push(ir_node *irn)
sched_add_after(skip_Proj(curr_sp), push);
/* create stackpointer Proj */
curr_sp = be_new_Proj(push, pn_ia32_Push_stack);
arch_set_irn_register(curr_sp, spreg);
curr_sp = be_new_Proj_reg(push, pn_ia32_Push_stack, &ia32_registers[REG_ESP]);
/* use the memproj now */
be_peephole_exchange(store, push);
......@@ -556,8 +553,7 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn)
inc_ofs = (i + 1) * 4;
/* create a new IncSP if needed */
const arch_register_t *esp = &ia32_registers[REG_ESP];
ir_node *const block = get_nodes_block(irn);
ir_node *const block = get_nodes_block(irn);
if (inc_ofs > 0) {
pred_sp = ia32_new_IncSP(block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
sched_add_before(irn, pred_sp);
......@@ -575,8 +571,7 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn)
copy_mark(load, pop);
/* create stackpointer Proj */
pred_sp = be_new_Proj(pop, pn_ia32_Pop_stack);
arch_set_irn_register(pred_sp, esp);
pred_sp = be_new_Proj_reg(pop, pn_ia32_Pop_stack, &ia32_registers[REG_ESP]);
sched_add_before(irn, pop);
be_peephole_exchange(load, pop);
......@@ -621,22 +616,16 @@ static ir_node *create_pop(dbg_info *dbgi, ir_node *block,
ir_node *stack, ir_node *schedpoint,
const arch_register_t *reg)
{
const arch_register_t *esp = &ia32_registers[REG_ESP];
ir_graph *irg = get_irn_irg(block);
ir_node *pop = new_bd_ia32_Pop(dbgi, block, get_irg_no_mem(irg), stack);
stack = be_new_Proj(pop, pn_ia32_Pop_stack);
arch_set_irn_register(stack, esp);
ir_node *const val = be_new_Proj(pop, pn_ia32_Pop_res);
arch_set_irn_register(val, reg);
ir_graph *const irg = get_irn_irg(block);
ir_node *const mem = get_irg_no_mem(irg);
ir_node *const pop = new_bd_ia32_Pop(dbgi, block, mem, stack);
sched_add_before(schedpoint, pop);
ir_node *const val = be_new_Proj_reg(pop, pn_ia32_Pop_res, reg);
ir_node *const keep = be_new_Keep_one(val);
sched_add_before(schedpoint, keep);
return stack;
return be_new_Proj_reg(pop, pn_ia32_Pop_stack, &ia32_registers[REG_ESP]);
}
/**
......
......@@ -4338,10 +4338,8 @@ static ir_node *gen_Alloc(ir_node *node)
set_ia32_op_type(new_node, ia32_Normal);
set_ia32_ls_mode(new_node, ia32_mode_gp);
SET_IA32_ORIG_NODE(new_node, node);
arch_set_irn_register_out(new_node, pn_ia32_SubSP_stack,
&ia32_registers[REG_ESP]);
ir_node *const stack_proj = be_new_Proj(new_node, pn_ia32_SubSP_stack);
ir_node *const stack_proj = be_new_Proj_reg(new_node, pn_ia32_SubSP_stack, &ia32_registers[REG_ESP]);
keep_alive(stack_proj);
pmap_insert(node_to_stack, node, stack_proj);
......
......@@ -845,14 +845,13 @@ do_pop:
ia32_copy_am_attrs(vfld, n);
set_ia32_op_type(vfld, ia32_AddrModeS);
ir_node *const rproj = be_new_Proj(vfld, pn_ia32_fld_res);
ir_node *const mproj = be_new_Proj(vfld, pn_ia32_fld_M);
arch_set_irn_register(rproj, arch_get_irn_register(val));
arch_register_t const *const reg = arch_get_irn_register(val);
ir_node *const rproj = be_new_Proj_reg(vfld, pn_ia32_fld_res, reg);
/* Replace TOS by the reloaded value. */
x87_set_st(state, rproj, 0);
/* reroute all former users of the store memory to the load memory */
ir_node *const mproj = be_new_Proj(vfld, pn_ia32_fld_M);
edges_reroute_except(mem, mproj, vfld);
sched_add_after(n, vfld);
......
......@@ -485,17 +485,15 @@ static bool is_restorezeroopt_reg(const arch_register_t *reg)
static void replace_with_restore(ir_node *const restore, ir_node *const node, ir_node *const replaced)
{
sched_add_before(node, restore);
sched_add_before(node, restore);
arch_register_t const *const sp = &sparc_registers[REG_SP];
arch_set_irn_register_out(restore, pn_sparc_Restore_stack, sp);
ir_node *const stack = be_new_Proj(restore, pn_sparc_Restore_stack);
be_peephole_exchange(node, stack);
arch_register_t const *const sp = &sparc_registers[REG_SP];
ir_node *const stack = be_new_Proj_reg(restore, pn_sparc_Restore_stack, sp);
be_peephole_exchange(node, stack);
arch_register_t const *const reg = arch_get_irn_register(replaced);
arch_set_irn_register_out(restore, pn_sparc_Restore_res, reg);
ir_node *const res = be_new_Proj(restore, pn_sparc_Restore_res);
be_peephole_exchange(replaced, res);
arch_register_t const *const reg = arch_get_irn_register(replaced);
ir_node *const res = be_new_Proj_reg(restore, pn_sparc_Restore_res, reg);
be_peephole_exchange(replaced, res);
}
static void replace_with_restore_reg(ir_node *node, ir_node *replaced,
......
......@@ -1949,8 +1949,7 @@ static ir_node *gen_Alloc(ir_node *node)
subsp = new_bd_sparc_SubSP_reg(dbgi, new_block, stack_pred, new_size, new_mem);
}
ir_node *const stack_proj = be_new_Proj(subsp, pn_sparc_SubSP_stack);
arch_set_irn_register(stack_proj, sp_reg);
ir_node *const stack_proj = be_new_Proj_reg(subsp, pn_sparc_SubSP_stack, sp_reg);
/* If we are the last stack producer in a block, we have to keep the
* stack value. This keeps all producers, which is more than necessary. */
keep_alive(stack_proj);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.