Commit 7cbfeeea authored by Christoph Mallon

be: Add and use be_allocate_in_reqs().

parent d8fb534d
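
Summary of the change (illustrative sketch, not part of the commit itself): the new helper be_allocate_in_reqs() centralises the allocation of a node's input register-requirement array on the backend obstack, which every backend previously did by hand. A minimal before/after comparison, distilled from the hunks below; irg and n_ins stand for whatever graph and input count the caller already has:

/* Before: each caller fetched the backend obstack and allocated by hand. */
struct obstack *const obst = be_get_be_obst(irg);
arch_register_req_t const **const reqs
	= OALLOCN(obst, arch_register_req_t const*, n_ins);

/* After: the obstack handling is hidden behind the new helper. */
arch_register_req_t const **const reqs = be_allocate_in_reqs(irg, n_ins);
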
@@ -228,8 +228,7 @@ static ir_node *gen_Return(ir_node *node)
 unsigned const n_ins = p + n_res;
 ir_node **const in = ALLOCAN(ir_node*, n_ins);
 ir_graph *const irg = get_irn_irg(node);
-struct obstack *const obst = be_get_be_obst(irg);
-arch_register_req_t const **const reqs = OALLOCN(obst, arch_register_req_t const*, n_ins);
+arch_register_req_t const **const reqs = be_allocate_in_reqs(irg, n_ins);
 in[n_TEMPLATE_Return_mem] = be_transform_node(get_Return_mem(node));
 reqs[n_TEMPLATE_Return_mem] = arch_no_register_req;
......
@@ -1517,7 +1517,6 @@ static ir_node *gen_Return(ir_node *node)
 ir_node *new_mem = be_transform_node(mem);
 ir_node *sp = get_stack_pointer_for(node);
 size_t n_res = get_Return_n_ress(node);
-struct obstack *be_obst = be_get_be_obst(irg);
 x86_cconv_t *cconv = current_cconv;
 /* estimate number of return values */
@@ -1525,8 +1524,7 @@ static ir_node *gen_Return(ir_node *node)
 size_t const n_callee_saves = rbitset_popcount(cconv->callee_saves, N_AMD64_REGISTERS);
 size_t const n_ins = p + n_res + n_callee_saves;
-const arch_register_req_t **reqs
-	= OALLOCN(be_obst, const arch_register_req_t*, n_ins);
+arch_register_req_t const **const reqs = be_allocate_in_reqs(irg, n_ins);
 ir_node **in = ALLOCAN(ir_node*, n_ins);
 in[n_amd64_ret_mem] = new_mem;
@@ -1574,15 +1572,12 @@ static ir_node *gen_Call(ir_node *node)
 /* max inputs: memory, callee, register arguments */
 ir_node **sync_ins = ALLOCAN(ir_node*, n_params+1);
 ir_graph *irg = get_irn_irg(node);
-struct obstack *obst = be_get_be_obst(irg);
 x86_cconv_t *cconv
 	= amd64_decide_calling_convention(type, NULL);
 size_t n_param_regs = cconv->n_param_regs;
 /* param-regs + mem + stackpointer + callee(2) + n_sse_regs */
 unsigned max_inputs = 5 + n_param_regs;
 ir_node **in = ALLOCAN(ir_node*, max_inputs);
-const arch_register_req_t **in_req
-	= OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
 int in_arity = 0;
 int sync_arity = 0;
 ir_node *new_frame = get_stack_pointer_for(node);
@@ -1607,6 +1602,8 @@ static ir_node *gen_Call(ir_node *node)
 ir_node *mem_proj = NULL;
+arch_register_req_t const **const in_req = be_allocate_in_reqs(irg, max_inputs);
 if (match_immediate_32(&addr.immediate, callee, true, true)) {
 	op_mode = AMD64_OP_UNOP_IMM32;
 } else {
......
@@ -1795,13 +1795,11 @@ static ir_node *gen_Return(ir_node *node)
 ir_node *sp = get_stack_pointer_for(node);
 unsigned n_res = get_Return_n_ress(node);
 ir_graph *irg = get_irn_irg(node);
-struct obstack *obst = be_get_be_obst(irg);
 unsigned p = n_arm_Return_first_result;
 unsigned const n_ins = p + n_res + n_callee_saves;
-const arch_register_req_t **reqs
-	= OALLOCN(obst, const arch_register_req_t*, n_ins);
+arch_register_req_t const **const reqs = be_allocate_in_reqs(irg, n_ins);
 ir_node **in = ALLOCAN(ir_node*, n_ins);
 in[n_arm_Return_mem] = new_mem;
@@ -1855,9 +1853,7 @@ static ir_node *gen_Call(ir_node *node)
 size_t const max_inputs = 3 + n_param_regs;
 ir_node **in = ALLOCAN(ir_node*, max_inputs);
 ir_node **sync_ins = ALLOCAN(ir_node*, n_params);
-struct obstack *obst = be_get_be_obst(irg);
-const arch_register_req_t **in_req
-	= OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
+arch_register_req_t const **const in_req = be_allocate_in_reqs(irg, max_inputs);
 size_t in_arity = 0;
 size_t sync_arity = 0;
 size_t const n_caller_saves = ARRAY_SIZE(caller_saves);
......
@@ -85,6 +85,12 @@ static int be_incsp_attrs_equal(const ir_node *a, const ir_node *b)
 	return attr_a->offset == attr_b->offset && attrs_equal_be_node(a, b);
 }
+arch_register_req_t const **be_allocate_in_reqs(ir_graph *const irg, unsigned const n)
+{
+	struct obstack *const obst = be_get_be_obst(irg);
+	return OALLOCN(obst, arch_register_req_t const*, n);
+}
 static arch_register_req_t *allocate_reg_req(ir_graph *const irg)
 {
 	struct obstack *obst = be_get_be_obst(irg);
@@ -106,19 +112,19 @@ static void be_node_set_register_req_in(ir_node *const node, int const pos,
 static void init_node_attr(ir_node *const node, unsigned const n_outputs, arch_irn_flags_t const flags)
 {
 ir_graph *irg = get_irn_irg(node);
-struct obstack *obst = be_get_be_obst(irg);
 backend_info_t *info = be_get_info(node);
 unsigned const arity = get_irn_arity(node);
 arch_register_req_t const **const in_reqs =
 	is_irn_dynamic(node) ? NEW_ARR_F(arch_register_req_t const*, arity) :
-	arity != 0 ? OALLOCN(obst, arch_register_req_t const*, arity) :
+	arity != 0 ? be_allocate_in_reqs(irg, arity) :
 	NULL;
 for (unsigned i = 0; i < arity; ++i) {
 	in_reqs[i] = arch_no_register_req;
 }
 info->in_reqs = in_reqs;
+struct obstack *const obst = be_get_be_obst(irg);
 info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_outputs);
 for (unsigned i = 0; i < n_outputs; ++i) {
 	info->out_infos[i].req = arch_no_register_req;
@@ -423,7 +429,7 @@ ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
 struct obstack *obst = be_get_be_obst(irg);
 backend_info_t *info = be_get_info(phi);
 info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, 1);
-info->in_reqs = OALLOCN(obst, const arch_register_req_t*, n_ins);
+info->in_reqs = be_allocate_in_reqs(irg, n_ins);
 info->out_infos[0].req = req;
 for (int i = 0; i < n_ins; ++i) {
@@ -453,8 +459,7 @@ ir_node *be_complete_Phi(ir_node *const phi, unsigned const n_ins, ir_node **con
 phi->attr.phi.u.backedge = new_backedge_arr(get_irg_obstack(irg), n_ins);
 set_irn_in(phi, n_ins, ins);
-struct obstack *const obst = be_get_be_obst(irg);
-arch_register_req_t const **const in_reqs = OALLOCN(obst, arch_register_req_t const*, n_ins);
+arch_register_req_t const **const in_reqs = be_allocate_in_reqs(irg, n_ins);
 arch_register_req_t const *const req = arch_get_irn_register_req(phi);
 for (unsigned i = 0; i < n_ins; ++i) {
 	in_reqs[i] = req;
@@ -581,7 +586,7 @@ static void copy_attr(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
 if (is_irn_dynamic(old_node)) {
 	new_info->in_reqs = NEW_ARR_F(const arch_register_req_t*, n_ins);
 } else {
-	new_info->in_reqs = OALLOCN(obst,const arch_register_req_t*, n_ins);
+	new_info->in_reqs = be_allocate_in_reqs(irg, n_ins);
 }
 MEMCPY(new_info->in_reqs, old_info->in_reqs, n_ins);
 } else {
......
@@ -159,6 +159,8 @@ unsigned be_get_MemPerm_entity_arity(const ir_node *irn);
 */
 ir_node *be_new_AnyVal(ir_node *block, const arch_register_class_t *cls);
+arch_register_req_t const **be_allocate_in_reqs(ir_graph *irg, unsigned n);
 const arch_register_req_t *be_create_reg_req(struct obstack *obst,
 	const arch_register_t *reg, arch_register_req_type_t additional_types);
......
@@ -68,8 +68,7 @@ ir_node *be_transform_phi(ir_node *node, const arch_register_req_t *req)
 copy_node_attr(irg, node, phi);
 backend_info_t *info = be_get_info(phi);
-struct obstack *obst = be_get_be_obst(irg);
-info->in_reqs = OALLOCN(obst, const arch_register_req_t*, arity);
+info->in_reqs = be_allocate_in_reqs(irg, arity);
 for (int i = 0; i < arity; ++i) {
 	info->in_reqs[i] = req;
 }
......
@@ -4255,7 +4255,6 @@ static ir_node *gen_Return(ir_node *node)
 ir_node *new_mem = be_transform_node(mem);
 ir_node *sp = get_stack_pointer_for(node);
 unsigned n_res = get_Return_n_ress(node);
-struct obstack *obst = be_get_be_obst(irg);
 x86_cconv_t *cconv = current_cconv;
 /* estimate number of return values */
@@ -4263,8 +4262,7 @@ static ir_node *gen_Return(ir_node *node)
 unsigned const n_callee_saves = rbitset_popcount(cconv->callee_saves, N_IA32_REGISTERS);
 unsigned const n_ins = p + n_res + n_callee_saves;
-const arch_register_req_t **reqs
-	= OALLOCN(obst, const arch_register_req_t*, n_ins);
+arch_register_req_t const **const reqs = be_allocate_in_reqs(irg, n_ins);
 ir_node **in = ALLOCAN(ir_node*, n_ins);
 in[n_ia32_Return_mem] = new_mem;
@@ -4915,11 +4913,10 @@ static ir_node *gen_Call(ir_node *node)
 ir_type *const type = get_Call_type(node);
 x86_cconv_t *const cconv = ia32_decide_calling_convention(type, NULL);
 ir_graph *const irg = get_irn_irg(node);
-struct obstack *const obst = be_get_be_obst(irg);
 unsigned in_arity = n_ia32_Call_first_argument;
 unsigned const n_ins = in_arity + cconv->n_param_regs;
 ir_node **const in = ALLOCAN(ir_node*, n_ins);
-arch_register_req_t const **const in_req = OALLOCNZ(obst, arch_register_req_t const*, n_ins);
+arch_register_req_t const **const in_req = be_allocate_in_reqs(irg, n_ins);
 in[n_ia32_Call_base] = am.addr.base;
 in_req[n_ia32_Call_base] = req_gp;
......
@@ -1679,7 +1679,6 @@ static ir_node *gen_Return(ir_node *node)
 ir_node *new_mem = be_transform_node(mem);
 ir_node *sp = get_stack_pointer_for(node);
 size_t n_res = get_Return_n_ress(node);
-struct obstack *be_obst = be_get_be_obst(irg);
 /* estimate number of return values */
 unsigned p = n_sparc_Return_first_result;
@@ -1687,8 +1686,7 @@ static ir_node *gen_Return(ir_node *node)
 if (current_cconv->omit_fp)
 	n_ins += ARRAY_SIZE(omit_fp_callee_saves);
-const arch_register_req_t **reqs
-	= OALLOCN(be_obst, const arch_register_req_t*, n_ins);
+arch_register_req_t const **const reqs = be_allocate_in_reqs(irg, n_ins);
 ir_node **in = ALLOCAN(ir_node*, n_ins);
 in[n_sparc_Return_mem] = new_mem;
@@ -1823,15 +1821,13 @@ static ir_node *gen_Call(ir_node *node)
 size_t n_ress = get_method_n_ress(type);
 /* max inputs: memory, callee, register arguments */
 ir_node **sync_ins = ALLOCAN(ir_node*, n_params);
-struct obstack *obst = be_get_be_obst(irg);
 calling_convention_t *cconv
 	= sparc_decide_calling_convention(type, NULL);
 size_t n_param_regs = cconv->n_param_regs;
 /* param-regs + mem + stackpointer + callee */
 unsigned max_inputs = 3 + n_param_regs;
 ir_node **in = ALLOCAN(ir_node*, max_inputs);
-const arch_register_req_t **in_req
-	= OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
+arch_register_req_t const **const in_req = be_allocate_in_reqs(irg, max_inputs);
 int in_arity = 0;
 int sync_arity = 0;
 int n_caller_saves
......