Commit eae40498 authored by Matthias Braun

amd64: Always initialize amd64_addr_t

We previously left the amd64_addr_t attribute uninitialized for OP_REG,
OP_REG_REG and OP_REG_IMM. Make the code more consistent by always
initializing it to the newly introduced X86_ADDR_REG variant; as a bonus,
we can also use the base_input member to indicate the register input
instead of hardcoding it to 0.
parent cf5da50e
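For illustration, the initialization idiom this commit converges on looks roughly like the sketch below. The types and enum values are simplified stand-ins (the real amd64_addr_t in libFirm carries further members such as index_input, mem_input and an immediate, and `make_reg_addr` is a hypothetical helper); only the idea matches the change: describe a plain register operand with a designated initializer selecting X86_ADDR_REG and recording the register input in base_input, instead of memset-ing the attribute and hardcoding input 0.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the libFirm types; the real structs have
 * additional members (immediate, index_input, mem_input, ...). */
typedef enum x86_addr_variant_t {
	X86_ADDR_REG,      /* operand is just a register */
	X86_ADDR_JUST_IMM, /* operand is just an immediate */
} x86_addr_variant_t;

typedef struct amd64_addr_t {
	x86_addr_variant_t variant;
	uint8_t            base_input; /* node input holding the base register */
} amd64_addr_t;

/* New style (this commit): always state the variant and which input
 * carries the register, instead of leaving the attribute zeroed and
 * assuming input 0 at emit time. */
static amd64_addr_t make_reg_addr(int reg_input)
{
	return (amd64_addr_t) {
		.variant    = X86_ADDR_REG,
		.base_input = (uint8_t)reg_input,
	};
}

int main(void)
{
	int const reg_input = 1; /* hypothetical input position */
	amd64_addr_t const addr = make_reg_addr(reg_input);
	printf("variant=%d base_input=%u\n",
	       addr.variant, (unsigned)addr.base_input);
	return 0;
}
```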
@@ -333,13 +333,13 @@ static void amd64_emit_am(const ir_node *const node, bool indirect_star)
 			= (const amd64_binop_addr_attr_t*)attr;
 		amd64_emit_immediate32(true, &binop_attr->u.immediate);
 		be_emit_cstring(", ");
-		goto emit_reg_in0;
+		goto emit_addr_reg;
 	}
 	case AMD64_OP_REG_REG: {
 		const arch_register_t *reg1 = arch_get_irn_register_in(node, 1);
 		emit_register_mode(reg1, attr->size);
 		be_emit_cstring(", ");
-		goto emit_reg_in0;
+		goto emit_addr_reg;
 	}
 	case AMD64_OP_REG_ADDR: {
 		const amd64_binop_addr_attr_t *const binop_attr
@@ -375,8 +375,10 @@ emit_addr:
 	case AMD64_OP_REG: {
 		if (indirect_star)
 			be_emit_char('*');
-emit_reg_in0:;
-		const arch_register_t *reg = arch_get_irn_register_in(node, 0);
+emit_addr_reg:
+		assert(attr->addr.variant == X86_ADDR_REG);
+		arch_register_t const *const reg
+			= arch_get_irn_register_in(node, attr->addr.base_input);
 		emit_register_mode(reg, attr->size);
 		return;
 	}
...
@@ -83,10 +83,12 @@ static void transform_sub_to_neg_add(ir_node *node,
 	tv = tarval_shl_unsigned(tv, bits - 1);
 	ir_entity *sign_bit_const = create_float_const_entity(tv);
 
-	amd64_binop_addr_attr_t xor_attr;
-	memset(&xor_attr, 0, sizeof(xor_attr));
-	xor_attr.base.size = INSN_SIZE_64;
-	xor_attr.base.base.op_mode = AMD64_OP_REG_ADDR;
+	amd64_binop_addr_attr_t xor_attr = {
+		.base = {
+			.base.op_mode = AMD64_OP_REG_ADDR,
+			.size = INSN_SIZE_64,
+		},
+	};
 	init_lconst_addr(&xor_attr.base.addr, sign_bit_const);
 
 	ir_node *xor_in[] = { in2 };
@@ -150,9 +152,10 @@ static void amd64_turn_back_am(ir_node *const node, arch_register_t const *const
 	new_in[1] = load_res;
 	set_irn_in(node, ARRAY_SIZE(new_in), new_in);
 	attr->base.op_mode = AMD64_OP_REG_REG;
-#ifndef NDEBUG
-	memset(&attr->addr, 0, sizeof(attr->addr));
-#endif
+	attr->addr = (amd64_addr_t) {
+		.base_input = 0,
+		.variant = X86_ADDR_REG,
+	};
 
 	/* rewire mem-proj */
 	foreach_out_edge(node, edge) {
...
@@ -132,7 +132,7 @@ my $divop = {
 	out_reqs  => [ "rax", "flags", "mem", "rdx" ],
 	outs      => [ "res_div", "flags", "M", "res_mod" ],
 	attr_type => "amd64_addr_attr_t",
-	fixed     => "amd64_addr_t addr = { { .offset = 0 }, .variant = X86_ADDR_JUST_IMM };\n"
+	fixed     => "amd64_addr_t addr = { .base_input = 0, .variant = X86_ADDR_REG };\n"
 	             ."amd64_op_mode_t op_mode = AMD64_OP_REG;\n",
 	attr      => "amd64_insn_size_t size",
 };
@@ -169,7 +169,7 @@ my $unop = {
 	attr_type => "amd64_addr_attr_t",
 	attr      => "amd64_insn_size_t size",
 	fixed     => "amd64_op_mode_t op_mode = AMD64_OP_REG;\n"
-	             ."amd64_addr_t addr = { { .offset = 0 }, .variant = X86_ADDR_JUST_IMM };",
+	             ."amd64_addr_t addr = { .base_input = 0, .variant = X86_ADDR_REG };",
 };
 
 my $unop_out = {
...
@@ -737,7 +737,8 @@ static void match_binop(amd64_args_t *args, ir_node *block,
 	bool use_immediate = flags & match_immediate;
 	bool mode_neutral  = flags & match_mode_neutral;
 
-	args->attr.base.size = get_insn_size_from_mode(mode);
+	amd64_binop_addr_attr_t *const attr = &args->attr;
+	attr->base.size = get_insn_size_from_mode(mode);
 
 	/* TODO: legalize phase */
 	if (mode_neutral) {
@@ -753,19 +754,21 @@ static void match_binop(amd64_args_t *args, ir_node *block,
 	bool use_am
 		= use_address_matching(mode, flags, block, op1, op2, &load, &op);
 
+	amd64_addr_t *addr = &attr->base.addr;
 	if (use_immediate
-	    && match_immediate_32(&args->attr.u.immediate, op2, false, mode_neutral)) {
+	    && match_immediate_32(&attr->u.immediate, op2, false, mode_neutral)) {
 		assert(!use_xmm && "Can't (yet) match binop with xmm immediate");
 		/* fine, we found an immediate */
-		args->attr.base.base.op_mode = AMD64_OP_REG_IMM;
-		args->in[args->arity++] = be_transform_node(op1);
-		args->reqs = reg_reqs;
+		int const reg_input = args->arity++;
+		args->in[reg_input] = be_transform_node(op1);
+		addr->variant = X86_ADDR_REG;
+		addr->base_input = reg_input;
+		attr->base.base.op_mode = AMD64_OP_REG_IMM;
+		args->reqs = reg_reqs;
 	} else if (use_am) {
-		ir_node *new_op = be_transform_node(op);
-		int reg_input = args->arity++;
-		args->attr.u.reg_input = reg_input;
-		args->in[reg_input] = new_op;
-		amd64_addr_t *addr = &args->attr.base.addr;
+		int const reg_input = args->arity++;
+		attr->u.reg_input = reg_input;
+		args->in[reg_input] = be_transform_node(op);
 
 		ir_node *ptr = get_Load_ptr(load);
 		perform_address_matching(ptr, &(args->arity), args->in, addr);
@@ -778,12 +781,17 @@ static void match_binop(amd64_args_t *args, ir_node *block,
 
 		addr->mem_input = mem_input;
 		args->mem_proj = get_Proj_for_pn(load, pn_Load_M);
-		args->attr.base.base.op_mode = AMD64_OP_REG_ADDR;
+		attr->base.base.op_mode = AMD64_OP_REG_ADDR;
 	} else {
 		/* simply transform the arguments */
-		args->in[args->arity++] = be_transform_node(op1);
-		args->in[args->arity++] = be_transform_node(op2);
-		args->attr.base.base.op_mode = AMD64_OP_REG_REG;
+		int const reg_input0 = args->arity++;
+		int const reg_input1 = args->arity++;
+		args->in[reg_input0] = be_transform_node(op1);
+		args->in[reg_input1] = be_transform_node(op2);
+		addr->variant = X86_ADDR_REG;
+		addr->base_input = reg_input0;
+		attr->u.reg_input = reg_input1;
+		attr->base.base.op_mode = AMD64_OP_REG_REG;
 		args->reqs = use_xmm ? amd64_xmm_xmm_reqs : amd64_reg_reg_reqs;
 	}
 
@@ -814,20 +822,20 @@ static ir_node *gen_binop_am(ir_node *node, ir_node *op1, ir_node *op2,
 	return be_new_Proj(new_node, pn_res);
 }
 
-static ir_node *gen_binop_rax(ir_node *node, ir_node *op1, ir_node *op2,
+static ir_node *gen_binop_rax(ir_node *node, ir_node *op0, ir_node *op1,
                               construct_rax_binop_func make_node,
                               match_flags_t flags)
 {
 	bool mode_neutral = flags & match_mode_neutral;
 	assert(! (flags & match_immediate));
 
-	ir_mode *mode = get_irn_mode(op1);
+	ir_mode *mode = get_irn_mode(op0);
 	amd64_insn_size_t size = get_insn_size_from_mode(mode);
 
 	/* TODO: legalize phase */
 	if (mode_neutral) {
+		op0 = be_skip_downconv(op0, true);
 		op1 = be_skip_downconv(op1, true);
-		op2 = be_skip_downconv(op2, true);
 	} else {
 		/* TODO: extend inputs? */
 		(void)needs_extension;
@@ -837,7 +845,7 @@ static ir_node *gen_binop_rax(ir_node *node, ir_node *op1, ir_node *op2,
 	ir_node *load;
 	ir_node *op;
 	bool use_am
-		= use_address_matching(mode, flags, block, op1, op2, &load, &op);
+		= use_address_matching(mode, flags, block, op0, op1, &load, &op);
 
 	ir_node *in[4];
 	int arity = 0;
@@ -866,10 +874,14 @@ static ir_node *gen_binop_rax(ir_node *node, ir_node *op1, ir_node *op2,
 		op_mode = AMD64_OP_ADDR;
 	} else {
 		/* simply transform the arguments */
-		in[arity++] = be_transform_node(op1);
-		in[arity++] = be_transform_node(op2);
-		reqs = reg_rax_reqs;
-		op_mode = AMD64_OP_REG;
+		int const input0 = arity++;
+		int const input1 = arity++;
+		in[input0] = be_transform_node(op0);
+		in[input1] = be_transform_node(op1);
+		reqs = reg_rax_reqs;
+		op_mode = AMD64_OP_REG;
+		addr.variant = X86_ADDR_REG;
+		addr.base_input = input0;
 	}
 
 	assert((size_t)arity <= ARRAY_SIZE(in));
@@ -890,19 +902,20 @@ static ir_node *gen_binop_xmm(ir_node *node, ir_node *op0, ir_node *op1,
 	ir_mode *mode = get_irn_mode(op0);
 	amd64_args_t args;
 	memset(&args, 0, sizeof(args));
 
-	args.attr.base.size = INSN_SIZE_64;
+	amd64_binop_addr_attr_t *const attr = &args.attr;
+	attr->base.size = INSN_SIZE_64;
 
 	ir_node *load;
 	ir_node *op;
 	bool use_am = use_address_matching(mode, flags, block, op0, op1, &load,
 	                                   &op);
 
+	amd64_addr_t *addr = &attr->base.addr;
 	if (use_am) {
 		int reg_input = args.arity++;
-		args.attr.u.reg_input = reg_input;
+		attr->u.reg_input = reg_input;
 		args.in[reg_input] = be_transform_node(op);
-		amd64_addr_t *addr = &args.attr.base.addr;
 
 		ir_node *ptr = get_Load_ptr(load);
 		perform_address_matching(ptr, &args.arity, args.in, addr);
@@ -914,12 +927,17 @@ static ir_node *gen_binop_xmm(ir_node *node, ir_node *op0, ir_node *op1,
 
 		addr->mem_input = mem_input;
 		args.mem_proj = get_Proj_for_pn(load, pn_Load_M);
-		args.attr.base.base.op_mode = AMD64_OP_REG_ADDR;
+		attr->base.base.op_mode = AMD64_OP_REG_ADDR;
 	} else {
-		args.in[args.arity++] = be_transform_node(op0);
-		args.in[args.arity++] = be_transform_node(op1);
-		args.attr.base.base.op_mode = AMD64_OP_REG_REG;
-		args.reqs = amd64_xmm_xmm_reqs;
+		int const input0 = args.arity++;
+		int const input1 = args.arity++;
+		args.in[input0] = be_transform_node(op0);
+		args.in[input1] = be_transform_node(op1);
+		addr->base_input = input0;
+		addr->variant = X86_ADDR_REG;
+		attr->u.reg_input = input1;
+		attr->base.base.op_mode = AMD64_OP_REG_REG;
+		args.reqs = amd64_xmm_xmm_reqs;
 	}
 
 	dbg_info *const dbgi = get_irn_dbg_info(node);
@@ -1396,8 +1414,11 @@ static ir_node *gen_unop_out(ir_node *const node, int op_pos,
 		fix_node_mem_proj(new_node, mem_proj);
 	} else {
-		amd64_addr_t addr = { .immediate = { .entity = NULL } };
-		ir_node *in[] = { be_transform_node(op) };
+		amd64_addr_t addr = {
+			.base_input = 0,
+			.variant = X86_ADDR_REG,
+		};
+		ir_node *in[] = { be_transform_node(op) };
 		new_node = gen(dbgi, new_block, ARRAY_SIZE(in), in, reg_reqs, size, AMD64_OP_REG, addr);
 	}
 
@@ -1416,11 +1437,16 @@ static ir_node *gen_float_neg(ir_node *const node)
 	ir_node *const load = create_float_const(dbgi, new_block, tv);
 	ir_node *const in[] = { new_op, load };
 
-	amd64_binop_addr_attr_t attr;
-	memset(&attr, 0, sizeof(attr));
-	attr.base.base.op_mode = AMD64_OP_REG_REG;
-	attr.base.size = get_insn_size_from_mode(mode);
+	amd64_binop_addr_attr_t attr = {
+		.base = {
+			.base.op_mode = AMD64_OP_REG_REG,
+			.addr = {
+				.base_input = 0,
+				.variant = X86_ADDR_REG,
+			},
+			.size = get_insn_size_from_mode(mode),
+		},
+	};
 
 	ir_node *const xor
 		= new_bd_amd64_xorp(dbgi, new_block, ARRAY_SIZE(in), in,
 		                    amd64_xmm_xmm_reqs, &attr);
@@ -1517,10 +1543,12 @@ static ir_node *gen_IJmp(ir_node *const node)
 			op_mode = AMD64_OP_ADDR;
 		} else {
-			assert(arity == 0); // AMD64_OP_REG always outputs the first input
 			op_mode = AMD64_OP_REG;
-			in[arity++] = be_transform_node(op);
+			int const input = arity++;
+			in[input] = be_transform_node(op);
 			reqs = reg_reqs;
+			addr.variant = X86_ADDR_REG;
+			addr.base_input = input;
 		}
 	}
 
@@ -1584,10 +1612,14 @@ static ir_node *gen_Switch(ir_node *const node)
 		ir_node *const add = create_add_lea(dbgi, new_block, INSN_SIZE_64,
 		                                    base, load_res);
 
-		memset(&addr, 0, sizeof(addr));
-		op_mode = AMD64_OP_REG;
-		in[arity++] = add;
-		in_reqs = reg_reqs;
+		int const input = arity++;
+		addr = (amd64_addr_t) {
+			.base_input = input,
+			.variant = X86_ADDR_REG,
+		};
+		op_mode = AMD64_OP_REG;
+		in[input] = add;
+		in_reqs = reg_reqs;
 	} else {
 		int index_in = arity++;
 		in[index_in] = new_sel;
@@ -1813,9 +1845,11 @@ static ir_node *gen_Call(ir_node *const node)
 		if (mem == load_mem || (is_Proj(mem) && get_Proj_pred(mem) == load))
 			goto no_call_mem;
 	} else {
-		int input = in_arity++;
-		assert(input == 0); /* AMD64_OP_REG is currently hardcoded to always
-		                     * output the register of the first input. */
+		int const input = in_arity++;
+		addr = (amd64_addr_t) {
+			.base_input = input,
+			.variant = X86_ADDR_REG,
+		};
 		in[input] = be_transform_node(callee);
 		in_req[input] = &amd64_class_reg_req_gp;
 		op_mode = AMD64_OP_REG;
@@ -2104,8 +2138,6 @@ static ir_node *match_mov(dbg_info *dbgi, ir_node *block, ir_node *value,
                           amd64_insn_size_t size, create_mov_func create_mov,
                           unsigned pn_res)
 {
-	amd64_addr_t addr;
-	memset(&addr, 0, sizeof(addr));
 	int arity = 0;
 	ir_node *in[4];
 	ir_mode *mode = get_irn_mode(value);
@@ -2115,6 +2147,7 @@ static ir_node *match_mov(dbg_info *dbgi, ir_node *block, ir_node *value,
 	                                  value, &load, &op);
 
 	amd64_op_mode_t op_mode;
+	amd64_addr_t addr;
 	const arch_register_req_t **reqs;
 	ir_node *mem_proj = NULL;
 	if (use_am) {
@@ -2132,10 +2165,15 @@ static ir_node *match_mov(dbg_info *dbgi, ir_node *block, ir_node *value,
 		op_mode = AMD64_OP_ADDR;
 	} else {
 		ir_node *new_value = be_transform_node(value);
-		in[arity++] = new_value;
-		reqs = get_irn_mode(new_value) == amd64_mode_xmm ? amd64_xmm_reqs
-		                                                 : reg_reqs;
-		op_mode = AMD64_OP_REG;
+		int const input = arity++;
+		addr = (amd64_addr_t) {
+			.base_input = input,
+			.variant = X86_ADDR_REG,
+		};
+		in[input] = new_value;
+		reqs = get_irn_mode(new_value) == amd64_mode_xmm ? amd64_xmm_reqs
+		                                                 : reg_reqs;
+		op_mode = AMD64_OP_REG;
 	}
 
 	assert((size_t)arity <= ARRAY_SIZE(in));
@@ -2353,7 +2391,10 @@ static ir_node *gen_Conv(ir_node *const node)
 		// has done that already.
 		ir_node *const in[] = { op_ext };
 		unsigned const n_in = ARRAY_SIZE(in);
-		amd64_addr_t const addr = { .variant = X86_ADDR_JUST_IMM };
+		amd64_addr_t const addr = {
+			.base_input = 0,
+			.variant = X86_ADDR_REG,
+		};
 		ir_node *const movq = new_bd_amd64_movq(dbgi, block, n_in, in,
 		                                        reg_reqs, AMD64_OP_REG,
 		                                        addr);
@@ -2397,9 +2438,6 @@ static ir_node *gen_Conv(ir_node *const node)
 	if (is_gp && be_upper_bits_clean(op, min_mode))
 		return be_transform_node(op);
 
-	amd64_addr_t addr;
-	memset(&addr, 0, sizeof(addr));
-
 	amd64_insn_size_t size;
 	if (!is_gp && get_mode_size_bits(min_mode) < 32) {
 		/* Only 32-bit and 64-bit register size allowed for
@@ -2433,6 +2471,10 @@ static ir_node *gen_Conv(ir_node *const node)
 	ir_node *const new_op = be_transform_node(op);
 	ir_node *in[] = { new_op };
 	unsigned const n_in = ARRAY_SIZE(in);
+	amd64_addr_t addr = {
+		.base_input = 0,
+		.variant = X86_ADDR_REG,
+	};
 
 	ir_node *conv;
 	unsigned pn_res;
@@ -2770,16 +2812,23 @@ static ir_node *gen_Alloc(ir_node *const node)
 	ir_node *const new_mem = be_transform_node(mem);
 	const arch_register_req_t **reqs;
 
-	amd64_binop_addr_attr_t attr;
-	memset(&attr, 0, sizeof(attr));
-	attr.base.size = INSN_SIZE_64;
-
 	ir_node *in[3];
 	unsigned arity = 0;
 	ir_graph *const irg = get_irn_irg(node);
 
-	in[arity++] = get_initial_sp(irg);
+	int const input0 = arity++;
+	in[input0] = get_initial_sp(irg);
+	amd64_binop_addr_attr_t attr = {
+		.base = {
+			.addr = {
+				.base_input = input0,
+				.variant = X86_ADDR_REG,
+			},
+			.size = INSN_SIZE_64,
+		},
+	};
 
 	if (is_Const(size)) {
 		ir_tarval *tv = get_Const_tarval(size);
 		long sizel = get_tarval_long(tv);
@@ -2788,8 +2837,10 @@ static ir_node *gen_Alloc(ir_node *const node)
 		attr.u.immediate.offset = sizel;
 		reqs = rsp_mem_reqs;
 	} else {
+		int const input1 = arity++;
+		in[input1] = be_transform_node(size);
 		attr.base.base.op_mode = AMD64_OP_REG_REG;
-		in[arity++] = be_transform_node(size);
+		attr.u.reg_input = input1;
 		reqs = rsp_reg_mem_reqs;
 	}
 	in[arity++] = new_mem;
@@ -2833,9 +2884,8 @@ static ir_node *gen_clz(ir_node *const node)
 		},
 		.size = size,
 		.addr = {
-			.immediate = {
-				.entity = NULL,
-			},
+			.base_input = 0,
+			.variant = X86_ADDR_REG,
 		},
 	},
 	.u = {
@@ -2872,9 +2922,8 @@ static ir_node *gen_ffs(ir_node *const node)
 	/* movzbl temp, temp */
 	ir_node *const movzbl_in[] = { setcc };
 	amd64_addr_t movzbl_addr = {