Commit 3d2bbc65 authored by Matthias Braun's avatar Matthias Braun
Browse files

ia32, amd64: Share address mode emit code.

This meant initializing the address mode variant all over the ia32
backend; on the other hand, this will be useful if we ever switch the
ia32 backend to a style without NoReg nodes (like amd64).
parent d6445fe0
......@@ -224,52 +224,6 @@ static void amd64_emit_immediate64(const amd64_imm64_t *const imm)
be_emit_irprintf("%+" PRId64, imm->offset);
}
static void amd64_emit_addr(const ir_node *const node,
const x86_addr_t *const addr)
{
int32_t const offset = addr->immediate.offset;
ir_entity *const entity = addr->immediate.entity;
x86_addr_variant_t const variant = addr->variant;
assert(variant != X86_ADDR_INVALID);
if (entity != NULL) {
assert(addr->immediate.kind != X86_IMM_VALUE);
assert(!is_frame_type(get_entity_owner(entity)));
x86_emit_relocation_no_offset(addr->immediate.kind, entity);
if (offset != 0)
be_emit_irprintf("%+" PRId32, offset);
} else if (offset != 0 || variant == X86_ADDR_JUST_IMM) {
assert(addr->immediate.kind == X86_IMM_VALUE);
be_emit_irprintf("%" PRId32, offset);
}
if (variant != X86_ADDR_JUST_IMM) {
be_emit_char('(');
if (variant == X86_ADDR_RIP) {
be_emit_cstring("%rip");
} else {
if (x86_addr_variant_has_base(variant)) {
arch_register_t const *const reg
= arch_get_irn_register_in(node, addr->base_input);
emit_register(reg);
}
if (x86_addr_variant_has_index(variant)) {
be_emit_char(',');
arch_register_t const *const reg
= arch_get_irn_register_in(node, addr->index_input);
emit_register(reg);
unsigned scale = addr->log_scale;
if (scale > 0)
be_emit_irprintf(",%u", 1 << scale);
}
}
be_emit_char(')');
}
}
static void amd64_emit_am(const ir_node *const node, bool indirect_star)
{
const amd64_addr_attr_t *const attr = get_amd64_addr_attr_const(node);
......@@ -292,7 +246,7 @@ static void amd64_emit_am(const ir_node *const node, bool indirect_star)
case AMD64_OP_REG_ADDR: {
const amd64_binop_addr_attr_t *const binop_attr
= (const amd64_binop_addr_attr_t*)attr;
amd64_emit_addr(node, &attr->addr);
x86_emit_addr(node, &attr->addr);
be_emit_cstring(", ");
const arch_register_t *reg
= arch_get_irn_register_in(node, binop_attr->u.reg_input);
......@@ -318,7 +272,7 @@ static void amd64_emit_am(const ir_node *const node, bool indirect_star)
emit_register_mode(reg, binop_attr->base.size);
be_emit_cstring(", ");
emit_addr:
amd64_emit_addr(node, &attr->addr);
x86_emit_addr(node, &attr->addr);
return;
}
case AMD64_OP_REG: {
......@@ -436,7 +390,7 @@ end_of_mods:
default: {
amd64_addr_attr_t const *const attr
= get_amd64_addr_attr_const(node);
amd64_emit_addr(node, &attr->addr);
x86_emit_addr(node, &attr->addr);
--fmt;
}
}
......
......@@ -372,6 +372,8 @@ static void ia32_perform_memory_operand(ir_node *irn, unsigned int i)
set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
set_irn_n(irn, n_ia32_mem, spill);
set_irn_n(irn, i, ia32_get_admissible_noreg(irn, i));
ia32_attr_t *const attr = get_ia32_attr(irn);
attr->addr.variant = X86_ADDR_BASE;
set_ia32_is_reload(irn);
/* kill the reload */
......@@ -676,6 +678,8 @@ static ir_node *ia32_new_spill(ir_node *value, ir_node *after)
: new_bd_ia32_Store (NULL, block, frame, noreg, nomem, value);
res = be_new_Proj(store, pn_ia32_Store_M);
}
ia32_attr_t *const attr = get_ia32_attr(store);
attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, mode);
set_ia32_frame_use(store, IA32_FRAME_USE_AUTO);
......@@ -706,6 +710,8 @@ static ir_node *ia32_new_reload(ir_node *value, ir_node *spill, ir_node *before)
} else {
load = new_bd_ia32_Load(NULL, block, frame, noreg, spill);
}
ia32_attr_t *const attr = get_ia32_attr(load);
attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, spillmode);
set_ia32_frame_use(load, IA32_FRAME_USE_AUTO);
......@@ -728,9 +734,12 @@ static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp,
ir_node *const push = new_bd_ia32_Push(dbgi, block, frame, noreg, mem,
noreg, sp, mode);
ia32_attr_t *const attr = get_ia32_attr(push);
attr->addr.immediate = (x86_imm32_t) {
.kind = X86_IMM_FRAMEENT,
.entity = ent,
attr->addr = (x86_addr_t) {
.immediate = (x86_imm32_t) {
.kind = X86_IMM_FRAMEENT,
.entity = ent,
},
.variant = X86_ADDR_BASE,
};
set_ia32_frame_use(push, IA32_FRAME_USE_AUTO);
set_ia32_op_type(push, ia32_AddrModeS);
......@@ -751,9 +760,12 @@ static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp,
ir_node *pop = new_bd_ia32_PopMem(dbgi, block, frame, noreg,
get_irg_no_mem(irg), sp);
ia32_attr_t *const attr = get_ia32_attr(pop);
attr->addr.immediate = (x86_imm32_t) {
.kind = X86_IMM_FRAMEENT,
.entity = ent,
attr->addr = (x86_addr_t) {
.immediate = (x86_imm32_t) {
.kind = X86_IMM_FRAMEENT,
.entity = ent,
},
.variant = X86_ADDR_BASE,
};
set_ia32_frame_use(pop, IA32_FRAME_USE_AUTO);
set_ia32_op_type(pop, ia32_AddrModeD);
......
......@@ -314,66 +314,19 @@ typedef enum ia32_emit_mod_t {
} ia32_emit_mod_t;
ENUM_BITSET(ia32_emit_mod_t)
/**
 * Return the node at input @p pos of @p node, or NULL when that input is the
 * ia32 GP NoReg placeholder.
 */
static ir_node const *get_irn_n_reg(ir_node const *const node, int const pos)
{
	ir_node *const in = get_irn_n(node, pos);
	if (is_ia32_NoReg_GP(in))
		return NULL;
	return in;
}
/**
 * Emits an ia32 address mode operand in AT&T syntax: optional segment
 * prefix, displacement, then "(base,index,scale)".
 *
 * NOTE(review): as shown, this function contains both the old inline
 * emission code and the new delegation to x86_emit_addr() at the end, so
 * it would emit the operand twice — this looks like a rendering of a diff
 * (deleted + added lines concatenated); confirm against the intended
 * post-commit state before relying on this text.
 */
static void ia32_emit_am(ir_node const *const node)
{
	ia32_attr_t const *const attr = get_ia32_attr_const(node);
	/* Segment override prefix, if any. */
	switch (attr->addr.segment) {
	case X86_SEGMENT_DEFAULT:
		break;
	case X86_SEGMENT_CS: be_emit_cstring("%cs:"); break;
	case X86_SEGMENT_SS: be_emit_cstring("%ss:"); break;
	case X86_SEGMENT_DS: be_emit_cstring("%ds:"); break;
	case X86_SEGMENT_ES: be_emit_cstring("%es:"); break;
	case X86_SEGMENT_FS: be_emit_cstring("%fs:"); break;
	case X86_SEGMENT_GS: be_emit_cstring("%gs:"); break;
	}
	/* NULL when the respective input is the NoReg placeholder. */
	ir_node const *const base = get_irn_n_reg(node, n_ia32_base);
	ir_node const *const idx = get_irn_n_reg(node, n_ia32_index);
	/* emit offset (relocation entity and/or numeric displacement) */
	int32_t const offset = attr->addr.immediate.offset;
	ir_entity const *const entity = attr->addr.immediate.entity;
	if (entity) {
		x86_emit_relocation_no_offset(attr->addr.immediate.kind, entity);
		if (offset != 0)
			be_emit_irprintf("%+"PRId32, offset);
	} else if (offset != 0 || (!base && !idx)) {
		assert(attr->addr.immediate.kind == X86_IMM_VALUE);
		/* also handle special case if nothing is set */
		be_emit_irprintf("%"PRId32, offset);
	}
	if (base || idx) {
		be_emit_char('(');
		/* emit base */
		if (base) {
			arch_register_t const *const reg = arch_get_irn_register(base);
			emit_register(reg, NULL);
		}
		/* emit index + scale (scale stored as log2, printed only when > 1) */
		if (idx) {
			be_emit_char(',');
			arch_register_t const *const reg = arch_get_irn_register(idx);
			emit_register(reg, NULL);
			unsigned const log_scale = attr->addr.log_scale;
			if (log_scale > 0)
				be_emit_irprintf(",%d", 1 << log_scale);
		}
		be_emit_char(')');
	}
	/* Prepare a temporary x86_addr_t with input numbers set until the ia32
	 * backend sets them properly earlier. */
	x86_addr_t addr = attr->addr;
	addr.base_input = n_ia32_base;
	addr.index_input = n_ia32_index;
	addr.mem_input = n_ia32_mem;
	x86_emit_addr(node, &addr);
}
void ia32_emitf(ir_node const *const node, char const *fmt, ...)
......
......@@ -57,7 +57,10 @@ static ir_entity *create_ent(ir_entity **const dst, int value, const char *name)
static ir_node *create_fnstcw(ir_node *const block, ir_node *const frame, ir_node *const noreg, ir_node *const nomem, ir_node *const state)
{
ir_node *const fnstcw = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem, state);
ir_node *const fnstcw = new_bd_ia32_FnstCW(NULL, block, frame, noreg, nomem,
state);
ia32_attr_t *const attr = get_ia32_attr(fnstcw);
attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(fnstcw, ia32_AddrModeD);
set_ia32_ls_mode(fnstcw, ia32_mode_fpcw);
set_ia32_frame_use(fnstcw, IA32_FRAME_USE_32BIT);
......@@ -104,6 +107,7 @@ static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state, ir
create_ent(&fpcw_truncate, 0x37F, "_fpcw_truncate");
ia32_attr_t *const attr = get_ia32_attr(reload);
attr->addr.immediate.entity = rounding_mode;
attr->addr.variant = X86_ADDR_JUST_IMM;
} else {
ir_node *mem;
ir_node *const frame = get_irg_frame(irg);
......@@ -115,6 +119,8 @@ static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state, ir
sched_add_before(before, cwstore);
ir_node *const load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
ia32_attr_t *const load_attr = get_ia32_attr(load);
load_attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, mode_Hu);
set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
......@@ -128,6 +134,8 @@ static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state, ir
sched_add_before(before, orn);
ir_node *const store = new_bd_ia32_Store(NULL, block, frame, noreg, nomem, orn);
ia32_attr_t *const store_attr = get_ia32_attr(store);
store_attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(store, ia32_AddrModeD);
/* Use ia32_mode_gp, as movl has a shorter opcode than movw. */
set_ia32_ls_mode(store, ia32_mode_gp);
......@@ -137,6 +145,8 @@ static ir_node *create_fpu_mode_reload(void *const env, ir_node *const state, ir
}
reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, mem);
ia32_attr_t *const attr = get_ia32_attr(reload);
attr->addr.variant = X86_ADDR_BASE;
}
set_ia32_op_type(reload, ia32_AddrModeS);
......
......@@ -429,8 +429,15 @@ static ir_node *gen_Const(ir_node *node)
load = new_bd_ia32_fld(dbgi, block, base, noreg_GP, nomem,
ls_mode);
set_irn_pinned(load, false);
ia32_attr_t *const attr = get_ia32_attr(load);
attr->addr = (x86_addr_t) {
.immediate = {
.kind = lconst_imm_kind,
.entity = floatent,
},
.variant = X86_ADDR_BASE,
};
set_ia32_op_type(load, ia32_AddrModeS);
set_am_const_entity(load, floatent);
arch_add_irn_flags(load, arch_irn_flag_rematerializable);
res = be_new_Proj(load, pn_ia32_fld_res);
}
......@@ -477,6 +484,7 @@ static ir_node *gen_Address(ir_node *node)
ir_node *tls_base = new_bd_ia32_LdTls(NULL, block);
ir_node *lea = new_bd_ia32_Lea(dbgi, block, tls_base, noreg_GP);
ia32_attr_t *const attr = get_ia32_attr(lea);
attr->addr.variant = X86_ADDR_BASE;
attr->addr.immediate = imm;
cnst = lea;
} else {
......@@ -833,9 +841,13 @@ static void build_address_ptr(x86_address_t *addr, ir_node *ptr, ir_node *mem)
memset(addr, 0, sizeof(addr[0]));
ia32_create_address_mode(addr, ptr, x86_create_am_normal);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_GP;
addr->index = addr->index ? be_transform_node(addr->index) : noreg_GP;
addr->mem = be_transform_node(mem);
addr->variant = addr->base ? (addr->index ? X86_ADDR_BASE_INDEX
: X86_ADDR_BASE)
: (addr->index ? X86_ADDR_INDEX
: X86_ADDR_JUST_IMM);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_GP;
addr->index = addr->index ? be_transform_node(addr->index) : noreg_GP;
addr->mem = be_transform_node(mem);
}
static void build_address(ia32_address_mode_t *am, ir_node *node,
......@@ -855,6 +867,7 @@ static void build_address(ia32_address_mode_t *am, ir_node *node,
.kind = lconst_imm_kind,
.entity = entity,
};
addr->variant = X86_ADDR_BASE,
adjust_relocation(&addr->imm);
addr->tls_segment = false;
am->ls_mode = get_type_mode(get_entity_type(entity));
......@@ -874,9 +887,13 @@ static void build_address(ia32_address_mode_t *am, ir_node *node,
/* construct load address */
ia32_create_address_mode(addr, ptr, flags);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_GP;
addr->index = addr->index ? be_transform_node(addr->index) : noreg_GP;
addr->mem = new_mem;
addr->variant = addr->base ? (addr->index ? X86_ADDR_BASE_INDEX
: X86_ADDR_BASE)
: (addr->index ? X86_ADDR_INDEX
: X86_ADDR_JUST_IMM);
addr->base = addr->base ? be_transform_node(addr->base) : noreg_GP;
addr->index = addr->index ? be_transform_node(addr->index) : noreg_GP;
addr->mem = new_mem;
}
static void set_address(ir_node *node, const x86_address_t *addr)
......@@ -884,6 +901,7 @@ static void set_address(ir_node *node, const x86_address_t *addr)
ia32_attr_t *const attr = get_ia32_attr(node);
attr->addr.immediate = addr->imm;
attr->addr.log_scale = addr->scale;
attr->addr.variant = addr->variant;
if (addr->tls_segment)
attr->addr.segment = X86_SEGMENT_GS;
if (addr->imm.kind == X86_IMM_FRAMEENT)
......@@ -1060,10 +1078,11 @@ static void match_arguments(ia32_address_mode_t *am, ir_node *block,
}
am->op_type = ia32_AddrModeS;
} else {
am->op_type = ia32_Normal;
am->addr.base = noreg_GP;
am->addr.index = noreg_GP;
am->addr.mem = nomem;
am->op_type = ia32_Normal;
am->addr.base = noreg_GP;
am->addr.index = noreg_GP;
am->addr.mem = nomem;
am->addr.variant = X86_ADDR_INVALID;
if (flags & match_try_am) {
am->new_op1 = NULL;
......@@ -1389,6 +1408,14 @@ static ir_node *gen_unop(ir_node *node, ir_node *op, construct_unop_func *func,
return new_node;
}
/**
 * Create an ia32 Lea node computing op0 + op1 and mark its address mode
 * variant as base+index.
 */
static ir_node *create_lea_add(dbg_info *const dbgi, ir_node *const block,
                               ir_node *const op0, ir_node *const op1)
{
	ir_node     *const lea  = new_bd_ia32_Lea(dbgi, block, op0, op1);
	ia32_attr_t *const attr = get_ia32_attr(lea);
	attr->addr.variant = X86_ADDR_BASE_INDEX;
	return lea;
}
static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block,
x86_address_t *addr)
{
......@@ -1414,7 +1441,7 @@ static ir_node *create_lea_from_address(dbg_info *dbgi, ir_node *block,
if (base == noreg_GP)
base = tls_base;
else
base = new_bd_ia32_Lea(dbgi, block, tls_base, base);
base = create_lea_add(dbgi, block, tls_base, base);
addr->tls_segment = false;
}
......@@ -1941,8 +1968,8 @@ static ir_node *gen_Shl(ir_node *node)
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_block = be_transform_nodes_block(node);
ir_node *new_left = be_transform_node(left);
ir_node *new_node
= new_bd_ia32_Lea(dbgi, new_block, new_left, new_left);
ir_node *new_node = create_lea_add(dbgi, new_block, new_left,
new_left);
SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
......@@ -2996,10 +3023,16 @@ static ir_node *gen_Switch(ir_node *node)
set_ia32_ls_mode(switchjmp, ia32_mode_gp);
}
ia32_attr_t *attr = get_ia32_attr(table_am);
attr->addr.log_scale = 2;
attr->addr = (x86_addr_t) {
.immediate = {
.kind = lconst_imm_kind,
.entity = entity,
},
.log_scale = 2,
.variant = X86_ADDR_BASE_INDEX,
};
set_ia32_op_type(table_am, ia32_AddrModeS);
set_ia32_ls_mode(table_am, ia32_mode_gp);
set_am_const_entity(table_am, entity);
SET_IA32_ORIG_NODE(switchjmp, node);
return switchjmp;
......@@ -3561,6 +3594,19 @@ static ir_node *create_Conv_I2I(dbg_info *dbgi, ir_node *block, ir_node *base,
return func(dbgi, block, base, index, mem, val, mode);
}
/**
 * Create an ia32 Lea node computing base + offset (constant displacement,
 * no index register).  The whole address is overwritten with a compound
 * literal so all other address fields are reset to zero.
 */
static ir_node *create_lea_add_c(dbg_info *const dbgi, ir_node *const block,
                                 ir_node *const base, int32_t const offset)
{
	ir_node *const lea = new_bd_ia32_Lea(dbgi, block, base, noreg_GP);
	get_ia32_attr(lea)->addr = (x86_addr_t) {
		.variant   = X86_ADDR_BASE,
		.immediate = { .offset = offset },
	};
	set_ia32_ls_mode(lea, ia32_mode_gp);
	return lea;
}
/**
* Transforms a Mux node into some code sequence.
*
......@@ -3638,7 +3684,7 @@ static ir_node *gen_Mux(ir_node *node)
} else if (new_mode == x86_mode_E) {
/* arg, shift 16 NOT supported */
log_scale = 3;
new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
new_node = create_lea_add(dbgi, new_block, new_node, new_node);
} else {
panic("unsupported constant size");
}
......@@ -3650,10 +3696,11 @@ static ir_node *gen_Mux(ir_node *node)
.kind = lconst_imm_kind,
.entity = array,
},
.base = get_global_base(irg),
.index = new_node,
.mem = nomem,
.scale = log_scale,
.variant = X86_ADDR_BASE_INDEX,
.base = get_global_base(irg),
.index = new_node,
.mem = nomem,
.scale = log_scale,
},
.ls_mode = new_mode,
.mem_proj = nomem,
......@@ -3721,22 +3768,23 @@ static ir_node *gen_Mux(ir_node *node)
for (unsigned step = res.num_steps; step-- != 0;) {
switch (res.steps[step].transform) {
case SETCC_TR_ADD: {
new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, noreg_GP);
ia32_attr_t *const attr = get_ia32_attr(new_node);
attr->addr.immediate.offset = res.steps[step].val;
new_node = create_lea_add_c(dbgi, new_block, new_node,
res.steps[step].val);
SET_IA32_ORIG_NODE(new_node, node);
continue;
}
case SETCC_TR_ADDxx:
new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
new_node = create_lea_add(dbgi, new_block, new_node,
new_node);
SET_IA32_ORIG_NODE(new_node, node);
continue;
case SETCC_TR_LEA: {
new_node = new_bd_ia32_Lea(dbgi, new_block, noreg_GP, new_node);
ia32_attr_t *const attr = get_ia32_attr(new_node);
attr->addr.log_scale = res.steps[step].log_scale;
attr->addr.variant = X86_ADDR_INDEX;
attr->addr.log_scale = res.steps[step].log_scale;
attr->addr.immediate.offset = res.steps[step].val;
SET_IA32_ORIG_NODE(new_node, node);
continue;
......@@ -3745,7 +3793,8 @@ static ir_node *gen_Mux(ir_node *node)
case SETCC_TR_LEAxx: {
new_node = new_bd_ia32_Lea(dbgi, new_block, new_node, new_node);
ia32_attr_t *const attr = get_ia32_attr(new_node);
attr->addr.log_scale = res.steps[step].log_scale;
attr->addr.variant = X86_ADDR_BASE_INDEX;
attr->addr.log_scale = res.steps[step].log_scale;
attr->addr.immediate.offset = res.steps[step].val;
SET_IA32_ORIG_NODE(new_node, node);
continue;
......@@ -3837,6 +3886,8 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node)
ir_node *fist = gen_fist(dbgi, block, frame, noreg_GP, nomem, new_op,
ls_mode);
set_irn_pinned(fist, false);
ia32_attr_t *const fist_attr = get_ia32_attr(fist);
fist_attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(fist, ia32_AddrModeD);
arch_add_irn_flags(fist, arch_irn_flag_spill);
......@@ -3850,6 +3901,8 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node)
ir_node *load = new_bd_ia32_Load(dbgi, block, frame, noreg_GP, mem);
set_irn_pinned(load, false);
ia32_attr_t *const load_attr = get_ia32_attr(load);
load_attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, ia32_mode_gp);
force_int_stackent(load, ls_mode);
......@@ -3871,6 +3924,8 @@ static ir_node *gen_x87_conv(ir_mode *tgt_mode, ir_node *node)
ir_node *store = create_fst(dbgi, block, frame, noreg_GP, nomem, node,
tgt_mode);
set_irn_pinned(store, false);
ia32_attr_t *const store_attr = get_ia32_attr(store);
store_attr->addr.variant = X86_ADDR_BASE;
set_ia32_frame_use(store, IA32_FRAME_USE_AUTO);
set_ia32_op_type(store, ia32_AddrModeD);
arch_add_irn_flags(store, arch_irn_flag_spill);
......@@ -3881,6 +3936,8 @@ static ir_node *gen_x87_conv(ir_mode *tgt_mode, ir_node *node)
ir_node *load = new_bd_ia32_fld(dbgi, block, frame, noreg_GP, store_mem,
tgt_mode);
set_irn_pinned(load, false);
ia32_attr_t *const load_attr = get_ia32_attr(load);
load_attr->addr.variant = X86_ADDR_BASE;
set_ia32_frame_use(load, IA32_FRAME_USE_AUTO);
set_ia32_op_type(load, ia32_AddrModeS);
SET_IA32_ORIG_NODE(load, node);
......@@ -3922,6 +3979,8 @@ static void store_gp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
nomem, new_node);
set_irn_pinned(store, false);
ia32_attr_t *const attr = get_ia32_attr(store);
attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, ia32_mode_gp);
arch_add_irn_flags(store, arch_irn_flag_spill);
......@@ -3940,6 +3999,7 @@ static void store_gp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
set_irn_pinned(zero_store, false);
set_ia32_op_type(zero_store, ia32_AddrModeD);
ia32_attr_t *const attr = get_ia32_attr(zero_store);
attr->addr.variant = X86_ADDR_BASE;
attr->addr.immediate.offset = 4;
set_ia32_ls_mode(zero_store, ia32_mode_gp);
arch_add_irn_flags(zero_store, arch_irn_flag_spill);
......@@ -3961,6 +4021,7 @@ static void store_gp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
addr->index = noreg_GP;
addr->mem = store_mem;
addr->imm = (x86_imm32_t) { .kind = X86_IMM_FRAMEENT };
addr->variant = X86_ADDR_BASE;
am->op_type = ia32_AddrModeS;
am->ls_mode = store_mode;
am->pinned = false;
......@@ -4134,6 +4195,8 @@ static void store_fp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
ir_node *fst = create_fst(dbgi, new_block, frame, noreg_GP, nomem,
new_value, mode);
set_irn_pinned(fst, false);
ia32_attr_t *const attr = get_ia32_attr(fst);
attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(fst, ia32_AddrModeD);
arch_add_irn_flags(fst, arch_irn_flag_spill);
force_int_stackent(fst, mode);
......@@ -4141,6 +4204,7 @@ static void store_fp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
memset(am, 0, sizeof(*am));
x86_address_t *addr = &am->addr;
addr->variant = X86_ADDR_BASE;
addr->base = frame;
addr->index = noreg_GP;
addr->mem = mem;
......@@ -4551,7 +4615,10 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
arch_add_irn_flags(store_high, arch_irn_flag_spill);
force_int_stackent(store_low, mode_Ls);
force_int_stackent(store_high, mode_Ls);
ia32_attr_t *const attr_low = get_ia32_attr(store_low);
attr_low->addr.variant = X86_ADDR_BASE;
ia32_attr_t *const attr_high = get_ia32_attr(store_high);
attr_high->addr.variant = X86_ADDR_BASE;
attr_high->addr.immediate.offset = 4;
ir_node *in[2] = { mem_low, mem_high };
......@@ -4560,6 +4627,8 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
/* do a fild */
ir_node *fild = new_bd_ia32_fild(dbgi, block, frame, noreg_GP, sync);
set_irn_pinned(fild, false);
ia32_attr_t *const fild_attr = get_ia32_attr(fild);
fild_attr->addr.variant = X86_ADDR_BASE;
set_ia32_op_type(fild, ia32_AddrModeS);
set_ia32_ls_mode(fild, mode_Ls);
force_int_stackent(fild, mode_Ls);
......@@ -4573,10 +4642,11 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
ia32_address_mode_t am = {
.addr = {
.base = get_global_base(irg),
.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count),
.mem = nomem,
.imm = {
.variant = X86_ADDR_BASE_INDEX,
.base = get_global_base(irg),
.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count),
.mem = nomem,
.imm = {
.kind = lconst_imm_kind,
.entity = ia32_gen_fp_known_const(ia32_ULLBIAS),
},
......@@ -4615,6 +4685,8 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node)
ir_node *fist = gen_fist(dbgi, block, frame, noreg_GP, nomem, new_val,
mode_Ls);
set_irn_pinned(fist, false);
ia32_attr_t *const attr = get_ia32_attr(fist);
attr->addr.variant = X86_ADDR_BASE;
SET_IA32_ORIG_NODE(fist, node);
set_ia32_op_type(fist, ia32_AddrModeD);
arch_add_irn_flags(fist, arch_irn_flag_spill);
......@@ -4636,6 +4708,8 @@ static ir_node *gen_Proj_l_FloattoLL(ir_node *node)
ir_node *load = new_bd_ia32_Load(dbgi, block, frame, noreg_GP, new_pred);
set_irn_pinned(load, false);
ia32_attr_t *const attr = get_ia32_attr(load);
attr->addr.variant = X86_ADDR_BASE;
SET_IA32_ORIG_NODE(load, node);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, ia32_mode_gp);
......@@ -5005,11 +5079,8 @@ static ir_node *gen_Call(ir_node *node)
ir_type *const param_type = get_method_param_type(type, p);
if (is_aggregate_type(param_type)) {
/* Copy aggregate arguments into the callframe. */
ir_node *const lea = new_bd_ia32_Lea(dbgi, block, callframe, noreg_GP);