Commit 64bc887f authored by Matthias Braun

amd64: Move size attribute to amd64_attr_t

Nearly all structs derived from amd64_attr_t have a size member anyway.
Simplify the code and move it to the base amd64_attr_t.
parent e9ab92dc
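For orientation, the shape of the change: until now most derived attribute structs (addr, shift, movimm, cc) each carried their own 3-bit size field, so every node kind read the insn size from a different place (attr->size, binop_attr->base.size, ...). This commit stores the size once in the shared base amd64_attr_t, sets it in init_amd64_attributes(), and moves all accesses one level up (attr->base.size, binop_attr->base.base.size). Below is a minimal, self-contained sketch of that pattern with stand-in types; the old_/new_ prefixes, the plain enums and the int addr member are illustrative only, while the real definitions in amd64_nodes_attr.h use except_attr, x86_addr_t and the ENUMBF bitfield macro, as the hunks further down show.

/* Simplified illustration of the refactor, not the real libfirm code. */
#include <stdio.h>

typedef enum { OP_ADDR, OP_REG_REG } op_mode_t;        /* stand-in for amd64_op_mode_t   */
typedef enum { SIZE_8, SIZE_32, SIZE_64 } insn_size_t; /* stand-in for amd64_insn_size_t */

/* Before: every derived attribute duplicates its own size member. */
typedef struct { op_mode_t op_mode; } old_base_attr_t;
typedef struct { old_base_attr_t base; insn_size_t size; int addr; } old_addr_attr_t;

/* After: the size lives once in the base attribute. */
typedef struct { op_mode_t op_mode; insn_size_t size; } new_base_attr_t;
typedef struct { new_base_attr_t base; int addr; } new_addr_attr_t;

int main(void) {
	old_addr_attr_t o = { .base = { OP_ADDR }, .size = SIZE_64, .addr = 0 };
	new_addr_attr_t n = { .base = { .op_mode = OP_ADDR, .size = SIZE_64 }, .addr = 0 };
	/* Accesses shift one level up: attr->size becomes attr->base.size. */
	printf("old: %d, new: %d\n", (int)o.size, (int)n.base.size);
	return 0;
}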
......@@ -437,7 +437,7 @@ static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
x86_imm32_t const *imm = &attr->addr.immediate;
if (imm->kind == X86_IMM_FRAMEENT && imm->entity == NULL) {
-const ir_type *type = get_type_for_insn_size(attr->size);
+const ir_type *type = get_type_for_insn_size(attr->base.size);
be_load_needs_frame_entity(env, node, type);
}
}
......@@ -519,7 +519,7 @@ static void introduce_prologue(ir_graph *const irg, bool omit_fp)
/* push rbp */
ir_node *const mem = get_irg_initial_mem(irg);
ir_node *const initial_bp = be_get_Start_proj(irg, bp);
-ir_node *const push = new_bd_amd64_push_reg(NULL, block, initial_sp, mem, initial_bp);
+ir_node *const push = new_bd_amd64_push_reg(NULL, block, initial_sp, mem, initial_bp, INSN_SIZE_64);
sched_add_after(start, push);
ir_node *const curr_mem = be_new_Proj(push, pn_amd64_push_reg_M);
edges_reroute_except(mem, curr_mem, push);
......@@ -597,14 +597,14 @@ static void amd64_sp_sim(ir_node *const node, stack_pointer_state_t *state)
* address, so do this first */
if (is_amd64_pop_am(node)) {
const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
-state->offset -= get_insn_size_bytes(attr->size);
+state->offset -= get_insn_size_bytes(attr->base.size);
}
amd64_determine_frameoffset(node, state->offset);
if (is_amd64_push_am(node)) {
const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
-state->offset += get_insn_size_bytes(attr->size);
+state->offset += get_insn_size_bytes(attr->base.size);
} else if (is_amd64_push_reg(node)) {
/* 64-bit register size */
state->offset += AMD64_REGISTER_SIZE;
......
......@@ -239,7 +239,7 @@ static void amd64_emit_am(const ir_node *const node, bool indirect_star)
}
case AMD64_OP_REG_REG: {
const arch_register_t *reg1 = arch_get_irn_register_in(node, 1);
-emit_register_mode(reg1, attr->size);
+emit_register_mode(reg1, attr->base.size);
be_emit_cstring(", ");
goto emit_addr_reg;
}
......@@ -250,7 +250,7 @@ static void amd64_emit_am(const ir_node *const node, bool indirect_star)
be_emit_cstring(", ");
const arch_register_t *reg
= arch_get_irn_register_in(node, binop_attr->u.reg_input);
-emit_register_mode(reg, binop_attr->base.size);
+emit_register_mode(reg, binop_attr->base.base.size);
return;
}
case AMD64_OP_ADDR_IMM: {
......@@ -269,7 +269,7 @@ static void amd64_emit_am(const ir_node *const node, bool indirect_star)
case AMD64_OP_ADDR_REG: {
amd64_binop_addr_attr_t const *const binop_attr = (amd64_binop_addr_attr_t const*)attr;
arch_register_t const *const reg = arch_get_irn_register_in(node, binop_attr->u.reg_input);
-emit_register_mode(reg, binop_attr->base.size);
+emit_register_mode(reg, binop_attr->base.base.size);
be_emit_cstring(", ");
emit_addr:
x86_emit_addr(node, &attr->addr);
......@@ -282,7 +282,7 @@ emit_addr_reg:
assert(attr->addr.variant == X86_ADDR_REG);
arch_register_t const *const reg
= arch_get_irn_register_in(node, attr->addr.base_input);
-emit_register_mode(reg, attr->size);
+emit_register_mode(reg, attr->base.size);
return;
}
case AMD64_OP_IMM32:
......@@ -309,7 +309,7 @@ static void emit_shiftop(const ir_node *const node)
case AMD64_OP_SHIFT_IMM: {
be_emit_irprintf("$0x%X, ", attr->immediate);
const arch_register_t *reg = arch_get_irn_register_in(node, 0);
-emit_register_mode(reg, attr->size);
+emit_register_mode(reg, attr->base.size);
return;
}
case AMD64_OP_SHIFT_REG: {
......@@ -317,7 +317,7 @@ static void emit_shiftop(const ir_node *const node)
const arch_register_t *reg1 = arch_get_irn_register_in(node, 1);
emit_register_mode(reg1, INSN_SIZE_8);
be_emit_cstring(", ");
-emit_register_mode(reg0, attr->size);
+emit_register_mode(reg0, attr->base.size);
return;
}
default:
......@@ -420,7 +420,7 @@ end_of_mods:
++fmt;
amd64_addr_attr_t const *const attr
= get_amd64_addr_attr_const(node);
-amd64_emit_x87_size_suffix(attr->size);
+amd64_emit_x87_size_suffix(attr->base.size);
} else if (*fmt == 'P') {
++fmt;
x87_attr_t const *const attr
......@@ -489,36 +489,24 @@ emit_R:
} else if (mod & EMIT_FORCE_32) {
emit_register_mode(reg, INSN_SIZE_32);
} else if (mod & EMIT_CONV_DEST) {
-amd64_insn_size_t src_size = get_amd64_insn_size(node);
+amd64_attr_t const *const attr = get_amd64_attr_const(node);
+amd64_insn_size_t src_size = attr->size;
amd64_insn_size_t dest_size = src_size == INSN_SIZE_64
? INSN_SIZE_64 : INSN_SIZE_32;
emit_register_mode(reg, dest_size);
} else {
-amd64_insn_size_t size = get_amd64_insn_size(node);
-emit_register_mode(reg, size);
+amd64_attr_t const *const attr = get_amd64_attr_const(node);
+emit_register_mode(reg, attr->size);
}
break;
}
case 'M': {
-if (*fmt == 'S') {
-++fmt;
-const amd64_shift_attr_t *attr
-= get_amd64_shift_attr_const(node);
-amd64_emit_insn_size_suffix(attr->size);
-} else if (*fmt == 'M') {
-++fmt;
-const amd64_movimm_attr_t *attr
-= get_amd64_movimm_attr_const(node);
-amd64_emit_insn_size_suffix(attr->size);
-} else if (*fmt == 'X') {
+amd64_attr_t const *const attr = get_amd64_attr_const(node);
+if (*fmt == 'X') {
++fmt;
-amd64_addr_attr_t const *const attr
-= get_amd64_addr_attr_const(node);
amd64_emit_xmm_size_suffix(attr->size);
} else {
-amd64_addr_attr_t const *const attr
-= get_amd64_addr_attr_const(node);
amd64_emit_insn_size_suffix(attr->size);
}
break;
......@@ -760,7 +748,7 @@ static void emit_amd64_jcc(const ir_node *irn)
static void emit_amd64_mov_gp(const ir_node *node)
{
-const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
+amd64_attr_t const *const attr = get_amd64_attr_const(node);
switch (attr->size) {
case INSN_SIZE_8: amd64_emitf(node, "movzbq %AM, %^D0"); return;
case INSN_SIZE_16: amd64_emitf(node, "movzwq %AM, %^D0"); return;
......
......@@ -64,15 +64,15 @@ static void transform_sub_to_neg_add(ir_node *node,
ir_node *add;
unsigned pos;
if (is_amd64_subs(node)) {
-unsigned bits = amd64_get_insn_size_bits(attr->base.size);
+unsigned bits = amd64_get_insn_size_bits(attr->base.base.size);
ir_tarval *tv = get_mode_one(amd64_mode_xmm);
tv = tarval_shl_unsigned(tv, bits - 1);
ir_entity *sign_bit_const = create_float_const_entity(tv);
amd64_binop_addr_attr_t xor_attr = {
-.base = {
-.base.op_mode = AMD64_OP_REG_ADDR,
-.size = INSN_SIZE_64,
+.base.base = {
+.op_mode = AMD64_OP_REG_ADDR,
+.size = INSN_SIZE_64,
},
};
init_lconst_addr(&xor_attr.base.addr, sign_bit_const);
......@@ -87,7 +87,7 @@ static void transform_sub_to_neg_add(ir_node *node,
pos = pn_amd64_adds_res;
} else {
assert(is_amd64_sub(node));
-ir_node *neg = new_bd_amd64_neg(dbgi, block, in2, attr->base.size);
+ir_node *neg = new_bd_amd64_neg(dbgi, block, in2, attr->base.base.size);
sched_add_before(node, neg);
ir_node *const neg_res = be_new_Proj_reg(neg, pn_amd64_neg_res, out_reg);
......@@ -127,7 +127,7 @@ static void amd64_turn_back_am(ir_node *const node, arch_register_t const *const
load_in[mem_input] = get_irn_n(node, attr->addr.mem_input);
assert(get_irn_mode(load_in[mem_input]) == mode_M);
-ir_node *const load = new_bd_amd64_mov_gp(dbgi, block, load_arity, load_in, gp_am_reqs[load_arity - 1], attr->size, AMD64_OP_ADDR, new_addr);
+ir_node *const load = new_bd_amd64_mov_gp(dbgi, block, load_arity, load_in, gp_am_reqs[load_arity - 1], attr->base.size, AMD64_OP_ADDR, new_addr);
ir_node *const load_res = be_new_Proj_reg(load, pn_amd64_mov_gp_res, out_reg);
/* change operation */
......
......@@ -29,25 +29,6 @@
#include "amd64_new_nodes_t.h"
#include "gen_amd64_regalloc_if.h"
-amd64_insn_size_t get_amd64_insn_size(const ir_node *node)
-{
-if (is_amd64_mov_imm(node)) {
-const amd64_movimm_attr_t *const attr
-= get_amd64_movimm_attr_const(node);
-return attr->size;
-}
-amd64_op_mode_t const op_mode = get_amd64_attr_const(node)->op_mode;
-if (amd64_has_addr_attr(op_mode)) {
-amd64_addr_attr_t const *const attr = get_amd64_addr_attr_const(node);
-return attr->size;
-} else if (op_mode == AMD64_OP_CC) {
-amd64_cc_attr_t const *const attr = get_amd64_cc_attr_const(node);
-return attr->size;
-} else {
-panic("Node attributes do not contain insn_size");
-}
-}
x87_attr_t *amd64_get_x87_attr(ir_node *const node)
{
amd64_attr_t const *const attr = get_amd64_attr_const(node);
......@@ -134,6 +115,7 @@ void amd64_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
const amd64_attr_t *attr = get_amd64_attr_const(n);
amd64_op_mode_t const op_mode = attr->op_mode;
fprintf(F, "mode = %s\n", get_op_mode_string(op_mode));
fprintf(F, "size = %s\n", get_insn_size_string(attr->size));
switch (op_mode) {
case AMD64_OP_ADDR_REG:
case AMD64_OP_REG_ADDR: {
......@@ -153,7 +135,6 @@ void amd64_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
}
if (amd64_has_addr_attr(op_mode)) {
const amd64_addr_attr_t *addr_attr = get_amd64_addr_attr_const(n);
fprintf(F, "size = %s\n", get_insn_size_string(addr_attr->size));
x86_addr_variant_t const variant = addr_attr->addr.variant;
fprintf(F, "am variant = %s\n", x86_get_addr_variant_str(variant));
if (x86_addr_variant_has_base(variant))
......@@ -171,11 +152,13 @@ void amd64_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
void init_amd64_attributes(ir_node *node, arch_irn_flags_t flags,
const arch_register_req_t **in_reqs,
-int n_res, amd64_op_mode_t op_mode)
+int n_res, amd64_op_mode_t op_mode,
+amd64_insn_size_t size)
{
be_info_init_irn(node, flags, in_reqs, n_res);
amd64_attr_t *attr = get_amd64_attr(node);
attr->op_mode = op_mode;
+attr->size = size;
}
void init_amd64_switch_attributes(ir_node *node, const ir_switch_table *table,
......@@ -190,19 +173,15 @@ void init_amd64_switch_attributes(ir_node *node, const ir_switch_table *table,
}
}
-void init_amd64_cc_attributes(ir_node *node, x86_condition_code_t cc,
-amd64_insn_size_t size)
+void init_amd64_cc_attributes(ir_node *node, x86_condition_code_t cc)
{
amd64_cc_attr_t *attr = get_amd64_cc_attr(node);
-attr->cc   = cc;
-attr->size = size;
+attr->cc = cc;
}
-void init_amd64_movimm_attributes(ir_node *node, amd64_insn_size_t size,
-const amd64_imm64_t *imm)
+void init_amd64_movimm_attributes(ir_node *node, const amd64_imm64_t *imm)
{
amd64_movimm_attr_t *attr = get_amd64_movimm_attr(node);
-attr->size = size;
attr->immediate = *imm;
}
......@@ -216,7 +195,8 @@ int amd64_attrs_equal(const ir_node *a, const ir_node *b)
{
const amd64_attr_t *attr_a = get_amd64_attr_const(a);
const amd64_attr_t *attr_b = get_amd64_attr_const(b);
-return attr_a->op_mode == attr_b->op_mode;
+return attr_a->op_mode == attr_b->op_mode
+&& attr_a->size == attr_b->size;
}
int amd64_addr_attrs_equal(const ir_node *a, const ir_node *b)
......@@ -224,8 +204,7 @@ int amd64_addr_attrs_equal(const ir_node *a, const ir_node *b)
const amd64_addr_attr_t *attr_a = get_amd64_addr_attr_const(a);
const amd64_addr_attr_t *attr_b = get_amd64_addr_attr_const(b);
return amd64_attrs_equal(a, b)
&& x86_addrs_equal(&attr_a->addr, &attr_b->addr)
&& attr_a->size == attr_b->size;
&& x86_addrs_equal(&attr_a->addr, &attr_b->addr);
}
int amd64_binop_addr_attrs_equal(const ir_node *a, const ir_node *b)
......@@ -247,8 +226,7 @@ int amd64_movimm_attrs_equal(const ir_node *const a, const ir_node *const b)
const amd64_movimm_attr_t *const attr_a = get_amd64_movimm_attr_const(a);
const amd64_movimm_attr_t *const attr_b = get_amd64_movimm_attr_const(b);
return amd64_attrs_equal(a, b)
&& imm64s_equal(&attr_a->immediate, &attr_b->immediate)
&& attr_a->size == attr_b->size;
&& imm64s_equal(&attr_a->immediate, &attr_b->immediate);
}
int amd64_shift_attrs_equal(const ir_node *const a, const ir_node *const b)
......@@ -256,8 +234,7 @@ int amd64_shift_attrs_equal(const ir_node *const a, const ir_node *const b)
const amd64_shift_attr_t *const attr_a = get_amd64_shift_attr_const(a);
const amd64_shift_attr_t *const attr_b = get_amd64_shift_attr_const(b);
return amd64_attrs_equal(a, b)
&& attr_a->immediate == attr_b->immediate
&& attr_a->size == attr_b->size;
&& attr_a->immediate == attr_b->immediate;
}
int amd64_cc_attrs_equal(const ir_node *const a, const ir_node *const b)
......
......@@ -185,7 +185,6 @@ static inline amd64_x87_binop_addr_attr_t *get_amd64_x87_binop_addr_attr(
x87_attr_t *amd64_get_x87_attr(ir_node *node);
x87_attr_t const *amd64_get_x87_attr_const(ir_node const *node);
-amd64_insn_size_t get_amd64_insn_size(const ir_node *node);
unsigned amd64_get_insn_size_bits(amd64_insn_size_t insn_size);
/* Include the generated headers */
......
......@@ -16,16 +16,15 @@ void amd64_dump_node(FILE *F, const ir_node *n, dump_reason_t reason);
void init_amd64_attributes(ir_node *node, arch_irn_flags_t flags,
const arch_register_req_t **in_reqs,
-int n_res, amd64_op_mode_t op_mode);
+int n_res, amd64_op_mode_t op_mode,
+amd64_insn_size_t size);
void init_amd64_switch_attributes(ir_node *node, const ir_switch_table *table,
ir_entity *table_entity);
-void init_amd64_cc_attributes(ir_node *node, x86_condition_code_t cc,
-amd64_insn_size_t size);
+void init_amd64_cc_attributes(ir_node *node, x86_condition_code_t cc);
-void init_amd64_movimm_attributes(ir_node *node, amd64_insn_size_t size,
-const amd64_imm64_t *imm);
+void init_amd64_movimm_attributes(ir_node *node, const amd64_imm64_t *imm);
int amd64_attrs_equal(const ir_node *a, const ir_node *b);
int amd64_addr_attrs_equal(const ir_node *a, const ir_node *b);
......
......@@ -59,13 +59,13 @@ typedef struct amd64_imm64_t {
typedef struct amd64_attr_t {
except_attr exc; /**< the exception attribute. MUST be the first one. */
-amd64_op_mode_t op_mode;
+ENUMBF(amd64_op_mode_t) op_mode : 5;
+ENUMBF(amd64_insn_size_t) size : 3;
} amd64_attr_t;
typedef struct amd64_addr_attr_t {
-amd64_attr_t              base;
-ENUMBF(amd64_insn_size_t) size : 3;
-x86_addr_t                addr;
+amd64_attr_t base;
+x86_addr_t   addr;
} amd64_addr_attr_t;
typedef struct amd64_binop_addr_attr_t {
......@@ -77,21 +77,18 @@ typedef struct amd64_binop_addr_attr_t {
} amd64_binop_addr_attr_t;
typedef struct amd64_shift_attr_t {
-amd64_attr_t              base;
-ENUMBF(amd64_insn_size_t) size : 3;
-uint8_t                   immediate;
+amd64_attr_t base;
+uint8_t      immediate;
} amd64_shift_attr_t;
typedef struct amd64_movimm_attr_t {
-amd64_attr_t              base;
-ENUMBF(amd64_insn_size_t) size : 3;
-amd64_imm64_t             immediate;
+amd64_attr_t  base;
+amd64_imm64_t immediate;
} amd64_movimm_attr_t;
typedef struct amd64_cc_attr_t {
-amd64_attr_t              base;
-x86_condition_code_t      cc;
-ENUMBF(amd64_insn_size_t) size : 3;
+amd64_attr_t         base;
+x86_condition_code_t cc;
} amd64_cc_attr_t;
typedef struct amd64_switch_jmp_attr_t {
......
......@@ -42,8 +42,10 @@ static void peephole_amd64_lea(ir_node *const node)
if (oreg == arch_get_irn_register(base)) {
amd64_binop_addr_attr_t const add_attr = {
.base = {
-.base = { .op_mode = AMD64_OP_REG_IMM, },
-.size = attr->size,
+.base = {
+.op_mode = AMD64_OP_REG_IMM,
+.size = attr->base.size,
+},
.addr = {
.base_input = 0,
.variant = X86_ADDR_REG,
......@@ -71,8 +73,10 @@ static void peephole_amd64_lea(ir_node *const node)
add_reg_reg:;
amd64_binop_addr_attr_t const add_attr = {
.base = {
-.base = { .op_mode = AMD64_OP_REG_REG, },
-.size = attr->size,
+.base = {
+.op_mode = AMD64_OP_REG_REG,
+.size = attr->base.size,
+},
.addr = {
.base_input = 0,
.variant = X86_ADDR_REG,
......@@ -96,7 +100,7 @@ static void peephole_amd64_mov_imm(ir_node *const node)
/* mov $0, %reg -> xorl %reg, %reg */
dbg_info *const dbgi = get_irn_dbg_info(node);
ir_node *const block = get_nodes_block(node);
-ir_node *const xor = new_bd_amd64_xor_0(dbgi, block);
+ir_node *const xor = new_bd_amd64_xor_0(dbgi, block, INSN_SIZE_32);
arch_register_t const *const reg = arch_get_irn_register_out(node, pn_amd64_mov_imm_res);
arch_set_irn_register_out(xor, pn_amd64_xor_0_res, reg);
sched_add_before(node, xor);
......
......@@ -67,25 +67,23 @@ $mode_x87 = "x86_mode_E";
%init_attr = (
amd64_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode);",
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode, size);",
amd64_addr_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode);\n"
."\tattr->size = size;\n"
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode, size);\n"
."\tattr->addr = addr;",
amd64_binop_addr_attr_t =>
"be_info_init_irn(res, irn_flags, in_reqs, n_res);\n"
."\t*attr = *attr_init;",
amd64_switch_jmp_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode);\n"
."\tattr->base.size = INSN_SIZE_64;\n"
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode, size);\n"
."\tattr->base.addr = *addr;\n"
."\tinit_amd64_switch_attributes(res, table, table_entity);",
amd64_cc_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, AMD64_OP_CC);\n"
."\tinit_amd64_cc_attributes(res, cc, size);",
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, AMD64_OP_CC, size);\n"
."\tinit_amd64_cc_attributes(res, cc);",
amd64_movimm_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, AMD64_OP_IMM64);\n"
."\tinit_amd64_movimm_attributes(res, size, imm);",
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, AMD64_OP_IMM64, size);\n"
."\tinit_amd64_movimm_attributes(res, imm);",
amd64_shift_attr_t =>
"be_info_init_irn(res, irn_flags, in_reqs, n_res);\n"
."\t*attr = *attr_init;\n",
......@@ -93,10 +91,9 @@ $mode_x87 = "x86_mode_E";
"be_info_init_irn(res, irn_flags, in_reqs, n_res);\n"
."\t*attr = *attr_init;",
amd64_x87_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, AMD64_OP_X87);\n",
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, AMD64_OP_X87, INSN_SIZE_80);\n",
amd64_x87_addr_attr_t =>
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode);\n"
."\tattr->base.size = size;\n"
"init_amd64_attributes(res, irn_flags, in_reqs, n_res, op_mode, size);\n"
."\tattr->base.addr = addr;\n",
amd64_x87_binop_addr_attr_t =>
"be_info_init_irn(res, irn_flags, in_reqs, n_res);\n"
......@@ -233,7 +230,8 @@ my $x87const = {
irn_flags => [ "rematerializable" ],
out_reqs => [ "x87" ],
outs => [ "res" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_X87;\n",
fixed => "amd64_op_mode_t op_mode = AMD64_OP_X87;\n"
."amd64_insn_size_t size = INSN_SIZE_80;\n",
mode => $mode_x87,
};
......@@ -288,7 +286,8 @@ push_reg => {
out_reqs => [ "rsp:I", "mem" ],
outs => [ "stack", "M" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;\n",
emit => "pushq %^S2",
attr => "amd64_insn_size_t size",
emit => "push%M %^S2",
},
pop_am => {
......@@ -322,7 +321,8 @@ leave => {
in_reqs => [ "rbp", "mem" ],
out_reqs => [ "rbp:I", "mem", "rsp:I" ],
outs => [ "frame", "M", "stack" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;\n",
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;\n"
."amd64_insn_size_t size = INSN_SIZE_64;\n",
emit => "leave",
},
......@@ -368,17 +368,17 @@ or => {
shl => {
template => $shiftop,
emit => "shl%MS %SO",
emit => "shl%M %SO",
},
shr => {
template => $shiftop,
emit => "shr%MS %SO",
emit => "shr%M %SO",
},
sar => {
template => $shiftop,
emit => "sar%MS %SO",
emit => "sar%M %SO",
},
sub => {
......@@ -413,7 +413,8 @@ xor_0 => {
out_reqs => [ "gp", "flags" ],
outs => [ "res", "flags" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;",
emit => "xorl %3D0, %3D0",
attr => "amd64_insn_size_t size",
emit => "xor%M %3D0, %3D0",
},
mov_imm => {
......@@ -423,7 +424,7 @@ mov_imm => {
outs => [ "res" ],
attr_type => "amd64_movimm_attr_t",
attr => "amd64_insn_size_t size, const amd64_imm64_t *imm",
-emit => 'mov%MM $%C, %D0',
+emit => 'mov%M $%C, %D0',
},
movs => {
......@@ -460,7 +461,8 @@ jmp => {
state => "pinned",
op_flags => [ "cfopcode" ],
out_reqs => [ "exec" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;",
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;\n"
."amd64_insn_size_t size = INSN_SIZE_64;\n",
},
cmp => {
......@@ -538,7 +540,7 @@ jmp_switch => {
in_reqs => "...",
out_reqs => "...",
attr_type => "amd64_switch_jmp_attr_t",
attr => "amd64_op_mode_t op_mode, const x86_addr_t *addr, const ir_switch_table *table, ir_entity *table_entity",
attr => "amd64_op_mode_t op_mode, amd64_insn_size_t size, const x86_addr_t *addr, const ir_switch_table *table, ir_entity *table_entity",
},
call => {
......@@ -558,7 +560,8 @@ ret => {
in_reqs => "...",
out_reqs => [ "exec" ],
ins => [ "mem", "stack", "first_result" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;\n",
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;\n"
."amd64_insn_size_t size = INSN_SIZE_64;\n",
emit => "ret",
},
......@@ -622,13 +625,14 @@ ucomis => {
emit => "ucomis%MX %AM",
},
-xorpd_0 => {
+xorp_0 => {
op_flags => [ "constlike" ],
irn_flags => [ "rematerializable" ],
out_reqs => [ "xmm" ],
outs => [ "res" ],
fixed => "amd64_op_mode_t op_mode = AMD64_OP_NONE;",
emit => "xorpd %^D0, %^D0",
attr => "amd64_insn_size_t size",
emit => "xorp%MX %^D0, %^D0",
},
xorp => {
......
......@@ -455,7 +455,7 @@ static ir_node *gen_Const(ir_node *const node)
if (mode == x86_mode_E) {
return gen_x87_Const(block, tv);
} else if (tarval_is_null(tv)) {
-return new_bd_amd64_xorpd_0(dbgi, block);
+return new_bd_amd64_xorp_0(dbgi, block, INSN_SIZE_64);
}
return create_float_const(dbgi, block, tv);
}
......@@ -643,7 +643,7 @@ static ir_node *create_sar(dbg_info *dbgi, ir_node *const new_block,
amd64_shift_attr_t attr;
memset(&attr, 0, sizeof(attr));
attr.base.op_mode = AMD64_OP_SHIFT_IMM;
-attr.size = size;
+attr.base.size = size;
attr.immediate = immediate;
ir_node *in[1] = { value };
ir_node *const sar = new_bd_amd64_sar(dbgi, new_block, ARRAY_SIZE(in), in,
......@@ -727,7 +727,7 @@ static void match_binop(amd64_args_t *args, ir_node *block,
bool mode_neutral = flags & match_mode_neutral;
amd64_binop_addr_attr_t *const attr = &args->attr;
-attr->base.size = get_insn_size_from_mode(mode);
+attr->base.base.size = get_insn_size_from_mode(mode);
/* TODO: legalize phase */
if (mode_neutral) {
......@@ -891,7 +891,7 @@ static ir_node *gen_binop_xmm(ir_node *node, ir_node *op0, ir_node *op1,
amd64_args_t args;
memset(&args, 0, sizeof(args));
amd64_binop_addr_attr_t *const attr = &args.attr;
-attr->base.size = INSN_SIZE_64;
+attr->base.base.size = INSN_SIZE_64;
ir_node *load;
ir_node *op;
......@@ -1003,7 +1003,7 @@ static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2,
reqs = reg_rcx_reqs;
out_req0 = &amd64_requirement_gp_same_0_not_1;
}
-attr.size = get_insn_size_from_mode(mode);
+attr.base.size = get_insn_size_from_mode(mode);
dbg_info *const dbgi = get_irn_dbg_info(node);