Commit 07ccf5c7 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

Consistently place the mem in for AM directly after base and index.

[r15766]
parent 9f259c73
......@@ -423,7 +423,7 @@ static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap
be_node_set_flags(get_Proj_pred(curr_bp), BE_OUT_POS(get_Proj_proj(curr_bp)), arch_irn_flags_ignore);
/* push ebp */
push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, curr_bp, curr_sp, *mem);
push = new_rd_ia32_Push(NULL, env->irg, bl, noreg, noreg, *mem, curr_bp, curr_sp);
curr_sp = new_r_Proj(env->irg, bl, push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
*mem = new_r_Proj(env->irg, bl, push, mode_M, pn_ia32_Push_M);
......@@ -494,7 +494,7 @@ static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_
curr_sp = be_new_SetSP(env->isa->sp, env->irg, bl, curr_sp, curr_bp, *mem);
/* pop ebp */
pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, curr_sp, *mem);
pop = new_rd_ia32_Pop(NULL, env->irg, bl, noreg, noreg, *mem, curr_sp);
set_ia32_flags(pop, arch_irn_flags_ignore);
curr_bp = new_r_Proj(current_ir_graph, bl, pop, mode_bp, pn_ia32_Pop_res);
curr_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
......@@ -653,7 +653,7 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
return NULL;
/* operand must always be a real operand (not base, index or mem) */
if (i != 2 && i != 3)
if (i != n_ia32_binary_left && i != n_ia32_binary_right)
return NULL;
/* we don't invert address mode operations */
......@@ -684,7 +684,7 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
if (get_ia32_immop_type(irn) == ia32_ImmConst) {
/* we have an add with a const here */
/* invers == add with negated const */
inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
set_ia32_Immop_tarval(inverse->nodes[0], tarval_neg(get_ia32_Immop_tarval(irn)));
......@@ -693,13 +693,13 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
else if (get_ia32_immop_type(irn) == ia32_ImmSymConst) {
/* we have an add with a symconst here */
/* invers == sub with const */
inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += 2;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal add: inverse == sub */
inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, i ^ 1), nomem);
inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
inverse->costs += 2;
}
#endif
......@@ -709,17 +709,17 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* we have a sub with a const/symconst here */
/* invers == add with this const */
inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal sub */
if (i == 2) {
inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, (ir_node*) irn, get_irn_n(irn, 3), nomem);
if (i == n_ia32_binary_left) {
inverse->nodes[0] = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, 3));
}
else {
inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, get_irn_n(irn, 2), (ir_node*) irn, nomem);
inverse->nodes[0] = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, n_ia32_binary_left), (ir_node*) irn);
}
inverse->costs += 1;
}
......@@ -729,13 +729,13 @@ static arch_inverse_t *ia32_get_inverse(const void *self, const ir_node *irn, in
#if 0
if (get_ia32_immop_type(irn) != ia32_ImmNone) {
/* xor with const: inverse = xor */
inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, get_irn_n(irn, i), noreg, nomem);
inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
inverse->costs += (get_ia32_immop_type(irn) == ia32_ImmSymConst) ? 5 : 1;
copy_ia32_Immop_attr(inverse->nodes[0], (ir_node *)irn);
}
else {
/* normal xor */
inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, (ir_node *) irn, get_irn_n(irn, i), nomem);
inverse->nodes[0] = new_rd_ia32_Xor(dbg, irg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
inverse->costs += 1;
}
#endif
......@@ -801,23 +801,23 @@ static int ia32_possible_memory_operand(const void *self, const ir_node *irn, un
const ir_mode *spillmode = get_spill_mode(op);
(void) self;
if (! is_ia32_irn(irn) || /* must be an ia32 irn */
get_irn_arity(irn) != 5 || /* must be a binary operation */
get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
! ia32_is_spillmode_compatible(mode, spillmode) ||
(i != 2 && i != 3) || /* a "real" operand position must be requested */
is_ia32_use_frame(irn)) /* must not already use frame */
if (! is_ia32_irn(irn) || /* must be an ia32 irn */
get_ia32_am_arity(irn) != 2 || /* must be a binary operation TODO is this necessary? */
get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
! (get_ia32_am_support(irn) & ia32_am_Source) || /* must be capable of source addressmode */
! ia32_is_spillmode_compatible(mode, spillmode) ||
(i != n_ia32_binary_left && i != n_ia32_binary_right) || /* a "real" operand position must be requested */
is_ia32_use_frame(irn)) /* must not already use frame */
return 0;
if(i == 2) {
if (i == n_ia32_binary_left) {
const arch_register_req_t *req;
if(!is_ia32_commutative(irn))
return 0;
/* we can't swap left/right for limited registers
* (As this (currently) breaks constraint handling copies)
*/
req = get_ia32_in_req(irn, 2);
req = get_ia32_in_req(irn, n_ia32_binary_left);
if(req->type & arch_register_req_type_limited) {
return 0;
}
......@@ -834,7 +834,7 @@ static void ia32_perform_memory_operand(const void *self, ir_node *irn,
assert(ia32_possible_memory_operand(self, irn, i) && "Cannot perform memory operand change");
if (i == 2) {
if (i == n_ia32_binary_left) {
ia32_swap_left_right(irn);
}
......@@ -843,12 +843,12 @@ static void ia32_perform_memory_operand(const void *self, ir_node *irn,
set_ia32_use_frame(irn);
set_ia32_need_stackent(irn);
set_irn_n(irn, 0, get_irg_frame(get_irn_irg(irn)));
set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
set_irn_n(irn, 4, spill);
set_irn_n(irn, n_ia32_base, get_irg_frame(get_irn_irg(irn)));
set_irn_n(irn, n_ia32_binary_right, ia32_get_admissible_noreg(cg, irn, n_ia32_binary_right));
set_irn_n(irn, n_ia32_mem, spill);
/* immediates are only allowed on the right side */
if(i == 2 && is_ia32_Immediate(get_irn_n(irn, 2))) {
if (i == n_ia32_binary_left && is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_left))) {
ia32_swap_left_right(irn);
}
}
......@@ -1045,16 +1045,16 @@ static void transform_to_Store(ia32_code_gen_t *cg, ir_node *node) {
if (mode_is_float(mode)) {
if (USE_SSE2(cg))
store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, val, nomem);
store = new_rd_ia32_xStore(dbg, irg, block, ptr, noreg, nomem, val);
else
store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, val, nomem, mode);
store = new_rd_ia32_vfst(dbg, irg, block, ptr, noreg, nomem, val, mode);
} else if (get_mode_size_bits(mode) == 128) {
// Spill 128 bit SSE registers
store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, val, nomem);
store = new_rd_ia32_xxStore(dbg, irg, block, ptr, noreg, nomem, val);
} else if (get_mode_size_bits(mode) == 8) {
store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, val, nomem);
store = new_rd_ia32_Store8Bit(dbg, irg, block, ptr, noreg, nomem, val);
} else {
store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, val, nomem);
store = new_rd_ia32_Store(dbg, irg, block, ptr, noreg, nomem, val);
}
set_ia32_op_type(store, ia32_AddrModeD);
......@@ -1079,7 +1079,7 @@ static ir_node *create_push(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpo
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *frame = get_irg_frame(irg);
ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, noreg, sp, mem);
ir_node *push = new_rd_ia32_Push(dbg, irg, block, frame, noreg, mem, noreg, sp);
set_ia32_frame_ent(push, ent);
set_ia32_use_frame(push);
......@@ -1097,7 +1097,7 @@ static ir_node *create_pop(ia32_code_gen_t *cg, ir_node *node, ir_node *schedpoi
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *frame = get_irg_frame(irg);
ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, sp, new_NoMem());
ir_node *pop = new_rd_ia32_Pop(dbg, irg, block, frame, noreg, new_NoMem(), sp);
set_ia32_frame_ent(pop, ent);
set_ia32_use_frame(pop);
......
......@@ -87,7 +87,7 @@ const arch_register_t *get_in_reg(ia32_emit_env_t *env, const ir_node *irn,
assert(reg && "no in register found");
if(reg == &ia32_gp_regs[REG_GP_NOREG])
panic("trying to emit noreg");
panic("trying to emit noreg for %+F input %d", irn, pos);
/* in case of unknown register: just return a valid register */
if (reg == &ia32_gp_regs[REG_GP_UKNWN]) {
......@@ -442,18 +442,18 @@ void ia32_emit_source_register_or_immediate(ia32_emit_env_t *env,
* Emits registers and/or address mode of a binary operation.
*/
void ia32_emit_binop(ia32_emit_env_t *env, const ir_node *node) {
const ir_node *right_op = get_irn_n(node, 3);
const ir_node *right_op = get_irn_n(node, n_ia32_binary_right);
switch(get_ia32_op_type(node)) {
case ia32_Normal:
if(is_ia32_Immediate(right_op)) {
emit_ia32_Immediate(env, right_op);
be_emit_cstring(env, ", ");
ia32_emit_source_register(env, node, 2);
ia32_emit_source_register(env, node, n_ia32_binary_left);
break;
} else {
const arch_register_t *in1 = get_in_reg(env, node, 2);
const arch_register_t *in2 = get_in_reg(env, node, 3);
const arch_register_t *in1 = get_in_reg(env, node, n_ia32_binary_left);
const arch_register_t *in2 = get_in_reg(env, node, n_ia32_binary_right);
const arch_register_t *out = produces_result(node) ? get_out_reg(env, node, 0) : NULL;
const arch_register_t *in;
const char *in_name;
......@@ -483,7 +483,7 @@ void ia32_emit_binop(ia32_emit_env_t *env, const ir_node *node) {
} else {
ia32_emit_am(env, node);
be_emit_cstring(env, ", ");
ia32_emit_source_register(env, node, 2);
ia32_emit_source_register(env, node, n_ia32_binary_left);
}
break;
case ia32_AddrModeD:
......@@ -605,14 +605,14 @@ void ia32_emit_am(ia32_emit_env_t *env, const ir_node *node) {
/* emit base */
if (has_base) {
ia32_emit_source_register(env, node, 0);
ia32_emit_source_register(env, node, n_ia32_base);
}
/* emit index + scale */
if (has_index) {
int scale;
be_emit_char(env, ',');
ia32_emit_source_register(env, node, 1);
ia32_emit_source_register(env, node, n_ia32_index);
scale = get_ia32_am_scale(node);
if (scale > 0) {
......@@ -1057,7 +1057,7 @@ void Set_emitter(ia32_emit_env_t *env, const ir_node *node)
if(is_ia32_xCmpSet(node)) {
be_emit_cstring(env, "\tucomis");
ia32_emit_mode_suffix_mode(env, get_irn_mode(get_irn_n(node, 2)));
ia32_emit_mode_suffix_mode(env, get_irn_mode(get_irn_n(node, n_ia32_binary_left)));
be_emit_char(env, ' ');
ia32_emit_binop(env, node);
} else {
......@@ -1626,7 +1626,7 @@ void emit_ia32_Conv_with_FP(ia32_emit_env_t *env, const ir_node *node) {
switch(get_ia32_op_type(node)) {
case ia32_Normal:
ia32_emit_source_register(env, node, 2);
ia32_emit_source_register(env, node, n_ia32_unary_op);
be_emit_cstring(env, ", ");
ia32_emit_dest_register(env, node, 0);
break;
......@@ -1681,7 +1681,7 @@ void emit_ia32_Conv_I2I(ia32_emit_env_t *env, const ir_node *node) {
switch(get_ia32_op_type(node)) {
case ia32_Normal:
in_reg = get_in_reg(env, node, 2);
in_reg = get_in_reg(env, node, n_ia32_unary_op);
out_reg = get_out_reg(env, node, 0);
if (in_reg == &ia32_gp_regs[REG_EAX] &&
......
......@@ -74,8 +74,8 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
noreg = ia32_new_NoReg_gp(cg);
noreg_fp = ia32_new_NoReg_fp(cg);
nomem = new_rd_NoMem(cg->irg);
in1 = get_irn_n(irn, 2);
in2 = get_irn_n(irn, 3);
in1 = get_irn_n(irn, n_ia32_binary_left);
in2 = get_irn_n(irn, n_ia32_binary_right);
in1_reg = arch_get_irn_register(cg->arch_env, in1);
in2_reg = arch_get_irn_register(cg->arch_env, in2);
out_reg = get_ia32_out_reg(irn, 0);
......@@ -95,7 +95,7 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
ir_entity *entity;
ir_mode *op_mode = get_ia32_ls_mode(irn);
res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem);
res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, nomem, in2, noreg_fp);
size = get_mode_size_bits(op_mode);
entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
set_ia32_am_sc(res, entity);
......@@ -111,11 +111,11 @@ static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
/* generate the add */
if (mode_is_float(mode)) {
res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem);
res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, nomem, res, in1);
set_ia32_am_support(res, ia32_am_Source, ia32_am_binary);
set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
} else {
res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem);
res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, res, in1);
set_ia32_am_support(res, ia32_am_Full, ia32_am_binary);
set_ia32_commutative(res);
}
......@@ -320,7 +320,7 @@ make_add:
block = get_nodes_block(node);
noreg = ia32_new_NoReg_gp(cg);
nomem = new_NoMem();
res = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, op1, op2, nomem);
res = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
arch_set_irn_register(arch_env, res, out_reg);
set_ia32_commutative(res);
goto exchange;
......@@ -461,7 +461,7 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg,
}
/* for commutative nodes we can simply swap the left/right */
if(is_ia32_commutative(node) && uses_out_reg_pos == 3) {
if(is_ia32_commutative(node) && uses_out_reg_pos == n_ia32_binary_right) {
ia32_swap_left_right(node);
DBG((dbg, LEVEL_1, "swapped left/right input of %+F to resolve "
"should be same constraint\n", node));
......@@ -510,7 +510,8 @@ static void assure_should_be_same_requirements(ia32_code_gen_t *cg,
if (pnc & pn_Cmp_Uo) {
ir_node *tmp;
int idx1 = 2, idx2 = 3;
int idx1 = n_ia32_binary_left;
int idx2 = n_ia32_binary_right;
if (is_ia32_xCmpCMov(node)) {
idx1 = 0;
......@@ -581,6 +582,7 @@ static void fix_am_source(ir_node *irn, void *env) {
ir_mode *proj_mode;
ir_node *load;
ir_node *load_res;
ir_node *mem;
int pnres;
/* should_be same constraint is fullfilled, nothing to do */
......@@ -594,17 +596,15 @@ static void fix_am_source(ir_node *irn, void *env) {
/* turn back address mode */
same_cls = arch_register_get_class(same_reg);
mem = get_irn_n(irn, n_ia32_mem);
assert(get_irn_mode(mem) == mode_M);
if (same_cls == &ia32_reg_classes[CLASS_ia32_gp]) {
load = new_rd_ia32_Load(dbgi, irg, block, base, index,
get_irn_n(irn, 4));
assert(get_irn_mode(get_irn_n(irn,4)) == mode_M);
load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
pnres = pn_ia32_Load_res;
proj_mode = mode_Iu;
} else if (same_cls == &ia32_reg_classes[CLASS_ia32_xmm]) {
load = new_rd_ia32_xLoad(dbgi, irg, block, base, index,
get_irn_n(irn, 4),
get_ia32_ls_mode(irn));
assert(get_irn_mode(get_irn_n(irn,4)) == mode_M);
load = new_rd_ia32_xLoad(dbgi, irg, block, base, index, mem,
get_ia32_ls_mode(irn));
pnres = pn_ia32_xLoad_res;
proj_mode = mode_E;
} else {
......@@ -632,7 +632,7 @@ static void fix_am_source(ir_node *irn, void *env) {
arch_set_irn_register(cg->arch_env, load_res, out_reg);
/* set the new input operand */
set_irn_n(irn, 3, load_res);
set_irn_n(irn, n_ia32_binary_right, load_res);
if(get_irn_mode(irn) == mode_T) {
const ir_edge_t *edge, *next;
foreach_out_edge_safe(irn, edge, next) {
......@@ -649,8 +649,8 @@ static void fix_am_source(ir_node *irn, void *env) {
}
/* this is a normal node now */
set_irn_n(irn, 0, noreg);
set_irn_n(irn, 1, noreg);
set_irn_n(irn, n_ia32_base, noreg);
set_irn_n(irn, n_ia32_index, noreg);
set_ia32_op_type(irn, ia32_Normal);
break;
}
......
......@@ -64,8 +64,7 @@ static ir_node *create_fpu_mode_spill(void *env, ir_node *state, int force,
ir_node *nomem = new_NoMem();
ir_node *frame = get_irg_frame(irg);
spill = new_rd_ia32_FnstCW(NULL, irg, block, frame, noreg, state,
nomem);
spill = new_rd_ia32_FnstCW(NULL, irg, block, frame, noreg, nomem, state);
set_ia32_op_type(spill, ia32_AddrModeD);
set_ia32_ls_mode(spill, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
set_ia32_use_frame(spill);
......@@ -102,8 +101,8 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
ir_node *or_const;
assert(last_state != NULL);
cwstore = new_rd_ia32_FnstCW(NULL, irg, block, frame, noreg, last_state,
nomem);
cwstore = new_rd_ia32_FnstCW(NULL, irg, block, frame, noreg, nomem,
last_state);
set_ia32_op_type(cwstore, ia32_AddrModeD);
set_ia32_ls_mode(cwstore, lsmode);
set_ia32_use_frame(cwstore);
......@@ -122,11 +121,11 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
NULL, 0, 3072);
arch_set_irn_register(cg->arch_env, or_const,
&ia32_gp_regs[REG_GP_NOREG]);
or = new_rd_ia32_Or(NULL, irg, block, noreg, noreg, load_res, or_const,
nomem);
or = new_rd_ia32_Or(NULL, irg, block, noreg, noreg, nomem, load_res,
or_const);
sched_add_before(before, or);
store = new_rd_ia32_Store(NULL, irg, block, frame, noreg, or, nomem);
store = new_rd_ia32_Store(NULL, irg, block, frame, noreg, nomem, or);
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, lsmode);
set_ia32_use_frame(store);
......
......@@ -276,7 +276,7 @@ static int ia32_dump_node(ir_node *n, FILE *F, dump_reason_t reason) {
fprintf(F, "AM scale = %d\n", get_ia32_am_scale(n));
/* dump pn code */
if(is_ia32_SwitchJmp(n)) {
if(is_ia32_SwitchJmp(n) || is_ia32_CopyB(n) || is_ia32_CopyB_i(n)) {
fprintf(F, "pn_code = %ld\n", get_ia32_pncode(n));
} else {
if(get_ia32_pncode(n) & ia32_pn_Cmp_Unsigned) {
......@@ -986,11 +986,11 @@ int get_ia32_out_regnr(const ir_node *node, int pos) {
void ia32_swap_left_right(ir_node *node)
{
ir_node *left = get_irn_n(node, 2);
ir_node *right = get_irn_n(node, 3);
ir_node *left = get_irn_n(node, n_ia32_binary_left);
ir_node *right = get_irn_n(node, n_ia32_binary_right);
assert(is_ia32_commutative(node));
set_irn_n(node, 2, right);
set_irn_n(node, 3, left);
set_irn_n(node, n_ia32_binary_left, right);
set_irn_n(node, n_ia32_binary_right, left);
set_ia32_pncode(node, get_inversed_pnc(get_ia32_pncode(node)));
}
......
......@@ -32,6 +32,16 @@
#include "firm_config.h"
#include "ia32_nodes_attr.h"
/** indices for AM inputs */
enum {
n_ia32_base = 0, /**< address mode base register input */
n_ia32_index = 1, /**< address mode index register input */
n_ia32_mem = 2, /**< memory input (mode_M) — placed directly after base and index */
n_ia32_unary_op = 3, /**< operand input of unary operations */
n_ia32_binary_left = 3, /**< left operand input of binary operations (same slot as unary op) */
n_ia32_binary_right = 4 /**< right operand input of binary operations */
};
/** proj numbers for "normal" one-result nodes (for the complicated cases where we not only
* need the result) */
enum {
......
......@@ -170,7 +170,7 @@ static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
mem = get_irn_n(store, 3);
spreg = arch_get_irn_register(cg->arch_env, curr_sp);
push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, val, curr_sp, mem);
push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
set_ia32_am_support(push, ia32_am_Source, ia32_am_unary);
......
This diff is collapsed.
This diff is collapsed.
......@@ -54,16 +54,9 @@
#define N_x87_REGS 8
/* first and second binop index */
#define BINOP_IDX_1 2
#define BINOP_IDX_2 3
/* the unop index */
#define UNOP_IDX 0
/* the store val index */
#define STORE_VAL_IDX 2
#define MASK_TOS(x) ((x) & (N_x87_REGS - 1))
/** the debug handle */
......@@ -892,8 +885,8 @@ static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
ir_node *patched_insn;
ir_op *dst;
x87_simulator *sim = state->sim;
ir_node *op1 = get_irn_n(n, BINOP_IDX_1);
ir_node *op2 = get_irn_n(n, BINOP_IDX_2);
ir_node *op1 = get_irn_n(n, n_ia32_binary_left);
ir_node *op2 = get_irn_n(n, n_ia32_binary_right);
const arch_register_t *op1_reg = x87_get_irn_register(sim, op1);
const arch_register_t *op2_reg = x87_get_irn_register(sim, op2);
const arch_register_t *out = x87_get_irn_register(sim, n);
......@@ -937,7 +930,7 @@ static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
if (op1_live_after) {
/* Both operands are live: push the first one.
This works even for op1 == op2. */
x87_create_fpush(state, n, op1_idx, BINOP_IDX_2);
x87_create_fpush(state, n, op1_idx, n_ia32_binary_right);
/* now do fxxx (tos=tos X op) */
op1_idx = 0;
op2_idx += 1;
......@@ -1013,7 +1006,7 @@ static int sim_binop(x87_state *state, ir_node *n, const exchange_tmpl *tmpl) {
/* second operand is an address mode */
if (op1_live_after) {
/* first operand is live: push it here */
x87_create_fpush(state, n, op1_idx, BINOP_IDX_1);
x87_create_fpush(state, n, op1_idx, n_ia32_binary_left);
op1_idx = 0;
/* use fxxx (tos = tos X mem) */
dst = tmpl->normal_op;
......@@ -1164,7 +1157,7 @@ static void collect_and_rewire_users(ir_node *store, ir_node *old_val, ir_node *
*/
static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
x87_simulator *sim = state->sim;
ir_node *val = get_irn_n(n, STORE_VAL_IDX);
ir_node *val = get_irn_n(n, n_ia32_vfst_val);
const arch_register_t *op2 = x87_get_irn_register(sim, val);
unsigned live = vfp_live_args_after(sim, n, 0);
int insn = NO_NODE_ADDED;
......@@ -1207,7 +1200,7 @@ static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
if (mode == mode_E) {
if (depth < N_x87_REGS) {
/* ok, we have a free register: push + fstp */
x87_create_fpush(state, n, op2_idx, STORE_VAL_IDX);
x87_create_fpush(state, n, op2_idx, n_ia32_vfst_val);
x87_pop(state);
x87_patch_insn(n, op_p);
} else {
......@@ -1242,7 +1235,7 @@ static int sim_store(x87_state *state, ir_node *n, ir_op *op, ir_op *op_p) {
/* reroute all former users of the store memory to the load memory */
edges_reroute(mem, mproj, irg);
/* set the memory input of the load to the store memory */
set_irn_n(vfld, 2, mem);
set_irn_n(vfld, n_ia32_vfld_mem, mem);
sched_add_after(n, vfld);
sched_add_after(vfld, rproj);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment