Commit 712399b9 authored by Christoph Mallon

ia32: Merge the attributes use_frame, need_stackent, need_32bit_stackent and need_64bit_stackent into one attribute.

* Out of the 16 possible combinations of these four boolean attributes, only four are sensible.
* This corrects the latent bug that not all of these attributes were compared in ia32_attrs_equal_().
parent f3327fe6
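In short, the patch folds the four one-bit flags into a single two-bit enum attribute. A minimal sketch of the new interface (the enum and the setter call appear verbatim in the hunks below; the flag-to-enum mapping in the comments is my rough reading of the change, not the author's wording):

typedef enum ia32_frame_use_t {
	IA32_FRAME_USE_NONE,  /* roughly: use_frame not set */
	IA32_FRAME_USE_32BIT, /* roughly: need_32bit_stackent */
	IA32_FRAME_USE_64BIT, /* roughly: need_64bit_stackent */
	IA32_FRAME_USE_AUTO,  /* roughly: use_frame/need_stackent set, entity type derived from ls_mode */
} ia32_frame_use_t;

/* Call sites that previously did
 *     set_ia32_use_frame(node);
 *     set_ia32_need_stackent(node);
 * now make a single call: */
set_ia32_frame_use(node, IA32_FRAME_USE_AUTO);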
@@ -158,10 +158,9 @@ static void ia32_set_frame_entity(ir_node *node, ir_entity *entity,
{
set_ia32_frame_ent(node, entity);
ia32_attr_t *attr = get_ia32_attr(node);
/* set ls_mode based on entity unless we explicitely requested
* a certain mode */
- if (attr->need_32bit_stackent || attr->need_64bit_stackent || is_ia32_Conv_I2I(node))
+ if (get_ia32_frame_use(node) != IA32_FRAME_USE_AUTO || is_ia32_Conv_I2I(node))
return;
ir_mode *mode = get_type_mode(type);
/** we 8bit stores have a special register requirement, so we can't simply
@@ -277,7 +276,7 @@ static int ia32_get_op_estimated_cost(ir_node const *const irn)
/* in case of address mode operations add additional cycles */
if (get_ia32_op_type(irn) != ia32_Normal) {
- if (is_ia32_use_frame(irn) || (
+ if (get_ia32_frame_use(irn) != IA32_FRAME_USE_NONE || (
is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_base)) &&
is_ia32_NoReg_GP(get_irn_n(irn, n_ia32_index))
)) {
@@ -313,7 +312,7 @@ static bool ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
{
if (!is_ia32_irn(irn) || /* must be an ia32 irn */
get_ia32_op_type(irn) != ia32_Normal || /* must not already be a addressmode irn */
- is_ia32_use_frame(irn)) /* must not already use frame */
+ get_ia32_frame_use(irn) != IA32_FRAME_USE_NONE) /* must not already use frame */
return false;
ir_node *op = get_irn_n(irn, i);
@@ -386,8 +385,7 @@ static void ia32_perform_memory_operand(ir_node *irn, unsigned int i)
if (get_mode_size_bits(load_mode) <= get_mode_size_bits(dest_op_mode))
set_ia32_ls_mode(irn, load_mode);
set_ia32_op_type(irn, ia32_AddrModeS);
- set_ia32_use_frame(irn);
- set_ia32_need_stackent(irn);
+ set_ia32_frame_use(irn, IA32_FRAME_USE_AUTO);
if (i == n_ia32_binary_left &&
get_ia32_am_support(irn) == ia32_am_binary &&
@@ -776,7 +774,7 @@ static ir_node *ia32_new_spill(ir_node *value, ir_node *after)
}
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, mode);
- set_ia32_use_frame(store);
+ set_ia32_frame_use(store, IA32_FRAME_USE_AUTO);
set_ia32_is_spill(store);
sched_add_after(after, store);
@@ -807,7 +805,7 @@ static ir_node *ia32_new_reload(ir_node *value, ir_node *spill, ir_node *before)
}
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, spillmode);
- set_ia32_use_frame(load);
+ set_ia32_frame_use(load, IA32_FRAME_USE_AUTO);
set_ia32_is_reload(load);
arch_add_irn_flags(load, arch_irn_flag_reload);
sched_add_before(before, load);
@@ -1000,18 +998,18 @@ static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
be_forbid_coalescing(env);
}
- if (!is_ia32_irn(node) || !is_ia32_use_frame(node)
+ if (!is_ia32_irn(node)
|| get_ia32_frame_ent(node) != NULL
|| get_ia32_op_type(node) != ia32_AddrModeS)
return;
- const ia32_attr_t *attr = get_ia32_attr_const(node);
- const ir_type *type;
- if (attr->need_32bit_stackent) {
- type = get_type_for_mode(ia32_mode_gp);
- } else if (attr->need_64bit_stackent) {
- type = get_type_for_mode(mode_Ls);
- } else {
+ ir_type const *type;
+ switch (get_ia32_frame_use(node)) {
+ case IA32_FRAME_USE_NONE: return;
+ case IA32_FRAME_USE_32BIT: type = get_type_for_mode(ia32_mode_gp); break;
+ case IA32_FRAME_USE_64BIT: type = get_type_for_mode(mode_Ls); break;
+ default: {
ir_mode *mode = get_ia32_ls_mode(node);
/* stupid hack: in some situations (like reloads folded into ConvI2I
* with 8bit mode, an 8bit entity and reload+spill would suffice, but
@@ -1026,6 +1024,8 @@ static void ia32_collect_frame_entity_nodes(ir_node *node, void *data)
} else {
type = get_type_for_mode(mode);
}
+ break;
+ }
}
be_load_needs_frame_entity(env, node, type);
}
@@ -361,7 +361,7 @@ static void ia32_emit_am(ir_node const *const node)
bool has_index = !is_ia32_NoReg_GP(idx);
/* just to be sure... */
- assert(!is_ia32_use_frame(node) || get_ia32_frame_ent(node) != NULL);
+ assert(get_ia32_frame_use(node) == IA32_FRAME_USE_NONE || get_ia32_frame_ent(node));
if (get_ia32_am_tls_segment(node))
be_emit_cstring("%gs:");
@@ -84,7 +84,7 @@ static ir_node *create_fpu_mode_spill(void *env, ir_node *state, bool force,
set_ia32_op_type(spill, ia32_AddrModeD);
/* use ia32_mode_gp, as movl has a shorter opcode than movw */
set_ia32_ls_mode(spill, ia32_mode_gp);
- set_ia32_use_frame(spill);
+ set_ia32_frame_use(spill, IA32_FRAME_USE_AUTO);
sched_add_after(skip_Proj(after), spill);
return spill;
@@ -92,12 +92,6 @@ static ir_node *create_fpu_mode_spill(void *env, ir_node *state, bool force,
return NULL;
}
- static void set_32bit_stackent(ir_node *node)
- {
- ia32_attr_t *attr = get_ia32_attr(node);
- attr->need_32bit_stackent = true;
- }
static ir_node *create_fldcw_ent(ir_node *block, ir_entity *entity)
{
ir_graph *irg = get_irn_irg(block);
@@ -107,9 +101,8 @@ static ir_node *create_fldcw_ent(ir_node *block, ir_entity *entity)
set_ia32_op_type(reload, ia32_AddrModeS);
set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
set_ia32_am_ent(reload, entity);
- set_ia32_use_frame(reload);
+ set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
- set_32bit_stackent(reload);
return reload;
}
@@ -141,8 +134,7 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
reload = new_bd_ia32_FldCW(NULL, block, frame, noreg, spill);
set_ia32_op_type(reload, ia32_AddrModeS);
set_ia32_ls_mode(reload, ia32_reg_classes[CLASS_ia32_fp_cw].mode);
- set_ia32_use_frame(reload);
- set_32bit_stackent(reload);
+ set_ia32_frame_use(reload, IA32_FRAME_USE_32BIT);
arch_set_irn_register(reload, &ia32_registers[REG_FPCW]);
sched_add_before(before, reload);
@@ -158,15 +150,13 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
last_state);
set_ia32_op_type(cwstore, ia32_AddrModeD);
set_ia32_ls_mode(cwstore, lsmode);
- set_ia32_use_frame(cwstore);
- set_32bit_stackent(cwstore);
+ set_ia32_frame_use(cwstore, IA32_FRAME_USE_32BIT);
sched_add_before(before, cwstore);
load = new_bd_ia32_Load(NULL, block, frame, noreg, cwstore);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, lsmode);
- set_ia32_use_frame(load);
- set_32bit_stackent(load);
+ set_ia32_frame_use(load, IA32_FRAME_USE_32BIT);
sched_add_before(before, load);
load_res = new_r_Proj(load, ia32_mode_gp, pn_ia32_Load_res);
@@ -183,16 +173,14 @@ static ir_node *create_fpu_mode_reload(void *env, ir_node *state,
set_ia32_op_type(store, ia32_AddrModeD);
/* use ia32_mode_gp, as movl has a shorter opcode than movw */
set_ia32_ls_mode(store, ia32_mode_gp);
- set_ia32_use_frame(store);
- set_32bit_stackent(store);
+ set_ia32_frame_use(store, IA32_FRAME_USE_32BIT);
store_proj = new_r_Proj(store, mode_M, pn_ia32_Store_M);
sched_add_before(before, store);
fldcw = new_bd_ia32_FldCW(NULL, block, frame, noreg, store_proj);
set_ia32_op_type(fldcw, ia32_AddrModeS);
set_ia32_ls_mode(fldcw, lsmode);
- set_ia32_use_frame(fldcw);
- set_32bit_stackent(fldcw);
+ set_ia32_frame_use(fldcw, IA32_FRAME_USE_32BIT);
arch_set_irn_register(fldcw, &ia32_registers[REG_FPCW]);
sched_add_before(before, fldcw);
@@ -83,6 +83,17 @@ static bool has_ia32_condcode_attr(const ir_node *node)
|| is_ia32_Sbb0(node) || is_ia32_Cmc(node);
}
+ static char const *get_frame_use_str(ir_node const *const node)
+ {
+ switch (get_ia32_frame_use(node)) {
+ case IA32_FRAME_USE_NONE: return "none";
+ case IA32_FRAME_USE_32BIT: return "32bit";
+ case IA32_FRAME_USE_64BIT: return "64bit";
+ case IA32_FRAME_USE_AUTO: return "auto";
+ }
+ return "invalid";
+ }
/**
* Dumper interface for dumping ia32 nodes in vcg.
* @param n the node to dump
@@ -218,13 +229,12 @@ static void ia32_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
fprintf(F, "size = %u\n", get_ia32_copyb_size(n));
}
fprintf(F, "use_frame = %d\n", is_ia32_use_frame(n));
fprintf(F, "commutative = %d\n", is_ia32_commutative(n));
fprintf(F, "need stackent = %d\n", is_ia32_need_stackent(n));
fprintf(F, "is reload = %d\n", is_ia32_is_reload(n));
fprintf(F, "latency = %u\n", get_ia32_latency(n));
fprintf(F, "commutative = %d\n", is_ia32_commutative(n));
fprintf(F, "is reload = %d\n", is_ia32_is_reload(n));
fprintf(F, "latency = %u\n", get_ia32_latency(n));
/* dump frame entity */
fprintf(F, "frame use = %s\n", get_frame_use_str(n));
fprintf(F, "frame entity = ");
if (get_ia32_frame_ent(n)) {
ir_fprintf(F, "%+F", get_ia32_frame_ent(n));
@@ -505,35 +515,7 @@ void ia32_copy_am_attrs(ir_node *to, const ir_node *from)
set_ia32_am_ent(to, get_ia32_am_ent(from));
add_ia32_am_offs_int(to, get_ia32_am_offs_int(from));
set_ia32_frame_ent(to, get_ia32_frame_ent(from));
- if (is_ia32_use_frame(from))
- set_ia32_use_frame(to);
- }
- /**
- * Sets the uses_frame flag.
- */
- void set_ia32_use_frame(ir_node *node)
- {
- ia32_attr_t *const attr = get_ia32_attr(node);
- attr->use_frame = 1;
- }
- /**
- * Clears the uses_frame flag.
- */
- void clear_ia32_use_frame(ir_node *node)
- {
- ia32_attr_t *const attr = get_ia32_attr(node);
- attr->use_frame = 0;
- }
- /**
- * Gets the uses_frame flag.
- */
- int is_ia32_use_frame(const ir_node *node)
- {
- const ia32_attr_t *attr = get_ia32_attr_const(node);
- return attr->use_frame;
+ set_ia32_frame_use(to, get_ia32_frame_use(from));
}
/**
@@ -563,24 +545,6 @@ int is_ia32_commutative(const ir_node *node)
return attr->is_commutative;
}
- void set_ia32_need_stackent(ir_node *node)
- {
- ia32_attr_t *const attr = get_ia32_attr(node);
- attr->need_stackent = 1;
- }
- void clear_ia32_need_stackent(ir_node *node)
- {
- ia32_attr_t *const attr = get_ia32_attr(node);
- attr->need_stackent = 0;
- }
- int is_ia32_need_stackent(const ir_node *node)
- {
- const ia32_attr_t *attr = get_ia32_attr_const(node);
- return attr->need_stackent;
- }
void set_ia32_is_reload(ir_node *node)
{
ia32_attr_t *attr = get_ia32_attr(node);
@@ -651,10 +615,13 @@ void set_ia32_frame_ent(ir_node *node, ir_entity *ent)
{
ia32_attr_t *attr = get_ia32_attr(node);
attr->frame_ent = ent;
- if (ent != NULL)
- set_ia32_use_frame(node);
- else
- clear_ia32_use_frame(node);
+ if (!ent) {
+ set_ia32_frame_use(node, IA32_FRAME_USE_NONE);
+ } else if (get_ia32_frame_use(node) == IA32_FRAME_USE_NONE) {
+ /* Only set frame use to auto, if it is not set to something more specific
+ * already. */
+ set_ia32_frame_use(node, IA32_FRAME_USE_AUTO);
+ }
}
@@ -902,7 +869,7 @@ static int ia32_attrs_equal_(const ia32_attr_t *a, const ia32_attr_t *b)
{
/* nodes with not yet assigned entities shouldn't be CSEd (important for
* unsigned int -> double conversions */
- if (a->use_frame && a->frame_ent == NULL)
+ if (a->frame_use != IA32_FRAME_USE_NONE && !a->frame_ent)
return false;
return a->tp == b->tp
@@ -911,7 +878,7 @@ static int ia32_attrs_equal_(const ia32_attr_t *a, const ia32_attr_t *b)
&& a->am_ent == b->am_ent
&& a->am_sc_no_pic_adjust == b->am_sc_no_pic_adjust
&& a->ls_mode == b->ls_mode
- && a->use_frame == b->use_frame
+ && a->frame_use == b->frame_use
&& a->frame_ent == b->frame_ent
&& a->has_except_label == b->has_except_label
&& a->ins_permuted == b->ins_permuted;
@@ -136,21 +136,6 @@ unsigned get_ia32_am_scale(const ir_node *node);
*/
void set_ia32_am_scale(ir_node *node, unsigned scale);
- /**
- * Sets the uses_frame flag.
- */
- void set_ia32_use_frame(ir_node *node);
- /**
- * Clears the uses_frame flag.
- */
- void clear_ia32_use_frame(ir_node *node);
- /**
- * Gets the uses_frame flag.
- */
- int is_ia32_use_frame(const ir_node *node);
/**
* copies all address-mode attributes from one node to the other
*/
@@ -171,20 +156,19 @@ void clear_ia32_commutative(ir_node *node);
*/
int is_ia32_commutative(const ir_node *node);
- /**
- * Sets node needs_stackent
- */
- void set_ia32_need_stackent(ir_node *node);
- /**
- * Clears node needs_stackent
- */
- void clear_ia32_need_stackent(ir_node *node);
- /**
- * Checks if node needs a stack entity assigned
- */
- int is_ia32_need_stackent(const ir_node *node);
+ static inline void set_ia32_frame_use(ir_node *const node, ia32_frame_use_t const val)
+ {
+ ia32_attr_t *const attr = get_ia32_attr(node);
+ /* Only allow more specific, the same or reset. */
+ assert(attr->frame_use == IA32_FRAME_USE_NONE || attr->frame_use == IA32_FRAME_USE_AUTO || attr->frame_use == val || val == IA32_FRAME_USE_NONE);
+ attr->frame_use = val;
+ }
+ static inline ia32_frame_use_t get_ia32_frame_use(ir_node const *const node)
+ {
+ ia32_attr_t const *const attr = get_ia32_attr_const(node);
+ return (ia32_frame_use_t)attr->frame_use;
+ }
void set_ia32_is_reload(ir_node *node);
int is_ia32_is_reload(const ir_node *node);
@@ -78,6 +78,13 @@ typedef enum {
} ia32_attr_type_t;
#endif
+ typedef enum ia32_frame_use_t {
+ IA32_FRAME_USE_NONE,
+ IA32_FRAME_USE_32BIT,
+ IA32_FRAME_USE_64BIT,
+ IA32_FRAME_USE_AUTO,
+ } ia32_frame_use_t;
/**
* The generic ia32 attributes. Every node has them.
*/
@@ -91,14 +98,11 @@ struct ia32_attr_t {
unsigned am_sc_no_pic_adjust:1; /**< AM entity can be relative to EIP */
unsigned am_tls_segment:1; /**< addresses are relative to TLS */
- unsigned use_frame:1; /**< Indicates whether the operation uses the frame pointer or not. */
+ ENUMBF(ia32_frame_use_t) frame_use:2; /**< Whether an entity on the frame is used and its size. */
unsigned has_except_label:1; /**< Set if this node needs a label because of possible exception. */
unsigned is_commutative:1; /**< Indicates whether op is commutative or not. */
- unsigned need_stackent:1; /**< Set to 1 if node need space on stack. */
- unsigned need_64bit_stackent:1; /**< needs a 64bit stack entity (see double->unsigned int conv) */
- unsigned need_32bit_stackent:1; /**< needs a 32bit stack entity */
unsigned ins_permuted:1; /**< inputs of node have been permuted (for commutative nodes) */
unsigned is_reload:1; /**< node performs a reload */
unsigned is_spill:1;
@@ -821,7 +821,7 @@ static void peephole_ia32_Lea(ir_node *node)
int has_immediates;
ir_node *op1;
ir_node *op2;
- assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
+ assert(get_ia32_frame_use(node) == IA32_FRAME_USE_NONE || get_ia32_frame_ent(node));
/* check if we have immediates values (frame entities should already be
* expressed in the offsets) */
if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_ent(node) != NULL) {
@@ -664,7 +664,7 @@ static void set_address(ir_node *node, const x86_address_t *addr)
set_ia32_am_tls_segment(node, addr->tls_segment);
set_ia32_frame_ent(node, addr->frame_entity);
if (addr->use_frame)
- set_ia32_use_frame(node);
+ set_ia32_frame_use(node, IA32_FRAME_USE_AUTO);
}
/**
......@@ -3691,13 +3691,14 @@ static ir_node *gen_Mux(ir_node *node)
static void force_int_stackent(ir_node *node, ir_mode *mode)
{
- ia32_attr_t *attr = get_ia32_attr(node);
+ ia32_frame_use_t frame_use;
if (get_mode_size_bits(mode) == 64) {
- attr->need_64bit_stackent = true;
+ frame_use = IA32_FRAME_USE_64BIT;
} else {
assert(get_mode_size_bits(mode) == 32);
- attr->need_32bit_stackent = true;
+ frame_use = IA32_FRAME_USE_32BIT;
}
+ set_ia32_frame_use(node, frame_use);
}
/**
@@ -3714,7 +3715,6 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node)
ir_node *fist = gen_fist(dbgi, block, frame, noreg_GP, nomem, new_op);
set_irn_pinned(fist, op_pin_state_floats);
- set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
arch_add_irn_flags(fist, arch_irn_flag_spill);
@@ -3737,7 +3737,6 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node)
ir_node *load = new_bd_ia32_Load(dbgi, block, frame, noreg_GP, mem);
set_irn_pinned(load, op_pin_state_floats);
- set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, ia32_mode_gp);
force_int_stackent(load, ls_mode);
@@ -3759,7 +3758,7 @@ static ir_node *gen_x87_conv(ir_mode *tgt_mode, ir_node *node)
ir_node *store = new_bd_ia32_fst(dbgi, block, frame, noreg_GP, nomem, node,
tgt_mode);
set_irn_pinned(store, op_pin_state_floats);
- set_ia32_use_frame(store);
+ set_ia32_frame_use(store, IA32_FRAME_USE_AUTO);
set_ia32_op_type(store, ia32_AddrModeD);
arch_add_irn_flags(store, arch_irn_flag_spill);
SET_IA32_ORIG_NODE(store, node);
@@ -3769,7 +3768,7 @@ static ir_node *gen_x87_conv(ir_mode *tgt_mode, ir_node *node)
ir_node *load = new_bd_ia32_fld(dbgi, block, frame, noreg_GP, store_mem,
tgt_mode);
set_irn_pinned(load, op_pin_state_floats);
- set_ia32_use_frame(load);
+ set_ia32_frame_use(load, IA32_FRAME_USE_AUTO);
set_ia32_op_type(load, ia32_AddrModeS);
SET_IA32_ORIG_NODE(load, node);
@@ -3811,7 +3810,6 @@ static void store_gp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
nomem, new_node);
set_irn_pinned(store, op_pin_state_floats);
- set_ia32_use_frame(store);
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, ia32_mode_gp);
arch_add_irn_flags(store, arch_irn_flag_spill);
@@ -3830,13 +3828,11 @@ static void store_gp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
ir_node *zero_store_mem = new_r_Proj(zero_store, mode_M, pn_ia32_Store_M);
set_irn_pinned(zero_store, op_pin_state_floats);
- set_ia32_use_frame(zero_store);
set_ia32_op_type(zero_store, ia32_AddrModeD);
add_ia32_am_offs_int(zero_store, 4);
set_ia32_ls_mode(zero_store, ia32_mode_gp);
arch_add_irn_flags(zero_store, arch_irn_flag_spill);
- ia32_attr_t *zero_store_attr = get_ia32_attr(zero_store);
- zero_store_attr->need_64bit_stackent = true;
+ set_ia32_frame_use(zero_store, IA32_FRAME_USE_64BIT);
in[0] = zero_store_mem;
in[1] = store_mem;
@@ -3876,11 +3872,11 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node)
ir_node *fild = new_bd_ia32_fild(dbgi, new_block, addr->base,
addr->index, addr->mem);
ir_node *new_node = new_r_Proj(fild, mode_fp, pn_ia32_fild_res);
+ set_am_attributes(fild, &am);
if (addr->use_frame && addr->entity == NULL
&& get_mode_arithmetic(am.ls_mode) != irma_twos_complement)
force_int_stackent(fild, am.ls_mode);
- set_am_attributes(fild, &am);
SET_IA32_ORIG_NODE(fild, node);
fix_mem_proj(fild, &am);
@@ -4050,7 +4046,6 @@ static void store_fp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
ir_node *fst = new_bd_ia32_fst(dbgi, new_block, frame, noreg_GP, nomem,
new_value, mode);
set_irn_pinned(fst, op_pin_state_floats);
- set_ia32_use_frame(fst);
set_ia32_op_type(fst, ia32_AddrModeD);
arch_add_irn_flags(fst, arch_irn_flag_spill);
force_int_stackent(fst, mode);
@@ -4099,11 +4094,11 @@ static ir_node *gen_Bitcast(ir_node *const node)
const x86_address_t *addr = &am.addr;
ir_node *fld = new_bd_ia32_fld(dbgi, new_block, addr->base,
addr->index, addr->mem, dst_mode);
- force_int_stackent(fld, dst_mode);
res = new_r_Proj(fld, mode_fp, pn_ia32_fld_res);
am.ls_mode = dst_mode;
set_am_attributes(fld, &am);
+ force_int_stackent(fld, dst_mode);
SET_IA32_ORIG_NODE(fld, node);
fix_mem_proj(fld, &am);
break;
@@ -4112,10 +4107,10 @@ static ir_node *gen_Bitcast(ir_node *const node)
const x86_address_t *addr = &am.addr;
ir_node *ld = new_bd_ia32_Load(dbgi, new_block, addr->base, addr->index,
addr->mem);
- force_int_stackent(ld, dst_mode);
res = new_r_Proj(ld, ia32_mode_gp, pn_ia32_Load_res);
am.ls_mode = dst_mode;
set_am_attributes(ld, &am);
+ force_int_stackent(ld, dst_mode);
SET_IA32_ORIG_NODE(ld, node);
fix_mem_proj(ld, &am);
break;
@@ -4530,8 +4525,6 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
set_irn_pinned(store_low, op_pin_state_floats);
set_irn_pinned(store_high, op_pin_state_floats);
- set_ia32_use_frame(store_low);
- set_ia32_use_frame(store_high);
set_ia32_op_type(store_low, ia32_AddrModeD);
set_ia32_op_type(store_high, ia32_AddrModeD);
set_ia32_ls_mode(store_low, ia32_mode_gp);
@@ -4548,7 +4541,6 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
/* do a fild */
ir_node *fild = new_bd_ia32_fild(dbgi, block, frame, noreg_GP, sync);
set_irn_pinned(fild, op_pin_state_floats);
- set_ia32_use_frame(fild);
set_ia32_op_type(fild, ia32_AddrModeS);
set_ia32_ls_mode(fild, mode_Ls);
force_int_stackent(fild, mode_Ls);
@@ -4604,7 +4596,6 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node)
ir_node *fist = gen_fist(dbgi, block, frame, noreg_GP, nomem, new_val);
set_irn_pinned(fist, op_pin_state_floats);
SET_IA32_ORIG_NODE(fist, node);
- set_ia32_use_frame(fist);
set_ia32_op_type(fist, ia32_AddrModeD);
set_ia32_ls_mode(fist, mode_Ls);
arch_add_irn_flags(fist, arch_irn_flag_spill);
@@ -4627,7 +4618,6 @@ static ir_node *gen_Proj_l_FloattoLL(ir_node *node)
ir_node *load = new_bd_ia32_Load(dbgi, block, frame, noreg_GP, new_pred);
set_irn_pinned(load, op_pin_state_floats);
SET_IA32_ORIG_NODE(load, node);
- set_ia32_use_frame(load);
set_ia32_op_type(load, ia32_AddrModeS);
set_ia32_ls_mode(load, ia32_mode_gp);
force_int_stackent(load, mode_Ls);