Commit d013e557 authored by Matthias Braun

Enforce Add/Sub pointer, offset to use reference_offset_mode

parent 1f55516e
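
As background for the hunks below: the commit makes pointer arithmetic use one canonical offset mode. A minimal sketch, assuming the public libFirm API and placeholder arguments (irg, block, ptr are not names from the patch), of the construction pattern the verifier will now insist on:

```c
#include <libfirm/firm.h>

/* Sketch, not part of the patch: advance a pointer by `bytes`.  The
 * offset constant is created in the reference mode's offset mode
 * (get_reference_offset_mode) instead of an arbitrary integer mode
 * such as mode_Is, so the stricter Add/Sub verification passes. */
static ir_node *advance_pointer(ir_graph *irg, ir_node *block,
                                ir_node *ptr, long bytes)
{
	ir_mode *offset_mode = get_reference_offset_mode(mode_P);
	ir_node *offset      = new_r_Const_long(irg, offset_mode, bytes);
	return new_r_Add(block, ptr, offset, mode_P);
}
```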
......@@ -34,13 +34,13 @@ static void fix_address_pic(ir_node *const node, void *const data)
if (i == n_Call_ptr && is_Call(node)) {
// Somehow we can always call PC relative. Are there trampolines
// involved?
res = be_new_Relocation(irg, X86_IMM_PCREL, entity);
res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
} else if (entity_has_definition(entity)
&& !(get_entity_linkage(entity) & IR_LINKAGE_MERGE)) {
res = be_new_Relocation(irg, X86_IMM_PCREL, entity);
res = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);
} else {
ir_node *const addr
= be_new_Relocation(irg, X86_IMM_GOTPCREL, entity);
= be_new_Relocation(irg, X86_IMM_GOTPCREL, entity, mode_P);
ir_type *const type = get_entity_type(entity);
ir_node *const nomem = get_irg_no_mem(irg);
ir_node *const block = get_irg_start_block(irg);
......
......@@ -201,7 +201,8 @@ static ir_node *load_va_from_stack(dbg_info *dbgi, ir_node *block, ir_mode *resm
// Increment stack_args and write back
long increment = round_up2(get_mode_size_bytes(resmode), 8);
ir_node *sizeof_resmode = new_r_Const_long(irg, mode_Is, increment);
ir_mode *offset_mode = get_reference_offset_mode(mode_P);
ir_node *sizeof_resmode = new_r_Const_long(irg, offset_mode, increment);
ir_mode *mode_stack_args = get_irn_mode(stack_args);
ir_node *stack_args_inc = new_rd_Add(dbgi, block, stack_args, sizeof_resmode, mode_stack_args);
make_store(dbgi, block, stack_args_ptr, stack_args_inc, stack_args_type, mem);
......@@ -248,12 +249,13 @@ static ir_node *load_va_from_register_or_stack(dbg_info *dbgi, ir_node *block,
// Load from reg_save + offset
ir_mode *mode_reg_save = get_irn_mode(reg_save);
ir_node *true_result_ptr = new_rd_Add(dbgi, true_block, reg_save, offset, mode_reg_save);
ir_mode *offset_mode = get_reference_offset_mode(mode_reg_save);
ir_node *conv_offset = new_r_Conv(true_block, offset, offset_mode);
ir_node *true_result_ptr = new_rd_Add(dbgi, true_block, reg_save, conv_offset, mode_reg_save);
ir_node *true_result = load_result(dbgi, true_block, true_result_ptr, restype, &true_mem);
// Increment offset and write back
ir_mode *offset_mode = get_type_mode(offset_type);
ir_node *offset_inc = new_rd_Add(dbgi, true_block, offset, stride, offset_mode);
ir_node *offset_inc = new_rd_Add(dbgi, true_block, offset, stride, mode_Is);
make_store(dbgi, true_block, offset_ptr, offset_inc, offset_type, &true_mem);
// False side: Load from the stack
......
......@@ -479,11 +479,12 @@ ir_node *be_new_Asm(dbg_info *const dbgi, ir_node *const block, int const n_ins,
return asmn;
}
ir_node *be_new_Relocation(ir_graph *irg, unsigned kind, ir_entity *entity)
ir_node *be_new_Relocation(ir_graph *irg, unsigned kind, ir_entity *entity,
ir_mode *mode)
{
ir_node *const block = get_irg_start_block(irg);
ir_node *const node = new_ir_node(NULL, irg, block, op_be_Relocation,
mode_P, 0, NULL);
mode, 0, NULL);
be_relocation_attr_t *const attr
= (be_relocation_attr_t*)get_irn_generic_attr(node);
attr->entity = entity;
......
......@@ -198,7 +198,8 @@ ir_node *be_new_Asm(dbg_info *dbgi, ir_node *block, int n_ins, ir_node **ins, ar
* specific. This node is meant to be used in preparation phases for position
* independent code.
*/
ir_node *be_new_Relocation(ir_graph *irg, unsigned kind, ir_entity *entity);
ir_node *be_new_Relocation(ir_graph *irg, unsigned kind, ir_entity *entity,
ir_mode *mode);
ir_entity *be_get_Relocation_entity(ir_node const* node);
......
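
The extra mode parameter lets a backend create relocations that are offsets rather than addresses. A hedged sketch of the two call styles seen in this patch, assuming backend-internal context; irg and entity are placeholders, and the X86_IMM_* kinds are the ones appearing in the surrounding hunks:

```c
/* A PC-relative reference is still an address, so it keeps mode_P. */
ir_node *addr = be_new_Relocation(irg, X86_IMM_PCREL, entity, mode_P);

/* A GOT offset that gets added to a PIC base register is created in
 * the pointer's offset mode, matching the stricter Add verification. */
ir_mode *offset_mode = get_reference_offset_mode(mode_P);
ir_node *got_off = be_new_Relocation(irg, X86_IMM_GOTOFF, entity, offset_mode);
```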
......@@ -43,10 +43,11 @@ void be_default_lower_va_arg(ir_node *node)
new_mem = node_mem;
}
const backend_params *be_params = be_get_backend_param();
unsigned round_up = round_up2(get_type_size_bytes(aptype), be_params->stack_param_align);
ir_node *const diff_const = new_r_Const_long(irg, mode_Iu, round_up);
ir_node *const new_ap = new_rd_Add(dbgi, block, ap, diff_const, mode_P);
backend_params const *const be_params = be_get_backend_param();
unsigned round_up = round_up2(get_type_size_bytes(aptype), be_params->stack_param_align);
ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
ir_node *const offset = new_r_Const_long(irg, offset_mode, round_up);
ir_node *const new_ap = new_rd_Add(dbgi, block, ap, offset, mode_P);
ir_node *const in[] = { new_mem, res, new_ap };
turn_into_tuple(node, ARRAY_SIZE(in), in);
......
......@@ -85,8 +85,11 @@ static ir_node *get_eip_relative(ir_graph *const irg,
{
/* Everything else is accessed relative to EIP. */
ir_node *const pic_base = ia32_get_pic_base(irg);
/* cheat a bit and set pic_base node to mode_P for now */
set_irn_mode(pic_base, mode_P);
ir_node *const block = get_irg_start_block(irg);
ir_node *reloc = be_new_Relocation(irg, (unsigned)kind, entity);
ir_mode *const offset_mode = get_reference_offset_mode(mode_P);
ir_node *reloc = be_new_Relocation(irg, (unsigned)kind, entity, offset_mode);
/* All ok now for locally constructed stuff. */
ir_node *add = new_rd_Add(NULL, block, pic_base, reloc, mode_P);
/* Make sure the walker doesn't visit this add again. */
......@@ -126,7 +129,7 @@ static void fix_address_elf(ir_node *const node, void *const data)
continue;
if (ia32_pic_style == IA32_PIC_ELF_PLT) {
res = be_new_Relocation(irg, X86_IMM_PLT, entity);
res = be_new_Relocation(irg, X86_IMM_PLT, entity, mode_P);
} else {
assert(ia32_pic_style == IA32_PIC_ELF_NO_PLT);
res = get_table_load(irg, X86_IMM_GOT, entity);
......@@ -169,7 +172,7 @@ static void fix_address_macho(ir_node *const node, void *const data)
continue;
ir_entity *const trampoline = get_trampoline(be, entity);
res = be_new_Relocation(irg, X86_IMM_ADDR, trampoline);
res = be_new_Relocation(irg, X86_IMM_ADDR, trampoline, mode_P);
} else {
/* Everything else is accessed relative to EIP. */
if (entity_has_definition(entity)
......
......@@ -5870,6 +5870,10 @@ void ia32_transform_graph(ir_graph *irg)
case IA32_PIC_ELF_PLT:
case IA32_PIC_ELF_NO_PLT: lconst_imm_kind = X86_IMM_GOTOFF; break;
}
/* fix the get_eip mode: ia32_pic sets it to mode_P */
ir_node *const get_eip = ia32_get_irg_data(irg)->get_eip;
if (get_eip != NULL)
set_irn_mode(get_eip, ia32_mode_gp);
register_transformers();
......
......@@ -654,12 +654,15 @@ static int verify_node_Add(const ir_node *n)
fine &= check_mode_same_input(n, n_Add_left, "left");
fine &= check_mode_same_input(n, n_Add_right, "right");
} else if (mode_is_reference(mode)) {
ir_mode *left_mode = get_irn_mode(get_Add_left(n));
ir_mode *right_mode = get_irn_mode(get_Add_right(n));
ir_mode *left_mode = get_irn_mode(get_Add_left(n));
ir_mode *right_mode = get_irn_mode(get_Add_right(n));
ir_mode *offset_mode = get_reference_offset_mode(mode);
if (mode_is_int(left_mode)) {
fine &= check_input_mode(n, n_Add_left, "left", offset_mode);
fine &= check_mode_same_input(n, n_Add_right, "right");
} else if (mode_is_int(right_mode)) {
fine &= check_mode_same_input(n, n_Add_left, "left");
fine &= check_input_mode(n, n_Add_right, "right", offset_mode);
} else {
warn(n, "AddP has no integer input");
fine = false;
......@@ -688,7 +691,8 @@ static int verify_node_Sub(const ir_node *n)
}
} else if (mode_is_reference(mode)) {
fine &= check_mode_same_input(n, n_Sub_left, "left");
fine &= check_input_func(n, n_Sub_right, "right", mode_is_int, "int");
ir_mode *offset_mode = get_reference_offset_mode(mode);
fine &= check_input_mode(n, n_Sub_right, "right", offset_mode);
}
return fine;
}
......
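
Concretely, the verifier hunks above no longer accept just any integer operand on a reference-mode Add/Sub; it has to be in the reference mode's offset mode. A small sketch of what now verifies, with irg, block and ptr as placeholders:

```c
ir_mode *offset_mode = get_reference_offset_mode(mode_P);

/* Accepted: the offset operand is exactly in the offset mode. */
ir_node *ok_off = new_r_Const_long(irg, offset_mode, 16);
ir_node *ok_add = new_r_Add(block, ptr, ok_off, mode_P);
ir_node *ok_sub = new_r_Sub(block, ptr, ok_off, mode_P);

/* Rejected by verify_node_Add/verify_node_Sub whenever mode_Is is not
 * the offset mode: being "some integer mode" is no longer sufficient. */
ir_node *bad_off = new_r_Const_long(irg, mode_Is, 16);
ir_node *bad_add = new_r_Add(block, ptr, bad_off, mode_P);
```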
......@@ -2731,8 +2731,9 @@ void ir_lower_dw_ops(void)
unop_tp_s = make_type_2_2(even, odd);
}
ir_mode *offset_mode = get_reference_offset_mode(mode_P);
env.tv_mode_bytes = new_tarval_from_long(env.p.doubleword_size/(2*8),
env.p.word_unsigned);
offset_mode);
env.waitq = new_pdeq();
irp_reserve_resources(irp, IRP_RESOURCE_TYPE_LINK
......
......@@ -2045,83 +2045,6 @@ static ir_node *apply_conv_on_mux(ir_node *mux, ir_mode *mode)
return new_r_Mux(block, sel, irn_false, irn_true, mode);
}
/**
* Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
* SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
* If possible, remove the Conv's.
*/
static ir_node *transform_node_AddSub(ir_node *n)
{
const ir_mode *mode = get_irn_mode(n);
if (mode_is_reference(mode)) {
const ir_node *left = get_binop_left(n);
ir_node *right = get_binop_right(n);
unsigned ref_bits = get_mode_size_bits(mode);
if (is_Conv(left)) {
const ir_mode *lmode = get_irn_mode(left);
unsigned bits = get_mode_size_bits(lmode);
if (ref_bits == bits &&
mode_is_int(lmode) &&
get_mode_arithmetic(lmode) == irma_twos_complement) {
ir_node *pre = get_Conv_op(left);
const ir_mode *pre_mode = get_irn_mode(pre);
if (mode_is_int(pre_mode) &&
get_mode_size_bits(pre_mode) == bits &&
get_mode_arithmetic(pre_mode) == irma_twos_complement) {
/* ok, this conv just changes to sign, moreover the calculation
* is done with same number of bits as our address mode, so
* we can ignore the conv as address calculation can be viewed
* as either signed or unsigned
*/
set_binop_left(n, pre);
}
}
}
if (is_Conv(right)) {
const ir_mode *rmode = get_irn_mode(right);
unsigned bits = get_mode_size_bits(rmode);
if (ref_bits == bits &&
mode_is_int(rmode) &&
get_mode_arithmetic(rmode) == irma_twos_complement) {
ir_node *pre = get_Conv_op(right);
const ir_mode *pre_mode = get_irn_mode(pre);
if (mode_is_int(pre_mode) &&
get_mode_size_bits(pre_mode) == bits &&
get_mode_arithmetic(pre_mode) == irma_twos_complement) {
/* ok, this conv just changes to sign, moreover the calculation
* is done with same number of bits as our address mode, so
* we can ignore the conv as address calculation can be viewed
* as either signed or unsigned
*/
set_binop_right(n, pre);
}
}
}
/* let address arithmetic use unsigned modes */
if (is_Const(right)) {
const ir_mode *rmode = get_irn_mode(right);
if (mode_is_signed(rmode) && get_mode_arithmetic(rmode) == irma_twos_complement) {
/* convert a AddP(P, *s) into AddP(P, *u) */
ir_mode *nm = get_reference_offset_mode(mode);
ir_node *pre = new_r_Conv(get_nodes_block(n), right, nm);
set_binop_right(n, pre);
}
}
}
return n;
}
/*
* Macros to include the constant folding optimizations for nodes with
* a choice of data (i.e. Phi and Mux).
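
With the offset mode enforced, the Conv-stripping done by the removed transform_node_AddSub() has no legal inputs left to fire on: an offset in a same-sized mode of the other signedness may no longer feed a pointer Add/Sub directly. The canonical form, sketched below with block, ptr and an offset value off_Is (a mode_Is value) as placeholders, is an explicit Conv to the offset mode, as the va_arg hunks above now do:

```c
/* Convert the offset to the pointer's offset mode up front instead of
 * relying on a later local optimisation to strip a sign-changing Conv. */
ir_mode *offset_mode = get_reference_offset_mode(mode_P);
ir_node *off         = new_r_Conv(block, off_Is, offset_mode);
ir_node *addr        = new_r_Add(block, ptr, off, mode_P);
```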
......@@ -3086,10 +3009,6 @@ static ir_node *transform_node_Add(ir_node *n)
if (n != oldn)
return n;
n = transform_node_AddSub(n);
if (n != oldn)
return n;
ir_node *a = get_Add_left(n);
ir_node *b = get_Add_right(n);
ir_mode *mode = get_irn_mode(n);
......@@ -3289,8 +3208,6 @@ static ir_node *transform_node_Sub(ir_node *n)
{
ir_node *oldn = n;
n = transform_node_AddSub(n);
ir_node *a = get_Sub_left(n);
ir_node *b = get_Sub_right(n);
ir_mode *mode = get_irn_mode(n);
......@@ -3331,7 +3248,6 @@ static ir_node *transform_node_Sub(ir_node *n)
}
ir_node *c;
restart:
HANDLE_BINOP_CHOICE((eval_func) tarval_sub, a, b, c, mode);
/* these optimizations are imprecise for floating-point ops */
......@@ -3489,18 +3405,22 @@ restart:
ir_mode *mb = get_irn_mode(op_b);
if (mode_is_reference(ma) && mode_is_reference(mb)) {
unsigned mode_size = get_mode_size_bits(mode);
unsigned ma_size = get_mode_size_bits(ma);
unsigned mb_size = get_mode_size_bits(mb);
if (ma_size == mode_size && mb_size == mode_size) {
unsigned const mode_size = get_mode_size_bits(mode);
unsigned const ma_size = get_mode_size_bits(ma);
unsigned const mb_size = get_mode_size_bits(mb);
ir_mode *const offset_mode_a = get_reference_offset_mode(ma);
ir_mode *const offset_mode_b = get_reference_offset_mode(mb);
if (ma_size == mode_size && mb_size == mode_size
&& offset_mode_a == offset_mode_b) {
/* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
a = op_a;
b = op_b;
set_Sub_left(n, a);
set_Sub_right(n, b);
goto restart;
dbg_info *const dbgi = get_irn_dbg_info(n);
ir_node *const block = get_nodes_block(n);
ir_node *const new_sub
= new_rd_Sub(dbgi, block, op_a, op_b, offset_mode_a);
ir_node *const conv
= new_rd_Conv(dbgi, block, new_sub, mode);
return conv;
}
}
}
......
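
The last hunk rewrites the pointer-difference case: instead of patching the Conv operands in place and jumping back to restart, Sub(Conv(aP), Conv(bP)) is now rebuilt as a Sub of the pointers in their common offset mode, followed by a Conv to the originally requested integer mode. Roughly, as a sketch with block, p, q and the original result mode `mode` as placeholders:

```c
/* Sub(Conv(q), Conv(p)) in integer mode `mode` becomes
 * Conv(Sub(q, p) [offset mode]) in `mode`. */
ir_mode *offset_mode = get_reference_offset_mode(get_irn_mode(q));
ir_node *diff        = new_r_Sub(block, q, p, offset_mode);
ir_node *result      = new_r_Conv(block, diff, mode);
```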