Commit a8583be0 authored by sebastian.buchwald1

Fix indentation

parent 52e16f28
@@ -49,7 +49,7 @@ static void pqueue_heapify(pqueue_t *q, size_t pos)
    exchange = pos * 2;
    if ((pos * 2 + 1) < len
        && q->elems[exchange].priority < q->elems[pos * 2 + 1].priority)
        exchange = pos * 2 + 1;
    if (exchange == pos)
...
@@ -665,7 +665,7 @@ undefined:
    switch (relation) {
    case ir_relation_less_greater:
        if (!tarval_is_null(tarval_andnot(ro, lz)) ||
            !tarval_is_null(tarval_andnot(lo, rz))) {
            // At least one bit differs.
            z = o = t;
        } else if (lz == lo && rz == ro && lz == rz) {

@@ -677,7 +677,7 @@ undefined:
    case ir_relation_equal:
        if (!tarval_is_null(tarval_andnot(ro, lz)) ||
            !tarval_is_null(tarval_andnot(lo, rz))) {
            // At least one bit differs.
            z = o = f;
        } else if (lz == lo && rz == ro && lz == rz) {

@@ -691,7 +691,7 @@ undefined:
    case ir_relation_less:
        /* TODO handle negative values */
        if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
            tarval_is_negative(rz) || tarval_is_negative(ro))
            goto result_unknown;
        if (tarval_cmp(lz, ro) & relation) {

@@ -709,7 +709,7 @@ undefined:
    case ir_relation_greater:
        /* TODO handle negative values */
        if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
            tarval_is_negative(rz) || tarval_is_negative(ro))
            goto result_unknown;
        if (!(tarval_cmp(lz, ro) & relation)) {
...
@@ -170,8 +170,8 @@ static void dca_transfer(ir_node *irn)
         * don't fit into the smaller mode. */
        if (get_tarval_highest_bit(care) >= (int)pred_bits)
            care = tarval_or(care,
                             tarval_shl_unsigned(get_mode_one(mode),
                                                 pred_bits - 1));
    } else {
        /* Thwart sign extension as it doesn't make sense on
         * our abstract tarvals. */

@@ -401,7 +401,7 @@ static void dca_init_node(ir_node *n, void *data)
    ir_mode *m = get_irn_mode(n);

    set_irn_link(n, (void *) (mode_is_int(m) ?
                              get_mode_null(m) : tarval_b_false));
}

void dca_analyze(ir_graph *irg)

@@ -413,7 +413,7 @@ void dca_analyze(ir_graph *irg)
    assert(tarval_get_wrap_on_overflow());
    assert(((ir_resources_reserved(irg) & IR_RESOURCE_IRN_LINK) != 0) &&
           "user of dc analysis must reserve links");

    irg_walk_graph(irg, dca_init_node, NULL, 0);
...
@@ -290,7 +290,7 @@ static void handle_if(ir_node *block, ir_node *cmp, ir_relation rel, env_t *env)
            env->num_eq += 1;
        } else if (block_dominates(blk, cond_block)
                   && is_Const(right) && !get_irn_pinned(user)) {
            /*
             * left == Const and we found a movable user of left in a
             * dominator of the Cond block
...
@@ -227,14 +227,12 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
        vrp_left = vrp_get_or_set_info(info, get_Eor_left(node));
        vrp_right = vrp_get_or_set_info(info, get_Eor_right(node));

-       new_bits_set = tarval_or(
-           tarval_and(vrp_left->bits_set, tarval_not(vrp_right->bits_not_set)),
-           tarval_and(tarval_not(vrp_left->bits_not_set), vrp_right->bits_set));
-       new_bits_not_set = tarval_not(tarval_or(
-           tarval_and(vrp_left->bits_set,vrp_right->bits_set),
-           tarval_and(tarval_not(vrp_left->bits_not_set),
-           tarval_not(vrp_right->bits_not_set))));
+       new_bits_set = tarval_or(tarval_and(vrp_left->bits_set, tarval_not(vrp_right->bits_not_set)),
+                                tarval_and(tarval_not(vrp_left->bits_not_set), vrp_right->bits_set));
+       new_bits_not_set = tarval_not(tarval_or(tarval_and(vrp_left->bits_set,vrp_right->bits_set),
+                                               tarval_and(tarval_not(vrp_left->bits_not_set),
+                                                          tarval_not(vrp_right->bits_not_set))));
        break;
    }

@@ -272,8 +270,8 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
        /* The second and is needed if target type is smaller*/
        new_bits_not_set = tarval_convert_to(get_mode_all_one(old_mode), new_mode);
        new_bits_not_set = tarval_and(new_bits_not_set, tarval_convert_to(vrp_pred->bits_not_set, new_mode));
-       new_bits_set = tarval_and(
-           new_bits_not_set, tarval_convert_to(vrp_pred->bits_set, new_mode));
+       new_bits_set = tarval_and(new_bits_not_set,
+                                 tarval_convert_to(vrp_pred->bits_set, new_mode));

        /* Matze: TODO, BUGGY, tarval_cmp never returns ir_relation_less_equal */
        if (tarval_cmp(vrp_pred->range_top, get_mode_max(new_mode)) == ir_relation_less_equal) {

@@ -322,8 +320,7 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
        for (int i = 1, num = get_Phi_n_preds(node); i < num; i++) {
            pred = get_Phi_pred(node, i);
            vrp_pred = vrp_get_or_set_info(info, pred);
-           if (new_range_type == VRP_RANGE && vrp_pred->range_type ==
-               VRP_RANGE) {
+           if (new_range_type == VRP_RANGE && vrp_pred->range_type == VRP_RANGE) {
                ir_relation relation = tarval_cmp(new_range_top, vrp_pred->range_top);
                if (relation == ir_relation_less) {
                    new_range_top = vrp_pred->range_top;

@@ -337,7 +334,7 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
            }
            new_bits_set = tarval_and(new_bits_set, vrp_pred->bits_set);
            new_bits_not_set = tarval_or(new_bits_not_set,
                                         vrp_pred->bits_not_set);
        }
        break;

@@ -397,7 +394,7 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
    }

    if (vrp->range_type == VRP_UNDEFINED &&
        new_range_type != VRP_UNDEFINED) {
        something_changed = true;
        vrp->range_type = new_range_type;
        vrp->range_bottom = new_range_bottom;

@@ -419,12 +416,12 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
        /* if they are overlapping, cut the range.*/
        /* TODO: Maybe we can preserve more information here*/
        if (tarval_cmp(vrp->range_bottom, new_range_top) == ir_relation_greater &&
            tarval_cmp(vrp->range_bottom, new_range_bottom) == ir_relation_greater) {
            something_changed = true;
            vrp->range_bottom = new_range_top;
        } else if (tarval_cmp(vrp->range_top, new_range_bottom) == ir_relation_greater &&
                   tarval_cmp(vrp->range_top, new_range_top) == ir_relation_less) {
            something_changed = true;
            vrp->range_top = new_range_bottom;
        }

@@ -567,7 +564,7 @@ ir_relation vrp_cmp(const ir_node *left, const ir_node *right)
    }

    if (!tarval_is_null(tarval_and(vrp_left->bits_set, tarval_not(vrp_right->bits_not_set))) ||
        !tarval_is_null(tarval_and(tarval_not(vrp_left->bits_not_set), vrp_right->bits_set))) {
        return ir_relation_less_greater;
    }
...
@@ -450,7 +450,7 @@ static void introduce_epilogue(ir_node *ret, bool omit_fp)
        ir_type *frame_type = get_irg_frame_type(irg);
        unsigned frame_size = get_type_size(frame_type);
        ir_node *incsp = amd64_new_IncSP(block, curr_sp, -(int)frame_size,
                                         true);
        sched_add_before(ret, incsp);
        curr_sp = incsp;
    }

@@ -496,7 +496,7 @@ static void introduce_prologue(ir_graph *const irg, bool omit_fp)
        be_keep_if_unused(incsp);
    } else {
        ir_node *const incsp = amd64_new_IncSP(block, initial_sp,
                                               frame_size, false);
        sched_add_after(start, incsp);
        edges_reroute_except(initial_sp, incsp, incsp);
    }

@@ -654,7 +654,7 @@ static void amd64_generate_code(FILE *output, const char *cup_name)
    be_timer_push(T_RA_PREPARATION);
    be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL,
                       NULL, NULL);
    be_timer_pop(T_RA_PREPARATION);

    be_step_regalloc(irg, &amd64_regalloc_if);
...
@@ -43,7 +43,7 @@ static ir_node *create_gotpcrel_load(ir_graph *irg, ir_entity *const entity)
    ir_node *const nomem = get_irg_no_mem(irg);
    ir_node *const block = get_irg_start_block(irg);
    ir_node *const load = new_rd_Load(NULL, block, nomem, addr, mode_P,
                                      type, cons_floats);
    return new_r_Proj(load, mode_P, pn_Load_res);
}
...
@@ -921,8 +921,7 @@ static ir_node *gen_binop_xmm(ir_node *node, ir_node *op0, ir_node *op1,
    fix_node_mem_proj(new_node, args.mem_proj);

-   arch_set_irn_register_req_out(new_node, 0,
-                                 &amd64_requirement_xmm_same_0);
+   arch_set_irn_register_req_out(new_node, 0, &amd64_requirement_xmm_same_0);
    return be_new_Proj(new_node, pn_amd64_subs_res);
}

@@ -1046,7 +1045,7 @@ static ir_node *gen_Add(ir_node *const node)
        if (mode == x86_mode_E)
            return gen_binop_x87(node, op1, op2, new_bd_amd64_fadd);
        return gen_binop_am(node, op1, op2, new_bd_amd64_adds,
                            pn_amd64_adds_res, match_commutative | match_am);
    }

    match_flags_t flags = match_immediate | match_am | match_mode_neutral

@@ -1613,7 +1612,9 @@ static ir_node *gen_Switch(ir_node *const node)
    table = ir_switch_table_duplicate(irg, table);

    ir_node *const out = new_bd_amd64_jmp_switch(dbgi, new_block, arity, in,
-                                                in_reqs, n_outs, op_mode, X86_SIZE_64, &addr, table, entity);
+                                                in_reqs, n_outs, op_mode,
+                                                X86_SIZE_64, &addr, table,
+                                                entity);
    return out;
}

@@ -2133,7 +2134,7 @@ static ir_node *match_mov(dbg_info *dbgi, ir_node *block, ir_node *value,
    ir_node *load;
    ir_node *op;
    bool use_am = use_address_matching(mode, match_am, block, NULL,
                                       value, &load, &op);

    amd64_op_mode_t op_mode;
    x86_addr_t addr;

@@ -2231,9 +2232,9 @@ static ir_node *create_cvtsd2ss(dbg_info *dbgi, ir_node *block, ir_node *value)
}

static void store_to_temp(construct_binop_func const new_store,
                          arch_register_req_t const **const in_reqs, x86_addr_t *addr,
                          dbg_info *dbgi, ir_node *block, ir_node **in, int *n_in,
                          ir_node *new_op, x86_insn_size_t size)
{
    ir_graph *const irg = get_irn_irg(block);
    ir_node *const frame = get_irg_frame(irg);

@@ -2345,7 +2346,7 @@ static ir_node *conv_x87_to_int(dbg_info *const dbgi, ir_node *const block,
    int n_in = 0;
    x86_addr_t addr;
    store_to_temp(new_bd_amd64_fisttp, x87K_reg_mem_reqs, &addr, dbgi, block,
                  in, &n_in, new_val, insn_size_src);
    assert(n_in < (int)ARRAY_SIZE(in));

    create_mov_func new_mov = insn_size_dest < X86_SIZE_64
...
@@ -99,7 +99,7 @@ void arm_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
        break;
    case ARM_SHF_IMM:
        fprintf(F, "modifier = imm %d ror %d\n",
                attr->immediate_value, attr->shift_immediate);
        break;
    case ARM_SHF_ASR_IMM:
        fprintf(F, "modifier = V >>s %d\n", attr->shift_immediate);
...
@@ -506,7 +506,7 @@ static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2,
        new_op2 = new_bd_arm_Rsb_imm(dbgi, block, new_op2, 32, 0);
    }
    return new_bd_arm_Mov_reg_shift_reg(dbgi, block, new_op1, new_op2,
                                        ARM_SHF_ROR_REG);
}

static bool is_low_mask(ir_tarval *tv)
...
@@ -329,7 +329,7 @@ static void assign(ir_node *const block, void *const env_ptr)
    DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
    foreach_border_head(head, b) {
        DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
             b->irn, get_irn_idx(b->irn)));
    }

    bitset_t *const available = bitset_alloca(env->allocatable_regs->size);
...
@@ -322,8 +322,8 @@ static int ou_max_ind_set_costs(unit_t *const ou)
        /* check if curr is a stable set */
        for (int i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
            for (int o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
                if (be_values_interfere(unsafe[i], unsafe[o]))
                    goto no_stable_set;

        /* if we arrive here, we have a stable set */
        /* compute the weight of the stable set*/

@@ -336,7 +336,7 @@ static int ou_max_ind_set_costs(unit_t *const ou)
            best_weight = curr_weight;
        }

no_stable_set:
        bitset_minus1(curr);
    }
}

@@ -442,7 +442,7 @@ static void co_collect_units(ir_node *irn, void *env)
        if (other & (1U << i)) {
            ir_node *o = get_irn_n(skip_Proj(irn), i);
            if (!arch_irn_is_ignore(o) &&
                !be_values_interfere(irn, o)) {
                unit->nodes[k] = o;
                unit->costs[k] = co->get_costs(irn, -1);
                ++k;
...
@@ -593,8 +593,8 @@ static void emit_visibility(const ir_entity *entity, bool implicit_globl)
        emit_symbol_directive(directive, entity);

    if (is_macho()
        && (linkage & IR_LINKAGE_HIDDEN_USER)
        && get_entity_ld_name(entity)[0] != '\0') {
        emit_symbol_directive(".no_dead_strip", entity);
    }
}

@@ -1060,7 +1060,7 @@ static void emit_ir_initializer(normal_or_bitfield *vals,
        if (bitfield_size > 0) {
            unsigned offset_bits = get_entity_bitfield_offset(member);
            emit_bitfield(&vals[offset], offset_bits, bitfield_size,
                          sub_initializer, subtype);
            continue;
        }
...
@@ -611,12 +611,11 @@ void be_step_last(ir_graph *irg)
        for (be_timer_id_t t = T_FIRST; t < T_LAST+1; ++t) {
            char buf[128];
            snprintf(buf, sizeof(buf), "bemain_time_%s",
                     get_timer_name(t));
            stat_ev_dbl(buf, ir_timer_elapsed_usec(be_timers[t]));
        }
    } else {
-       printf("==>> IRG %s <<==\n",
-              get_entity_name(get_irg_entity(irg)));
+       printf("==>> IRG %s <<==\n", get_entity_name(get_irg_entity(irg)));
        for (be_timer_id_t t = T_FIRST; t < T_LAST+1; ++t) {
            double val = ir_timer_elapsed_usec(be_timers[t]) / 1000.0;
            printf("%-20s: %10.3f msec\n", get_timer_name(t), val);
...
@@ -289,7 +289,7 @@ static void analyze_block(ir_node *block, void *data)
        allocation_info_t *info = get_allocation_info(node);
        if (get_irn_arity(node) >= (int)sizeof(info->last_uses) * 8) {
            panic("node with more than %d inputs not supported yet",
                  (int) sizeof(info->last_uses) * 8);
        }

        /* mark last uses */

@@ -571,7 +571,7 @@ static bool try_optimistic_split(ir_node *to_split, ir_node *before,
    delta = pref_delta + prefs[i].pref;
    if (delta < split_threshold) {
        DB((dbg, LEVEL_3, "Not doing optimistical split of %+F (depth %d), win %f too low\n",
            to_split, recursion, delta));
        return false;
    }

@@ -1176,7 +1176,7 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
            continue;

        /* livethrough values may not use constrainted output registers */
        if (rbitset_is_set(live_through_regs, l)
            && rbitset_is_set(forbidden_regs, r))
            continue;
        hungarian_add(bp, r, l, l == r ? 9 : 8);

@@ -1404,7 +1404,7 @@ static void assign_phi_registers(ir_node *block)
                costs += 10000;
            hungarian_add(bp, n, r, (int)costs);
            DB((dbg, LEVEL_3, " %s(%f)", arch_register_for_index(cls, r)->name,
                info->prefs[r]));
        }
        DB((dbg, LEVEL_3, "\n"));
        ++n;
...
@@ -780,7 +780,7 @@ static void fix_block_borders(ir_node *block, void *data)
            continue;

        if (move_spills && be_is_live_in(lv, block, node)
            && !pred_end_workset->vals[iter].spilled) {
            ir_node *insert_point;
            if (n_cfgpreds > 1) {
                insert_point = be_get_end_of_block_insertion_point(pred);
...
@@ -514,8 +514,8 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
    }

    DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,