Commit a8583be0 authored by sebastian.buchwald1

Fix indentation

parent 52e16f28
@@ -58,7 +58,7 @@ static bool ia32_transform_sub_to_neg_add(ir_node *const irn,
ir_node *const noreg_fp = ia32_new_NoReg_xmm(irg);
res = new_bd_ia32_xXor(dbgi, block, noreg, noreg, nomem, in2, noreg_fp,
- size);
+ size);
ir_entity *entity = ia32_gen_fp_known_const(size == X86_SIZE_32
? ia32_SSIGN : ia32_DSIGN);
ia32_attr_t *const attr = get_ia32_attr(res);
@@ -195,7 +195,7 @@ static void fix_am_source(ir_node *const irn, arch_register_t const *const out_r
return;
/* Only need to fix if the out reg is the same as base or index register. */
if (out_reg != arch_get_irn_register_in(irn, n_ia32_base) &&
- out_reg != arch_get_irn_register_in(irn, n_ia32_index))
+ out_reg != arch_get_irn_register_in(irn, n_ia32_index))
return;
ir_node *const load_res = ia32_turn_back_am(irn);
......
@@ -361,7 +361,7 @@ static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
pn_ia32_l_FloattoLL_res_low);
h_res = new_r_Proj(float_to_ll, mode,
- pn_ia32_l_FloattoLL_res_high);
+ pn_ia32_l_FloattoLL_res_high);
} else {
/* Convert from float to unsigned 64bit. */
ir_graph *irg = get_irn_irg(node);
@@ -410,9 +410,9 @@ static void ia32_lower_conv64(ir_node *node, ir_mode *mode)
float_to_ll = new_bd_ia32_l_FloattoLL(dbg, lower_blk, flt_phi);
l_res = new_r_Proj(float_to_ll, ia32_mode_gp,
- pn_ia32_l_FloattoLL_res_low);
+ pn_ia32_l_FloattoLL_res_low);
h_res = new_r_Proj(float_to_ll, mode,
- pn_ia32_l_FloattoLL_res_high);
+ pn_ia32_l_FloattoLL_res_high);
h_res = new_rd_Add(dbg, lower_blk, h_res, int_phi, mode);
/* move the call and its Proj's to the lower block */
......
@@ -1577,7 +1577,7 @@ static ir_node *gen_Add(ir_node *node)
/* test if we can use source address mode */
ia32_address_mode_t am;
match_arguments(&am, block, op1, op2, NULL, match_commutative
- | match_mode_neutral | match_am | match_immediate | match_try_am);
+ | match_mode_neutral | match_am | match_immediate | match_try_am);
/* construct an Add with source address mode */
if (am.op_type == ia32_AddrModeS)
@@ -1713,7 +1713,7 @@ static ir_node *gen_Eor(ir_node *node)
ir_node *op1 = get_Eor_left(node);
ir_node *op2 = get_Eor_right(node);
return gen_binop(node, op1, op2, new_bd_ia32_Xor, match_commutative
- | match_mode_neutral | match_am | match_immediate);
+ | match_mode_neutral | match_am | match_immediate);
}
/**
@@ -2323,8 +2323,8 @@ static ir_node *gen_Load(ir_node *node)
if (!get_irn_pinned(node)) {
assert((int)pn_ia32_xLoad_res == (int)pn_ia32_fld_res
- && (int)pn_ia32_fld_res == (int)pn_ia32_Load_res
- && (int)pn_ia32_Load_res == (int)pn_ia32_res);
+ && (int)pn_ia32_fld_res == (int)pn_ia32_Load_res
+ && (int)pn_ia32_Load_res == (int)pn_ia32_res);
arch_add_irn_flags(new_node, arch_irn_flag_rematerializable);
}
@@ -3161,7 +3161,7 @@ static ir_node *create_CMov(ir_node *node, ir_node *flags, ir_node *new_flags,
ia32_address_mode_t am;
ir_node *block = get_nodes_block(node);
match_arguments(&am, block, val_false, val_true, flags,
- match_commutative | match_am | match_16bit_am | match_mode_neutral);
+ match_commutative | match_am | match_16bit_am | match_mode_neutral);
if (am.ins_permuted)
cc = x86_negate_condition_code(cc);
@@ -3560,7 +3560,7 @@ static ir_node *gen_Mux(ir_node *node)
.entity = array,
},
.variant = be_options.pic_style != BE_PIC_NONE
- ? X86_ADDR_BASE_INDEX : X86_ADDR_INDEX,
+ ? X86_ADDR_BASE_INDEX : X86_ADDR_INDEX,
.base = get_global_base(irg),
.index = new_node,
.mem = nomem,
@@ -3809,7 +3809,7 @@ static void store_gp(dbg_info *dbgi, ia32_address_mode_t *am, ir_node *block,
return;
} else if (possible_int_mode_for_fp(mode)) {
match_arguments(am, block, NULL, value, NULL,
- match_am | match_try_am | match_sign_ext | match_16bit_am);
+ match_am | match_try_am | match_sign_ext | match_16bit_am);
if (am->op_type == ia32_AddrModeS)
return;
}
@@ -4483,7 +4483,7 @@ static ir_node *gen_ia32_l_LLtoFloat(ir_node *node)
ia32_address_mode_t am = {
.addr = {
.variant = be_options.pic_style != BE_PIC_NONE
- ? X86_ADDR_BASE_INDEX : X86_ADDR_INDEX,
+ ? X86_ADDR_BASE_INDEX : X86_ADDR_INDEX,
.base = get_global_base(irg),
.index = new_bd_ia32_Shr(dbgi, block, new_val_high, count,
X86_SIZE_32),
@@ -4526,7 +4526,7 @@ static ir_node *gen_ia32_l_FloattoLL(ir_node *node)
ir_node *new_val = be_transform_node(val);
ir_node *fist = gen_fist(dbgi, block, frame, noreg_GP, nomem, new_val,
- X86_SIZE_64);
+ X86_SIZE_64);
set_irn_pinned(fist, false);
ia32_attr_t *const attr = get_ia32_attr(fist);
attr->addr.variant = X86_ADDR_BASE;
@@ -4971,8 +4971,8 @@ static ir_node *gen_Call(ir_node *node)
/* Create node. */
ir_node *const call = new_bd_ia32_Call(dbgi, block, in_arity, in, in_req,
- n_out, cconv->sp_delta,
- n_reg_results);
+ n_out, cconv->sp_delta,
+ n_reg_results);
arch_set_additional_pressure(call, &ia32_reg_classes[CLASS_ia32_gp],
add_pressure);
......
@@ -800,7 +800,7 @@ static void sim_ia32_binop_am(x87_state *const state, ir_node *const node)
x87_set_st(state, get_result_node(node), 0);
} else {
x86_sim_x87_binop(state, node, n_ia32_binary_left, n_ia32_binary_right,
- out);
+ out);
}
ia32_x87_attr_t *const attr = get_ia32_x87_attr(node);
attr->x87.reverse ^= attr->attr.ins_permuted;
......
@@ -227,10 +227,10 @@ static void rewrite_float_unsigned_Conv(ir_node *node)
ir_node *c_const = new_r_Const_long(irg, mode_s, 0x80000000L);
collect_new_start_block_node(c_const);
ir_node *sub = new_rd_Sub(dbgi, true_block, float_x, limitc,
- mode_f);
+ mode_f);
ir_node *sub_conv = new_rd_Conv(dbgi, true_block, sub, mode_s);
ir_node *xorn = new_rd_Eor(dbgi, true_block, sub_conv, c_const,
- mode_s);
+ mode_s);
ir_node *converted = new_rd_Conv(dbgi, false_block, float_x,mode_s);
@@ -447,9 +447,9 @@ static void sparc_generate_code(FILE *output, const char *cup_name)
be_timer_push(T_RA_PREPARATION);
be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_flags],
- NULL, sparc_modifies_flags, NULL);
+ NULL, sparc_modifies_flags, NULL);
be_sched_fix_flags(irg, &sparc_reg_classes[CLASS_sparc_fpflags],
- NULL, sparc_modifies_fp_flags, NULL);
+ NULL, sparc_modifies_fp_flags, NULL);
be_timer_pop(T_RA_PREPARATION);
be_step_regalloc(irg, &sparc_regalloc_if);
......
@@ -133,7 +133,7 @@ static void sparc_emit_offset(const ir_node *node, int offset_node_pos)
be_emit_irprintf("%+"PRId32, offset);
}
} else if (attr->base.immediate_value != 0
- || attr->base.immediate_value_entity != NULL) {
+ || attr->base.immediate_value_entity != NULL) {
be_emit_char('+');
sparc_emit_immediate(attr->base.immediate_value,
attr->base.immediate_value_entity);
......
@@ -110,7 +110,7 @@ static void sparc_introduce_prolog_epilog(ir_graph *irg, bool omit_fp)
if (!omit_fp) {
ir_node *const save = new_bd_sparc_Save_imm(NULL, block, initial_sp,
- NULL, -frame_size-SPARC_MIN_STACKSIZE);
+ NULL, -frame_size-SPARC_MIN_STACKSIZE);
arch_set_irn_register(save, sp_reg);
sched_add_after(start, save);
......
@@ -1050,8 +1050,8 @@ static ir_node *gen_helper_bitop(ir_node *node,
}
}
return gen_helper_binop_args(node, op1, op2,
- flags | MATCH_COMMUTATIVE,
- new_reg, new_imm);
+ flags | MATCH_COMMUTATIVE,
+ new_reg, new_imm);
}
static ir_node *gen_And(ir_node *node)
@@ -1676,7 +1676,7 @@ static void bitcast_float_to_int(dbg_info *dbgi, ir_node *block,
if (bits == 64) {
ir_node *ld2 = new_bd_sparc_Ld_imm(dbgi, block, stack, stf, mode_gp,
- NULL, 4, true);
+ NULL, 4, true);
set_irn_pinned(ld, false);
result[1] = be_new_Proj(ld2, pn_sparc_Ld_res);
......
@@ -758,15 +758,15 @@ static struct magicu_info compute_unsigned_magic_info(ir_tarval *divisor,
* Note that exponent may be larger than the maximum shift supported,
* so the check for >= ceil_log_2_D is critical. */
if ((exponent + extra_shift >= ceil_log_2_D) ||
- /* (divisor - remainder) <= (1 << exponent + extra_shift) */
- (CMP(SUB(divisor, remainder), SHL(ONE(mode), exponent + extra_shift)) & ir_relation_less_equal))
+ /* (divisor - remainder) <= (1 << exponent + extra_shift) */
+ (CMP(SUB(divisor, remainder), SHL(ONE(mode), exponent + extra_shift)) & ir_relation_less_equal))
break;
/* Set magic_down if we have not set it yet and this exponent works for
* the round_down algorithm */
if (!has_magic_down &&
- (CMP(remainder, SHL(ONE(mode), exponent + extra_shift)) &
- ir_relation_less_equal)) {
+ (CMP(remainder, SHL(ONE(mode), exponent + extra_shift)) &
+ ir_relation_less_equal)) {
has_magic_down = 1;
down_multiplier = quotient;
down_exponent = exponent;
@@ -1041,7 +1041,7 @@ ir_node *arch_dep_replace_mod_by_const(ir_node *irn)
} else { /* unsigned case */
ir_tarval *k_val
= tarval_shr_unsigned(get_mode_all_one(mode),
- get_mode_size_bits(mode)-k);
+ get_mode_size_bits(mode) - k);
ir_node *k_node = new_r_Const(irg, k_val);
res = new_rd_And(dbg, block, left, k_node, mode);
}
......
@@ -552,7 +552,7 @@ static int node_floats(const ir_node *n)
static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *post, void *env)
{
if ((flags & ir_dump_flag_all_anchors)
- || ((flags & ir_dump_flag_iredges) && edges_activated(irg))) {
+ || ((flags & ir_dump_flag_iredges) && edges_activated(irg))) {
irg_walk_anchors(irg, pre, post, env);
} else {
irg_walk_graph(irg, pre, post, env);
@@ -1509,7 +1509,7 @@ static void dump_class_hierarchy_node(ir_type *const tp, ir_entity *const ent, v
if (!is_Method_type(get_entity_type(ent)))
return; /* GL */
if (flags & ir_dump_flag_entities_in_hierarchy
- && is_Class_type(get_entity_owner(ent))) {
+ && is_Class_type(get_entity_owner(ent))) {
/* The node */
dump_entity_node(F, ent);
/* The edges */
@@ -1757,7 +1757,7 @@ void dump_ir_graph_file(FILE *out, ir_graph *irg)
/* dump the out edges in a separate walk */
if ((flags & ir_dump_flag_out_edges)
- && (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))) {
+ && (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))) {
irg_out_walk(get_irg_start(irg), dump_out_edge, NULL, out);
}
......
@@ -219,7 +219,7 @@ static void add_edge(ir_node *src, int pos, ir_node *tgt, ir_edge_kind_t kind,
irn_edge_info_t *tgt_info = get_irn_edge_info(tgt, kind);
struct list_head *head = &tgt_info->outs_head;
assert(head->next && head->prev &&
- "target list head must have been initialized");
+ "target list head must have been initialized");
/* The old target was NULL, thus, the edge is newly created. */
ir_edge_t *edge;
@@ -299,7 +299,7 @@ static void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt, ir_node
irn_edge_info_t *tgt_info = get_irn_edge_info(tgt, kind);
struct list_head *head = &tgt_info->outs_head;
assert(head->next && head->prev &&
- "target list head must have been initialized");
+ "target list head must have been initialized");
/* Initialize the edge template to search in the set. */
ir_edge_t templ;
......
@@ -630,7 +630,7 @@ static int verify_node_Call(const ir_node *n)
fine &= check_input_mode(n, n_Call_max+1+i, NULL, mode);
} else {
fine &= check_input_func(n, n_Call_max+1+i, NULL,
- mode_is_reference, "reference");
+ mode_is_reference, "reference");
}
} else {
fine &= check_input_func(n, n_Call_max+1+i, NULL,
......
@@ -316,12 +316,12 @@ void solve_pbqp_brute_force(pbqp_t *pbqp)
fh = fopen("solutions.pb", "a");
#if KAPS_USE_UNSIGNED
fprintf(fh, ": %u RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#else
fprintf(fh, ": %lld RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_bf);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_bf);
#endif
fclose(fh);
#endif
......
@@ -19,7 +19,7 @@
int edge_bucket_contains(pbqp_edge_bucket_t bucket, pbqp_edge_t *edge)
{
return edge->bucket_index < edge_bucket_get_length(bucket)
- && bucket[edge->bucket_index] == edge;
+ && bucket[edge->bucket_index] == edge;
}
void edge_bucket_free(pbqp_edge_bucket_t *bucket)
@@ -66,7 +66,7 @@ void node_bucket_shrink(pbqp_node_bucket_t *bucket, unsigned len)
int node_bucket_contains(pbqp_node_bucket_t bucket, pbqp_node_t *node)
{
return node->bucket_index < node_bucket_get_length(bucket)
- && bucket[node->bucket_index] == node;
+ && bucket[node->bucket_index] == node;
}
void node_bucket_copy(pbqp_node_bucket_t *dst, pbqp_node_bucket_t src)
......
@@ -108,12 +108,12 @@ void solve_pbqp_heuristical(pbqp_t *pbqp)
fh = fopen("solutions.pb", "a");
#if KAPS_USE_UNSIGNED
fprintf(fh, ": %u RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#else
fprintf(fh, ": %lld RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#endif
fclose(fh);
#endif
......
@@ -189,12 +189,12 @@ void solve_pbqp_heuristical_co(pbqp_t *pbqp, plist_t *rpeo)
fh = fopen("solutions.pb", "a");
#if KAPS_USE_UNSIGNED
fprintf(fh, ": %u RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#else
fprintf(fh, ": %lld RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#endif
fclose(fh);
#endif
......
@@ -337,12 +337,12 @@ void solve_pbqp_heuristical_co_ld(pbqp_t *pbqp, plist_t *rpeo)
fh = fopen("solutions.pb", "a");
#if KAPS_USE_UNSIGNED
fprintf(fh, ": %u RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#else
fprintf(fh, ": %lld RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
- pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
- pbqp->num_rm, pbqp->num_rn);
+ pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
+ pbqp->num_rm, pbqp->num_rn);
#endif
fclose(fh);
#endif
......
@@ -47,7 +47,7 @@ static void dump_vector(FILE *f, vector_t *vec)
for (unsigned index = 0; index < len; ++index) {
#if KAPS_ENABLE_VECTOR_NAMES
fprintf(f, "<span title=\"%s\">%s</span> ",
- vec->entries[index].name, cost2a(vec->entries[index].data));
+ vec->entries[index].name, cost2a(vec->entries[index].data));
#else
fprintf(f, "%s ", cost2a(vec->entries[index].data));
#endif
@@ -82,7 +82,7 @@ void pbqp_dump_edge(FILE *file, pbqp_edge_t *edge)
{
fputs("<tex>\n", file);
fprintf(file, "\t\\overline\n{C}_{%u,%u}=\n",
- edge->src->index, edge->tgt->index);
+ edge->src->index, edge->tgt->index);
dump_matrix(file, edge->costs);
fputs("</tex><br>", file);
}
@@ -170,7 +170,7 @@ void pbqp_dump_graph(pbqp_t *pbqp)
if (src_index < tgt_index) {
fprintf(pbqp->dump_file, "\t n%u -- n%u;\n", src_index,
- tgt_index);
+ tgt_index);
}
}
}
......
@@ -171,8 +171,8 @@ void pbqp_matrix_sub_col_value(pbqp_matrix_t *matrix, unsigned col_index,
continue;
}
/* inf - x = inf if x < inf */
- if (matrix->entries[row_index * col_len + col_index] == INF_COSTS && value
- != INF_COSTS)
+ if (matrix->entries[row_index * col_len + col_index] == INF_COSTS
+ && value != INF_COSTS)
continue;
matrix->entries[row_index * col_len + col_index] -= value;
}
@@ -235,8 +235,8 @@ void pbqp_matrix_sub_row_value(pbqp_matrix_t *matrix, unsigned row_index,
continue;
}
/* inf - x = inf if x < inf */
- if (matrix->entries[row_index * col_len + col_index] == INF_COSTS && value
- != INF_COSTS)
+ if (matrix->entries[row_index * col_len + col_index] == INF_COSTS
+ && value != INF_COSTS)
continue;
matrix->entries[row_index * col_len + col_index] -= value;
}
@@ -278,8 +278,7 @@ void pbqp_matrix_add_to_all_cols(pbqp_matrix_t *mat, vector_t *vec)
num value = vec->entries[row_index].data;
for (unsigned col_index = 0; col_index < col_len; ++col_index) {
- mat->entries[row_index * col_len + col_index] = pbqp_add(
- mat->entries[row_index * col_len + col_index], value);
+ mat->entries[row_index * col_len + col_index] = pbqp_add(mat->entries[row_index * col_len + col_index], value);
}
}
}
......
@@ -138,8 +138,7 @@ static void normalize_towards_source(pbqp_edge_t *edge)
}
pbqp_matrix_sub_row_value(mat, src_index, tgt_vec, min);
- src_vec->entries[src_index].data = pbqp_add(
- src_vec->entries[src_index].data, min);
+ src_vec->entries[src_index].data = pbqp_add(src_vec->entries[src_index].data, min);
if (min == INF_COSTS) {
new_infinity = 1;
@@ -185,8 +184,7 @@ static void normalize_towards_target(pbqp_edge_t *edge)
}
pbqp_matrix_sub_col_value(mat, tgt_index, src_vec, min);
- tgt_vec->entries[tgt_index].data = pbqp_add(
- tgt_vec->entries[tgt_index].data, min);
+ tgt_vec->entries[tgt_index].data = pbqp_add(tgt_vec->entries[tgt_index].data, min);
if (min == INF_COSTS) {
new_infinity = 1;
......