Commit 034cf31a authored by Matthias Braun

cleanup, use C99

parent 05afbfae
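
The pattern repeated throughout the diff below is plain C99 modernization: block-top C89 declarations move to the point of first use, loop counters move into the for-init (usually together with a hoisted bound such as n_ops, so the limit is read once instead of on every iteration), and count/index types are tightened from int to unsigned or size_t. A minimal before/after sketch of the idiom (illustrative only; insn_t and the function names are invented stand-ins, not libfirm types):

#include <stdio.h>

/* Hypothetical stand-in for the kind of struct the patch touches. */
typedef struct {
    int use_start;
    int n_ops;
    int ops[8];
} insn_t;

/* Before: C89 style. Declarations sit at the top of the block and the
 * loop bound insn->n_ops is re-evaluated on every iteration. */
static int sum_uses_c89(const insn_t *insn)
{
    int i;
    int sum = 0;
    for (i = insn->use_start; i < insn->n_ops; ++i)
        sum += insn->ops[i];
    return sum;
}

/* After: C99 style. The counter is scoped to the loop and the bound is
 * hoisted into the for-init, mirroring the pair_up_operands() hunks below. */
static int sum_uses_c99(const insn_t *insn)
{
    int sum = 0;
    for (int i = insn->use_start, n_ops = insn->n_ops; i < n_ops; ++i)
        sum += insn->ops[i];
    return sum;
}

int main(void)
{
    insn_t insn = { .use_start = 1, .n_ops = 3, .ops = { 7, 1, 2 } };
    /* Both variants compute the same result; prints "3 3". */
    printf("%d %d\n", sum_uses_c89(&insn), sum_uses_c99(&insn));
    return 0;
}

The same line of cleanup shows up in the non-loop hunks: char buf[256] buffers shrink in scope to the if blocks that actually use them, and counts that can never be negative (numbers of copykeeps, ARR_LEN results) switch to unsigned or size_t.
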
@@ -58,7 +58,7 @@ static void pair_up_operands(be_chordal_env_t const *const env, be_insn_t *const
be_operand_t *smallest = NULL;
int smallest_n_regs = n_regs + 1;
be_operand_t *const out_op = &insn->ops[j];
for (int i = insn->use_start; i < insn->n_ops; ++i) {
for (int i = insn->use_start, n_ops = insn->n_ops; i < n_ops; ++i) {
be_operand_t *const op = &insn->ops[i];
if (op->partner || be_value_live_after(op->carrier, insn->irn))
continue;
@@ -73,7 +73,7 @@ static void pair_up_operands(be_chordal_env_t const *const env, be_insn_t *const
}
if (smallest != NULL) {
for (int i = insn->use_start; i < insn->n_ops; ++i) {
for (int i = insn->use_start, n_ops = insn->n_ops; i < n_ops; ++i) {
if (insn->ops[i].carrier == smallest->carrier)
insn->ops[i].partner = out_op;
}
@@ -122,7 +122,7 @@ static void handle_constraints(be_chordal_env_t *const env, ir_node *const irn)
#else
bipartite_t *const bp = bipartite_new(n_regs, n_regs);
#endif
for (int i = 0; i < insn->n_ops; ++i) {
for (int i = 0, n_ops = insn->n_ops; i < n_ops; ++i) {
/* If the operand has no partner or the partner has not been marked
* for allocation, determine the admissible registers and mark it
* for allocation by associating the node and its partner with the
@@ -306,11 +306,11 @@ static void assign(ir_node *const block, void *const env_ptr)
assert(reg && "Register must have been assigned");
bitset_set(available, reg->index);
} else {
int col;
arch_register_t const *reg = arch_get_irn_register(irn);
/* All live-ins must have a register assigned. (The dominators were
* allocated before.) */
assert(b->is_real || reg);
unsigned col;
if (reg) {
DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));
col = reg->index;
@@ -330,9 +330,7 @@ static void assign(ir_node *const block, void *const env_ptr)
static void be_ra_chordal_color(be_chordal_env_t *const chordal_env)
{
char buf[256];
ir_graph *const irg = chordal_env->irg;
assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
be_assure_live_sets(irg);
@@ -342,6 +340,7 @@ static void be_ra_chordal_color(be_chordal_env_t *const chordal_env)
dom_tree_walk_irg(irg, constraints, NULL, chordal_env);
if (chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
char buf[256];
snprintf(buf, sizeof(buf), "%s-constr", chordal_env->cls->name);
dump_ir_graph(irg, buf);
}
@@ -355,6 +354,7 @@ static void be_ra_chordal_color(be_chordal_env_t *const chordal_env)
dom_tree_walk_irg(irg, assign, NULL, chordal_env);
if (chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
char buf[256];
ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
plotter_t *const plotter = new_plotter_ps(buf);
draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
......
@@ -127,7 +127,7 @@ static void dump(unsigned mask, ir_graph *irg,
const char *suffix)
{
if ((options.dump_flags & mask) == mask) {
if (cls) {
if (cls != NULL) {
char buf[256];
snprintf(buf, sizeof(buf), "%s-%s", cls->name, suffix);
dump_ir_graph(irg, buf);
@@ -168,22 +168,21 @@ void check_for_memory_operands(ir_graph *irg)
irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
}
static be_node_stats_t last_node_stats;
/**
* Perform things which need to be done per register class before spilling.
*/
static void pre_spill(be_chordal_env_t *const chordal_env, arch_register_class_t const *const cls, ir_graph *const irg)
static void pre_spill(be_chordal_env_t *const chordal_env,
arch_register_class_t const *const cls,
ir_graph *const irg)
{
chordal_env->cls = cls;
chordal_env->border_heads = pmap_create();
chordal_env->allocatable_regs = bitset_malloc(cls->n_regs);
be_assure_live_chk(irg);
/* put all ignore registers into the ignore register set. */
be_get_allocatable_regs(irg, cls, chordal_env->allocatable_regs->data);
be_assure_live_chk(irg);
}
/**
@@ -191,11 +190,9 @@ static void pre_spill(be_chordal_env_t *const chordal_env, arch_register_class_t
*/
static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg)
{
/*
If we have a backend provided spiller, post spill is
called in a loop after spilling for each register class.
But we only need to fix stack nodes once in this case.
*/
/* If we have a backend provided spiller, post spill is
* called in a loop after spilling for each register class.
* But we only need to fix stack nodes once in this case. */
be_timer_push(T_RA_SPILL_APPLY);
check_for_memory_operands(irg);
be_timer_pop(T_RA_SPILL_APPLY);
@@ -221,14 +218,13 @@ static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg)
be_timer_pop(T_RA_IFG);
if (stat_ev_enabled) {
be_ifg_stat_t stat;
be_node_stats_t node_stats;
be_ifg_stat_t stat;
be_ifg_stat(irg, chordal_env->ifg, &stat);
stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);
be_node_stats_t node_stats;
be_collect_node_stats(&node_stats, irg);
be_subtract_node_stats(&node_stats, &last_node_stats);
@@ -272,10 +268,6 @@ static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg)
*/
static void be_ra_chordal_main(ir_graph *irg)
{
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
int j;
int m;
be_timer_push(T_RA_OTHER);
be_chordal_env_t chordal_env;
@@ -286,20 +278,18 @@ static void be_ra_chordal_main(ir_graph *irg)
chordal_env.ifg = NULL;
chordal_env.allocatable_regs = NULL;
if (stat_ev_enabled) {
if (stat_ev_enabled)
be_collect_node_stats(&last_node_stats, irg);
}
/* use one of the generic spiller */
/* Perform the following for each register class. */
for (j = 0, m = arch_env->n_register_classes; j < m; ++j) {
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
for (int j = 0, m = arch_env->n_register_classes; j < m; ++j) {
const arch_register_class_t *cls = &arch_env->register_classes[j];
if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
continue;
stat_ev_ctx_push_str("bechordal_cls", cls->name);
double pre_spill_cost = 0;
@@ -313,20 +303,16 @@ static void be_ra_chordal_main(ir_graph *irg)
be_timer_push(T_RA_SPILL);
be_do_spill(irg, cls);
be_timer_pop(T_RA_SPILL);
dump(BE_CH_DUMP_SPILL, irg, cls, "spill");
stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg) - pre_spill_cost);
post_spill(&chordal_env, irg);
if (stat_ev_enabled) {
be_node_stats_t node_stats;
be_collect_node_stats(&node_stats, irg);
be_subtract_node_stats(&node_stats, &last_node_stats);
be_emit_node_stats(&node_stats, "bechordal_");
be_copy_node_stats(&last_node_stats, &node_stats);
stat_ev_ctx_pop("bechordal_cls");
}
@@ -346,16 +332,16 @@ static void be_ra_chordal_main(ir_graph *irg)
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main)
void be_init_chordal_main(void)
{
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
static be_ra_t be_ra_chordal_allocator = {
be_ra_chordal_main,
};
lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
be_register_allocator("chordal", &be_ra_chordal_allocator);
lc_opt_add_table(chordal_grp, be_chordal_options);
be_add_module_list_opt(chordal_grp, "coloring", "select coloring method", &colorings, (void**) &selected_coloring);
be_add_module_list_opt(chordal_grp, "coloring", "select coloring method",
&colorings, (void**) &selected_coloring);
}
@@ -20,15 +20,13 @@
be_insn_t *be_scan_insn(be_chordal_env_t *const env, ir_node *const irn)
{
struct obstack *const obst = &env->obst;
be_operand_t o;
be_insn_t *insn = OALLOCZ(obst, be_insn_t);
bool has_constraints = false;
struct obstack *const obst = &env->obst;
const arch_register_class_t *const cls = env->cls;
const arch_register_class_t *cls = env->cls;
be_insn_t *const insn = OALLOCZ(obst, be_insn_t);
insn->irn = irn;
be_operand_t o;
bool has_constraints = false;
be_foreach_definition(irn, cls, p, req,
/* found a def: create a new operand */
if (arch_register_req_is(req, limited)) {
@@ -43,7 +41,6 @@ be_insn_t *be_scan_insn(be_chordal_env_t *const env, ir_node *const irn)
obstack_grow(obst, &o, sizeof(o));
insn->n_ops++;
);
insn->use_start = insn->n_ops;
/* now collect the uses for this node */
......
@@ -469,7 +469,6 @@ static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, con
if (arch_register_req_is(req, must_be_different)) {
const unsigned other = req->other_different;
int i;
if (arch_register_req_is(req, should_be_same)) {
const unsigned same = req->other_same;
@@ -487,7 +486,7 @@
}
}
}
for (i = 0; 1U << i <= other; ++i) {
for (int i = 0; 1U << i <= other; ++i) {
if (other & (1U << i)) {
ir_node *different_from = get_irn_n(skipped_irn, i);
gen_assure_different_pattern(irn, different_from, env);
@@ -520,20 +519,17 @@ static void assure_constraints_walker(ir_node *block, void *walk_env)
*/
static void melt_copykeeps(constraint_env_t *cenv)
{
ir_nodehashmap_iterator_t map_iter;
ir_nodehashmap_entry_t map_entry;
struct obstack obst;
obstack_init(&obst);
/* for all */
ir_nodehashmap_entry_t map_entry;
ir_nodehashmap_iterator_t map_iter;
foreach_ir_nodehashmap(&cenv->op_set, map_entry, map_iter) {
op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
int idx, num_ck;
struct obstack obst;
ir_node **ck_arr, **melt_arr;
obstack_init(&obst);
/* collect all copykeeps */
num_ck = idx = 0;
unsigned num_ck = 0;
foreach_ir_nodeset(&entry->copies, cp, iter) {
if (be_is_CopyKeep(cp)) {
obstack_grow(&obst, &cp, sizeof(cp));
@@ -547,95 +543,88 @@ static void melt_copykeeps(constraint_env_t *cenv)
}
/* compare each copykeep with all other copykeeps */
ck_arr = (ir_node **)obstack_finish(&obst);
for (idx = 0; idx < num_ck; ++idx) {
ir_node *ref, *ref_mode_T;
if (ck_arr[idx]) {
int j, n_melt;
ir_node *sched_pt = NULL;
n_melt = 1;
ref = ck_arr[idx];
ref_mode_T = skip_Proj(get_irn_n(ref, 1));
obstack_grow(&obst, &ref, sizeof(ref));
DB((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref));
/* check for copykeeps pointing to the same mode_T node as the reference copykeep */
for (j = 0; j < num_ck; ++j) {
ir_node *cur_ck = ck_arr[j];
if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) {
obstack_grow(&obst, &cur_ck, sizeof(cur_ck));
ir_nodeset_remove(&entry->copies, cur_ck);
DB((dbg_constr, LEVEL_1, "\t%+F\n", cur_ck));
ck_arr[j] = NULL;
++n_melt;
sched_remove(cur_ck);
}
ir_node **ck_arr = (ir_node **)obstack_finish(&obst);
for (unsigned idx = 0; idx < num_ck; ++idx) {
if (ck_arr[idx] == NULL)
continue;
int n_melt = 1;
ir_node *ref = ck_arr[idx];
ir_node *ref_mode_T = skip_Proj(get_irn_n(ref, 1));
obstack_grow(&obst, &ref, sizeof(ref));
DB((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref));
/* check for copykeeps pointing to the same mode_T node as the reference copykeep */
for (unsigned j = 0; j < num_ck; ++j) {
ir_node *cur_ck = ck_arr[j];
if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) {
obstack_grow(&obst, &cur_ck, sizeof(cur_ck));
ir_nodeset_remove(&entry->copies, cur_ck);
DB((dbg_constr, LEVEL_1, "\t%+F\n", cur_ck));
ck_arr[j] = NULL;
++n_melt;
sched_remove(cur_ck);
}
ck_arr[idx] = NULL;
}
ck_arr[idx] = NULL;
/* check, if we found some candidates for melting */
if (n_melt == 1) {
DB((dbg_constr, LEVEL_1, "\tno candidate found\n"));
continue;
}
/* check, if we found some candidates for melting */
if (n_melt == 1) {
DB((dbg_constr, LEVEL_1, "\tno candidate found\n"));
continue;
}
ir_nodeset_remove(&entry->copies, ref);
sched_remove(ref);
ir_nodeset_remove(&entry->copies, ref);
sched_remove(ref);
melt_arr = (ir_node **)obstack_finish(&obst);
/* melt all found copykeeps */
ir_node **new_ck_in = ALLOCAN(ir_node*,n_melt);
for (j = 0; j < n_melt; ++j) {
new_ck_in[j] = get_irn_n(melt_arr[j], 1);
ir_node **melt_arr = (ir_node **)obstack_finish(&obst);
/* melt all found copykeeps */
ir_node **new_ck_in = ALLOCAN(ir_node*,n_melt);
for (int j = 0; j < n_melt; ++j) {
new_ck_in[j] = get_irn_n(melt_arr[j], 1);
/* now, we can kill the melted keep, except the */
/* ref one, we still need some information */
if (melt_arr[j] != ref)
kill_node(melt_arr[j]);
}
/* now, we can kill the melted keep, except the */
/* ref one, we still need some information */
if (melt_arr[j] != ref)
kill_node(melt_arr[j]);
}
ir_node *const new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in);
ir_node *const new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in);
#ifdef KEEP_ALIVE_COPYKEEP_HACK
keep_alive(new_ck);
keep_alive(new_ck);
#endif /* KEEP_ALIVE_COPYKEEP_HACK */
/* set register class for all kept inputs */
for (j = 1; j <= n_melt; ++j)
be_node_set_reg_class_in(new_ck, j, entry->cls);
/* set register class for all kept inputs */
for (int j = 1; j <= n_melt; ++j)
be_node_set_reg_class_in(new_ck, j, entry->cls);
ir_nodeset_insert(&entry->copies, new_ck);
ir_nodeset_insert(&entry->copies, new_ck);
/* find scheduling point */
ir_node *sched_pt = ref_mode_T;
do {
/* just walk along the schedule until a non-Keep/CopyKeep node is found */
sched_pt = sched_next(sched_pt);
} while (be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt));
/* find scheduling point */
sched_pt = ref_mode_T;
do {
/* just walk along the schedule until a non-Keep/CopyKeep node is found */
sched_pt = sched_next(sched_pt);
} while (be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt));
sched_add_before(sched_pt, new_ck);
DB((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));
sched_add_before(sched_pt, new_ck);
DB((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));
/* finally: kill the reference copykeep */
kill_node(ref);
/* finally: kill the reference copykeep */
kill_node(ref);
}
obstack_free(&obst, ck_arr);
}
obstack_free(&obst, NULL);
}
obstack_free(&obst, NULL);
}
void assure_constraints(ir_graph *irg)
{
constraint_env_t cenv;
ir_nodehashmap_iterator_t map_iter;
ir_nodehashmap_entry_t map_entry;
FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");
constraint_env_t cenv;
cenv.irg = irg;
ir_nodehashmap_init(&cenv.op_set);
obstack_init(&cenv.obst);
@@ -648,6 +637,8 @@ void assure_constraints(ir_graph *irg)
melt_copykeeps(&cenv);
/* for all */
ir_nodehashmap_iterator_t map_iter;
ir_nodehashmap_entry_t map_entry;
foreach_ir_nodehashmap(&cenv.op_set, map_entry, map_iter) {
op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
size_t n = ir_nodeset_size(&entry->copies);
......
@@ -247,19 +247,13 @@ static void ssa_destruction_check_walker(ir_node *block, void *data)
if (!arch_irn_consider_in_reg_alloc(cls, phi))
continue;
#ifndef NDEBUG
const arch_register_t *phi_reg = arch_get_irn_register(phi);
#endif
/* iterate over all args of phi */
foreach_irn_in(phi, i, arg) {
const arch_register_req_t *req = arch_get_irn_register_req(arg);
if (arch_register_req_is(req, ignore))
continue;
#ifndef NDEBUG
const arch_register_t *arg_reg = arch_get_irn_register(arg);
assert(phi_reg == arg_reg && "Error: Registers of phi and arg differ");
#endif
assert(arch_get_irn_register(phi) == arch_get_irn_register(arg));
}
}
}
......
@@ -50,7 +50,7 @@ int be_get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
*/
static ir_entity *search_ent_with_offset(ir_type *t, int offset)
{
for (int i = 0, n = get_compound_n_members(t); i < n; ++i) {
for (size_t i = 0, n = get_compound_n_members(t); i < n; ++i) {
ir_entity *ent = get_compound_member(t, i);
if (get_entity_offset(ent) == offset)
return ent;
@@ -225,7 +225,6 @@ typedef struct fix_stack_walker_env_t {
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
fix_stack_walker_env_t *const env = (fix_stack_walker_env_t*)data;
if (get_irn_mode(node) == mode_T)
return;
@@ -259,22 +258,19 @@ void be_abi_fix_stack_nodes(ir_graph *irg)
* continue, as for endless loops incsp might have had no users and is bad
* now.
*/
int len = ARR_LEN(walker_env.sp_nodes);
if (len == 0) {
size_t n_sp_nodes = ARR_LEN(walker_env.sp_nodes);
if (n_sp_nodes == 0) {
DEL_ARR_F(walker_env.sp_nodes);
return;
}
be_ssa_construction_env_t senv;
be_ssa_construction_init(&senv, irg);
be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
ARR_LEN(walker_env.sp_nodes));
be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
ARR_LEN(walker_env.sp_nodes));
be_ssa_construction_add_copies(&senv, walker_env.sp_nodes, n_sp_nodes);
be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes, n_sp_nodes);
if (lv != NULL) {
len = ARR_LEN(walker_env.sp_nodes);
for (int i = 0; i < len; ++i) {
for (size_t i = 0; i < n_sp_nodes; ++i) {
be_liveness_update(lv, walker_env.sp_nodes[i]);
}
be_ssa_construction_update_liveness_phis(&senv, lv);
@@ -283,8 +279,7 @@ void be_abi_fix_stack_nodes(ir_graph *irg)
ir_node **phis = be_ssa_construction_get_new_phis(&senv);
/* set register requirements for stack phis */
len = ARR_LEN(phis);
for (int i = 0; i < len; ++i) {
for (size_t i = 0, n_phis = ARR_LEN(phis); i < n_phis; ++i) {
ir_node *phi = phis[i];
be_set_phi_reg_req(phi, sp_req);
arch_set_irn_register(phi, arch_env->sp);
......