Commit 5a5b0a88 authored by Matthias Braun's avatar Matthias Braun
Browse files

replace psets with arrays to make the compiler more predictable across runs...

replace psets with arrays to make the compiler more predictable across runs (and a little bit more efficient)

[r27820]
parent b7388535
......@@ -431,22 +431,18 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
int n_stack_params = 0;
int n_ins;
pset_new_t destroyed_regs, states;
pset_new_iterator_t iter;
const arch_register_t **states = NEW_ARR_F(const arch_register_t*, 0);
const arch_register_t **destroyed_regs = NEW_ARR_F(const arch_register_t*, 0);
ir_node *low_call;
ir_node **in;
ir_node **res_projs;
int n_reg_results = 0;
const arch_register_t *reg;
const ir_edge_t *edge;
int *reg_param_idxs;
int *stack_param_idx;
int i, n, destroy_all_regs;
dbg_info *dbgi;
pset_new_init(&destroyed_regs);
pset_new_init(&states);
/* Let the isa fill out the abi description for that call node. */
arch_env_get_call_abi(arch_env, call_tp, call);
......@@ -596,30 +592,34 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
destroy_all_regs = 1;
}
/* Put caller save into the destroyed set and state registers in the states set */
/* Put caller save into the destroyed set and state registers in the states
* set */
for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
unsigned j;
const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
for (j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = arch_register_for_index(cls, j);
if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
if (! arch_register_type_is(reg, ignore))
pset_new_insert(&destroyed_regs, (void *) reg);
}
/* even if destroyed all is specified, neither SP nor FP are
* destroyed (else bad things will happen) */
if (reg == arch_env->sp || reg == arch_env->bp)
continue;
if (arch_register_type_is(reg, state)) {
pset_new_insert(&destroyed_regs, (void*) reg);
pset_new_insert(&states, (void*) reg);
ARR_APP1(const arch_register_t*, destroyed_regs, reg);
ARR_APP1(const arch_register_t*, states, reg);
/* we're already in the destroyed set so no need for further
* checking */
continue;
}
if (destroy_all_regs || arch_register_type_is(reg, caller_save)) {
if (! arch_register_type_is(reg, ignore)) {
ARR_APP1(const arch_register_t*, destroyed_regs, reg);
}
}
}
}
if (destroy_all_regs) {
/* even if destroyed all is specified, neither SP nor FP are destroyed (else bad things will happen) */
pset_new_remove(&destroyed_regs, arch_env->sp);
pset_new_remove(&destroyed_regs, arch_env->bp);
}
/* search the largest result proj number */
res_projs = ALLOCANZ(ir_node*, n_res);
......@@ -651,7 +651,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
n_reg_results = n_res;
n_ins = 0;
in = ALLOCAN(ir_node*, n_reg_params + pset_new_size(&states));
in = ALLOCAN(ir_node*, n_reg_params + ARR_LEN(states));
/* make the back end call node and set its register requirements. */
for (i = 0; i < n_reg_params; ++i) {
......@@ -659,7 +659,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
}
/* add state registers ins */
foreach_pset_new(&states, reg, iter) {
for (i = 0; i < ARR_LEN(states); ++i) {
const arch_register_t *reg = states[i];
const arch_register_class_t *cls = arch_register_get_class(reg);
#if 0
ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
......@@ -668,19 +669,19 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
ir_node *regnode = new_r_Unknown(irg, arch_register_class_mode(cls));
in[n_ins++] = regnode;
}
assert(n_ins == (int) (n_reg_params + pset_new_size(&states)));
assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));
/* ins collected, build the call */
if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
/* direct call */
low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
n_ins, in, get_Call_type(irn));
be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
} else {
/* indirect call */
low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
n_reg_results + pn_be_Call_first_res + pset_new_size(&destroyed_regs),
n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
n_ins, in, get_Call_type(irn));
}
be_Call_set_pop(low_call, call->pop);
......@@ -721,7 +722,16 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
}
if (arg->in_reg) {
pset_new_remove(&destroyed_regs, arg->reg);
/* remove register from destroyed regs */
int j;
int n = ARR_LEN(destroyed_regs);
for (j = 0; j < n; ++j) {
if (destroyed_regs[j] == arg->reg) {
destroyed_regs[j] = destroyed_regs[n-1];
ARR_SHRINKLEN(destroyed_regs,n-1);
break;
}
}
}
}
......@@ -763,22 +773,21 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
/* Make additional projs for the caller save registers
and the Keep node which keeps them alive. */
{
const arch_register_t *reg;
ir_node **in, *keep;
int i;
int n = 0;
int curr_res_proj = pn_be_Call_first_res + n_reg_results;
pset_new_iterator_t iter;
int n_ins;
n_ins = (int)pset_new_size(&destroyed_regs) + n_reg_results + 1;
n_ins = ARR_LEN(destroyed_regs) + n_reg_results + 1;
in = ALLOCAN(ir_node *, n_ins);
/* also keep the stack pointer */
set_irn_link(curr_sp, (void*) sp);
in[n++] = curr_sp;
foreach_pset_new(&destroyed_regs, reg, iter) {
for (i = 0; i < ARR_LEN(destroyed_regs); ++i) {
const arch_register_t *reg = destroyed_regs[i];
ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
......@@ -833,8 +842,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
be_abi_call_free(call);
pset_new_destroy(&states);
pset_new_destroy(&destroyed_regs);
DEL_ARR_F(states);
DEL_ARR_F(destroyed_regs);
return curr_sp;
}
......
......@@ -43,7 +43,6 @@
#include "pqueue.h"
#include "xmalloc.h"
#include "pdeq.h"
#include "pset.h"
#include "irprintf.h"
#include "irbitset.h"
#include "error.h"
......@@ -94,13 +93,14 @@ typedef struct _col_cost_t {
* An affinity chunk.
*/
typedef struct _aff_chunk_t {
	const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
	const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
	const ir_node **interfere; /**< An ARR_F containing all interfering nodes (presumably nodes this chunk interferes with — TODO confirm against users of this field). */
	int weight; /**< Weight (affinity benefit) of this chunk. */
	unsigned weight_consistent : 1; /**< Set if the weight is consistent (i.e. does not need recomputation). */
	unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
	unsigned id; /**< An id of this chunk. */
	unsigned visited; /**< Visited counter for graph walks over chunks. */
	list_head list; /**< List node linking this chunk into the env's chunklist (replaces the former pset membership). */
	col_cost_t color_affinity[1];
} aff_chunk_t;
......@@ -120,7 +120,7 @@ typedef struct _co_mst_env_t {
bitset_t *ignore_regs; /**< set containing all global ignore registers */
ir_phase ph; /**< phase object holding data for nodes */
pqueue_t *chunks; /**< priority queue for chunks */
pset *chunkset; /**< set holding all chunks */
list_head chunklist; /**< list holding all chunks */
be_ifg_t *ifg; /**< the interference graph */
copy_opt_t *co; /**< the copy opt object */
unsigned chunk_visited;
......@@ -255,6 +255,9 @@ static int cmp_col_cost_gt(const void *a, const void *b)
{
const col_cost_t *c1 = a;
const col_cost_t *c2 = b;
if (c2->cost == c1->cost)
return QSORT_CMP(c1->col, c2->col);
real_t diff = c2->cost - c1->cost;
return (diff > 0) - (diff < 0);
}
......@@ -272,16 +275,16 @@ static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
c->deleted = 0;
c->id = ++last_chunk_id;
c->visited = 0;
pset_insert(env->chunkset, c, c->id);
list_add(&c->list, &env->chunklist);
return c;
}
/**
* Frees all memory allocated by an affinity chunk.
*/
static inline void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
static inline void delete_aff_chunk(aff_chunk_t *c)
{
pset_remove(env->chunkset, c, c->id);
list_del(&c->list);
DEL_ARR_F(c->interfere);
DEL_ARR_F(c->n);
c->deleted = 1;
......@@ -532,7 +535,7 @@ static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node
c1->weight_consistent = 0;
delete_aff_chunk(env, c2);
delete_aff_chunk(c2);
goto absorbed;
}
DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
......@@ -696,7 +699,7 @@ static void build_affinity_chunks(co_mst_env_t *env)
}
/* now insert all chunks into a priority queue */
foreach_pset(env->chunkset, curr_chunk) {
list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
aff_chunk_assure_weight(env, curr_chunk);
DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
......@@ -1315,7 +1318,7 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
while (! waitq_empty(tmp_chunks)) {
aff_chunk_t *tmp = waitq_get(tmp_chunks);
if (tmp != best_chunk)
delete_aff_chunk(env, tmp);
delete_aff_chunk(tmp);
}
del_waitq(tmp_chunks);
......@@ -1399,7 +1402,7 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
}
/* clear obsolete chunks and free some memory */
delete_aff_chunk(env, best_chunk);
delete_aff_chunk(best_chunk);
bitset_free(visited);
if (best_starts)
del_waitq(best_starts);
......@@ -1435,7 +1438,7 @@ static int co_solve_heuristic_mst(copy_opt_t *co)
mst_env.co = co;
mst_env.ignore_regs = ignore_regs;
mst_env.ifg = co->cenv->ifg;
mst_env.chunkset = pset_new_ptr(512);
INIT_LIST_HEAD(&mst_env.chunklist);
mst_env.chunk_visited = 0;
mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
......@@ -1465,7 +1468,7 @@ static int co_solve_heuristic_mst(copy_opt_t *co)
color_aff_chunk(&mst_env, chunk);
DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
delete_aff_chunk(&mst_env, chunk);
delete_aff_chunk(chunk);
}
/* apply coloring */
......@@ -1491,7 +1494,6 @@ static int co_solve_heuristic_mst(copy_opt_t *co)
/* free allocated memory */
del_pqueue(mst_env.chunks);
phase_deinit(&mst_env.ph);
del_pset(mst_env.chunkset);
stat_ev_tim_pop("heur4_total");
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment