Commit 6c3146b9 authored by Matthias Braun
Browse files

backend: cleanup queries for ignore regs

[r28071]
parent 199fcc3a
......@@ -88,16 +88,15 @@ extern unsigned short asm_constraint_flags[256];
void be_init_default_asm_constraint_flags(void);
/**
* Put the registers to be ignored in this IRG into a bitset.
* @param irg The graph
* @param cls The register class.
* @param bs The bitset (may be NULL).
* @return The number of registers to be ignored.
*/
unsigned be_put_ignore_regs(const ir_graph *irg,
const arch_register_class_t *cls, bitset_t *bs);
void be_put_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls, bitset_t *bs);
void be_set_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls,
unsigned *raw_bitset);
unsigned be_get_n_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls);
/**
* Initialize the backend. Must be run first in init_firm();
......
......@@ -2268,17 +2268,42 @@ void be_abi_free(ir_graph *irg)
be_set_irg_abi(irg, NULL);
}
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
void be_put_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls, bitset_t *bs)
{
arch_register_t *reg;
be_abi_irg_t *abi = be_get_irg_abi(irg);
const arch_register_t *reg;
unsigned i;
for (reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
if (reg->reg_class == cls)
assert(bitset_size(bs) == cls->n_regs);
bitset_clear_all(bs);
for (i = 0; i < cls->n_regs; ++i) {
reg = &cls->regs[i];
if (! (reg->type & arch_register_type_ignore))
bitset_set(bs, reg->index);
}
for (reg = pset_first(abi->ignore_regs); reg != NULL;
reg = pset_next(abi->ignore_regs)) {
if (reg->reg_class == cls)
bitset_clear(bs, reg->index);
}
}
unsigned be_get_n_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls)
{
bitset_t *bs = bitset_alloca(cls->n_regs);
be_put_allocatable_regs(irg, cls, bs);
return bitset_popcount(bs);
}
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
void be_set_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls,
unsigned *raw_bitset)
{
be_abi_irg_t *abi = be_get_irg_abi(irg);
unsigned i;
arch_register_t *reg;
......
......@@ -185,13 +185,6 @@ be_abi_irg_t *be_abi_introduce(ir_graph *irg);
void be_abi_free(ir_graph *irg);
/**
* Put the registers which are forbidden specifically for this IRG in a bitset.
*/
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs);
void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset);
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg);
ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg);
......
......@@ -147,16 +147,6 @@ int arch_get_op_estimated_cost(const ir_node *irn)
}
}
void arch_put_non_ignore_regs(const arch_register_class_t *cls, bitset_t *bs)
{
unsigned i;
for (i = 0; i < cls->n_regs; ++i) {
if (!arch_register_type_is(&cls->regs[i], ignore))
bitset_set(bs, i);
}
}
int arch_reg_is_allocatable(const ir_node *irn, int pos,
const arch_register_t *reg)
{
......
......@@ -145,15 +145,6 @@ void arch_perform_memory_operand(ir_node *irn, ir_node *spill,
*/
const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos);
/**
* Put all registers which shall not be ignored by the register
* allocator in a bit set.
* @param cls The register class to consider.
* @param bs The bit set to put the registers to.
*/
extern void arch_put_non_ignore_regs(const arch_register_class_t *cls,
bitset_t *bs);
/**
* Check, if a register is assignable to an operand of a node.
* @param irn The node.
......
......@@ -88,8 +88,9 @@ static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *
{
bitset_t *tmp = alloc_env->tmp_colors;
bitset_copy(tmp, colors);
bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
return bitset_next_clear(tmp, 0);
bitset_flip_all(tmp);
bitset_and(tmp, alloc_env->chordal_env->allocatable_regs);
return bitset_next_set(tmp, 0);
}
static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
......@@ -315,10 +316,7 @@ static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
alloc_nodes[n_alloc] = proj;
pmap_insert(partners, proj, NULL);
bitset_clear_all(bs);
arch_put_non_ignore_regs(env->cls, bs);
bitset_andnot(bs, env->ignore_colors);
bitset_foreach(bs, col) {
bitset_foreach(env->allocatable_regs, col) {
//hungarian_add(bp, n_alloc, col, 1);
bipartite_add(bp, n_alloc, col);
}
......
......@@ -222,9 +222,9 @@ be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
{
be_insn_env_t ie;
ie.ignore_colors = env->ignore_colors;
ie.obst = env->obst;
ie.cls = env->cls;
ie.allocatable_regs = env->allocatable_regs;
ie.obst = env->obst;
ie.cls = env->cls;
return be_scan_insn(&ie, irn);
}
......
......@@ -250,10 +250,10 @@ static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
ir_graph *irg = pse->irg;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
pse->cls = cls;
chordal_env->cls = cls;
chordal_env->border_heads = pmap_create();
chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);
pse->cls = cls;
chordal_env->cls = cls;
chordal_env->border_heads = pmap_create();
chordal_env->allocatable_regs = bitset_malloc(chordal_env->cls->n_regs);
be_assure_liveness(irg);
be_liveness_assure_chk(be_get_irg_liveness(irg));
......@@ -261,7 +261,7 @@ static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, exec_freq));
/* put all ignore registers into the ignore register set. */
be_put_ignore_regs(irg, pse->cls, chordal_env->ignore_colors);
be_put_allocatable_regs(irg, pse->cls, chordal_env->allocatable_regs);
be_timer_push(T_RA_CONSTR);
be_pre_spill_prepare_constr(irg, chordal_env->cls);
......@@ -278,9 +278,7 @@ static void post_spill(post_spill_env_t *pse, int iteration)
be_chordal_env_t *chordal_env = &pse->cenv;
ir_graph *irg = pse->irg;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
int colors_n = arch_register_class_n_regs(chordal_env->cls);
int allocatable_regs
= colors_n - be_put_ignore_regs(irg, chordal_env->cls, NULL);
int allocatable_regs = be_get_n_allocatable_regs(irg, chordal_env->cls);
/* some special classes contain only ignore regs, no work to be done */
if (allocatable_regs > 0) {
......@@ -382,7 +380,7 @@ static void post_spill(post_spill_env_t *pse, int iteration)
/* free some always allocated data structures */
pmap_destroy(chordal_env->border_heads);
bitset_free(chordal_env->ignore_colors);
bitset_free(chordal_env->allocatable_regs);
}
/**
......@@ -406,12 +404,12 @@ static void be_ra_chordal_main(ir_graph *irg)
be_assure_liveness(irg);
chordal_env.obst = &obst;
chordal_env.opts = &options;
chordal_env.irg = irg;
chordal_env.border_heads = NULL;
chordal_env.ifg = NULL;
chordal_env.ignore_colors = NULL;
chordal_env.obst = &obst;
chordal_env.opts = &options;
chordal_env.irg = irg;
chordal_env.border_heads = NULL;
chordal_env.ifg = NULL;
chordal_env.allocatable_regs = NULL;
obstack_init(&obst);
......
......@@ -68,7 +68,7 @@ struct be_chordal_env_t {
const arch_register_class_t *cls; /**< The current register class. */
pmap *border_heads; /**< Maps blocks to border heads. */
be_ifg_t *ifg; /**< The interference graph. */
bitset_t *ignore_colors;/**< A set of colors which shall be ignored in register allocation. */
bitset_t *allocatable_regs; /**< set of allocatable registers */
};
static inline struct list_head *_get_block_border_head(const be_chordal_env_t *inf, ir_node *bl) {
......
......@@ -285,8 +285,7 @@ static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const
int free_col;
/* Get all possible colors */
bitset_copy(free_cols, co->cenv->ignore_colors);
bitset_flip_all(free_cols);
bitset_copy(free_cols, co->cenv->allocatable_regs);
/* Exclude colors not assignable to the irn */
req = arch_get_register_req_out(irn);
......@@ -553,7 +552,7 @@ static void ou_optimize(unit_t *ou)
qnode_t *curr = NULL;
qnode_t *tmp;
const arch_register_req_t *req;
bitset_t const* ignore;
bitset_t const* allocatable_regs;
unsigned n_regs;
unsigned idx;
int i;
......@@ -565,14 +564,14 @@ static void ou_optimize(unit_t *ou)
/* init queue */
INIT_LIST_HEAD(&ou->queue);
req = arch_get_register_req_out(ou->nodes[0]);
ignore = ou->co->cenv->ignore_colors;
n_regs = req->cls->n_regs;
req = arch_get_register_req_out(ou->nodes[0]);
allocatable_regs = ou->co->cenv->allocatable_regs;
n_regs = req->cls->n_regs;
if (arch_register_req_is(req, limited)) {
unsigned const* limited = req->limited;
for (idx = 0; idx != n_regs; ++idx) {
if (bitset_is_set(ignore, idx))
if (!bitset_is_set(allocatable_regs, idx))
continue;
if (!rbitset_is_set(limited, idx))
continue;
......@@ -581,7 +580,7 @@ static void ou_optimize(unit_t *ou)
}
} else {
for (idx = 0; idx != n_regs; ++idx) {
if (bitset_is_set(ignore, idx))
if (!bitset_is_set(allocatable_regs, idx))
continue;
ou_insert_qnode(ou, new_qnode(ou, idx));
......
......@@ -111,7 +111,7 @@ typedef struct {
typedef struct {
ir_phase ph;
copy_opt_t *co;
bitset_t *ignore_regs;
bitset_t *allocatable_regs;
co2_irn_t *touched;
int visited;
int n_regs;
......@@ -264,8 +264,7 @@ static inline bitset_t *get_adm(co2_t *env, co2_irn_t *ci)
}
ci->is_constrained = 1;
} else {
bitset_copy(ci->adm_cache, env->ignore_regs);
bitset_flip_all(ci->adm_cache);
bitset_copy(ci->adm_cache, env->allocatable_regs);
}
}
......@@ -1232,8 +1231,8 @@ int co_solve_heuristic_new(copy_opt_t *co)
env.visited = 0;
env.co = co;
env.n_regs = co->cls->n_regs;
env.ignore_regs = bitset_alloca(co->cls->n_regs);
be_put_ignore_regs(co->cenv->irg, co->cls, env.ignore_regs);
env.allocatable_regs = bitset_alloca(co->cls->n_regs);
be_put_allocatable_regs(co->cenv->irg, co->cls, env.allocatable_regs);
FIRM_DBG_REGISTER(env.dbg, "firm.be.co2");
INIT_LIST_HEAD(&env.cloud_head);
......
......@@ -117,7 +117,7 @@ typedef struct aff_edge_t {
typedef struct co_mst_env_t {
int n_regs; /**< number of regs in class */
int k; /**< number of non-ignore registers in class */
bitset_t *ignore_regs; /**< set containing all global ignore registers */
bitset_t *allocatable_regs; /**< set containing all global ignore registers */
ir_phase ph; /**< phase object holding data for nodes */
pqueue_t *chunks; /**< priority queue for chunks */
list_head chunklist; /**< list holding all chunks */
......@@ -405,7 +405,7 @@ static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn)
bitset_set_all(res->adm_colors);
/* exclude global ignore registers as well */
bitset_andnot(res->adm_colors, env->ignore_regs);
bitset_and(res->adm_colors, env->allocatable_regs);
/* compute the constraint factor */
res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
......@@ -1234,7 +1234,7 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
int n_succeeded;
/* skip ignore colors */
if (bitset_is_set(env->ignore_regs, col))
if (!bitset_is_set(env->allocatable_regs, col))
continue;
DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
......@@ -1416,8 +1416,8 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
*/
static int co_solve_heuristic_mst(copy_opt_t *co)
{
unsigned n_regs = co->cls->n_regs;
bitset_t *ignore_regs = bitset_alloca(n_regs);
unsigned n_regs = co->cls->n_regs;
bitset_t *allocatable_regs = bitset_alloca(n_regs);
unsigned i, j, k;
ir_node *irn;
co_mst_env_t mst_env;
......@@ -1430,18 +1430,18 @@ static int co_solve_heuristic_mst(copy_opt_t *co)
phase_init(&mst_env.ph, co->irg, co_mst_irn_init);
phase_set_private(&mst_env.ph, &mst_env);
k = be_put_ignore_regs(co->cenv->irg, co->cls, ignore_regs);
k = n_regs - k;
be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
k = bitset_popcount(allocatable_regs);
mst_env.n_regs = n_regs;
mst_env.k = k;
mst_env.chunks = new_pqueue();
mst_env.co = co;
mst_env.ignore_regs = ignore_regs;
mst_env.ifg = co->cenv->ifg;
mst_env.n_regs = n_regs;
mst_env.k = k;
mst_env.chunks = new_pqueue();
mst_env.co = co;
mst_env.allocatable_regs = allocatable_regs;
mst_env.ifg = co->cenv->ifg;
INIT_LIST_HEAD(&mst_env.chunklist);
mst_env.chunk_visited = 0;
mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
mst_env.chunk_visited = 0;
mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
for (i = 0; i < n_regs; ++i) {
col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
......
......@@ -578,7 +578,7 @@ int co_solve_ilp2(copy_opt_t *co)
my.normal_colors = bitset_alloca(arch_register_class_n_regs(co->cls));
bitset_clear_all(my.normal_colors);
arch_put_non_ignore_regs(co->cls, my.normal_colors);
be_put_allocatable_regs(co->irg, co->cls, my.normal_colors);
my.n_colors = bitset_popcount(my.normal_colors);
ienv = new_ilp_env(co, ilp2_build, ilp2_apply, &my);
......
......@@ -175,28 +175,14 @@ be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn)
assert(cls == env->cls);
op->regs = bitset_obstack_alloc(obst, env->cls->n_regs);
if (type & arch_register_req_type_limited) {
rbitset_copy_to_bitset(req->limited, op->regs);
bitset_t *regs = bitset_obstack_alloc(obst, env->cls->n_regs);
rbitset_copy_to_bitset(req->limited, regs);
op->regs = regs;
} else {
arch_put_non_ignore_regs(env->cls, op->regs);
if (env->ignore_colors)
bitset_andnot(op->regs, env->ignore_colors);
op->regs = env->allocatable_regs;
}
}
return insn;
}
be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, ir_graph *irg,
const arch_register_class_t *cls,
struct obstack *obst)
{
ie->cls = cls;
ie->obst = obst;
ie->ignore_colors = bitset_obstack_alloc(obst, cls->n_regs);
be_abi_put_ignore_regs(be_get_irg_abi(irg), cls, ie->ignore_colors);
return ie;
}
......@@ -41,7 +41,7 @@ struct be_operand_t {
ir_node *irn; /**< Firm node of the insn this operand belongs to */
ir_node *carrier; /**< node representing the operand value (Proj or the node itself for defs, the used value for uses) */
be_operand_t *partner; /**< used in bechordal later... (TODO what does it do?) */
bitset_t *regs; /**< admissible register bitset */
const bitset_t *regs; /**< admissible register bitset */
int pos; /**< pos of the operand (0 to n are inputs, -1 to -n are outputs) */
const arch_register_req_t *req; /**< register constraints for the carrier node */
unsigned has_constraints : 1; /**< the carrier node has register constraints (the constraint type is limited) */
......@@ -63,7 +63,7 @@ struct be_insn_t {
struct be_insn_env_t {
struct obstack *obst;
const arch_register_class_t *cls;
bitset_t *ignore_colors;
bitset_t *allocatable_regs;
};
#define be_insn_n_defs(insn) ((insn)->use_start)
......@@ -71,6 +71,4 @@ struct be_insn_env_t {
be_insn_t *be_scan_insn(const be_insn_env_t *env, ir_node *irn);
be_insn_env_t *be_insn_env_init(be_insn_env_t *ie, ir_graph *irg, const arch_register_class_t *cls, struct obstack *obst);
#endif /* FIRM_BE_BEINSN_T_H */
......@@ -878,19 +878,3 @@ ir_prog_pass_t *lower_for_target_pass(const char *name)
name ? name : "lower_for_target",
do_lower_for_target);
}
unsigned be_put_ignore_regs(const ir_graph *irg,
const arch_register_class_t *cls, bitset_t *bs)
{
if (bs == NULL)
bs = bitset_alloca(cls->n_regs);
else
bitset_clear_all(bs);
assert(bitset_size(bs) == cls->n_regs);
arch_put_non_ignore_regs(cls, bs);
bitset_flip_all(bs);
be_abi_put_ignore_regs(be_get_irg_abi(irg), cls, bs);
return bitset_popcount(bs);
}
......@@ -87,7 +87,7 @@ typedef struct be_pbqp_alloc_env_t {
ir_graph *irg; /**< The graph under examination. */
const arch_register_class_t *cls; /**< Current processed register class */
be_lv_t *lv;
bitset_t *ignored_regs;
bitset_t *allocatable_regs;
pbqp_matrix *ife_matrix_template;
pbqp_matrix *aff_matrix_template;
plist_t *rpeo;
......@@ -147,7 +147,7 @@ static void create_pbqp_node(be_pbqp_alloc_env_t *pbqp_alloc_env, ir_node *irn)
{
const arch_register_class_t *cls = pbqp_alloc_env->cls;
pbqp *pbqp_inst = pbqp_alloc_env->pbqp_inst;
bitset_t *ignored_regs = pbqp_alloc_env->ignored_regs;
bitset_t *allocatable_regs = pbqp_alloc_env->allocatable_regs;
unsigned colors_n = arch_register_class_n_regs(cls);
unsigned cntConstrains = 0;
......@@ -157,7 +157,7 @@ static void create_pbqp_node(be_pbqp_alloc_env_t *pbqp_alloc_env, ir_node *irn)
/* set costs depending on register constrains */
unsigned idx;
for (idx = 0; idx < colors_n; idx++) {
if (bitset_is_set(ignored_regs, idx) || !arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, idx))) {
if (!bitset_is_set(allocatable_regs, idx) || !arch_reg_out_is_allocatable(irn, arch_register_for_index(cls, idx))) {
/* constrained */
vector_set(costs_vector, idx, INF_COSTS);
cntConstrains++;
......@@ -663,16 +663,16 @@ static void be_pbqp_coloring(be_chordal_env_t *env)
/* initialize pbqp allocation data structure */
pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
pbqp_alloc_env.cls = cls;
pbqp_alloc_env.irg = irg;
pbqp_alloc_env.lv = lv;
pbqp_alloc_env.ignored_regs = bitset_malloc(colors_n);
pbqp_alloc_env.rpeo = plist_new();
pbqp_alloc_env.restr_nodes = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.ife_edge_num = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.env = env;
be_put_ignore_regs(irg, cls, pbqp_alloc_env.ignored_regs); /* get ignored registers */
pbqp_alloc_env.pbqp_inst = alloc_pbqp(get_irg_last_idx(irg)); /* initialize pbqp instance */
pbqp_alloc_env.cls = cls;
pbqp_alloc_env.irg = irg;
pbqp_alloc_env.lv = lv;
pbqp_alloc_env.allocatable_regs = bitset_malloc(colors_n);
pbqp_alloc_env.rpeo = plist_new();
pbqp_alloc_env.restr_nodes = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.ife_edge_num = XMALLOCNZ(unsigned, get_irg_last_idx(irg));
pbqp_alloc_env.env = env;
be_put_allocatable_regs(irg, cls, pbqp_alloc_env.allocatable_regs);
/* create costs matrix template for interference edges */
......@@ -785,7 +785,7 @@ static void be_pbqp_coloring(be_chordal_env_t *env)
#if KAPS_DUMP
fclose(file_before);
#endif
bitset_free(pbqp_alloc_env.ignored_regs);
bitset_free(pbqp_alloc_env.allocatable_regs);
free_pbqp(pbqp_alloc_env.pbqp_inst);
plist_free(pbqp_alloc_env.rpeo);
xfree(pbqp_alloc_env.restr_nodes);
......
......@@ -1919,7 +1919,7 @@ static void be_pref_alloc(ir_graph *new_irg)
n_regs = arch_register_class_n_regs(cls);
normal_regs = rbitset_malloc(n_regs);
be_abi_set_non_ignore_regs(be_get_irg_abi(irg), cls, normal_regs);
be_set_allocatable_regs(irg, cls, normal_regs);
spill();
......
......@@ -1992,20 +1992,12 @@ static serialization_t *compute_best_admissible_serialization(rss_t *rss, ir_nod
*/
static void perform_value_serialization_heuristic(rss_t *rss)
{
bitset_t *arch_nonign_bs = bitset_alloca(arch_register_class_n_regs(rss->cls));
bitset_t *abi_ign_bs = bitset_alloca(arch_register_class_n_regs(rss->cls));
unsigned available_regs, iteration;
dvg_t dvg;
ir_nodeset_t *sat_vals;
pset *ser_set = new_pset(cmp_rss_edges, 20);
/* available_regs = R = |arch_non_ignore_regs cut ~abi_ignore_regs| */
arch_put_non_ignore_regs(rss->cls, arch_nonign_bs);
be_abi_put_ignore_regs(rss->abi, rss->cls, abi_ign_bs);
bitset_andnot(arch_nonign_bs, abi_ign_bs);
available_regs = bitset_popcount(arch_nonign_bs);
//num_live = pset_count(rss->live_block);
//available_regs -= num_live < available_regs ? num_live : 0;
available_regs = be_get_n_allocatable_regs(rss->irg, rss->cls);
DBG((rss->dbg, LEVEL_1, "\n\t#available regs: %d\n\n", available_regs));
......
......@@ -970,7 +970,7 @@ static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls)
obstack_init(&obst);
cls = rcls;
lv = be_get_irg_liveness(irg);
n_regs = cls->n_regs - be_put_ignore_regs(irg, cls, NULL);
n_regs = be_get_n_allocatable_regs(irg, cls);
ws = new_workset();
uses = be_begin_uses(irg, lv);
loop_ana = be_new_loop_pressure(irg, cls);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment