Commit 88036a81 authored by Sebastian Hack
Browse files

Several bug fixes

Adapted to the new dependency edges
parent 45a7e67f
/* Helpers for associating firm IR nodes with ARM backend registers. */
#ifndef _arm_MAP_REGS_H_
#define _arm_MAP_REGS_H_
#include "irnode.h"
#include "set.h"
#include "../bearch.h"
#include "arm_nodes_attr.h"
/** Return the register used for the n-th register parameter
 *  (presumably per the ARM calling convention — confirm in the .c file). */
const arch_register_t *arm_get_RegParam_reg(int n);
/** Comparison callback for (irn, register) associations stored in a "set.h"
 *  set; signature matches the set's cmp function type. */
int arm_cmp_irn_reg_assoc(const void *a, const void *b, size_t len);
/** Record in @p reg_set that node @p irn is assigned register @p reg. */
void arm_set_firm_reg(ir_node *irn, const arch_register_t *reg, set *reg_set);
/** Look up the register previously recorded for @p irn in @p reg_set
 *  (NOTE(review): presumably NULL when no entry exists — confirm). */
const arch_register_t *arm_get_firm_reg(const ir_node *irn, set *reg_set);
/** Translate the position of a Proj node into a backend register index
 *  (NOTE(review): exact mapping defined in the implementation — confirm). */
long arm_translate_proj_pos(const ir_node *proj);
#endif /* _arm_MAP_REGS_H_ */
......@@ -10,6 +10,7 @@
#include "firm_types.h"
#include "obst.h"
#include "debug.h"
#include "bitset.h"
#include "be.h"
#include "bearch.h"
......@@ -68,4 +69,15 @@ struct _be_irg_t {
struct _arch_code_generator_t *cg;
};
/**
* Put the registers to be ignored in this IRG into a bitset.
* @param birg The backend IRG data structure.
* @param cls The register class.
* @param bs The bitset (may be NULL).
* @return The number of registers to be ignored.
*/
int be_put_ignore_regs(const struct _be_irg_t *birg, const struct _arch_register_class_t *cls, bitset_t *bs);
#endif /* _BE_T_H */
......@@ -383,7 +383,7 @@ static INLINE int is_on_stack(be_abi_call_t *call, int pos)
* @param curr_sp The stack pointer node to use.
* @return The stack pointer after the call.
*/
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
{
ir_graph *irg = env->birg->irg;
const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
......@@ -471,7 +471,11 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
* moving the stack pointer along the stack's direction.
*/
if(stack_dir < 0 && !do_seq && !no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size);
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
if(alloca_copy) {
add_irn_dep(curr_sp, alloca_copy);
alloca_copy = NULL;
}
}
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
......@@ -491,8 +495,12 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
*/
if (do_seq) {
curr_ofs = 0;
addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
param_size + arg->space_before);
addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
if(alloca_copy) {
add_irn_dep(curr_sp, alloca_copy);
alloca_copy = NULL;
}
add_irn_dep(curr_sp, curr_mem);
}
else {
curr_ofs += arg->space_before;
......@@ -690,8 +698,14 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
/* Clean up the stack frame if we allocated it */
if(!no_alloc)
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, -stack_size);
if(!no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
add_irn_dep(curr_sp, mem_proj);
if(alloca_copy) {
add_irn_dep(curr_sp, alloca_copy);
alloca_copy = NULL;
}
}
}
be_abi_call_free(call);
......@@ -706,7 +720,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
* Adjust an alloca.
* The alloca is transformed into a back end alloca node and connected to the stack nodes.
*/
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
{
if (get_Alloc_where(alloc) == stack_alloc) {
ir_node *bl = get_nodes_block(alloc);
......@@ -758,9 +772,9 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
/* copy the address away, since it could be used after further stack pointer modifications. */
/* copy the address away, since it could be used after further stack pointer modifications. */
/* Let it point curr_sp just for the moment, I'll reroute it in a second. */
copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
*result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
/* Let all users of the Alloc() result now point to the copy. */
edges_reroute(alloc_res, copy, irg);
......@@ -880,6 +894,7 @@ static void process_calls_in_block(ir_node *bl, void *data)
if(n > 0) {
ir_node *keep;
ir_node **nodes;
ir_node *copy = NULL;
int i;
nodes = obstack_finish(&env->obst);
......@@ -893,10 +908,10 @@ static void process_calls_in_block(ir_node *bl, void *data)
DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
switch(get_irn_opcode(irn)) {
case iro_Call:
curr_sp = adjust_call(env, irn, curr_sp);
curr_sp = adjust_call(env, irn, curr_sp, copy);
break;
case iro_Alloc:
curr_sp = adjust_alloc(env, irn, curr_sp);
curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
break;
default:
break;
......@@ -1611,7 +1626,7 @@ static void modify_irg(be_abi_irg_t *env)
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
barrier = create_barrier(env, bl, &mem, env->regs, 0);
......
......@@ -121,6 +121,9 @@ void be_abi_fix_stack_bias(be_abi_irg_t *env);
void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv);
void be_abi_free(be_abi_irg_t *abi);
/**
* Put the registers which are forbidden specifically for this IRG in a bitset.
*/
void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs);
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg);
......
......@@ -741,5 +741,4 @@ extern arch_env_t *arch_env_push_irn_handler(arch_env_t *env, const arch_irn_han
*/
extern const arch_irn_handler_t *arch_env_pop_irn_handler(arch_env_t *env);
#endif /* _FIRM_BEARCH_H */
......@@ -563,6 +563,7 @@ static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env, ir_node *i
bitset_clear_all(bs);
arch_put_non_ignore_regs(aenv, env->cls, bs);
bitset_andnot(bs, env->ignore_colors);
bitset_foreach(bs, col)
bipartite_add(bp, n_alloc, col);
......
......@@ -505,11 +505,11 @@ static be_ra_timer_t *be_ra_chordal_main(const be_irg_t *bi)
/* verify schedule and register pressure */
if (options.vrfy_option == BE_CH_VRFY_WARN) {
be_verify_schedule(irg);
be_verify_register_pressure(chordal_env.birg->main_env->arch_env, chordal_env.cls, irg);
be_verify_register_pressure(chordal_env.birg, chordal_env.cls, irg);
}
else if (options.vrfy_option == BE_CH_VRFY_ASSERT) {
assert(be_verify_schedule(irg) && "Schedule verification failed");
assert(be_verify_register_pressure(chordal_env.birg->main_env->arch_env, chordal_env.cls, irg)
assert(be_verify_register_pressure(chordal_env.birg, chordal_env.cls, irg)
&& "Register pressure verification failed");
}
BE_TIMER_POP(ra_timer.t_verify);
......
......@@ -216,8 +216,8 @@ static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_no
if(!nodeset_find(env->already_scheduled, irn)) {
int i, n;
int res = 0;
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *operand = get_irn_n(irn, i);
for(i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
ir_node *operand = get_irn_in_or_dep(irn, i);
if(get_irn_visited(operand) < visited_nr) {
int tmp;
......@@ -625,8 +625,8 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn
if (env->block != get_nodes_block(irn))
return 0;
for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
ir_node *op = get_irn_in_or_dep(irn, i);
/* if irn is an End we have keep-alives and op might be a block, skip that */
if (is_Block(op)) {
......@@ -640,7 +640,7 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn
return 0;
}
nodeset_insert(env->cands, irn);
nodeset_insert(env->cands, irn);
/* calculate the etime of this node */
etime = env->curr_time;
......@@ -670,7 +670,13 @@ static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
ir_node *user = edge->src;
ir_node *user = get_edge_src_irn(edge);
if(!is_Phi(user))
make_ready(env, irn, user);
}
foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
ir_node *user = get_edge_src_irn(edge);
if(!is_Phi(user))
make_ready(env, irn, user);
}
......@@ -748,8 +754,8 @@ static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
if (is_Proj(irn))
return;
for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_n(irn, i);
for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
ir_node *in = get_irn_in_or_dep(irn, i);
/* if in is a proj: update predecessor */
while (is_Proj(in))
......@@ -874,8 +880,8 @@ static int get_reg_difference(block_sched_env_t *be, ir_node *irn) {
num_out = 1;
/* num in regs: number of ins with mode datab and not ignore */
for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_n(irn, i);
for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; i--) {
ir_node *in = get_irn_in_or_dep(irn, i);
if (mode_is_datab(get_irn_mode(in)) && ! arch_irn_is(be->sched_env->arch_env, in, ignore))
num_in++;
}
......@@ -1009,8 +1015,8 @@ static void descent(ir_node *root, ir_node *block, ir_node **list, block_sched_e
}
/* Phi nodes always leave the block */
for (i = get_irn_arity(root) - 1; i >= 0; --i) {
ir_node *pred = get_irn_n(root, i);
for (i = get_irn_ins_or_deps(root) - 1; i >= 0; --i) {
ir_node *pred = get_irn_in_or_dep(root, i);
DBG((xxxdbg, LEVEL_3, " node %+F\n", pred));
/* Blocks may happen as predecessors of End nodes */
......@@ -1072,7 +1078,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
be.selector = selector;
be.sched_env = env;
FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
FIRM_DBG_REGISTER(xxxdbg, "firm.be.sched");
FIRM_DBG_REGISTER(xxxdbg, "firm.be.schedxxx");
// firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
......@@ -1128,6 +1134,17 @@ static void list_sched_block(ir_node *block, void *env_ptr)
d = ld > d ? ld : d;
}
}
foreach_out_edge_kind(curr, edge, EDGE_KIND_DEP) {
ir_node *n = get_edge_src_irn(edge);
if (get_nodes_block(n) == block) {
sched_timestep_t ld;
ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
d = ld > d ? ld : d;
}
}
}
}
set_irn_delay(&be, curr, d);
......@@ -1170,8 +1187,8 @@ static void list_sched_block(ir_node *block, void *env_ptr)
int ready = 1;
/* Check, if the operands of a node are not local to this block */
for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
ir_node *operand = get_irn_n(irn, j);
for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
ir_node *operand = get_irn_in_or_dep(irn, j);
if (get_nodes_block(operand) == block) {
ready = 0;
......
......@@ -329,7 +329,7 @@ static void prepare_graph(be_irg_t *birg)
compute_doms(irg);
/* Ensure, that the ir_edges are computed. */
edges_activate(irg);
edges_assure(irg);
/* check, if the dominance property is fulfilled. */
be_check_dominance(irg);
......@@ -425,6 +425,9 @@ static void be_main_loop(FILE *file_handle)
birg.irg = irg;
birg.main_env = &env;
edges_deactivate_kind(irg, EDGE_KIND_DEP);
edges_activate_kind(irg, EDGE_KIND_DEP);
DBG((env.dbg, LEVEL_2, "====> IRG: %F\n", irg));
dump(DUMP_INITIAL, irg, "-begin", dump_ir_block_graph);
......@@ -663,7 +666,7 @@ void be_main(FILE *file_handle)
/* never build code for pseudo irgs */
set_visit_pseudo_irgs(0);
be_node_init();
be_node_init();
be_main_loop(file_handle);
#ifdef WITH_LIBCORE
......@@ -690,3 +693,18 @@ const char *be_retrieve_dbg_info(const dbg_info *dbg, unsigned *line) {
*line = 0;
return NULL;
}
/**
 * Compute the set of registers of class @p cls the allocator must ignore
 * for the given backend IRG: the complement of the architecture's
 * non-ignore registers, plus the registers the ABI forbids specifically
 * for this IRG.
 *
 * @param birg The backend IRG.
 * @param cls  The register class.
 * @param bs   Bitset to fill, or NULL if only the count is wanted.
 * @return The number of registers to be ignored.
 */
int be_put_ignore_regs(const be_irg_t *birg, const arch_register_class_t *cls, bitset_t *bs)
{
if(bs == NULL)
/* caller only wants the count; use a stack-allocated scratch bitset */
bs = bitset_alloca(cls->n_regs);
else
bitset_clear_all(bs);
assert(bitset_size(bs) == cls->n_regs);
/* mark the allocatable registers, then flip to obtain the ignored ones */
arch_put_non_ignore_regs(birg->main_env->arch_env, cls, bs);
bitset_flip_all(bs);
/* add the registers forbidden for this IRG by the ABI */
be_abi_put_ignore_regs(birg->abi, cls, bs);
return bitset_popcnt(bs);
}
......@@ -542,15 +542,14 @@ int be_Return_get_n_rets(ir_node *ret)
return a->num_ret_vals;
}
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset)
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset)
{
be_stack_attr_t *a;
ir_node *irn;
ir_node *in[2];
ir_node *in[1];
in[0] = old_sp;
in[1] = mem;
irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, 2, in);
irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, sizeof(in) / sizeof(in[0]), in);
a = init_node_attr(irn, 1);
a->offset = offset;
......
......@@ -195,7 +195,7 @@ ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_
* @return A new stack pointer increment/decrement node.
* @note This node sets a register constraint to the @p sp register on its output.
*/
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset);
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, int offset);
/** Returns the previous node that computes the stack pointer. */
ir_node *be_get_IncSP_pred(ir_node *incsp);
......@@ -203,9 +203,6 @@ ir_node *be_get_IncSP_pred(ir_node *incsp);
/** Sets the previous node that computes the stack pointer. */
void be_set_IncSP_pred(ir_node *incsp, ir_node *pred);
/** Returns the memory input of the IncSP. */
ir_node *be_get_IncSP_mem(ir_node *irn);
/**
* Sets a new offset to a IncSP node.
* A positive offset means expanding the stack, a negative offset shrinking
......
......@@ -35,7 +35,6 @@ struct _mris_env_t {
const arch_env_t *aenv;
ir_graph *irg;
ir_node *bl;
nodeset *inserted;
int visited;
struct list_head lineage_head;
struct obstack obst;
......@@ -111,7 +110,7 @@ static void compute_heights(mris_env_t *env)
}
#endif
#define valid_node(env, dep) (to_appear(env, dep) && !nodeset_find(env->inserted, dep) && !be_is_Keep(dep))
#define valid_node(env, dep) (to_appear(env, dep) && !be_is_Keep(dep))
static void grow_all_descendands(mris_env_t *env, ir_node *irn, unsigned long visited)
{
......@@ -126,6 +125,14 @@ static void grow_all_descendands(mris_env_t *env, ir_node *irn, unsigned long vi
set_irn_visited(desc, visited);
}
}
foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
ir_node *desc = get_edge_src_irn(edge);
if(valid_node(env, desc) && get_irn_visited(desc) < visited) {
obstack_ptr_grow(&env->obst, desc);
set_irn_visited(desc, visited);
}
}
}
static ir_node **all_descendants(mris_env_t *env, ir_node *irn)
......@@ -309,13 +316,13 @@ static void lineage_formation(mris_env_t *env)
*/
if(n_desc > 1 && !be_is_Keep(lowest_desc)) {
const arch_register_class_t *cls;
ir_node *copy_keep, *op;
ir_node *op;
int i, n;
for(i = 0, n = get_irn_arity(lowest_desc); i < n; ++i) {
for(i = 0, n = get_irn_ins_or_deps(lowest_desc); i < n; ++i) {
ir_node *cmp;
op = get_irn_n(lowest_desc, i);
op = get_irn_in_or_dep(lowest_desc, i);
cmp = highest_is_tuple ? skip_Projs(op) : op;
if(cmp == highest_node)
......@@ -326,9 +333,7 @@ static void lineage_formation(mris_env_t *env)
cls = arch_get_irn_reg_class(env->aenv, op, BE_OUT_POS(0));
replace_tuple_by_repr_proj(env, &in[1]);
copy_keep = be_new_CopyKeep(cls, env->irg, env->bl, op, n_desc, &in[1], get_irn_mode(op));
set_irn_n(lowest_desc, i, copy_keep);
nodeset_insert(env->inserted, copy_keep);
add_irn_dep(lowest_desc, in[1]);
}
obstack_free(&env->obst, in);
......@@ -384,15 +389,9 @@ static int fuse_two_lineages(mris_env_t *env, mris_irn_t *u, mris_irn_t *v)
/* insert a CopyKeep to make lineage v dependent on u. */
{
const arch_register_class_t *cls;
ir_node *op = NULL;
if(get_irn_arity(start) == 0)
if(get_irn_ins_or_deps(start) == 0)
return 0;
op = get_irn_n(start, 0);
cls = arch_get_irn_reg_class(env->aenv, op, BE_OUT_POS(0));
if(get_irn_mode(last) == mode_T) {
const ir_edge_t *edge;
foreach_out_edge(last, edge) {
......@@ -400,10 +399,8 @@ static int fuse_two_lineages(mris_env_t *env, mris_irn_t *u, mris_irn_t *v)
break;
}
}
copy = be_new_CopyKeep_single(cls, env->irg, env->bl, op, last, get_irn_mode(op));
set_irn_n(start, 0, copy);
copy_mi = get_mris_irn(env, copy);
nodeset_insert(env->inserted, copy);
add_irn_dep(start, last);
}
/* irn now points to the last node in lineage u; mi has the info for the node _before_ the terminator of the lineage. */
......@@ -466,7 +463,6 @@ mris_env_t *be_sched_mris_preprocess(const be_irg_t *birg)
env->aenv = birg->main_env->arch_env;
env->irg = birg->irg;
env->visited = 0;
env->inserted = new_nodeset(128);
env->heights = heights_new(birg->irg);
INIT_LIST_HEAD(&env->lineage_head);
FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.mris");
......@@ -477,31 +473,9 @@ mris_env_t *be_sched_mris_preprocess(const be_irg_t *birg)
return env;
}
/**
 * Undo the CopyKeep nodes inserted during MRIS preprocessing
 * (collected in env->inserted): reroute their users back to the copied
 * operand, take them out of the schedule and disconnect their inputs
 * so they become dead.
 */
static void cleanup_inserted(mris_env_t *env)
{
ir_node *irn;
foreach_nodeset(env->inserted, irn) {
int i, n;
ir_node *tgt;
assert(be_is_CopyKeep(irn));
/* the value the CopyKeep copied; users are redirected to it */
tgt = get_irn_n(irn, be_pos_CopyKeep_op);
/* reroute the edges, remove from schedule and make it invisible. */
edges_reroute(irn, tgt, env->irg);
if (sched_is_scheduled(irn))
sched_remove(irn);
/* i starts at -1 to also reset the block input of the node */
for(i = -1, n = get_irn_arity(irn); i < n; ++i)
set_irn_n(irn, i, new_r_Bad(env->irg));
}
}
/**
 * Destroy an MRIS environment created by be_sched_mris_preprocess():
 * remove the inserted CopyKeep nodes from the graph, then release the
 * phase, the 'inserted' nodeset, the heights structure, and finally
 * the environment itself.
 */
void be_sched_mris_free(mris_env_t *env)
{
cleanup_inserted(env);
phase_free(&env->ph);
del_nodeset(env->inserted);
heights_free(env->heights);
free(env);
}
......@@ -185,6 +185,7 @@ void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
}
void be_spill_phi(spill_env_t *env, ir_node *node) {
spill_info_t* spill;
int i, arity;
assert(is_Phi(node));
......@@ -192,7 +193,7 @@ void be_spill_phi(spill_env_t *env, ir_node *node) {
pset_insert_ptr(env->mem_phis, node);
// create spillinfos for the phi arguments
spill_info_t* spill = get_spillinfo(env, node);
spill = get_spillinfo(env, node);
for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node *arg = get_irn_n(node, i);
get_spillinfo(env, arg);
......
......@@ -626,7 +626,7 @@ void be_spill_belady_spill_env(const be_chordal_env_t *chordal_env, spill_env_t
env.cenv = chordal_env;
env.arch = chordal_env->birg->main_env->arch_env;
env.cls = chordal_env->cls;
env.n_regs = arch_count_non_ignore_regs(env.arch, env.cls);
env.n_regs = env.cls->n_regs - be_put_ignore_regs(chordal_env->birg, chordal_env->cls, NULL);
env.ws = new_workset(&env, &env.ob);
env.uses = be_begin_uses(chordal_env->irg, chordal_env->lv, chordal_env->birg->main_env->arch_env, env.cls);
if(spill_env == NULL) {
......
......@@ -453,13 +453,6 @@ static int reduce_register_pressure_in_loop(morgan_env_t *env, const ir_loop *lo
return outer_spills_needed;
}
/**
 * Count the registers of class @p cls that are actually available for
 * allocation, i.e. all registers of the class minus those the ABI
 * forbids for this IRG.
 *
 * @param abi The ABI environment of the IRG.
 * @param cls The register class.
 * @return Number of allocatable registers in @p cls.
 */
static int count_available_registers(be_abi_irg_t *abi, const arch_register_class_t *cls)
{
	bitset_t *bs = bitset_alloca(cls->n_regs);

	/* collect the registers forbidden for this IRG into bs */
	be_abi_put_ignore_regs(abi, cls, bs);

	/* BUGFIX: the original returned bitset_popcnt(bs), i.e. the number of
	 * IGNORED registers, although the function (and its caller, which
	 * stores the result as env.registers_available) needs the number of
	 * AVAILABLE ones. Available = total - ignored. */
	return cls->n_regs - bitset_popcnt(bs);
}
void be_spill_morgan(be_chordal_env_t *chordal_env) {
morgan_env_t env;
......@@ -475,7 +468,7 @@ void be_spill_morgan(be_chordal_env_t *chordal_env) {
obstack_init(&env.obst);
env.registers_available = count_available_registers(chordal_env->birg->abi, chordal_env->cls);
env.registers_available = env.cls->n_regs - be_put_ignore_regs(chordal_env->birg, env.cls, NULL);
env.loop_attr_set = new_set(loop_attr_cmp, 5);
env.block_attr_set = new_set(block_attr_cmp, 20);
......
......@@ -90,14 +90,14 @@ static void verify_liveness_walker(ir_node *block, void *data) {
/**
* Start a walk over the irg and check the register pressure.
*/
int be_verify_register_pressure(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_graph *irg) {
int be_verify_register_pressure(const be_irg_t *birg, const arch_register_class_t *cls, ir_graph *irg) {
be_verify_register_pressure_env_t env;
env.lv = be_liveness(irg);
env.irg = irg;
env.arch_env = arch_env;
env.arch_env = birg->main_env->arch_env;
env.cls = cls;
env.registers_available = arch_count_non_ignore_regs(arch_env, cls);
env.registers_available = env.cls->n_regs - be_put_ignore_regs(birg, env.cls, NULL);
env.problem_found = 0;
irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
......
......@@ -22,12 +22,12 @@
* Verifies, that the register pressure for a given register class doesn't exceed the limit
* of available registers.
*
* @param arch_env An architecture environment
* @param cls The register class to check
* @param irg The irg to check
* @return 1 if the pressure is valid, 0 otherwise
* @param birg The backend IRG.
* @param cls The register class to check.
* @param irg The irg to check.
* @return 1 if the pressure is valid, 0 otherwise.
*/
int be_verify_register_pressure(const