Commit 65a52a96 authored by Matthias Braun

convert remaining APIs from be_irg_t* to ir_graph*

[r27693]
parent 7a178059
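The conversion follows one mechanical pattern across the backends: callbacks receive the ir_graph directly and fetch per-graph backend data through the be_get_irg_* accessors instead of dereferencing a be_irg_t. A rough before/after sketch of that pattern (illustrative only, not a compilable unit — some_cg_init is a placeholder name; the accessors shown are the ones used in the hunks below):

/* before: the callback got the be_irg_t wrapper and poked into its fields */
static void *some_cg_init(be_irg_t *birg)
{
    ir_graph         *irg      = birg->irg;
    const arch_env_t *arch_env = birg->main_env->arch_env;
    char              dump     = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
    /* ... allocate and fill the backend's code generator struct ... */
}

/* after: the callback gets the ir_graph and uses per-graph accessors */
static void *some_cg_init(ir_graph *irg)
{
    const arch_env_t *arch_env = be_get_irg_arch_env(irg);
    be_options_t     *options  = be_get_irg_options(irg);
    char              dump     = (options->dump_flags & DUMP_BE) ? 1 : 0;
    /* ... allocate and fill the backend's code generator struct ... */
}

The same substitution shows up for the other wrapper fields touched in this diff: birg->abi becomes be_get_irg_abi(irg), birg->exec_freq becomes be_get_irg_exec_freq(irg), birg->lv becomes be_get_irg_liveness(irg), and be_get_birg_obst() becomes be_get_be_obst().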
@@ -159,7 +159,7 @@ static void TEMPLATE_emit_and_done(void *self)
free(cg);
}
static void *TEMPLATE_cg_init(be_irg_t *birg);
static void *TEMPLATE_cg_init(ir_graph *irg);
static const arch_code_generator_if_t TEMPLATE_code_gen_if = {
TEMPLATE_cg_init,
@@ -176,15 +176,15 @@ static const arch_code_generator_if_t TEMPLATE_code_gen_if = {
/**
* Initializes the code generator.
*/
static void *TEMPLATE_cg_init(be_irg_t *birg)
static void *TEMPLATE_cg_init(ir_graph *irg)
{
const arch_env_t *arch_env = be_get_irg_arch_env(birg->irg);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
TEMPLATE_isa_t *isa = (TEMPLATE_isa_t *) arch_env;
TEMPLATE_code_gen_t *cg = XMALLOC(TEMPLATE_code_gen_t);
cg->impl = &TEMPLATE_code_gen_if;
cg->irg = be_get_birg_irg(birg);
cg->isa = isa;
cg->impl = &TEMPLATE_code_gen_if;
cg->irg = irg;
cg->isa = isa;
return (arch_code_generator_t *)cg;
}
@@ -637,7 +637,7 @@ void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg)
blk_sched = be_create_block_schedule(irg);
be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
be_dbg_method_begin(entity, be_abi_get_stack_layout(be_get_irg_abi(cg->irg)));
be_gas_emit_function_prolog(entity, 4);
irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
@@ -163,7 +163,7 @@ static void amd64_before_ra(void *self)
{
amd64_code_gen_t *cg = self;
be_sched_fix_flags(cg->birg, &amd64_reg_classes[CLASS_amd64_flags], 0);
be_sched_fix_flags(cg->irg, &amd64_reg_classes[CLASS_amd64_flags], 0);
}
@@ -235,7 +235,7 @@ static void amd64_after_ra_walker(ir_node *block, void *data)
static void amd64_after_ra(void *self)
{
amd64_code_gen_t *cg = self;
be_coalesce_spillslots(cg->birg);
be_coalesce_spillslots(cg->irg);
irg_block_walk_graph(cg->irg, NULL, amd64_after_ra_walker, NULL);
}
@@ -256,7 +256,7 @@ static void amd64_emit_and_done(void *self)
free(cg);
}
static void *amd64_cg_init(be_irg_t *birg);
static void *amd64_cg_init(ir_graph *irg);
static const arch_code_generator_if_t amd64_code_gen_if = {
amd64_cg_init,
@@ -273,17 +273,16 @@ static const arch_code_generator_if_t amd64_code_gen_if = {
/**
* Initializes the code generator.
*/
static void *amd64_cg_init(be_irg_t *birg)
static void *amd64_cg_init(ir_graph *irg)
{
const arch_env_t *arch_env = be_get_irg_arch_env(birg->irg);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
amd64_isa_t *isa = (amd64_isa_t *) arch_env;
amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);
cg->impl = &amd64_code_gen_if;
cg->irg = be_get_birg_irg(birg);
cg->irg = irg;
cg->isa = isa;
cg->birg = birg;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
return (arch_code_generator_t *)cg;
}
@@ -39,7 +39,6 @@ struct amd64_code_gen_t {
const arch_code_generator_if_t *impl; /**< implementation */
ir_graph *irg; /**< current irg */
amd64_isa_t *isa; /**< the isa instance */
be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
char dump; /**< set to 1 if graphs should be dumped */
ir_node *noreg_gp; /**< unique NoReg_GP node */
};
@@ -1006,7 +1006,7 @@ static void arm_emit_block_header(ir_node *block, ir_node *prev)
int n_cfgpreds;
int need_label;
int i, arity;
ir_exec_freq *exec_freq = cg->birg->exec_freq;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
need_label = 0;
n_cfgpreds = get_Block_n_cfgpreds(block);
@@ -1114,7 +1114,7 @@ void arm_gen_routine(const arm_code_gen_t *arm_cg, ir_graph *irg)
arm_register_emitters();
be_dbg_method_begin(entity, be_abi_get_stack_layout(cg->birg->abi));
be_dbg_method_begin(entity, be_abi_get_stack_layout(be_get_irg_abi(cg->irg)));
/* create the block schedule */
blk_sched = be_create_block_schedule(irg);
@@ -278,5 +278,5 @@ void arm_peephole_optimization(arm_code_gen_t *new_cg)
register_peephole_optimisation(op_arm_Ldr, peephole_arm_Str_Ldr);
register_peephole_optimisation(op_arm_FrameAddr, peephole_arm_FrameAddr);
be_peephole_opt(cg->birg);
be_peephole_opt(cg->irg);
}
@@ -194,7 +194,7 @@ static void arm_before_ra(void *self)
{
arm_code_gen_t *cg = self;
be_sched_fix_flags(cg->birg, &arm_reg_classes[CLASS_arm_flags],
be_sched_fix_flags(cg->irg, &arm_reg_classes[CLASS_arm_flags],
&arm_flags_remat);
}
@@ -267,7 +267,7 @@ static void arm_after_ra_walker(ir_node *block, void *data)
static void arm_after_ra(void *self)
{
arm_code_gen_t *cg = self;
be_coalesce_spillslots(cg->birg);
be_coalesce_spillslots(cg->irg);
irg_block_walk_graph(cg->irg, NULL, arm_after_ra_walker, NULL);
}
@@ -481,7 +481,7 @@ static void arm_before_abi(void *self)
}
/* forward */
static void *arm_cg_init(be_irg_t *birg);
static void *arm_cg_init(ir_graph *irg);
static const arch_code_generator_if_t arm_code_gen_if = {
arm_cg_init,
@@ -498,10 +498,10 @@ static const arch_code_generator_if_t arm_code_gen_if = {
/**
* Initializes the code generator.
*/
static void *arm_cg_init(be_irg_t *birg)
static void *arm_cg_init(ir_graph *irg)
{
static ir_type *int_tp = NULL;
arm_isa_t *isa = (arm_isa_t *)birg->main_env->arch_env;
arm_isa_t *isa = (arm_isa_t *) be_get_irg_arch_env(irg);
arm_code_gen_t *cg;
if (! int_tp) {
@@ -511,13 +511,12 @@ static void *arm_cg_init(be_irg_t *birg)
cg = XMALLOC(arm_code_gen_t);
cg->impl = &arm_code_gen_if;
cg->irg = birg->irg;
cg->irg = irg;
cg->reg_set = new_set(arm_cmp_irn_reg_assoc, 1024);
cg->isa = isa;
cg->birg = birg;
cg->int_tp = int_tp;
cg->have_fp_insn = 0;
cg->dump = (birg->main_env->options->dump_flags & DUMP_BE) ? 1 : 0;
cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
FIRM_DBG_REGISTER(cg->mod, "firm.be.arm.cg");
@@ -141,7 +141,6 @@ typedef struct _arm_code_gen_t {
ir_graph *irg; /**< current irg */
set *reg_set; /**< set to memorize registers for FIRM nodes (e.g. phi) */
arm_isa_t *isa; /**< the isa instance */
be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
ir_type *int_tp; /**< the int type, needed for Call conversion */
char have_fp_insn; /**< non-zero, if fp hardware instructions are emitted */
char dump; /**< set to 1 if graphs should be dumped */
@@ -27,7 +27,6 @@
#define FIRM_BE_TYPES_H
typedef unsigned int sched_timestep_t;
typedef struct be_irg_t be_irg_t;
typedef struct arch_register_class_t arch_register_class_t;
typedef struct arch_register_req_t arch_register_req_t;
@@ -83,10 +83,9 @@ struct _be_abi_call_t {
};
/**
* The ABI information for the current birg.
* The ABI information for the current graph.
*/
struct _be_abi_irg_t {
be_irg_t *birg; /**< The back end IRG. */
ir_graph *irg;
const arch_env_t *arch_env;
survive_dce_t *dce_survivor;
@@ -417,8 +416,8 @@ static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *ar
*/
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
ir_graph *irg = env->birg->irg;
const arch_env_t *arch_env = env->birg->main_env->arch_env;
ir_graph *irg = env->irg;
const arch_env_t *arch_env = env->arch_env;
ir_type *call_tp = get_Call_type(irn);
ir_node *call_ptr = get_Call_ptr(irn);
int n_params = get_method_n_params(call_tp);
@@ -429,7 +428,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
const arch_register_t *sp = arch_env->sp;
be_abi_call_t *call = be_abi_call_new(sp->reg_class);
ir_mode *mach_mode = sp->reg_class->mode;
struct obstack *obst = be_get_birg_obst(irg);
struct obstack *obst = be_get_be_obst(irg);
int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
@@ -1189,12 +1188,12 @@ static void process_ops_in_block(ir_node *bl, void *data)
*/
static void process_calls(be_abi_irg_t *env)
{
ir_graph *irg = env->birg->irg;
ir_graph *irg = env->irg;
env->call->flags.bits.irg_is_leaf = 1;
irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, env);
ir_heights = heights_new(env->birg->irg);
ir_heights = heights_new(env->irg);
irg_block_walk_graph(irg, NULL, process_ops_in_block, env);
heights_free(ir_heights);
}
@@ -1217,16 +1216,16 @@ static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call,
ir_entity ***param_map)
{
int dir = env->call->flags.bits.left_to_right ? 1 : -1;
int inc = env->birg->main_env->arch_env->stack_dir * dir;
int inc = env->arch_env->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
struct obstack *obst = be_get_birg_obst(env->irg);
struct obstack *obst = be_get_be_obst(env->irg);
int ofs = 0;
char buf[128];
ir_type *res;
int i;
ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
ident *id = get_entity_ident(get_irg_entity(env->irg));
ir_entity **map;
*param_map = map = OALLOCN(obst, ir_entity*, n);
@@ -1365,7 +1364,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
ir_node *mem, int n_res)
{
be_abi_call_t *call = env->call;
const arch_env_t *arch_env = env->birg->main_env->arch_env;
const arch_env_t *arch_env = env->arch_env;
dbg_info *dbgi;
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(env->keep_map, bl);
@@ -1389,7 +1388,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
if (keep) {
stack = get_irn_n(keep, 0);
kill_node(keep);
remove_End_keepalive(get_irg_end(env->birg->irg), keep);
remove_End_keepalive(get_irg_end(env->irg), keep);
}
/* Insert results for Return into the register map. */
@@ -1456,7 +1455,7 @@ static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
}
/* we have to pop the shadow parameter in case of struct returns */
pop = call->pop;
ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
ret = be_new_Return(dbgi, env->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
for (i = 0; i < n; ++i) {
@@ -1582,7 +1581,7 @@ static void lower_frame_sels_walker(ir_node *irn, void *data)
static void fix_address_of_parameter_access(be_abi_irg_t *env, ent_pos_pair *value_param_list)
{
be_abi_call_t *call = env->call;
ir_graph *irg = env->birg->irg;
ir_graph *irg = env->irg;
ent_pos_pair *entry, *new_list;
ir_type *frame_tp;
int i, n = ARR_LEN(value_param_list);
@@ -1781,15 +1780,15 @@ static void fix_outer_variable_access(be_abi_irg_t *env,
static void modify_irg(be_abi_irg_t *env)
{
be_abi_call_t *call = env->call;
const arch_env_t *arch_env= env->birg->main_env->arch_env;
const arch_env_t *arch_env= env->arch_env;
const arch_register_t *sp = arch_env->sp;
ir_graph *irg = env->birg->irg;
ir_graph *irg = env->irg;
ir_node *end;
ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
ir_type *method_type = get_entity_type(get_irg_entity(irg));
struct obstack *obst = be_get_birg_obst(irg);
struct obstack *obst = be_get_be_obst(irg);
int n_params;
int i, n;
@@ -2182,7 +2181,7 @@ static void fix_pic_symconsts(ir_node *node, void *data)
ir_node *load_res;
be_abi_irg_t *env = data;
int arity, i;
be_main_env_t *be = env->birg->main_env;
be_main_env_t *be = be_birg_from_irg(env->irg)->main_env;
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
@@ -2218,7 +2217,7 @@ static void fix_pic_symconsts(ir_node *node, void *data)
/* everything else is accessed relative to EIP */
mode = get_irn_mode(pred);
pic_base = arch_code_generator_get_pic_base(env->birg->cg);
pic_base = arch_code_generator_get_pic_base(be_get_irg_cg(env->irg));
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
@@ -2248,32 +2247,32 @@ static void fix_pic_symconsts(ir_node *node, void *data)
}
}
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
be_abi_irg_t *be_abi_introduce(ir_graph *irg)
{
be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
struct obstack *obst = be_get_birg_obst(irg);
be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
ir_node *old_frame = get_irg_frame(irg);
struct obstack *obst = be_get_be_obst(irg);
be_options_t *options = be_get_irg_options(irg);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
pmap_entry *ent;
ir_node *dummy;
unsigned *limited_bitset;
arch_register_req_t *sp_req;
be_omit_fp = birg->main_env->options->omit_fp;
be_omit_leaf_fp = birg->main_env->options->omit_leaf_fp;
be_omit_fp = options->omit_fp;
be_omit_leaf_fp = options->omit_leaf_fp;
obstack_init(obst);
env->arch_env = birg->main_env->arch_env;
env->arch_env = arch_env;
env->method_type = get_entity_type(get_irg_entity(irg));
env->call = be_abi_call_new(env->arch_env->sp->reg_class);
arch_env_get_call_abi(env->arch_env, env->method_type, env->call);
env->call = be_abi_call_new(arch_env->sp->reg_class);
arch_env_get_call_abi(arch_env, env->method_type, env->call);
env->ignore_regs = pset_new_ptr_default();
env->keep_map = pmap_create();
env->dce_survivor = new_survive_dce();
env->birg = birg;
env->irg = irg;
sp_req = OALLOCZ(obst, arch_register_req_t);
@@ -2281,20 +2280,20 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
sp_req->type = arch_register_req_type_limited
| arch_register_req_type_produces_sp;
sp_req->cls = arch_register_get_class(env->arch_env->sp);
sp_req->cls = arch_register_get_class(arch_env->sp);
limited_bitset = rbitset_obstack_alloc(obst, sp_req->cls->n_regs);
rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
rbitset_set(limited_bitset, arch_register_get_index(arch_env->sp));
sp_req->limited = limited_bitset;
if (env->arch_env->sp->type & arch_register_type_ignore) {
if (arch_env->sp->type & arch_register_type_ignore) {
sp_req->type |= arch_register_req_type_ignore;
}
env->init_sp = dummy = new_r_Dummy(irg, env->arch_env->sp->reg_class->mode);
env->init_sp = dummy = new_r_Dummy(irg, arch_env->sp->reg_class->mode);
env->calls = NEW_ARR_F(ir_node*, 0);
if (birg->main_env->options->pic) {
if (options->pic) {
irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
}
@@ -2305,7 +2304,7 @@ be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
Beware: init backend abi call object after processing calls,
otherwise some information might be not yet available.
*/
env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
env->cb = env->call->cb->init(env->call, arch_env, irg);
/* Process the IRG */
modify_irg(env);
@@ -2427,14 +2426,13 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env)
be_ssa_construction_env_t senv;
int i, len;
ir_node **phis;
be_irg_t *birg = env->birg;
ir_graph *irg = env->irg;
be_lv_t *lv = be_get_irg_liveness(irg);
fix_stack_walker_env_t walker_env;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
irg_walk_graph(irg, collect_stack_nodes_walker, NULL, &walker_env);
/* nothing to be done if we didn't find any node, in fact we mustn't
* continue, as for endless loops incsp might have had no users and is bad
@@ -2515,11 +2513,11 @@ static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
if (be_is_IncSP(irn)) {
/* fill in real stack frame size */
if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
ir_type *frame_type = get_irg_frame_type(env->birg->irg);
ir_type *frame_type = get_irg_frame_type(env->irg);
ofs = (int) get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
ir_type *frame_type = get_irg_frame_type(env->birg->irg);
ir_type *frame_type = get_irg_frame_type(env->irg);
ofs = - (int)get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else {
@@ -2613,7 +2611,7 @@ static void lower_outer_frame_sels(ir_node *sel, void *ctx)
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
ir_graph *irg = env->birg->irg;
ir_graph *irg = env->irg;
ir_type *frame_tp;
int i;
struct bias_walk bw;
@@ -182,7 +182,7 @@ be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call);
*/
ir_type *be_abi_call_get_method_type(const be_abi_call_t *call);
be_abi_irg_t *be_abi_introduce(be_irg_t *bi);
be_abi_irg_t *be_abi_introduce(ir_graph *irg);
/**
* Fix the stack bias for all nodes accessing the stack frame using the
@@ -484,10 +484,10 @@ struct arch_irn_ops_t {
struct arch_code_generator_if_t {
/**
* Initialize the code generator.
* @param birg A backend IRG session.
* @param irg A graph
* @return A newly created code generator.
*/
void *(*init)(be_irg_t *birg);
void *(*init)(ir_graph *irg);
/**
* return node used as base in pic code addresses
@@ -508,7 +508,7 @@ struct arch_code_generator_if_t {
* Backend may provide an own spiller.
* This spiller needs to spill all register classes.
*/
void (*spill)(void *self, be_irg_t *birg);
void (*spill)(void *self, ir_graph *irg);
/**
* Called before register allocation.
@@ -556,7 +556,7 @@ do { \
#define arch_code_generator_after_ra(cg) _arch_cg_call(cg, after_ra)
#define arch_code_generator_finish(cg) _arch_cg_call(cg, finish)
#define arch_code_generator_done(cg) _arch_cg_call(cg, done)
#define arch_code_generator_spill(cg, birg) _arch_cg_call_env(cg, birg, spill)
#define arch_code_generator_spill(cg, irg) _arch_cg_call_env(cg, irg, spill)
#define arch_code_generator_has_spiller(cg) ((cg)->impl->spill != NULL)
#define arch_code_generator_get_pic_base(cg) \
((cg)->impl->get_pic_base != NULL ? (cg)->impl->get_pic_base(cg) : NULL)
@@ -517,7 +517,7 @@ static ir_node **create_block_schedule_greedy(ir_graph *irg, ir_exec_freq *execf
start_entry = finish_block_schedule(&env);
block_list = create_blocksched_array(&env, start_entry, env.blockcount,
be_get_birg_obst(irg));
be_get_be_obst(irg));
DEL_ARR_F(env.edges);
obstack_free(&obst, NULL);
@@ -716,7 +716,7 @@ static ir_node **create_block_schedule_ilp(ir_graph *irg, ir_exec_freq *execfreq
start_entry = finish_block_schedule(&env.env);
block_list = create_blocksched_array(&env.env, start_entry,
env.env.blockcount,
be_get_birg_obst(irg));
be_get_be_obst(irg));
DEL_ARR_F(env.ilpedges);
free_lpp(env.lpp);
@@ -476,7 +476,7 @@ static void be_ra_chordal_main(ir_graph *irg)
}
be_timer_push(T_RA_SPILL);
arch_code_generator_spill(be_get_irg_cg(irg), be_birg_from_irg(irg));
arch_code_generator_spill(be_get_irg_cg(irg), irg);
be_timer_pop(T_RA_SPILL);
dump(BE_CH_DUMP_SPILL, irg, NULL, "spill");
@@ -223,7 +223,7 @@ ilp_env_t *new_ilp_env(copy_opt_t *co, ilp_callback build, ilp_callback apply, v
lpp_sol_state_t ilp_go(ilp_env_t *ienv)
{
be_main_env_t *main_env = ienv->co->cenv->birg->main_env;
be_options_t *options = be_get_irg_options(ienv->co->irg);
sr_remove(ienv->sr);
@@ -234,7 +234,7 @@ lpp_sol_state_t ilp_go(ilp_env_t *ienv)
lpp_set_log(ienv->lp, stdout);
if (solve_net)
lpp_solve_net(ienv->lp, main_env->options->ilp_server, main_env->options->ilp_solver);
lpp_solve_net(ienv->lp, options->ilp_server, options->ilp_solver);
else {
#ifdef LPP_SOLVE_NET
fprintf(stderr, "can only solve ilp over the net\n");
@@ -43,10 +43,11 @@ static FILE *my_open(const be_chordal_env_t *env, const char *prefix, const char
char buf[1024];
size_t i, n;
char *tu_name;
const char *cup_name = be_birg_from_irg(irg)->main_env->cup_name;
n = strlen(env->birg->main_env->cup_name);
n = strlen(cup_name);
tu_name = XMALLOCN(char, n + 1);
strcpy(tu_name, env->birg->main_env->cup_name);
strcpy(tu_name, cup_name);
for (i = 0; i < n; ++i)
if (tu_name[i] == '.')
tu_name[i] = '_';
@@ -143,7 +144,7 @@ static int co_solve_heuristic_pbqp(copy_opt_t *co)
bitset_clear_all(pbqp_co.restricted_nodes);
/* get ignored registers */
be_put_ignore_regs(co->cenv->birg, co->cls, pbqp_co.ignore_reg);
be_put_ignore_regs(co->cenv->irg, co->cls, pbqp_co.ignore_reg);
/* add costs vector to nodes */
be_ifg_foreach_node(co->cenv->ifg, &nodes_it, ifg_node) {
@@ -286,11 +286,9 @@ static void fix_flags_walker(ir_node *block, void *env)
assert(flag_consumers == NULL);
}
void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
void be_sched_fix_flags(ir_graph *irg, const arch_register_class_t *flag_cls,
func_rematerialize remat_func)
{
ir_graph *irg = be_get_birg_irg(birg);
flag_class = flag_cls;
flags_reg = & flag_class->regs[0];
remat = remat_func;
@@ -299,7 +297,7 @@ void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
remat = &default_remat;
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
irg_block_walk_graph(irg, fix_flags_walker, NULL, birg->lv);
irg_block_walk_graph(irg, fix_flags_walker, NULL, be_get_irg_liveness(irg));
ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
if (changed) {
@@ -36,7 +36,7 @@ typedef ir_node * (*func_rematerialize) (ir_node *node, ir_node *after);
* and consumer of flags. It does so by moving down/rematerialising of the
* nodes. This does not work across blocks.
*/
void be_sched_fix_flags(be_irg_t *birg, const arch_register_class_t *flag_cls,
void be_sched_fix_flags(ir_graph *irg, const arch_register_class_t *flag_cls,
func_rematerialize remat_func);
#endif
@@ -150,11 +150,8 @@ typedef struct {
void *irg_env; /**< An environment for the irg scheduling, provided by the backend */
void *block_env; /**< An environment for scheduling a block, provided by the backend */
const arch_env_t *arch_env;
const be_main_env_t *main_env;
const be_machine_t *cpu; /**< the current abstract machine */
ilpsched_options_t *opts; /**< the ilp options for current irg */
const be_irg_t *birg; /**< The birg object */
be_options_t *be_opts; /**< backend options */
const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
DEBUG_ONLY(firm_dbg_module_t *dbg);
} be_ilpsched_env_t;
@@ -1861,6 +1858,7 @@ static void create_ilp(ir_node *block, void *walk_env)
int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
int estimated_n_var = (int)((double)base_num * fact_var);
int estimated_n_cst = (int)((double)base_num * fact_cst);
be_options_t *options = be_get_irg_options(env->irg);
DBG((env->dbg, LEVEL_1, "Creating LPP with estimated numbers: %d vars, %d cst\n",