Commit e1397b01 authored by Michael Beck's avatar Michael Beck
Browse files

- remove all irg parameter from node constructors having a block

- SymConst's are now ALWAYS placed in the start block

[r26236]
parent f3264b83
This diff is collapsed.
......@@ -225,7 +225,7 @@ static void handle_modeb(ir_node *block, ir_node *selector, pn_Cond pnc, env_t *
else
in[i] = c_o;
}
phi = new_r_Phi(current_ir_graph, user_blk, n, in, mode_b);
phi = new_r_Phi(user_blk, n, in, mode_b);
set_irn_n(user, pos, phi);
}
}
......@@ -344,7 +344,7 @@ static void handle_if(ir_node *block, ir_node *cmp, pn_Cmp pnc, env_t *env) {
* We can replace the input with a Confirm(left, pnc, right).
*/
if (! c)
c = new_r_Confirm(current_ir_graph, block, left, right, pnc);
c = new_r_Confirm(block, left, right, pnc);
pos = get_edge_src_pos(edge);
set_irn_n(succ, pos, c);
......@@ -377,7 +377,7 @@ static void handle_if(ir_node *block, ir_node *cmp, pn_Cmp pnc, env_t *env) {
* We can replace the input with a Confirm(right, pnc^-1, left).
*/
if (! rc)
rc = new_r_Confirm(current_ir_graph, block, right, left, pnc);
rc = new_r_Confirm(block, right, left, pnc);
pos = get_edge_src_pos(edge);
set_irn_n(succ, pos, rc);
......@@ -509,7 +509,7 @@ static void insert_non_null(ir_node *ptr, ir_node *block, env_t *env) {
ir_mode *mode = get_irn_mode(ptr);
c = new_Const(get_mode_null(mode));
c = new_r_Confirm(current_ir_graph, block, ptr, c, pn_Cmp_Lg);
c = new_r_Confirm(block, ptr, c, pn_Cmp_Lg);
}
set_irn_n(succ, pos, c);
......
......@@ -1275,7 +1275,6 @@ static ir_type *clone_type_and_cache(ir_type *tp) {
res = clone_type_method(tp, prefix);
pmap_insert(mtp_map, tp, res);
DB((dbgcall, LEVEL_2, "cloned type %+F into %+F\n", tp, res));
return res;
} /* clone_type_and_cache */
......@@ -1298,7 +1297,7 @@ static void update_calls_to_private(ir_node *call, void *env) {
ctp = clone_type_and_cache(ctp);
set_method_additional_property(ctp, mtp_property_private);
set_Call_type(call, ctp);
DB((dbgcall, LEVEL_1, "changed call to private method %+F\n", ent));
DB((dbgcall, LEVEL_1, "changed call to private method %+F using cloned type %+F\n", ent, ctp));
}
}
}
......@@ -1332,8 +1331,9 @@ void mark_private_methods(void) {
if ((get_method_additional_properties(mtp) & mtp_property_private) == 0) {
/* need a new type */
mtp = clone_type_and_cache(mtp);
set_entity_type(ent, mtp);
set_method_additional_property(mtp, mtp_property_private);
set_entity_type(ent, mtp);
DB((dbgcall, LEVEL_2, "changed entity type of %+F to %+F\n", ent, mtp));
changed = 1;
}
}
......
......@@ -114,7 +114,7 @@ ir_region *get_irn_region(ir_node *n) {
}
/**
* Return non-if a given firm thing is a region.
* Return non-zero if a given firm thing is a region.
*/
int is_region(const void *thing) {
const firm_kind *kind = thing;
......@@ -122,7 +122,7 @@ int is_region(const void *thing) {
}
/**
* Return the number of predecessors in a region.
* Return the number of predecessors of a region.
*/
int get_region_n_preds(const ir_region *reg) {
return ARR_LEN(reg->pred);
......@@ -171,13 +171,13 @@ void set_region_succ(ir_region *reg, int pos, ir_region *n) {
/** Walker environment. */
typedef struct walk_env {
struct obstack *obst; /**< an obstack to allocate from. */
struct obstack *obst; /**< An obstack to allocate from. */
ir_region **post; /**< The list of all currently existent top regions. */
unsigned l_post; /**< length of the allocated regions array. */
unsigned l_post; /**< The length of the allocated regions array. */
unsigned premax; /**< maximum pre counter */
unsigned postmax; /**< maximum post counter */
ir_node *start_block; /**< the start block of the graph. */
ir_node *end_block; /**< the end block of the graph. */
ir_node *start_block; /**< The start block of the graph. */
ir_node *end_block; /**< The end block of the graph. */
} walk_env;
/**
......@@ -248,7 +248,7 @@ static void wrap_BasicBlocks(ir_node *block, void *ctx) {
} /* wrap_BasicBlocks */
/**
* Create the pred and succ edges for Block wrapper.
* Post-walker: Create the pred and succ edges for Block wrapper.
* Kill edges to the Start and End blocks.
*/
static void update_BasicBlock_regions(ir_node *blk, void *ctx) {
......@@ -257,7 +257,7 @@ static void update_BasicBlock_regions(ir_node *blk, void *ctx) {
int i, j, len;
if (blk == env->start_block) {
/* handle Firm's self loop */
/* handle Firm's self loop: Start block has no predecessors */
reg->pred = NEW_ARR_D(ir_region *, env->obst, 0);
} else {
len = get_Block_n_cfgpreds(blk);
......@@ -278,7 +278,7 @@ static void update_BasicBlock_regions(ir_node *blk, void *ctx) {
ARR_SHRINKLEN(reg->succ, j);
} /* update_BasicBlock_regions */
/** Allocate a new region of a obstack */
/** Allocate a new region on an obstack */
#define ALLOC_REG(obst, reg, tp) \
do { \
(reg) = obstack_alloc((obst), sizeof(*(reg))); \
......@@ -597,14 +597,14 @@ static ir_region *new_NaturalLoop(struct obstack *obst, ir_region *head) {
} /* new_NaturalLoop */
/**
* Return true if a is an ancestor of b in DFS search.
* Return true if region a is an ancestor of region b in DFS search.
*/
/* A region a is a DFS ancestor of b iff a was entered before b
 * (prenum ordering) and left after b (postnum ordering). */
static int is_ancestor(const ir_region *a, const ir_region *b) {
	if (a->prenum > b->prenum)
		return 0;
	return a->postnum > b->postnum;
}
/**
* Return true if region pred is a predecessor of region n.
* Return true if region pred is a predecessor of region n.
*/
static int pred_of(const ir_region *pred, const ir_region *n) {
int i;
......@@ -616,7 +616,7 @@ static int pred_of(const ir_region *pred, const ir_region *n) {
}
/**
* Return true if region succ is a successor of region n.
* Return true if region succ is a successor of region n.
*/
static int succ_of(const ir_region *succ, const ir_region *n) {
int i;
......@@ -628,7 +628,7 @@ static int succ_of(const ir_region *succ, const ir_region *n) {
}
/**
* Reverse linked list.
* Reverse a linked list of regions.
*/
static struct ir_region *reverse_list(ir_region *n) {
ir_region *prev = NULL, *next;
......@@ -719,7 +719,7 @@ static ir_region *cyclic_region_type(struct obstack *obst, ir_region *node) {
}
/**
* Clear all links on a list. Needed, because we expect cleared links-
* Clear all links on a list. Needed, because we expect cleared links.
*/
static void clear_list(ir_region *list) {
ir_region *next;
......@@ -977,7 +977,7 @@ static void reduce(walk_env *env, ir_region *reg) {
replace_pred(succ, reg);
}
/* second third: replace all succs in predessors */
/* third step: replace all succs in predecessors */
for (i = get_region_n_preds(reg) - 1; i >= 0; --i) {
ir_region *pred = get_region_pred(reg, i);
......@@ -1038,7 +1038,7 @@ ir_reg_tree *construct_region_tree(ir_graph *irg) {
do {
ir_region *reg, *n = env.post[postctr];
do {
if (n->parent) {
if (n->parent != NULL) {
/* already folded */
break;
}
......
......@@ -114,7 +114,6 @@ static int allowed_arm_immediate(int offset, arm_vals *result) {
* Fix an IncSP node if the offset gets too big
*/
static void peephole_be_IncSP(ir_node *node) {
ir_graph *irg;
ir_node *block;
int offset, cnt, align, sign = 1;
arm_vals v;
......@@ -133,12 +132,11 @@ static void peephole_be_IncSP(ir_node *node) {
be_set_IncSP_offset(node, (int)arm_rol(v.values[0], v.shifts[0]) * sign);
irg = current_ir_graph;
block = get_nodes_block(node);
align = be_get_IncSP_align(node);
for (cnt = 1; cnt < v.ops; ++cnt) {
int value = (int)arm_rol(v.values[cnt], v.shifts[cnt]);
ir_node *next = be_new_IncSP(&arm_gp_regs[REG_SP], irg, block, node, value * sign, align);
ir_node *next = be_new_IncSP(&arm_gp_regs[REG_SP], block, node, value * sign, align);
sched_add_after(node, next);
node = next;
}
......@@ -252,7 +250,6 @@ static void peephole_be_Reload(ir_node *node) {
ir_node *block, *ptr, *frame, *load, *mem, *proj;
ir_mode *mode;
dbg_info *dbg;
ir_graph *irg;
arm_vals v;
const arch_register_t *reg;
......@@ -273,7 +270,6 @@ static void peephole_be_Reload(ir_node *node) {
reg = arch_get_irn_register(node);
mem = be_get_Reload_mem(node);
mode = get_irn_mode(node);
irg = current_ir_graph;
dbg = get_irn_dbg_info(node);
block = get_nodes_block(node);
......@@ -282,7 +278,7 @@ static void peephole_be_Reload(ir_node *node) {
/* transform into fpaLdf */
load = new_bd_arm_fpaLdf(dbg, block, ptr, mem, mode);
sched_add_before(node, load);
proj = new_rd_Proj(dbg, irg, block, load, mode, pn_arm_fpaLdf_res);
proj = new_rd_Proj(dbg, block, load, mode, pn_arm_fpaLdf_res);
arch_set_irn_register(proj, reg);
} else {
panic("peephole_be_Spill: spill not supported for this mode");
......@@ -291,7 +287,7 @@ static void peephole_be_Reload(ir_node *node) {
/* transform into Store */;
load = new_bd_arm_Load(dbg, block, ptr, mem);
sched_add_before(node, load);
proj = new_rd_Proj(dbg, irg, block, load, mode_Iu, pn_arm_Load_res);
proj = new_rd_Proj(dbg, block, load, mode_Iu, pn_arm_Load_res);
arch_set_irn_register(proj, reg);
} else {
panic("peephole_be_Spill: spill not supported for this mode");
......
......@@ -855,8 +855,8 @@ static ir_node *gen_Load(ir_node *node) {
ir_graph *irg = current_ir_graph;
/* add a result proj and a Keep to produce a pseudo use */
ir_node *proj = new_r_Proj(irg, block, new_load, mode_Iu, pn_arm_Load_res);
be_new_Keep(arch_get_irn_reg_class_out(proj), irg, block, 1, &proj);
ir_node *proj = new_r_Proj(block, new_load, mode_Iu, pn_arm_Load_res);
be_new_Keep(arch_get_irn_reg_class_out(proj), block, 1, &proj);
}
return new_load;
......@@ -1111,8 +1111,8 @@ static ir_node *gen_CopyB(ir_node *node) {
ir_node *src_copy;
ir_node *dst_copy;
src_copy = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], irg, block, new_src);
dst_copy = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], irg, block, new_dst);
src_copy = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], block, new_src);
dst_copy = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], block, new_dst);
return new_bd_arm_CopyB(dbg, block, dst_copy, src_copy,
new_bd_arm_EmptyReg(dbg, block, mode_Iu),
......@@ -1274,7 +1274,6 @@ static ir_node *gen_Proj_Load(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *load = get_Proj_pred(node);
ir_node *new_load = be_transform_node(load);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
......@@ -1287,17 +1286,17 @@ static ir_node *gen_Proj_Load(ir_node *node) {
case iro_arm_Loadhs:
/* handle all gp loads equal: they have the same proj numbers. */
if (proj == pn_Load_res) {
return new_rd_Proj(dbgi, irg, block, new_load, mode_Iu, pn_arm_Load_res);
return new_rd_Proj(dbgi, block, new_load, mode_Iu, pn_arm_Load_res);
} else if (proj == pn_Load_M) {
return new_rd_Proj(dbgi, irg, block, new_load, mode_M, pn_arm_Load_M);
return new_rd_Proj(dbgi, block, new_load, mode_M, pn_arm_Load_M);
}
break;
case iro_arm_fpaLdf:
if (proj == pn_Load_res) {
ir_mode *mode = get_Load_mode(load);
return new_rd_Proj(dbgi, irg, block, new_load, mode, pn_arm_fpaLdf_res);
return new_rd_Proj(dbgi, block, new_load, mode, pn_arm_fpaLdf_res);
} else if (proj == pn_Load_M) {
return new_rd_Proj(dbgi, irg, block, new_load, mode_M, pn_arm_fpaLdf_M);
return new_rd_Proj(dbgi, block, new_load, mode_M, pn_arm_fpaLdf_M);
}
break;
default:
......@@ -1313,14 +1312,13 @@ static ir_node *gen_Proj_CopyB(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
switch(proj) {
case pn_CopyB_M_regular:
if (is_arm_CopyB(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_CopyB_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_CopyB_M);
}
break;
default:
......@@ -1336,7 +1334,6 @@ static ir_node *gen_Proj_Quot(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(node);
long proj = get_Proj_proj(node);
......@@ -1344,24 +1341,24 @@ static ir_node *gen_Proj_Quot(ir_node *node) {
switch (proj) {
case pn_Quot_M:
if (is_arm_fpaDvf(new_pred) || is_arm_fpaDvf_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_fpaDvf_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_fpaDvf_M);
} else if (is_arm_fpaRdf(new_pred) || is_arm_fpaRdf_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_fpaRdf_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_fpaRdf_M);
} else if (is_arm_fpaFdv(new_pred) || is_arm_fpaFdv_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_fpaFdv_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_fpaFdv_M);
} else if (is_arm_fpaFrd(new_pred) || is_arm_fpaFrd_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_fpaFrd_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_fpaFrd_M);
}
break;
case pn_Quot_res:
if (is_arm_fpaDvf(new_pred) || is_arm_fpaDvf_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode, pn_arm_fpaDvf_res);
return new_rd_Proj(dbgi, block, new_pred, mode, pn_arm_fpaDvf_res);
} else if (is_arm_fpaRdf(new_pred) || is_arm_fpaRdf_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode, pn_arm_fpaRdf_res);
return new_rd_Proj(dbgi, block, new_pred, mode, pn_arm_fpaRdf_res);
} else if (is_arm_fpaFdv(new_pred) || is_arm_fpaFdv_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode, pn_arm_fpaFdv_res);
return new_rd_Proj(dbgi, block, new_pred, mode, pn_arm_fpaFdv_res);
} else if (is_arm_fpaFrd(new_pred) || is_arm_fpaFrd_i(new_pred)) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode, pn_arm_fpaFrd_res);
return new_rd_Proj(dbgi, block, new_pred, mode, pn_arm_fpaFrd_res);
}
break;
default:
......@@ -1377,20 +1374,18 @@ static ir_node *gen_Proj_be_AddSP(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
if (proj == pn_be_AddSP_sp) {
ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
ir_node *res = new_rd_Proj(dbgi, block, new_pred, mode_Iu,
pn_arm_SubSPandCopy_stack);
arch_set_irn_register(res, &arm_gp_regs[REG_SP]);
return res;
} else if(proj == pn_be_AddSP_res) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
pn_arm_SubSPandCopy_addr);
return new_rd_Proj(dbgi, block, new_pred, mode_Iu, pn_arm_SubSPandCopy_addr);
} else if (proj == pn_be_AddSP_M) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_SubSPandCopy_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_SubSPandCopy_M);
}
panic("Unsupported Proj from AddSP");
}
......@@ -1402,17 +1397,16 @@ static ir_node *gen_Proj_be_SubSP(ir_node *node) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
if (proj == pn_be_SubSP_sp) {
ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu,
ir_node *res = new_rd_Proj(dbgi, block, new_pred, mode_Iu,
pn_arm_AddSP_stack);
arch_set_irn_register(res, &arm_gp_regs[REG_SP]);
return res;
} else if (proj == pn_be_SubSP_M) {
return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_arm_AddSP_M);
return new_rd_Proj(dbgi, block, new_pred, mode_M, pn_arm_AddSP_M);
}
panic("Unsupported Proj from SubSP");
}
......@@ -1470,7 +1464,7 @@ static ir_node *gen_Proj(ir_node *node) {
/* we exchange the ProjX with a jump */
block = be_transform_node(block);
jump = new_rd_Jmp(dbgi, irg, block);
jump = new_rd_Jmp(dbgi, block);
return jump;
}
if (node == get_irg_anchor(irg, anchor_tls)) {
......@@ -1481,7 +1475,7 @@ static ir_node *gen_Proj(ir_node *node) {
ir_mode *mode = get_irn_mode(node);
if (mode_needs_gp_reg(mode)) {
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *new_proj = new_r_Proj(irg, block, new_pred, mode_Iu,
ir_node *new_proj = new_r_Proj(block, new_pred, mode_Iu,
get_Proj_proj(node));
new_proj->node_nr = node->node_nr;
return new_proj;
......
......@@ -302,9 +302,9 @@ static ir_node *convert_dbl_to_int(ir_node *bl, ir_node *arg, ir_node *mem,
conv = new_bd_arm_fpaDbl2GP(NULL, bl, arg, mem);
/* move high/low */
*resL = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_low);
*resH = new_r_Proj(irg, bl, conv, mode_Is, pn_arm_fpaDbl2GP_high);
mem = new_r_Proj(irg, bl, conv, mode_M, pn_arm_fpaDbl2GP_M);
*resL = new_r_Proj(bl, conv, mode_Is, pn_arm_fpaDbl2GP_low);
*resH = new_r_Proj(bl, conv, mode_Is, pn_arm_fpaDbl2GP_high);
mem = new_r_Proj(bl, conv, mode_M, pn_arm_fpaDbl2GP_M);
}
return mem;
}
......@@ -836,15 +836,15 @@ static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *
arch_register_req_type_ignore);
/* copy SP to IP (so we can spill it */
ip = be_new_Copy(gp, irg, block, sp);
ip = be_new_Copy(gp, block, sp);
be_set_constr_single_reg_out(ip, 0, &arm_gp_regs[REG_R12], 0);
/* spill stuff */
store = new_bd_arm_StoreStackM4Inc(NULL, block, sp, fp, ip, lr, pc, *mem);
sp = new_r_Proj(irg, block, store, env->arch_env->sp->reg_class->mode, pn_arm_StoreStackM4Inc_ptr);
sp = new_r_Proj(block, store, env->arch_env->sp->reg_class->mode, pn_arm_StoreStackM4Inc_ptr);
arch_set_irn_register(sp, env->arch_env->sp);
*mem = new_r_Proj(irg, block, store, mode_M, pn_arm_StoreStackM4Inc_M);
*mem = new_r_Proj(block, store, mode_M, pn_arm_StoreStackM4Inc_M);
/* frame pointer is ip-4 (because ip is our old sp value) */
fp = new_bd_arm_Sub_i(NULL, block, ip, get_irn_mode(fp), 4);
......@@ -856,7 +856,7 @@ static const arch_register_t *arm_abi_prologue(void *self, ir_node **mem, pmap *
* to extract this order from register requirements) */
add_irn_dep(fp, store);
fp = be_new_Copy(gp, irg, block, fp); // XXX Gammelfix: only be_ have custom register requirements
fp = be_new_Copy(gp, block, fp); // XXX Gammelfix: only be_ have custom register requirements
be_set_constr_single_reg_out(fp, 0, env->arch_env->bp,
arch_register_req_type_ignore);
arch_set_irn_register(fp, env->arch_env->bp);
......@@ -882,22 +882,22 @@ static void arm_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_m
// TODO: Activate Omit fp in epilogue
if (env->flags.try_omit_fp) {
curr_sp = be_new_IncSP(env->arch_env->sp, env->irg, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
curr_sp = be_new_IncSP(env->arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
be_set_constr_single_reg_out(curr_lr, 0, &arm_gp_regs[REG_LR], 0);
curr_pc = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr );
curr_pc = be_new_Copy(&arm_reg_classes[CLASS_arm_gp], bl, curr_lr );
be_set_constr_single_reg_out(curr_pc, BE_OUT_POS(0), &arm_gp_regs[REG_PC], 0);
} else {
ir_node *load_node;
load_node = new_bd_arm_LoadStackM3Epilogue(NULL, bl, curr_bp, *mem);
curr_bp = new_r_Proj(env->irg, bl, load_node, env->arch_env->bp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res0);
curr_sp = new_r_Proj(env->irg, bl, load_node, env->arch_env->sp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res1);
curr_pc = new_r_Proj(env->irg, bl, load_node, mode_Iu, pn_arm_LoadStackM3Epilogue_res2);
*mem = new_r_Proj(env->irg, bl, load_node, mode_M, pn_arm_LoadStackM3Epilogue_M);
curr_bp = new_r_Proj(bl, load_node, env->arch_env->bp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res0);
curr_sp = new_r_Proj(bl, load_node, env->arch_env->sp->reg_class->mode, pn_arm_LoadStackM3Epilogue_res1);
curr_pc = new_r_Proj(bl, load_node, mode_Iu, pn_arm_LoadStackM3Epilogue_res2);
*mem = new_r_Proj(bl, load_node, mode_M, pn_arm_LoadStackM3Epilogue_M);
arch_set_irn_register(curr_bp, env->arch_env->bp);
arch_set_irn_register(curr_sp, env->arch_env->sp);
arch_set_irn_register(curr_pc, &arm_gp_regs[REG_PC]);
......
......@@ -501,7 +501,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
* to adjust stack alignment for the call.
*/
if (stack_dir < 0 && !do_seq && !no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size, 1);
curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);
}
dbgi = get_irn_dbg_info(irn);
......@@ -544,7 +544,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
*/
if (do_seq) {
curr_ofs = 0;
addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before, 0);
addr = curr_sp = be_new_IncSP(sp, bl, curr_sp, param_size + arg->space_before, 0);
add_irn_dep(curr_sp, curr_mem);
}
else {
......@@ -558,7 +558,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
constmode = mode_Is;
}
addr = new_r_Const_long(irg, constmode, curr_ofs);
addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
addr = new_r_Add(bl, curr_sp, addr, mach_mode);
}
}
......@@ -566,8 +566,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
if (is_atomic_type(param_type)) {
ir_node *store;
ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
store = new_rd_Store(dbgi, irg, bl, mem_input, addr, param, 0);
mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
store = new_rd_Store(dbgi, bl, mem_input, addr, param, 0);
mem = new_r_Proj(bl, store, mode_M, pn_Store_M);
}
/* Make a mem copy for compound arguments. */
......@@ -575,8 +575,8 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
ir_node *copy;
assert(mode_is_reference(get_irn_mode(param)));
copy = new_rd_CopyB(dbgi, irg, bl, curr_mem, addr, param, param_type);
mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
}
curr_ofs += param_size;
......@@ -592,7 +592,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
/* We need the sync only, if we didn't build the stores sequentially. */
if (! do_seq) {
if (n_stack_params >= 1) {
curr_mem = new_r_Sync(irg, bl, n_stack_params + 1, in);
curr_mem = new_r_Sync(bl, n_stack_params + 1, in);
} else {
curr_mem = get_Call_mem(irn);
}
......@@ -706,8 +706,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
ARR_APP1(ir_node *, env->calls, low_call);
/* create new stack pointer */
curr_sp = new_r_Proj(irg, bl, low_call, get_irn_mode(curr_sp),
pn_be_Call_sp);
curr_sp = new_r_Proj(bl, low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
arch_register_req_type_ignore | arch_register_req_type_produces_sp);
arch_set_irn_register(curr_sp, sp);
......@@ -731,7 +730,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
if (proj == NULL) {
ir_type *res_type = get_method_res_type(call_tp, i);
ir_mode *mode = get_type_mode(res_type);
proj = new_r_Proj(irg, bl, low_call, mode, pn);
proj = new_r_Proj(bl, low_call, mode, pn);
res_projs[i] = proj;
} else {
set_Proj_pred(proj, low_call);
......@@ -795,7 +794,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
obstack_ptr_grow(obst, curr_sp);
foreach_pset_new(&destroyed_regs, reg, iter) {
ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
ir_node *proj = new_r_Proj(bl, low_call, reg->reg_class->mode, curr_res_proj);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
......@@ -817,7 +816,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
/* create the Keep for the caller save registers */
in = (ir_node **) obstack_finish(obst);
keep = be_new_Keep(NULL, irg, bl, n, in);
keep = be_new_Keep(NULL, bl, n, in);
for (i = 0; i < n; ++i) {
const arch_register_t *reg = get_irn_link(in[i]);
be_node_set_reg_class_in(keep, i, reg->reg_class);
......@@ -841,13 +840,13 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
}
if (! mem_proj) {
mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_be_Call_M_regular);
mem_proj = new_r_Proj(bl, low_call, mode_M, pn_be_Call_M_regular);
keep_alive(mem_proj);
}
}
/* Clean up the stack frame or revert alignment fixes if we allocated it */
if (! no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size, 0);
curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);
}
be_abi_call_free(call);
......@@ -864,30 +863,31 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
*
* @param alignment the minimum stack alignment
* @param size the node containing the non-aligned size
* @param irg the irg where new nodes are allocated on
* @param irg the block where new nodes are allocated on
* @param block the block where new nodes are allocated on
* @param dbg debug info for new nodes
*
* @return a node representing the aligned size
*/
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
ir_graph *irg, ir_node *block, dbg_info *dbg)
ir_node *block, dbg_info *dbg)
{
if (stack_alignment > 1) {
ir_mode *mode;
tarval *tv;
ir_node *mask;
ir_mode *mode;
tarval *tv;
ir_node *mask;
ir_graph *irg;