Commit e628faf6 authored by Matthias Braun

rename the Proj node's proj attribute to num and change its type to unsigned

parent f3209aa8
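
For callers, the change below boils down to replacing the long-typed get_Proj_proj()/set_Proj_proj() accessors with the unsigned get_Proj_num()/set_Proj_num() ones, and adjusting format strings from %ld to %u. A minimal caller-side sketch, assuming <libfirm/firm.h> and an existing Proj node; the helper name reroute_proj is made up for illustration:

#include <libfirm/firm.h>

/* Reroutes an existing Proj to a new predecessor while keeping its number.
 * Only the accessor names and the number's type differ from the old API. */
static void reroute_proj(ir_node *proj, ir_node *new_pred)
{
	/* before this commit:
	 *   long pn = get_Proj_proj(proj);
	 *   set_Proj_proj(proj, pn);                                          */
	unsigned pn = get_Proj_num(proj);  /* attribute is now "num", unsigned */
	set_Proj_pred(proj, new_pred);
	set_Proj_num(proj, pn);
}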
......@@ -579,7 +579,7 @@ FIRM_API size_t ir_switch_table_get_n_entries(const ir_switch_table *table);
* @param pn Proj number taken on match
*/
FIRM_API void ir_switch_table_set(ir_switch_table *table, size_t entry,
ir_tarval *min, ir_tarval *max, long pn);
ir_tarval *min, ir_tarval *max, unsigned pn);
/** Returns maximum tarval value of switch table entry @p entry */
FIRM_API ir_tarval *ir_switch_table_get_max(const ir_switch_table *table,
......@@ -590,7 +590,8 @@ FIRM_API ir_tarval *ir_switch_table_get_min(const ir_switch_table *table,
size_t entry);
/** Returns proj number taken if switch table entry @p entry matches */
FIRM_API long ir_switch_table_get_pn(const ir_switch_table *table, size_t entry);
FIRM_API unsigned ir_switch_table_get_pn(const ir_switch_table *table,
size_t entry);
/** Duplicates switch table @p table on obstack of @p irg */
FIRM_API ir_switch_table *ir_switch_table_duplicate(ir_graph *irg, const ir_switch_table *table);
......
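The public switch-table API changes the same way: the Proj number passed to ir_switch_table_set() and returned by ir_switch_table_get_pn() is now unsigned. A small sketch, assuming ir_new_switch_table() is the usual libfirm constructor (it is not part of this diff); the other calls appear in the hunks above:

#include <libfirm/firm.h>

/* Builds a one-entry table mapping the selector range [0,9] to Proj number 1. */
static ir_switch_table *make_example_table(ir_graph *irg)
{
	ir_switch_table *table = ir_new_switch_table(irg, 1);
	ir_tarval       *min   = new_tarval_from_long(0, mode_Is);
	ir_tarval       *max   = new_tarval_from_long(9, mode_Is);
	ir_switch_table_set(table, 0, min, max, 1u);    /* pn argument: unsigned */

	unsigned pn = ir_switch_table_get_pn(table, 0); /* returns unsigned too  */
	(void)pn;
	return table;
}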
......@@ -295,8 +295,8 @@ FIRM_API void ir_op_set_memory_index(ir_op *op, int memory_index);
* Sets proj-number for X_regular and X_except projs of fragile nodes.
* Note: should only be used immediately after new_ir_op
*/
FIRM_API void ir_op_set_fragile_indices(ir_op *op, int pn_x_regular,
int pn_x_except);
FIRM_API void ir_op_set_fragile_indices(ir_op *op, unsigned pn_x_regular,
unsigned pn_x_except);
/** @} */
......
......@@ -190,7 +190,7 @@ static void analyze_ent_args(ir_entity *ent)
to analyze them.*/
foreach_irn_out_r(irg_args, i, arg) {
ir_mode *arg_mode = get_irn_mode(arg);
long proj_nr = get_Proj_proj(arg);
unsigned proj_nr = get_Proj_num(arg);
if (mode_is_reference(arg_mode))
rw_info[proj_nr] |= analyze_arg(arg, rw_info[proj_nr]);
......@@ -299,13 +299,13 @@ static unsigned calc_method_param_weight(ir_node *arg)
break;
case iro_Tuple:
/* unoptimized tuple */
for (int j = get_Tuple_n_preds(succ); j-- > 0; ) {
for (unsigned j = get_Tuple_n_preds(succ); j-- > 0; ) {
ir_node *pred = get_Tuple_pred(succ, j);
if (pred == arg) {
/* look for Proj(j) */
foreach_irn_out_r(succ, k, succ_succ) {
if (is_Proj(succ_succ)) {
if (get_Proj_proj(succ_succ) == j) {
if (get_Proj_num(succ_succ) == j) {
/* found */
weight += calc_method_param_weight(succ_succ);
}
......@@ -379,7 +379,7 @@ static void analyze_method_params_weight(ir_entity *ent)
ir_node *irg_args = get_irg_args(irg);
foreach_irn_out_r(irg_args, i, arg) {
long const proj_nr = get_Proj_proj(arg);
unsigned const proj_nr = get_Proj_num(arg);
ent->attr.mtd_attr.param_weight[proj_nr] += calc_method_param_weight(arg);
}
}
......
......@@ -241,7 +241,7 @@ static ir_entity *get_member_method(ir_node *member, size_t pos)
/* forward */
static void free_mark(ir_node *node, pset *set);
static void free_mark_proj(ir_node *node, long n, pset *set)
static void free_mark_proj(ir_node *node, unsigned n, pset *set)
{
assert(get_irn_mode(node) == mode_T);
if (get_irn_link(node) == MARK) {
......@@ -255,7 +255,7 @@ static void free_mark_proj(ir_node *node, long n, pset *set)
* which is handled by free_ana_walker(). */
ir_node *pred = get_Proj_pred(node);
if (get_irn_link(pred) != MARK && is_Tuple(pred)) {
free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
free_mark_proj(get_Tuple_pred(pred, get_Proj_num(node)), n, set);
}
break;
}
......@@ -319,7 +319,7 @@ static void free_mark(ir_node *node, pset *set)
break;
case iro_Proj:
free_mark_proj(get_Proj_pred(node), get_Proj_proj(node), set);
free_mark_proj(get_Proj_pred(node), get_Proj_num(node), set);
break;
default:
break;
......@@ -504,7 +504,7 @@ static size_t get_free_methods(ir_entity ***free_methods)
static void callee_ana_node(ir_node *node, pset *methods);
static void callee_ana_proj(ir_node *node, long n, pset *methods)
static void callee_ana_proj(ir_node *node, unsigned n, pset *methods)
{
assert(get_irn_mode(node) == mode_T);
if (get_irn_link(node) == MARK) {
......@@ -520,7 +520,7 @@ static void callee_ana_proj(ir_node *node, long n, pset *methods)
ir_node *pred = get_Proj_pred(node);
if (get_irn_link(pred) != MARK) {
if (is_Tuple(pred)) {
callee_ana_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, methods);
callee_ana_proj(get_Tuple_pred(pred, get_Proj_num(node)), n, methods);
} else {
pset_insert_ptr(methods, get_unknown_entity()); /* free method -> unknown */
}
......@@ -599,7 +599,7 @@ static void callee_ana_node(ir_node *node, pset *methods)
break;
case iro_Proj:
callee_ana_proj(get_Proj_pred(node), get_Proj_proj(node), methods);
callee_ana_proj(get_Proj_pred(node), get_Proj_num(node), methods);
break;
case iro_Add:
......
......@@ -212,7 +212,7 @@ unreachable_X:
if (is_undefined(b))
goto unreachable_X;
if (b->z == b->o) {
if ((b->z == t) == get_Proj_proj(irn)) {
if ((b->z == t) == get_Proj_num(irn)) {
z = o = t;
} else {
z = o = f;
......@@ -632,7 +632,7 @@ undefined:
case iro_Proj: {
ir_node *const pred = get_Proj_pred(irn);
if (is_Tuple(pred)) {
long pn = get_Proj_proj(irn);
unsigned pn = get_Proj_num(irn);
ir_node *const op = get_Tuple_pred(pred, pn);
bitinfo *const b = get_bitinfo(op);
z = b->z;
......
......@@ -57,7 +57,7 @@ static ir_node *get_effective_use_block(ir_node *node, int pos)
return get_nodes_block(node);
}
static ir_node *get_case_value(ir_node *switchn, long pn)
static ir_node *get_case_value(ir_node *switchn, unsigned pn)
{
ir_graph *irg = get_irn_irg(switchn);
const ir_switch_table *table = get_Switch_table(switchn);
......@@ -87,7 +87,7 @@ static ir_node *get_case_value(ir_node *switchn, long pn)
* Branch labels are a simple case. We can replace the value
* by a Const with the branch label.
*/
static void handle_case(ir_node *block, ir_node *switchn, long pn, env_t *env)
static void handle_case(ir_node *block, ir_node *switchn, unsigned pn, env_t *env)
{
ir_node *c = NULL;
ir_node *selector = get_Switch_selector(switchn);
......@@ -165,7 +165,7 @@ static void handle_modeb(ir_node *block, ir_node *selector, pn_Cond pnc, env_t *
ir_node *cond = get_Proj_pred(get_Block_cfgpred(block, 0));
foreach_out_edge(cond, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (get_Proj_proj(proj) == (long)pnc)
if (get_Proj_num(proj) == pnc)
continue;
edge = get_irn_out_edge_first(proj);
other_blk = get_edge_src_irn(edge);
......@@ -411,20 +411,20 @@ static void insert_Confirm_in_block(ir_node *block, void *data)
env_t *env = (env_t*)data;
ir_node *cond = get_Proj_pred(proj);
if (is_Switch(cond)) {
long proj_nr = get_Proj_proj(proj);
unsigned proj_nr = get_Proj_num(proj);
handle_case(block, cond, proj_nr, env);
} else if (is_Cond(cond)) {
ir_node *selector = get_Cond_selector(cond);
ir_relation rel;
handle_modeb(block, selector, (pn_Cond) get_Proj_proj(proj), env);
handle_modeb(block, selector, (pn_Cond) get_Proj_num(proj), env);
if (! is_Cmp(selector))
return;
rel = get_Cmp_relation(selector);
if (get_Proj_proj(proj) != pn_Cond_true) {
if (get_Proj_num(proj) != pn_Cond_true) {
/* it's the false branch */
rel = get_negated_relation(rel);
}
......
......@@ -629,7 +629,7 @@ static ir_entity_usage determine_entity_usage(const ir_node *irn,
/* skip tuples */
case iro_Tuple:
foreach_irn_out_r(succ, k, proj) {
if (is_Proj(proj) && get_Proj_proj(proj) == succ_pos) {
if (is_Proj(proj) && get_Proj_num(proj) == (unsigned)succ_pos) {
res |= determine_entity_usage(proj, entity);
break;
}
......@@ -690,7 +690,7 @@ static void analyse_irg_entity_usage(ir_graph *irg)
}
/* check inner functions accessing outer frame */
int static_link_arg = 0;
unsigned static_link_arg = 0;
for (size_t i = 0, n = get_class_n_members(frame_type); i < n; ++i) {
ir_entity *ent = get_class_member(frame_type, i);
if (!is_method_entity(ent))
......@@ -703,7 +703,7 @@ static void analyse_irg_entity_usage(ir_graph *irg)
assure_irg_outs(inner_irg);
ir_node *args = get_irg_args(inner_irg);
foreach_irn_out_r(args, j, arg) {
if (get_Proj_proj(arg) == static_link_arg) {
if (get_Proj_num(arg) == static_link_arg) {
foreach_irn_out_r(arg, k, succ) {
if (is_Member(succ)) {
ir_entity *entity = get_Member_entity(succ);
......
......@@ -247,7 +247,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
ir_node *new_block = be_transform_node(block);
ir_node *start = get_Proj_pred(node);
ir_node *new_start = be_transform_node(start);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
switch ((pn_Start) pn) {
case pn_Start_X_initial_exec:
......@@ -259,7 +259,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
case pn_Start_P_frame_base:
return new_rd_Proj(dbgi, new_start, gp_regs_mode, pn_TEMPLATE_Start_stack);
}
panic("unexpected Start proj %ld\n", pn);
panic("unexpected Start proj %u\n", pn);
}
static ir_node *gen_Proj(ir_node *node)
......
......@@ -611,7 +611,7 @@ static void emit_amd64_Jcc(const ir_node *irn)
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
long nr = get_Proj_proj(proj);
unsigned nr = get_Proj_num(proj);
if (nr == pn_Cond_true) {
proj_true = proj;
} else {
......
......@@ -181,7 +181,7 @@ static ir_node *amd64_turn_back_am(ir_node *node)
ir_node *out = get_edge_src_irn(edge);
if (get_irn_mode(out) == mode_M) {
set_Proj_pred(out, load);
set_Proj_proj(out, pn_amd64_Mov_M);
set_Proj_num(out, pn_amd64_Mov_M);
break;
}
}
......
......@@ -471,7 +471,7 @@ static ir_node *source_am_possible(ir_node *block, ir_node *node)
ir_node *load = get_Proj_pred(node);
if (!is_Load(load))
return NULL;
assert(get_Proj_proj(node) == pn_Load_res);
assert(get_Proj_num(node) == pn_Load_res);
if (get_nodes_block(load) != block)
return NULL;
/* make sure we are the only user */
......@@ -1079,12 +1079,12 @@ static ir_node *gen_Proj_Div(ir_node *const node)
{
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
assert((long)pn_amd64_Div_M == (long)pn_amd64_IDiv_M);
assert((long)pn_amd64_Div_res_div == (long)pn_amd64_IDiv_res_div);
assert((long)pn_amd64_xDivs_M == (long)pn_amd64_IDiv_M);
assert((long)pn_amd64_xDivs_res_div == (long)pn_amd64_IDiv_res_div);
assert((unsigned)pn_amd64_Div_M == (unsigned)pn_amd64_IDiv_M);
assert((unsigned)pn_amd64_Div_res_div == (unsigned)pn_amd64_IDiv_res_div);
assert((unsigned)pn_amd64_xDivs_M == (unsigned)pn_amd64_IDiv_M);
assert((unsigned)pn_amd64_xDivs_res_div == (unsigned)pn_amd64_IDiv_res_div);
ir_mode *mode;
if (mode_is_float(get_Div_resmode(pred)))
......@@ -1117,10 +1117,10 @@ static ir_node *gen_Proj_Mod(ir_node *const node)
{
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
assert((long)pn_amd64_Div_M == (long)pn_amd64_IDiv_M);
assert((long)pn_amd64_Div_res_mod == (long)pn_amd64_IDiv_res_mod);
assert((unsigned)pn_amd64_Div_M == (unsigned)pn_amd64_IDiv_M);
assert((unsigned)pn_amd64_Div_res_mod == (unsigned)pn_amd64_IDiv_res_mod);
switch ((pn_Mod)pn) {
case pn_Mod_M:
return new_r_Proj(new_pred, mode_M, pn_amd64_Div_M);
......@@ -1401,7 +1401,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
ir_graph *irg = get_irn_irg(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
be_transform_node(get_Proj_pred(node));
switch ((pn_Start)pn) {
......@@ -1414,7 +1414,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
case pn_Start_P_frame_base:
return get_frame_base(irg);
}
panic("Unexpected Start Proj: %ld\n", pn);
panic("Unexpected Start Proj: %u\n", pn);
}
static ir_node *get_stack_pointer_for(ir_node *node)
......@@ -1480,7 +1480,7 @@ static ir_node *gen_Return(ir_node *node)
}
/* callee saves */
ir_node *start = get_irg_start(irg);
long start_pn = start_callee_saves_offset;
unsigned start_pn = start_callee_saves_offset;
for (size_t i = 0; i < N_AMD64_REGISTERS; ++i) {
if (!rbitset_is_set(cconv->callee_saves, i))
continue;
......@@ -1735,7 +1735,7 @@ static ir_node *gen_Call(ir_node *node)
static ir_node *gen_Proj_Call(ir_node *node)
{
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *call = get_Proj_pred(node);
ir_node *new_call = be_transform_node(call);
switch ((pn_Call)pn) {
......@@ -1751,14 +1751,14 @@ static ir_node *gen_Proj_Call(ir_node *node)
static ir_node *gen_Proj_Proj_Call(ir_node *node)
{
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *call = get_Proj_pred(get_Proj_pred(node));
ir_node *new_call = be_transform_node(call);
ir_type *tp = get_Call_type(call);
amd64_cconv_t *cconv = amd64_decide_calling_convention(tp, NULL);
const reg_or_stackslot_t *res = &cconv->results[pn];
ir_mode *mode = get_irn_mode(node);
long new_pn = 1 + res->reg_offset;
unsigned new_pn = 1 + res->reg_offset;
assert(res->req != NULL);
if (mode_needs_gp_reg(mode))
......@@ -1773,19 +1773,19 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node)
{
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *args = get_Proj_pred(node);
ir_node *start = get_Proj_pred(args);
ir_node *new_start = be_transform_node(start);
assert(get_Proj_proj(args) == pn_Start_T_args);
assert(get_Proj_num(args) == pn_Start_T_args);
const reg_or_stackslot_t *param = &current_cconv->parameters[pn];
if (param->reg != NULL) {
/* argument transmitted in register */
const arch_register_t *reg = param->reg;
ir_mode *mode = reg->reg_class->mode;
long new_pn = param->reg_offset + start_params_offset;
unsigned new_pn = param->reg_offset + start_params_offset;
ir_node *value = new_r_Proj(new_start, mode, new_pn);
return value;
} else {
......@@ -2015,7 +2015,7 @@ static ir_node *gen_Conv(ir_node *node)
in, insn_mode, AMD64_OP_REG,
addr);
}
assert((long)pn_amd64_CvtSS2SI_res == (long)pn_amd64_CvtSD2SI_res);
assert((unsigned)pn_amd64_CvtSS2SI_res == (unsigned)pn_amd64_CvtSD2SI_res);
res = new_r_Proj(conv, mode_gp, pn_amd64_CvtSS2SI_res);
reqs = xmm_reqs;
......@@ -2266,27 +2266,27 @@ static ir_node *gen_Proj_Load(ir_node *node)
ir_node *load = get_Proj_pred(node);
ir_node *new_load = be_transform_node(load);
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
/* loads might be part of source address mode matches, so we don't
transform the ProjMs yet (with the exception of loads whose result is
not used) */
if (is_Load(load) && proj == pn_Load_M && get_irn_n_edges(load) > 1) {
if (is_Load(load) && pn == pn_Load_M && get_irn_n_edges(load) > 1) {
/* this is needed, because sometimes we have loops that are only
reachable through the ProjM */
be_enqueue_preds(node);
/* do it in 2 steps, to silence firm verifier */
ir_node *res = new_rd_Proj(dbgi, load, mode_M, pn_Load_M);
set_Proj_proj(res, pn_amd64_mem);
set_Proj_num(res, pn_amd64_mem);
return res;
}
/* renumber the proj */
switch (get_amd64_irn_opcode(new_load)) {
case iro_amd64_xMovs:
if (proj == pn_Load_res) {
if (pn == pn_Load_res) {
return new_rd_Proj(dbgi, new_load, mode_D, pn_amd64_xMovs_res);
} else if (proj == pn_Load_M) {
} else if (pn == pn_Load_M) {
return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_xMovs_M);
}
break;
......@@ -2295,16 +2295,16 @@ static ir_node *gen_Proj_Load(ir_node *node)
assert((int)pn_amd64_Movs_res == (int)pn_amd64_Mov_res);
assert((int)pn_amd64_Movs_M == (int)pn_amd64_Mov_M);
/* handle all gp loads equal: they have the same proj numbers. */
if (proj == pn_Load_res) {
if (pn == pn_Load_res) {
return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_Movs_res);
} else if (proj == pn_Load_M) {
} else if (pn == pn_Load_M) {
return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_Movs_M);
}
break;
case iro_amd64_Add:
case iro_amd64_And:
case iro_amd64_Cmp:
assert(proj == pn_Load_M);
assert(pn == pn_Load_M);
return new_r_Proj(new_load, mode_M, pn_amd64_mem);
default:
panic("Unsupported Proj from Load");
......@@ -2316,7 +2316,7 @@ static ir_node *gen_Proj_Load(ir_node *node)
static ir_node *gen_Proj_Store(ir_node *node)
{
ir_node *pred = get_Proj_pred(node);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
if (pn == pn_Store_M) {
return be_transform_node(pred);
} else {
......
......@@ -267,12 +267,12 @@ static void transform_MemPerm(ir_node *node)
/* exchange memprojs */
foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
int p = get_Proj_proj(proj);
int p = get_Proj_num(proj);
assert(p < arity);
set_Proj_pred(proj, pops[p]);
set_Proj_proj(proj, pn_amd64_PopAM_M);
set_Proj_num(proj, pn_amd64_PopAM_M);
}
/* remove memperm */
......
......@@ -463,7 +463,7 @@ static void emit_arm_B(const ir_node *irn)
const ir_node *proj_false = NULL;
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
long nr = get_Proj_proj(proj);
unsigned nr = get_Proj_num(proj);
if (nr == pn_Cond_true) {
proj_true = proj;
} else {
......
......@@ -183,19 +183,19 @@ static void lower_divmod(ir_node *node, ir_node *left, ir_node *right,
if (!is_Proj(proj))
continue;
switch ((pn_Div)get_Proj_proj(proj)) {
switch ((pn_Div)get_Proj_num(proj)) {
case pn_Div_M:
/* reroute to the call */
set_Proj_pred(proj, call);
set_Proj_proj(proj, pn_Call_M);
set_Proj_num(proj, pn_Call_M);
break;
case pn_Div_X_regular:
set_Proj_pred(proj, call);
set_Proj_proj(proj, pn_Call_X_regular);
set_Proj_num(proj, pn_Call_X_regular);
break;
case pn_Div_X_except:
set_Proj_pred(proj, call);
set_Proj_proj(proj, pn_Call_X_except);
set_Proj_num(proj, pn_Call_X_except);
break;
case pn_Div_res: {
ir_mode *low_mode = get_irn_mode(left_low);
......
......@@ -724,7 +724,7 @@ static ir_node *gen_arm_AddS_t(ir_node *node)
static ir_node *gen_Proj_arm_AddS_t(ir_node *node)
{
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
switch ((pn_arm_AddS_t)pn) {
......@@ -797,7 +797,7 @@ static ir_node *gen_arm_UMulL_t(ir_node *node)
static ir_node *gen_Proj_arm_UMulL_t(ir_node *node)
{
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
switch ((pn_arm_UMulL_t)pn) {
......@@ -989,7 +989,7 @@ static ir_node *gen_arm_SubS_t(ir_node *node)
static ir_node *gen_Proj_arm_SubS_t(ir_node *node)
{
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *pred = get_Proj_pred(node);
ir_node *new_pred = be_transform_node(pred);
assert((int)pn_arm_SubS_flags == (int)pn_arm_RsbS_flags);
......@@ -1488,13 +1488,13 @@ static ir_node *gen_Proj_Builtin(ir_node *proj)
case ir_bk_parity:
case ir_bk_popcount:
case ir_bk_bswap:
assert(get_Proj_proj(proj) == pn_Builtin_max+1);
assert(get_Proj_num(proj) == pn_Builtin_max+1);
return new_node;
case ir_bk_trap:
case ir_bk_debugbreak:
case ir_bk_prefetch:
case ir_bk_outport:
assert(get_Proj_proj(proj) == pn_Builtin_M);
assert(get_Proj_num(proj) == pn_Builtin_M);
return new_node;
case ir_bk_inport:
case ir_bk_inner_trampoline:
......@@ -1511,23 +1511,23 @@ static ir_node *gen_Proj_Load(ir_node *node)
ir_node *load = get_Proj_pred(node);
ir_node *new_load = be_transform_node(load);
dbg_info *dbgi = get_irn_dbg_info(node);
long proj = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
/* renumber the proj */
switch (get_arm_irn_opcode(new_load)) {
case iro_arm_Ldr:
/* handle all gp loads equal: they have the same proj numbers. */
if (proj == pn_Load_res) {
if (pn == pn_Load_res) {
return new_rd_Proj(dbgi, new_load, arm_mode_gp, pn_arm_Ldr_res);
} else if (proj == pn_Load_M) {
} else if (pn == pn_Load_M) {
return new_rd_Proj(dbgi, new_load, mode_M, pn_arm_Ldr_M);
}
break;
case iro_arm_Ldf:
if (proj == pn_Load_res) {
if (pn == pn_Load_res) {
ir_mode *mode = get_Load_mode(load);
return new_rd_Proj(dbgi, new_load, mode, pn_arm_Ldf_res);
} else if (proj == pn_Load_M) {
} else if (pn == pn_Load_M) {
return new_rd_Proj(dbgi, new_load, mode_M, pn_arm_Ldf_M);
}
break;
......@@ -1543,7 +1543,7 @@ static ir_node *gen_Proj_Div(ir_node *node)
ir_node *new_pred = be_transform_node(pred);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(node);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
switch ((pn_Div)pn) {
case pn_Div_M:
......@@ -1561,7 +1561,7 @@ static ir_node *gen_Proj_Start(ir_node *node)
{
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
switch ((pn_Start)pn) {
case pn_Start_X_initial_exec:
......@@ -1577,12 +1577,12 @@ static ir_node *gen_Proj_Start(ir_node *node)
case pn_Start_P_frame_base:
return get_start_val(get_irn_irg(node), &start_sp);
}
panic("unexpected start proj: %ld\n", pn);
panic("unexpected start proj: %u\n", pn);
}
static ir_node *gen_Proj_Proj_Start(ir_node *node)
{
long pn = get_Proj_proj(node);
unsigned pn = get_Proj_num(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
ir_graph *irg = get_irn_irg(new_block);
......@@ -1591,7 +1591,7 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node)
ir_node *new_start = be_transform_node(start);
/* Proj->Proj->Start must be a method argument */
assert(get_Proj_proj(args) == pn_Start_T_args);
assert(get_Proj_num(args) == pn_Start_T_args);
const reg_or_stackslot_t *param = &cconv->parameters[pn];
......@@ -1599,7 +1599,7 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node)
if (reg0 != NULL) {
/* argument transmitted in register */
ir_mode *mode = reg0->reg_class->mode;
long new_pn = param->reg_offset + start_params_offset;
unsigned new_pn = param->reg_offset + start_params_offset;
ir_node *value = new_r_Proj(new_start, mode, new_pn);
if (mode_is_float(mode)) {
......@@ -1664,7 +1664,7 @@ static int find_out_for_reg(ir_node *node, const arch_register_t *reg)