Commit ac7a8d6d authored by Michael Beck's avatar Michael Beck
Browse files

New inlining scheme implemented:

small functions that should be inlined in libFirm are implemented in _t.h files
with a __ prefix.
Preprocessor magic is used to automatically inline these functions whenever a _t.h
file is included instead of a .h file.
Note that this magic does not work outside libFirm unless the _t.h files are accessed directly.

[r3085]
parent 53ad6436
......@@ -80,7 +80,7 @@ entity *get_inherited_methods_implementation(entity *inh_meth) {
ir_node *addr = get_atomic_ent_value(inh_meth);
assert(addr && "constant entity without value");
if (intern_get_irn_op(addr) == op_Const) {
if (get_irn_op(addr) == op_Const) {
impl_meth = tarval_to_entity(get_Const_tarval(addr));
} else {
assert(0 && "Complex constant values not supported -- address of method should be straight constant!");
......@@ -190,7 +190,7 @@ static entity ** get_impl_methods(entity * method) {
static void sel_methods_walker(ir_node * node, pmap * ldname_map) {
if (intern_get_irn_op(node) == op_SymConst) {
if (get_irn_op(node) == op_SymConst) {
/* Wenn mglich SymConst-Operation durch Const-Operation
* ersetzen. */
if (get_SymConst_kind(node) == linkage_ptr_info) {
......@@ -207,11 +207,11 @@ static void sel_methods_walker(ir_node * node, pmap * ldname_map) {
}
}
}
} else if (intern_get_irn_op(node) == op_Sel &&
} else if (get_irn_op(node) == op_Sel &&
is_method_type(get_entity_type(get_Sel_entity(node)))) {
entity * ent = get_Sel_entity(node);
if (get_opt_optimize() && get_opt_dyn_meth_dispatch() &&
(intern_get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) {
(get_irn_op(skip_Proj(get_Sel_ptr(node))) == op_Alloc)) {
ir_node *new_node;
entity *called_ent;
/* We know which method will be called, no dispatch necessary. */
......@@ -333,7 +333,7 @@ static entity ** get_Sel_arr(ir_node * sel) {
static entity ** NULL_ARRAY = NULL;
entity * ent;
entity ** arr;
assert(sel && intern_get_irn_op(sel) == op_Sel);
assert(sel && get_irn_op(sel) == op_Sel);
ent = get_Sel_entity(sel);
assert(is_method_type(get_entity_type(ent))); /* what else? */
arr = get_entity_link(ent);
......@@ -377,14 +377,14 @@ static void callee_ana_proj(ir_node * node, long n, eset * methods) {
}
set_irn_link(node, MARK);
switch (intern_get_irn_opcode(node)) {
switch (get_irn_opcode(node)) {
case iro_Proj: {
/* proj_proj: in einem "sinnvollen" Graphen kommt jetzt ein
* op_Tuple oder ein Knoten, der eine "freie Methode"
* zurckgibt. */
ir_node * pred = get_Proj_pred(node);
if (get_irn_link(pred) != MARK) {
if (intern_get_irn_op(pred) == op_Tuple) {
if (get_irn_op(pred) == op_Tuple) {
callee_ana_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, methods);
} else {
eset_insert(methods, MARK); /* free method -> unknown */
......@@ -421,7 +421,7 @@ static void callee_ana_node(ir_node * node, eset * methods) {
}
set_irn_link(node, MARK);
switch (intern_get_irn_opcode(node)) {
switch (get_irn_opcode(node)) {
case iro_SymConst:
/* externe Methode (wegen fix_symconst!) */
eset_insert(methods, MARK); /* free method -> unknown */
......@@ -487,7 +487,7 @@ static void callee_ana_node(ir_node * node, eset * methods) {
static void callee_walker(ir_node * call, void * env) {
if (intern_get_irn_op(call) == op_Call) {
if (get_irn_op(call) == op_Call) {
eset * methods = eset_create();
entity * ent;
entity ** arr = NEW_ARR_F(entity *, 0);
......@@ -543,13 +543,13 @@ static void free_mark_proj(ir_node * node, long n, eset * set) {
return;
}
set_irn_link(node, MARK);
switch (intern_get_irn_opcode(node)) {
switch (get_irn_opcode(node)) {
case iro_Proj: {
/* proj_proj: in einem "sinnvollen" Graphen kommt jetzt ein
* op_Tuple oder ein Knoten, der in "free_ana_walker" behandelt
* wird. */
ir_node * pred = get_Proj_pred(node);
if (get_irn_link(pred) != MARK && intern_get_irn_op(pred) == op_Tuple) {
if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) {
free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
} else {
/* nothing: da in "free_ana_walker" behandelt. */
......@@ -587,7 +587,7 @@ static void free_mark(ir_node * node, eset * set) {
return; /* already visited */
}
set_irn_link(node, MARK);
switch (intern_get_irn_opcode(node)) {
switch (get_irn_opcode(node)) {
case iro_Sel: {
entity * ent = get_Sel_entity(node);
if (is_method_type(get_entity_type(ent))) {
......@@ -635,7 +635,7 @@ static void free_ana_walker(ir_node * node, eset * set) {
/* bereits in einem Zyklus besucht. */
return;
}
switch (intern_get_irn_opcode(node)) {
switch (get_irn_opcode(node)) {
/* special nodes */
case iro_Sel:
case iro_SymConst:
......@@ -652,7 +652,7 @@ static void free_ana_walker(ir_node * node, eset * set) {
set_irn_link(node, MARK);
for (i = get_Call_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_Call_param(node, i);
if (mode_is_reference(intern_get_irn_mode(pred))) {
if (mode_is_reference(get_irn_mode(pred))) {
free_mark(pred, set);
}
}
......@@ -661,10 +661,10 @@ static void free_ana_walker(ir_node * node, eset * set) {
* jemand das Gegenteil implementiert. */
default:
set_irn_link(node, MARK);
for (i = intern_get_irn_arity(node) - 1; i >= 0; --i) {
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
ir_node * pred = get_irn_n(node, i);
if (mode_is_reference(intern_get_irn_mode(pred))) {
free_mark(pred, set);
if (mode_is_reference(get_irn_mode(pred))) {
free_mark(pred, set);
}
}
break;
......
......@@ -27,7 +27,7 @@
* very careful!
*/
static INLINE int *mere_get_backarray(ir_node *n) {
switch(intern_get_irn_opcode(n)) {
switch (get_irn_opcode(n)) {
case iro_Block:
if (!get_Block_matured(n)) return NULL;
if (interprocedural_view && n->attr.block.in_cg) {
......@@ -82,7 +82,7 @@ static INLINE bool legal_backarray (ir_node *n) {
INLINE void fix_backedges(struct obstack *obst, ir_node *n) {
opcode opc = intern_get_irn_opcode(n);
opcode opc = get_irn_opcode(n);
int *arr = mere_get_backarray(n);
if (ARR_LEN(arr) == ARR_LEN(get_irn_in(n))-1)
return;
......@@ -130,7 +130,7 @@ bool has_backedges (ir_node *n) {
int i;
int *ba = get_backarray (n);
if (ba)
for (i = 0; i < intern_get_irn_arity(n); i++)
for (i = 0; i < get_irn_arity(n); i++)
if (ba[i]) return true;
return false;
}
......@@ -142,12 +142,12 @@ void clear_backedges (ir_node *n) {
interprocedural_view = 0;
ba = get_backarray (n);
if (ba)
for (i = 0; i < intern_get_irn_arity(n); i++)
for (i = 0; i < get_irn_arity(n); i++)
ba[i] = 0;
interprocedural_view = 1;
ba = get_backarray (n);
if (ba)
for (i = 0; i < intern_get_irn_arity(n); i++)
for (i = 0; i < get_irn_arity(n); i++)
ba[i] = 0;
interprocedural_view = rem;
}
......@@ -279,7 +279,7 @@ static bool is_outermost_StartBlock(ir_node *n) {
recursion must end. */
assert(is_Block(n));
if ((get_Block_n_cfgpreds(n) == 1) &&
(intern_get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_nodes_Block(skip_Proj(get_Block_cfgpred(n, 0))) == n)) {
return true;
}
......@@ -300,9 +300,9 @@ is_head (ir_node *n, ir_node *root)
assert(is_Block(n));
if (!is_outermost_StartBlock(n)) {
arity = intern_get_irn_arity(n);
arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
ir_node *pred = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
ir_node *pred = get_nodes_block(skip_Proj(get_irn_n(n, i)));
if (is_backedge(n, i)) continue;
if (!irn_is_in_stack(pred)) {
some_outof_loop = 1;
......@@ -326,9 +326,9 @@ smallest_dfn_pred (ir_node *n, int limit)
int i, index = -2, min = -1;
if (!is_outermost_StartBlock(n)) {
int arity = intern_get_irn_arity(n);
int arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
ir_node *pred = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
ir_node *pred = get_nodes_block(skip_Proj(get_irn_n(n, i)));
if (is_backedge(n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) >= limit && (min == -1 || get_irn_dfn(pred) < min)) {
index = i;
......@@ -346,9 +346,9 @@ largest_dfn_pred (ir_node *n)
int i, index = -2, max = -1;
if (!is_outermost_StartBlock(n)) {
int arity = intern_get_irn_arity(n);
int arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
ir_node *pred = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
ir_node *pred = get_nodes_block(skip_Proj(get_irn_n(n, i)));
if (is_backedge (n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) > max) {
index = i;
......@@ -390,7 +390,7 @@ find_tail (ir_node *n) {
assert (res_index > -2);
set_backedge (m, res_index);
return is_outermost_StartBlock(n) ? NULL : get_nodes_block(skip_Proj(intern_get_irn_n(m, res_index)));
return is_outermost_StartBlock(n) ? NULL : get_nodes_block(skip_Proj(get_irn_n(m, res_index)));
}
/*-----------------------------------------------------------*
......@@ -418,12 +418,12 @@ static void cfscc (ir_node *n) {
so is_backedge does not access array[-1] but correctly returns false! */
if (!is_outermost_StartBlock(n)) {
int arity = intern_get_irn_arity(n);
int arity = get_irn_arity(n);
for (i = 0; i < arity; i++) {
ir_node *m;
if (is_backedge(n, i)) continue;
m = get_nodes_block(skip_Proj(intern_get_irn_n(n, i)));
m = get_nodes_block(skip_Proj(get_irn_n(n, i)));
cfscc (m);
if (irn_is_in_stack(m)) {
......
......@@ -213,7 +213,7 @@ void compute_doms(ir_graph *irg) {
tmp_dom_info *v;
/* Step 2 */
irn_arity = intern_get_irn_arity(w->block);
irn_arity = get_irn_arity(w->block);
for (j = 0; j < irn_arity; j++) {
ir_node *pred = get_nodes_Block(get_Block_cfgpred(w->block, j));
tmp_dom_info *u;
......
......@@ -82,8 +82,8 @@ INLINE int get_Block_n_cfg_outs (ir_node *bl) {
assert (bl->out_valid);
#endif
for (i = 0; i < (int)bl->out[0]; i++)
if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) &&
(intern_get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
(get_irn_op(bl->out[i+1]) != op_End)) n_cfg_outs++;
return n_cfg_outs;
}
......@@ -95,13 +95,13 @@ INLINE ir_node *get_Block_cfg_out (ir_node *bl, int pos) {
assert (bl->out_valid);
#endif
for (i = 0; i < (int)bl->out[0]; i++)
if ((intern_get_irn_mode(bl->out[i+1]) == mode_X) &&
(intern_get_irn_op(bl->out[i+1]) != op_End)) {
if ((get_irn_mode(bl->out[i+1]) == mode_X) &&
(get_irn_op(bl->out[i+1]) != op_End)) {
if (out_pos == pos) {
ir_node *cfop = bl->out[i+1];
return cfop->out[0+1];
ir_node *cfop = bl->out[i+1];
return cfop->out[0+1];
} else {
out_pos++;
out_pos++;
}
}
return NULL;
......@@ -171,11 +171,11 @@ void irg_out_block_walk(ir_node *node,
irg_walk_func *pre, irg_walk_func *post,
void *env) {
assert((get_irn_op(node) == op_Block) || (intern_get_irn_mode(node) == mode_X));
assert((get_irn_op(node) == op_Block) || (get_irn_mode(node) == mode_X));
inc_irg_block_visited(current_ir_graph);
if (intern_get_irn_mode(node) == mode_X) node = node->out[1];
if (get_irn_mode(node) == mode_X) node = node->out[1];
irg_out_block_walk2(node, pre, post, env);
......@@ -211,12 +211,12 @@ static int count_outs(ir_node *n) {
set_irn_visited(n, get_irg_visited(current_ir_graph));
n->out = (ir_node **) 1; /* Space for array size. */
if ((intern_get_irn_op(n) == op_Block)) start = 0; else start = -1;
irn_arity = intern_get_irn_arity(n);
start = get_irn_op(n) == op_Block ? 0 : -1;
irn_arity = get_irn_arity(n);
res = irn_arity - start +1; /* --1 or --0; 1 for array size. */
for (i = start; i < irn_arity; i++) {
/* Optimize Tuples. They annoy if walking the cfg. */
succ = skip_Tuple(intern_get_irn_n(n, i));
succ = skip_Tuple(get_irn_n(n, i));
set_irn_n(n, i, succ);
/* count outs for successors */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph)) {
......@@ -246,10 +246,10 @@ static ir_node **set_out_edges(ir_node *n, ir_node **free) {
edge. */
n->out[0] = (ir_node *)0;
if (intern_get_irn_op(n) == op_Block) start = 0; else start = -1;
irn_arity = intern_get_irn_arity(n);
if (get_irn_op(n) == op_Block) start = 0; else start = -1;
irn_arity = get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
succ = intern_get_irn_n(n, i);
succ = get_irn_n(n, i);
/* Recursion */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
free = set_out_edges(succ, free);
......@@ -266,8 +266,8 @@ static INLINE void fix_start_proj(ir_graph *irg) {
if (get_Block_n_cfg_outs(get_irg_start_block(irg))) {
startbl = get_irg_start_block(irg);
for (i = 0; i < get_irn_n_outs(startbl); i++)
if (intern_get_irn_mode(get_irn_out(startbl, i)) == mode_X)
proj = get_irn_out(startbl, i);
if (get_irn_mode(get_irn_out(startbl, i)) == mode_X)
proj = get_irn_out(startbl, i);
if (get_irn_out(proj, 0) == startbl) {
assert(get_irn_n_outs(proj) == 2);
set_irn_out(proj, 0, get_irn_out(proj, 1));
......@@ -347,13 +347,13 @@ static void node_arity_count(ir_node * node, void * env)
int *anz = (int *) env, arity, i, start;
ir_node *succ;
arity = 1 + intern_get_irn_arity(node)
arity = 1 + get_irn_arity(node)
+ ((is_Block(node)) ? 0 : 1);
*anz += arity;
start = (is_Block(node)) ? 0 : -1;
for(i = start; i < intern_get_irn_arity(node); i++) {
succ = intern_get_irn_n(node, i);
for(i = start; i < get_irn_arity(node); i++) {
succ = get_irn_n(node, i);
succ->out = (ir_node **)((int)succ->out + 1);
}
}
......@@ -409,12 +409,11 @@ static void set_out_pointer(ir_node * node, void * env) {
ir_node *succ;
int start = (!is_Block(node)) ? -1 : 0;
for(i = start; i < intern_get_irn_arity(node); i++)
{
succ = intern_get_irn_n(node, i);
succ->out[get_irn_n_outs(succ)+1] = node;
succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1);
}
for(i = start; i < get_irn_arity(node); i++) {
succ = get_irn_n(node, i);
succ->out[get_irn_n_outs(succ)+1] = node;
succ->out[0] = (ir_node *) (get_irn_n_outs(succ) + 1);
}
}
......@@ -455,11 +454,10 @@ void compute_ip_outs(void) {
void free_ip_outs(void)
{
ir_node **out_edges = get_irp_ip_outedges();
if (out_edges != NULL)
{
free(out_edges);
set_irp_ip_outedges(NULL);
}
if (out_edges != NULL) {
free(out_edges);
set_irp_ip_outedges(NULL);
}
irp->outs_state = no_outs;
}
......
......@@ -520,23 +520,23 @@ init_node (ir_node *n, void *env) {
/* Also init nodes not visible in intraproc_view. */
/* @@@ init_node is called for too many nodes -- this wastes memory!.
The mem is not lost as its on the obstack. */
if (intern_get_irn_op(n) == op_Filter) {
if (get_irn_op(n) == op_Filter) {
for (i = 0; i < get_Filter_n_cg_preds(n); i++)
init_node(get_Filter_cg_pred(n, i), NULL);
}
if (intern_get_irn_op(n) == op_Block) {
if (get_irn_op(n) == op_Block) {
for (i = 0; i < get_Block_cg_n_cfgpreds(n); i++) {
init_node(get_Block_cg_cfgpred(n, i), NULL);
}
}
/* The following pattern matches only after a call from above pattern. */
if ((intern_get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) {
if ((get_irn_op(n) == op_Proj) /*&& (get_Proj_proj(n) == 0)*/) {
/* @@@ init_node is called for every proj -- this wastes memory!.
The mem is not lost as its on the obstack. */
ir_node *cb = get_Proj_pred(n);
if ((intern_get_irn_op(cb) == op_CallBegin) ||
(intern_get_irn_op(cb) == op_EndReg) ||
(intern_get_irn_op(cb) == op_EndExcept)) {
if ((get_irn_op(cb) == op_CallBegin) ||
(get_irn_op(cb) == op_EndReg) ||
(get_irn_op(cb) == op_EndExcept)) {
init_node(cb, NULL);
init_node(get_nodes_Block(cb), NULL);
}
......@@ -575,9 +575,9 @@ init_ip_scc (void) {
static bool is_outermost_Start(ir_node *n) {
/* Test whether this is the outermost Start node. If so
recursion must end. */
if ((intern_get_irn_op(n) == op_Block) &&
if ((get_irn_op(n) == op_Block) &&
(get_Block_n_cfgpreds(n) == 1) &&
(intern_get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Start) &&
(get_nodes_Block(skip_Proj(get_Block_cfgpred(n, 0))) == n)) {
return true;
}
......@@ -586,7 +586,7 @@ static bool is_outermost_Start(ir_node *n) {
not possible in interprocedural view as outermost_graph is
not necessarily the only with a dead-end start block.
Besides current_ir_graph is not set properly. */
if ((intern_get_irn_op(n) == op_Block) &&
if ((get_irn_op(n) == op_Block) &&
(n == get_irg_start_block(current_ir_graph))) {
if ((!interprocedural_view) ||
(current_ir_graph == outermost_ir_graph))
......@@ -609,9 +609,9 @@ get_start_index(ir_node *n) {
not reachable.
I.e., with this code, the order on the loop tree is correct. But a (single)
test showed the loop tree is deeper. */
if (intern_get_irn_op(n) == op_Phi ||
intern_get_irn_op(n) == op_Block ||
(intern_get_irn_op(n) == op_Filter && interprocedural_view) ||
if (get_irn_op(n) == op_Phi ||
get_irn_op(n) == op_Block ||
(get_irn_op(n) == op_Filter && interprocedural_view) ||
(get_irg_pinned(get_irn_irg(n)) == floats &&
get_op_pinned(get_irn_op(n)) == floats))
// Here we could test for backedge at -1 which is illegal
......@@ -626,7 +626,7 @@ get_start_index(ir_node *n) {
But it guarantees that Blocks are analysed before nodes contained in the
block. If so, we can set the value to undef if the block is not \
executed. */
if (is_cfop(n) || is_fragile_op(n) || intern_get_irn_op(n) == op_Start)
if (is_cfop(n) || is_fragile_op(n) || get_irn_op(n) == op_Start)
return -1;
else
return 0;
......@@ -644,9 +644,9 @@ switch_irg (ir_node *n, int index) {
if (interprocedural_view) {
/* Only Filter and Block nodes can have predecessors in other graphs. */
if (intern_get_irn_op(n) == op_Filter)
if (get_irn_op(n) == op_Filter)
n = get_nodes_Block(n);
if (intern_get_irn_op(n) == op_Block) {
if (get_irn_op(n) == op_Block) {
ir_node *cfop = skip_Proj(get_Block_cfgpred(n, index));
if (is_ip_cfop(cfop)) {
current_ir_graph = get_irn_irg(cfop);
......@@ -681,7 +681,7 @@ find_irg_on_stack (ir_node *n) {
current_ir_graph = get_irn_irg(m);
break;
}
if (intern_get_irn_op(m) == op_Filter) {
if (get_irn_op(m) == op_Filter) {
/* Find the corresponding ip_cfop */
ir_node *pred = stack[i+1];
int j;
......@@ -722,7 +722,7 @@ static void test(ir_node *pred, ir_node *root, ir_node *this) {
/* Test for legal loop header: Block, Phi, ... */
INLINE static bool is_possible_loop_head(ir_node *n) {
ir_op *op = intern_get_irn_op(n);
ir_op *op = get_irn_op(n);
return ((op == op_Block) ||
(op == op_Phi) ||
((op == op_Filter) && interprocedural_view));
......@@ -743,9 +743,9 @@ is_head (ir_node *n, ir_node *root)
return false;
if (!is_outermost_Start(n)) {
arity = intern_get_irn_arity(n);
arity = get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
ir_node *pred = intern_get_irn_n(n, i);
ir_node *pred = get_irn_n(n, i);
assert(pred);
if (is_backedge(n, i)) continue;
if (!irn_is_in_stack(pred)) {
......@@ -771,9 +771,9 @@ smallest_dfn_pred (ir_node *n, int limit)
int i, index = -2, min = -1;
if (!is_outermost_Start(n)) {
int arity = intern_get_irn_arity(n);
int arity = get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
ir_node *pred = intern_get_irn_n(n, i);
ir_node *pred = get_irn_n(n, i);
assert(pred);
if (is_backedge(n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) >= limit && (min == -1 || get_irn_dfn(pred) < min)) {
......@@ -792,9 +792,9 @@ largest_dfn_pred (ir_node *n)
int i, index = -2, max = -1;
if (!is_outermost_Start(n)) {
int arity = intern_get_irn_arity(n);
int arity = get_irn_arity(n);
for (i = get_start_index(n); i < arity; i++) {
ir_node *pred = intern_get_irn_n(n, i);
ir_node *pred = get_irn_n(n, i);
if (is_backedge (n, i) || !irn_is_in_stack(pred)) continue;
if (get_irn_dfn(pred) > max) {
index = i;
......@@ -841,7 +841,7 @@ find_tail (ir_node *n) {
assert (res_index > -2);
set_backedge (m, res_index);
return is_outermost_Start(n) ? NULL : intern_get_irn_n(m, res_index);
return is_outermost_Start(n) ? NULL : get_irn_n(m, res_index);
}
......@@ -942,7 +942,7 @@ static void scc (ir_node *n) {
so is_backedge does not access array[-1] but correctly returns false! */
if (!is_outermost_Start(n)) {
int arity = intern_get_irn_arity(n);
int arity = get_irn_arity(n);
#if EXPERIMENTAL_LOOP_TREE
......@@ -987,8 +987,8 @@ static void scc (ir_node *n) {
ir_node *m;
if (is_backedge(n, i)) continue;
/* printf("i: %d\n", i); */
m = intern_get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
/* if ((!m) || (intern_get_irn_op(m) == op_Unknown)) continue; */
m = get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
/* if ((!m) || (get_irn_op(m) == op_Unknown)) continue; */
scc (m);
if (irn_is_in_stack(m)) {
/* Uplink of m is smaller if n->m is a backedge.
......
......@@ -61,7 +61,7 @@ static type* compute_irn_type(ir_node *n);
static type *find_type_for_Proj(ir_node *n) {
type *tp;
ir_node *pred = skip_Tuple(get_Proj_pred(n));
ir_mode *m = intern_get_irn_mode(n);
ir_mode *m = get_irn_mode(n);
if (m == mode_T ||
m == mode_BB ||
......@@ -70,16 +70,16 @@ static type *find_type_for_Proj(ir_node *n) {
m == mode_b )
return none_type;
switch(intern_get_irn_opcode(pred)) {
switch (get_irn_opcode(pred)) {
case iro_Proj: {
ir_node *pred_pred;
/* Deal with Start / Call here: we need to know the Proj Nr. */
assert(get_irn_mode(pred) == mode_T);
pred_pred = get_Proj_pred(pred);
if (intern_get_irn_op(pred_pred) == op_Start) {
if (get_irn_op(pred_pred) == op_Start) {
type *mtp = get_entity_type(get_irg_ent(get_Start_irg(pred_pred)));
tp = get_method_param_type(mtp, get_Proj_proj(n));
} else if (intern_get_irn_op(pred_pred) == op_Call) {
} else if (get_irn_op(pred_pred) == op_Call) {
type *mtp = get_Call_type(pred_pred);
tp = get_method_res_type(mtp, get_Proj_proj(n));
} else {
......@@ -136,7 +136,7 @@ static type *find_type_for_node(ir_node *n) {
tp2 = compute_irn_type(b);
}
switch(intern_get_irn_opcode(n)) {
switch(get_irn_opcode(n)) {
case iro_InstOf: {
assert(0 && "op_InstOf not supported");
......@@ -230,9 +230,9 @@ static type *find_type_for_node(ir_node *n) {
} break;
case iro_Load: {
ir_node *a = get_Load_ptr(n);
if (intern_get_irn_op(a) == op_Sel)
if (get_irn_op(a) == op_Sel)
tp = get_entity_type(get_Sel_entity(a));
else if ((intern_get_irn_op(a) == op_Const) &&
else if ((get_irn_op(a) == op_Const) &&
(tarval_is_entity(get_Const_tarval(a))))
tp = get_entity_type(tarval_to_entity(get_Const_tarval(a)));
else if (is_pointer_type(compute_irn_type(a))) {
......@@ -256,28 +256,28 @@ static type *find_type_for_node(ir_node *n) {
/* catch special cases with fallthrough to binop/unop cases in default. */
case iro_Sub: {
if (mode_is_int(intern_get_irn_mode(n)) &&
mode_is_reference(intern_get_irn_mode(a)) &&
mode_is_reference(intern_get_irn_mode(b)) ) {
if (mode_is_int(get_irn_mode(n)) &&
mode_is_reference(get_irn_mode(a)) &&
mode_is_reference(get_irn_mode(b)) ) {
VERBOSE_UNKNOWN_TYPE(("Sub %ld ptr - ptr = int: unknown type\n", get_irn_node_nr(n)));
tp = unknown_type; break;
}
} /* fall through to Add. */
case iro_Add: {
if (mode_is_reference(intern_get_irn_mode(n)) &&
mode_is_reference(intern_get_irn_mode(a)) &&
mode_is_int(intern_get_irn_mode(b)) ) {
if (mode_is_reference(get_irn_mode(n)) &&
mode_is_reference(get_irn_mode(a)) &&
mode_is_int(get_irn_mode(b)) ) {
tp = tp1; break;
}