Commit 4b1138a9 authored by Christoph Mallon

s/get_irn_op(x) {==,!=} op_FOO/{,!}is_FOO(x)/.

[r21832]
parent c0bef4f5
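
For context: libFirm exposes a generated predicate is_FOO() for every node opcode, so spelling out the opcode comparison get_irn_op(x) == op_FOO at each call site is redundant. A minimal sketch of the idiom behind this commit (paraphrased, not copied from libFirm's generated opcode headers; example() and handle() are hypothetical):

/* Shape of the generated predicate: it wraps the opcode test once. */
static inline int is_Call(const ir_node *node)
{
    return get_irn_op(node) == op_Call;
}

static void example(ir_node *node)
{
    /* Before: explicit opcode comparison at every call site. */
    if (get_irn_op(node) == op_Call)
        handle(node);
    if (get_irn_op(node) != op_Bad)
        handle(node);

    /* After: the predicate reads as a question and negates cleanly. */
    if (is_Call(node))
        handle(node);
    if (!is_Bad(node))
        handle(node);
}

Behaviourally the two spellings are identical; the gain is readability and a single place to maintain the opcode test.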
@@ -728,7 +728,7 @@ static int is_ip_head(ir_graph *n, ir_graph *pred)
   for (i = 0; i < arity; i++) {
     ir_node *pred_cfop = skip_Proj(get_Block_cfgpred(sblock, i));
     //printf(" "); DDMN(pred_cfop);
-    if (get_irn_op(pred_cfop) == op_CallBegin) { /* could be Unknown */
+    if (is_CallBegin(pred_cfop)) { /* could be Unknown */
       ir_graph *ip_pred = get_irn_irg(pred_cfop);
       //printf(" "); DDMG(ip_pred);
       if ((ip_pred == pred) && is_backedge(sblock, i)) {
@@ -348,7 +348,7 @@ static void free_mark_proj(ir_node * node, long n, eset * set) {
    * op_Tuple, or a node which is handled in
    * "free_ana_walker". */
   ir_node * pred = get_Proj_pred(node);
-  if (get_irn_link(pred) != MARK && get_irn_op(pred) == op_Tuple) {
+  if (get_irn_link(pred) != MARK && is_Tuple(pred)) {
     free_mark_proj(get_Tuple_pred(pred, get_Proj_proj(node)), n, set);
   } else {
     /* nothing: handled in "free_ana_walker". */
@@ -192,12 +192,12 @@ static void my_irg_walk_current_graph(irg_walk_func *pre, irg_walk_func *post, v
 static void walk_pre(ir_node *n, void *env)
 {
   (void) env;
-  if (get_irn_op(n) == op_Raise)
+  if (is_Raise(n))
     just_passed_a_Raise = 1;
-  if ( (get_irn_op(n) == op_Proj)
-       && (get_irn_op(get_Proj_pred(n)) == op_Cond)
-       && (just_passed_a_Raise)) {
+  if (get_irn_op(n) == op_Proj &&
+      is_Cond(get_Proj_pred(n)) &&
+      just_passed_a_Raise) {
     ir_node *other_proj;
     ir_node *c = get_Proj_pred(n);
@@ -215,7 +215,7 @@ static void walk_pre(ir_node *n, void *env)
     }
   }
-  if (get_irn_op(n) == op_Cond) {
+  if (is_Cond(n)) {
     set_irn_link(n, Cond_list);
     Cond_list = n;
   }
@@ -224,13 +224,14 @@ static void walk_pre(ir_node *n, void *env)
 static void walk_post(ir_node *n, void *env)
 {
   (void) env;
-  if (get_irn_op(n) == op_Raise)
+  if (is_Raise(n))
     just_passed_a_Raise = 0;
-  if ( (get_irn_op(n) == op_Proj)
-       && (get_irn_op(get_Proj_pred(n)) == op_Cond)
-       && ((get_ProjX_probability(n) == Cond_prob_exception_taken) ||
-           (get_ProjX_probability(n) == Cond_prob_was_exception_taken) )) {
+  if (get_irn_op(n) == op_Proj &&
+      is_Cond(get_Proj_pred(n)) && (
+        get_ProjX_probability(n) == Cond_prob_exception_taken ||
+        get_ProjX_probability(n) == Cond_prob_was_exception_taken
+      )) {
     just_passed_a_Raise = 1;
   }
 }
@@ -315,7 +316,7 @@ static INLINE double get_weighted_region_exec_freq(void *reg, int pos) {
   ir_node *cfop;
   if (is_ir_node(reg)) {
     cfop = get_Block_cfgpred((ir_node *)reg, pos);
-    if (is_Proj(cfop) && (get_irn_op(get_Proj_pred(cfop)) != op_Cond))
+    if (is_Proj(cfop) && !is_Cond(get_Proj_pred(cfop)))
       cfop = skip_Proj(cfop);
   } else {
     assert(is_ir_loop(reg));
@@ -171,8 +171,8 @@ int get_type_estimated_n_fields(ir_type *tp) {
   case tpo_array: {
     long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
     assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
-    if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
-        (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const)) {
+    if (is_Const(get_array_lower_bound(tp, 0)) &&
+        is_Const(get_array_upper_bound(tp, 0))) {
       n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
     }
     s = n_elt;
@@ -214,8 +214,8 @@ int get_type_estimated_size_bytes(ir_type *tp) {
     int elt_s = get_type_estimated_size_bytes(get_array_element_type(tp));
     long n_elt = DEFAULT_N_ARRAY_ELEMENTS;
     assert(get_array_n_dimensions(tp) == 1 && "other not implemented");
-    if ((get_irn_op(get_array_lower_bound(tp, 0)) == op_Const) &&
-        (get_irn_op(get_array_upper_bound(tp, 0)) == op_Const)) {
+    if (is_Const(get_array_lower_bound(tp, 0)) &&
+        is_Const(get_array_upper_bound(tp, 0))) {
       n_elt = get_array_upper_bound_int(tp, 0) - get_array_lower_bound_int(tp, 0);
     }
     s = n_elt * elt_s;
@@ -306,7 +306,7 @@ double get_entity_estimated_n_loads(ir_entity *ent) {
   double n_loads = 0;
   for (i = 0; i < n_acc; ++i) {
     ir_node *acc = get_entity_access(ent, i);
-    if (get_irn_op(acc) == op_Load) {
+    if (is_Load(acc)) {
       n_loads += get_irn_final_cost(acc);
     }
   }
@@ -318,7 +318,7 @@ double get_entity_estimated_n_stores(ir_entity *ent) {
   double n_stores = 0;
   for (i = 0; i < n_acc; ++i) {
     ir_node *acc = get_entity_access(ent, i);
-    if (get_irn_op(acc) == op_Store)
+    if (is_Store(acc))
       n_stores += get_irn_final_cost(acc);
   }
   return n_stores;
@@ -330,8 +330,7 @@ double get_entity_estimated_n_calls(ir_entity *ent) {
   double n_calls = 0;
   for (i = 0; i < n_acc; ++i) {
     ir_node *acc = get_entity_access(ent, i);
-    if (get_irn_op(acc) == op_Call)
+    if (is_Call(acc))
       n_calls += get_irn_final_cost(acc);
   }
   return n_calls;
@@ -261,7 +261,7 @@ static void construct_interval_block(ir_node *b, ir_loop *l) {
     cfop = get_Block_cfgpred(b, i);
     if (is_Proj(cfop)) {
-      if (get_irn_op(get_Proj_pred(cfop)) != op_Cond) {
+      if (!is_Cond(get_Proj_pred(cfop))) {
         cfop = skip_Proj(cfop);
       } else {
         assert(get_nodes_block(cfop) == get_nodes_block(skip_Proj(cfop)));
@@ -270,8 +270,7 @@ static void construct_interval_block(ir_node *b, ir_loop *l) {
     pred = skip_Proj(get_nodes_block(cfop));
     /* We want nice blocks. */
-    assert(   get_irn_op(pred) != op_Bad
-           && get_irn_op(skip_Proj(get_Block_cfgpred(b, i))) != op_Bad);
+    assert(!is_Bad(pred) && !is_Bad(skip_Proj(get_Block_cfgpred(b, i))));
     pred_l = get_irn_loop(pred);
     if (pred_l == l) {
       add_region_in(b, pred);
@@ -393,11 +393,12 @@ static INLINE int get_start_index(ir_node *n) {
      not reachable.
      I.e., with this code, the order on the loop tree is correct. But a (single)
      test showed the loop tree is deeper. */
-  if (get_irn_op(n) == op_Phi ||
-      get_irn_op(n) == op_Block ||
-      (get_irn_op(n) == op_Filter && get_interprocedural_view()) ||
-      (get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats &&
-       get_irn_pinned(n) == op_pin_state_floats))
+  if (get_irn_op(n) == op_Phi ||
+      is_Block(n) ||
+      (is_Filter(n) && get_interprocedural_view()) || (
+        get_irg_pinned(get_irn_irg(n)) == op_pin_state_floats &&
+        get_irn_pinned(n) == op_pin_state_floats
+      ))
     // Here we could test for backedge at -1 which is illegal
     return 0;
   else
@@ -410,7 +411,7 @@ static INLINE int get_start_index(ir_node *n) {
      But it guarantees that Blocks are analysed before nodes contained in the
      block. If so, we can set the value to undef if the block is not \
      executed. */
-  if (is_cfop(n) || is_fragile_op(n) || get_irn_op(n) == op_Start)
+  if (is_cfop(n) || is_fragile_op(n) || is_Start(n))
     return -1;
   else
     return 0;
@@ -833,7 +834,7 @@ static void my_scc(ir_node *n) {
     ir_node *m;
     if (is_backedge(n, i)) continue;
     m = get_irn_n(n, i); /* get_irn_ip_pred(n, i); */
-    /* if ((!m) || (get_irn_op(m) == op_Unknown)) continue; */
+    /* if (!m || is_Unknown(m)) continue; */
     my_scc(m);
     if (irn_is_in_stack(m)) {
       /* Uplink of m is smaller if n->m is a backedge.
@@ -117,13 +117,13 @@ static ir_type *find_type_for_Proj(ir_node *n) {
     /* Deal with Start / Call here: we need to know the Proj Nr. */
     assert(get_irn_mode(pred) == mode_T);
     pred_pred = get_Proj_pred(pred);
-    if (get_irn_op(pred_pred) == op_Start) {
+    if (is_Start(pred_pred)) {
       ir_type *mtp = get_entity_type(get_irg_entity(get_irn_irg(pred_pred)));
       tp = get_method_param_type(mtp, get_Proj_proj(n));
-    } else if (get_irn_op(pred_pred) == op_Call) {
+    } else if (is_Call(pred_pred)) {
       ir_type *mtp = get_Call_type(pred_pred);
       tp = get_method_res_type(mtp, get_Proj_proj(n));
-    } else if (get_irn_op(pred_pred) == op_Tuple) {
+    } else if (is_Tuple(pred_pred)) {
       panic("Encountered nested Tuple");
     } else {
       DB((dbg, SET_LEVEL_1, "Proj %ld from Proj from ??: unknown type\n", get_irn_node_nr(n)));
@@ -353,7 +353,7 @@ static void force_description (ir_entity *ent, ir_entity *addr)
     ir_node *f_addr = get_atomic_ent_value (over);
     ir_entity *impl_ent = get_SymConst_entity (f_addr);
-    assert ((get_irn_op(f_addr) == op_SymConst) && "can't do complex addrs");
+    assert(is_SymConst(f_addr) && "can't do complex addrs");
     if (impl_ent == addr) {
       assert (0 && "gibt's denn sowas");
       force_description (over, addr);
@@ -548,10 +548,10 @@ static void chain_accesses(ir_node *n, void *env) {
   ir_node *addr;
   (void) env;
-  if (get_irn_op(n) == op_Alloc) {
+  if (is_Alloc(n)) {
     add_type_alloc(get_Alloc_type(n), n);
     return;
-  } else if (get_irn_op(n) == op_Cast) {
+  } else if (is_Cast(n)) {
     add_type_cast(get_Cast_type(n), n);
     return;
   } else if (is_Sel(n)) {
@@ -562,7 +562,7 @@ static void chain_accesses(ir_node *n, void *env) {
     return;
   } else if (is_memop(n)) {
     addr = get_memop_ptr(n);
-  } else if (get_irn_op(n) == op_Call) {
+  } else if (is_Call(n)) {
     addr = get_Call_ptr(n);
     if (! is_Sel(addr)) return;  /* Sels before Calls mean a Load / polymorphic Call. */
   } else {
@@ -240,7 +240,7 @@ ir_node *arch_transform_node_Mux(ir_node *n)
     return n;
   cmp = get_Proj_pred(proj);
-  if (get_irn_op(cmp) == op_Cmp) {
+  if (is_Cmp(cmp)) {
     ir_node *a = get_Cmp_left(cmp);
     ir_node *b = get_Cmp_right(cmp);
     ir_node *t = get_Mux_true(n);
@@ -402,8 +402,8 @@ void TEMPLATE_transform_node(ir_node *node, void *env) {
   default:
     if (get_irn_op(node) == get_op_Max() ||
-        get_irn_op(node) == get_op_Min() ||
-        get_irn_op(node) == get_op_Mulh())
+        get_irn_op(node) == get_op_Min() ||
+        is_Mulh(node))
     {
       /* TODO: implement */
       /* ignore for now */
@@ -1493,7 +1493,7 @@ static ir_node *gen_Proj(ir_node *node) {
     return gen_Proj_be_AddSP(node);
   } else if (is_Cmp(pred)) {
     return gen_Proj_Cmp(node);
-  } else if (get_irn_op(pred) == op_Start) {
+  } else if (is_Start(pred)) {
     if (proj == pn_Start_X_initial_exec) {
       ir_node *block = get_nodes_block(pred);
       ir_node *jump;
@@ -383,8 +383,7 @@ static ir_node *convert_dbl_to_int(ir_node *bl, ir_node *arg, ir_node *mem,
     v = (v << 8) | get_tarval_sub_bits(tv, 1);
     v = (v << 8) | get_tarval_sub_bits(tv, 0);
     *resL = new_Const_long(mode_Is, v);
-  }
-  else if (get_irn_op(skip_Proj(arg)) == op_Load) {
+  } else if (is_Load(skip_Proj(arg))) {
     /* FIXME: handling of low/high depends on LE/BE here */
     assert(0);
   }
@@ -423,8 +422,7 @@ static ir_node *convert_sng_to_int(ir_node *bl, ir_node *arg)
     v = (v << 8) | get_tarval_sub_bits(tv, 1);
     v = (v << 8) | get_tarval_sub_bits(tv, 0);
     return new_Const_long(mode_Is, v);
-  }
-  else if (get_irn_op(skip_Proj(arg)) == op_Load) {
+  } else if (is_Load(skip_Proj(arg))) {
     ir_node *load;
     load = skip_Proj(arg);
@@ -229,7 +229,7 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn
     /* if irn is an End we have keep-alives and op might be a block, skip that */
     if (is_Block(op)) {
-      assert(get_irn_op(irn) == op_End);
+      assert(is_End(irn));
       continue;
     }
@@ -482,7 +482,7 @@ is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
   for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
     ir_node *op = get_irn_n(irn, n);
-    remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
+    remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || is_NoMem(op);
 //    if(!remat)
 //      ir_fprintf(stderr, " Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
@@ -398,7 +398,7 @@ typedef struct _wenv_t {
 static void collect_copyb_nodes(ir_node *node, void *env) {
   wenv_t *wenv = env;
-  if (get_irn_op(node) == op_CopyB) {
+  if (is_CopyB(node)) {
     set_irn_link(node, wenv->list);
     wenv->list = node;
   }
@@ -494,7 +494,7 @@ static void emit_be_Perm(const ir_node *irn) {
 static void emit_Proj(const ir_node *irn) {
   ir_node *pred = get_Proj_pred(irn);
-  if (get_irn_op(pred) == op_Start) {
+  if (is_Start(pred)) {
     if (get_Proj_proj(irn) == pn_Start_X_initial_exec) {
       emit_Jmp(irn);
     }
@@ -96,7 +96,7 @@ static void caller_init(int arr_length, ir_entity ** free_methods) {
   ir_node * call;
   /* We collected all call nodes in a linked list at the end node. */
   for (call = get_irn_link(get_irg_end(irg)); call; call = get_irn_link(call)) {
-    if (get_irn_op(call) != op_Call) continue;
+    if (!is_Call(call)) continue;
     for (j = get_Call_n_callees(call) - 1; j >= 0; --j) {
       ir_entity * ent = get_Call_callee(call, j);
       if (get_entity_irg(ent)) {
@@ -126,7 +126,7 @@ static INLINE ir_node * tail(ir_node * node) {
  * (also for Proj->Call operations), and insert Phi operations into the
  * list of their basic block. */
 static void collect_phicallproj_walker(ir_node * node, ir_node ** call_tail) {
-  if (get_irn_op(node) == op_Call) {
+  if (is_Call(node)) {
     /* Append the list of Calls to call_tail. */
     ir_node * link;
     assert(get_irn_link(*call_tail) == NULL);
@@ -261,9 +261,9 @@ static void prepare_irg(ir_graph * irg, irg_data_t * data) {
  * that, because of CSE, only one Filter operation can have been created
  * above for "different" Proj operations. */
   for (link = get_irg_start(irg), proj = get_irn_link(link); proj; proj = get_irn_link(proj)) {
-    if (get_irn_op(proj) == op_Id) { /* replaced with filter */
+    if (is_Id(proj)) { /* replaced with filter */
       ir_node * filter = get_Id_pred(proj);
-      assert(get_irn_op(filter) == op_Filter);
+      assert(is_Filter(filter));
       if (filter != link && get_irn_link(filter) == NULL) {
         set_irn_link(link, filter);
         link = filter;
@@ -279,7 +279,7 @@ static void prepare_irg(ir_graph * irg, irg_data_t * data) {
   if (data->open) {
     set_Block_cg_cfgpred(start_block, 0, get_cg_Unknown(mode_X));
     for (proj = get_irn_link(get_irg_start(irg)); proj; proj = get_irn_link(proj)) {
-      if (get_irn_op(proj) == op_Filter) {
+      if (is_Filter(proj)) {
         set_Filter_cg_pred(proj, 0, get_cg_Unknown(get_irn_mode(proj)));
       }
     }
@@ -457,7 +457,7 @@ static void move_nodes(ir_node * from_block, ir_node * to_block, ir_node * node)
   /* Move projs of this node. */
   proj = get_irn_link(node);
   for (; proj; proj = skip_Id(get_irn_link(proj))) {
-    if (get_irn_op(proj) != op_Proj && get_irn_op(proj) != op_Filter) continue;
+    if (get_irn_op(proj) != op_Proj && !is_Filter(proj)) continue;
     if ((get_nodes_block(proj) == from_block) && (skip_Proj(get_irn_n(proj, 0)) == node))
       set_nodes_block(proj, to_block);
   }
@@ -482,7 +482,7 @@ static void construct_start(ir_entity * caller, ir_entity * callee,
   set_Block_cg_cfgpred(get_nodes_block(start), data->count, exec);
   for (filter = get_irn_link(start); filter; filter = get_irn_link(filter)) {
-    if (get_irn_op(filter) != op_Filter) continue;
+    if (!is_Filter(filter)) continue;
     if (get_Proj_pred(filter) == start) {
       switch ((int) get_Proj_proj(filter)) {
       case pn_Start_M:
@@ -573,7 +573,7 @@ static ir_node * get_except(ir_node * call) {
  * call site is invoked only a single time. */
   ir_node * proj;
   for (proj = get_irn_link(call); proj && get_irn_op(proj) == op_Proj; proj = get_irn_link(proj)) {
-    if (get_Proj_proj(proj) == 1 && get_irn_op(get_Proj_pred(proj)) == op_Call) {
+    if (get_Proj_proj(proj) == 1 && is_Call(get_Proj_pred(proj))) {
       return proj;
     }
   }
@@ -857,7 +857,7 @@ void cg_construct(int arr_len, ir_entity ** free_methods_arr) {
     current_ir_graph = get_irp_irg(i);
     for (node = get_irn_link(get_irg_end(current_ir_graph)); node; node = get_irn_link(node)) {
-      if (get_irn_op(node) == op_Call) {
+      if (is_Call(node)) {
         int j, n_callees = get_Call_n_callees(node);
         for (j = 0; j < n_callees; ++j)
           if (get_entity_irg(get_Call_callee(node, j)))
@@ -881,7 +881,7 @@ void cg_construct(int arr_len, ir_entity ** free_methods_arr) {
 static void destruct_walker(ir_node * node, void * env)
 {
   (void) env;
-  if (get_irn_op(node) == op_Block) {
+  if (is_Block(node)) {
     remove_Block_cg_cfgpred_arr(node);
     /* Do not turn Break into Jmp. Better: merge blocks right away.
        Well, but there are Breaks left.
@@ -891,13 +891,13 @@ static void destruct_walker(ir_node * node, void * env)
       if (get_irn_op(pred) == op_Break)
         exchange(node, get_nodes_block(pred));
     }
-  } else if (get_irn_op(node) == op_Filter) {
+  } else if (is_Filter(node)) {
     set_irg_current_block(current_ir_graph, get_nodes_block(node));
     exchange(node, new_Proj(get_Filter_pred(node), get_irn_mode(node), get_Filter_proj(node)));
   } else if (get_irn_op(node) == op_Break) {
     set_irg_current_block(current_ir_graph, get_nodes_block(node));
     exchange(node, new_Jmp());
-  } else if (get_irn_op(node) == op_Call) {
+  } else if (is_Call(node)) {
     remove_Call_callee_arr(node);
   } else if (get_irn_op(node) == op_Proj) {
     /* some ProjX end up in strange blocks. */
@@ -254,7 +254,7 @@ new_bd_Phi(dbg_info *db, ir_node *block, int arity, ir_node **in, ir_mode *mode)
   res->attr.phi.u.backedge = new_backedge_arr(irg->obst, arity);
   for (i = arity - 1; i >= 0; --i)
-    if (get_irn_op(in[i]) == op_Unknown) {
+    if (is_Unknown(in[i])) {
       has_unknown = 1;
       break;
     }
@@ -1744,7 +1744,7 @@ new_d_Block(dbg_info *db, int arity, ir_node **in) {
   }
   for (i = arity-1; i >= 0; i--)
-    if (get_irn_op(in[i]) == op_Unknown) {
+    if (is_Unknown(in[i])) {
       has_unknown = 1;
       break;
     }
@@ -1897,11 +1897,11 @@ static INLINE ir_node **new_frag_arr(ir_node *n) {
      finished yet. */
   opt = get_opt_optimize(); set_optimize(0);
   /* Here we rely on the fact that all frag ops have Memory as first result! */
-  if (get_irn_op(n) == op_Call)
+  if (is_Call(n)) {
     arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
-  else if (get_irn_op(n) == op_CopyB)
+  } else if (is_CopyB(n)) {
     arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
-  else {
+  } else {
     assert((pn_Quot_M == pn_DivMod_M) &&
            (pn_Quot_M == pn_Div_M)    &&
            (pn_Quot_M == pn_Mod_M)    &&
@@ -801,7 +801,7 @@ int dump_node_opcode(FILE *F, ir_node *n)
   case iro_CallBegin: {
     ir_node *addr = get_CallBegin_ptr(n);
     ir_entity *ent = NULL;
-    if (get_irn_op(addr) == op_Sel)
+    if (is_Sel(addr))
       ent = get_Sel_entity(addr);
     else if (is_Global(addr))
       ent = get_Global_entity(addr);
@@ -2728,7 +2728,7 @@ dump_block_to_cfg(ir_node *block, void *env) {
   fprintf(F, "}\n");
   /* Dump the edges */
   for ( i = 0; i < get_Block_n_cfgpreds(block); i++)
-    if (get_irn_op(skip_Proj(get_Block_cfgpred(block, i))) != op_Bad) {
+    if (!is_Bad(skip_Proj(get_Block_cfgpred(block, i)))) {
       pred = get_nodes_block(skip_Proj(get_Block_cfgpred(block, i)));
       fprintf(F, "edge: { sourcename: \"");
       PRINT_NODEID(block);