Commit f5d8d4c4 authored by Florian Liekweg's avatar Florian Liekweg

fixed bug in equivalent_node (store/store elimination) -- goetz, flo

[r311]
parent 7aa27d67
@@ -186,7 +186,7 @@ void compute_outs(ir_graph *irg) {
void free_outs(ir_graph *irg) {
/* Update graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ // assert(get_irg_phase_state(current_ir_graph) != phase_building);
current_ir_graph->outs_state = no_outs;
if (irg->outs) free(irg->outs);
@@ -18,6 +18,7 @@
operation is executed. Else it uses the values valid at the end of the
block with the fragile operation. */
#define PRECISE_EXC_CONTEXT 1
+ // #define PRECISE_EXC_CONTEXT 0
/* There are two implementations of the Phi node construction. The first
is faster, but does not work for blocks with more than 2 predecessors.
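As a rough source-level picture of what PRECISE_EXC_CONTEXT changes (the example program below is hypothetical, not taken from the library): with the flag set, the Phi nodes built for an exception successor carry the values valid at the fragile operation itself, not those at the end of its block.

    int use_after_raise(int *p) {
      int x = 1;    /* value valid when the fragile Load below executes */
      int v = *p;   /* fragile operation: the Load may raise            */
      x = 2;        /* only reached if the Load did not raise           */
      /* With PRECISE_EXC_CONTEXT == 1 the handler's Phi for x sees 1;
         with the flag off it would see the block-end value 2.          */
      return v + x;
    }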
@@ -471,6 +471,9 @@ new_r_Raise (ir_graph *irg, ir_node *block, ir_node *store, ir_node *obj)
ir_node *res;
res = new_ir_node (irg, block, op_Raise, mode_T, 2, in);
+ // DEBUG
+ fprintf (stdout, "%s: res = %p\n", __PRETTY_FUNCTION__, res);
res = optimize (res);
irn_vrfy (res);
return res;
@@ -1355,6 +1358,7 @@ mature_block (ir_node *block)
ir_node *next;
assert (get_irn_opcode(block) == iro_Block);
+ // assert (!get_Block_matured(block) && "Block already matured");
if (!get_Block_matured(block)) {
@@ -66,7 +66,7 @@ local_optimize_graph (ir_graph *irg) {
current_ir_graph = irg;
/* Handle graph state */
- assert(get_irg_phase_state(irg) != phase_building);
+ // assert(get_irg_phase_state(irg) != phase_building);
if (get_opt_global_cse())
set_irg_pinned(current_ir_graph, floats);
if (get_irg_outs_state(current_ir_graph) == outs_consistent)
@@ -346,7 +346,7 @@ dead_node_elimination(ir_graph *irg) {
current_ir_graph = irg;
/* Handle graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ // assert(get_irg_phase_state(current_ir_graph) != phase_building);
free_outs(current_ir_graph);
if (get_optimize() && get_opt_dead_node_elimination()) {
@@ -413,7 +413,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
if (!get_opt_inline()) return;
/* Handle graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ // assert(get_irg_phase_state(current_ir_graph) != phase_building);
if (get_irg_outs_state(current_ir_graph) == outs_consistent)
set_irg_outs_inconsistent(current_ir_graph);
@@ -424,9 +424,9 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
if (called_graph == current_ir_graph) return;
/** Part the Call node into two nodes. Pre_call collects the parameters of
the procedure and later replaces the Start node of the called graph.
Post_call is the old Call node and collects the results of the called
graph. Both will end up being a tuple. **/
post_bl = get_nodes_Block(call);
set_irg_current_block(current_ir_graph, post_bl);
/* XxMxPxP of Start + parameter of Call */
@@ -435,7 +435,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
in[2] = get_irg_frame(current_ir_graph);
in[3] = get_irg_globals(current_ir_graph);
in[4] = new_Tuple (get_Call_n_params(call),
get_Call_param_arr(call));
pre_call = new_Tuple(5, in);
post_call = call;
@@ -452,23 +452,23 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
/* Set pre_call as new Start node in link field of the start node of
calling graph and pre_call's block as new block for the start block
of calling graph.
Further mark these nodes so that they are not visited by the
copying. */
set_irn_link(get_irg_start(called_graph), pre_call);
set_irn_visited(get_irg_start(called_graph),
get_irg_visited(current_ir_graph));/***/
set_irn_link(get_irg_start_block(called_graph),
get_nodes_Block(pre_call));
set_irn_visited(get_irg_start_block(called_graph),
get_irg_visited(current_ir_graph)); /***/
/* Initialize for compaction of in arrays */
inc_irg_block_visited(current_ir_graph);
/*
set_Block_block_visited(get_irg_start_block(called_graph),
get_irg_block_visited(current_ir_graph) +1 +1); /* count for self edge */
/*** Replicate local entities of the called_graph ***/
/* copy the entities. */
@@ -490,7 +490,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
entities. */
/* @@@ endless loops are not copied!! */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
get_irg_frame_type(called_graph));
/* Repair called_graph */
set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
@@ -503,7 +503,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
-1: Block of Tuple.
0: Phi of all Memories of Return statements.
1: Jmp from new Block that merges the control flow from all exception
predecessors of the old end block.
2: Tuple of all arguments.
3: Phi of Exception memories.
*/
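For orientation, a minimal sketch of how such a replacement is wired up with the primitives this diff already uses (turn_into_tuple, set_Tuple_pred); the operands mem_phi, exc_jmp, res_tuple and exc_mem_phi are assumed names for the values built as described in the list above.

    /* The Call node is recycled in place as a 4-ary Tuple; the Projs that
       used to select results from the Call now select from these preds. */
    turn_into_tuple(call, 4);
    set_Tuple_pred(call, 0, mem_phi);     /* Phi of Return memories      */
    set_Tuple_pred(call, 1, exc_jmp);     /* Jmp merging exception flow  */
    set_Tuple_pred(call, 2, res_tuple);   /* Tuple of all results        */
    set_Tuple_pred(call, 3, exc_mem_phi); /* Phi of exception memories   */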
@@ -559,11 +559,11 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
ret = get_irn_n(end_bl, i);
if (get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
}
}
phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
@@ -597,15 +597,15 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
ir_node *ret;
ret = skip_Proj(get_irn_n(end_bl, i));
if (get_irn_op(ret) == op_Call) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
n_exc++;
} else if (is_fragile_op(ret)) {
/* We rely on all cfops having the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
n_exc++;
} else if (get_irn_op(ret) == op_Raise) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
n_exc++;
}
}
set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
@@ -629,9 +629,9 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
if (get_irn_op(cf_op) == op_Proj) {
cf_op = get_Proj_pred(cf_op);
if (get_irn_op(cf_op) == op_Tuple) {
cf_op = get_Tuple_pred(cf_op, 1);
assert(get_irn_op(cf_op) == op_Jmp);
break;
}
}
}
@@ -84,6 +84,10 @@ new_ir_node (ir_graph *irg, ir_node *block, ir_op *op, ir_mode *mode,
res = (ir_node *) obstack_alloc (irg->obst, node_size);
+ // DEBUG
+ if (op_Raise == op)
+   fprintf (stdout, "%s: res(%p) = %p\n", __PRETTY_FUNCTION__, op, res);
res->kind = k_ir_node;
res->op = op;
res->mode = mode;
@@ -258,7 +258,7 @@ different_identity (ir_node *a, ir_node *b)
ir_node *a1 = get_Proj_pred (a);
ir_node *b1 = get_Proj_pred (b);
if (a1 != b1 && get_irn_op (a1) == op_Alloc
&& get_irn_op (b1) == op_Alloc)
return 1;
}
return 0;
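In source terms, the property different_identity establishes is roughly the following (hypothetical example, assuming malloc from <stdlib.h> lowers to an Alloc node): pointers projected from two distinct Alloc nodes can never alias.

    int no_alias(void) {
      int *p = malloc(sizeof *p);  /* Proj of one Alloc                  */
      int *q = malloc(sizeof *q);  /* Proj of a different Alloc          */
      *q = 0;
      *p = 1;                      /* a Store through p ...              */
      return *q;                   /* ... cannot affect a Load through q */
    }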
@@ -293,30 +293,30 @@ equivalent_node (ir_node *n)
case iro_Block:
{
/* The Block constructor does not call optimize, but mature_block
calls the optimization. */
assert(get_Block_matured(n));
/* A single entry Block following a single exit Block can be merged,
if it is not the Start block. */
/* !!! Beware, all Phi-nodes of n must have been optimized away.
This should be true, as the block is matured before optimize is called.
But what about Phi-cycles with the Phi0/Id that could not be resolved?
Remaining Phi nodes are just Ids. */
if (get_Block_n_cfgpreds(n) == 1
&& get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) {
n = get_nodes_Block(get_Block_cfgpred(n, 0));
} else if ((n != current_ir_graph->start_block) &&
(n != current_ir_graph->end_block) ) {
int i;
/* If all inputs are dead, this block is dead too, except if it is
the start or end block. This is a step of unreachable code
elimination */
for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
if (!is_Bad(get_Block_cfgpred(n, i))) break;
}
if (i == get_Block_n_cfgpreds(n))
n = new_Bad();
}
}
break;
@@ -325,27 +325,27 @@ equivalent_node (ir_node *n)
/* unreachable code elimination */
if (is_Bad(get_nodes_Block(n))) n = new_Bad();
break;
/* We do not evaluate Cond here as we replace it by a new node, a Jmp.
See cases for iro_Cond and iro_Proj in transform_node. */
/** remove stuff such as x+0, x*1, x&true ... constant expression evaluation **/
case iro_Or: if (a == b) {n = a; break;}
case iro_Add:
case iro_Eor:
{ tarval *tv;
ir_node *on;
/* After running compute_node there is only one constant predecessor.
Find this predecessor's value and remember the other node: */
if ((tv = computed_value (a))) {
on = b;
} else if ((tv = computed_value (b))) {
on = a;
} else break;
/* If this predecessor's constant value is zero, the operation is
unnecessary. Remove it: */
if (tarval_classify (tv) == 0) {
n = on;
}
}
break;
case iro_Sub:
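The Or/Add/Eor rule above is the classic neutral-element fold; in source terms (hypothetical example):

    int add_zero(int x) {
      return x + 0;   /* computed_value finds the constant operand; as  */
    }                 /* tarval_classify(tv) == 0, the Add folds to x   */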
@@ -362,7 +362,7 @@ equivalent_node (ir_node *n)
break;
case iro_Not: /* NotNot x == x */
case iro_Minus: /* --x == x */ /* ??? Is this possible or can --x raise an
out of bounds exception if min != max? */
if (get_irn_op(get_unop_op(n)) == get_irn_op(n))
n = get_unop_op(get_unop_op(n));
break;
@@ -387,7 +387,7 @@ equivalent_node (ir_node *n)
break;
/* GL: Why are they skipped? DivMod allocates new nodes --> it's
treated in transform_node.
case iro_Mod, Quot, DivMod
*/
case iro_And:
if (a == b) n = a;
@@ -403,8 +403,8 @@ equivalent_node (ir_node *n)
n = a;
} else if (get_irn_mode(n) == mode_b) {
if (get_irn_op(a) == op_Conv &&
get_irn_mode (get_Conv_op(a)) == mode_b) {
n = get_Conv_op(a); /* Convb(Conv*(xxxb(...))) == xxxb(...) */
}
}
break;
@@ -415,7 +415,7 @@ equivalent_node (ir_node *n)
- no Phi in start block.
- remove Id operators that are inputs to Phi
- fold Phi-nodes, iff they have only one predecessor except
themselves.
*/
int i, n_preds;
@@ -431,13 +431,13 @@ equivalent_node (ir_node *n)
/* there should be no Phi nodes in the Start region. */
if (block == current_ir_graph->start_block) {
n = new_Bad();
break;
}
if (n_preds == 0) { /* Phi of dead Region without predecessors. */
/* GL: why not return new_Bad? */
break;
}
#if 0
@@ -446,16 +446,16 @@ equivalent_node (ir_node *n)
value that is known at a certain point. This is useful for
dataflow analysis. */
if (n_preds == 2) {
ir_node *a = follow_Id (get_Phi_pred(n, 0));
ir_node *b = follow_Id (get_Phi_pred(n, 1));
if ( (get_irn_op(a) == op_Confirm)
&& (get_irn_op(b) == op_Confirm)
&& (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
&& (get_irn_n(a, 1) == get_irn_n (b, 1))
&& (a->data.num == (~b->data.num & irpn_True) )) {
n = follow_Id (get_irn_n(a, 0));
break;
}
}
#endif
@@ -464,11 +464,11 @@ equivalent_node (ir_node *n)
first_val = follow_Id(get_Phi_pred(n, i));
/* skip Id's */
set_Phi_pred(n, i, first_val);
if ( (first_val != n) /* not self pointer */
&& (get_irn_op(first_val) != op_Bad) /* value not dead */
&& !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */
break; /* then found first value. */
}
}
/* A totally Bad or self-referencing Phi (we didn't break the above loop) */
@@ -477,27 +477,27 @@ equivalent_node (ir_node *n)
scnd_val = NULL;
/* follow_Id () for rest of inputs, determine if any of these
are non-self-referencing */
while (++i < n_preds) {
scnd_val = follow_Id(get_Phi_pred(n, i));
/* skip Id's */
set_Phi_pred(n, i, scnd_val);
if ( (scnd_val != n)
&& (scnd_val != first_val)
&& (get_irn_op(scnd_val) != op_Bad)
&& !(is_Bad (get_Block_cfgpred(block, i))) ) {
break;
}
}
}
/* Fold if there are not multiple distinct, non-self-referencing inputs */
if (i >= n_preds) {
n = first_val;
} else {
/* skip the remaining Ids. */
while (++i < n_preds) {
set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
}
}
}
break;
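In pseudo-IR terms, the fold above removes Phi nodes of the following shape (names hypothetical):

    /* Block B with predecessors (P, B itself via a back edge, dead):
           v = Phi(x, v, Bad)
       After Ids are skipped, x is the only input that is neither the Phi
       itself, nor Bad, nor behind dead control flow, so v folds to x.   */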
@@ -505,21 +505,21 @@
case iro_Load:
{
#if 0 /* This is an illegal transformation: different nodes can
represent the same pointer value!! */
a = skip_Proj(get_Load_mem(n));
b = get_Load_ptr(n);
if (get_irn_op(a) == op_Store) {
if ( different_identity (b, get_Store_ptr(a))) {
/* load and store use different pointers, therefore the load
need not take the store's memory but the state before it. */
set_Load_mem (n, get_Store_mem(a));
} else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
}
}
#endif
}
break;
case iro_Store:
/* remove unnecessary store. */
{
@@ -531,13 +531,14 @@ equivalent_node (ir_node *n)
&& get_Store_ptr(a) == b
&& skip_Proj(get_Store_value(a)) == c) {
/* We have twice exactly the same store -- a write after write. */
n = a;
} else if (get_irn_op(c) == op_Load
&& (a == c || skip_Proj(get_Load_mem(c)) == a)
&& get_Load_ptr(c) == b )
/* !!!??? and a cryptic test */ {
/* We just loaded the value from the same memory, i.e., the store
doesn't change the memory -- a write after read. */
+ a = get_Store_mem(n);
turn_into_tuple(n, 2);
set_Tuple_pred(n, 0, a);
set_Tuple_pred(n, 1, new_Bad());
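This hunk is the store/store fix named in the commit message. When a Store writes back exactly the value just loaded from the same pointer (a write after read), the Store is turned into a Tuple that merely forwards memory; the added line a = get_Store_mem(n); re-fetches the Store's own memory input, so the Tuple's memory predecessor is the proper mode M value rather than the node that a was left pointing at by the earlier skip_Proj, as the surrounding context suggests. A hypothetical source shape that triggers the rule:

    void write_after_read(int *p) {
      int t = *p;   /* Load from p                                     */
      *p = t;       /* stores back the value just loaded: memory is    */
    }               /* unchanged, so the Store forwards its incoming   */
                    /* memory state and its exception exit becomes Bad */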
@@ -551,16 +552,16 @@ equivalent_node (ir_node *n)
if ( get_irn_op(a) == op_Tuple) {
/* Remove the Tuple/Proj combination. */
if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) {
n = get_Tuple_pred(a, get_Proj_proj(n));
} else {
assert(0); /* This should not happen! */
n = new_Bad();
}
} else if (get_irn_mode(n) == mode_X &&
is_Bad(get_nodes_Block(n))) {
/* Remove dead control flow. */
n = new_Bad();
}
}
break;
@@ -598,7 +599,7 @@ transform_node (ir_node *n)
mode = get_irn_mode(a);
if (!( mode_is_int(get_irn_mode(a))
&& mode_is_int(get_irn_mode(b))))
break;
if (a == b) {
@@ -610,23 +611,23 @@ transform_node (ir_node *n)
tb = value_of(b);
if (tb) {
if (tarval_classify(tb) == 1) {
b = new_Const (mode, tarval_from_long (mode, 0));
evaluated = 1;
} else if (ta) {
tarval *resa, *resb;
resa = tarval_div (ta, tb);
if (!resa) break; /* Causes exception!!! Model by replacing through
Jmp for X result!? */
resb = tarval_mod (ta, tb);
if (!resb) break; /* Causes exception! */
a = new_Const (mode, resa);
b = new_Const (mode, resb);
evaluated = 1;
}
} else if (tarval_classify (ta) == 0) {
b = a;
evaluated = 1;
}
}
if (evaluated) { /* replace by tuple */
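For two constant operands the code above computes quotient and remainder as tarvals and, on success, replaces the DivMod by a tuple of Consts; a hedged numeric example:

    /* ta = 7, tb = 3:
         resa = tarval_div(ta, tb)  ->  tarval 2
         resb = tarval_mod(ta, tb)  ->  tarval 1
       The DivMod becomes a Tuple whose result Projs deliver Const 2 and
       Const 1; a NULL tarval (e.g. division by zero) aborts the fold,
       since the operation would raise instead of producing a value.   */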
@@ -650,15 +651,15 @@ transform_node (ir_node *n)
if (ta && (get_irn_mode(a) == mode_b)) {
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
turn_into_tuple(n, 2);
if (tv_val_b(ta) == 1) /* GL: I hope this returns 1 if true */ {
set_Tuple_pred(n, 0, new_Bad());
set_Tuple_pred(n, 1, jmp);
} else {
set_Tuple_pred(n, 0, jmp);
set_Tuple_pred(n, 1, new_Bad());
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
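In source terms, the constant-selector case above behaves like this (hypothetical example; f and g stand for arbitrary calls):

    if (1) f(); else g();   /* Cond on constant true: the true Proj is
                               replaced by a Jmp and the false Proj by
                               Bad, so g() becomes unreachable code    */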
@@ -671,18 +672,18 @@ transform_node (ir_node *n)
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
} else if ( (get_irn_op(get_Cond_selector(n)) == op_Eor)
&& (get_irn_mode(get_Cond_selector(n)) == mode_b)
&& (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
/* The Eor is a negate. Generate a new Cond without the negate,
simulate the negate by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
get_Eor_left(a)));
} else if ( (get_irn_op(get_Cond_selector(n)) == op_Not)
&& (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
/* A Not before the Cond. Generate a new Cond without the Not,
simulate the Not by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
get_Not_op(a)));
}
}
break;
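Both rewrites above drop a boolean negation in front of a Cond and compensate by exchanging the two exits, which the iro_Proj case below then performs; in source terms (hypothetical):

    if (!b) f(); else g();
    /* becomes a Cond on b with the Proj numbers swapped, i.e. */
    if (b) g(); else f();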
@@ -691,25 +692,25 @@ transform_node (ir_node *n)
a = get_Proj_pred(n);
if ( (get_irn_op(a) == op_Cond)
&& get_irn_link(a)
&& get_irn_op(get_irn_link(a)) == op_Cond) {
/* Use the better Cond if the Proj projects from a Cond which gets
its result from an Eor/Not. */
assert ( ( (get_irn_op(get_Cond_selector(a)) == op_Eor)
|| (get_irn_op(get_Cond_selector(a)) == op_Not))
&& (get_irn_mode(get_Cond_selector(a)) == mode_b)
&& (get_irn_op(get_irn_link(a)) == op_Cond)
&& (get_Cond_selector(get_irn_link(a)) ==
get_Eor_left(get_Cond_selector(a))));
set_Proj_pred(n, get_irn_link(a));
if (get_Proj_proj(n) == 0)
set_Proj_proj(n, 1);
else
set_Proj_proj(n, 0);
} else if ( (get_irn_op(a) == op_Cond)
&& (get_irn_mode(get_Cond_selector(a)) == mode_I)
&& value_of(a)
&& (get_Cond_kind(a) == dense)) {