Commit a00e3544 authored by Matthias Braun's avatar Matthias Braun
Browse files

lower_mode_b: refactoring, make it more robust

- Make API private as it should only be called by backends
- Let ia32 backend create special ia32_Set nodes instead of relying
  on muxes which must not be touched anymore
- Does not create ConvB nodes anymore but instead produces the Cmp
  directly. (All backends did this anyway during code-selection so I was
  able to leave this case out of code-selection)
- First collect nodes to lower, then lower them. This avoids robustness
  problems when transforming the graph while at the same time walking
  it.
parent 799d89c9
......@@ -478,34 +478,33 @@ FIRM_API ir_resources_t ir_resources_reserved(const ir_graph *irg);
* Graph State
*/
typedef enum {
IR_GRAPH_STATE_KEEP_MUX = 1U << 0, /**< should perform no further optimisations on Mux nodes */
IR_GRAPH_STATE_ARCH_DEP = 1U << 1, /**< should not construct more nodes which irarch potentially breaks down */
IR_GRAPH_STATE_BCONV_ALLOWED = 1U << 2, /**< Conv(mode_b) to Iu is allowed as set command */
IR_GRAPH_STATE_ARCH_DEP = 1U << 0, /**< should not construct more nodes which irarch potentially breaks down */
IR_GRAPH_STATE_MODEB_LOWERED = 1U << 1, /**< the only node which may produce mode_b is Cmp */
/**
* There are normalisations where there is no "best" representative.
* In this case we first normalise into 1 direction (!NORMALISATION2) and
* later in the other (NORMALISATION2).
*/
IR_GRAPH_STATE_NORMALISATION2 = 1U << 4,
IR_GRAPH_STATE_NORMALISATION2 = 1U << 2,
/**
* Define the semantic of Load(Sel(x)), if x has a bit offset (Bitfields!).
* Normally, the frontend is responsible for bitfield masking operations.
* Set IMPLICIT_BITFIELD_MASKING, if the lowering phase must insert masking operations.
*/
IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING = 1U << 5,
IR_GRAPH_STATE_NO_CRITICAL_EDGES = 1U << 6,
IR_GRAPH_STATE_NO_BAD_BLOCKS = 1U << 7,
IR_GRAPH_STATE_NO_UNREACHABLE_BLOCKS = 1U << 8,
IR_GRAPH_STATE_ONE_RETURN = 1U << 9,
IR_GRAPH_STATE_CONSISTENT_DOMINANCE = 1U << 10,
IR_GRAPH_STATE_CONSISTENT_POSTDOMINANCE = 1U << 11,
IR_GRAPH_STATE_CONSISTENT_OUT_EDGES = 1U << 12,
IR_GRAPH_STATE_CONSISTENT_OUTS = 1U << 13,
IR_GRAPH_STATE_CONSISTENT_LOOPINFO = 1U << 14,
IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE = 1U << 15,
IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS = 1U << 16,
IR_GRAPH_STATE_BROKEN_FOR_VERIFIER = 1U << 17,
IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING = 1U << 3,
IR_GRAPH_STATE_NO_CRITICAL_EDGES = 1U << 4,
IR_GRAPH_STATE_NO_BAD_BLOCKS = 1U << 5,
IR_GRAPH_STATE_NO_UNREACHABLE_BLOCKS = 1U << 6,
IR_GRAPH_STATE_ONE_RETURN = 1U << 7,
IR_GRAPH_STATE_CONSISTENT_DOMINANCE = 1U << 8,
IR_GRAPH_STATE_CONSISTENT_POSTDOMINANCE = 1U << 9,
IR_GRAPH_STATE_CONSISTENT_OUT_EDGES = 1U << 10,
IR_GRAPH_STATE_CONSISTENT_OUTS = 1U << 11,
IR_GRAPH_STATE_CONSISTENT_LOOPINFO = 1U << 12,
IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE = 1U << 13,
IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS = 1U << 14,
IR_GRAPH_STATE_BROKEN_FOR_VERIFIER = 1U << 15,
} ir_graph_state_t;
ENUM_BITSET(ir_graph_state_t)
......
......@@ -113,48 +113,6 @@ FIRM_API void lower_const_code(void);
*/
FIRM_API ir_prog_pass_t *lower_const_code_pass(const char *name);
/**
* Function which creates a "set" instruction. A "set" instruction takes a
* condition value (a value with mode_b) as input and produces a value in a
* general purpose integer mode.
* Most architectures have special intrinsics for this. But if all else fails
* you can just produce an if-like construct.
*/
typedef ir_node* (*create_set_func)(ir_node *cond);
/**
* implementation of create_set_func which produces a Mux node with 0/1 input
*/
FIRM_API ir_node *ir_create_mux_set(ir_node *cond, ir_mode *dest_mode);
/**
* implementation of create_set_func which produces a cond with control
* flow
*/
FIRM_API ir_node *ir_create_cond_set(ir_node *cond, ir_mode *dest_mode);
typedef struct lower_mode_b_config_t {
/* mode that is used to transport 0/1 values */
ir_mode *lowered_mode;
/* callback for creating set-like instructions */
create_set_func create_set;
/* whether direct Cond(Cmp) should also be lowered */
int lower_direct_cmp;
} lower_mode_b_config_t;
/**
* Lowers mode_b operations to integer arithmetic. After the lowering the only
* operations with mode_b are the Projs of Cmps; the only nodes with mode_b
* inputs are Cond and Psi nodes.
*
* Example: Psi(a < 0, 1, 0) => a >> 31
*
* @param irg the firm graph to lower
* @param config configuration for mode_b lowerer
*/
FIRM_API void ir_lower_mode_b(ir_graph *irg,
const lower_mode_b_config_t *config);
/**
* Used as callback, whenever a lowerable mux is found. The return value
* indicates, whether the mux should be lowered. This may be used, to lower
......
......@@ -53,6 +53,7 @@
#include "iropt_t.h"
#include "lower_dw.h"
#include "lower_calls.h"
#include "lower_mode_b.h"
#include "lower_softfloat.h"
#include "../beabi.h"
......@@ -2008,10 +2009,10 @@ static int ia32_is_valid_clobber(const char *clobber)
static ir_node *ia32_create_set(ir_node *cond)
{
/* ia32-set function produces 8-bit results which have to be converted */
ir_node *set = ir_create_mux_set(cond, mode_Bu);
ir_node *block = get_nodes_block(set);
return new_r_Conv(block, set, mode_Iu);
ir_node *block = get_nodes_block(cond);
ir_node *set = new_bd_ia32_l_Setcc(NULL, block, cond);
ir_node *conv = new_r_Conv(block, set, mode_Iu);
return conv;
}
static void ia32_lower_for_target(void)
......@@ -2020,7 +2021,6 @@ static void ia32_lower_for_target(void)
lower_mode_b_config_t lower_mode_b_config = {
mode_Iu, /* lowered mode */
ia32_create_set,
0, /* don't lower direct compares */
};
/* perform doubleword lowering */
......@@ -2031,6 +2031,8 @@ static void ia32_lower_for_target(void)
&intrinsic_env,
};
ia32_create_opcodes(&ia32_irn_ops);
/* lower compound param handling
* Note: we lower compound arguments ourself, since on ia32 we don't
* have hidden parameters but know where to find the structs on the stack.
......
......@@ -2098,18 +2098,6 @@ static ia32_condition_code_t relation_to_condition_code(ir_relation relation,
}
}
static ir_node *get_flags_mode_b(ir_node *node, ia32_condition_code_t *cc_out)
{
/* a mode_b value, we have to compare it against 0 */
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_block = be_transform_node(get_nodes_block(node));
ir_node *new_op = be_transform_node(node);
ir_node *flags = new_bd_ia32_Test(dbgi, new_block, noreg_GP, noreg_GP, nomem, new_op, new_op, false);
set_ia32_ls_mode(flags, get_irn_mode(new_op));
*cc_out = ia32_cc_not_equal;
return flags;
}
static ir_node *get_flags_node_cmp(ir_node *cmp, ia32_condition_code_t *cc_out)
{
/* must have a Cmp as input */
......@@ -2171,10 +2159,8 @@ static ir_node *get_flags_node_cmp(ir_node *cmp, ia32_condition_code_t *cc_out)
*/
static ir_node *get_flags_node(ir_node *node, ia32_condition_code_t *cc_out)
{
if (is_Cmp(node))
return get_flags_node_cmp(node, cc_out);
assert(get_irn_mode(node) == mode_b);
return get_flags_mode_b(node, cc_out);
assert(is_Cmp(node));
return get_flags_node_cmp(node, cc_out);
}
/**
......@@ -3656,6 +3642,18 @@ static ir_node *gen_Mux(ir_node *node)
}
}
static ir_node *gen_ia32_l_Setcc(ir_node *node)
{
ia32_condition_code_t cc;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
ir_node *cond = get_irn_n(node, n_ia32_l_Setcc_cond);
ir_node *flags = get_flags_node(cond, &cc);
ir_node *new_node = new_bd_ia32_Setcc(dbgi, new_block, flags, cc);
SET_IA32_ORIG_NODE(new_node, node);
return new_node;
}
/**
* Create a conversion from x87 state register to general purpose.
......@@ -5781,6 +5779,7 @@ static void register_transformers(void)
be_set_transform_function(op_ia32_l_LLtoFloat, gen_ia32_l_LLtoFloat);
be_set_transform_function(op_ia32_l_Mul, gen_ia32_l_Mul);
be_set_transform_function(op_ia32_l_Sbb, gen_ia32_l_Sbb);
be_set_transform_function(op_ia32_l_Setcc, gen_ia32_l_Setcc);
be_set_transform_function(op_ia32_l_Sub, gen_ia32_l_Sub);
be_set_transform_function(op_ia32_GetEIP, be_duplicate_node);
be_set_transform_function(op_ia32_Minus64Bit, be_duplicate_node);
......
......@@ -42,6 +42,7 @@
#include "lower_alloc.h"
#include "lower_builtins.h"
#include "lower_calls.h"
#include "lower_mode_b.h"
#include "lower_softfloat.h"
#include "bitset.h"
......@@ -420,7 +421,6 @@ static void sparc_lower_for_target(void)
lower_mode_b_config_t lower_mode_b_config = {
mode_Iu,
sparc_create_set,
0,
};
lower_calls_with_compounds(LF_RETURN_HIDDEN);
......
......@@ -1153,16 +1153,6 @@ static ir_node *gen_Const(ir_node *node)
}
}
static ir_mode *get_cmp_mode(ir_node *b_value)
{
ir_node *op;
if (!is_Cmp(b_value))
panic("can't determine cond signednes (no cmp)");
op = get_Cmp_left(b_value);
return get_irn_mode(op);
}
static ir_node *gen_SwitchJmp(ir_node *node)
{
dbg_info *dbgi = get_irn_dbg_info(node);
......@@ -1200,9 +1190,10 @@ static ir_node *gen_Cond(ir_node *node)
{
ir_node *selector = get_Cond_selector(node);
ir_mode *mode = get_irn_mode(selector);
ir_node *cmp_left;
ir_mode *cmp_mode;
ir_node *block;
ir_node *flag_node;
bool is_unsigned;
ir_relation relation;
dbg_info *dbgi;
......@@ -1211,38 +1202,18 @@ static ir_node *gen_Cond(ir_node *node)
return gen_SwitchJmp(node);
}
block = be_transform_node(get_nodes_block(node));
dbgi = get_irn_dbg_info(node);
/* regular if/else jumps */
if (is_Cmp(selector)) {
ir_mode *cmp_mode;
cmp_mode = get_cmp_mode(selector);
flag_node = be_transform_node(selector);
relation = get_Cmp_relation(selector);
is_unsigned = !mode_is_signed(cmp_mode);
if (mode_is_float(cmp_mode)) {
assert(!is_unsigned);
return new_bd_sparc_fbfcc(dbgi, block, flag_node, relation);
} else {
return new_bd_sparc_Bicc(dbgi, block, flag_node, relation, is_unsigned);
}
/* note: after lower_mode_b we are guaranteed to have a Cmp input */
block = be_transform_node(get_nodes_block(node));
dbgi = get_irn_dbg_info(node);
cmp_left = get_Cmp_left(selector);
cmp_mode = get_irn_mode(cmp_left);
flag_node = be_transform_node(selector);
relation = get_Cmp_relation(selector);
if (mode_is_float(cmp_mode)) {
return new_bd_sparc_fbfcc(dbgi, block, flag_node, relation);
} else {
/* in this case, the selector must already deliver a mode_b value.
* this happens, for example, when the Cond is connected to a Conv
* which converts its argument to mode_b. */
ir_node *new_op;
ir_graph *irg;
assert(mode == mode_b);
block = be_transform_node(get_nodes_block(node));
irg = get_irn_irg(block);
dbgi = get_irn_dbg_info(node);
new_op = be_transform_node(selector);
/* follow the SPARC architecture manual and use orcc for tst */
flag_node = new_bd_sparc_OrCCZero_reg(dbgi, block, new_op, get_g0(irg));
return new_bd_sparc_Bicc(dbgi, block, flag_node, ir_relation_less_greater, true);
bool is_unsigned = !mode_is_signed(cmp_mode);
return new_bd_sparc_Bicc(dbgi, block, flag_node, relation, is_unsigned);
}
}
......
......@@ -5592,63 +5592,6 @@ static ir_node *transform_node_Mux(ir_node *n)
}
}
if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX))
return n;
if (is_Mux(t)) {
ir_node* block = get_nodes_block(n);
ir_node* c0 = sel;
ir_node* c1 = get_Mux_sel(t);
ir_node* t1 = get_Mux_true(t);
ir_node* f1 = get_Mux_false(t);
if (f == f1) {
/* Mux(cond0, Mux(cond1, x, y), y) -> typical if (cond0 && cond1) x else y */
ir_node* and_ = new_r_And(block, c0, c1, mode_b);
ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
n = new_mux;
sel = and_;
f = f1;
t = t1;
DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
} else if (f == t1) {
/* Mux(cond0, Mux(cond1, x, y), x) */
ir_node* not_c1 = new_r_Not(block, c1, mode_b);
ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
n = new_mux;
sel = and_;
f = t1;
t = f1;
DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
}
} else if (is_Mux(f)) {
ir_node* block = get_nodes_block(n);
ir_node* c0 = sel;
ir_node* c1 = get_Mux_sel(f);
ir_node* t1 = get_Mux_true(f);
ir_node* f1 = get_Mux_false(f);
if (t == t1) {
/* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
n = new_mux;
sel = or_;
f = f1;
t = t1;
DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
} else if (t == f1) {
/* Mux(cond0, x, Mux(cond1, y, x)) */
ir_node* not_c1 = new_r_Not(block, c1, mode_b);
ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
n = new_mux;
sel = or_;
f = t1;
t = f1;
DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
}
}
/* first normalization step: try to move a constant to the false side,
* 0 preferred on false side too */
if (is_Cmp(sel) && is_Const(t) &&
......@@ -5667,63 +5610,121 @@ static ir_node *transform_node_Mux(ir_node *n)
n = new_rd_Mux(get_irn_dbg_info(n), get_nodes_block(n), sel, f, t, mode);
}
/* note: after normalization, false can only happen on default */
if (mode == mode_b) {
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
/* the following optimisations create new mode_b nodes, so only do them
* before mode_b lowering */
if (!is_irg_state(irg, IR_GRAPH_STATE_MODEB_LOWERED)) {
if (is_Mux(t)) {
ir_node* block = get_nodes_block(n);
ir_node* c0 = sel;
ir_node* c1 = get_Mux_sel(t);
ir_node* t1 = get_Mux_true(t);
ir_node* f1 = get_Mux_false(t);
if (f == f1) {
/* Mux(cond0, Mux(cond1, x, y), y) => Mux(cond0 && cond1, x, y) */
ir_node* and_ = new_r_And(block, c0, c1, mode_b);
ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
n = new_mux;
sel = and_;
f = f1;
t = t1;
DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
} else if (f == t1) {
/* Mux(cond0, Mux(cond1, x, y), x) */
ir_node* not_c1 = new_r_Not(block, c1, mode_b);
ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
n = new_mux;
sel = and_;
f = t1;
t = f1;
DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
}
} else if (is_Mux(f)) {
ir_node* block = get_nodes_block(n);
ir_node* c0 = sel;
ir_node* c1 = get_Mux_sel(f);
ir_node* t1 = get_Mux_true(f);
ir_node* f1 = get_Mux_false(f);
if (t == t1) {
/* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
n = new_mux;
sel = or_;
f = f1;
t = t1;
DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
} else if (t == f1) {
/* Mux(cond0, x, Mux(cond1, y, x)) */
ir_node* not_c1 = new_r_Not(block, c1, mode_b);
ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
n = new_mux;
sel = or_;
f = t1;
t = f1;
DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
}
}
if (is_Const(t)) {
ir_tarval *tv_t = get_Const_tarval(t);
if (tv_t == tarval_b_true) {
if (is_Const(f)) {
/* Muxb(sel, true, false) = sel */
assert(get_Const_tarval(f) == tarval_b_false);
DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
return sel;
/* note: after normalization, false can only happen on default */
if (mode == mode_b) {
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
if (is_Const(t)) {
ir_tarval *tv_t = get_Const_tarval(t);
if (tv_t == tarval_b_true) {
if (is_Const(f)) {
/* Muxb(sel, true, false) = sel */
assert(get_Const_tarval(f) == tarval_b_false);
DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
return sel;
} else {
/* Muxb(sel, true, x) = Or(sel, x) */
n = new_rd_Or(dbg, block, sel, f, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
return n;
}
}
} else if (is_Const(f)) {
ir_tarval *tv_f = get_Const_tarval(f);
if (tv_f == tarval_b_true) {
/* Muxb(sel, x, true) = Or(Not(sel), x) */
ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
n = new_rd_Or(dbg, block, not_sel, t, mode_b);
return n;
} else {
/* Muxb(sel, true, x) = Or(sel, x) */
n = new_rd_Or(dbg, block, sel, f, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
/* Muxb(sel, x, false) = And(sel, x) */
assert(tv_f == tarval_b_false);
n = new_rd_And(dbg, block, sel, t, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
return n;
}
}
} else if (is_Const(f)) {
ir_tarval *tv_f = get_Const_tarval(f);
if (tv_f == tarval_b_true) {
/* Muxb(sel, x, true) = Or(Not(sel), x) */
ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
n = new_rd_Or(dbg, block, not_sel, t, mode_b);
return n;
} else {
/* Muxb(sel, x, false) = And(sel, x) */
assert(tv_f == tarval_b_false);
n = new_rd_And(dbg, block, sel, t, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
return n;
}
}
}
/* more normalization: Mux(sel, 0, 1) is simply a conv from the mode_b
* value to integer. */
if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
ir_tarval *a = get_Const_tarval(t);
ir_tarval *b = get_Const_tarval(f);
/* more normalization: Mux(sel, 0, 1) is simply a conv from the mode_b
* value to integer. */
if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
ir_tarval *a = get_Const_tarval(t);
ir_tarval *b = get_Const_tarval(f);
if (tarval_is_one(a) && tarval_is_null(b)) {
ir_node *block = get_nodes_block(n);
ir_node *conv = new_r_Conv(block, sel, mode);
n = conv;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
return n;
} else if (tarval_is_null(a) && tarval_is_one(b)) {
ir_node *block = get_nodes_block(n);
ir_node *not_ = new_r_Not(block, sel, mode_b);
ir_node *conv = new_r_Conv(block, not_, mode);
n = conv;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
return n;
if (tarval_is_one(a) && tarval_is_null(b)) {
ir_node *block = get_nodes_block(n);
ir_node *conv = new_r_Conv(block, sel, mode);
n = conv;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
return n;
} else if (tarval_is_null(a) && tarval_is_one(b)) {
ir_node *block = get_nodes_block(n);
ir_node *not_ = new_r_Not(block, sel, mode_b);
ir_node *conv = new_r_Conv(block, not_, mode);
n = conv;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
return n;
}
}
}
......@@ -5744,11 +5745,11 @@ static ir_node *transform_node_Mux(ir_node *n)
if (and_r == t && f == cmp_r) {
if (is_Const(t) && tarval_is_single_bit(get_Const_tarval(t))) {
if (relation == ir_relation_less_greater) {
/* Mux((a & 2^C) != 0, 2^C, 0) */
/* Mux((a & 2^C) != 0, 2^C, 0) == a & 2^c */
n = cmp_l;
DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* Mux((a & 2^C) == 0, 2^C, 0) */
/* Mux((a & 2^C) == 0, 2^C, 0) == (a & 2^c) xor (2^c) */
n = new_rd_Eor(get_irn_dbg_info(n),
block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
......@@ -5761,11 +5762,11 @@ static ir_node *transform_node_Mux(ir_node *n)
if (is_Const(shl_l) && is_Const_one(shl_l)) {
if (and_r == t && f == cmp_r) {
if (relation == ir_relation_less_greater) {
/* (a & (1 << n)) != 0, (1 << n), 0) */
/* (a & (1 << n)) != 0, (1 << n), 0) == a & (1<<n) */
n = cmp_l;
DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* (a & (1 << n)) == 0, (1 << n), 0) */
/* (a & (1 << n)) == 0, (1 << n), 0) == (a & (1<<n)) xor (1<<n) */
n = new_rd_Eor(get_irn_dbg_info(n),
block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
......
......@@ -1392,13 +1392,10 @@ static int verify_node_Rotl(const ir_node *n)
*/
static int verify_node_Conv(const ir_node *n)
{
ir_graph *irg = get_irn_irg(n);
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Conv_op(n));
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Conv_op(n));
ASSERT_AND_RET_DBG(
is_irg_state(irg, IR_GRAPH_STATE_BCONV_ALLOWED) ||
(mode_is_datab(op1mode) && mode_is_data(mymode)),
ASSERT_AND_RET_DBG(mode_is_datab(op1mode) && mode_is_data(mymode),
"Conv node", 0,
show_unop_failure(n, "/* Conv: BB x datab --> data */");
);
......
......@@ -19,26 +19,14 @@
/**
* @file
* @brief lowers operations with mode_b. The result is a graph which
* might still contain some convs from/to mode_b, but no
* operations are performed on them anymore, they are just there
* so modes match. A backend can safely skip all mode_b convs.
* @brief lower mode_b operations to something the backend can handle
* @author Matthias Braun, Christoph Mallon
* @version $Id$
*
* After this pass the following should hold:
* - The only inputs with mode_b are for the Cond node or the
* Sel input of a Mux node.
* - The only nodes producing mode_b are: Proj(Cmp) and ConvB(X) (where X
* is some mode that can be converted to the lowered mode).
* ConvB will usually be implemented by a comparison with 0 producing some
* flags in the backends. It's debatable whether ConvB(X) is a good idea.
* Maybe we should rather introduce a Test node.
* All other former uses should be converted to manipulations with an integer
* mode that was specified in the pass configuration.
*/
#include "config.h"
#include "lower_mode_b.h"
#include <stdlib.h>
#include <stdbool.h>
......@@ -58,22 +46,13 @@
#include "util.h"
#include "array.h"
static const lower_mode_b_config_t *config;
static ir_type *lowered_type;