Commit 1de3ff1c authored by Matthias Braun's avatar Matthias Braun
Browse files

Introduce flip-flopping normalisations

There are cases where we can normalize in 2 directions and both have
their merits. Just do both in this case. The frontend should at some
point set IR_GRAPH_STATE_NORMALISATION2 to trigger the 2nd direction.
parent b6a361cf
......@@ -514,6 +514,12 @@ typedef enum {
IR_GRAPH_STATE_ARCH_DEP = 1U << 1, /**< should not construct more nodes which irarch potentially breaks down */
IR_GRAPH_STATE_BCONV_ALLOWED = 1U << 2, /**< Conv(mode_b) to Iu is allowed as set command */
IR_GRAPH_STATE_BAD_BLOCK = 1U << 3, /**< a node may have Bad in its block input */
/**
* There are normalisations where there is no "best" representative.
* In this case we first normalise into 1 direction (!NORMALISATION2) and
* later in the other (NORMALISATION2).
*/
IR_GRAPH_STATE_NORMALISATION2 = 1U << 4,
} ir_graph_state_t;
ENUM_BITSET(ir_graph_state_t)
......
......@@ -2181,6 +2181,7 @@ restart:
}
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
#if 0
} else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */
ir_node *m_right = get_Mul_right(b);
if (is_Const(m_right)) {
......@@ -2199,6 +2200,7 @@ restart:
return n;
}
}
#endif
}
/* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
......@@ -2930,15 +2932,109 @@ static bool is_shiftop(const ir_node *n)
return is_Shl(n) || is_Shr(n) || is_Shrs(n) || is_Rotl(n);
}
/**
 * normalisation: (x & c1) >> c2  ->  (x >> c2) & (c1 >> c2)
 * (we can use:
 *  - And, Or, Eor instead of &
 *  - Shl, Shr, Shrs, Rotl instead of >>
 *  with a special case excluding Or/Eor combined with Shrs)
 *
 * This normalisation is good for things like x-(x&y) esp. in 186.crafty.
 *
 * @param n  a Shl, Shr, Shrs or Rotl node
 * @return   the transformed node, or n unchanged if the pattern does not match
 */
static ir_node *transform_node_shift_bitop(ir_node *n)
{
	ir_graph  *irg   = get_irn_irg(n);
	ir_node   *right = get_binop_right(n);
	ir_mode   *mode  = get_irn_mode(n);
	ir_node   *left;
	ir_node   *bitop_left;
	ir_node   *bitop_right;
	ir_op     *op_left;
	ir_node   *block;
	dbg_info  *dbgi;
	ir_node   *new_shift;
	ir_node   *new_bitop;
	ir_node   *new_const;
	ir_tarval *tv1;
	ir_tarval *tv2;
	ir_tarval *tv_shift;

	/* In phase 2 the opposite normalisation (transform_node_bitop_shift)
	 * is active; doing both would make the optimizer loop forever. */
	if (is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
		return n;

	assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));

	/* only applicable when the shift amount is a constant */
	if (!is_Const(right))
		return n;

	left    = get_binop_left(n);
	op_left = get_irn_op(left);
	if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
		return n;

	/* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
	if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
		/* TODO: test if sign bit is affected */
		return n;
	}

	/* the bitop's right operand must be constant as well so we can fold it */
	bitop_right = get_binop_right(left);
	if (!is_Const(bitop_right))
		return n;

	bitop_left = get_binop_left(left);

	block = get_nodes_block(n);
	dbgi  = get_irn_dbg_info(n);
	tv1   = get_Const_tarval(bitop_right);
	tv2   = get_Const_tarval(right);

	assert(get_tarval_mode(tv1) == mode);

	/* build the new shift over the bitop's non-constant operand and fold the
	 * bitop constant through the same shift */
	if (is_Shl(n)) {
		new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
		tv_shift  = tarval_shl(tv1, tv2);
	} else if (is_Shr(n)) {
		new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
		tv_shift  = tarval_shr(tv1, tv2);
	} else if (is_Shrs(n)) {
		new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
		tv_shift  = tarval_shrs(tv1, tv2);
	} else {
		assert(is_Rotl(n));
		new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
		tv_shift  = tarval_rotl(tv1, tv2);
	}

	assert(get_tarval_mode(tv_shift) == mode);
	/* NOTE: irg was already fetched from n above; the former redundant
	 * re-fetch here has been removed. */
	new_const = new_r_Const(irg, tv_shift);

	/* rebuild the bitop on top of the new shift with the folded constant */
	if (op_left == op_And) {
		new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
	} else if (op_left == op_Or) {
		new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
	} else {
		assert(op_left == op_Eor);
		new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
	}

	return new_bitop;
}
/**
 * normalisation: (x >> c1) & c2  ->  (x & (c2 << c1)) >> c1
 * (we can use:
 *  - And, Or, Eor instead of &
 *  - Shl, Shr, Shrs, Rotl instead of >>
 *  with a special case excluding Or/Eor combined with Shrs)
 *
 * This normalisation is usually good for the backend since << C can often be
 * matched as address-mode.
 */
static ir_node *transform_node_bitop_shift(ir_node *n)
{
ir_graph *irg = get_irn_irg(n);
ir_node *left = get_binop_left(n);
ir_node *right = get_binop_right(n);
ir_mode *mode = get_irn_mode(n);
......@@ -2947,7 +3043,6 @@ static ir_node *transform_node_bitop_shift(ir_node *n)
ir_node *block;
dbg_info *dbg_bitop;
dbg_info *dbg_shift;
ir_graph *irg;
ir_node *new_bitop;
ir_node *new_shift;
ir_node *new_const;
......@@ -2955,6 +3050,9 @@ static ir_node *transform_node_bitop_shift(ir_node *n)
ir_tarval *tv2;
ir_tarval *tv_bitop;
if (!is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
return n;
assert(is_And(n) || is_Or(n) || is_Eor(n));
if (!is_Const(right) || !is_shiftop(left))
return n;
......@@ -5135,6 +5233,8 @@ static ir_node *transform_node_Shr(ir_node *n)
n = transform_node_shift_modulo(n, new_rd_Shr);
if (is_Shr(n))
n = transform_node_shl_shr(n);
if (is_Shr(n))
n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Shr */
......@@ -5164,6 +5264,8 @@ static ir_node *transform_node_Shrs(ir_node *n)
if (is_Shrs(n))
n = transform_node_shift_modulo(n, new_rd_Shrs);
if (is_Shrs(n))
n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Shrs */
......@@ -5185,6 +5287,8 @@ static ir_node *transform_node_Shl(ir_node *n)
n = transform_node_shift_modulo(n, new_rd_Shl);
if (is_Shl(n))
n = transform_node_shl_shr(n);
if (is_Shl(n))
n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Shl */
......@@ -5202,6 +5306,9 @@ static ir_node *transform_node_Rotl(ir_node *n)
HANDLE_BINOP_PHI((eval_func) tarval_rotl, a, b, c, mode);
n = transform_node_shift(n);
if (is_Rotl(n))
n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Rotl */
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment