Commit a34bb75c authored by Matthias Braun

remove deprecated support for bitfield masking

parent b4841b96
@@ -434,48 +434,41 @@ typedef enum {
* later in the other (NORMALISATION2).
*/
IR_GRAPH_STATE_NORMALISATION2 = 1U << 2,
/**
* Defines the semantics of Load(Sel(x)) when x has a bit offset (bitfields).
* Normally the frontend is responsible for the bitfield masking operations.
* Set IMPLICIT_BITFIELD_MASKING if the lowering phase must insert the masking
* operations instead.
*/
IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING = 1U << 3,
/**
* Allows localopts to remove edges to unreachable code.
* Warning: It is only safe to enable this when you are sure that you
* apply all localopts until a fixpoint is reached (as in optimize_graph_df).
*/
IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE = 1U << 4,
IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE = 1U << 3,
/** graph contains no critical edges */
IR_GRAPH_STATE_NO_CRITICAL_EDGES = 1U << 5,
IR_GRAPH_STATE_NO_CRITICAL_EDGES = 1U << 4,
/** graph contains no Bad nodes */
IR_GRAPH_STATE_NO_BADS = 1U << 6,
IR_GRAPH_STATE_NO_BADS = 1U << 5,
/**
* there exists no (obviously) unreachable code in the graph.
* Unreachable in this context means code that cannot be reached by
* following execution flow from the start block.
*/
IR_GRAPH_STATE_NO_UNREACHABLE_CODE = 1U << 7,
IR_GRAPH_STATE_NO_UNREACHABLE_CODE = 1U << 6,
/** graph contains at most one return */
IR_GRAPH_STATE_ONE_RETURN = 1U << 8,
IR_GRAPH_STATE_ONE_RETURN = 1U << 7,
/** dominance information about the graph is valid */
IR_GRAPH_STATE_CONSISTENT_DOMINANCE = 1U << 9,
IR_GRAPH_STATE_CONSISTENT_DOMINANCE = 1U << 8,
/** postdominance information about the graph is valid */
IR_GRAPH_STATE_CONSISTENT_POSTDOMINANCE = 1U << 10,
IR_GRAPH_STATE_CONSISTENT_POSTDOMINANCE = 1U << 9,
/**
* out edges (=iredges) are enabled and there is no dead code that can be
* reached by following them
*/
IR_GRAPH_STATE_CONSISTENT_OUT_EDGES = 1U << 11,
IR_GRAPH_STATE_CONSISTENT_OUT_EDGES = 1U << 10,
/** outs (irouts) are computed and up to date */
IR_GRAPH_STATE_CONSISTENT_OUTS = 1U << 12,
IR_GRAPH_STATE_CONSISTENT_OUTS = 1U << 11,
/** loopinfo is computed and up to date */
IR_GRAPH_STATE_CONSISTENT_LOOPINFO = 1U << 13,
IR_GRAPH_STATE_CONSISTENT_LOOPINFO = 1U << 12,
/** entity usage information is computed and up to date */
IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE = 1U << 14,
IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE = 1U << 13,
/** graph contains as many returns as possible */
IR_GRAPH_STATE_MANY_RETURNS = 1U << 15,
IR_GRAPH_STATE_MANY_RETURNS = 1U << 14,
} ir_graph_state_t;
ENUM_BITSET(ir_graph_state_t)
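(Aside, not part of the commit: with IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING gone, the frontend is always the one that emits the masking around bitfield accesses. Below is a minimal, self-contained C sketch of the shift-and-mask arithmetic such a frontend would generate for an unsigned bitfield read; the word, offset, and width values are invented for illustration.)

#include <stdint.h>
#include <stdio.h>

/* Extract an unsigned bitfield of `width` bits starting at `bit_offset`
 * from an already loaded word: shift the field down, then mask off the
 * surrounding bits (the Shr/And pattern a frontend has to emit itself). */
static uint32_t extract_unsigned_bitfield(uint32_t word, unsigned bit_offset,
                                          unsigned width)
{
	uint32_t mask = (width >= 32) ? UINT32_MAX : ((UINT32_C(1) << width) - 1);
	return (word >> bit_offset) & mask;
}

int main(void)
{
	/* hypothetical container word holding a 3-bit field at bit offset 4 */
	uint32_t word = UINT32_C(0xF0);
	printf("%u\n", (unsigned)extract_unsigned_bitfield(word, 4, 3)); /* prints 7 */
	return 0;
}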
@@ -1446,8 +1446,6 @@ static void dump_graph_info(FILE *F, ir_graph *irg)
fprintf(F, " modeb_lowered");
if (is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
fprintf(F, " normalisation2");
if (is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING))
fprintf(F, " implicit_bitfield_masking");
if (is_irg_state(irg, IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE))
fprintf(F, " optimize_unreachable_code");
if (is_irg_state(irg, IR_GRAPH_STATE_NO_CRITICAL_EDGES))
@@ -284,180 +284,6 @@ static int is_integral_size(int size)
return size >= 8;
}
/**
* lower bitfield load access.
*
* @param proj the Proj(result) node
* @param load the Load node
*/
static void lower_bitfields_loads(ir_node *proj, ir_node *load)
{
ir_node *sel = get_Load_ptr(load);
ir_node *block, *res, *ptr;
ir_graph *irg;
ir_entity *ent;
ir_type *bf_type;
ir_mode *bf_mode, *mode;
int offset, bit_offset, bits, bf_bits, old_cse;
dbg_info *db;
if (!is_Sel(sel))
return;
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
/* must be a bitfield type */
if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
return;
/* We have a bitfield access if either a bit offset is given or
the size is not integral. */
bf_mode = get_type_mode(bf_type);
if (! bf_mode)
return;
mode = get_irn_mode(proj);
block = get_nodes_block(proj);
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_Load_mode(load))
return;
bits = get_mode_size_bits(mode);
offset = get_entity_offset(ent);
/*
* ok, here we are: now convert the Proj_mode_bf(Load) into
* And(Shr(Proj_mode(Load))) for unsigned and Shrs(Shl(Proj_mode(Load))) for signed
*/
/* abandon bitfield sel */
irg = get_irn_irg(sel);
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
set_Load_ptr(load, ptr);
set_Load_mode(load, mode);
/* create new proj, switch off CSE or we may get the old one back */
old_cse = get_opt_cse();
set_opt_cse(0);
res = new_r_Proj(load, mode, pn_Load_res);
set_opt_cse(old_cse);
if (mode_is_signed(mode)) { /* signed */
int shift_count_up = bits - (bf_bits + bit_offset);
int shift_count_down = bits - bf_bits;
if (shift_count_up) {
res = new_r_Shl(block, res, new_r_Const_long(irg, mode_Iu, shift_count_up), mode);
}
if (shift_count_down) {
res = new_r_Shrs(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
}
} else { /* unsigned */
int shift_count_down = bit_offset;
unsigned mask = ((unsigned)-1) >> (bits - bf_bits);
if (shift_count_down) {
res = new_r_Shr(block, res, new_r_Const_long(irg, mode_Iu, shift_count_down), mode);
}
if (bits != bf_bits) {
res = new_r_And(block, res, new_r_Const_long(irg, mode, mask), mode);
}
}
exchange(proj, res);
}
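(Aside, a hedged sketch rather than code from the tree: the signed path above builds a Shl followed by a Shrs to sign-extend the field. In plain C, with a hypothetical 32-bit word, that corresponds to shifting the field up to the top of the word and arithmetically shifting it back down.)

#include <stdint.h>
#include <stdio.h>

/* Sign-extending bitfield extraction, mirroring the removed signed path:
 * shift the field up so its sign bit becomes the word's sign bit, then
 * arithmetic-shift back down. Assumes two's-complement int32_t and that
 * >> on a negative value is an arithmetic shift: the usual behaviour,
 * but implementation-defined in ISO C. */
static int32_t extract_signed_bitfield(uint32_t word, unsigned bit_offset,
                                       unsigned width)
{
	unsigned bits       = 32;
	unsigned shift_up   = bits - (width + bit_offset); /* cf. shift_count_up */
	unsigned shift_down = bits - width;                /* cf. shift_count_down */
	return (int32_t)(word << shift_up) >> shift_down;
}

int main(void)
{
	/* a 3-bit field at bit offset 4 holding the bit pattern 111, i.e. -1 */
	uint32_t word = UINT32_C(0x70);
	printf("%d\n", (int)extract_signed_bitfield(word, 4, 3)); /* prints -1 */
	return 0;
}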
/**
* lower bitfield store access.
*
* @todo: It adds a load which may produce an exception!
*/
static void lower_bitfields_stores(ir_node *store)
{
ir_node *sel = get_Store_ptr(store);
ir_node *ptr, *value;
ir_entity *ent;
ir_type *bf_type;
ir_mode *bf_mode, *mode;
ir_node *mem, *irn, *block;
ir_graph *irg;
unsigned mask, neg_mask;
int bf_bits, bits_mask, offset, bit_offset;
dbg_info *db;
/* check bitfield access */
if (!is_Sel(sel))
return;
ent = get_Sel_entity(sel);
bf_type = get_entity_type(ent);
/* must be a bitfield type */
if (!is_Primitive_type(bf_type) || get_primitive_base_type(bf_type) == NULL)
return;
/* We have a bitfield access if either a bit offset is given or
the size is not integral. */
bf_mode = get_type_mode(bf_type);
if (! bf_mode)
return;
value = get_Store_value(store);
mode = get_irn_mode(value);
block = get_nodes_block(store);
bf_bits = get_mode_size_bits(bf_mode);
bit_offset = get_entity_offset_bits_remainder(ent);
if (bit_offset == 0 && is_integral_size(bf_bits) && bf_mode == get_irn_mode(value))
return;
/*
* ok, here we are: now convert the Store(Sel(), value) into
* Store(Or(And(Load(Sel), ~mask), And(Shl(value, bit_offset), mask)))
*/
mem = get_Store_mem(store);
offset = get_entity_offset(ent);
bits_mask = get_mode_size_bits(mode) - bf_bits;
mask = ((unsigned)-1) >> bits_mask;
mask <<= bit_offset;
neg_mask = ~mask;
/* abandon bitfield sel */
irg = get_irn_irg(sel);
ptr = get_Sel_ptr(sel);
db = get_irn_dbg_info(sel);
ptr = new_rd_Add(db, block, ptr, new_r_Const_long(irg, mode_Is, offset), get_irn_mode(ptr));
if (neg_mask) {
/* there are some bits, normal case */
irn = new_r_Load(block, mem, ptr, mode, cons_none);
mem = new_r_Proj(irn, mode_M, pn_Load_M);
irn = new_r_Proj(irn, mode, pn_Load_res);
irn = new_r_And(block, irn, new_r_Const_long(irg, mode, neg_mask), mode);
if (bit_offset > 0) {
value = new_r_Shl(block, value, new_r_Const_long(irg, mode_Iu, bit_offset), mode);
}
value = new_r_And(block, value, new_r_Const_long(irg, mode, mask), mode);
value = new_r_Or(block, value, irn, mode);
}
set_Store_mem(store, mem);
set_Store_value(store, value);
set_Store_ptr(store, ptr);
}
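(Aside, illustrative only: the store lowering above is a read-modify-write, i.e. load the containing word, clear the field via the inverted mask, merge in the shifted new value, and store the result back. A small standalone C sketch with invented word, value, offset, and width parameters.)

#include <stdint.h>
#include <stdio.h>

/* Read-modify-write of a bitfield, mirroring the removed store lowering:
 * clear the field with ~mask, then OR in the shifted and masked new value. */
static uint32_t store_bitfield(uint32_t word, uint32_t value,
                               unsigned bit_offset, unsigned width)
{
	uint32_t mask = ((width >= 32) ? UINT32_MAX
	                               : ((UINT32_C(1) << width) - 1)) << bit_offset;
	return (word & ~mask) | ((value << bit_offset) & mask);
}

int main(void)
{
	/* overwrite a 3-bit field at bit offset 4 with the value 5 */
	uint32_t word = UINT32_C(0xFFFFFFFF);
	printf("0x%08X\n", (unsigned)store_bitfield(word, 5, 4, 3)); /* 0xFFFFFFDF */
	return 0;
}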
/**
* lowers IR-nodes, called from walker
*/
@@ -479,31 +305,6 @@ static void lower_irnode(ir_node *irn, void *env)
}
}
/**
* Walker: lowers IR-nodes for bitfield access
*/
static void lower_bf_access(ir_node *irn, void *env)
{
(void) env;
switch (get_irn_opcode(irn)) {
case iro_Proj:
{
long proj = get_Proj_proj(irn);
ir_node *pred = get_Proj_pred(irn);
if (proj == pn_Load_res && is_Load(pred))
lower_bitfields_loads(irn, pred);
break;
}
case iro_Store:
lower_bitfields_stores(irn);
break;
default:
break;
}
}
/*
* Replaces SymConsts by a real constant if possible.
* Replace Sel nodes by address computation. Also resolves array access.
@@ -511,12 +312,6 @@ static void lower_bf_access(ir_node *irn, void *env)
*/
void lower_highlevel_graph(ir_graph *irg)
{
if (is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
/* First step: lower bitfield access: must be run as long as Sels still
* exist. */
irg_walk_graph(irg, NULL, lower_bf_access, NULL);
}
/* Finally: lower SymConst-Size and Sel nodes, Casts, unaligned Load/Stores. */
irg_walk_graph(irg, NULL, lower_irnode, NULL);
}
@@ -1182,8 +1182,7 @@ static unsigned optimize_load(ir_node *load)
if (value != NULL) {
ir_graph *irg = get_irn_irg(load);
value = can_replace_load_by_const(load, value);
if (value != NULL && is_Sel(ptr) &&
!is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
if (value != NULL && is_Sel(ptr)) {
/* frontend has inserted masking operations after bitfield accesses,
* so we might have to shift the const. */
unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
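(Aside, a sketch under assumptions rather than the code that follows this hunk: because the frontend emits the masking itself, a Load that gets folded to a constant only needs that constant shifted into place by the entity's bit-offset remainder; the frontend's Shr/And afterwards recovers the field value. A tiny C check of that arithmetic with made-up numbers.)

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* made-up numbers: a 3-bit field holding the value 5 at bit offset 4 */
	uint32_t field_value = 5, bit_offset = 4, mask = 0x7;

	/* constant standing in for the Load result: the field value shifted
	 * to its position inside the containing word */
	uint32_t load_const = field_value << bit_offset;

	/* the masking the frontend emitted after the Load recovers the field */
	assert(((load_const >> bit_offset) & mask) == field_value);
	return 0;
}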