Commit cbc9fdd0 authored by Matthias Braun

switch_lowerer: cast to backend specified Switch mode

This should fix all cases of 64bit switches
parent b8d367e5
......@@ -70,9 +70,10 @@ FIRM_API void lower_CopyB(ir_graph *irg, unsigned max_small_size,
* @param small_switch If the switch has <= small_switch cases then change it to an if-cascade.
* @param spare_size Allowed spare size for table switches in machine words.
* (Default in edgfe: 128)
* @param selector_mode mode that must be used for the Switch selector
*/
FIRM_API void lower_switch(ir_graph *irg, unsigned small_switch,
unsigned spare_size);
unsigned spare_size, ir_mode *selector_mode);
/**
* Replaces SymConsts by a real constant if possible.
......
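For illustration, a minimal caller sketch of the new signature (hypothetical: the hook name my_backend_lower_for_target, the mode_Iu placeholder and the <libfirm/firm.h> include are assumptions, not part of this commit). The extra argument is the unsigned mode the backend wants Switch selectors to have, usually the mode of its general-purpose register class:

#include <libfirm/firm.h>

/* hypothetical backend hook: tell the switch lowerer which unsigned mode
 * Switch selectors must end up in */
static void my_backend_lower_for_target(void)
{
	ir_mode *mode_gp = mode_Iu; /* placeholder for the backend's gp mode */
	size_t   i, n_irgs = get_irp_n_irgs();

	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* break up switches with wide ranges; the lowerer inserts its own
		 * out-of-bounds check before casting the selector to mode_gp */
		lower_switch(irg, 4, 256, mode_gp);
	}
}

The actual call sites below (arm, ia32, sparc) follow exactly this pattern with their own register class modes.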
......@@ -492,6 +492,7 @@ static int arm_is_valid_clobber(const char *clobber)
static void arm_lower_for_target(void)
{
ir_mode *mode_gp = arm_reg_classes[CLASS_arm_gp].mode;
size_t i, n_irgs = get_irp_n_irgs();
/* lower compound param handling */
......@@ -499,7 +500,7 @@ static void arm_lower_for_target(void)
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
lower_switch(irg, 4, 256);
lower_switch(irg, 4, 256, mode_gp);
}
for (i = 0; i < n_irgs; ++i) {
......
......@@ -1174,7 +1174,7 @@ static void introduce_epilog(ir_node *ret)
ir_node *block = get_nodes_block(ret);
ir_node *first_sp = get_irn_n(ret, n_be_Return_sp);
ir_node *curr_sp = first_sp;
ir_mode *mode_gp = mode_Iu;
ir_mode *mode_gp = ia32_reg_classes[CLASS_ia32_gp].mode;
if (!layout->sp_relative) {
int n_ebp = determine_ebp_input(ret);
......@@ -2027,6 +2027,7 @@ static int ia32_is_valid_clobber(const char *clobber)
static void ia32_lower_for_target(void)
{
ir_mode *mode_gp = ia32_reg_classes[CLASS_ia32_gp].mode;
size_t i, n_irgs = get_irp_n_irgs();
/* perform doubleword lowering */
......@@ -2054,7 +2055,7 @@ static void ia32_lower_for_target(void)
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
/* break up switches with wide ranges */
lower_switch(irg, 4, 256);
lower_switch(irg, 4, 256, mode_gp);
}
ir_prepare_dw_lowering(&lower_dw_params);
......
......@@ -468,6 +468,7 @@ static void sparc_end_codegeneration(void *self)
static void sparc_lower_for_target(void)
{
ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
size_t i, n_irgs = get_irp_n_irgs();
lower_calls_with_compounds(LF_RETURN_HIDDEN);
......@@ -489,7 +490,7 @@ static void sparc_lower_for_target(void)
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
ir_lower_mode_b(irg, mode_Iu);
lower_switch(irg, 4, 256);
lower_switch(irg, 4, 256, mode_gp);
/* TODO: Pass SPARC_MIN_STACKSIZE as addr_delta as soon as
* Alloc nodes are implemented more efficiently. */
lower_alloc(irg, SPARC_STACK_ALIGNMENT, true, 0);
......
......@@ -42,10 +42,11 @@
i >= 0 && (outirn = get_irn_out(irn, i)); --i)
typedef struct walk_env_t {
ir_nodeset_t processed;
ir_mode *selector_mode;
unsigned spare_size; /**< the allowed spare size for table switches */
unsigned small_switch;
bool changed; /**< indicates whether a change was performed */
ir_nodeset_t processed;
} walk_env_t;
typedef struct case_data_t {
......@@ -193,17 +194,86 @@ static void normalize_table(ir_node *switchn, ir_mode *new_mode,
}
}
static void create_out_of_bounds_check(switch_info_t *info)
{
ir_node *switchn = info->switchn;
ir_graph *irg = get_irn_irg(switchn);
dbg_info *dbgi = get_irn_dbg_info(switchn);
ir_node *selector = get_Switch_selector(switchn);
ir_node *block = get_nodes_block(switchn);
ir_node **default_preds = NEW_ARR_F(ir_node*, 0);
ir_node *default_block = NULL;
ir_node *max_const;
ir_node *proj_true;
ir_node *proj_false;
ir_node *cmp;
ir_node *oob_cond;
ir_node *in[1];
ir_node *new_block;
int i;
ir_node *proj;
size_t n_default_preds;
assert(tarval_is_null(info->switch_min));
/* check for out-of-bounds */
max_const = new_r_Const(irg, info->switch_max);
cmp = new_rd_Cmp(dbgi, block, selector, max_const, ir_relation_less_equal);
oob_cond = new_rd_Cond(dbgi, block, cmp);
proj_true = new_r_Proj(oob_cond, mode_X, pn_Cond_true);
proj_false = new_r_Proj(oob_cond, mode_X, pn_Cond_false);
ARR_APP1(ir_node*, default_preds, proj_false);
/* create new block containing the switch */
in[0] = proj_true;
new_block = new_r_Block(irg, 1, in);
set_nodes_block(switchn, new_block);
/* adjust projs */
foreach_out_irn(switchn, i, proj) {
long pn = get_Proj_proj(proj);
if (pn == pn_Switch_default) {
assert(default_block == NULL);
default_block = get_irn_out(proj, 0);
ARR_APP1(ir_node*, default_preds, proj);
}
set_nodes_block(proj, new_block);
}
/* adapt default block */
n_default_preds = ARR_LEN(default_preds);
if (n_default_preds > 1) {
/* create new intermediate blocks so we don't have critical edges */
size_t p;
for (p = 0; p < n_default_preds; ++p) {
ir_node *pred = default_preds[p];
ir_node *split_block;
ir_node *block_in[1];
block_in[0] = pred;
split_block = new_r_Block(irg, 1, block_in);
default_preds[p] = new_r_Jmp(split_block);
}
}
set_irn_in(default_block, n_default_preds, default_preds);
DEL_ARR_F(default_preds);
clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
}
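For orientation, an informal sketch (not code from this commit) of the control flow that create_out_of_bounds_check builds: the switch must already be normalized so that switch_min is zero (hence the assert), the comparison selector <= switch_max guards a fresh block that now holds the Switch and its projs, and the false projection becomes an additional predecessor of the default block; when the default block ends up with several predecessors, each of them is routed through a fresh Jmp block so no critical edges are created.

/* before:                     after (sketch):
 *   B: Switch(selector)         B: Cmp  selector <= switch_max
 *        0..max  -> cases          Cond -true------------------> N
 *        default -> D                   -false-> (Jmp block) --> D
 *                               N: Switch(selector)
 *                                    0..max  -> cases
 *                                    default -> (Jmp block) ---> D
 */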
/**
* normalize switch to work on an unsigned input with the first case at 0
*/
static void normalize_switch(switch_info_t *info)
static void normalize_switch(switch_info_t *info, ir_mode *selector_mode)
{
ir_node *switchn = info->switchn;
ir_graph *irg = get_irn_irg(switchn);
ir_node *block = get_nodes_block(switchn);
ir_node *selector = get_Switch_selector(switchn);
ir_mode *switch_mode = get_irn_mode(selector);
ir_mode *mode = switch_mode;
ir_mode *mode = get_irn_mode(selector);
ir_tarval *delta = NULL;
bool needs_normalize = false;
......@@ -212,8 +282,9 @@ static void normalize_switch(switch_info_t *info)
mode = find_unsigned_mode(mode);
selector = new_r_Conv(block, selector, mode);
min = tarval_convert_to(min, mode);
needs_normalize = true;
info->switch_min = min;
info->switch_max = tarval_convert_to(info->switch_max, mode);
needs_normalize = true;
}
/* normalize so switch_min is at 0 */
......@@ -229,6 +300,22 @@ static void normalize_switch(switch_info_t *info)
needs_normalize = true;
}
/* if a selector_mode is set, then we will keep a Switch node; we have to
 * construct an out-of-bounds check and after that convert the switch
 * selector to the backend's desired switch mode */
if (selector_mode != NULL) {
set_Switch_selector(switchn, selector);
create_out_of_bounds_check(info);
selector = new_r_Conv(block, selector, selector_mode);
mode = selector_mode;
info->switch_min = tarval_convert_to(info->switch_min, mode);
info->switch_max = tarval_convert_to(info->switch_max, mode);
if (delta != NULL)
delta = tarval_convert_to(delta, mode);
needs_normalize = true;
}
if (needs_normalize) {
set_Switch_selector(switchn, selector);
normalize_table(switchn, mode, delta);
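To make the commit message concrete, a sketch (illustrative modes only: a 64-bit selector in mode_Lu lowered for a backend whose selector_mode is mode_Iu) of what the selector_mode branch above produces. The out-of-bounds check is constructed while the selector still has its original wide mode, and only afterwards is the selector narrowed with a Conv; an in-range value therefore fits into the target mode and the truncation cannot change which case is taken, while switch_min, switch_max and delta are converted along so that normalize_table can rewrite the case table in the new mode.

/*   B: Cmp  sel(mode_Lu) <= switch_max   -- bounds check in the wide mode
 *      Cond -false-> default
 *      sel32 = Conv(sel, mode_Iu)        -- backend's selector_mode
 *   N: Switch(sel32) { ... }             -- case table converted to mode_Iu
 */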
......@@ -326,76 +413,6 @@ static void create_if_cascade(switch_info_t *info, ir_node *block,
}
}
static void create_out_of_bounds_check(switch_info_t *info)
{
ir_node *switchn = info->switchn;
ir_graph *irg = get_irn_irg(switchn);
dbg_info *dbgi = get_irn_dbg_info(switchn);
ir_node *selector = get_Switch_selector(switchn);
ir_node *block = get_nodes_block(switchn);
ir_node **default_preds = NEW_ARR_F(ir_node*, 0);
ir_node *default_block = NULL;
ir_node *max_const;
ir_node *proj_true;
ir_node *proj_false;
ir_node *cmp;
ir_node *oob_cond;
ir_node *in[1];
ir_node *new_block;
int i;
ir_node *proj;
size_t n_default_preds;
assert(tarval_is_null(info->switch_min));
/* check for out-of-bounds */
max_const = new_r_Const(irg, info->switch_max);
cmp = new_rd_Cmp(dbgi, block, selector, max_const, ir_relation_less_equal);
oob_cond = new_rd_Cond(dbgi, block, cmp);
proj_true = new_r_Proj(oob_cond, mode_X, pn_Cond_true);
proj_false = new_r_Proj(oob_cond, mode_X, pn_Cond_false);
ARR_APP1(ir_node*, default_preds, proj_false);
/* create new block containing the switch */
in[0] = proj_true;
new_block = new_r_Block(irg, 1, in);
set_nodes_block(switchn, new_block);
/* adjust projs */
foreach_out_irn(switchn, i, proj) {
long pn = get_Proj_proj(proj);
if (pn == pn_Switch_default) {
assert(default_block == NULL);
default_block = get_irn_out(proj, 0);
ARR_APP1(ir_node*, default_preds, proj);
}
set_nodes_block(proj, new_block);
}
/* adapt default block */
n_default_preds = ARR_LEN(default_preds);
if (n_default_preds > 1) {
/* create new intermediate blocks so we don't have critical edges */
size_t p;
for (p = 0; p < n_default_preds; ++p) {
ir_node *pred = default_preds[p];
ir_node *split_block;
ir_node *block_in[1];
block_in[0] = pred;
split_block = new_r_Block(irg, 1, block_in);
default_preds[p] = new_r_Jmp(split_block);
}
}
set_irn_in(default_block, n_default_preds, default_preds);
DEL_ARR_F(default_preds);
clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
}
/**
* Block-Walker: searches for Switch nodes
*/
......@@ -444,12 +461,11 @@ static void find_switch_nodes(ir_node *block, void *ctx)
if (!lower_switch) {
/* we won't decompose the switch. But we must add an out-of-bounds
* check */
normalize_switch(&info);
create_out_of_bounds_check(&info);
normalize_switch(&info, env->selector_mode);
return;
}
normalize_switch(&info);
normalize_switch(&info, NULL);
analyse_switch1(&info);
/* Now create the if cascade */
......@@ -467,12 +483,17 @@ static void find_switch_nodes(ir_node *block, void *ctx)
| IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
}
void lower_switch(ir_graph *irg, unsigned small_switch, unsigned spare_size)
void lower_switch(ir_graph *irg, unsigned small_switch, unsigned spare_size,
ir_mode *selector_mode)
{
if (mode_is_signed(selector_mode))
panic("expected unsigned mode for switch selector");
walk_env_t env;
env.changed = false;
env.selector_mode = selector_mode;
env.spare_size = spare_size;
env.small_switch = small_switch;
env.changed = false;
ir_nodeset_init(&env.processed);
remove_critical_cf_edges(irg);
......