Commit bba5f077 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

be: Factor out common code to check whether a jump falls through.

parent 4f9714f2
......@@ -666,9 +666,7 @@ static void emit_amd64_jcc(const ir_node *irn)
be_cond_branch_projs_t projs = be_get_cond_branch_projs(irn);
ir_node const *const block = get_nodes_block(irn);
ir_node const *const true_target = be_emit_get_cfop_target(projs.t);
if (be_emit_get_prev_block(true_target) == block) {
if (be_is_fallthrough(projs.t)) {
/* exchange both proj's so the second one can be omitted */
ir_node *const t = projs.t;
projs.t = projs.f;
......
......@@ -392,9 +392,7 @@ static void emit_arm_B(const ir_node *irn)
assert(relation != ir_relation_false);
assert(relation != ir_relation_true);
ir_node const *const block = get_nodes_block(irn);
ir_node const *const true_target = be_emit_get_cfop_target(projs.t);
if (be_emit_get_prev_block(true_target) == block) {
if (be_is_fallthrough(projs.t)) {
/* exchange both proj's so the second one can be omitted */
ir_node *const t = projs.t;
projs.t = projs.f;
......
......@@ -92,3 +92,10 @@ void be_emit_cfop_target(ir_node const *const jmp)
ir_node *const target = be_emit_get_cfop_target(jmp);
be_gas_emit_block_name(target);
}
/**
 * Check whether the control flow operation @p jmp is a simple fallthrough,
 * i.e. its target block is emitted directly after the block containing
 * @p jmp, so no explicit jump instruction is needed.
 */
bool be_is_fallthrough(ir_node const *const jmp)
{
	return be_emit_get_prev_block(be_emit_get_cfop_target(jmp))
	    == get_nodes_block(jmp);
}
......@@ -83,6 +83,8 @@ be_cond_branch_projs_t be_get_cond_branch_projs(ir_node const *node);
*/
void be_emit_cfop_target(ir_node const *jmp);
bool be_is_fallthrough(ir_node const *jmp);
#define BE_EMITF(node, fmt, ap, in_delay_slot) \
va_list ap; \
va_start(ap, fmt); \
......@@ -108,7 +110,7 @@ void be_emit_cfop_target(ir_node const *jmp);
} else
/* Emit the jump instruction @p name targeting @p jmp, or — when the target
 * is the fallthrough block — emit nothing but an optional verbose-asm
 * comment.  The trailing else-chain trick lets callers append further
 * alternatives.
 * NOTE: the scraped diff left both the old inline prev-block check and the
 * new be_is_fallthrough() call in the macro, producing an unmatched brace
 * and a broken else-chain; this restores the intended post-commit body. */
#define BE_EMIT_JMP(arch, node, name, jmp) \
	if (be_is_fallthrough(jmp)) { \
		if (be_options.verbose_asm) \
			arch##_emitf(node, "/* fallthrough to %L */", jmp); \
	} else if (arch##_emitf(node, name " %L", jmp), 0) {} else
......
......@@ -664,11 +664,6 @@ static void ia32_emit_exc_label(const ir_node *node)
be_emit_irprintf("%lu", get_ia32_exc_label_id(node));
}
/* True when execution can simply fall through from @p block into
 * @p target, i.e. @p target is emitted immediately after @p block. */
static bool fallthrough_possible(const ir_node *block, const ir_node *target)
{
	return block == be_emit_get_prev_block(target);
}
/**
* Emits the jump sequence for a conditional jump (cmp + jmp_true + jmp_false)
*/
......@@ -678,23 +673,22 @@ static void emit_ia32_Jcc(const ir_node *node)
be_cond_branch_projs_t projs = be_get_cond_branch_projs(node);
ir_node const *target_true = be_emit_get_cfop_target(projs.t);
ir_node const *block = get_nodes_block(node);
if (fallthrough_possible(block, target_true)) {
if (be_is_fallthrough(projs.t)) {
/* exchange both proj's so the second one can be omitted */
ir_node *const t = projs.t;
projs.t = projs.f;
projs.f = t;
cc = x86_negate_condition_code(cc);
}
const ir_node *target_false = be_emit_get_cfop_target(projs.f);
bool fallthrough = fallthrough_possible(block, target_false);
bool const fallthrough = be_is_fallthrough(projs.f);
/* if we can't have a fallthrough anyway, put the more likely case first */
if (!fallthrough) {
/* We would need execfreq for the concrete edge, but don't have it
* available here, so we use the block execfreq :-( */
double freq_true = get_block_execfreq(target_true);
double freq_false = get_block_execfreq(target_false);
ir_node const *const target_true = be_emit_get_cfop_target(projs.t);
double const freq_true = get_block_execfreq(target_true);
ir_node const *const target_false = be_emit_get_cfop_target(projs.f);
double const freq_false = get_block_execfreq(target_false);
if (freq_false > freq_true) {
ir_node *const t = projs.t;
projs.t = projs.f;
......@@ -1367,7 +1361,7 @@ static void ia32_emit_block_header(ir_node *block)
bool has_fallthrough = false;
for (int i = get_Block_n_cfgpreds(block); i-- > 0; ) {
ir_node *pred_block = get_Block_cfgpred_block(block, i);
if (fallthrough_possible(pred_block, block)) {
if (be_emit_get_prev_block(block) == pred_block) {
has_fallthrough = true;
break;
}
......
......@@ -917,19 +917,10 @@ static void enc_jmp(ir_node const *const cfop)
enc_jmp_destination(cfop);
}
/* Check whether @p target is the block emitted directly after @p block,
 * so a jump from @p block to @p target can be omitted. */
static bool fallthrough_possible(const ir_node *block, const ir_node *target)
{
	const ir_node *const prev = be_emit_get_prev_block(target);
	return prev == block;
}
/**
 * Encode an unconditional jump.  The jump is omitted entirely when its
 * target is the fallthrough block.
 * NOTE: the scraped diff merged the pre- and post-commit bodies here,
 * which as literal code would call enc_jmp() twice in the non-fallthrough
 * case and reference the removed fallthrough_possible() helper; this is
 * the intended post-commit body.
 */
static void enc_jump(const ir_node *node)
{
	if (!be_is_fallthrough(node))
		enc_jmp(node);
}
static void enc_jcc(x86_condition_code_t pnc, ir_node const *const cfop)
......@@ -953,9 +944,7 @@ static void enc_ia32_jcc(const ir_node *node)
be_cond_branch_projs_t projs = be_get_cond_branch_projs(node);
ir_node const *target_true = be_emit_get_cfop_target(projs.t);
ir_node const *block = get_nodes_block(node);
if (fallthrough_possible(block, target_true)) {
if (be_is_fallthrough(projs.t)) {
/* exchange both proj's so the second one can be omitted */
ir_node *const t = projs.t;
projs.t = projs.f;
......@@ -963,14 +952,15 @@ static void enc_ia32_jcc(const ir_node *node)
cc = x86_negate_condition_code(cc);
}
ir_node const *target_false = be_emit_get_cfop_target(projs.f);
bool const fallthrough = fallthrough_possible(block, target_false);
bool const fallthrough = be_is_fallthrough(projs.f);
/* if we can't have a fallthrough anyway, put the more likely case first */
if (!fallthrough) {
/* We would need execfreq for the concrete edge, but don't have it
* available here, so we use the block execfreq :-( */
double freq_true = get_block_execfreq(target_true);
double freq_false = get_block_execfreq(target_false);
ir_node const *const target_true = be_emit_get_cfop_target(projs.t);
double const freq_true = get_block_execfreq(target_true);
ir_node const *const target_false = be_emit_get_cfop_target(projs.f);
double const freq_false = get_block_execfreq(target_false);
if (freq_false > freq_true) {
ir_node *const t = projs.t;
projs.t = projs.f;
......
......@@ -194,13 +194,6 @@ static int get_sparc_Call_dest_addr_pos(const ir_node *node)
return get_irn_arity(node)-1;
}
/* A sparc Ba needs no instruction when its target block is emitted
 * immediately after the block containing the Ba. */
static bool ba_is_fallthrough(const ir_node *node)
{
	return be_emit_get_prev_block(be_emit_get_cfop_target(node))
	    == get_nodes_block(node);
}
static bool is_no_instruction(const ir_node *node)
{
/* copies are nops if src_reg == dest_reg */
......@@ -214,7 +207,7 @@ static bool is_no_instruction(const ir_node *node)
if (be_is_IncSP(node) && be_get_IncSP_offset(node) == 0)
return true;
/* Ba is not emitted if it is a simple fallthrough */
if (is_sparc_Ba(node) && ba_is_fallthrough(node))
if (is_sparc_Ba(node) && be_is_fallthrough(node))
return true;
return be_is_Keep(node) || be_is_Start(node) || is_Phi(node);
......@@ -223,7 +216,7 @@ static bool is_no_instruction(const ir_node *node)
static bool has_delay_slot(const ir_node *node)
{
if (is_sparc_Ba(node)) {
return !ba_is_fallthrough(node);
return !be_is_fallthrough(node);
}
return arch_get_irn_flags(node) & sparc_arch_irn_flag_has_delay_slot;
......@@ -401,10 +394,7 @@ static void optimize_fallthrough(ir_node *node)
{
be_cond_branch_projs_t const projs = be_get_cond_branch_projs(node);
ir_node const *const block = get_nodes_block(node);
ir_node const *const true_target = be_emit_get_cfop_target(projs.t);
if (be_emit_get_prev_block(true_target) == block) {
if (be_is_fallthrough(projs.t)) {
/* exchange both proj destinations so the second one can be omitted */
set_Proj_num(projs.t, pn_sparc_Bicc_false);
set_Proj_num(projs.f, pn_sparc_Bicc_true);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment