Commit 4f9714f2 authored by Christoph Mallon

be: Factor out (almost) identical code to emit assembler for unconditional jumps into the macro BE_EMIT_JMP().
parent b8d2203a
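
The mechanics of the new macro (see the beemitter header hunk below) are worth spelling out: BE_EMIT_JMP() ends in a dangling `else`, so every call site must supply a brace-block, and that block runs exactly when a real jump instruction was emitted. In the fallthrough case the macro optionally prints a comment and the caller's block is skipped; otherwise the comma expression `(arch##_emitf(node, name " %L", jmp), 0)` emits the jump, evaluates to 0, and control drops through the empty `else if` body into the trailing `else`, i.e. the caller's block. Most backends pass an empty `{}`; the sparc backend uses the block to fill the branch delay slot. The following is a minimal, self-contained sketch of the same trick; `emitf`, `verbose_asm`, and `EMIT_JMP` are simplified stand-ins invented for illustration, not the libFirm API.

/* Minimal sketch of the BE_EMIT_JMP() control-flow trick. All names here
 * are invented for illustration; only the macro structure follows the
 * commit. */
#include <stdbool.h>
#include <stdio.h>

static bool verbose_asm = true;

/* Stand-in for the per-architecture arch##_emitf() printers. */
static void emitf(const char *fmt, const char *label)
{
	printf(fmt, label);
	putchar('\n');
}

/* Fallthrough: maybe print a comment; the caller's block is skipped.
 * Jump: the comma expression emits the jump and yields 0, so the `else if`
 * body is dead and control reaches the trailing `else` -- the brace-block
 * written by the caller. */
#define EMIT_JMP(is_fallthrough, name, label) \
	if (is_fallthrough) { \
		if (verbose_asm) \
			emitf("/* fallthrough to %s */", label); \
	} else if (emitf(name " %s", label), 0) {} else

int main(void)
{
	EMIT_JMP(false, "jmp", ".L1") {}   /* prints: jmp .L1 */
	EMIT_JMP(true,  "jmp", ".L2") {}   /* prints only a fallthrough comment */
	EMIT_JMP(false, "ba",  ".L3") {    /* prints: ba .L3, then the nop */
		emitf("nop /* delay slot, as the sparc backend does for %s */", ".L3");
	}
	return 0;
}

Expected output: `jmp .L1`, a fallthrough comment for `.L2`, and `ba .L3` followed by the nop, since the caller's block runs only on the path that emitted a jump.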
@@ -620,13 +620,7 @@ static void emit_amd64_asm(const ir_node *node)
  */
 static void emit_amd64_jmp(const ir_node *node)
 {
-	ir_node const *const block  = get_nodes_block(node);
-	ir_node const *const target = be_emit_get_cfop_target(node);
-	if (be_emit_get_prev_block(target) != block) {
-		amd64_emitf(node, "jmp %L", node);
-	} else if (be_options.verbose_asm) {
-		amd64_emitf(node, "/* fallthrough to %L */", node);
-	}
+	BE_EMIT_JMP(amd64, node, "jmp", node) {}
 }
 
 static void emit_jumptable_target(ir_entity const *const table,
@@ -695,12 +689,7 @@ static void emit_amd64_jcc(const ir_node *irn)
 	/* emit the true proj */
 	amd64_emitf(irn, "j%PX %L", (int)cc, projs.t);
 
-	ir_node const *const false_target = be_emit_get_cfop_target(projs.f);
-	if (be_emit_get_prev_block(false_target) != block) {
-		amd64_emitf(irn, "jmp %L", projs.f);
-	} else if (be_options.verbose_asm) {
-		amd64_emitf(irn, "/* fallthrough to %L */", projs.f);
-	}
+	BE_EMIT_JMP(amd64, irn, "jmp", projs.f) {}
 }
 
 static void emit_amd64_mov_gp(const ir_node *node)
@@ -418,12 +418,7 @@ static void emit_arm_B(const ir_node *irn)
 	/* emit the true proj */
 	arm_emitf(irn, "b%s %L", suffix, projs.t);
 
-	ir_node const *const false_target = be_emit_get_cfop_target(projs.f);
-	if (be_emit_get_prev_block(false_target) != block) {
-		arm_emitf(irn, "b %L", projs.f);
-	} else if (be_options.verbose_asm) {
-		arm_emitf(irn, "/* fallthrough to %L */", projs.f);
-	}
+	BE_EMIT_JMP(arm, irn, "b", projs.f) {}
 }
 
 static void emit_jumptable_target(ir_entity const *const table,
@@ -518,14 +513,7 @@ static void emit_be_MemPerm(const ir_node *node)
 
 static void emit_arm_Jmp(const ir_node *node)
 {
-	/* for now, the code works for scheduled and non-schedules blocks */
-	ir_node const *const block  = get_nodes_block(node);
-	ir_node const *const target = be_emit_get_cfop_target(node);
-	if (be_emit_get_prev_block(target) != block) {
-		arm_emitf(node, "b %L", node);
-	} else if (be_options.verbose_asm) {
-		arm_emitf(node, "/* fallthrough to %L */", node);
-	}
+	BE_EMIT_JMP(arm, node, "b", node) {}
 }
 
 /**
@@ -107,4 +107,10 @@ void be_emit_cfop_target(ir_node const *jmp);
 		be_emit_cfop_target(va_arg(ap, ir_node const*)); \
 	} else
 
+#define BE_EMIT_JMP(arch, node, name, jmp) \
+	if (be_emit_get_prev_block(be_emit_get_cfop_target(jmp)) == get_nodes_block(node)) { \
+		if (be_options.verbose_asm) \
+			arch##_emitf(node, "/* fallthrough to %L */", jmp); \
+	} else if (arch##_emitf(node, name " %L", jmp), 0) {} else
+
 #endif
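
For reference, the sparc use further down, BE_EMIT_JMP(sparc, node, "ba", projs.f) { ... }, expands (modulo whitespace) to:

	if (be_emit_get_prev_block(be_emit_get_cfop_target(projs.f)) == get_nodes_block(node)) {
		if (be_options.verbose_asm)
			sparc_emitf(node, "/* fallthrough to %L */", projs.f);
	} else if (sparc_emitf(node, "ba %L", projs.f), 0) {} else {
		/* caller's brace-block: the delay-slot code goes here */
	}

so the delay-slot code runs only on the path that actually emitted the `ba`.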
@@ -726,12 +726,7 @@ static void emit_ia32_Jcc(const ir_node *node)
 		be_emit_write_line();
 	}
 
-	/* the second Proj might be a fallthrough */
-	if (!fallthrough) {
-		ia32_emitf(node, "jmp %L", projs.f);
-	} else if (be_options.verbose_asm) {
-		ia32_emitf(node, "/* fallthrough to %L */", projs.f);
-	}
+	BE_EMIT_JMP(ia32, node, "jmp", projs.f) {}
 }
 
 /**
@@ -796,14 +791,7 @@ static void emit_ia32_SwitchJmp(const ir_node *node)
  */
 static void emit_ia32_Jmp(const ir_node *node)
 {
-	/* we have a block schedule */
-	ir_node *block  = get_nodes_block(node);
-	ir_node *target = be_emit_get_cfop_target(node);
-	if (!fallthrough_possible(block, target)) {
-		ia32_emitf(node, "jmp %L", node);
-	} else if (be_options.verbose_asm) {
-		ia32_emitf(node, "/* fallthrough to %L */", node);
-	}
+	BE_EMIT_JMP(ia32, node, "jmp", node) {}
 }
 
 static void emit_ia32_asm_register(const arch_register_t *reg, char modifier,
@@ -1112,16 +1112,11 @@ static void emit_sparc_branch(const ir_node *node, get_cc_func get_cc)
 	sparc_emitf(node, "%s%A %L", get_cc(relation), projs.t);
 	fill_delay_slot(node);
 
-	const ir_node *block       = get_nodes_block(node);
-	const ir_node *proj_target = be_emit_get_cfop_target(projs.f);
-	if (be_emit_get_prev_block(proj_target) != block) {
-		sparc_emitf(node, "ba %L", projs.f);
+	BE_EMIT_JMP(sparc, node, "ba", projs.f) {
 		/* TODO: fill this slot as well */
 		emitting_delay_slot = true;
 		sparc_emitf(NULL, "nop");
 		emitting_delay_slot = false;
-	} else if (be_options.verbose_asm) {
-		sparc_emitf(node, "/* fallthrough to %L */", projs.f);
 	}
 }
 
@@ -1152,11 +1147,8 @@ static void emit_sparc_fbfcc(const ir_node *node)
 
 static void emit_sparc_Ba(const ir_node *node)
 {
-	if (!ba_is_fallthrough(node)) {
-		sparc_emitf(node, "ba %L", node);
+	BE_EMIT_JMP(sparc, node, "ba", node) {
 		fill_delay_slot(node);
-	} else if (be_options.verbose_asm) {
-		sparc_emitf(node, "/* fallthrough to %L */", node);
 	}
 }