Commit ac956e20 authored by Matthias Braun's avatar Matthias Braun
Browse files

add a callback to dump+verify lowering steps

This allows the frontend to register a dump+verify callback for the
lowering transformations performed by the backends.
parent b2409eda
......@@ -209,6 +209,15 @@ FIRM_API const backend_params *be_get_backend_param(void);
*/
FIRM_API void be_lower_for_target(void);
typedef void (*after_transform_func)(ir_graph *irg, const char *name);
/**
* Sets a callback that is called after each transformation step in
* be_lower_for_target(). This is typically used to run dump & verify steps
* to help debugging.
*/
FIRM_API void be_set_after_transform_func(after_transform_func func);
/**
* Main interface to the frontend.
*/
......
......@@ -225,9 +225,11 @@ static void TEMPLATE_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
/**
 * Target-lowering hook of the TEMPLATE backend.
 * Runs the generic lowering passes and announces each step via
 * be_after_irp_transform() so a callback registered with
 * be_set_after_transform_func() can dump/verify the intermediate state.
 */
static void TEMPLATE_lower_for_target(void)
{
	lower_builtins(0, NULL);
	be_after_irp_transform("lower-builtins");
	/* lower compound param handling */
	lower_calls_with_compounds(LF_RETURN_HIDDEN);
	be_after_irp_transform("lower-calls");
}
static int TEMPLATE_is_mux_allowed(ir_node *sel, ir_node *mux_false,
......
......@@ -500,11 +500,13 @@ static void amd64_lower_for_target(void)
{
/* lower compound param handling */
lower_calls_with_compounds(LF_RETURN_HIDDEN);
be_after_irp_transform("lower-calls");
size_t n_irgs = get_irp_n_irgs();
for (size_t i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
lower_switch(irg, 4, 256, mode_Iu);
be_after_transform(irg, "lower-switch");
}
for (size_t i = 0; i < n_irgs; ++i) {
......@@ -514,9 +516,11 @@ static void amd64_lower_for_target(void)
* during code generation yet.
* TODO: Adapt this once custom CopyB handling is implemented. */
lower_CopyB(irg, 64, 65, true);
be_after_transform(irg, "lower-copyb");
}
lower_builtins(0, NULL);
be_after_irp_transform("lower-builtins");
}
static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
......
......@@ -385,10 +385,12 @@ static void arm_lower_for_target(void)
/* lower compound param handling */
lower_calls_with_compounds(LF_RETURN_HIDDEN);
be_after_irp_transform("lower-calls");
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
lower_switch(irg, 4, 256, mode_gp);
be_after_transform(irg, "lower-switch");
}
for (i = 0; i < n_irgs; ++i) {
......@@ -397,6 +399,7 @@ static void arm_lower_for_target(void)
* memcpy calls.
* TODO: These constants need arm-specific tuning. */
lower_CopyB(irg, 31, 32, false);
be_after_transform(irg, "lower-copyb");
}
}
......
......@@ -51,6 +51,8 @@ struct be_options_t {
};
extern be_options_t be_options;
extern after_transform_func be_after_transform;
struct be_main_env_t {
arch_env_t *arch_env;
const char *cup_name; /**< name of the compilation unit */
......@@ -67,6 +69,8 @@ void be_get_allocatable_regs(ir_graph const *irg, arch_register_class_t const *c
unsigned be_get_n_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls);
void be_after_irp_transform(const char *name);
/**
* Initialize the backend. Must be run first in init_firm();
*/
......
......@@ -416,6 +416,30 @@ static const char *get_timer_name(be_timer_id_t id)
}
ir_timer_t *be_timers[T_LAST+1];
/**
 * Default after-transform hook: intentionally a no-op, installed so that
 * be_after_transform is always safe to call without a NULL check.
 */
static void dummy_after_transform(ir_graph *irg, const char *name)
{
	/* silence unused-parameter warnings */
	(void)name;
	(void)irg;
}
after_transform_func be_after_transform = dummy_after_transform;
/**
 * Install the callback that is invoked after each lowering transformation
 * performed by the backend (see be_lower_for_target()).
 */
void be_set_after_transform_func(after_transform_func func)
{
	be_after_transform = func;
}
/**
 * Invoke the after-transform callback once for every graph of the program.
 * Graphs are visited in reverse index order.
 *
 * @param name  identifier of the transformation step just performed,
 *              passed through to the callback
 */
void be_after_irp_transform(const char *name)
{
	if (be_after_transform == NULL)
		return;
	size_t i = get_irp_n_irgs();
	while (i > 0) {
		--i;
		be_after_transform(get_irp_irg(i), name);
	}
}
void be_lower_for_target(void)
{
initialize_isa();
......
......@@ -1757,10 +1757,12 @@ static void ia32_lower_for_target(void)
* stackframe)
*/
lower_calls_with_compounds(LF_RETURN_HIDDEN | LF_DONT_LOWER_ARGUMENTS);
be_after_irp_transform("lower-calls");
/* replace floating point operations by function calls */
if (ia32_cg_config.use_softfloat) {
lower_floating_point();
be_after_irp_transform("lower-fp");
}
ir_builtin_kind supported[32];
......@@ -1785,19 +1787,23 @@ static void ia32_lower_for_target(void)
supported[s++] = ir_bk_compare_swap;
assert(s < ARRAY_SIZE(supported));
lower_builtins(s, supported);
be_after_irp_transform("lower-builtins");
for (size_t i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
/* break up switches with wide ranges */
lower_switch(irg, 4, 256, mode_gp);
be_after_transform(irg, "lower-switch");
}
ia32_lower64();
be_after_irp_transform("lower-64");
for (size_t i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
/* lower for mode_b stuff */
ir_lower_mode_b(irg, mode_Iu);
be_after_transform(irg, "lower-modeb");
}
for (size_t i = 0; i < n_irgs; ++i) {
......@@ -1806,6 +1812,7 @@ static void ia32_lower_for_target(void)
* so we can generate rep movs later, and turn all big CopyBs into
* memcpy calls. */
lower_CopyB(irg, 64, 8193, true);
be_after_transform(irg, "lower-copyb");
}
}
......
......@@ -494,39 +494,48 @@ static void sparc_end_codegeneration(void *self)
/**
 * Target-lowering hook of the SPARC backend.
 * Runs the generic lowering passes (compound calls, CopyB, soft-float,
 * builtins, switch, 64-bit ops, mode_b, Alloc) and reports every step via
 * be_after_transform()/be_after_irp_transform() so a registered callback can
 * dump and verify the intermediate representation.
 *
 * NOTE(review): the scraped diff had lost its +/- markers and retained both
 * the pre- and post-change variants of two lines (an unbraced duplicate of
 * the use_fpu 'if' and a second 'size_t s = 0;'); this is the reconstructed
 * post-patch version with the stale pre-change lines removed.
 */
static void sparc_lower_for_target(void)
{
	lower_calls_with_compounds(LF_RETURN_HIDDEN);
	be_after_irp_transform("lower-calls");

	for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* Turn all small CopyBs into loads/stores and all bigger CopyBs into
		 * memcpy calls. */
		lower_CopyB(irg, 31, 32, false);
		be_after_transform(irg, "lower-copyb");
	}

	if (!sparc_cg_config.use_fpu) {
		lower_floating_point();
		be_after_irp_transform("lower-fp");
	}

	ir_builtin_kind supported[8];
	size_t s = 0;
	supported[s++] = ir_bk_saturating_increment;
	if (sparc_cg_config.use_cas)
		supported[s++] = ir_bk_compare_swap;
	assert(s < ARRAY_SIZE(supported));
	lower_builtins(s, supported);
	be_after_irp_transform("lower-builtins");

	ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
	for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		lower_switch(irg, 4, 256, mode_gp);
		be_after_transform(irg, "lower-switch");
	}

	sparc_lower_64bit();
	be_after_irp_transform("lower-64");

	for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		ir_lower_mode_b(irg, mode_Iu);
		be_after_transform(irg, "lower-modeb");
		/* TODO: Pass SPARC_MIN_STACKSIZE as addr_delta as soon as
		 * Alloc nodes are implemented more efficiently. */
		lower_alloc(irg, SPARC_STACK_ALIGNMENT, true, 0);
		be_after_transform(irg, "lower-alloc");
	}
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment