Commit ad9464a5 authored by Matthias Braun's avatar Matthias Braun
Browse files

eliminate the unnecessary and especially confusing concept of a...

eliminate the unnecessary and especially confusing concept of a 'code_generator'; an isa-interface is enough

[r28009]
parent fca4a757
......@@ -272,10 +272,10 @@ static void TEMPLATE_register_transformers(void)
/**
* Transform generic IR-nodes into TEMPLATE machine instructions
*/
void TEMPLATE_transform_graph(TEMPLATE_code_gen_t *cg)
void TEMPLATE_transform_graph(ir_graph *irg)
{
TEMPLATE_register_transformers();
be_transform_graph(cg->irg, NULL);
be_transform_graph(irg, NULL);
}
void TEMPLATE_init_transform(void)
......
......@@ -27,6 +27,6 @@
void TEMPLATE_init_transform(void);
void TEMPLATE_transform_graph(TEMPLATE_code_gen_t *cg);
void TEMPLATE_transform_graph(ir_graph *irg);
#endif
......@@ -101,12 +101,10 @@ static const arch_irn_ops_t TEMPLATE_irn_ops = {
* Transforms the standard firm graph into
* a TEMPLATE firm graph
*/
static void TEMPLATE_prepare_graph(void *self)
static void TEMPLATE_prepare_graph(ir_graph *irg)
{
TEMPLATE_code_gen_t *cg = self;
/* transform nodes into assembler instructions */
TEMPLATE_transform_graph(cg);
TEMPLATE_transform_graph(irg);
}
......@@ -114,69 +112,27 @@ static void TEMPLATE_prepare_graph(void *self)
/**
* Called immediately before emit phase.
*/
static void TEMPLATE_finish_irg(void *self)
static void TEMPLATE_finish_irg(ir_graph *irg)
{
(void) self;
(void) irg;
}
static void TEMPLATE_before_ra(void *self)
static void TEMPLATE_before_ra(ir_graph *irg)
{
(void) self;
(void) irg;
/* Some stuff you need to do after scheduling but before register allocation */
}
static void TEMPLATE_after_ra(void *self)
static void TEMPLATE_after_ra(ir_graph *irg)
{
(void) self;
(void) irg;
/* Some stuff you need to do immediately after register allocation */
}
/**
* Emits the code, closes the output file and frees
* the code generator interface.
*/
static void TEMPLATE_emit_and_done(void *self)
static void TEMPLATE_init_graph(ir_graph *irg)
{
TEMPLATE_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
TEMPLATE_emit_routine(irg);
/* de-allocate code generator */
free(cg);
}
static void *TEMPLATE_cg_init(ir_graph *irg);
static const arch_code_generator_if_t TEMPLATE_code_gen_if = {
TEMPLATE_cg_init,
NULL, /* get_pic_base hook */
NULL, /* before abi introduce hook */
TEMPLATE_prepare_graph,
NULL, /* spill hook */
TEMPLATE_before_ra, /* before register allocation hook */
TEMPLATE_after_ra, /* after register allocation hook */
TEMPLATE_finish_irg,
TEMPLATE_emit_and_done
};
/**
* Initializes the code generator.
*/
static void *TEMPLATE_cg_init(ir_graph *irg)
{
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
TEMPLATE_isa_t *isa = (TEMPLATE_isa_t *) arch_env;
TEMPLATE_code_gen_t *cg = XMALLOC(TEMPLATE_code_gen_t);
cg->impl = &TEMPLATE_code_gen_if;
cg->irg = irg;
cg->isa = isa;
return (arch_code_generator_t *)cg;
(void) irg;
}
......@@ -400,16 +356,6 @@ static int TEMPLATE_to_appear_in_schedule(void *block_env, const ir_node *irn)
return 1;
}
/**
* Initializes the code generator interface.
*/
static const arch_code_generator_if_t *TEMPLATE_get_code_generator_if(
void *self)
{
(void) self;
return &TEMPLATE_code_gen_if;
}
list_sched_selector_t TEMPLATE_sched_selector;
/**
......@@ -510,7 +456,6 @@ const arch_isa_if_t TEMPLATE_isa_if = {
TEMPLATE_get_reg_class,
TEMPLATE_get_reg_class_for_mode,
TEMPLATE_get_call_abi,
TEMPLATE_get_code_generator_if,
TEMPLATE_get_list_sched_selector,
TEMPLATE_get_ilp_sched_selector,
TEMPLATE_get_reg_class_alignment,
......@@ -520,7 +465,16 @@ const arch_isa_if_t TEMPLATE_isa_if = {
TEMPLATE_get_backend_irg_list,
NULL, /* mark remat */
TEMPLATE_parse_asm_constraint,
TEMPLATE_is_valid_clobber
TEMPLATE_is_valid_clobber,
TEMPLATE_init_graph,
NULL, /* get_pic_base */
NULL, /* before_abi */
TEMPLATE_prepare_graph,
TEMPLATE_before_ra,
TEMPLATE_after_ra,
TEMPLATE_finish_irg,
TEMPLATE_emit_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_TEMPLATE);
......
......@@ -31,30 +31,8 @@
#include "../beemitter.h"
#include "set.h"
typedef struct TEMPLATE_isa_t TEMPLATE_isa_t;
typedef struct TEMPLATE_code_gen_t TEMPLATE_code_gen_t;
typedef struct TEMPLATE_transform_env_t TEMPLATE_transform_env_t;
struct TEMPLATE_code_gen_t {
const arch_code_generator_if_t *impl; /**< implementation */
ir_graph *irg; /**< current irg */
TEMPLATE_isa_t *isa; /**< the isa instance */
};
struct TEMPLATE_isa_t {
typedef struct TEMPLATE_isa_t {
arch_env_t base; /**< must be derived from arch_isa */
};
/**
* this is a struct to minimize the number of parameters
* for transformation walker
*/
struct TEMPLATE_transform_env_t {
dbg_info *dbg; /**< The node debug info */
ir_graph *irg; /**< The irg, the node should be created in */
ir_node *block; /**< The block, the node should belong to */
ir_node *irn; /**< The irn, to be transformed */
ir_mode *mode; /**< The mode of the irn */
};
} TEMPLATE_isa_t;
#endif
......@@ -625,12 +625,11 @@ static void amd64_gen_labels(ir_node *block, void *env)
/**
* Main driver
*/
void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg)
void amd64_gen_routine(ir_graph *irg)
{
ir_entity *entity = get_irg_entity(irg);
ir_node **blk_sched;
int i, n;
(void)cg;
/* register all emitter functions */
amd64_register_emitters();
......
......@@ -43,7 +43,6 @@ void amd64_emit_fp_offset(const ir_node *node);
int get_amd64_reg_nr(ir_node *irn, int posi, int in_out);
const char *get_amd64_in_reg_name(ir_node *irn, int pos);
void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg);
void amd64_gen_routine(ir_graph *irg);
#endif
......@@ -47,12 +47,6 @@
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** holds the current code generator during transformation */
static amd64_code_gen_t *env_cg;
///* its enough to have those once */
//static ir_node *nomem, *noreg_GP;
/* Some support functions: */
static inline int mode_needs_gp_reg(ir_mode *mode)
......@@ -573,14 +567,6 @@ static ir_node *gen_be_FrameAddr(ir_node *node)
/* Boilerplate code for transformation: */
static void amd64_pretransform_node(void)
{
amd64_code_gen_t *cg = env_cg;
(void) cg;
// nomem = get_irg_no_mem(current_ir_graph);
}
static void amd64_register_transformers(void)
{
be_start_transform_setup();
......@@ -603,12 +589,10 @@ static void amd64_register_transformers(void)
be_set_transform_function(op_Minus, gen_Minus);
}
void amd64_transform_graph(amd64_code_gen_t *cg)
void amd64_transform_graph(ir_graph *irg)
{
amd64_register_transformers();
env_cg = cg;
be_transform_graph(cg->irg, amd64_pretransform_node);
be_transform_graph(irg, NULL);
}
void amd64_init_transform(void)
......
......@@ -27,6 +27,6 @@
void amd64_init_transform(void);
void amd64_transform_graph(amd64_code_gen_t *cg);
void amd64_transform_graph(ir_graph *irg);
#endif
......@@ -127,34 +127,27 @@ static const arch_irn_ops_t amd64_irn_ops = {
* Transforms the standard firm graph into
* a amd64 firm graph
*/
static void amd64_prepare_graph(void *self)
static void amd64_prepare_graph(ir_graph *irg)
{
amd64_code_gen_t *cg = self;
amd64_irg_data_t *irg_data = amd64_get_irg_data(irg);
amd64_transform_graph(irg);
amd64_transform_graph (cg);
if (cg->dump)
dump_ir_graph(cg->irg, "transformed");
if (irg_data->dump)
dump_ir_graph(irg, "transformed");
}
/**
* Called immediately before emit phase.
*/
static void amd64_finish_irg(void *self)
static void amd64_finish_irg(ir_graph *irg)
{
amd64_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
dump_ir_graph(irg, "amd64-finished");
(void) irg;
}
static void amd64_before_ra(void *self)
static void amd64_before_ra(ir_graph *irg)
{
amd64_code_gen_t *cg = self;
be_sched_fix_flags(cg->irg, &amd64_reg_classes[CLASS_amd64_flags],
NULL, NULL);
be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL, NULL);
}
......@@ -223,59 +216,23 @@ static void amd64_after_ra_walker(ir_node *block, void *data)
}
}
static void amd64_after_ra(void *self)
static void amd64_after_ra(ir_graph *irg)
{
amd64_code_gen_t *cg = self;
be_coalesce_spillslots(cg->irg);
be_coalesce_spillslots(irg);
irg_block_walk_graph(cg->irg, NULL, amd64_after_ra_walker, NULL);
irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);
}
/**
* Emits the code, closes the output file and frees
* the code generator interface.
*/
static void amd64_emit_and_done(void *self)
{
amd64_code_gen_t *cg = self;
ir_graph *irg = cg->irg;
amd64_gen_routine(cg, irg);
/* de-allocate code generator */
free(cg);
}
static void *amd64_cg_init(ir_graph *irg);
static const arch_code_generator_if_t amd64_code_gen_if = {
amd64_cg_init,
NULL, /* get_pic_base hook */
NULL, /* before abi introduce hook */
amd64_prepare_graph,
NULL, /* spill hook */
amd64_before_ra, /* before register allocation hook */
amd64_after_ra, /* after register allocation hook */
amd64_finish_irg,
amd64_emit_and_done
};
/**
* Initializes the code generator.
*/
static void *amd64_cg_init(ir_graph *irg)
static void amd64_init_graph(ir_graph *irg)
{
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
amd64_isa_t *isa = (amd64_isa_t *) arch_env;
amd64_code_gen_t *cg = XMALLOC(amd64_code_gen_t);
cg->impl = &amd64_code_gen_if;
cg->irg = irg;
cg->isa = isa;
cg->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
struct obstack *obst = be_get_be_obst(irg);
amd64_irg_data_t *irg_data = OALLOCZ(obst, amd64_irg_data_t);
irg_data->dump = (be_get_irg_options(irg)->dump_flags & DUMP_BE) ? 1 : 0;
return (arch_code_generator_t *)cg;
be_birg_from_irg(irg)->isa_link = irg_data;
}
......@@ -284,7 +241,7 @@ typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
/**
* Used to create per-graph unique pseudo nodes.
*/
static inline ir_node *create_const(amd64_code_gen_t *cg, ir_node **place,
static inline ir_node *create_const(ir_graph *irg, ir_node **place,
create_const_node_func func,
const arch_register_t* reg)
{
......@@ -293,7 +250,7 @@ static inline ir_node *create_const(amd64_code_gen_t *cg, ir_node **place,
if (*place != NULL)
return *place;
block = get_irg_start_block(cg->irg);
block = get_irg_start_block(irg);
res = func(NULL, block);
arch_set_irn_register(res, reg);
*place = res;
......@@ -558,16 +515,6 @@ static int amd64_to_appear_in_schedule(void *block_env, const ir_node *irn)
return 1;
}
/**
* Initializes the code generator interface.
*/
static const arch_code_generator_if_t *amd64_get_code_generator_if(
void *self)
{
(void) self;
return &amd64_code_gen_if;
}
list_sched_selector_t amd64_sched_selector;
/**
......@@ -669,7 +616,6 @@ const arch_isa_if_t amd64_isa_if = {
amd64_get_reg_class,
amd64_get_reg_class_for_mode,
amd64_get_call_abi,
amd64_get_code_generator_if,
amd64_get_list_sched_selector,
amd64_get_ilp_sched_selector,
amd64_get_reg_class_alignment,
......@@ -679,7 +625,16 @@ const arch_isa_if_t amd64_isa_if = {
amd64_get_backend_irg_list,
NULL, /* mark remat */
amd64_parse_asm_constraint,
amd64_is_valid_clobber
amd64_is_valid_clobber,
amd64_init_graph,
NULL, /* get_pic_base */
NULL, /* before_abi */
amd64_prepare_graph,
amd64_before_ra,
amd64_after_ra,
amd64_finish_irg,
amd64_gen_routine,
};
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64);
......
......@@ -32,16 +32,14 @@
#include "set.h"
typedef struct amd64_isa_t amd64_isa_t;
typedef struct amd64_code_gen_t amd64_code_gen_t;
typedef struct amd64_transform_env_t amd64_transform_env_t;
struct amd64_code_gen_t {
const arch_code_generator_if_t *impl; /**< implementation */
ir_graph *irg; /**< current irg */
amd64_isa_t *isa; /**< the isa instance */
char dump; /**< set to 1 if graphs should be dumped */
ir_node *noreg_gp; /**< unique NoReg_GP node */
};
typedef struct amd64_irg_data_t {
ir_graph *irg; /**< current irg */
amd64_isa_t *isa; /**< the isa instance */
char dump; /**< set to 1 if graphs should be dumped */
ir_node *noreg_gp; /**< unique NoReg_GP node */
} amd64_irg_data_t;
struct amd64_isa_t {
arch_env_t base; /**< must be derived from arch_isa */
......@@ -59,6 +57,11 @@ struct amd64_transform_env_t {
ir_mode *mode; /**< The mode of the irn */
};
ir_node *amd64_new_NoReg_gp(amd64_code_gen_t *cg);
static inline amd64_irg_data_t *amd64_get_irg_data(const ir_graph *irg)
{
return (amd64_irg_data_t*) be_birg_from_irg(irg)->isa_link;
}
ir_node *amd64_new_NoReg_gp(ir_graph *irg);
#endif
......@@ -62,8 +62,8 @@
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static const arm_code_gen_t *cg;
static set *sym_or_tv;
static set *sym_or_tv;
static arm_isa_t *isa;
/**
* Returns the register at in position pos.
......@@ -756,7 +756,7 @@ static void emit_be_Copy(const ir_node *irn)
}
if (mode_is_float(mode)) {
if (USE_FPA(cg->isa)) {
if (USE_FPA(isa)) {
be_emit_cstring("\tmvf");
be_emit_char(' ');
arm_emit_dest_register(irn, 0);
......@@ -951,7 +951,8 @@ static void arm_emit_block_header(ir_node *block, ir_node *prev)
int n_cfgpreds;
int need_label;
int i, arity;
ir_exec_freq *exec_freq = be_get_irg_exec_freq(cg->irg);
ir_graph *irg = get_irn_irg(block);
ir_exec_freq *exec_freq = be_get_irg_exec_freq(irg);
need_label = 0;
n_cfgpreds = get_Block_n_cfgpreds(block);
......@@ -1042,14 +1043,15 @@ static int cmp_sym_or_tv(const void *elt, const void *key, size_t size)
return p1->u.generic != p2->u.generic;
}
void arm_gen_routine(const arm_code_gen_t *arm_cg, ir_graph *irg)
void arm_gen_routine(ir_graph *irg)
{
ir_node **blk_sched;
int i, n;
ir_node *last_block = NULL;
ir_entity *entity = get_irg_entity(irg);
ir_node *last_block = NULL;
ir_entity *entity = get_irg_entity(irg);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
ir_node **blk_sched;
int i, n;
cg = arm_cg;
isa = (arm_isa_t*) arch_env;
sym_or_tv = new_set(cmp_sym_or_tv, 8);
be_gas_elf_type_char = '%';
......
......@@ -46,7 +46,7 @@ void arm_emit_shifter_operand(const ir_node *node);
void arm_emit_load_mode(const ir_node *node);
void arm_emit_store_mode(const ir_node *node);
void arm_gen_routine(const arm_code_gen_t *cg, ir_graph *irg);
void arm_gen_routine(ir_graph *irg);
void arm_init_emitter(void);
......
......@@ -40,8 +40,6 @@
#include "arm_nodes_attr.h"
#include "arm_new_nodes.h"
static arm_code_gen_t *cg;
static unsigned arm_ror(unsigned v, unsigned ror)
{
return (v << (32 - ror)) | (v >> ror);
......@@ -267,10 +265,8 @@ static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
}
/* Perform peephole-optimizations. */
void arm_peephole_optimization(arm_code_gen_t *new_cg)
void arm_peephole_optimization(ir_graph *irg)
{
cg = new_cg;
/* register peephole optimizations */
clear_irp_opcodes_generic_func();
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
......@@ -278,5 +274,5 @@ void arm_peephole_optimization(arm_code_gen_t *new_cg)
register_peephole_optimisation(op_arm_Ldr, peephole_arm_Str_Ldr);
register_peephole_optimisation(op_arm_FrameAddr, peephole_arm_FrameAddr);
be_peephole_opt(cg->irg);
be_peephole_opt(irg);
}
......@@ -54,8 +54,7 @@ void arm_gen_vals_from_word(unsigned int value, arm_vals *result);
* Performs Peephole Optimizations an a graph.
*
* @param irg the graph
* @param cg the code generator object
*/
void arm_peephole_optimization(arm_code_gen_t *cg);
void arm_peephole_optimization(ir_graph *irg);
#endif /* FIRM_BE_ARM_ARM_OPTIMIZE_H */
#endif
......@@ -58,14 +58,12 @@
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** hold the current code generator during transformation */
static arm_code_gen_t *env_cg;
static const arch_register_t *sp_reg = &arm_gp_regs[REG_SP];
static ir_mode *mode_gp;
static ir_mode *mode_fp;
static beabi_helper_env_t *abihelper;
static calling_convention_t *cconv = NULL;
static arm_isa_t *isa;
static pmap *node_to_stack;
......@@ -215,7 +213,7 @@ static ir_node *gen_Conv(ir_node *node)
return new_op;
if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
if (USE_FPA(env_cg->isa)) {
if (USE_FPA(isa)) {
if (mode_is_float(src_mode)) {
if (mode_is_float(dst_mode)) {
/* from float to float */
......@@ -232,7 +230,7 @@ static ir_node *gen_Conv(ir_node *node)
return new_bd_arm_FltX(dbg, block, new_op, dst_mode);
}
}
} else if (USE_VFP(env_cg->isa)) {
} else if (USE_VFP(isa)) {
panic("VFP not supported yet");
} else {
panic("Softfloat not supported yet");
......@@ -509,9 +507,9 @@ static ir_node *gen_Add(ir_node *node)
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_op1 = be_transform_node(op1);
ir_node *new_op2 = be_transform_node(op2);
if (USE_FPA(env_cg->isa)) {
if (USE_FPA(isa)) {
return new_bd_arm_Adf(dbgi, block, new_op1, new_op2, mode);
} else if (USE_VFP(env_cg->isa)) {
} else if (USE_VFP(isa)) {
assert(mode != mode_E && "IEEE Extended FP not supported");
panic("VFP not supported yet");
} else {
......@@ -556,9 +554,9 @@ static ir_node *gen_Mul(ir_node *node)
dbg_info *dbg = get_irn_dbg_info(node);
if (mode_is_float(mode)) {
if (USE_FPA(env_cg->isa)) {