Commit 4b9bb36a authored by Matthias Braun's avatar Matthias Braun
Browse files

remove arch_get_frame_entity()

Add callbacks to the functions that really need them instead.
parent 5232416f
......@@ -36,17 +36,9 @@
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/**
 * Return the stack frame entity referenced by @p node, or NULL if the
 * node does not reference one.
 *
 * TEMPLATE backend stub: no node kind carries a frame entity yet, so
 * this always reports "no frame entity".
 */
static ir_entity *TEMPLATE_get_frame_entity(const ir_node *node)
{
	(void)node;
	/* TODO: return the ir_entity assigned to the frame */
	return NULL;
}
/* fill register allocator interface */
static const arch_irn_ops_t TEMPLATE_irn_ops = {
.get_frame_entity = TEMPLATE_get_frame_entity,
};
/**
......
......@@ -118,7 +118,6 @@ static int amd64_get_sp_bias(const ir_node *node)
/* fill register allocator interface */
static const arch_irn_ops_t amd64_irn_ops = {
.get_frame_entity = amd64_get_frame_entity,
};
static void amd64_before_ra(ir_graph *irg)
......@@ -637,7 +636,8 @@ static void amd64_finish_graph(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &amd64_registers[REG_RSP]);
be_abi_fix_stack_bias(irg, amd64_get_sp_bias, amd64_set_frame_offset);
be_abi_fix_stack_bias(irg, amd64_get_sp_bias, amd64_set_frame_offset,
amd64_get_frame_entity);
/* Fix 2-address code constraints. */
amd64_finish_irg(irg);
......
......@@ -153,6 +153,26 @@ static int arm_get_sp_bias(const ir_node *node)
return 0;
}
/**
 * Return the stack frame entity referenced by @p irn, or NULL if the
 * node does not reference one.
 *
 * Three node kinds carry a frame entity:
 *  - be_MemPerm nodes: the entity of their first input,
 *  - arm_FrameAddr nodes: the entity stored in the address attribute,
 *  - load/store nodes flagged as frame accesses: the entity stored in
 *    the load/store attribute.
 */
static ir_entity *arm_get_frame_entity(const ir_node *irn)
{
	if (be_is_MemPerm(irn))
		return be_get_MemPerm_in_entity(irn, 0);
	if (is_arm_FrameAddr(irn)) {
		const arm_Address_attr_t *frame_attr = get_arm_Address_attr_const(irn);
		return frame_attr->entity;
	}
	/* Fetch the generic attribute only where it is actually needed;
	 * the FrameAddr branch above never uses it. */
	const arm_attr_t *attr = get_arm_attr_const(irn);
	if (attr->is_load_store) {
		const arm_load_store_attr_t *load_store_attr
			= get_arm_load_store_attr_const(irn);
		if (load_store_attr->is_frame_entity) {
			return load_store_attr->entity;
		}
	}
	return NULL;
}
void arm_finish_graph(ir_graph *irg)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
......@@ -167,7 +187,8 @@ void arm_finish_graph(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &arm_registers[REG_SP]);
be_abi_fix_stack_bias(irg, arm_get_sp_bias, arm_set_frame_offset);
be_abi_fix_stack_bias(irg, arm_get_sp_bias, arm_set_frame_offset,
arm_get_frame_entity);
/* do peephole optimizations and fix stack offsets */
arm_peephole_optimization(irg);
......
......@@ -59,28 +59,9 @@ arm_codegen_config_t arm_cg_config;
ir_mode *arm_mode_gp;
ir_mode *arm_mode_flags;
/**
 * Return the stack frame entity referenced by @p irn, or NULL if the
 * node does not reference one.
 *
 * Two node kinds carry a frame entity: arm_FrameAddr nodes (entity in
 * the address attribute) and load/store nodes flagged as frame
 * accesses (entity in the load/store attribute).
 */
static ir_entity *arm_get_frame_entity(const ir_node *irn)
{
	const arm_attr_t *attr = get_arm_attr_const(irn);
	if (is_arm_FrameAddr(irn)) {
		const arm_Address_attr_t *frame_attr = get_arm_Address_attr_const(irn);
		return frame_attr->entity;
	}
	if (attr->is_load_store) {
		const arm_load_store_attr_t *load_store_attr
			= get_arm_load_store_attr_const(irn);
		if (load_store_attr->is_frame_entity) {
			return load_store_attr->entity;
		}
	}
	return NULL;
}
/* fill register allocator interface */
const arch_irn_ops_t arm_irn_ops = {
.get_frame_entity = arm_get_frame_entity,
};
/**
......
......@@ -58,4 +58,6 @@ typedef struct copy_opt_t copy_opt_t;
typedef struct be_main_env_t be_main_env_t;
typedef struct be_options_t be_options_t;
typedef ir_entity *(*get_frame_entity_func)(const ir_node *node);
#endif
......@@ -38,12 +38,6 @@ static const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
return be_ops;
}
/**
 * Dispatch to the backend's get_frame_entity() callback for @p irn.
 *
 * @return the stack frame entity the node references, or NULL if the
 *         node has none (per the callback's contract).
 */
ir_entity *arch_get_frame_entity(const ir_node *irn)
{
	const arch_irn_ops_t *ops = get_irn_ops(irn);
	return ops->get_frame_entity(irn);
}
void arch_perform_memory_operand(ir_node *irn, unsigned int i)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
......
......@@ -64,8 +64,6 @@ ENUM_BITSET(arch_register_req_type_t)
extern arch_register_req_t const arch_no_requirement;
#define arch_no_register_req (&arch_no_requirement)
ir_entity *arch_get_frame_entity(const ir_node *irn);
int arch_get_op_estimated_cost(const ir_node *irn);
void arch_perform_memory_operand(ir_node *irn, unsigned i);
......@@ -282,14 +280,6 @@ static inline bool reg_reqs_equal(const arch_register_req_t *req1,
}
struct arch_irn_ops_t {
/**
* Get the entity on the stack frame this node depends on.
* @param irn The node in question.
* @return The entity on the stack frame or NULL, if the node does not have
* a stack frame entity.
*/
ir_entity *(*get_frame_entity)(const ir_node *irn);
/**
* Get the estimated cycle count for @p irn.
*
......
......@@ -471,16 +471,8 @@ unsigned be_get_IncSP_align(const ir_node *irn)
return a->align;
}
/**
 * get_frame_entity callback for generic backend nodes: only MemPerm
 * nodes reference a frame entity (that of their first input); every
 * other be node reports NULL.
 */
static ir_entity *be_node_get_frame_entity(const ir_node *irn)
{
	return be_is_MemPerm(irn) ? be_get_MemPerm_in_entity(irn, 0) : NULL;
}
/* for be nodes */
static const arch_irn_ops_t be_node_irn_ops = {
.get_frame_entity = be_node_get_frame_entity,
};
static unsigned get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
......@@ -508,15 +500,8 @@ ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg)
return proj ? proj : new_r_Proj(start, reg->cls->mode, i);
}
/**
 * get_frame_entity callback for "middleend" nodes, which never
 * reference a stack frame entity: always answers NULL.
 */
static ir_entity* dummy_get_frame_entity(const ir_node *node)
{
	(void)node;
	return NULL;
}
/* for "middleend" nodes */
static const arch_irn_ops_t dummy_be_irn_ops = {
.get_frame_entity = dummy_get_frame_entity,
};
ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
......@@ -571,9 +556,6 @@ void be_dump_phi_reg_reqs(FILE *F, const ir_node *node, dump_reason_t reason)
}
static const arch_irn_ops_t phi_irn_ops = {
dummy_get_frame_entity,
NULL, /* get_op_estimated_cost */
NULL, /* perform_memory_operand */
};
/**
......
......@@ -209,12 +209,13 @@ static inline bool overlapping_regs(const arch_register_t *reg0,
}
bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
const ir_node *before)
const ir_node *before,
get_frame_entity_func get_frame_entity)
{
assert(get_nodes_block(node) == get_nodes_block(before));
assert(sched_get_time_step(node) < sched_get_time_step(before));
const ir_entity *const entity = arch_get_frame_entity(node);
const ir_entity *const entity = get_frame_entity(node);
ir_node *schedpoint = sched_next(node);
while (schedpoint != before) {
......@@ -244,7 +245,7 @@ bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
* kinda preliminary but enough for the sparc backend. */
if (entity != NULL) {
const ir_entity *const schedpoint_entity
= arch_get_frame_entity(schedpoint);
= get_frame_entity(schedpoint);
if (schedpoint_entity == entity)
return false;
if (be_is_MemPerm(schedpoint)) {
......
......@@ -59,7 +59,8 @@ bool be_has_only_one_user(ir_node *node);
* optimization phase.
*/
bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
const ir_node *before);
const ir_node *before,
get_frame_entity_func get_frame_entity);
bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
const ir_node *after);
......
......@@ -470,7 +470,6 @@ static void assign_spill_entity(be_fec_env_t *env, ir_node *node,
}
node = skip_Proj(node);
assert(arch_get_frame_entity(node) == NULL);
env->set_frame_entity(node, entity, type);
}
......
......@@ -77,6 +77,7 @@ typedef struct bias_walk {
ir_node *start_block; /**< The start block of the current graph. */
get_sp_bias_func get_sp_bias;
set_frame_offset_func set_frame_offset;
get_frame_entity_func get_frame_entity;
} bias_walk;
/**
......@@ -98,7 +99,7 @@ static int process_stack_bias(const bias_walk *bw, ir_node *bl, int real_bias)
/* Check if the node relates to an entity on the stack frame.
* If so, set the true offset (including the bias) for that
* node. */
ir_entity *ent = arch_get_frame_entity(irn);
ir_entity *ent = bw->get_frame_entity(irn);
if (ent != NULL) {
int bias = sp_relative ? real_bias : 0;
int offset = be_get_stack_entity_offset(layout, ent, bias);
......@@ -160,7 +161,8 @@ static void stack_bias_walker(ir_node *bl, void *data)
}
void be_abi_fix_stack_bias(ir_graph *irg, get_sp_bias_func get_sp_bias,
set_frame_offset_func set_frame_offset)
set_frame_offset_func set_frame_offset,
get_frame_entity_func get_frame_entity)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
......@@ -171,6 +173,7 @@ void be_abi_fix_stack_bias(ir_graph *irg, get_sp_bias_func get_sp_bias,
bw.start_block = get_irg_start_block(irg);
bw.get_sp_bias = get_sp_bias;
bw.set_frame_offset = set_frame_offset;
bw.get_frame_entity = get_frame_entity;
bw.start_block_bias = process_stack_bias(&bw, bw.start_block, stack_layout->initial_bias);
/* fix the bias in all other blocks */
......
......@@ -44,7 +44,8 @@ void be_fix_stack_nodes(ir_graph *irg, arch_register_t const *sp);
* callback does not need to handle them.
*/
void be_abi_fix_stack_bias(ir_graph *irg, get_sp_bias_func get_sp_bias,
set_frame_offset_func set_frame_offset);
set_frame_offset_func set_frame_offset,
get_frame_entity_func get_frame_entity);
int be_get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
int bias);
......
......@@ -281,10 +281,11 @@ typedef struct spill_t {
} spill_t;
typedef struct {
ir_graph *irg;
set *spills;
ir_node **reloads;
bool problem_found;
ir_graph *irg;
set *spills;
ir_node **reloads;
bool problem_found;
get_frame_entity_func get_frame_entity;
} be_verify_spillslots_env_t;
static int cmp_spill(const void* d1, const void* d2, size_t size)
......@@ -342,7 +343,7 @@ static void be_check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_e
static void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
{
ir_entity *spillent = arch_get_frame_entity(node);
ir_entity *spillent = env->get_frame_entity(node);
be_check_entity(env, node, spillent);
get_spill(env, node, ent);
......@@ -434,7 +435,7 @@ static void collect_spills_walker(ir_node *node, void *data)
env->problem_found = true;
return;
}
ir_entity *ent = arch_get_frame_entity(node);
ir_entity *ent = env->get_frame_entity(node);
be_check_entity(env, node, ent);
collect(env, spill, node, ent);
......@@ -481,7 +482,7 @@ static void check_lonely_spills(ir_node *node, void *data)
|| (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
spill_t *spill = find_spill(env, node);
if (arch_irn_is(node, spill)) {
ir_entity *ent = arch_get_frame_entity(node);
ir_entity *ent = env->get_frame_entity(node);
be_check_entity(env, node, ent);
}
......@@ -492,14 +493,15 @@ static void check_lonely_spills(ir_node *node, void *data)
}
}
bool be_verify_spillslots(ir_graph *irg)
bool be_verify_spillslots(ir_graph *irg, get_frame_entity_func get_frame_entity)
{
be_verify_spillslots_env_t env;
env.irg = irg;
env.spills = new_set(cmp_spill, 10);
env.reloads = NEW_ARR_F(ir_node*, 0);
env.problem_found = false;
env.irg = irg;
env.spills = new_set(cmp_spill, 10);
env.reloads = NEW_ARR_F(ir_node*, 0);
env.problem_found = false;
env.get_frame_entity = get_frame_entity;
irg_walk_graph(irg, collect_spills_walker, NULL, &env);
irg_walk_graph(irg, check_lonely_spills, NULL, &env);
......
......@@ -38,11 +38,9 @@ bool be_verify_schedule(ir_graph *irg);
/**
* Verify spillslots
*
* @param irg The irg to check
* @return true if spillslots are valid, false otherwise
*/
bool be_verify_spillslots(ir_graph *irg);
bool be_verify_spillslots(ir_graph *irg,
get_frame_entity_func get_frame_entity);
/**
* Verify register allocation: Checks that no 2 live nodes have the same
......
......@@ -412,9 +412,8 @@ static void ia32_perform_memory_operand(ir_node *irn, unsigned int i)
/* register allocator interface */
static const arch_irn_ops_t ia32_irn_ops = {
.get_frame_entity = ia32_get_frame_entity,
.get_op_estimated_cost = ia32_get_op_estimated_cost,
.perform_memory_operand = ia32_perform_memory_operand,
.get_op_estimated_cost = ia32_get_op_estimated_cost,
.perform_memory_operand = ia32_perform_memory_operand,
};
static bool gprof;
......@@ -1176,7 +1175,8 @@ static void ia32_emit(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &ia32_registers[REG_ESP]);
be_abi_fix_stack_bias(irg, ia32_get_sp_bias, ia32_set_frame_offset);
be_abi_fix_stack_bias(irg, ia32_get_sp_bias, ia32_set_frame_offset,
ia32_get_frame_entity);
/* fix 2-address code constraints */
ia32_finish_irg(irg);
......
......@@ -121,28 +121,9 @@ static const lc_opt_table_entry_t sparc_options[] = {
LC_OPT_LAST
};
static ir_entity *sparc_get_frame_entity(const ir_node *node)
{
if (is_sparc_FrameAddr(node)) {
const sparc_attr_t *attr = get_sparc_attr_const(node);
return attr->immediate_value_entity;
}
if (sparc_has_load_store_attr(node)) {
const sparc_load_store_attr_t *load_store_attr
= get_sparc_load_store_attr_const(node);
if (load_store_attr->is_frame_entity) {
return load_store_attr->base.immediate_value_entity;
}
}
return NULL;
}
/* fill register allocator interface */
const arch_irn_ops_t sparc_irn_ops = {
.get_frame_entity = sparc_get_frame_entity,
};
/**
......
......@@ -73,4 +73,6 @@ void sparc_create_stacklayout(ir_graph *irg, calling_convention_t *cconv);
void sparc_adjust_stack_entity_offsets(ir_graph *irg);
void sparc_fix_stack_bias(ir_graph *irg);
ir_entity *sparc_get_frame_entity(const ir_node *node);
#endif
......@@ -336,7 +336,7 @@ static bool can_move_down_into_delayslot(const ir_node *node, const ir_node *to)
if (!is_legal_delay_slot_filler(node))
return false;
if (!be_can_move_down(heights, node, to))
if (!be_can_move_down(heights, node, to, sparc_get_frame_entity))
return false;
if (is_sparc_Call(to)) {
......
......@@ -541,6 +541,11 @@ static void replace_with_restore_imm(ir_node *node, ir_node *replaced,
be_peephole_exchange(replaced, res);
}
/**
 * Convenience wrapper around be_can_move_down() with the SPARC frame
 * entity callback pre-bound, for the repeated queries in
 * peephole_sparc_RestoreZero().
 */
static bool can_move_down(const ir_node *schedpoint, const ir_node *node)
{
	return be_can_move_down(heights, schedpoint, node, sparc_get_frame_entity);
}
static void peephole_sparc_RestoreZero(ir_node *node)
{
/* restore gives us a free "add" instruction, let's try to use that to fold
......@@ -573,13 +578,13 @@ static void peephole_sparc_RestoreZero(ir_node *node)
if (!is_restorezeroopt_reg(reg))
continue;
if (be_is_Copy(schedpoint) && be_can_move_down(heights, schedpoint, node)) {
if (be_is_Copy(schedpoint) && can_move_down(schedpoint, node)) {
ir_node *const op = be_get_Copy_op(schedpoint);
replace_with_restore_imm(node, schedpoint, op, NULL, 0);
} else if (is_sparc_Or(schedpoint) &&
arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
be_can_move_down(heights, schedpoint, node)) {
can_move_down(schedpoint, node)) {
/* it's a constant */
const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
ir_entity *entity = attr->immediate_value_entity;
......@@ -587,7 +592,7 @@ static void peephole_sparc_RestoreZero(ir_node *node)
ir_node *g0 = get_irn_n(schedpoint, 0);
replace_with_restore_imm(node, schedpoint, g0, entity, immediate);
} else if (is_sparc_Add(schedpoint) &&
be_can_move_down(heights, schedpoint, node)) {
can_move_down(schedpoint, node)) {
if (arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form)) {
ir_node *op = get_irn_n(schedpoint, 0);
const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
......@@ -602,7 +607,7 @@ static void peephole_sparc_RestoreZero(ir_node *node)
} else if (is_sparc_Sub(schedpoint) &&
arch_get_irn_flags(schedpoint) & ((arch_irn_flags_t)sparc_arch_irn_flag_immediate_form) &&
arch_get_irn_register_in(schedpoint, 0) == &sparc_registers[REG_G0] &&
be_can_move_down(heights, schedpoint, node)) {
can_move_down(schedpoint, node)) {
/* it's a constant */
const sparc_attr_t *attr = get_sparc_attr_const(schedpoint);
ir_entity *entity = attr->immediate_value_entity;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment