Commit 226dc474 authored by Matthias Braun's avatar Matthias Braun
Browse files

change get_sp_bias and set_frame_offset to be callbacks for be_abi_fix_stack_bias

parent 9d568a13
......@@ -43,29 +43,10 @@ static ir_entity *TEMPLATE_get_frame_entity(const ir_node *node)
return NULL;
}
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
/**
 * Callback invoked by the generic backend to write the final stack offset
 * into a node that accesses the stack frame.
 * The TEMPLATE backend has no stack-accessing nodes yet, so this is a
 * deliberate no-op.
 */
static void TEMPLATE_set_frame_offset(ir_node *irn, int offset)
{
	/* TODO: correct offset if irn accesses the stack */
	(void)offset;
	(void)irn;
}
/**
 * Report the constant stack pointer change caused by @p irn.
 * No TEMPLATE node modifies the stack pointer, so this always returns 0.
 */
static int TEMPLATE_get_sp_bias(const ir_node *irn)
{
	(void)irn; /* parameter intentionally unused */
	return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_t TEMPLATE_irn_ops = {
	.get_frame_entity = TEMPLATE_get_frame_entity,
	.set_frame_offset = TEMPLATE_set_frame_offset, /* no-op stub, see above */
	.get_sp_bias      = TEMPLATE_get_sp_bias,      /* always 0 */
};
/**
......@@ -91,8 +72,6 @@ static void TEMPLATE_emit(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &TEMPLATE_registers[REG_SP]);
//be_abi_fix_stack_bias(irg);
/* emit code */
TEMPLATE_emit_function(irg);
}
......
......@@ -119,8 +119,6 @@ static int amd64_get_sp_bias(const ir_node *node)
static const arch_irn_ops_t amd64_irn_ops = {
.get_frame_entity = amd64_get_frame_entity,
.set_frame_offset = amd64_set_frame_offset,
.get_sp_bias = amd64_get_sp_bias,
};
static void amd64_before_ra(ir_graph *irg)
......@@ -639,7 +637,7 @@ static void amd64_finish_graph(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &amd64_registers[REG_RSP]);
be_abi_fix_stack_bias(irg);
be_abi_fix_stack_bias(irg, amd64_get_sp_bias, amd64_set_frame_offset);
/* Fix 2-address code constraints. */
amd64_finish_irg(irg);
......
......@@ -129,6 +129,30 @@ static void fix_should_be_same(ir_node *block, void *data)
}
}
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
static void arm_set_frame_offset(ir_node *irn, int bias)
{
	if (be_is_MemPerm(irn)) {
		/* generic MemPerm nodes store the offset in their own attribute */
		be_set_MemPerm_offset(irn, bias);
	} else if (is_arm_FrameAddr(irn)) {
		/* FrameAddr: the bias is folded into the frame-pointer offset */
		arm_Address_attr_t *attr = get_arm_Address_attr(irn);
		attr->fp_offset += bias;
	} else {
		/* otherwise this must be a load/store with a frame entity */
		arm_load_store_attr_t *attr = get_arm_load_store_attr(irn);
		assert(attr->base.is_load_store);
		attr->offset += bias;
	}
}
/**
 * Constant stack pointer delta of @p node.
 * The arm backend has no node with an implicit constant SP change,
 * hence the result is always 0.
 */
static int arm_get_sp_bias(const ir_node *node)
{
	(void)node; /* unused */
	return 0;
}
void arm_finish_graph(ir_graph *irg)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
......@@ -143,7 +167,7 @@ void arm_finish_graph(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &arm_registers[REG_SP]);
be_abi_fix_stack_bias(irg);
be_abi_fix_stack_bias(irg, arm_get_sp_bias, arm_set_frame_offset);
/* do peephole optimizations and fix stack offsets */
arm_peephole_optimization(irg);
......
......@@ -77,34 +77,10 @@ static ir_entity *arm_get_frame_entity(const ir_node *irn)
return NULL;
}
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
static void arm_set_frame_offset(ir_node *irn, int bias)
{
	if (is_arm_FrameAddr(irn)) {
		/* FrameAddr: fold the bias into the frame-pointer offset */
		arm_Address_attr_t *attr = get_arm_Address_attr(irn);
		attr->fp_offset += bias;
	} else {
		/* otherwise this must be a load/store with a frame entity */
		arm_load_store_attr_t *attr = get_arm_load_store_attr(irn);
		assert(attr->base.is_load_store);
		attr->offset += bias;
	}
}
/* No arm node changes the stack pointer by an implicit constant amount. */
static int arm_get_sp_bias(const ir_node *node)
{
	(void)node;
	return 0;
}
/* fill register allocator interface */
/* register allocator interface of the arm backend */
const arch_irn_ops_t arm_irn_ops = {
	.get_frame_entity = arm_get_frame_entity,
	.set_frame_offset = arm_set_frame_offset,
	.get_sp_bias      = arm_get_sp_bias, /* always 0 */
};
/**
......
......@@ -38,24 +38,12 @@ static const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
return be_ops;
}
/* Dispatch set_frame_offset to the node's backend-specific irn_ops. */
void arch_set_frame_offset(ir_node *irn, int offset)
{
	const arch_irn_ops_t *ops = get_irn_ops(irn);
	ops->set_frame_offset(irn, offset);
}
/* Dispatch get_frame_entity to the node's backend-specific irn_ops. */
ir_entity *arch_get_frame_entity(const ir_node *irn)
{
	const arch_irn_ops_t *ops = get_irn_ops(irn);
	return ops->get_frame_entity(irn);
}
/* Dispatch get_sp_bias to the node's backend-specific irn_ops. */
int arch_get_sp_bias(ir_node *irn)
{
	const arch_irn_ops_t *ops = get_irn_ops(irn);
	return ops->get_sp_bias(irn);
}
void arch_perform_memory_operand(ir_node *irn, unsigned int i)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
......
......@@ -20,13 +20,6 @@
#include "beinfo.h"
#include "be.h"
/**
* this constant is returned by the get_sp_bias functions if the stack
* is reset (usually because the frame pointer is copied to the stack
* pointer
*/
#define SP_BIAS_RESET INT_MIN
typedef enum arch_register_class_flags_t {
arch_register_class_flag_none = 0,
/** don't do automatic register allocation for this class */
......@@ -71,12 +64,8 @@ ENUM_BITSET(arch_register_req_type_t)
extern arch_register_req_t const arch_no_requirement;
#define arch_no_register_req (&arch_no_requirement)
void arch_set_frame_offset(ir_node *irn, int bias);
ir_entity *arch_get_frame_entity(const ir_node *irn);
int arch_get_sp_bias(ir_node *irn);
int arch_get_op_estimated_cost(const ir_node *irn);
void arch_perform_memory_operand(ir_node *irn, unsigned i);
......@@ -301,26 +290,6 @@ struct arch_irn_ops_t {
*/
ir_entity *(*get_frame_entity)(const ir_node *irn);
/**
* Set the offset of a node carrying an entity on the stack frame.
* @param irn The node.
* @param offset The offset of the node's stack frame entity.
*/
void (*set_frame_offset)(ir_node *irn, int offset);
/**
* Returns the delta of the stackpointer for nodes that increment or
* decrement the stackpointer with a constant value. (push, pop
* nodes on most architectures).
* A positive value stands for an expanding stack area, a negative value for
* a shrinking one.
*
* @param irn The node
* @return 0 if the stackpointer is not modified with a constant
* value, otherwise the increment/decrement value
*/
int (*get_sp_bias)(const ir_node *irn);
/**
* Get the estimated cycle count for @p irn.
*
......
......@@ -478,24 +478,9 @@ static ir_entity *be_node_get_frame_entity(const ir_node *irn)
return NULL;
}
/**
 * Write the stack offset into a generic backend node.
 * Among the be_* nodes only MemPerm carries a frame offset; every other
 * node is left untouched.
 */
static void be_node_set_frame_offset(ir_node *irn, int offset)
{
	if (!be_is_MemPerm(irn))
		return;
	be_set_MemPerm_offset(irn, offset);
}
/**
 * Constant stack pointer delta of a generic backend node.
 * IncSP is the only be_* node that changes the stack pointer; its offset
 * attribute is the delta. All other nodes yield 0.
 */
static int be_node_get_sp_bias(const ir_node *irn)
{
	return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
}
/* for be nodes */
static const arch_irn_ops_t be_node_irn_ops = {
.get_frame_entity = be_node_get_frame_entity,
.set_frame_offset = be_node_set_frame_offset,
.get_sp_bias = be_node_get_sp_bias,
};
static unsigned get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
......@@ -529,24 +514,9 @@ static ir_entity* dummy_get_frame_entity(const ir_node *node)
return NULL;
}
/* Middleend nodes never carry a frame entity, so reaching this is a bug. */
static void dummy_set_frame_offset(ir_node *node, int bias)
{
	(void)node;
	(void)bias;
	panic("should not be called");
}
/* Middleend nodes never change the stack pointer. */
static int dummy_get_sp_bias(const ir_node *node)
{
	(void)node;
	return 0;
}
/* for "middleend" nodes */
/* irn_ops for "middleend" nodes: no frame entity, no SP change,
 * set_frame_offset panics (see dummy_set_frame_offset). */
static const arch_irn_ops_t dummy_be_irn_ops = {
	.get_frame_entity = dummy_get_frame_entity,
	.set_frame_offset = dummy_set_frame_offset,
	.get_sp_bias      = dummy_get_sp_bias,
};
ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
......@@ -602,8 +572,6 @@ void be_dump_phi_reg_reqs(FILE *F, const ir_node *node, dump_reason_t reason)
static const arch_irn_ops_t phi_irn_ops = {
dummy_get_frame_entity,
dummy_set_frame_offset,
dummy_get_sp_bias,
NULL, /* get_op_estimated_cost */
NULL, /* perform_memory_operand */
};
......
......@@ -75,6 +75,8 @@ static void stack_frame_compute_initial_offset(be_stack_layout_t *frame)
/** Walker environment for the stack-bias fixup pass. */
typedef struct bias_walk {
	int start_block_bias; /**< The bias at the end of the start block. */
	ir_node *start_block; /**< The start block of the current graph. */
	get_sp_bias_func get_sp_bias;           /**< backend callback: SP delta of a node */
	set_frame_offset_func set_frame_offset; /**< backend callback: patch node offset */
} bias_walk;
/**
......@@ -85,7 +87,7 @@ typedef struct bias_walk {
*
* @return the bias at the end of this block
*/
static int process_stack_bias(ir_node *bl, int real_bias)
static int process_stack_bias(const bias_walk *bw, ir_node *bl, int real_bias)
{
int wanted_bias = real_bias;
ir_graph *irg = get_irn_irg(bl);
......@@ -100,7 +102,7 @@ static int process_stack_bias(ir_node *bl, int real_bias)
if (ent != NULL) {
int bias = sp_relative ? real_bias : 0;
int offset = be_get_stack_entity_offset(layout, ent, bias);
arch_set_frame_offset(irn, offset);
bw->set_frame_offset(irn, offset);
}
/* If the node modifies the stack pointer by a constant offset,
......@@ -131,7 +133,7 @@ static int process_stack_bias(ir_node *bl, int real_bias)
real_bias += ofs;
wanted_bias += ofs;
} else {
int ofs = arch_get_sp_bias(irn);
int ofs = bw->get_sp_bias(irn);
if (ofs == SP_BIAS_RESET) {
real_bias = 0;
wanted_bias = 0;
......@@ -154,10 +156,11 @@ static void stack_bias_walker(ir_node *bl, void *data)
{
bias_walk *bw = (bias_walk*)data;
if (bl != bw->start_block)
process_stack_bias(bl, bw->start_block_bias);
process_stack_bias(bw, bl, bw->start_block_bias);
}
void be_abi_fix_stack_bias(ir_graph *irg)
void be_abi_fix_stack_bias(ir_graph *irg, get_sp_bias_func get_sp_bias,
set_frame_offset_func set_frame_offset)
{
be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
......@@ -166,7 +169,9 @@ void be_abi_fix_stack_bias(ir_graph *irg)
/* Determine the stack bias at the end of the start block. */
bias_walk bw;
bw.start_block = get_irg_start_block(irg);
bw.start_block_bias = process_stack_bias(bw.start_block, stack_layout->initial_bias);
bw.get_sp_bias = get_sp_bias;
bw.set_frame_offset = set_frame_offset;
bw.start_block_bias = process_stack_bias(&bw, bw.start_block, stack_layout->initial_bias);
/* fix the bias is all other blocks */
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
......
......@@ -15,6 +15,17 @@
#include "firm_types.h"
#include "be_types.h"
/**
* this constant is returned by the get_sp_bias_func functions if the stack
* is reset (usually because the frame pointer is copied to the stack
* pointer
*/
#define SP_BIAS_RESET INT_MIN
/** Backend callback: write the final stack offset into a stack-accessing node. */
typedef void (*set_frame_offset_func)(ir_node *node, int offset);
/** Backend callback: constant SP delta of a node, or SP_BIAS_RESET (see above). */
typedef int (*get_sp_bias_func)(const ir_node *node);
/**
* Rewire all stack modifying nodes and their users to assure SSA property.
* @param sp The stack pointer register
......@@ -24,8 +35,16 @@ void be_fix_stack_nodes(ir_graph *irg, arch_register_t const *sp);
/**
* Fix the stack bias for all nodes accessing the stack frame using the
* stack pointer.
* @p get_sp_bias should return the delta of the stackpointer for nodes
* that increment or decrement the stackpointer with constant values.
* (Such as push and pop variants, be_IncSP, ...). A positive value stands
* for an expanding stack area, a negative value for a shrinking one,
* regardless of the actual stack direction of the calling convention.
* Note that the code already contains a special case for IncSP nodes so the
* callback does not need to handle them.
*/
void be_abi_fix_stack_bias(ir_graph *irg);
void be_abi_fix_stack_bias(ir_graph *irg, get_sp_bias_func get_sp_bias,
set_frame_offset_func set_frame_offset);
int be_get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
int bias);
......
......@@ -190,7 +190,7 @@ static void ia32_set_frame_offset(ir_node *irn, int bias)
add_ia32_am_offs_int(irn, bias);
}
static int ia32_get_sp_bias(const ir_node *node)
int ia32_get_sp_bias(const ir_node *node)
{
if (is_ia32_Call(node))
return -(int)get_ia32_call_attr_const(node)->pop;
......@@ -413,8 +413,6 @@ static void ia32_perform_memory_operand(ir_node *irn, unsigned int i)
/* register allocator interface */
/* register allocator interface of the ia32 backend */
static const arch_irn_ops_t ia32_irn_ops = {
	.get_frame_entity       = ia32_get_frame_entity,
	.set_frame_offset       = ia32_set_frame_offset,
	.get_sp_bias            = ia32_get_sp_bias,
	.get_op_estimated_cost  = ia32_get_op_estimated_cost,
	.perform_memory_operand = ia32_perform_memory_operand,
};
......@@ -1178,7 +1176,7 @@ static void ia32_emit(ir_graph *irg)
/* fix stack entity offsets */
be_fix_stack_nodes(irg, &ia32_registers[REG_ESP]);
be_abi_fix_stack_bias(irg);
be_abi_fix_stack_bias(irg, ia32_get_sp_bias, ia32_set_frame_offset);
/* fix 2-address code constraints */
ia32_finish_irg(irg);
......
......@@ -116,6 +116,8 @@ void ia32_adjust_pic(ir_graph *irg);
ir_node *ia32_get_pic_base(ir_graph *irg);
int ia32_get_sp_bias(const ir_node *node);
static inline bool ia32_is_8bit_val(int32_t const v)
{
return -128 <= v && v < 128;
......
......@@ -52,6 +52,7 @@
#include "bedwarf.h"
#include "beemitter.h"
#include "begnuas.h"
#include "bestack.h"
#include "beutil.h"
#include "ia32_emitter.h"
......@@ -1424,7 +1425,7 @@ static void ia32_emit_node(ir_node *node)
be_emit_node(node);
if (sp_relative) {
int sp_change = arch_get_sp_bias(node);
int sp_change = ia32_get_sp_bias(node);
if (sp_change != 0) {
assert(sp_change != SP_BIAS_RESET);
callframe_offset += sp_change;
......
......@@ -139,40 +139,10 @@ static ir_entity *sparc_get_frame_entity(const ir_node *node)
return NULL;
}
/**
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
static void sparc_set_frame_offset(ir_node *node, int offset)
{
	/* all sparc stack-accessing nodes keep the offset in the common
	 * immediate_value attribute */
	sparc_attr_t *attr = get_sparc_attr(node);
	attr->immediate_value += offset;

	/* must be a FrameAddr or a load/store node with frame_entity */
	assert(is_sparc_FrameAddr(node) ||
	       get_sparc_load_store_attr_const(node)->is_frame_entity);
}
static int sparc_get_sp_bias(const ir_node *node)
{
	if (is_sparc_Save(node)) {
		const sparc_attr_t *attr = get_sparc_attr_const(node);
		/* a 3-input Save takes the stack size from a register, which we
		 * cannot express as a constant bias yet */
		if (get_irn_arity(node) == 3)
			panic("no support for _reg variant yet");

		/* Save allocates a new register window / stack frame; negated
		 * because immediate_value holds the (negative) frame size */
		return -attr->immediate_value;
	} else if (is_sparc_RestoreZero(node)) {
		/* RestoreZero resets the stack pointer (frame pointer copy) */
		return SP_BIAS_RESET;
	}
	return 0;
}
/* fill register allocator interface */
/* register allocator interface of the sparc backend */
const arch_irn_ops_t sparc_irn_ops = {
	.get_frame_entity = sparc_get_frame_entity,
	.set_frame_offset = sparc_set_frame_offset,
	.get_sp_bias      = sparc_get_sp_bias,
};
/**
......
......@@ -19,6 +19,37 @@
#include "bearch.h"
#include "benode.h"
#include "besched.h"
#include "bestack.h"
/* Write the final stack offset into a sparc stack-accessing node. */
static void sparc_set_frame_offset(ir_node *node, int offset)
{
	if (be_is_MemPerm(node)) {
		/* generic MemPerm nodes store the offset in their own attribute */
		be_set_MemPerm_offset(node, offset);
	} else {
		sparc_attr_t *attr = get_sparc_attr(node);
		attr->immediate_value += offset;

		/* must be a FrameAddr or a load/store node with frame_entity */
		assert(is_sparc_FrameAddr(node) ||
		       get_sparc_load_store_attr_const(node)->is_frame_entity);
	}
}
/* Constant stack pointer delta of a sparc (or generic backend) node. */
static int sparc_get_sp_bias(const ir_node *node)
{
	/* IncSP carries its delta as an explicit offset attribute */
	if (be_is_IncSP(node))
		return be_get_IncSP_offset(node);
	if (is_sparc_Save(node)) {
		const sparc_attr_t *attr = get_sparc_attr_const(node);
		/* a 3-input Save takes the stack size from a register, which we
		 * cannot express as a constant bias yet */
		if (get_irn_arity(node) == 3)
			panic("no support for _reg variant yet");

		/* negated because immediate_value holds the (negative) frame size */
		return -attr->immediate_value;
	}
	/* RestoreZero resets the stack pointer (frame pointer copy) */
	if (is_sparc_RestoreZero(node))
		return SP_BIAS_RESET;
	return 0;
}
static void set_irn_sp_bias(ir_node *node, int new_bias)
{
......@@ -48,7 +79,7 @@ static void process_bias(ir_node *block, bool sp_relative, int bias,
int offset = get_entity_offset(entity);
if (sp_relative)
offset += bias + SPARC_MIN_STACKSIZE;
arch_set_frame_offset(irn, offset);
sparc_set_frame_offset(irn, offset);
}
/* The additional alignment bytes cannot be used
......@@ -59,7 +90,7 @@ static void process_bias(ir_node *block, bool sp_relative, int bias,
assert(free_bytes == 0);
}
irn_bias = arch_get_sp_bias(irn);
irn_bias = sparc_get_sp_bias(irn);
if (irn_bias == 0) {
/* do nothing */
} else if (irn_bias == SP_BIAS_RESET) {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment