Commit fd1a2c6c authored by Matthias Braun's avatar Matthias Braun
Browse files

- New callback to ask nodes about constant stack pointer adjustment

- Make ia32_Push and ia32_Pop return their stackpointer adjustment
- replaced stack_dir enum + unsigned offsets with normal integer offsets
- make schedule verifier detect dead nodes in schedule
parent 80fae972
......@@ -218,10 +218,14 @@ static void TEMPLATE_set_frame_entity(const void *self, const ir_node *irn, enti
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
static void TEMPLATE_set_stack_bias(const void *self, ir_node *irn, int bias) {
static void TEMPLATE_set_frame_offset(const void *self, ir_node *irn, int offset) {
/* TODO: correct offset if irn accesses the stack */
}
/**
 * Returns the constant stack pointer adjustment performed by @p irn.
 * The TEMPLATE backend has no stack-modifying nodes, so this is always 0
 * (0 means "does not modify the stack pointer by a constant value").
 */
static int TEMPLATE_get_sp_bias(const void *self, const ir_node *irn) {
return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_if_t TEMPLATE_irn_ops_if = {
......@@ -232,7 +236,8 @@ static const arch_irn_ops_if_t TEMPLATE_irn_ops_if = {
TEMPLATE_get_flags,
TEMPLATE_get_frame_entity,
TEMPLATE_set_frame_entity,
TEMPLATE_set_stack_bias
TEMPLATE_set_frame_offset,
TEMPLATE_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
......
......@@ -106,9 +106,7 @@ static const char *node_offset_to_str(ir_node *n, char *buf, int buflen) {
entity *ent = be_get_frame_entity(n);
offset = get_entity_offset_bytes(ent);
} else if (irn_op == op_be_IncSP) {
int offs = be_get_IncSP_offset(n);
be_stack_dir_t dir = be_get_IncSP_direction(n);
offset = (dir == be_stack_dir_expand) ? -offs : offs;
offset = - be_get_IncSP_offset(n);
} else {
return "node_offset_to_str will fuer diesen Knotentyp noch implementiert werden";
}
......@@ -624,8 +622,9 @@ static void emit_be_Call(ir_node *irn, void *env) {
/** Emit an IncSP node */
static void emit_be_IncSP(const ir_node *irn, arm_emit_env_t *emit_env) {
FILE *F = emit_env->out;
unsigned offs = be_get_IncSP_offset(irn);
if (offs) {
int offs = be_get_IncSP_offset(irn);
if (offs != 0) {
char cmd_buf[SNPRINTF_BUF_LEN], cmnt_buf[SNPRINTF_BUF_LEN];
lc_esnprintf(arm_get_arg_env(), cmd_buf, SNPRINTF_BUF_LEN, "add %1D, %1S, #%O", irn, irn, irn );
lc_esnprintf(arm_get_arg_env(), cmnt_buf, SNPRINTF_BUF_LEN, "/* IncSP(%O) */", irn);
......
......@@ -950,12 +950,12 @@ static ir_node *gen_CopyB(ir_node *irn, arm_code_gen_t *cg) {
* access must be done relative to the first IncSP ...
*/
static int get_sp_expand_offset(ir_node *inc_sp) {
unsigned offset = be_get_IncSP_offset(inc_sp);
be_stack_dir_t dir = be_get_IncSP_direction(inc_sp);
int offset = be_get_IncSP_offset(inc_sp);
if (offset == BE_STACK_FRAME_SIZE)
if (offset == BE_STACK_FRAME_SIZE_EXPAND)
return 0;
return dir == be_stack_dir_expand ? (int)offset : -(int)offset;
return offset;
}
static ir_node *gen_StackParam(ir_node *irn, arm_code_gen_t *cg) {
......
......@@ -231,7 +231,7 @@ static entity *arm_get_frame_entity(const void *self, const ir_node *irn) {
return NULL;
}
static void arm_set_frame_entity(const void *self, const ir_node *irn, entity *ent) {
static void arm_set_frame_entity(const void *self, ir_node *irn, entity *ent) {
/* TODO: set the entity assigned to the frame */
}
......@@ -243,6 +243,10 @@ static void arm_set_stack_bias(const void *self, ir_node *irn, int bias) {
/* TODO: correct offset if irn accesses the stack */
}
/**
 * Returns the constant stack pointer adjustment performed by @p irn.
 * No ARM node reports a constant adjustment yet, so this is always 0
 * (0 means "does not modify the stack pointer by a constant value").
 */
static int arm_get_sp_bias(const void *self, const ir_node *irn) {
return 0;
}
/* fill register allocator interface */
static const arch_irn_ops_if_t arm_irn_ops_if = {
......@@ -254,6 +258,7 @@ static const arch_irn_ops_if_t arm_irn_ops_if = {
arm_get_frame_entity,
arm_set_frame_entity,
arm_set_stack_bias,
arm_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
......@@ -936,7 +941,7 @@ static void arm_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_m
// TODO: Activate Omit fp in epilogue
if(env->flags.try_omit_fp) {
curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
curr_sp = be_new_IncSP(env->isa->sp, env->irg, bl, curr_sp, *mem, BE_STACK_FRAME_SIZE_SHRINK);
curr_lr = be_new_CopyKeep_single(&arm_reg_classes[CLASS_arm_gp], env->irg, bl, curr_lr, curr_sp, get_irn_mode(curr_lr));
be_node_set_reg_class(curr_lr, 1, &arm_reg_classes[CLASS_arm_gp]);
......
......@@ -471,7 +471,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
* moving the stack pointer along the stack's direction.
*/
if(stack_dir < 0 && !do_seq && !no_alloc) {
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size);
}
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
......@@ -492,7 +492,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
if (do_seq) {
curr_ofs = 0;
addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
param_size + arg->space_before, be_stack_dir_expand);
param_size + arg->space_before);
}
else {
curr_ofs += arg->space_before;
......@@ -691,7 +691,7 @@ static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
/* Clean up the stack frame if we allocated it */
if(!no_alloc)
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, -stack_size);
}
be_abi_call_free(call);
......@@ -954,7 +954,7 @@ static ir_node *setup_frame(be_abi_irg_t *env)
int stack_nr = get_Proj_proj(stack);
if(flags.try_omit_fp) {
stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
frame = stack;
}
......@@ -968,7 +968,7 @@ static ir_node *setup_frame(be_abi_irg_t *env)
arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
}
stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
}
be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
......@@ -993,7 +993,7 @@ static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct
pmap_entry *ent;
if(env->call->flags.bits.try_omit_fp) {
stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
}
else {
......@@ -1611,7 +1611,7 @@ static void modify_irg(be_abi_irg_t *env)
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
barrier = create_barrier(env, bl, &mem, env->regs, 0);
......@@ -1793,15 +1793,14 @@ struct fix_stack_walker_info {
static void collect_stack_nodes_walker(ir_node *irn, void *data)
{
struct fix_stack_walker_info *info = data;
ir_mode *mode;
if (is_Block(irn))
return;
mode = get_irn_mode(irn);
if (arch_irn_is(info->aenv, irn, modify_sp) && mode != mode_T && mode != mode_M)
if (arch_irn_is(info->aenv, irn, modify_sp)) {
assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
pset_insert_ptr(info->nodes, irn);
}
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
......@@ -1824,54 +1823,45 @@ void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
be_free_dominance_frontiers(df);
}
/**
* Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
* into -1 or 1, respectively.
* @param irn The node.
* @return 1, if the direction of the IncSP was along, -1 if against.
*/
static int get_dir(ir_node *irn)
{
return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
}
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
{
const arch_env_t *aenv = env->birg->main_env->arch_env;
const arch_env_t *arch_env = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp;
ir_node *irn;
sched_foreach(bl, irn) {
/*
If the node modifies the stack pointer by a constant offset,
record that in the bias.
*/
if(be_is_IncSP(irn)) {
int ofs = be_get_IncSP_offset(irn);
int dir = get_dir(irn);
if(ofs == BE_STACK_FRAME_SIZE) {
ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
}
if(omit_fp)
bias += dir * ofs;
Check, if the node relates to an entity on the stack frame.
If so, set the true offset (including the bias) for that
node.
*/
entity *ent = arch_get_frame_entity(arch_env, irn);
if(ent) {
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(arch_env, irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
}
/*
Else check, if the node relates to an entity on the stack frame.
If so, set the true offset (including the bias) for that
node.
*/
else {
entity *ent = arch_get_frame_entity(aenv, irn);
if(ent) {
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(aenv, irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
If the node modifies the stack pointer by a constant offset,
record that in the bias.
*/
if(arch_irn_is(arch_env, irn, modify_sp)) {
int ofs = arch_get_sp_bias(arch_env, irn);
if(be_is_IncSP(irn)) {
if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
} else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
be_set_IncSP_offset(irn, ofs);
}
}
if(omit_fp)
bias += ofs;
}
}
......@@ -2006,10 +1996,15 @@ static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
{
}
static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias)
{
}
/* ABI pseudo nodes do not adjust the stack pointer by a constant value,
 * so the reported bias is always 0. */
static int abi_get_sp_bias(const void *self, const ir_node *irn)
{
	(void) self;
	(void) irn;
	return 0;
}
static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_irn_reg_req,
abi_set_irn_reg,
......@@ -2018,7 +2013,8 @@ static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_flags,
abi_get_frame_entity,
abi_set_frame_entity,
abi_set_stack_bias,
abi_set_frame_offset,
abi_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
......
......@@ -117,6 +117,12 @@ void arch_set_frame_entity(const arch_env_t *env, ir_node *irn, entity *ent)
ops->impl->set_frame_entity(ops, irn, ent);
}
/* Dispatches the get_sp_bias query to the backend-specific irn ops of irn. */
int arch_get_sp_bias(const arch_env_t *env, ir_node *irn)
{
	const arch_irn_ops_t *irn_ops = get_irn_ops(env, irn);
	return irn_ops->impl->get_sp_bias(irn_ops, irn);
}
arch_inverse_t *arch_get_inverse(const arch_env_t *env, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obstack)
{
const arch_irn_ops_t *ops = get_irn_ops(env, irn);
......
......@@ -296,6 +296,20 @@ struct _arch_irn_ops_if_t {
*/
void (*set_frame_offset)(const void *self, ir_node *irn, int offset);
/**
* Returns the delta of the stackpointer for nodes that increment or
* decrement the stackpointer with a constant value. (push, pop
* nodes on most architectures).
* A positive value stands for an expanding stack area, a negative value for
* a shrinking one.
*
* @param self The this pointer
* @param irn The node
* @return 0 if the stackpointer is not modified with a constant
* value, otherwise the increment/decrement value
*/
int (*get_sp_bias)(const void *self, const ir_node *irn);
/**
* Returns an inverse operation which yields the i-th argument
* of the given node as result.
......@@ -354,6 +368,7 @@ extern void arch_set_frame_offset(const arch_env_t *env, ir_node *irn, int bias)
extern entity *arch_get_frame_entity(const arch_env_t *env, ir_node *irn);
extern void arch_set_frame_entity(const arch_env_t *env, ir_node *irn, entity *ent);
extern int arch_get_sp_bias(const arch_env_t *env, ir_node *irn);
extern int arch_get_op_estimated_cost(const arch_env_t *env, const ir_node *irn);
extern arch_inverse_t *arch_get_inverse(const arch_env_t *env, const ir_node *irn, int i, arch_inverse_t *inverse, struct obstack *obstack);
......
......@@ -263,10 +263,10 @@ static ir_node *search_def(ir_node *usage, int pos, pset *copies, pset *copy_blo
if(pset_find_ptr(phi_blocks, curr_bl)) {
ir_node *phi = get_irn_link(curr_bl);
if(!phi) {
if(phi == NULL) {
int i, n_preds = get_irn_arity(curr_bl);
ir_graph *irg = get_irn_irg(curr_bl);
ir_node **ins = xmalloc(n_preds * sizeof(ins[0]));
ir_node **ins = alloca(n_preds * sizeof(ins[0]));
for(i = 0; i < n_preds; ++i)
ins[i] = new_r_Bad(irg);
......@@ -276,7 +276,6 @@ static ir_node *search_def(ir_node *usage, int pos, pset *copies, pset *copy_blo
set_irn_link(curr_bl, phi);
sched_add_after(curr_bl, phi);
free(ins);
for(i = 0; i < n_preds; ++i) {
ir_node *arg = search_def(phi, i, copies, copy_blocks, phis, phi_blocks, mode);
......@@ -284,7 +283,7 @@ static ir_node *search_def(ir_node *usage, int pos, pset *copies, pset *copy_blo
set_irn_n(phi, i, arg);
}
if(phis)
if(phis != NULL)
pset_insert_ptr(phis, phi);
}
......@@ -354,6 +353,7 @@ static void fix_usages(pset *copies, pset *copy_blocks, pset *phi_blocks, pset *
obstack_free(&obst, NULL);
}
#if 0
/**
* Remove phis which are not necessary.
* During place_phi_functions() phi functions are put on the dominance
......@@ -366,26 +366,27 @@ static void fix_usages(pset *copies, pset *copy_blocks, pset *phi_blocks, pset *
*/
static void remove_odd_phis(pset *copies, pset *unused_copies)
{
ir_node *irn;
ir_node *irn;
for(irn = pset_first(copies); irn; irn = pset_next(copies)) {
if(is_Phi(irn)) {
int i, n;
int illegal = 0;
for(irn = pset_first(copies); irn; irn = pset_next(copies)) {
if(is_Phi(irn)) {
int i, n;
int illegal = 0;
assert(sched_is_scheduled(irn) && "phi must be scheduled");
for(i = 0, n = get_irn_arity(irn); i < n && !illegal; ++i)
illegal = get_irn_n(irn, i) == NULL;
assert(sched_is_scheduled(irn) && "phi must be scheduled");
for(i = 0, n = get_irn_arity(irn); i < n && !illegal; ++i)
illegal = get_irn_n(irn, i) == NULL;
if(illegal)
sched_remove(irn);
}
}
if(illegal)
sched_remove(irn);
}
}
for(irn = pset_first(unused_copies); irn; irn = pset_next(unused_copies)) {
for(irn = pset_first(unused_copies); irn; irn = pset_next(unused_copies)) {
sched_remove(irn);
}
}
#endif
void be_ssa_constr_phis_ignore(dom_front_info_t *info, be_lv_t *lv, int n, ir_node *nodes[], pset *phis, pset *ignore_uses)
{
......
......@@ -550,13 +550,20 @@ static void be_main_loop(FILE *file_handle)
/* fix stack offsets */
BE_TIMER_PUSH(t_abi);
be_abi_fix_stack_bias(birg.abi);
//be_abi_fix_stack_bias(birg.abi);
BE_TIMER_POP(t_abi);
BE_TIMER_PUSH(t_finish);
arch_code_generator_finish(birg.cg);
BE_TIMER_POP(t_finish);
/* fix stack offsets */
BE_TIMER_PUSH(t_abi);
be_abi_fix_stack_nodes(birg.abi, NULL);
be_remove_dead_nodes_from_schedule(birg.irg);
be_abi_fix_stack_bias(birg.abi);
BE_TIMER_POP(t_abi);
dump(DUMP_FINAL, irg, "-finish", dump_ir_block_graph_sched);
/* check schedule and register allocation */
......
......@@ -89,8 +89,7 @@ typedef struct {
/** The be_Stack attribute type. */
typedef struct {
be_node_attr_t node_attr;
int offset; /**< The offset by which the stack shall be increased/decreased. */
be_stack_dir_t dir; /**< The direction in which the stack shall be modified (expand or shrink). */
int offset; /**< The offset by which the stack shall be expanded/shrunk. */
} be_stack_attr_t;
/** The be_Frame attribute type. */
......@@ -530,7 +529,7 @@ int be_Return_get_n_rets(ir_node *ret)
return a->num_ret_vals;
}
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned offset, be_stack_dir_t dir)
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset)
{
be_stack_attr_t *a;
ir_node *irn;
......@@ -540,7 +539,6 @@ ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_
in[1] = mem;
irn = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode, 2, in);
a = init_node_attr(irn, 1);
a->dir = dir;
a->offset = offset;
be_node_set_flags(irn, -1, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
......@@ -902,34 +900,20 @@ ir_node *be_get_IncSP_mem(ir_node *irn) {
return get_irn_n(irn, 1);
}
void be_set_IncSP_offset(ir_node *irn, unsigned offset)
/* Sets the stack pointer offset of an IncSP node.
 * A positive offset expands the stack, a negative one shrinks it. */
void be_set_IncSP_offset(ir_node *irn, int offset)
{
be_stack_attr_t *a = get_irn_attr(irn);
assert(be_is_IncSP(irn));
a->offset = offset;
}
unsigned be_get_IncSP_offset(const ir_node *irn)
/* Returns the stack pointer offset stored in an IncSP node. */
int be_get_IncSP_offset(const ir_node *irn)
{
be_stack_attr_t *a = get_irn_attr(irn);
assert(be_is_IncSP(irn));
return a->offset;
}
void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir)
{
be_stack_attr_t *a = get_irn_attr(irn);
assert(be_is_IncSP(irn));
a->dir = dir;
}
be_stack_dir_t be_get_IncSP_direction(const ir_node *irn)
{
be_stack_attr_t *a = get_irn_attr(irn);
assert(be_is_IncSP(irn));
return a->dir;
}
ir_node *be_spill(const arch_env_t *arch_env, ir_node *irn)
{
ir_node *bl = get_nodes_block(irn);
......@@ -1090,6 +1074,17 @@ static void be_node_set_frame_offset(const void *self, ir_node *irn, int offset)
}
}
/* Among the be nodes only IncSP modifies the stack pointer by a constant
 * value; its offset is the bias. All other be nodes report 0. */
static int be_node_get_sp_bias(const void *self, const ir_node *irn)
{
	return be_is_IncSP(irn) ? be_get_IncSP_offset(irn) : 0;
}
/*
___ ____ _ _ _ _ _ _
|_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
......@@ -1108,6 +1103,7 @@ static const arch_irn_ops_if_t be_node_irn_ops_if = {
be_node_get_frame_entity,
be_node_set_frame_entity,
be_node_set_frame_offset,
be_node_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
......@@ -1239,6 +1235,11 @@ static void phi_set_frame_offset(const void *_self, ir_node *irn, int bias)
{
}
/* Phi nodes never modify the stack pointer by a constant value, hence 0. */
static int phi_get_sp_bias(const void* self, const ir_node *irn)
{
return 0;
}
static const arch_irn_ops_if_t phi_irn_ops = {
phi_get_irn_reg_req,
phi_set_irn_reg,
......@@ -1248,6 +1249,7 @@ static const arch_irn_ops_if_t phi_irn_ops = {
phi_get_frame_entity,
phi_set_frame_entity,
phi_set_frame_offset,
phi_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
......@@ -1397,11 +1399,12 @@ static int dump_node(ir_node *irn, FILE *f, dump_reason_t reason)
case beo_IncSP:
{
be_stack_attr_t *a = (be_stack_attr_t *) at;
if (a->offset == BE_STACK_FRAME_SIZE)
if (a->offset == BE_STACK_FRAME_SIZE_EXPAND)
fprintf(f, "offset: FRAME_SIZE\n");
else if(a->offset == BE_STACK_FRAME_SIZE_SHRINK)
fprintf(f, "offset: -FRAME SIZE\n");
else
fprintf(f, "offset: %u\n", a->offset);
fprintf(f, "direction: %s\n", a->dir == be_stack_dir_expand ? "expand" : "shrink");
}
break;
case beo_Call:
......
......@@ -14,6 +14,8 @@
#include "firm_config.h"
#include <limits.h>
#include "irmode.h"
#include "irnode.h"
#include "entity_t.h"
......@@ -68,12 +70,6 @@ typedef enum {
beo_Last
} be_opcode_t;
/** Expresses the direction of the stack pointer increment of IncSP nodes. */
typedef enum {
be_stack_dir_expand = 0,
be_stack_dir_shrink = 1
} be_stack_dir_t;
/** Not used yet. */
typedef enum {
be_frame_flag_spill = 1,
......@@ -85,7 +81,8 @@ typedef enum {
* A "symbolic constant" for the size of the stack frame to use with IncSP nodes.
* It gets back-patched to the real size as soon it is known.
*/
#define BE_STACK_FRAME_SIZE ((unsigned) -1)
#define BE_STACK_FRAME_SIZE_EXPAND INT_MAX
#define BE_STACK_FRAME_SIZE_SHRINK INT_MIN
/**
* Determines if irn is a be_node.
......@@ -192,13 +189,13 @@ ir_node *be_new_SetSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_
* @param irg The graph to insert the node to.
* @param bl The block to insert the node into.
* @param old_sp The node defining the former stack pointer.
* @param amount The amount of bytes the stack shall be expanded/shrunk (see be_set_IncSP_offset)
* @param amount The mount of bytes the stack shall be expanded/shrinked (see set_IncSP_offset)
* (The direction of the adjustment is encoded in the sign of @p offset:
* positive expands the stack, negative shrinks it.)
* @return A new stack pointer increment/decrement node.
* @note This node sets a register constraint to the @p sp register on its output.
*/
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, unsigned amount, be_stack_dir_t dir);
ir_node *be_new_IncSP(const arch_register_t *sp, ir_graph *irg, ir_node *bl, ir_node *old_sp, ir_node *mem, int offset);
/** Returns the previous node that computes the stack pointer. */
ir_node *be_get_IncSP_pred(ir_node *incsp);
......@@ -209,17 +206,16 @@ void be_set_IncSP_pred(ir_node *incsp, ir_node *pred);
/** Returns the memory input of the IncSP. */
ir_node *be_get_IncSP_mem(ir_node *irn);
/** Sets a new offset to a IncSP node. */
void be_set_IncSP_offset(ir_node *irn, unsigned offset);
/**
 * Sets a new offset to an IncSP node.
 * A positive offset means expanding the stack, a negative offset shrinking it.
 * An offset equal to BE_STACK_FRAME_SIZE_EXPAND or BE_STACK_FRAME_SIZE_SHRINK
 * will be replaced by the real size of the stack frame in the
 * fix_stack_offsets phase.
 */
void be_set_IncSP_offset(ir_node *irn, int offset);
/** Gets the offset from a IncSP node. */
unsigned be_get_IncSP_offset(const ir_node *irn);
/** Sets a new direction to a IncSP node. */
void be_set_IncSP_direction(ir_node *irn, be_stack_dir_t dir);
/** Gets the direction from a IncSP node. */
be_stack_dir_t be_get_IncSP_direction(const ir_node *irn);