Commit 8fda947f authored by Matthias Braun

Alloc/Free only operate on the stack now

They are now considered low-level operations that just allocate/free a
block of memory on the stack. There is no high-level type information
attached anymore, nor any support for heap allocation. Frontends/liboo
should provide their own high-level nodes if they need these features.
parent 9df09758
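
Seen across the hunks below (lower_alloca_free, i_mapper_alloca, the sparc
backend), the constructor loses its type and ir_where_alloc arguments and
gains an alignment. A minimal before/after sketch, assuming the usual
construction context (dbgi, block, mem and an unsigned byte-size node called
size) is already in scope:

    /* before: Alloc carried a high-level type and an allocation place */
    ir_node *a = new_rd_Alloc(dbgi, block, mem, size, get_unknown_type(), stack_alloc);

    /* after: Alloc takes a byte size plus an alignment and always allocates
     * on the stack */
    ir_node *a     = new_rd_Alloc(dbgi, block, mem, size, /*alignment=*/1);
    ir_node *a_mem = new_rd_Proj(dbgi, a, mode_M, pn_Alloc_M);
    ir_node *a_res = new_rd_Proj(dbgi, a, get_modeP_data(), pn_Alloc_res);

The exception outputs pn_Alloc_X_regular/pn_Alloc_X_except disappear as well,
so no X projections have to be wired up.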
......@@ -211,10 +211,10 @@ ENUM_BITSET(ir_cons_flags)
typedef enum op_pin_state {
op_pin_state_floats = 0, /**< Nodes of this opcode can be placed in any basic block. */
op_pin_state_pinned = 1, /**< Nodes must remain in this basic block. */
op_pin_state_exc_pinned, /**< Node must be remain in this basic block if it can throw an
exception, else can float. Used internally. */
op_pin_state_mem_pinned /**< Node must be remain in this basic block if it can throw an
exception or uses memory, else can float. Used internally. */
op_pin_state_exc_pinned, /**< Node must remain in this basic block if it
can throw an exception, else can float. */
op_pin_state_mem_pinned /**< Node must remain in this basic block if it
uses memory, else can float. */
} op_pin_state;
/**
......@@ -311,15 +311,6 @@ typedef union symconst_symbol {
ir_enum_const *enum_p; /**< The enumeration constant of a SymConst. */
} symconst_symbol;
/**
* @ingroup Alloc
* The allocation place.
*/
typedef enum ir_where_alloc {
stack_alloc, /**< Alloc allocates the object on the stack. */
heap_alloc /**< Alloc allocates the object on the heap. */
} ir_where_alloc;
/** An input/output constraint attribute.
* @ingroup ASM
*/
......
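
With ir_where_alloc gone, heap allocation can no longer be expressed through
Alloc at all. One way a frontend might express it instead — purely a
hypothetical sketch, assuming an entity malloc_ent and a matching method type
malloc_type were created elsewhere — is an ordinary Call:

    /* hypothetical frontend lowering of a heap allocation to a malloc call */
    symconst_symbol sym;
    sym.entity_p = malloc_ent;                /* assumed entity for malloc */
    ir_node *callee  = new_r_SymConst(irg, mode_P, sym, symconst_addr_ent);
    ir_node *in[]    = { size };              /* byte count to allocate */
    ir_node *call    = new_r_Call(block, mem, callee, 1, in, malloc_type);
    ir_node *new_mem = new_r_Proj(call, mode_M, pn_Call_M);
    ir_node *ress    = new_r_Proj(call, mode_T, pn_Call_T_result);
    ir_node *ptr     = new_r_Proj(ress, mode_P, 0);

liboo or other frontends needing typed or garbage-collected allocation would
provide their own high-level node instead, as the commit message notes.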
......@@ -26,7 +26,6 @@
static pmap *entity_access_map;
static pmap *entity_reference_map;
static pmap *type_alloc_map;
static pmap *type_pointertype_map;
static pmap *type_arraytype_map;
......@@ -74,28 +73,6 @@ static void set_entity_reference_array(const ir_entity *ent, ir_node **refs)
pmap_insert(entity_reference_map, ent, (void *)refs);
}
/**
* Return a flexible array containing all IR-nodes
* that allocate a given type.
*/
static ir_node **get_type_alloc_array(const ir_type *tp)
{
if (!type_alloc_map) type_alloc_map = pmap_create();
ir_node **res = pmap_get(ir_node*, type_alloc_map, tp);
if (!res) {
res = NEW_ARR_F(ir_node *, 0);
pmap_insert(type_alloc_map, tp, (void *)res);
}
return res;
}
static void set_type_alloc_array(const ir_type *tp, ir_node **alls)
{
pmap_insert(type_alloc_map, tp, (void *)alls);
}
/**
* Return a flexible array containing all pointer
 * types that point to a given type.
......@@ -197,30 +174,6 @@ static void add_entity_reference(const ir_entity *ent, ir_node *n)
/* Access routines for types */
/**------------------------------------------------------------------*/
/* Number of Alloc nodes that create an instance of this type */
size_t get_type_n_allocs(const ir_type *tp)
{
ir_node **allocs = get_type_alloc_array(tp);
return ARR_LEN(allocs);
}
/* Alloc node that creates an instance of this type */
ir_node *get_type_alloc(const ir_type *tp, size_t pos)
{
ir_node **allocs = get_type_alloc_array(tp);
assert(pos < get_type_n_allocs(tp));
return allocs[pos];
}
static void add_type_alloc(const ir_type *tp, ir_node *n)
{
ir_node **allocs = get_type_alloc_array(tp);
ARR_APP1(ir_node *, allocs, n);
set_type_alloc_array(tp, allocs);
}
/*------------------------------------------------------------------*/
size_t get_type_n_pointertypes_to(const ir_type *tp)
{
ir_type **pts = get_type_pointertype_array(tp);
......@@ -332,10 +285,7 @@ static void chain_accesses(ir_node *n, void *env)
{
(void) env;
ir_node *addr;
if (is_Alloc(n)) {
add_type_alloc(get_Alloc_type(n), n);
return;
} else if (is_Sel(n)) {
if (is_Sel(n)) {
add_entity_reference(get_Sel_entity(n), n);
return;
} else if (is_SymConst_addr_ent(n)) {
......@@ -415,17 +365,6 @@ void free_trouts(void)
entity_reference_map = NULL;
}
if (type_alloc_map != NULL) {
/*
for (ir_node **alls = (ir_node **)pmap_first(type_alloc_map);
alls != NULL; alls = (ir_node **)pmap_next(type_alloc_map)) {
DEL_ARR_F(alls);
}
*/
pmap_destroy(type_alloc_map);
type_alloc_map = NULL;
}
if (type_pointertype_map != NULL) {
/*
for (ir_node **pts = (ir_node **)pmap_first(type_pointertype_map);
......
......@@ -696,7 +696,8 @@ static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
}
/**
* Adjust an alloca.
* The alloca is transformed into a back end alloca node and connected to the stack nodes.
* The alloca is transformed into a back end alloca node and connected to the
* stack nodes.
*/
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
......@@ -704,9 +705,6 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
ir_graph *irg = get_Block_irg(block);
const arch_env_t *arch_env = be_get_irg_arch_env(irg);
/* all non-stack Alloc nodes should already be lowered before the backend */
assert(get_Alloc_where(alloc) == stack_alloc);
ir_node *alloc_mem = NULL;
ir_node *alloc_res = NULL;
foreach_out_edge(alloc, edge) {
......@@ -732,20 +730,8 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
return curr_sp;
}
dbg_info *dbg = get_irn_dbg_info(alloc);
ir_node *count = get_Alloc_count(alloc);
ir_type *type = get_Alloc_type(alloc);
ir_node *size;
/* we might need to multiply the count with the element size */
if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
ir_mode *mode = get_irn_mode(count);
ir_tarval *tv = new_tarval_from_long(get_type_size_bytes(type),
mode);
ir_node *cnst = new_rd_Const(dbg, irg, tv);
size = new_rd_Mul(dbg, block, count, cnst, mode);
} else {
size = count;
}
dbg_info *dbg = get_irn_dbg_info(alloc);
ir_node *size = get_Alloc_size(alloc);
/* The stack pointer will be modified in an unknown manner.
We cannot omit it. */
......@@ -783,10 +769,7 @@ static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp
*/
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
{
/* all non-stack-alloc Free nodes should already be lowered before the
* backend phase */
assert(get_Free_where(free) == stack_alloc);
#if 0
/* we might need to multiply the size with the element size */
ir_type *type = get_Free_type(free);
ir_node *block = get_nodes_block(free);
......@@ -827,8 +810,12 @@ static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
/* kill the free */
exchange(free, sync);
curr_sp = res;
return curr_sp;
#endif
(void)env;
(void)free;
(void)curr_sp;
panic("beabi: Free nodes do not work properly yet");
}
/**
......@@ -881,9 +868,7 @@ static void link_ops_in_block_walker(ir_node *irn, void *data)
be_abi_irg_t *env = (be_abi_irg_t*)data;
unsigned code = get_irn_opcode(irn);
if (code == iro_Call ||
(code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
(code == iro_Free && get_Free_where(irn) == stack_alloc)) {
if (code == iro_Call || code == iro_Alloc || code == iro_Free) {
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
......@@ -943,12 +928,10 @@ static void process_ops_in_block(ir_node *bl, void *data)
curr_sp = adjust_call(env, irn, curr_sp);
break;
case iro_Alloc:
if (get_Alloc_where(irn) == stack_alloc)
curr_sp = adjust_alloc(env, irn, curr_sp);
curr_sp = adjust_alloc(env, irn, curr_sp);
break;
case iro_Free:
if (get_Free_where(irn) == stack_alloc)
curr_sp = adjust_free(env, irn, curr_sp);
curr_sp = adjust_free(env, irn, curr_sp);
break;
default:
panic("invalid call");
......
......@@ -500,11 +500,9 @@ static void link_ops_in_block_walker(ir_node *node, void *data)
break;
case iro_Alloc:
/** all non-stack alloc nodes should be lowered before the backend */
assert(get_Alloc_where(node) == stack_alloc);
collect_node(node);
break;
case iro_Free:
assert(get_Free_where(node) == stack_alloc);
collect_node(node);
break;
case iro_Builtin:
......
......@@ -137,9 +137,7 @@ static void check_omit_fp(ir_node *node, void *env)
* - we have allocations on the stack
* - we have calls (with the exception of tail-calls once we support them)
*/
if ((is_Alloc(node) && get_Alloc_where(node) == stack_alloc)
|| (is_Free(node) && get_Free_where(node) == stack_alloc)
|| is_Call(node)) {
if (is_Alloc(node) || is_Free(node) || is_Call(node)) {
bool *can_omit_fp = (bool*) env;
*can_omit_fp = false;
}
......
......@@ -2222,18 +2222,11 @@ static ir_node *gen_Alloc(ir_node *node)
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
ir_type *type = get_Alloc_type(node);
ir_node *size = get_Alloc_count(node);
ir_node *size = get_Alloc_size(node);
ir_node *stack_pred = get_stack_pointer_for(node);
ir_node *mem = get_Alloc_mem(node);
ir_node *new_mem = be_transform_node(mem);
if (get_Alloc_where(node) != stack_alloc)
panic("only stack-alloc supported in sparc backend (at %+F)", node);
/* lowerer should have transformed all allocas to byte size */
if (!is_unknown_type(type) && get_type_size_bytes(type) != 1)
panic("Found non-byte alloc in sparc backend (at %+F)", node);
ir_node *subsp;
if (is_Const(size)) {
ir_tarval *tv = get_Const_tarval(size);
......@@ -2266,51 +2259,14 @@ static ir_node *gen_Proj_Alloc(ir_node *node)
switch ((pn_Alloc)pn) {
case pn_Alloc_M: return new_r_Proj(new_alloc, mode_M, pn_sparc_SubSP_M);
case pn_Alloc_res: return new_r_Proj(new_alloc, mode_gp, pn_sparc_SubSP_addr);
case pn_Alloc_X_regular:
case pn_Alloc_X_except:
panic("exception output of alloc not supported (at %+F)",
node);
}
panic("invalid Proj->Alloc");
}
static ir_node *gen_Free(ir_node *node)
{
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
ir_type *type = get_Free_type(node);
ir_node *size = get_Free_count(node);
ir_node *mem = get_Free_mem(node);
ir_node *new_mem = be_transform_node(mem);
ir_node *stack_pred = get_stack_pointer_for(node);
if (get_Alloc_where(node) != stack_alloc)
panic("only stack-alloc supported in sparc backend (at %+F)", node);
/* lowerer should have transformed all allocas to byte size */
if (!is_unknown_type(type) && get_type_size_bytes(type) != 1)
panic("Found non-byte alloc in sparc backend (at %+F)", node);
ir_node *addsp;
if (is_Const(size)) {
ir_tarval *tv = get_Const_tarval(size);
long sizel = get_tarval_long(tv);
addsp = be_new_IncSP(sp_reg, new_block, stack_pred, -sizel, 0);
set_irn_dbg_info(addsp, dbgi);
} else {
ir_node *new_size = be_transform_node(size);
addsp = new_bd_sparc_AddSP(dbgi, new_block, stack_pred, new_size);
arch_set_irn_register(addsp, sp_reg);
}
/* if we are the last IncSP producer in a block then we have to keep
* the stack value.
* Note: This here keeps all producers which is more than necessary */
keep_alive(addsp);
pmap_insert(node_to_stack, node, addsp);
/* the "result" is the unmodified sp value */
return new_mem;
(void)node;
panic("Free not supported yet");
}
static const arch_register_req_t float1_req = {
......
......@@ -1564,12 +1564,6 @@ static void dump_node2type_edges(ir_node *n, void *env)
case iro_Call:
print_node_type_edge(F,n,get_Call_type(n),NODE2TYPE_EDGE_ATTR);
break;
case iro_Alloc:
print_node_type_edge(F,n,get_Alloc_type(n),NODE2TYPE_EDGE_ATTR);
break;
case iro_Free:
print_node_type_edge(F,n,get_Free_type(n),NODE2TYPE_EDGE_ATTR);
break;
default:
break;
}
......
......@@ -219,12 +219,7 @@ void dump_irnode_to_file(FILE *const F, const ir_node *const n)
}
break;
case iro_Alloc:
ir_fprintf(F, " allocating entity of type: %+F\n", get_Alloc_type(n));
fprintf(F, " allocating on: the %s\n", (get_Alloc_where(n) == stack_alloc) ? "stack" : "heap");
break;
case iro_Free:
ir_fprintf(F, " freeing entity of type %+F\n", get_Free_type(n));
fprintf(F, " allocated on: the %s\n", (get_Free_where(n) == stack_alloc) ? "stack" : "heap");
ir_fprintf(F, " alignment: %u\n", get_Alloc_alignment(n));
break;
case iro_Sel: {
const ir_entity *ent = get_Sel_entity(n);
......
......@@ -86,7 +86,6 @@ typedef enum typetag_t {
tt_type_state,
tt_visibility,
tt_volatility,
tt_where_alloc,
} typetag_t;
typedef enum keyword_t {
......@@ -278,9 +277,6 @@ static void symtbl_init(void)
INSERTENUM(tt_volatility, volatility_non_volatile);
INSERTENUM(tt_volatility, volatility_is_volatile);
INSERTENUM(tt_where_alloc, stack_alloc);
INSERTENUM(tt_where_alloc, heap_alloc);
#undef INSERTKEYWORD
#undef INSERTENUM
#undef INSERT
......@@ -457,15 +453,6 @@ static void write_relation(write_env_t *env, ir_relation relation)
write_long(env, (long)relation);
}
static void write_where_alloc(write_env_t *env, ir_where_alloc where_alloc)
{
switch (where_alloc) {
case stack_alloc: write_symbol(env, "stack_alloc"); return;
case heap_alloc: write_symbol(env, "heap_alloc"); return;
}
panic("invalid where_alloc value");
}
static void write_throws(write_env_t *env, bool throws)
{
write_symbol(env, throws ? "throw" : "nothrow");
......@@ -1539,7 +1526,6 @@ static const char *get_typetag_name(typetag_t typetag)
case tt_type_state: return "type state";
case tt_visibility: return "visibility";
case tt_volatility: return "volatility";
case tt_where_alloc: return "where alloc";
}
return "<UNKNOWN>";
}
......@@ -1611,11 +1597,6 @@ static ir_volatility read_volatility(read_env_t *env)
return (ir_volatility)read_enum(env, tt_volatility);
}
static ir_where_alloc read_where_alloc(read_env_t *env)
{
return (ir_where_alloc)read_enum(env, tt_where_alloc);
}
static bool read_throws(read_env_t *env)
{
return (bool)read_enum(env, tt_throws);
......
......@@ -1121,11 +1121,9 @@ static void register_get_entity_func(ir_op *op, get_entity_attr_func func)
void ir_register_getter_ops(void)
{
register_get_type_func(op_Alloc, get_Alloc_type);
register_get_type_func(op_Builtin, get_Builtin_type);
register_get_type_func(op_Call, get_Call_type);
register_get_type_func(op_CopyB, get_CopyB_type);
register_get_type_func(op_Free, get_Free_type);
register_get_type_func(op_InstOf, get_InstOf_type);
register_get_type_func(op_SymConst, get_SymConst_attr_type);
......
......@@ -254,17 +254,7 @@ static int node_cmp_attr_Alloc(const ir_node *a, const ir_node *b)
{
const alloc_attr *pa = &a->attr.alloc;
const alloc_attr *pb = &b->attr.alloc;
if (pa->where != pb->where || pa->type != pb->type)
return 1;
return node_cmp_exception(a, b);
}
/** Compares the attributes of two Free nodes. */
static int node_cmp_attr_Free(const ir_node *a, const ir_node *b)
{
const free_attr *pa = &a->attr.free;
const free_attr *pb = &b->attr.free;
return (pa->where != pb->where) || (pa->type != pb->type);
return pa->alignment != pb->alignment;
}
/** Compares the attributes of two SymConst nodes. */
......@@ -611,7 +601,6 @@ void firm_init_op(void)
set_op_cmp_attr(op_CopyB, node_cmp_attr_CopyB);
set_op_cmp_attr(op_Div, node_cmp_attr_Div);
set_op_cmp_attr(op_Dummy, node_cmp_attr_Dummy);
set_op_cmp_attr(op_Free, node_cmp_attr_Free);
set_op_cmp_attr(op_InstOf, node_cmp_attr_InstOf);
set_op_cmp_attr(op_Load, node_cmp_attr_Load);
set_op_cmp_attr(op_Mod, node_cmp_attr_Mod);
......
......@@ -272,17 +272,9 @@ typedef struct builtin_attr {
/** Alloc attributes. */
typedef struct alloc_attr {
except_attr exc; /**< the exception attribute. MUST be the first one. */
ir_where_alloc where; /**< stack, heap or other managed part of memory */
ir_type *type; /**< Type of the allocated object. */
unsigned alignment;
} alloc_attr;
/** Free attributes. */
typedef struct free_attr {
ir_type *type; /**< Type of the allocated object. */
ir_where_alloc where; /**< stack, heap or other managed part of memory */
} free_attr;
/** InstOf attributes. */
typedef struct io_attr {
except_attr exc; /**< the exception attribute. MUST be the first one. */
......@@ -380,7 +372,6 @@ typedef union ir_attr {
call_attr call; /**< For Call. */
builtin_attr builtin; /**< For Builtin. */
alloc_attr alloc; /**< For Alloc. */
free_attr free; /**< For Free. */
io_attr instof; /**< For InstOf */
load_attr load; /**< For Load. */
store_attr store; /**< For Store. */
......
......@@ -485,10 +485,8 @@ static int verify_node_Proj_Alloc(const ir_node *p)
ASSERT_AND_RET_DBG(
(
(proj == pn_Alloc_M && mode == mode_M) ||
(proj == pn_Alloc_X_regular && mode == mode_X) ||
(proj == pn_Alloc_X_except && mode == mode_X) ||
(proj == pn_Alloc_res && mode_is_reference(mode))
(proj == pn_Alloc_M && mode == mode_M) ||
(proj == pn_Alloc_res && mode_is_reference(mode))
),
"wrong Proj from Alloc", 0,
show_proj_failure(p);
......@@ -1407,10 +1405,10 @@ static int verify_node_Alloc(const ir_node *n)
{
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Alloc_mem(n));
ir_mode *op2mode = get_irn_mode(get_Alloc_count(n));
ir_mode *op2mode = get_irn_mode(get_Alloc_size(n));
ASSERT_AND_RET_DBG(
/* Alloc: BB x M x int_u --> M x X x ref */
/* Alloc: BB x M x int_u --> M x ref */
op1mode == mode_M &&
mode_is_int(op2mode) &&
!mode_is_signed(op2mode) &&
......@@ -1429,13 +1427,10 @@ static int verify_node_Free(const ir_node *n)
ir_mode *mymode = get_irn_mode(n);
ir_mode *op1mode = get_irn_mode(get_Free_mem(n));
ir_mode *op2mode = get_irn_mode(get_Free_ptr(n));
ir_mode *op3mode = get_irn_mode(get_Free_count(n));
ASSERT_AND_RET_DBG(
/* Free: BB x M x ref x int_u --> M */
op1mode == mode_M && mode_is_reference(op2mode) &&
mode_is_int(op3mode) &&
!mode_is_signed(op3mode) &&
mymode == mode_M,
"Free node", 0,
show_node_mode_mismatch(n, "/* Free: BB x M x ref x int_u --> M */");
......
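
Taken together, the verifier hunks shrink both node signatures; in the arrow
notation the verifier itself uses:

    before:  Alloc: BB x M x int_u --> M x X x ref    Free: BB x M x ref x int_u --> M
    after:   Alloc: BB x M x int_u --> M x ref        Free: BB x M x ref --> M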
......@@ -77,11 +77,7 @@ static void transform_Proj_Alloc(ir_node *node)
static void lower_alloca_free(ir_node *node, void *data)
{
(void) data;
ir_type *type;
if (is_Alloc(node)) {
type = get_Alloc_type(node);
} else if (is_Free(node)) {
type = get_Free_type(node);
} else if (is_Proj(node)) {
ir_node *proj_pred = get_Proj_pred(node);
if (is_Alloc(proj_pred)) {
......@@ -94,40 +90,16 @@ static void lower_alloca_free(ir_node *node, void *data)
if (!ir_nodeset_insert(&transformed, node))
return;
unsigned size = get_type_size_bytes(type);
if (is_unknown_type(type))
size = 1;
if (size == 1 && stack_alignment <= 1)
if (stack_alignment <= 1)
return;
ir_node *count;
ir_node *mem;
ir_where_alloc where;
if (is_Alloc(node)) {
count = get_Alloc_count(node);
mem = get_Alloc_mem(node);
where = get_Alloc_where(node);
} else {
count = get_Free_count(node);
mem = get_Free_mem(node);
where = get_Free_where(node);
}
ir_mode *const mode = get_irn_mode(count);
ir_node *const block = get_nodes_block(node);
ir_graph *const irg = get_irn_irg(node);
ir_node *const szconst = new_r_Const_long(irg, mode, (long)size);
ir_node *const mul = new_r_Mul(block, count, szconst, mode);
dbg_info *const dbgi = get_irn_dbg_info(node);
ir_node *const new_size = adjust_alloc_size(dbgi, mul, block);
ir_type *const new_type = get_unknown_type();
ir_node * new_node;
if (is_Alloc(node)) {
new_node = new_rd_Alloc(dbgi, block, mem, new_size, new_type, where);
} else {
ir_node *ptr = get_Free_ptr(node);
new_node
= new_rd_Free(dbgi, block, mem, ptr, new_size, new_type, where);
}
ir_node *const size = get_Alloc_size(node);
ir_node *const mem = get_Alloc_mem(node);
ir_node *const block = get_nodes_block(node);
dbg_info *const dbgi = get_irn_dbg_info(node);
ir_node *const new_size = adjust_alloc_size(dbgi, size, block);
ir_node *const new_node
= new_rd_Alloc(dbgi, block, mem, new_size, 1);
ir_nodeset_insert(&transformed, new_node);
if (new_node != node)
......
......@@ -263,7 +263,7 @@ int i_mapper_alloca(ir_node *call, void *ctx)
ir_node *mem = get_Call_mem(call);
ir_node *block = get_nodes_block(call);
ir_node *op = get_Call_param(call, 0);
ir_node *irn, *exc, *no_exc;
ir_node *irn;
dbg_info *dbg = get_irn_dbg_info(call);
(void) ctx;
......@@ -276,20 +276,13 @@ int i_mapper_alloca(ir_node *call, void *ctx)
op = new_rd_Conv(dbg, block, op, mode);
}
irn = new_rd_Alloc(dbg, block, mem, op, get_unknown_type(), stack_alloc);
irn = new_rd_Alloc(dbg, block, mem, op, 1);
mem = new_rd_Proj(dbg, irn, mode_M, pn_Alloc_M);
irn = new_rd_Proj(dbg, irn, get_modeP_data(), pn_Alloc_res);
if (ir_throws_exception(call)) {
no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
ir_set_throws_exception(irn, true);
} else {
no_exc = NULL;
exc = NULL;
}
assert(!ir_throws_exception(call));
DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_ALLOCA);
replace_call(irn, call, mem, no_exc, exc);
replace_call(irn, call, mem, NULL, NULL);
return 1;
}
......
......@@ -255,17 +255,16 @@ static int can_escape(ir_node *n)
*/
static void find_allocations(ir_node *alloc, void *ctx)
{
/* TODO: check if we have a heap allocation */
(void)alloc;
(void)ctx;
return;
#if 0
walk_env_t *env = (walk_env_t*)ctx;
int i;
ir_node *adr;
if (! is_Alloc(alloc))