Commit 5332b7ba authored by Matthias Braun's avatar Matthias Braun
Browse files

remove support for trampolines/nested functions

- It was only available in the ia32 backend and broken there. As the
  feature is rarely used, I remove it for now instead of going through
  the trouble of fixing it.
parent b1de2595
......@@ -81,18 +81,6 @@ typedef enum {
DW_LANG_Go = 0x0016,
} dwarf_source_language;
/**
 * Build a Trampoline for the closure.
 *
 * A trampoline is a small code stub written into a writable/executable
 * memory region at runtime; it loads the environment (static-chain)
 * pointer and jumps to the real callee.  Backends provide an
 * implementation of this callback via backend_params.build_trampoline
 * (see the ia32 backend's ia32_create_trampoline_fkt).
 *
 * @param block the block where to build the trampoline
 * @param mem memory
 * @param trampoline address of a trampoline region
 * @param env address of the environment
 * @param callee address of the function to call
 *
 * @return modified memory
 */
typedef ir_node *(create_trampoline_fkt)(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee);
/**
* This structure contains parameters that should be
* propagated to the libFirm parameter set.
......@@ -145,15 +133,6 @@ typedef struct backend_params {
*/
ir_type *type_long_double;
/** Size of the trampoline code. */
unsigned trampoline_size;
/** Alignment of the trampoline code. */
unsigned trampoline_align;
/** If non-zero, build the trampoline. */
create_trampoline_fkt *build_trampoline;
/** Alignment of stack parameters */
unsigned stack_param_align;
......
......@@ -289,8 +289,6 @@ typedef enum ir_builtin_kind {
ir_bk_bswap, /**< byte swap */
ir_bk_inport, /**< in port */
ir_bk_outport, /**< out port */
ir_bk_inner_trampoline, /**< address of a trampoline for GCC inner
functions */
ir_bk_saturating_increment, /**< saturating increment */
ir_bk_compare_swap, /**< compare exchange (aka. compare and swap) */
ir_bk_may_alias, /**< replaced by 0 if args cannot alias,
......
......@@ -689,37 +689,6 @@ static void analyse_irg_entity_usage(ir_graph *irg)
set_entity_usage(entity, (ir_entity_usage) flags);
}
/* check inner functions accessing outer frame */
unsigned static_link_arg = 0;
for (size_t i = 0, n = get_class_n_members(frame_type); i < n; ++i) {
ir_entity *ent = get_class_member(frame_type, i);
if (!is_method_entity(ent))
continue;
ir_graph *inner_irg = get_entity_irg(ent);
if (inner_irg == NULL)
continue;
assure_irg_outs(inner_irg);
ir_node *args = get_irg_args(inner_irg);
foreach_irn_out_r(args, j, arg) {
if (get_Proj_num(arg) == static_link_arg) {
foreach_irn_out_r(arg, k, succ) {
if (is_Member(succ)) {
ir_entity *entity = get_Member_entity(succ);
if (get_entity_owner(entity) == frame_type) {
/* found an access to the outer frame */
unsigned flags = get_entity_usage(entity);
flags |= determine_entity_usage(succ, entity);
set_entity_usage(entity, (ir_entity_usage) flags);
}
}
}
}
}
}
/* now computed */
add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
}
......
......@@ -182,9 +182,6 @@ static const backend_params *TEMPLATE_get_backend_params(void)
NULL, /* long long type */
NULL, /* unsigned long long type */
NULL, /* long double type */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
4, /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
ir_overflow_min_max
};
......
......@@ -782,9 +782,6 @@ static const backend_params *amd64_get_backend_params(void) {
NULL, /* long long type */
NULL, /* unsigned long long type */
NULL, /* long double type (not supported yet) */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
8, /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
ir_overflow_indefinite
};
......
......@@ -1461,7 +1461,6 @@ static ir_node *gen_Builtin(ir_node *node)
case ir_bk_bswap:
case ir_bk_outport:
case ir_bk_inport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
case ir_bk_compare_swap:
case ir_bk_may_alias:
......@@ -1497,7 +1496,6 @@ static ir_node *gen_Proj_Builtin(ir_node *proj)
assert(get_Proj_num(proj) == pn_Builtin_M);
return new_node;
case ir_bk_inport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
case ir_bk_compare_swap:
case ir_bk_may_alias:
......
......@@ -349,9 +349,6 @@ static backend_params arm_backend_params = {
NULL, /* long long type */
NULL, /* unsigned long long type */
NULL, /* long double type */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
4, /* alignment of stack parameter */
ir_overflow_min_max
};
......
......@@ -69,37 +69,6 @@ static void stack_frame_compute_initial_offset(be_stack_layout_t *frame)
}
}
/**
 * Walker: finally lower all Members of outer frame or parameter
 * entities.
 *
 * A Member selecting an entity of the stack layout's frame type or
 * argument type is replaced by plain address arithmetic: the entity's
 * already-computed stack offset (be_get_stack_entity_offset) is added
 * to the Member's base pointer and the Member node is exchanged for
 * the resulting address.
 *
 * NOTE(review): the layout is fetched from the graph containing the
 * Member node; the caller walks *inner* function graphs, so confirm
 * this really resolves against the intended (outer) frame layout.
 *
 * @param sel node visited by the walker (only Member nodes are handled)
 * @param ctx unused walker context
 */
static void lower_outer_frame_members(ir_node *sel, void *ctx)
{
(void)ctx;
if (!is_Member(sel))
return;
ir_entity *ent = get_Member_entity(sel);
ir_type *owner = get_entity_owner(ent);
ir_node *ptr = get_Member_ptr(sel);
ir_graph *irg = get_irn_irg(sel);
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
if (owner == layout->frame_type || owner == layout->arg_type) {
/* found access to outer frame or arguments */
int offset = be_get_stack_entity_offset(layout, ent, 0);
if (offset != 0) {
/* materialize base + offset; the offset constant uses the
 * unsigned mode matching the reference mode of the pointer */
ir_node *bl = get_nodes_block(sel);
dbg_info *dbgi = get_irn_dbg_info(sel);
ir_mode *mode = get_irn_mode(sel);
ir_mode *mode_UInt = get_reference_mode_unsigned_eq(mode);
ir_node *cnst = new_r_Const_long(irg, mode_UInt, offset);
ptr = new_rd_Add(dbgi, bl, ptr, cnst, mode);
}
/* replace the Member by the (possibly offset) base pointer */
exchange(sel, ptr);
}
}
/**
* A helper struct for the bias walker.
*/
......@@ -201,18 +170,6 @@ void be_abi_fix_stack_bias(ir_graph *irg)
/* fix the bias in all other blocks */
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
/* fix now inner functions: these still have Sel node to outer
frame and parameter entities */
ir_type *frame_tp = get_irg_frame_type(irg);
for (unsigned i = get_class_n_members(frame_tp); i-- > 0; ) {
ir_entity *ent = get_class_member(frame_tp, i);
if (!is_method_entity(ent))
continue;
ir_graph *irg = get_entity_irg(ent);
if (irg != NULL)
irg_walk_graph(irg, NULL, lower_outer_frame_members, NULL);
}
}
typedef struct fix_stack_walker_env_t {
......
......@@ -1573,35 +1573,6 @@ static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
return true;
}
/**
 * Create the trampoline code.
 *
 * Emits a chain of Store nodes writing a 10-byte ia32 trampoline:
 *   0xB9 <env>     mov ecx, <env>   ; load environment pointer
 *   0xE9 <callee>  jmp  <callee>
 *
 * NOTE(review): 0xE9 is a jmp with a *relative* 32-bit displacement,
 * but <callee> is stored as-is here (compare gen_inner_trampoline in
 * the ia32 transform, which subtracts the trampoline address).  This
 * looks broken, matching the commit message's "broken there".
 *
 * @param block      block the Stores are placed in
 * @param mem        incoming memory state
 * @param trampoline address of the trampoline region to write to
 * @param env        environment (static chain) value
 * @param callee     address of the function to call
 * @return memory state after the last Store
 */
static ir_node *ia32_create_trampoline_fkt(ir_node *block, ir_node *mem, ir_node *trampoline, ir_node *env, ir_node *callee)
{
ir_graph *const irg = get_irn_irg(block);
ir_node * p = trampoline;
ir_mode *const mode = get_irn_mode(p);
ir_node *const one = new_r_Const_one(irg, ia32_mode_gp);
ir_node *const four = new_r_Const_long(irg, ia32_mode_gp, 4);
ir_node * st;
/* mov ecx,<env> */
/* opcode byte 0xB9 (mov ecx, imm32) */
st = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xb9), cons_none);
mem = new_r_Proj(st, mode_M, pn_Store_M);
p = new_r_Add(block, p, one, mode);
/* 4-byte immediate: the environment pointer */
st = new_r_Store(block, mem, p, env, cons_none);
mem = new_r_Proj(st, mode_M, pn_Store_M);
p = new_r_Add(block, p, four, mode);
/* jmp <callee> */
/* opcode byte 0xE9 (jmp rel32) */
st = new_r_Store(block, mem, p, new_r_Const_long(irg, mode_Bu, 0xe9), cons_none);
mem = new_r_Proj(st, mode_M, pn_Store_M);
p = new_r_Add(block, p, one, mode);
/* 4-byte operand: the callee address (see NOTE above re relative jmp) */
st = new_r_Store(block, mem, p, callee, cons_none);
mem = new_r_Proj(st, mode_M, pn_Store_M);
return mem;
}
static const ir_settings_arch_dep_t ia32_arch_dep = {
1, /* also use subs */
4, /* maximum shifts */
......@@ -1624,9 +1595,6 @@ static backend_params ia32_backend_params = {
NULL, /* long long type */
NULL, /* unsigned long long type */
NULL, /* long double type */
12, /* size of trampoline code */
4, /* alignment of trampoline code */
ia32_create_trampoline_fkt,
4, /* alignment of stack parameter */
ir_overflow_indefinite
};
......@@ -1970,7 +1938,6 @@ static void ia32_lower_for_target(void)
supported[s++] = ir_bk_bswap;
supported[s++] = ir_bk_outport;
supported[s++] = ir_bk_inport;
supported[s++] = ir_bk_inner_trampoline;
supported[s++] = ir_bk_saturating_increment;
if (ia32_cg_config.use_popcnt)
supported[s++] = ir_bk_popcount;
......
......@@ -5353,78 +5353,6 @@ static ir_node *gen_compare_swap(ir_node *node)
return new_node;
}
/**
 * Transform a builtin inner trampoline
 *
 * Lowers an ir_bk_inner_trampoline Builtin into ia32 Store nodes that
 * write a 10-byte trampoline at the given address:
 *   0xB9 <env>  mov ecx, <env>
 *   0xE9 <rel>  jmp rel32, with rel = callee - (trampoline + 10)
 *
 * Builtin parameters: 0 = trampoline address, 1 = callee address,
 * 2 = environment pointer.
 *
 * @param node the Builtin node to transform
 * @return a Tuple (final store = memory result, trampoline address);
 *         consumed by gen_Proj_Builtin below
 */
static ir_node *gen_inner_trampoline(ir_node *node)
{
ir_node *ptr = get_Builtin_param(node, 0);
ir_node *callee = get_Builtin_param(node, 1);
ir_node *env = be_transform_node(get_Builtin_param(node, 2));
ir_node *mem = get_Builtin_mem(node);
ir_node *block = get_nodes_block(node);
ir_node *new_block = be_transform_node(block);
dbg_info *dbgi = get_irn_dbg_info(node);
/* construct store address */
x86_address_t addr;
create_transformed_address_mode(&addr, ptr, x86_create_am_normal);
addr.mem = be_transform_node(mem);
ir_graph *const irg = get_irn_irg(new_block);
/* mov ecx, <env> */
/* byte 0: opcode 0xB9 (mov ecx, imm32) */
ir_node *val = ia32_create_Immediate(irg, NULL, 0xB9);
ir_node *store = new_bd_ia32_Store_8bit(dbgi, new_block, addr.base,
addr.index, addr.mem, val);
set_irn_pinned(store, get_irn_pinned(node));
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, mode_Bu);
set_address(store, &addr);
/* chain the stores through addr.mem; bump offset past what we wrote */
addr.mem = store;
addr.offset += 1;
/* bytes 1-4: the environment pointer */
store = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, env);
set_irn_pinned(store, get_irn_pinned(node));
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, ia32_mode_gp);
set_address(store, &addr);
addr.mem = store;
addr.offset += 4;
/* jmp rel <callee> */
/* byte 5: opcode 0xE9 (jmp rel32) */
val = ia32_create_Immediate(irg, NULL, 0xE9);
store = new_bd_ia32_Store_8bit(dbgi, new_block, addr.base, addr.index, addr.mem, val);
set_irn_pinned(store, get_irn_pinned(node));
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, mode_Bu);
set_address(store, &addr);
addr.mem = store;
addr.offset += 1;
ir_node *trampoline = be_transform_node(ptr);
/* the callee is typically an immediate */
/* compute rel = (callee - 10) - trampoline: rel32 is relative to the
 * end of the 10-byte trampoline, i.e. trampoline + 10 */
ir_node *rel;
if (is_Address(callee)) {
rel = new_bd_ia32_Const(dbgi, new_block, get_Address_entity(callee), 0, -10);
} else {
rel = new_bd_ia32_Lea(dbgi, new_block, be_transform_node(callee), noreg_GP);
add_ia32_am_offs_int(rel, -10);
}
rel = new_bd_ia32_Sub(dbgi, new_block, noreg_GP, noreg_GP, nomem, rel, trampoline);
/* bytes 6-9: the relative displacement */
store = new_bd_ia32_Store(dbgi, new_block, addr.base,
addr.index, addr.mem, rel);
set_irn_pinned(store, get_irn_pinned(node));
set_ia32_op_type(store, ia32_AddrModeD);
set_ia32_ls_mode(store, ia32_mode_gp);
set_address(store, &addr);
/* Tuple pred 0 = memory (last store), pred 1 = trampoline address */
ir_node *in[] = { store, trampoline };
return new_r_Tuple(new_block, ARRAY_SIZE(in), in);
}
/**
* Transform Builtin node.
*/
......@@ -5459,8 +5387,6 @@ static ir_node *gen_Builtin(ir_node *node)
return gen_outport(node);
case ir_bk_inport:
return gen_inport(node);
case ir_bk_inner_trampoline:
return gen_inner_trampoline(node);
case ir_bk_saturating_increment:
return gen_saturating_increment(node);
case ir_bk_compare_swap:
......@@ -5505,13 +5431,6 @@ static ir_node *gen_Proj_Builtin(ir_node *proj)
assert(get_Proj_num(proj) == pn_Builtin_M);
return new_r_Proj(new_node, mode_M, pn_ia32_Inport_M);
}
case ir_bk_inner_trampoline:
if (get_Proj_num(proj) == pn_Builtin_max+1) {
return get_Tuple_pred(new_node, 1);
} else {
assert(get_Proj_num(proj) == pn_Builtin_M);
return get_Tuple_pred(new_node, 0);
}
case ir_bk_compare_swap:
assert(is_ia32_CmpXChgMem(new_node));
if (get_Proj_num(proj) == pn_Builtin_M) {
......
......@@ -569,9 +569,6 @@ static const backend_params *sparc_get_backend_params(void)
NULL, /* long long type */
NULL, /* unsigned long long type */
NULL, /* long double type */
0, /* no trampoline support: size 0 */
0, /* no trampoline support: align 0 */
NULL, /* no trampoline support: no trampoline builder */
4, /* alignment of stack parameter: typically 4 (32bit) or 8 (64bit) */
ir_overflow_min_max
};
......
......@@ -96,14 +96,9 @@ static void process_bias(ir_node *block, bool sp_relative, int bias,
static void adjust_entity_offsets(ir_type *type, long offset)
{
size_t n_members = get_compound_n_members(type);
size_t i;
for (i = 0; i < n_members; ++i) {
for (size_t i = 0, n_members = get_compound_n_members(type);
i < n_members; ++i) {
ir_entity *member = get_compound_member(type, i);
/* ignore nested functions */
if (member->entity_kind == IR_ENTITY_METHOD)
continue;
int member_offset = get_entity_offset(member);
set_entity_offset(member, member_offset + offset);
}
......
......@@ -2417,7 +2417,6 @@ static ir_node *gen_Builtin(ir_node *node)
case ir_bk_frame_address:
case ir_bk_outport:
case ir_bk_inport:
case ir_bk_inner_trampoline:
/* not supported */
break;
case ir_bk_compare_swap:
......@@ -2454,7 +2453,6 @@ static ir_node *gen_Proj_Builtin(ir_node *proj)
case ir_bk_prefetch:
case ir_bk_outport:
case ir_bk_inport:
case ir_bk_inner_trampoline:
/* not supported / should be lowered */
break;
case ir_bk_saturating_increment:
......
......@@ -252,7 +252,6 @@ static void symtbl_init(void)
INSERTENUM(tt_builtin_kind, ir_bk_bswap);
INSERTENUM(tt_builtin_kind, ir_bk_inport);
INSERTENUM(tt_builtin_kind, ir_bk_outport);
INSERTENUM(tt_builtin_kind, ir_bk_inner_trampoline);
INSERTENUM(tt_builtin_kind, ir_bk_saturating_increment);
INSERTENUM(tt_builtin_kind, ir_bk_compare_swap);
......
......@@ -689,7 +689,6 @@ const char *get_builtin_kind_name(ir_builtin_kind kind)
X(ir_bk_bswap);
X(ir_bk_inport);
X(ir_bk_outport);
X(ir_bk_inner_trampoline);
X(ir_bk_saturating_increment);
X(ir_bk_compare_swap);
X(ir_bk_may_alias);
......@@ -986,7 +985,6 @@ int is_irn_const_memory(const ir_node *node)
case ir_bk_bswap:
case ir_bk_inport:
case ir_bk_outport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
case ir_bk_may_alias:
return true;
......
......@@ -7359,68 +7359,6 @@ static ir_node *transform_node_Store(ir_node *n)
return n;
}
/**
 * optimize a trampoline Call into a direct Call
 *
 * If the call pointer is a Proj of an ir_bk_inner_trampoline Builtin,
 * the trampoline is bypassed entirely: a new Call to the trampoline's
 * callee (Builtin param 1) is built, with the environment pointer
 * (Builtin param 2) prepended as a hidden first argument passed via
 * the this-call convention.
 *
 * @param call the Call node to transform
 * @return the replacement Call, or the unchanged node if it is not a
 *         call through an inner trampoline
 */
static ir_node *transform_node_Call(ir_node *call)
{
/* only Calls whose pointer is Proj(Builtin ir_bk_inner_trampoline) */
ir_node *callee = get_Call_ptr(call);
if (! is_Proj(callee))
return call;
callee = get_Proj_pred(callee);
if (! is_Builtin(callee))
return call;
if (get_Builtin_kind(callee) != ir_bk_inner_trampoline)
return call;
ir_node *mem = get_Call_mem(call);
if (skip_Proj(mem) == callee) {
/* memory is routed to the trampoline, skip */
mem = get_Builtin_mem(callee);
}
/* build a new call type */
ir_type *mtp = get_Call_type(call);
type_dbg_info *tdb = get_type_dbg_info(mtp);
size_t n_res = get_method_n_ress(mtp);
size_t n_param = get_method_n_params(mtp);
/* one extra parameter slot for the environment pointer */
ir_type *ctp = new_type_method(n_param + 1, n_res);
set_type_dbg_info(ctp, tdb);
for (size_t i = 0; i < n_res; ++i)
set_method_res_type(ctp, i, get_method_res_type(mtp, i));
ir_node **in = ALLOCAN(ir_node*, n_param+1);
/* FIXME: we do not need a new pointer type in every step */
ir_graph *irg = get_irn_irg(call);
ir_type *tp = get_irg_frame_type(irg);
tp = new_type_pointer(tp);
set_method_param_type(ctp, 0, tp);
/* environment pointer becomes the hidden first argument */
in[0] = get_Builtin_param(callee, 2);
for (size_t i = 0; i < n_param; ++i) {
set_method_param_type(ctp, i + 1, get_method_param_type(mtp, i));
in[i + 1] = get_Call_param(call, i);
}
ir_variadicity var = get_method_variadicity(mtp);
set_method_variadicity(ctp, var);
/* When we resolve a trampoline, the function must be called by a this-call */
set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
set_method_additional_properties(ctp, get_method_additional_properties(mtp));
/* call the trampoline's callee (Builtin param 1) directly */
ir_node *adr = get_Builtin_param(callee, 1);
dbg_info *db = get_irn_dbg_info(call);
ir_node *bl = get_nodes_block(call);
ir_node *res = new_rd_Call(db, bl, mem, adr, n_param + 1, in, ctp);
/* preserve floating pin state of the original Call */
if (get_irn_pinned(call) == op_pin_state_floats)
set_irn_pinned(res, op_pin_state_floats);
return res;
}
/**
* Tries several [inplace] [optimizing] transformations and returns an
* equivalent node. The difference to equivalent_node() is that these
......@@ -7589,7 +7527,6 @@ void ir_register_opt_node_ops(void)
register_transform_node_func(op_And, transform_node_And);
register_transform_node_func(op_Bitcast, transform_node_Bitcast);
register_transform_node_func(op_Block, transform_node_Block);
register_transform_node_func(op_Call, transform_node_Call);
register_transform_node_func(op_Cmp, transform_node_Cmp);
register_transform_node_func(op_Cond, transform_node_Cond);
register_transform_node_func(op_Confirm, transform_node_Confirm);
......
......@@ -40,7 +40,6 @@ static const char *get_builtin_name(ir_builtin_kind kind)
case ir_bk_frame_address:
case ir_bk_inport:
case ir_bk_outport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
case ir_bk_compare_swap:
case ir_bk_may_alias:
......@@ -173,7 +172,6 @@ static void lower_builtin(ir_node *node, void *env)
case ir_bk_frame_address:
case ir_bk_inport:
case ir_bk_outport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
case ir_bk_compare_swap:
/* can't do anything about these, backend will probably fail now */
......
......@@ -2535,7 +2535,6 @@ static void lower_Builtin(ir_node *builtin, ir_mode *mode)
case ir_bk_compare_swap:
case ir_bk_debugbreak:
case ir_bk_frame_address:
case ir_bk_inner_trampoline:
case ir_bk_inport:
case ir_bk_may_alias:
case ir_bk_outport:
......
......@@ -388,7 +388,6 @@ next_no_change:
break;
case ir_bk_return_address:
case ir_bk_frame_address:
case ir_bk_inner_trampoline:
/* Access context information => not pure anymore */
max_prop &= ~mtp_property_pure;
break;
......
......@@ -52,8 +52,7 @@ void opt_frame_irg(ir_graph *irg)
ir_entity *list = NULL;
for (size_t i = n; i-- > 0;) {
ir_entity *entity = get_class_member(frame_tp, i);
/* beware of inner functions: those are NOT unused */
if (get_entity_link(entity) == NULL && !is_method_entity(entity)) {
if (get_entity_link(entity) == NULL) {
set_entity_link(entity, list);
list = entity;
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment