Commit bdd5ec06 authored by Tobias Rapp, committed by Andreas Zwinkau

Refactoring: Deleted unused inlining functionality.

parent c6182ec2
@@ -709,82 +709,6 @@ FIRM_API void combo(ir_graph *irg);
 */
FIRM_API ir_graph_pass_t *combo_pass(const char *name);
/**
 * Inlines all small methods at call sites where the called address comes
 * from a SymConst node that references the entity representing the called
 * method.
 *
 * @param irg  the graph
 * @param size maximum function size
 *
 * The size argument is a rough measure for the code size of the method:
 * Methods where the obstack containing the firm graph is smaller than
 * size are inlined. Further, only a limited number of calls are inlined.
 * If the method contains more than 1024 inlineable calls, none will be
 * inlined.
 * Inlining is only performed if the flags `optimize' and `inlining' are set.
 * The graph may not be in state phase_building.
 * It is recommended to call local_optimize_graph() after inlining, as this
 * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
 * combination as a control flow operation.
 */
FIRM_API void inline_small_irgs(ir_graph *irg, int size);
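To make the recommended clean-up step concrete, here is a small editor-added sketch (not part of the removed header; run_small_inliner and the size value 50 are invented for illustration) that applies the routine to every graph of the program and then runs local_optimize_graph() to fold the leftover Proj-Tuple-Jmp combinations:

#include <libfirm/firm.h>

/* Hypothetical driver: inline small graphs program-wide, then clean up. */
static void run_small_inliner(void)
{
	size_t n_irgs = get_irp_n_irgs();
	for (size_t i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		inline_small_irgs(irg, 50);  /* 50: arbitrary obstack-size threshold */
		local_optimize_graph(irg);   /* folds the leftover Tuple/Proj nodes  */
	}
}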
/**
 * Creates an ir_graph pass for inline_small_irgs().
 *
 * @param name the name of this pass or NULL
 * @param size maximum function size
 *
 * @return the newly created ir_graph pass
 */
FIRM_API ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size);
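A minimal sketch of how this constructor might be used (editor's addition; build_small_inline_pass is a hypothetical helper, and registration with a pass manager is only hinted at in a comment because that API lies outside this diff):

static void build_small_inline_pass(void)
{
	/* NULL selects the default name "inline_small_irgs"; 50 is an arbitrary limit. */
	ir_graph_pass_t *small_inline = inline_small_irgs_pass(NULL, 50);
	(void)small_inline; /* would normally be handed to a graph pass manager */
}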
/**
 * Inlining with a different heuristic than inline_small_irgs().
 *
 * Inlines leaf functions. If inlining creates new leaf functions,
 * these are inlined as well. (If g calls f, and f calls the leaf h,
 * h is first inlined into f and then f into g.)
 *
 * Then inlines all small functions (this is not recursive).
 *
 * As a heuristic this inlining uses firm node counts. It does
 * not count auxiliary nodes such as Proj, Tuple, End, Start, Id, Sync.
 * If the ignore_runtime flag is set, calls to functions marked with the
 * mtp_property_runtime property are ignored.
 *
 * @param maxsize        Do not inline any calls if a method has more than
 *                       maxsize firm nodes. It may reach this limit by
 *                       inlining.
 * @param leafsize       Inline leaf functions if they have fewer than leafsize
 *                       nodes.
 * @param size           Inline all functions smaller than size.
 * @param ignore_runtime Count a function that only calls runtime functions
 *                       as a leaf.
 */
FIRM_API void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
                                    unsigned size, int ignore_runtime);
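A plausible invocation might look as follows (editor's sketch; the concrete numbers are invented for illustration, not values prescribed by libFirm):

static void run_leaf_inliner(void)
{
	/* Illustrative parameter choice (not an official default):
	 *  callers may grow up to 750 firm nodes through inlining,
	 *  leaf functions with fewer than 16 nodes are inlined repeatedly,
	 *  then any function smaller than 25 nodes is inlined once,
	 *  ignore_runtime = 0 counts calls to runtime functions normally. */
	inline_leaf_functions(750, 16, 25, 0);
}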
/**
 * Creates an ir_prog pass for inline_leaf_functions().
 *
 * @param name           the name of this pass or NULL
 * @param maxsize        Do not inline any calls if a method has more than
 *                       maxsize firm nodes. It may reach this limit by
 *                       inlining.
 * @param leafsize       Inline leaf functions if they have fewer than leafsize
 *                       nodes.
 * @param size           Inline all functions smaller than size.
 * @param ignore_runtime Count a function that only calls runtime functions
 *                       as a leaf.
 *
 * @return the newly created ir_prog pass
 */
FIRM_API ir_prog_pass_t *inline_leaf_functions_pass(const char *name,
                                                    unsigned maxsize, unsigned leafsize, unsigned size,
                                                    int ignore_runtime);
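The ir_prog variant bundles the same four parameters into a pass object; a brief creation sketch (editor's addition with invented values; handing the pass to an ir_prog pass manager is omitted, since that API is outside this diff):

static void build_leaf_inline_pass(void)
{
	/* NULL selects the default name "inline_leaf_functions". */
	ir_prog_pass_t *leaf_inline =
		inline_leaf_functions_pass(NULL, 750, 16, 25, 0);
	(void)leaf_inline; /* would be added to an ir_prog pass manager */
}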
/** pointer to an optimization function */
typedef void (*opt_ptr)(ir_graph *irg);
@@ -612,14 +612,6 @@ typedef struct call_entry {
	unsigned all_const:1; /**< Set if this call has only constant parameters. */
} call_entry;
/**
 * Environment for inlining small irgs.
 */
typedef struct inline_env_t {
	struct obstack obst;  /**< An obstack where call_entries are allocated. */
	list_head      calls; /**< The call entry list. */
} inline_env_t;
/**
 * Returns the irg called from a Call node. If the irg is not
 * known, NULL is returned.
@@ -643,108 +635,6 @@ static ir_graph *get_call_called_irg(ir_node *call)
	return NULL;
}
/**
 * Walker: Collect all calls to known graphs inside a graph.
 */
static void collect_calls(ir_node *call, void *env)
{
	(void) env;
	if (is_Call(call)) {
		ir_graph *called_irg = get_call_called_irg(call);
		if (called_irg != NULL) {
			/* The Call node calls a locally defined method. Remember to inline. */
			inline_env_t *ienv = (inline_env_t*)env;
			call_entry *entry = OALLOC(&ienv->obst, call_entry);
			entry->call = call;
			entry->callee = called_irg;
			entry->loop_depth = 0;
			entry->benefice = 0;
			entry->local_adr = 0;
			entry->all_const = 0;
			list_add_tail(&entry->list, &ienv->calls);
		}
	}
}
/**
 * Inlines all small methods at call sites where the called address comes
 * from a Const node that references the entity representing the called
 * method.
 * The size argument is a rough measure for the code size of the method:
 * Methods where the obstack containing the firm graph is smaller than
 * size are inlined.
 */
void inline_small_irgs(ir_graph *irg, int size)
{
	ir_graph *rem = current_ir_graph;
	inline_env_t env;
	current_ir_graph = irg;
	free_callee_info(irg);
	/* Find Call nodes to inline.
	   (We can not inline during a walk of the graph, as inlining the same
	   method several times changes the visited flag of the walked graph:
	   after the first inlining visited of the callee equals visited of
	   the caller. With the next inlining both are increased.) */
	obstack_init(&env.obst);
	INIT_LIST_HEAD(&env.calls);
	irg_walk_graph(irg, NULL, collect_calls, &env);
	if (! list_empty(&env.calls)) {
		/* There are calls to inline */
		ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
		collect_phiprojs(irg);
		list_for_each_entry(call_entry, entry, &env.calls, list) {
			ir_graph *callee = entry->callee;
			ir_entity *called = get_irg_entity(callee);
			mtp_additional_properties props
				= get_entity_additional_properties(called);
			if (props & mtp_property_noinline)
				continue;
			struct obstack *const obst = get_irg_obstack(callee);
			if ((props & mtp_property_always_inline) ||
			    _obstack_memory_used(obst) - (int)obstack_room(obst) < size) {
				inline_method(entry->call, callee);
			}
		}
		ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
	}
	obstack_free(&env.obst, NULL);
	current_ir_graph = rem;
}
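The inlining test above measures callee size by its obstack footprint. Factored out for clarity (editor's sketch; irg_code_size_estimate is an invented name, the expression is exactly the one used above and relies on the same headers as the surrounding file):

/* Rough code-size estimate: bytes handed out by the graph's obstack
 * minus the free room left in the current chunk. */
static int irg_code_size_estimate(ir_graph *irg)
{
	struct obstack *const obst = get_irg_obstack(irg);
	return _obstack_memory_used(obst) - (int)obstack_room(obst);
}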
typedef struct inline_small_irgs_pass_t {
	ir_graph_pass_t pass;
	int size;
} inline_small_irgs_pass_t;
/**
 * Wrapper to run inline_small_irgs() as a pass.
 */
static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
{
	inline_small_irgs_pass_t *pass = (inline_small_irgs_pass_t*)context;
	inline_small_irgs(irg, pass->size);
	return 0;
}
/* create a pass for inline_small_irgs() */
ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size)
{
	inline_small_irgs_pass_t *pass = XMALLOCZ(inline_small_irgs_pass_t);
	pass->size = size;
	return def_graph_pass_constructor(
		&pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
}
/**
 * Environment for inlining irgs.
 */
@@ -852,26 +742,6 @@ static void collect_calls2(ir_node *call, void *ctx)
	}
}
/**
 * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
 * hence this irg is a leaf (it does not call any other graph).
 */
inline static int is_leaf(ir_graph *irg)
{
	inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
	return env->n_call_nodes == 0;
}
/**
 * Returns TRUE if the number of nodes in the callee is
 * smaller than size in the irg's environment.
 */
inline static int is_smaller(ir_graph *callee, unsigned size)
{
	inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
	return env->n_nodes < size;
}
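Taken together, the decision made in the first phase of inline_leaf_functions() further below boils down to a small predicate (editor's restatement of that condition; should_inline_in_leaf_phase is an invented name):

/* A callee is considered in the leaf phase only if it is a leaf (contains
 * no Call nodes); it is then taken if it is small enough or explicitly
 * marked always_inline, unless it is marked noinline. */
static int should_inline_in_leaf_phase(ir_graph *callee, unsigned leafsize,
                                       mtp_additional_properties props)
{
	if (props & mtp_property_noinline)
		return 0;
	return is_leaf(callee)
	    && (is_smaller(callee, leafsize) || (props & mtp_property_always_inline));
}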
/**
 * Duplicate a call entry.
 *
@@ -894,311 +764,6 @@ static call_entry *duplicate_call_entry(const call_entry *entry,
	return nentry;
}
/**
 * Append all call nodes of the source environment to the call list of the
 * destination environment.
 *
 * @param dst        destination environment
 * @param src        source environment
 * @param loop_depth the loop depth of the call that is replaced by the src list
 */
static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
{
	call_entry *nentry;
	/* Note that the src list points to Call nodes in the inlined graph, but
	   we need Call nodes in our graph. Luckily the inliner leaves this information
	   in the link field. */
	list_for_each_entry(call_entry, entry, &src->calls, list) {
		nentry = duplicate_call_entry(entry, (ir_node*)get_irn_link(entry->call), loop_depth);
		list_add_tail(&nentry->list, &dst->calls);
	}
	dst->n_call_nodes += src->n_call_nodes;
	dst->n_nodes += src->n_nodes;
}
/*
 * Inlines small leaf methods at call sites where the called address comes
 * from a Const node that references the entity representing the called
 * method.
 * The size arguments are rough measures for the code size of a method;
 * here the firm node count of the graph is used (auxiliary nodes such as
 * Proj, Tuple, End, Start, Id and Sync are not counted).
 */
void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
                           unsigned size, int ignore_runtime)
{
	inline_irg_env *env;
	ir_graph *irg;
	size_t i, n_irgs;
	ir_graph *rem;
	int did_inline;
	wenv_t wenv;
	pmap *copied_graphs;
	pmap_entry *pm_entry;
	rem = current_ir_graph;
	obstack_init(&temp_obst);
	/* a map for the copied graphs, used to inline recursive calls */
	copied_graphs = pmap_create();
	/* extend all irgs by a temporary data structure for inlining. */
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i)
		set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
	/* Pre-compute information in temporary data structure. */
	wenv.ignore_runtime = ignore_runtime;
	wenv.ignore_callers = 0;
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		free_callee_info(irg);
		assure_irg_properties(irg,
			IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
		wenv.x = (inline_irg_env*)get_irg_link(irg);
		irg_walk_graph(irg, NULL, collect_calls2, &wenv);
		confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
	}
	/* -- and now inline. -- */
	/* Inline leafs recursively -- we might construct new leafs. */
	do {
		did_inline = 0;
		for (i = 0; i < n_irgs; ++i) {
			int phiproj_computed = 0;
			current_ir_graph = get_irp_irg(i);
			env = (inline_irg_env*)get_irg_link(current_ir_graph);
			ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
			list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
				if (env->n_nodes > maxsize)
					break;
				ir_node *call = entry->call;
				ir_graph *callee = entry->callee;
				ir_entity *called = get_irg_entity(callee);
				mtp_additional_properties props
					= get_entity_additional_properties(called);
				if (props & mtp_property_noinline)
					continue;
				if (is_leaf(callee) && (
				    is_smaller(callee, leafsize) || (props & mtp_property_always_inline))) {
					if (!phiproj_computed) {
						phiproj_computed = 1;
						collect_phiprojs(current_ir_graph);
					}
					did_inline = inline_method(call, callee);
					if (did_inline) {
						inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
						/* call was inlined, Phi/Projs for current graph must be recomputed */
						phiproj_computed = 0;
						/* Do some statistics */
						env->got_inline = 1;
						--env->n_call_nodes;
						env->n_nodes += callee_env->n_nodes;
						--callee_env->n_callers;
						/* remove this call from the list */
						list_del(&entry->list);
						continue;
					}
				}
			}
			ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
		}
	} while (did_inline);
	/* inline other small functions. */
	for (i = 0; i < n_irgs; ++i) {
		int phiproj_computed = 0;
		current_ir_graph = get_irp_irg(i);
		env = (inline_irg_env*)get_irg_link(current_ir_graph);
		ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
		/* note that the list of possible calls is updated during the process */
		list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
			ir_node *call = entry->call;
			ir_graph *callee = entry->callee;
			ir_entity *called = get_irg_entity(callee);
			mtp_additional_properties props = get_entity_additional_properties(called);
			if (props & mtp_property_noinline)
				continue;
			ir_graph *calleee = pmap_get(ir_graph, copied_graphs, callee);
			if (calleee != NULL) {
				/*
				 * Remap callee if we have a copy.
				 * FIXME: Should we do this only for recursive Calls ?
				 */
				callee = calleee;
			}
			if ((props & mtp_property_always_inline) ||
			    (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
				if (current_ir_graph == callee) {
					/*
					 * Recursive call: we cannot directly inline because we cannot walk
					 * the graph and change it. So we have to make a copy of the graph
					 * first.
					 */
					inline_irg_env *callee_env;
					ir_graph *copy;
					ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
					/*
					 * No copy yet, create one.
					 * Note that recursive methods are never leafs, so it is sufficient
					 * to test this condition here.
					 */
					copy = create_irg_copy(callee);
					/* create_irg_copy() destroys the Proj links, recompute them */
					phiproj_computed = 0;
					ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
					/* allocate new environment */
					callee_env = alloc_inline_irg_env();
					set_irg_link(copy, callee_env);
					assure_irg_properties(copy,
						IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
					wenv.x = callee_env;
					wenv.ignore_callers = 1;
					irg_walk_graph(copy, NULL, collect_calls2, &wenv);
					/*
					 * Enter the entity of the original graph. This is needed
					 * for inline_method(). However, note that ent->irg still points
					 * to callee, NOT to copy.
					 */
					set_irg_entity(copy, get_irg_entity(callee));
					pmap_insert(copied_graphs, callee, copy);
					callee = copy;
					/* we have only one caller: the original graph */
					callee_env->n_callers = 1;
					callee_env->n_callers_orig = 1;
				}
				if (! phiproj_computed) {
					phiproj_computed = 1;
					collect_phiprojs(current_ir_graph);
				}
				did_inline = inline_method(call, callee);
				if (did_inline) {
					inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
					/* call was inlined, Phi/Projs for current graph must be recomputed */
					phiproj_computed = 0;
					/* callee was inlined. Append its call list. */
					env->got_inline = 1;
					--env->n_call_nodes;
					append_call_list(env, callee_env, entry->loop_depth);
					--callee_env->n_callers;
					/* after we have inlined callee, all called methods inside callee
					   are now called once more */
					list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
						inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
						++penv->n_callers;
					}
					/* remove this call from the list */
					list_del(&entry->list);
					continue;
				}
			}
		}
		ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
	}
	for (i = 0; i < n_irgs; ++i) {
		irg = get_irp_irg(i);
		env = (inline_irg_env*)get_irg_link(irg);
		if (env->got_inline) {
			optimize_graph_df(irg);
			optimize_cf(irg);
		}
		if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
			DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
				env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
				env->n_callers_orig, env->n_callers,
				get_entity_name(get_irg_entity(irg))));
		}
	}
	/* kill the copied graphs: we don't need them anymore */
	foreach_pmap(copied_graphs, pm_entry) {
		ir_graph *copy = (ir_graph*)pm_entry->value;
		/* reset the entity, otherwise it will be deleted in the next step ... */
		set_irg_entity(copy, NULL);
		free_ir_graph(copy);
	}
	pmap_destroy(copied_graphs);
	obstack_free(&temp_obst, NULL);
	current_ir_graph = rem;
}
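As a worked example of the per-graph bookkeeping above (editor's addition; the numbers are invented), suppose a call from g to f is inlined in the second phase:

#include <assert.h>

/* Invented numbers: g starts with 200 nodes and 5 Call nodes; f has
 * 50 nodes, 2 Call nodes and exactly one caller (g). */
static void inline_bookkeeping_example(void)
{
	unsigned g_nodes = 200, g_calls = 5;
	unsigned f_nodes = 50,  f_calls = 2, f_callers = 1;

	/* inline_method(call, f) succeeded: */
	--g_calls;            /* the inlined Call node disappears             */
	g_nodes += f_nodes;   /* append_call_list(): g absorbs f's node count */
	g_calls += f_calls;   /* ... and inherits f's remaining call sites    */
	--f_callers;          /* g no longer calls f directly                 */

	assert(g_nodes == 250 && g_calls == 6 && f_callers == 0);
}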
typedef struct inline_leaf_functions_pass_t {
	ir_prog_pass_t pass;
	unsigned maxsize;
	unsigned leafsize;
	unsigned size;
	int ignore_runtime;
} inline_leaf_functions_pass_t;
/**
 * Wrapper to run inline_leaf_functions() as an ir_prog pass.
 */
static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
{
	inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
	(void)irp;
	inline_leaf_functions(
		pass->maxsize, pass->leafsize,
		pass->size, pass->ignore_runtime);
	return 0;
}
/* create a pass for inline_leaf_functions() */
ir_prog_pass_t *inline_leaf_functions_pass(
	const char *name, unsigned maxsize, unsigned leafsize,
	unsigned size, int ignore_runtime)
{
	inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
	pass->maxsize = maxsize;
	pass->leafsize = leafsize;
	pass->size = size;
	pass->ignore_runtime = ignore_runtime;
	return def_prog_pass_constructor(
		&pass->pass,
		name ? name : "inline_leaf_functions",
		inline_leaf_functions_wrapper);
}
/**
 * Calculate the parameter weights for transmitting the address of a local variable.
 */