Commit 83de1143 authored by Matthias Braun's avatar Matthias Braun
Browse files

removed some flags, ported some phases to the new debug interface

[r16573]
parent 9bf7b5a5
......@@ -228,7 +228,7 @@ static void sel_methods_walker(ir_node * node, void *env) {
*/
assert (get_entity_peculiarity(ent) == peculiarity_description);
}
else if (get_opt_optimize() && get_opt_closed_world() && get_opt_dyn_meth_dispatch() &&
else if (get_opt_closed_world() && get_opt_dyn_meth_dispatch() &&
(ARR_LEN(arr) == 1 && arr[0] != NULL)) {
ir_node *new_node;
......
......@@ -30,6 +30,7 @@
#include <string.h>
#endif
#include "debug.h"
#include "interval_analysis.h"
#include "execution_frequency.h"
#include "firm_common_t.h"
......@@ -44,6 +45,8 @@
#include "irprintf.h"
#include "hashptr.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg);
/*------------------------------------------------------------------*/
/* A new in array via a hashmap. */
/* The in array refers to the loop the block is contained in if the */
......@@ -249,9 +252,7 @@ static void construct_interval_block(ir_node *b, ir_loop *l) {
if (is_backedge(b, i)) {
if (b != get_loop_element(l, 0).node) {
if (get_firm_verbosity()) {
ir_printf("Loophead not at loop position 0. %+F\n", b);
}
DB((dbg, LEVEL_1, "Loophead not at loop position 0. %+F\n", b));
}
/* There are no backedges in the interval decomposition. */
add_region_in(b, NULL);
......@@ -280,9 +281,7 @@ static void construct_interval_block(ir_node *b, ir_loop *l) {
int found = find_inner_loop(b, l, pred, cfop);
if (!found) {
if (b != get_loop_element(l, 0).node) {
if (get_firm_verbosity()) {
ir_printf("Loop entry not at loop position 0. %+F\n", b);
}
DB((dbg, LEVEL_1, "Loop entry not at loop position 0. %+F\n", b));
}
found = find_outer_loop(l, pred_l, pred, cfop);
if (found) add_region_in(b, NULL); /* placeholder */
......@@ -326,6 +325,8 @@ void construct_intervals(ir_graph *irg) {
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
FIRM_DBG_REGISTER(dbg, "firm.ana.interval");
if (!region_attr_set)
region_attr_set = new_set(region_attr_cmp, 256);
......
......@@ -482,8 +482,6 @@ void rta_delete_dead_graphs (void)
int rem_vpi = get_visit_pseudo_irgs();
set_visit_pseudo_irgs(1);
if (!get_optimize() || !get_opt_dead_method_elimination()) return;
dead_graphs = xmalloc(sizeof(*dead_graphs) * get_irp_n_irgs());
for (i = 0; i < n_graphs; i++) {
......
......@@ -58,15 +58,6 @@ I_FLAG(control_flow_weak_simplification , 6, ON)
/** */
I_FLAG(control_flow_strong_simplification , 7, ON)
/** Reclaim memory. */
I_FLAG(dead_node_elimination , 8, ON)
/** Reassociate nodes. */
I_FLAG(reassociation , 9, ON)
/** Do inlining transformation. */
I_FLAG(inline , 10, ON)
/** Remove dynamic method dispatch. */
E_FLAG(dyn_meth_dispatch , 11, ON)
......@@ -75,30 +66,15 @@ E_FLAG(dyn_meth_dispatch , 11, ON)
*/
I_FLAG(normalize , 12, ON)
/** Remove tail-recursion. */
I_FLAG(tail_recursion , 13, ON)
/** Free never called methods */
I_FLAG(dead_method_elimination , 14, ON)
/** precise exception context */
I_FLAG(precise_exc_context , 15, ON)
/** Do loop unrolling */
I_FLAG(loop_unrolling , 16, OFF) /* currently buggy, 2.1.2006 */
/** Do Strength reduction */
I_FLAG(strength_red , 17, ON)
/** Optimize Loads and Stores */
I_FLAG(redundant_loadstore , 18, ON)
/** Optimize Fragile OPs */
I_FLAG(fragile_ops , 19, OFF)
/** Optimize function calls. */
I_FLAG(function_call , 20, ON)
/** Optimize cast nodes. */
E_FLAG(optimize_class_casts , 21, ON)
E_FLAG(suppress_downcast_optimization , 22, OFF)
......
......@@ -612,68 +612,65 @@ copy_graph_env(int copy_node_nr) {
* Adds all new nodes to a new hash table for CSE. Does not
* perform CSE, so the hash table might contain common subexpressions.
*/
void
dead_node_elimination(ir_graph *irg) {
if (get_opt_optimize() && get_opt_dead_node_elimination()) {
ir_graph *rem;
void dead_node_elimination(ir_graph *irg) {
ir_graph *rem;
#ifdef INTERPROCEDURAL_VIEW
int rem_ipview = get_interprocedural_view();
int rem_ipview = get_interprocedural_view();
#endif
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
/* inform statistics that we started a dead-node elimination run */
hook_dead_node_elim(irg, 1);
/* inform statistics that we started a dead-node elimination run */
hook_dead_node_elim(irg, 1);
/* Remember external state of current_ir_graph. */
rem = current_ir_graph;
current_ir_graph = irg;
/* Remember external state of current_ir_graph. */
rem = current_ir_graph;
current_ir_graph = irg;
#ifdef INTERPROCEDURAL_VIEW
set_interprocedural_view(0);
set_interprocedural_view(0);
#endif
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_phase_state(irg) != phase_building);
/* Handle graph state */
free_callee_info(irg);
free_irg_outs(irg);
free_trouts();
/* Handle graph state */
free_callee_info(irg);
free_irg_outs(irg);
free_trouts();
/* @@@ so far we loose loops when copying */
free_loop_information(irg);
/* @@@ so far we loose loops when copying */
free_loop_information(irg);
set_irg_doms_inconsistent(irg);
set_irg_doms_inconsistent(irg);
/* A quiet place, where the old obstack can rest in peace,
until it will be cremated. */
graveyard_obst = irg->obst;
/* A quiet place, where the old obstack can rest in peace,
until it will be cremated. */
graveyard_obst = irg->obst;
/* A new obstack, where the reachable nodes will be copied to. */
rebirth_obst = xmalloc(sizeof(*rebirth_obst));
irg->obst = rebirth_obst;
obstack_init(irg->obst);
irg->last_node_idx = 0;
/* A new obstack, where the reachable nodes will be copied to. */
rebirth_obst = xmalloc(sizeof(*rebirth_obst));
irg->obst = rebirth_obst;
obstack_init(irg->obst);
irg->last_node_idx = 0;
/* We also need a new value table for CSE */
del_identities(irg->value_table);
irg->value_table = new_identities();
/* We also need a new value table for CSE */
del_identities(irg->value_table);
irg->value_table = new_identities();
/* Copy the graph from the old to the new obstack */
copy_graph_env(/*copy_node_nr=*/1);
/* Copy the graph from the old to the new obstack */
copy_graph_env(/*copy_node_nr=*/1);
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
xfree(graveyard_obst); /* ... then free it. */
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
xfree(graveyard_obst); /* ... then free it. */
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
current_ir_graph = rem;
current_ir_graph = rem;
#ifdef INTERPROCEDURAL_VIEW
set_interprocedural_view(rem_ipview);
set_interprocedural_view(rem_ipview);
#endif
}
}
/**
......@@ -995,8 +992,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
ir_type *called_frame;
irg_inline_property prop = get_irg_inline_property(called_graph);
if ( (prop < irg_inline_forced) &&
(!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden))) return 0;
if ( (prop < irg_inline_forced) || (prop == irg_inline_forbidden))
return 0;
/* Do not inline variadic functions. */
if (get_method_variadicity(get_entity_type(get_irg_entity(called_graph))) == variadicity_variadic)
......@@ -1388,8 +1385,6 @@ void inline_small_irgs(ir_graph *irg, int size) {
call_entry *entry;
DEBUG_ONLY(firm_dbg_module_t *dbg;)
if (!(get_opt_optimize() && get_opt_inline())) return;
FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
current_ir_graph = irg;
......@@ -1571,8 +1566,6 @@ void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_run
struct obstack obst;
DEBUG_ONLY(firm_dbg_module_t *dbg;)
if (!(get_opt_optimize() && get_opt_inline())) return;
FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
rem = current_ir_graph;
obstack_init(&obst);
......
......@@ -3798,9 +3798,6 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
}
}
if (!get_opt_reassociation())
return proj;
/*
* First step: normalize the compare op
* by placing the constant on the right side
......@@ -5230,19 +5227,17 @@ void del_identities(pset *value_table) {
* @param n The node to normalize
*/
static void normalize_node(ir_node *n) {
if (get_opt_reassociation()) {
if (is_op_commutative(get_irn_op(n))) {
ir_node *l = get_binop_left(n);
ir_node *r = get_binop_right(n);
/* For commutative operators perform a OP b == b OP a but keep
* constants on the RIGHT side. This helps greatly in some
* optimizations. Moreover we use the idx number to make the form
* deterministic. */
if (!operands_are_normalized(l, r)) {
set_binop_left(n, r);
set_binop_right(n, l);
}
if (is_op_commutative(get_irn_op(n))) {
ir_node *l = get_binop_left(n);
ir_node *r = get_binop_right(n);
/* For commutative operators perform a OP b == b OP a but keep
* constants on the RIGHT side. This helps greatly in some
* optimizations. Moreover we use the idx number to make the form
* deterministic. */
if (!operands_are_normalized(l, r)) {
set_binop_left(n, r);
set_binop_right(n, l);
}
}
} /* normalize_node */
......
......@@ -513,9 +513,6 @@ void optimize_funccalls(int force_run)
unsigned num_const = 0;
unsigned num_pure = 0;
if (! get_opt_function_call())
return;
/* prepare: mark all graphs as not analyzed */
n = get_irp_n_irgs();
for (i = n - 1; i >= 0; --i)
......
......@@ -625,7 +625,7 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
if (res)
break;
return res;
}
}
......@@ -1201,6 +1201,7 @@ static void do_load_store_optimize(ir_node *n, void *env) {
case iro_Phi:
wenv->changes |= optimize_phi(n, wenv);
break;
default:
;
......@@ -1728,9 +1729,6 @@ void optimize_load_store(ir_graph *irg) {
assert(get_irg_pinned(irg) != op_pin_state_floats &&
"LoadStore optimization needs pinned graph");
if (! get_opt_redundant_loadstore())
return;
/* we need landing pads */
remove_critical_cf_edges(irg);
......
......@@ -948,8 +948,6 @@ void optimize_loop_unrolling(ir_graph *irg /* unroll factor, max body size */)
ir_graph *rem;
int unroll_done = 0;
if ( !get_opt_loop_unrolling()) return;
rem = current_ir_graph;
current_ir_graph = irg;
......
......@@ -749,10 +749,6 @@ void optimize_reassociation(ir_graph *irg)
assert(get_irg_pinned(irg) != op_pin_state_floats &&
"Reassociation needs pinned graph to work properly");
/* reassociation needs constant folding */
if (!get_opt_reassociation() || !get_opt_constant_folding())
return;
rem = current_ir_graph;
current_ir_graph = irg;
......
......@@ -31,6 +31,7 @@
#include <string.h>
#include <assert.h>
#include "debug.h"
#include "iroptimize.h"
#include "scalar_replace.h"
#include "array.h"
......@@ -47,6 +48,8 @@
#include "irhooks.h"
#include "xmalloc.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg);
/**
* the environment for collecting data
*/
......@@ -297,9 +300,6 @@ int opt_tail_rec_irg(ir_graph *irg) {
ir_node *rets = NULL;
ir_type *mtd_type, *call_type;
if (! get_opt_tail_recursion() || ! get_opt_optimize())
return 0;
if (! check_lifetime_of_locals(irg))
return 0;
......@@ -390,9 +390,8 @@ int opt_tail_rec_irg(ir_graph *irg) {
if (! n_tail_calls)
return 0;
if (get_opt_tail_recursion_verbose() && get_firm_verbosity() > 1)
printf(" Performing tail recursion for graph %s and %d Calls\n",
get_entity_ld_name(get_irg_entity(irg)), n_tail_calls);
DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n",
get_entity_ld_name(get_irg_entity(irg)), n_tail_calls));
hook_tail_rec(irg, n_tail_calls);
do_opt_tail_rec(irg, rets, n_tail_calls);
......@@ -408,8 +407,7 @@ void opt_tail_recursion(void) {
int n_opt_applications = 0;
ir_graph *irg;
if (! get_opt_tail_recursion() || ! get_opt_optimize())
return;
FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
irg = get_irp_irg(i);
......@@ -420,6 +418,6 @@ void opt_tail_recursion(void) {
++n_opt_applications;
}
if (get_opt_tail_recursion_verbose())
printf("Performed tail recursion for %d of %d graphs\n", n_opt_applications, get_irp_n_irgs());
DB((dbg, LEVEL_1, "Performed tail recursion for %d of %d graphs\n",
n_opt_applications, get_irp_n_irgs()));
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment