Commit dc401952 authored by Matthias Braun

Clean up statistics code: replace the ad-hoc node_stat_t counters and the local insn/block counting in bechordal_main with the shared be_node_stats_t and be_count_insns()/be_count_blocks() helpers, and drop the old be_do_stat_* hooks together with their call sites.

[r22378]
parent 6365c91e
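The hunks below replace the ad-hoc node_stat_t bookkeeping in bechordal_main with a shared be_node_stats_t interface (be_collect_node_stats, be_subtract_node_stats, be_emit_node_stats, be_copy_node_stats, with counters indexed by BE_STAT_* tags). The following is a minimal, self-contained sketch of that counter-array idea; only BE_STAT_PERMS and BE_STAT_COPIES appear in this diff, so the remaining enum entries, the typedef, and the printf stand-in for stat_ev_dbl() are illustrative assumptions, not the actual bestat implementation.

/* Sketch of the counter-array idea behind be_node_stats_t (assumptions marked). */
#include <stdio.h>

typedef enum {
	BE_STAT_PHIS, BE_STAT_MEM_PHIS, BE_STAT_COPIES, BE_STAT_PERMS,
	BE_STAT_SPILLS, BE_STAT_RELOADS, BE_STAT_REMATS, BE_STAT_COUNT
} be_stat_tag_t;

typedef unsigned long be_node_stats_t[BE_STAT_COUNT];

/* stats -= sub: turn an absolute snapshot into a per-phase delta */
static void subtract_node_stats(be_node_stats_t stats, const be_node_stats_t sub)
{
	int i;
	for (i = 0; i < BE_STAT_COUNT; ++i)
		stats[i] -= sub[i];
}

/* the real code presumably forwards every entry to stat_ev_dbl() */
static void emit_node_stats(const be_node_stats_t stats, const char *prefix)
{
	printf("%sperms %lu\n",  prefix, stats[BE_STAT_PERMS]);
	printf("%scopies %lu\n", prefix, stats[BE_STAT_COPIES]);
}

int main(void)
{
	be_node_stats_t baseline = {0}, current = {0};

	baseline[BE_STAT_COPIES] = 3;            /* snapshot taken before a phase */
	current[BE_STAT_COPIES]  = 7;            /* snapshot taken after it       */
	current[BE_STAT_PERMS]   = 2;

	subtract_node_stats(current, baseline);  /* report only the phase's delta */
	emit_node_stats(current, "bechordal_");  /* -> bechordal_perms 2, bechordal_copies 4 */
	return 0;
}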
@@ -222,100 +222,8 @@ static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
irg_walk_graph(chordal_env->irg, NULL, memory_operand_walker, chordal_env);
}
/**
* Sorry for doing stats again...
*/
typedef struct _node_stat_t {
unsigned int n_phis; /**< Phis of the current register class. */
unsigned int n_mem_phis; /**< Memory Phis (Phis with spill operands). */
unsigned int n_copies; /**< Copies */
unsigned int n_perms; /**< Perms */
unsigned int n_spills; /**< Spill nodes */
unsigned int n_reloads; /**< Reloads */
unsigned int n_remats; /**< Remats */
} node_stat_t;
struct node_stat_walker {
node_stat_t *stat;
const arch_env_t *arch_env;
};
static void node_stat_walker(ir_node *irn, void *data)
{
struct node_stat_walker *env = data;
const arch_env_t *aenv = env->arch_env;
/* if the node is a normal phi */
if(is_Phi(irn)) {
if (get_irn_mode(irn) == mode_M) {
env->stat->n_mem_phis++;
} else {
env->stat->n_phis++;
}
} else {
arch_irn_class_t classify = arch_irn_classify(aenv, irn);
if(classify & arch_irn_class_spill)
++env->stat->n_spills;
if(classify & arch_irn_class_reload)
++env->stat->n_reloads;
if(classify & arch_irn_class_remat)
++env->stat->n_remats;
if(classify & arch_irn_class_copy)
++env->stat->n_copies;
if(classify & arch_irn_class_perm)
++env->stat->n_perms;
}
}
static void node_stats(be_irg_t *birg, node_stat_t *stat)
{
struct node_stat_walker env;
memset(stat, 0, sizeof(*stat));
env.arch_env = birg->main_env->arch_env;
env.stat = stat;
irg_walk_graph(birg->irg, NULL, node_stat_walker, &env);
}
static void insn_count_walker(ir_node *irn, void *data)
{
unsigned long *cnt = data;
switch(get_irn_opcode(irn)) {
case iro_Proj:
case iro_Phi:
case iro_Start:
case iro_End:
break;
default:
(*cnt)++;
}
}
static unsigned long count_insns(ir_graph *irg)
{
unsigned long cnt = 0;
irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
return cnt;
}
static void block_count_walker(ir_node *node, void *data)
{
unsigned long *cnt = data;
if (node == get_irg_end_block(current_ir_graph))
return;
(*cnt)++;
}
static unsigned long count_blocks(ir_graph *irg)
{
unsigned long cnt = 0;
irg_block_walk_graph(irg, block_count_walker, NULL, &cnt);
return cnt;
}
static node_stat_t last_node_stat;
static be_node_stats_t last_node_stats;
/**
* Perform things which need to be done per register class before spilling.
@@ -400,19 +308,21 @@ static void post_spill(post_spill_env_t *pse, int iteration) {
BE_TIMER_POP(t_ra_ifg);
stat_ev_if {
be_ifg_stat_t stat;
node_stat_t node_stat;
be_ifg_stat_t stat;
be_node_stats_t node_stats;
be_ifg_stat(birg, chordal_env->ifg, &stat);
stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);
node_stats(birg, &node_stat);
be_collect_node_stats(&node_stats, birg);
be_subtract_node_stats(&node_stats, &last_node_stats);
stat_ev_dbl("bechordal_perms_before_coal",
node_stat.n_perms - last_node_stat.n_perms);
node_stats[BE_STAT_PERMS]);
stat_ev_dbl("bechordal_copies_before_coal",
node_stat.n_copies - last_node_stat.n_copies);
node_stats[BE_STAT_COPIES]);
}
/* copy minimization */
@@ -481,9 +391,7 @@ static void be_ra_chordal_main(be_irg_t *birg)
BE_TIMER_POP(t_ra_prolog);
stat_ev_if {
be_stat_ev("bechordal_insns_before", count_insns(irg));
be_stat_ev("bechordal_blocks_before", count_blocks(irg));
node_stats(birg, &last_node_stat);
be_collect_node_stats(&last_node_stats, birg);
}
if (! arch_code_generator_has_spiller(birg->cg)) {
@@ -519,25 +427,13 @@ static void be_ra_chordal_main(be_irg_t *birg)
post_spill(&pse, 0);
stat_ev_if {
node_stat_t node_stat;
node_stats(birg, &node_stat);
stat_ev_dbl("bechordal_phis",
node_stat.n_phis - last_node_stat.n_phis);
stat_ev_dbl("bechordal_mem_phis",
node_stat.n_mem_phis - last_node_stat.n_mem_phis);
stat_ev_dbl("bechordal_reloads",
node_stat.n_reloads - last_node_stat.n_reloads);
stat_ev_dbl("bechordal_remats",
node_stat.n_remats - last_node_stat.n_remats);
stat_ev_dbl("bechordal_spills",
node_stat.n_spills - last_node_stat.n_spills);
stat_ev_dbl("bechordal_perms_after_coal",
node_stat.n_perms - last_node_stat.n_perms);
stat_ev_dbl("bechordal_copies_after_coal",
node_stat.n_copies - last_node_stat.n_copies);
last_node_stat = node_stat;
be_node_stats_t node_stats;
be_collect_node_stats(&node_stats, birg);
be_subtract_node_stats(&node_stats, &last_node_stats);
be_emit_node_stats(&node_stats, "bechordal_");
be_copy_node_stats(&last_node_stats, &node_stats);
stat_ev_ctx_pop("bechordal_cls");
}
}
@@ -578,10 +474,6 @@ static void be_ra_chordal_main(be_irg_t *birg)
BE_TIMER_POP(t_ra_epilog);
BE_TIMER_POP(t_ra_other);
stat_ev_if {
be_stat_ev("bechordal_insns_after", count_insns(irg));
}
}
static be_ra_t be_ra_chordal_allocator = {
......
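In the bechordal hunks above, the local count_insns()/count_blocks() helpers disappear, and bemain (further down) instead calls be_count_insns()/be_count_blocks(). A sketch of those shared helpers, assuming they keep the semantics of the removed local versions (count every node except Proj, Phi, Start and End; count every block except the end block):

#include "irgraph.h"
#include "irgwalk.h"
#include "irnode.h"

static void insn_count_walker(ir_node *irn, void *data)
{
	unsigned long *cnt = data;

	switch (get_irn_opcode(irn)) {
	case iro_Proj:
	case iro_Phi:
	case iro_Start:
	case iro_End:
		break;             /* bookkeeping nodes, not real instructions */
	default:
		(*cnt)++;
	}
}

unsigned long be_count_insns(ir_graph *irg)
{
	unsigned long cnt = 0;
	irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
	return cnt;
}

static void block_count_walker(ir_node *node, void *data)
{
	unsigned long *cnt = data;

	/* the end block contains no code and is therefore not counted */
	if (node == get_irg_end_block(get_irn_irg(node)))
		return;
	(*cnt)++;
}

unsigned long be_count_blocks(ir_graph *irg)
{
	unsigned long cnt = 0;
	irg_block_walk_graph(irg, block_count_walker, NULL, &cnt);
	return cnt;
}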
@@ -516,8 +516,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
/* Iterate over all remaining nodes */
while (ir_nodeset_size(&be.cands) > 0) {
ir_nodeset_iterator_t iter;
/* collect statistics about amount of ready nodes */
be_do_stat_sched_ready(block, &be.cands);
/* Keeps must be scheduled immediately */
foreach_ir_nodeset(&be.cands, irn, iter) {
......
@@ -365,8 +365,6 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
real_size = n - get_n_checked_pairs(pairs, n);
be_do_stat_perm(reg_class->name, reg_class->n_regs, irn, block, n, real_size);
/* check for cycles and chains */
while (get_n_checked_pairs(pairs, n) < n) {
i = n_ops = 0;
@@ -496,8 +494,6 @@ static void lower_perm_node(ir_node *irn, void *walk_env) {
}
}
be_do_stat_permcycle(reg_class->name, irn, block, cycle->type == PERM_CHAIN, cycle->n_elems, n_ops);
free((void *) cycle->elems);
free(cycle);
}
......
@@ -459,9 +459,6 @@ static void initialize_birg(be_irg_t *birg, ir_graph *irg, be_main_env_t *env)
dump(DUMP_INITIAL, irg, "-begin", dump_ir_block_graph);
be_stat_init_irg(env->arch_env, irg);
be_do_stat_nodes(irg, "01 Begin");
/* set the current graph (this is important for several firm functions) */
current_ir_graph = irg;
@@ -615,7 +612,11 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
/* reset the phi handler. */
be_phi_handler_reset();
stat_ev_ctx_push_fobj("bemain_irg", irg);
stat_ev_if {
stat_ev_ctx_push_fobj("bemain_irg", irg);
be_stat_ev("bemain_insns_start", be_count_insns(irg));
be_stat_ev("bemain_blocks_start", be_count_blocks(irg));
}
/* stop and reset timers */
BE_TIMER_PUSH(t_other); /* t_other */
@@ -646,7 +647,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
BE_TIMER_POP(t_abi);
dump(DUMP_ABI, irg, "-abi", dump_ir_block_graph);
be_do_stat_nodes(irg, "02 Abi");
if (be_options.vrfy_option == BE_VRFY_WARN) {
be_check_dominance(irg);
@@ -664,8 +664,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
/* reset the phi handler. */
be_phi_handler_reset();
be_do_stat_nodes(irg, "03 Prepare");
dump(DUMP_PREPARED, irg, "-prepared", dump_ir_block_graph);
if (be_options.vrfy_option == BE_VRFY_WARN) {
@@ -722,8 +720,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
be_sched_vrfy(birg, be_options.vrfy_option);
BE_TIMER_POP(t_verify);
be_do_stat_nodes(irg, "04 Schedule");
/* introduce patterns to assure constraints */
BE_TIMER_PUSH(t_constr);
/* we switch off optimizations here, because they might cause trouble */
@@ -738,7 +734,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
BE_TIMER_POP(t_constr);
dump(DUMP_SCHED, irg, "-assured", dump_ir_block_graph_sched);
be_do_stat_nodes(irg, "05 Constraints");
/* stuff needs to be done after scheduling but before register allocation */
BE_TIMER_PUSH(t_codegen);
@@ -757,9 +752,12 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
be_sched_vrfy(birg, be_options.vrfy_option);
BE_TIMER_POP(t_verify);
#ifdef FIRM_STATISTICS
stat_ev_dbl("bemain_costs_before_ra", be_estimate_irg_costs(irg, arch_env, birg->exec_freq));
#endif
stat_ev_if {
stat_ev_dbl("bemain_costs_before_ra",
be_estimate_irg_costs(irg, arch_env, birg->exec_freq));
be_stat_ev("bemain_insns_before_ra", be_count_insns(irg));
be_stat_ev("bemain_blocks_before_ra", be_count_blocks(irg));
}
/* Do register allocation */
be_allocate_registers(birg);
@@ -769,7 +767,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
#endif
dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);
be_do_stat_nodes(irg, "06 Register Allocation");
/* let the code generator prepare the graph for emitter */
BE_TIMER_PUSH(t_finish);
@@ -791,6 +788,11 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
dump(DUMP_FINAL, irg, "-finish", dump_ir_block_graph_sched);
stat_ev_if {
be_stat_ev("bemain_insns_finish", be_count_insns(irg));
be_stat_ev("bemain_blocks_finish", be_count_blocks(irg));
}
/* check schedule and register allocation */
BE_TIMER_PUSH(t_verify);
if (be_options.vrfy_option == BE_VRFY_WARN) {
@@ -821,7 +823,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
be_abi_free(birg->abi);
BE_TIMER_POP(t_abi);
be_do_stat_nodes(irg, "07 Final");
restore_optimization_state(&state);
BE_TIMER_POP(t_other);
......
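The bemain hunks above also switch the per-irg instrumentation from an unconditional #ifdef FIRM_STATISTICS block to stat_ev_if guards, so the whole-graph counting walks only run when event collection is actually enabled. A short sketch of the guarded pattern, stitched together from those hunks (the placement relative to the backend phases is illustrative):

stat_ev_if {
	stat_ev_ctx_push_fobj("bemain_irg", irg);   /* tag following events with the irg */
	be_stat_ev("bemain_insns_start", be_count_insns(irg));
	be_stat_ev("bemain_blocks_start", be_count_blocks(irg));
}

/* ... ABI lowering, scheduling, constraint handling ... */

stat_ev_if {
	stat_ev_dbl("bemain_costs_before_ra",
	            be_estimate_irg_costs(irg, arch_env, birg->exec_freq));
	be_stat_ev("bemain_insns_before_ra", be_count_insns(irg));
	be_stat_ev("bemain_blocks_before_ra", be_count_blocks(irg));
}

/* ... register allocation, finish phase ... */

stat_ev_if {
	be_stat_ev("bemain_insns_finish", be_count_insns(irg));
	be_stat_ev("bemain_blocks_finish", be_count_blocks(irg));
}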
@@ -98,7 +98,6 @@ static be_uses_t *uses; /**< env for the next-use magic */
static ir_node *instr; /**< current instruction */
static unsigned instr_nr; /**< current instruction number
(relative to block start) */
static ir_nodeset_t used;
static spill_env_t *senv; /**< see bespill.h */
static pdeq *worklist;
@@ -336,10 +335,6 @@ static void displace(workset_t *new_vals, int is_usage)
workset_foreach(new_vals, val, iter) {
bool reloaded = false;
/* mark value as used */
if (is_usage)
ir_nodeset_insert(&used, val);
if (! workset_contains(ws, val)) {
DB((dbg, DBG_DECIDE, " insert %+F\n", val));
if (is_usage) {
@@ -400,14 +395,6 @@ static void displace(workset_t *new_vals, int is_usage)
after_pos));
be_add_spill(senv, val, after_pos);
}
} else {
/* Logic for not needed live-ins: If a value is disposed
* before its first use, remove it from start workset
* We don't do this for phis though */
if (!is_Phi(val) && ! ir_nodeset_contains(&used, val)) {
workset_remove(ws_start, val);
DB((dbg, DBG_DECIDE, " (and removing %+F from start workset)\n", val));
}
}
}
@@ -829,7 +816,6 @@ static void belady(ir_node *block)
/* process the block from start to end */
DB((dbg, DBG_WSETS, "Processing...\n"));
ir_nodeset_init(&used);
instr_nr = 0;
/* TODO: this leaks (into the obstack)... */
new_vals = new_workset();
@@ -879,7 +865,6 @@ static void belady(ir_node *block)
instr_nr++;
}
ir_nodeset_destroy(&used);
/* Remember end-workset for this block */
block_info->end_workset = workset_clone(ws);
......
@@ -20,7 +20,7 @@
/**
* @file
* @brief Provides several statistic functions for the backend.
* @author Christian Wuerdig
* @author Christian Wuerdig, Matthias Braun
* @version $Id$
*/
#ifdef HAVE_CONFIG_H
@@ -34,84 +34,19 @@
#include "irgwalk.h"
#include "irhooks.h"
#include "execfreq.h"
#include "dbginfo_t.h"
#include "firmstat_t.h"
#include "irtools.h"
#include "pset.h"
#include "statev.h"
#include "error.h"
#include "bearch_t.h"
#include "beirg_t.h"
#include "bestat.h"
#include "belive_t.h"
#include "besched.h"
#include "benode_t.h"
#ifdef FIRM_STATISTICS
typedef struct _be_stat_irg_t {
ir_graph *irg; /**< the irg, the statistic is about */
pset *phases; /**< node statistics for each phase */
struct obstack obst; /**< the obstack containing the information */
const arch_env_t *arch_env; /**< the current arch env */
} be_stat_irg_t;
typedef struct _be_stat_phase_t {
const arch_env_t *arch_env; /**< the current arch env */
const char *phase; /**< the name of the phase the statistic is about */
unsigned long num_nodes; /**< overall number of reachable nodes in the irg */
unsigned long num_data; /**< number of data nodes ((mode_datab && ! Proj && ! Phi) || mode_T) */
unsigned long num_proj; /**< number of Projs */
unsigned long num_phi; /**< number of Phis */
unsigned long num_load; /**< number of Loads */
unsigned long num_store; /**< number of Stores */
unsigned long num_spill; /**< number of Spills */
unsigned long num_reload; /**< number of Reloads */
} be_stat_phase_t;
static set *be_stat_data = NULL;
static int cmp_stat_phase(const void *a, const void *b) {
const be_stat_phase_t *p1 = a;
const be_stat_phase_t *p2 = b;
return p1->phase != p2->phase;
}
static int cmp_stat_data(const void *a, const void *b, size_t len) {
const be_stat_irg_t *p1 = a;
const be_stat_irg_t *p2 = b;
(void) len;
return p1->irg != p2->irg;
}
static be_stat_irg_t *find_stat_irg_entry(ir_graph *irg) {
be_stat_irg_t *entry, key;
if (! be_stat_data)
return NULL;
key.irg = irg;
entry = set_find(be_stat_data, &key, sizeof(key), HASH_PTR(irg));
return entry;
}
static be_stat_irg_t *get_stat_irg_entry(ir_graph *irg) {
be_stat_irg_t *entry, key;
if (! be_stat_data)
return NULL;
entry = find_stat_irg_entry(irg);
if (! entry) {
key.irg = irg;
entry = set_insert(be_stat_data, &key, sizeof(key), HASH_PTR(irg));
}
return entry;
}
typedef struct pressure_walker_env_t pressure_walker_env_t;
struct pressure_walker_env_t {
@@ -159,9 +94,6 @@ static void check_reg_pressure_class(pressure_walker_env_t *env,
ir_nodeset_destroy(&live_nodes);
}
/**
* Collect reg pressure statistics per block and per class.
*/
static void stat_reg_pressure_block(ir_node *block, void *data) {
pressure_walker_env_t *env = data;
@@ -189,169 +121,8 @@ void be_do_stat_reg_pressure(be_irg_t *birg, const arch_register_class_t *cls) {
stat_ev_emit("bechordal_maximum_register_pressure", env.max_pressure);
}
/**
* Notify statistic module about amount of ready nodes.
*/
void be_do_stat_sched_ready(ir_node *block, const ir_nodeset_t *ready_set) {
if (stat_is_active()) {
stat_be_block_sched_ready(get_irn_irg(block), block, MIN(ir_nodeset_size(ready_set), 5));
}
}
/**
* Pass information about a perm to the statistic module.
*/
void be_do_stat_perm(const char *class_name, int n_regs, ir_node *perm, ir_node *block, int n, int real_size) {
if (stat_is_active()) {
stat_be_block_stat_perm(class_name, n_regs, perm, block, n, real_size);
}
}
/**
* Pass information about a cycle or chain in a perm to the statistic module.
*/
void be_do_stat_permcycle(const char *class_name, ir_node *perm, ir_node *block, int is_chain, int n_elems, int n_ops) {
if (stat_is_active()) {
stat_be_block_stat_permcycle(class_name, perm, block, is_chain, n_elems, n_ops);
}
}
/**
* Updates nodes statistics.
*/
static void do_nodes_stat(ir_node *irn, void *env) {
be_stat_phase_t *phase = env;
ir_mode *mode;
ir_opcode opc;
arch_irn_class_t irn_class;
if (is_Block(irn))
return;
mode = get_irn_mode(irn);
opc = get_irn_opcode(irn);
phase->num_nodes++;
/* check for nodes we want to ignore */
if (be_is_Keep(irn) ||
be_is_CopyKeep(irn) ||
opc == iro_Start ||
opc == iro_End)
return;
if (is_Proj(irn) && (mode != mode_X)) {
phase->num_proj++;
return;
}
else if (is_Phi(irn)) {
phase->num_phi++;
return;
}
else if (mode_is_datab(mode) || ((mode == mode_T) && ! is_be_node(irn)) || (is_Proj(irn) && (mode == mode_X)))
phase->num_data++;
if (opc == iro_Load)
phase->num_load++;
else if (opc == iro_Store)
phase->num_store++;
irn_class = arch_irn_classify(phase->arch_env, irn);
if (irn_class & arch_irn_class_spill)
phase->num_spill++;
else if (irn_class & arch_irn_class_reload)
phase->num_reload++;
else if (irn_class & arch_irn_class_stackparam)
phase->num_load++;
else if (irn_class & arch_irn_class_load)
phase->num_load++;
else if (irn_class & arch_irn_class_store)
phase->num_store++;
}
/**
* Collects node statistics.
*
 * @param irg the irg to do statistics for
* @param phase the phase to collect the statistic for
*/
void be_do_stat_nodes(ir_graph *irg, const char *phase) {
be_stat_irg_t *irg_entry;
be_stat_phase_t *phase_entry, phase_key;
irg_entry = find_stat_irg_entry(irg);
if (! irg_entry)
return;
phase_key.phase = phase;
phase_entry = pset_find_ptr(irg_entry->phases, &phase_key);
if (! phase_entry) {
phase_entry = obstack_alloc(&irg_entry->obst, sizeof(*phase_entry));
phase_entry = pset_insert(irg_entry->phases, phase_entry, HASH_PTR(phase));
}
memset(phase_entry, 0, sizeof(*phase_entry));
phase_entry->phase = phase;
phase_entry->arch_env = irg_entry->arch_env;
irg_walk_blkwise_graph(irg_entry->irg, NULL, do_nodes_stat, phase_entry);
}
/**
* Dumps statistics about nodes (called from dump_snapshot)
*/
static void be_dump_node_stat(dumper_t *dmp, graph_entry_t *entry) {
be_stat_irg_t *stat_irg = find_stat_irg_entry(entry->irg);
be_stat_phase_t *phase;
if (! stat_irg || ! stat_irg->phases)
return;
fprintf(dmp->f, "===> BE NODE STATISTIC BEGIN <===\n");
foreach_pset(stat_irg->phases, phase) {
fprintf(dmp->f, "--> Phase: %s\n", phase->phase);
fprintf(dmp->f, "# nodes: %ld\n", phase->num_nodes);
fprintf(dmp->f, "# data nodes: %ld\n", phase->num_data);
fprintf(dmp->f, "# Proj: %ld\n", phase->num_proj);
fprintf(dmp->f, "# Phi: %ld\n", phase->num_phi);
fprintf(dmp->f, "# Load: %ld\n", phase->num_load);
fprintf(dmp->f, "# Store: %ld\n", phase->num_store);
fprintf(dmp->f, "# Spill: %ld\n", phase->num_spill);
fprintf(dmp->f, "# Reload: %ld\n", phase->num_reload);
}
fprintf(dmp->f, "===> BE NODE STATISTIC END <===\n");
}
/**
* Returns a be statistic object for the given irg.
*/
void be_stat_init_irg(const arch_env_t *arch_env, ir_graph *irg) {
static int reg_func = 1;
if (stat_is_active()) {
be_stat_irg_t *stat_irg;
if (! be_stat_data)
be_stat_data = new_set(cmp_stat_data, 8);
stat_irg = get_stat_irg_entry(irg);
stat_irg->irg = irg;
stat_irg->phases = new_pset(cmp_stat_phase, 8);
stat_irg->arch_env = arch_env;
obstack_init(&stat_irg->obst);
if (reg_func) {
/* first init: register dumper */
stat_register_dumper_func(be_dump_node_stat);
reg_func = 0;
}
}
}
#endif /* FIRM_STATISTICS */
typedef struct _estimate_irg_costs_env_t {
const arch_env_t *arch_env;
@@ -385,15 +156,120 @@ double be_estimate_irg_costs(ir_graph *irg, const arch_env_t *arch_env, ir_exec_
return env.costs;