Commit 98e70a71 authored by Matthias Braun

remove regpress and trace schedulers

In their current state they produce very bad code and are just a
maintenance burden.
parent 1ecdfe8c
@@ -32,8 +32,6 @@ void be_init_spilloptions(void);
void be_init_listsched(void);
void be_init_sched_rand(void);
void be_init_sched_normal(void);
void be_init_sched_regpress(void);
void be_init_sched_trace(void);
void be_init_sched_trivial(void);
void be_init_chordal(void);
void be_init_pbqp_coloring(void);
@@ -89,8 +87,6 @@ void be_init_modules(void)
be_init_listsched();
be_init_sched_normal();
be_init_sched_trace();
be_init_sched_regpress();
be_init_sched_rand();
be_init_sched_trivial();
/*
* This file is part of libFirm.
* Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @file
* @brief Register pressure node selector.
* @author Sebastian Hack
* @date 29.08.2006
*/
#include <stdlib.h>
#include <limits.h> /* INT_MAX */
#include "iredges_t.h"
#include "irgwalk.h"
#include "irtools.h"
#include "util.h"
#include "besched.h"
#include "belistsched.h"
#include "benode.h"
#include "bemodule.h"
typedef struct usage_stats_t {
ir_node *irn;
struct usage_stats_t *next;
int max_hops;
int uses_in_block; /**< Number of uses inside the current block. */
int already_consumed; /**< Number of insns using this value already
scheduled. */
} usage_stats_t;
typedef struct {
struct obstack obst;
usage_stats_t *root;
ir_nodeset_t already_scheduled;
} reg_pressure_selector_env_t;
static inline usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
usage_stats_t *us = (usage_stats_t*)get_irn_link(irn);
if (!us) {
us = OALLOC(&env->obst, usage_stats_t);
us->irn = irn;
us->already_consumed = 0;
us->max_hops = INT_MAX;
us->next = env->root;
env->root = us;
set_irn_link(irn, us);
}
return us;
}
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
ir_node *bl = get_nodes_block(irn);
/*
 * If the reached node is not in the desired block: a node in a
 * dominating block is as good as already scheduled (0 hops), any
 * other block means the value cannot be reached from here.
 */
if (bl != curr_bl)
return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
/*
* If the node is in the current block but not
* yet scheduled, we keep on searching from that node.
*/
if (!ir_nodeset_contains(&env->already_scheduled, irn)) {
int i, n;
int res = 0;
for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
ir_node *operand = get_irn_in_or_dep(irn, i);
if (get_irn_visited(operand) < visited_nr) {
int tmp;
set_irn_visited(operand, visited_nr);
tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
res = MAX(tmp, res);
}
}
return res;
}
/*
* If the node is in the current block and scheduled, return
* the depth which indicates the number of steps to the
* region of scheduled nodes.
*/
return depth;
}
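/*
 * Example: for a dependence chain op -> U2 -> U1 where U1 and U2 are
 * still unscheduled in this block and op is already scheduled, a walk
 * starting at the user U1 returns 2: one hop to U2 and one more to
 * reach the scheduled region at op.
 */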
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
ir_node *bl = get_nodes_block(irn);
ir_graph *irg = get_irn_irg(bl);
int res = 0;
foreach_out_edge(irn, edge) {
ir_node *user = get_edge_src_irn(edge);
unsigned visited_nr = get_irg_visited(irg) + 1;
int max_hops;
set_irg_visited(irg, visited_nr);
max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
res = MAX(res, max_hops);
}
return res;
}
static void *reg_pressure_graph_init(ir_graph *irg)
{
irg_walk_graph(irg, firm_clear_link, NULL, NULL);
return NULL;
}
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
reg_pressure_selector_env_t *env = XMALLOC(reg_pressure_selector_env_t);
(void) graph_env;
obstack_init(&env->obst);
ir_nodeset_init(&env->already_scheduled);
env->root = NULL;
/*
* Collect usage statistics.
*/
sched_foreach(bl, irn) {
foreach_irn_in(irn, i, op) {
usage_stats_t *us = get_or_set_usage_stats(env, op);
us->uses_in_block++;
}
}
return env;
}
static void reg_pressure_block_free(void *block_env)
{
reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
usage_stats_t *us;
for (us = env->root; us; us = us->next)
set_irn_link(us->irn, NULL);
obstack_free(&env->obst, NULL);
ir_nodeset_destroy(&env->already_scheduled);
free(env);
}
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
{
int res = 0;
if (get_irn_mode(irn) == mode_T) {
foreach_out_edge(irn, edge)
res += get_result_hops_sum(env, get_edge_src_irn(edge));
}
else if (mode_is_data(get_irn_mode(irn)))
res = compute_max_hops(env, irn);
return res;
}
static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
int sum = 0;
foreach_irn_in(irn, i, op) {
if (is_Proj(op)
|| (arch_get_irn_flags(op) & arch_irn_flag_not_scheduled))
continue;
sum += compute_max_hops(env, op);
}
sum += get_result_hops_sum(env, irn);
return sum;
}
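/*
 * A node's total cost is thus the sum of the max-hops values of its
 * operands and of its data results: the cheapest ready node is the one
 * whose inputs and outputs lie closest to the already scheduled region,
 * which tends to keep live ranges short.
 */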
static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set)
{
reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
ir_node *res = NULL;
int curr_cost = INT_MAX;
assert(ir_nodeset_size(ready_set) > 0);
foreach_ir_nodeset(ready_set, irn, iter) {
/*
Ignore branch instructions for the time being.
They should only be scheduled if there is nothing else.
*/
if (!is_cfop(irn)) {
int costs = reg_pr_costs(env, irn);
if (costs <= curr_cost) {
res = irn;
curr_cost = costs;
}
}
}
/*
There was no result so we only saw a branch.
Take it and finish.
*/
if (!res) {
res = ir_nodeset_first(ready_set);
assert(res && "There must be a node scheduled.");
}
ir_nodeset_insert(&env->already_scheduled, res);
return res;
}
static void sched_reg_pressure(ir_graph *irg)
{
static const list_sched_selector_t reg_pressure_selector = {
reg_pressure_graph_init,
reg_pressure_block_init,
reg_pressure_select,
NULL, /* node_ready */
NULL, /* node_selected */
reg_pressure_block_free,
free
};
be_list_sched_graph(irg, &reg_pressure_selector);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_regpress)
void be_init_sched_regpress(void)
{
be_register_scheduler("regpress", sched_reg_pressure);
}
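/*
 * For illustration only (a sketch, not part of this file): the smallest
 * possible selector following the same pattern leaves every optional
 * callback NULL, as the trivial scheduler registered alongside this one
 * does, and merely returns some ready node. The names example_select,
 * sched_example and "example" are made up for this sketch.
 */
#if 0
static ir_node *example_select(void *block_env, ir_nodeset_t *ready_set)
{
	(void)block_env;
	/* a selector only has to return one member of ready_set */
	return ir_nodeset_first(ready_set);
}

static void sched_example(ir_graph *irg)
{
	static const list_sched_selector_t example_selector = {
		NULL,           /* graph init */
		NULL,           /* block init */
		example_select,
		NULL,           /* node_ready */
		NULL,           /* node_selected */
		NULL,           /* block free */
		NULL            /* graph free */
	};
	be_list_sched_graph(irg, &example_selector);
}

static void be_init_sched_example(void)
{
	be_register_scheduler("example", sched_example);
}
#endif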
/*
* This file is part of libFirm.
* Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @file
* @brief Implements a trace scheduler as presented in Muchnick[TM].
* @author Michael Beck
* @date 28.08.2006
*/
#include <stdlib.h>
#include "../../adt/util.h"
#include "iredges_t.h"
#include "beirg.h"
#include "besched.h"
#include "belistsched.h"
#include "benode.h"
#include "belive.h"
#include "bemodule.h"
/* we need a special mark */
static char _mark;
#define MARK &_mark
typedef struct trace_irn {
sched_timestep_t delay; /**< The delay for this node if already calculated, else 0. */
sched_timestep_t etime; /**< The earliest time of this node. */
unsigned num_user; /**< The number of real users (mode data) of this node */
int reg_diff; /**< The difference of num(out registers) - num(in registers) */
int preorder; /**< The pre-order position */
unsigned critical_path_len; /**< The weighted length of the longest critical path */
unsigned is_root : 1; /**< is a root node of a block */
} trace_irn_t;
typedef struct trace_env {
trace_irn_t *sched_info; /**< trace scheduling information about the nodes */
sched_timestep_t curr_time; /**< current time of the scheduler */
be_lv_t *liveness; /**< The liveness for the irg */
DEBUG_ONLY(firm_dbg_module_t *dbg;)
} trace_env_t;
/**
* Returns an arbitrary node from a nodeset (currently simply the first one).
*/
static ir_node *get_nodeset_node(const ir_nodeset_t *nodeset)
{
return ir_nodeset_first(nodeset);
}
/**
* Returns non-zero if the node is a root node
*/
static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].is_root;
}
/**
* Mark a node as root node
*/
static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].is_root = 1;
}
/**
* Get the current delay.
*/
static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].delay;
}
/**
* Set the current delay.
*/
static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].delay = delay;
}
/**
* Get the current etime.
*/
static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].etime;
}
/**
* Set the current etime.
*/
static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].etime = etime;
}
/**
* Get the number of users.
*/
static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].num_user;
}
/**
* Set the number of users.
*/
static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].num_user = num_user;
}
/**
* Get the register difference.
*/
static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].reg_diff;
}
/**
* Set the register difference.
*/
static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].reg_diff = reg_diff;
}
/**
* Get the pre-order position.
*/
static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].preorder;
}
/**
* Set the pre-order position.
*/
static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].preorder = pos;
}
/**
* Get the weighted length of the longest critical path.
*/
static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].critical_path_len;
}
/**
* Set the weighted length of the longest critical path.
*/
static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
{
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].critical_path_len = len;
}
/**
* Returns the execution time of node n.
*/
static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
{
(void) env;
if (be_is_Keep(n) || is_Proj(n))
return 0;
return 1;
}
/**
* Calculates the latency between two ops.
*/
static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
{
(void) pred_cycle;
(void) curr_cycle;
/* a Keep hides a root */
if (be_is_Keep(curr))
return exectime(env, pred);
/* Projs are executed immediately */
if (is_Proj(curr))
return 0;
return 1;
}
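/*
 * Timing model: every real operation takes one cycle, Projs and Keeps
 * take none. Edges into a Proj have zero latency, edges into a Keep
 * cost the exec time of the kept predecessor, all other edges cost one
 * cycle.
 */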
/**
* Returns the number of users of a node having mode data.
*/
static int get_num_successors(ir_node *irn)
{
int sum = 0;
if (get_irn_mode(irn) == mode_T) {
/* for mode_T nodes: count the users of all Projs */
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_mode *mode = get_irn_mode(proj);
if (mode == mode_T)
sum += get_num_successors(proj);
else if (mode_is_data(mode))
sum += get_irn_n_edges(proj);
}
} else {
/* do not count keep-alive edges */
foreach_out_edge(irn, edge) {
if (!is_End(get_edge_src_irn(edge)))
sum++;
}
}
return sum;
}
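/*
 * Example: a mode_T node whose two data Projs have three and two users
 * respectively counts 3 + 2 = 5 successors; for non-mode_T nodes,
 * keep-alive edges to the End node are skipped.
 */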
/**
* Returns the difference num(out registers) - num(in registers).
*/
static int get_reg_difference(trace_env_t *env, ir_node *irn)
{
int num_out = 0;
int num_in = 0;
ir_node *block = get_nodes_block(irn);
if (be_is_Call(irn)) {
/* we want calls preferred */
return -5;
}
if (get_irn_mode(irn) == mode_T) {
/* mode_T nodes: num out regs == num Projs with mode data */
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (mode_is_data(get_irn_mode(proj)))
num_out++;
}
}
else
num_out = 1;
/* num in regs: number of ins with mode data that are neither ignored nor live at the block end */
foreach_irn_in_r(irn, i, in) {
if (!mode_is_data(get_irn_mode(in)))
continue;
if (arch_irn_is_ignore(in))
continue;
if (be_is_live_end(env->liveness, block, in))
continue;
num_in++;
}
return num_out - num_in;
}
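/*
 * Example: an Add whose two data operands are consumed here (neither is
 * ignored nor live at the end of the block) yields 1 - 2 = -1, i.e.
 * scheduling it frees a register. Calls are forced to -5 so they are
 * preferred.
 */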
/**
* Descend into a DAG and create a pre-order list.
*/
static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
{
if (! is_Phi(root)) {
path_len += exectime(env, root);
if (get_irn_critical_path_len(env, root) < path_len) {
set_irn_critical_path_len(env, root, path_len);
}
/* calculate number of users (needed for heuristic) */
set_irn_num_user(env, root, get_num_successors(root));
/* calculate register difference (needed for heuristic) */
set_irn_reg_diff(env, root, get_reg_difference(env, root));
/* Phi nodes always leave the block */
foreach_irn_in_r(root, i, pred) {
DBG((env->dbg, LEVEL_3, " node %+F\n", pred));
/* Blocks may happen as predecessors of End nodes */
if (is_Block(pred))
continue;
/* already seen nodes are not marked */
if (get_irn_link(pred) != MARK)
continue;
/* don't leave our block */
if (get_nodes_block(pred) != block)
continue;
set_irn_link(pred, NULL);
descent(pred, block, list, env, path_len);
}
}
set_irn_link(root, *list);
*list = root;
}
/**
* Returns non-zero if root is a root node of the block 'block'.
*/
static int is_root(ir_node *root, ir_node *block)
{
foreach_out_edge(root, edge) {
ir_node *succ = get_edge_src_irn(edge);
if (is_Block(succ))
continue;
/* Phi nodes are always in "another" block */
if (is_Phi(succ))
continue;
if (get_nodes_block(succ) == block)
return 0;
}
return 1;
}