Commit f8f2653a authored by Matthias Braun

remove mostly unused/broken liveness tracking in scheduler

parent d7952a5e
@@ -87,7 +87,6 @@ typedef struct block_sched_env_t {
ir_nodeset_t cands; /**< the set of candidates */
ir_node *block; /**< the current block */
sched_env_t *sched_env; /**< the scheduler environment */
ir_nodeset_t live; /**< simple liveness during scheduling */
const list_sched_selector_t *selector;
void *selector_block_env;
} block_sched_env_t;
@@ -231,76 +230,6 @@ static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int
return env->sched_info[idx].num_not_sched_user;
}
/**
* Returns the number of users of a node having mode datab.
*/
static int get_num_successors(ir_node *irn)
{
int sum = 0;
const ir_edge_t *edge;
if (get_irn_mode(irn) == mode_T) {
/* for mode_T nodes: count the users of all Projs */
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
ir_mode *mode = get_irn_mode(proj);
if (mode == mode_T) {
sum += get_num_successors(proj);
} else if (mode_is_datab(mode)) {
sum += get_irn_n_edges(proj);
}
}
}
else {
/* do not count keep-alive edges */
foreach_out_edge(irn, edge) {
if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
sum++;
}
}
return sum;
}
/**
 * Adds irn to @p live, records at each of its inputs that this user has been
 * scheduled and counts its not yet scheduled users.
*/
static void update_sched_liveness(block_sched_env_t *env, ir_node *irn)
{
int i;
/* ignore Projs */
if (is_Proj(irn))
return;
for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
ir_node *in = get_irn_in_or_dep(irn, i);
/* if in is a proj: update predecessor */
in = skip_Proj(in);
/* if in is still in the live set: reduce number of users by one */
if (ir_nodeset_contains(&env->live, in)) {
if (add_irn_not_sched_user(env, in, -1) <= 0)
ir_nodeset_remove(&env->live, in);
}
}
/*
get_num_successors returns the number of all users. This includes
users in different blocks as well. As each block is scheduled separately,
the liveness info of those users will not be updated, so these
users keep contributing to the register pressure, as desired.
*/
i = get_num_successors(irn);
if (i > 0) {
set_irn_not_sched_user(env, irn, i);
ir_nodeset_insert(&env->live, irn);
}
}
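The bookkeeping deleted above works as follows: every value carries a count of its not-yet-scheduled users; scheduling a user decrements the counts of that node's inputs, and an input leaves the live set once its count reaches zero, i.e. once its last user has been scheduled. A toy, standalone sketch of that counting scheme, in plain C rather than libFirm code, with all names made up:

/* Toy, standalone sketch of the counting scheme the removed code implemented;
 * this is NOT libFirm code and every name below is made up. */
#include <stdio.h>

#define N_VALUES 3

static int not_sched_users[N_VALUES]; /* users not yet scheduled, per value */
static int live[N_VALUES];            /* 1 while the value still ties up a register */

static void define_value(int v, int n_users)
{
    not_sched_users[v] = n_users;
    live[v]            = n_users > 0;
}

/* called whenever a user of value v is picked by the scheduler */
static void schedule_user_of(int v)
{
    if (live[v] && --not_sched_users[v] <= 0) {
        live[v] = 0; /* last user scheduled: the value dies */
        printf("value %d died\n", v);
    }
}

int main(void)
{
    define_value(0, 2);  /* two users  */
    define_value(1, 1);  /* one user   */
    define_value(2, 0);  /* never live */

    schedule_user_of(0); /* value 0 stays live */
    schedule_user_of(1); /* value 1 dies       */
    schedule_user_of(0); /* value 0 dies       */
    return 0;
}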
static void selected(block_sched_env_t *env, ir_node *node)
{
/* notify the selector about the finally selected node. */
@@ -323,7 +252,6 @@ static void add_to_sched(block_sched_env_t *env, ir_node *irn)
{
assert(! (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled));
update_sched_liveness(env, irn);
sched_add_before(env->block, irn);
DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
@@ -362,7 +290,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
be.sched_info = env->sched_info;
be.block = block;
ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
be.selector = selector;
be.sched_env = env;
@@ -412,9 +339,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
if (get_nodes_block(operand) == block) {
ready = 0;
break;
} else {
/* live-in values increase register pressure */
ir_nodeset_insert(&be.live, operand);
}
}
@@ -438,7 +362,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
}
if (irn == NULL) {
irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
irn = be.selector->select(be.selector_block_env, &be.cands);
}
DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
@@ -454,7 +378,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
selector->finish_block(be.selector_block_env);
ir_nodeset_destroy(&be.cands);
ir_nodeset_destroy(&be.live);
}
/* List schedule a graph. */
......
@@ -71,11 +71,9 @@ typedef struct list_sched_selector_t {
* @param block_env Some private information as returned by init_block().
* @param sched_head The schedule so far.
* @param ready_set A set containing all ready nodes. Pick one of these nodes.
* @param live_set A set containing all nodes currently alive.
* @return The chosen node.
*/
ir_node *(*select)(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set);
ir_node *(*select)(void *block_env, ir_nodeset_t *ready_set);
/**
* This function gets executed after a node finally has been made ready.
......
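Out-of-tree selectors have to be adapted to the trimmed select callback. A minimal sketch of the new shape, modelled on the in-tree selectors further down in this diff; my_select is a hypothetical name and the include is an assumption about the header shown above:

/* Minimal sketch of a selector adapted to the new interface; my_select is a
 * made-up name and the header name is an assumption. */
#include "belistsched.h" /* assumed header: declares list_sched_selector_t and the ir_nodeset API */

static ir_node *my_select(void *block_env, ir_nodeset_t *ready_set)
{
    ir_nodeset_iterator_t iter;
    (void)block_env;

    /* no live_set parameter any more: simply take the first ready node;
     * a real selector would apply its own heuristic here */
    ir_nodeset_iterator_init(&iter, ready_set);
    return ir_nodeset_iterator_next(&iter);
}

/* my_select would be plugged into the select field of a list_sched_selector_t,
 * next to the selector's other callbacks such as init_block(). */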
@@ -54,8 +54,7 @@ static int must_be_scheduled(const ir_node* const irn)
}
static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set)
{
instance_t* inst = (instance_t*)block_env;
ir_node* irn;
@@ -63,8 +62,6 @@ static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
ir_node* last = NULL;
ir_nodeset_iterator_t iter;
(void)live_set;
for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) {
next = (ir_node*)get_irn_link(irn);
if (ir_nodeset_contains(ready_set, irn)) {
......
@@ -36,14 +36,12 @@
* The random selector:
* Just assure that branches are executed last, otherwise select a random node
*/
static ir_node *random_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
static ir_node *random_select(void *block_env, ir_nodeset_t *ready_set)
{
ir_nodeset_iterator_t iter;
ir_node *irn = NULL;
int only_branches_left = 1;
(void)block_env;
(void)live_set;
/* assure that branches and constants are executed last */
ir_nodeset_iterator_init(&iter, ready_set);
......
@@ -257,14 +257,12 @@ static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
return sum;
}
static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set)
{
ir_nodeset_iterator_t iter;
reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
ir_node *irn, *res = NULL;
int curr_cost = INT_MAX;
(void) live_set;
assert(ir_nodeset_size(ready_set) > 0);
......
@@ -570,14 +570,13 @@ static ir_node *basic_selection(ir_nodeset_t *ready_set)
/**
* The muchnik selector.
*/
static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set, ir_nodeset_t *live_set)
static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set)
{
trace_env_t *env = (trace_env_t*)block_env;
ir_nodeset_t mcands, ecands;
ir_nodeset_iterator_t iter;
sched_timestep_t max_delay = 0;
ir_node *irn;
(void) live_set;
/* calculate the max delay of all candidates */
foreach_ir_nodeset(ready_set, irn, iter) {
@@ -658,15 +657,18 @@ static void sched_muchnik(ir_graph *irg)
/**
* Execute the heuristic function.
*/
static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns, ir_nodeset_t *lv)
static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns)
{
trace_env_t *trace_env = (trace_env_t*)block_env;
ir_node *irn, *cand = NULL;
int max_prio = INT_MIN;
int cur_prio = INT_MIN;
int cur_pressure = ir_nodeset_size(lv);
int reg_fact, cand_reg_fact;
ir_nodeset_iterator_t iter;
/* Note: register pressure calculation needs an overhaul; you need correct
 * tracking for each register class individually and weighting by each class
int cur_pressure = ir_nodeset_size(lv); */
int cur_pressure = 1;
/* prefer instructions which can be scheduled early */
#define PRIO_TIME 3
@@ -689,7 +691,6 @@ static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns, ir_nodeset_t
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
/* reg_fact = chg << cur_pressure; */
reg_fact = chg * cur_pressure;
if (reg_fact < chg)
reg_fact = INT_MAX - 2;
......
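With cur_pressure pinned to 1 in heuristic_select (assuming it is not updated elsewhere in the loop, which this diff does not show), reg_fact reduces to chg and the INT_MAX - 2 clamp after the multiplication can no longer trigger. A standalone sketch of that saturate-on-overflow idea, written with a widened multiply so the example itself stays free of signed overflow; all names are made up:

/* Standalone illustration of the clamping idea used around reg_fact;
 * not libFirm code.  The product is computed in a wider type so the
 * example itself never overflows int. */
#include <limits.h>
#include <stdio.h>

static int saturating_scale(int chg, int pressure)
{
    long long product = (long long)chg * pressure;
    if (product > INT_MAX - 2) /* would not fit: clamp like the scheduler does */
        product = INT_MAX - 2;
    return (int)product;
}

int main(void)
{
    printf("%d\n", saturating_scale(1 << 20, 1));       /* pressure 1: unchanged */
    printf("%d\n", saturating_scale(1 << 20, 1 << 12));  /* too big: clamped      */
    return 0;
}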
@@ -42,13 +42,11 @@
* Just assure that branches are executed last, otherwise select
* the first node ready.
*/
static ir_node *trivial_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
static ir_node *trivial_select(void *block_env, ir_nodeset_t *ready_set)
{
ir_node *irn;
ir_nodeset_iterator_t iter;
(void)block_env;
(void)live_set;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
......