Commit 7a2c7fed authored by Matthias Braun

cleanup besched header

parent 4520b859
@@ -50,6 +50,7 @@
#include "beflags.h"
#include "bearch.h"
#include "beirg.h"
#include "beirgmod.h"
#include "besched.h"
#include "benode.h"
#include "belive.h"
......
@@ -274,6 +274,60 @@ int be_remove_empty_blocks(ir_graph *irg)
return blocks_removed;
}
//---------------------------------------------------------------------------
typedef struct remove_dead_nodes_env_t_ {
bitset_t *reachable;
ir_graph *irg;
be_lv_t *lv;
} remove_dead_nodes_env_t;
/**
* Post-walker: remember all visited nodes in a bitset.
*/
static void mark_dead_nodes_walker(ir_node *node, void *data)
{
remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
bitset_set(env->reachable, get_irn_idx(node));
}
/**
* Post-block-walker:
* Walk through the schedule of every block and remove all dead nodes from it.
*/
static void remove_dead_nodes_walker(ir_node *block, void *data)
{
remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
ir_node *node, *next;
for (node = sched_first(block); ! sched_is_end(node); node = next) {
/* get next node now, as after calling sched_remove it will be invalid */
next = sched_next(node);
if (bitset_is_set(env->reachable, get_irn_idx(node)))
continue;
if (env->lv)
be_liveness_remove(env->lv, node);
sched_remove(node);
kill_node(node);
}
}
void be_remove_dead_nodes_from_schedule(ir_graph *irg)
{
remove_dead_nodes_env_t env;
env.reachable = bitset_alloca(get_irg_last_idx(irg));
env.lv = be_get_irg_liveness(irg);
env.irg = irg;
// mark all reachable nodes
irg_walk_graph(irg, mark_dead_nodes_walker, NULL, &env);
// walk schedule and remove non-marked nodes
irg_block_walk_graph(irg, remove_dead_nodes_walker, NULL, &env);
}
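/*
 * Usage sketch (illustrative, not part of this commit): a backend pass that
 * orphans scheduled nodes can clean the schedules up afterwards. The pass
 * below is hypothetical; only be_remove_dead_nodes_from_schedule() is real.
 */
static void example_cleanup_pass(ir_graph *irg)
{
	/* ... transformation that leaves some scheduled nodes unreachable ... */
	be_remove_dead_nodes_from_schedule(irg);
}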
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_irgmod);
void be_init_irgmod(void)
{
......
@@ -55,4 +55,10 @@ ir_node *insert_Perm_after(ir_graph *irg, const arch_register_class_t *cls,
*/
int be_remove_empty_blocks(ir_graph *irg);
/**
 * Removes dead nodes from the schedule.
* @param irg the graph
*/
void be_remove_dead_nodes_from_schedule(ir_graph *irg);
#endif
@@ -35,6 +35,34 @@
#include "bearch.h"
#include "beirg.h"
/**
 * Checks if a node is to appear in a schedule. Such nodes either
 * consume real data (mode datab) or produce such.
 * @param irn The node to check for.
 * @return true if the node consumes/produces data, false if not.
*/
static inline bool to_appear_in_schedule(const ir_node *irn)
{
switch(get_irn_opcode(irn)) {
case iro_Anchor:
case iro_Bad:
case iro_Block:
case iro_Confirm:
case iro_Dummy:
case iro_End:
case iro_NoMem:
case iro_Pin:
case iro_Proj:
case iro_Sync:
case iro_Unknown:
return false;
case iro_Phi:
return mode_is_data(get_irn_mode(irn));
default:
return ! (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled);
}
}
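/*
 * Sketch (illustrative, hypothetical helper): a list scheduler would filter
 * candidates with the predicate above before emitting them. Assumes besched.h
 * is available for sched_add_before(); inserting before the block node puts
 * the instruction at the end of the block's circular schedule list.
 */
static inline void schedule_if_needed(ir_node *block, ir_node *irn)
{
	if (!to_appear_in_schedule(irn))
		return;                    /* Projs, Bads, ... never get an entry */
	sched_add_before(block, irn);  /* append at the end of the block */
}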
/**
* A selector interface which is used by the list schedule framework.
* You can implement your own list scheduler by implementing these
......
@@ -966,7 +966,9 @@ ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert, ir_mode *m
reload = be_new_Reload(cls, cls_frame, bl, frame, spill, mode);
if (is_Block(insert)) {
insert = sched_skip(insert, 0, sched_skip_cf_predicator, NULL);
do {
insert = sched_prev(insert);
} while (is_cfop(insert));
sched_add_after(insert, reload);
} else {
sched_add_before(insert, reload);
......
@@ -44,33 +44,9 @@
#include "belistsched.h"
#include "belive.h"
size_t sched_irn_data_offset = 0;
#define SCHED_INITIAL_GRANULARITY (1 << 14)
static void block_sched_dumper(ir_node *block, void *env)
{
FILE *f = (FILE*)env;
const ir_node *curr;
ir_fprintf(f, "%+F:\n", block);
sched_foreach(block, curr) {
sched_info_t *info = get_irn_sched_info(curr);
ir_fprintf(f, "\t%6d: %+F\n", info->time_step, curr);
}
}
void be_sched_dump(FILE *f, ir_graph *irg)
{
irg_block_walk_graph(irg, block_sched_dumper, NULL, f);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched);
void be_init_sched(void)
{
sched_irn_data_offset = firm_register_additional_node_data(sizeof(sched_info_t));
}
void sched_renumber(const ir_node *block)
static void sched_renumber(const ir_node *block)
{
ir_node *irn;
sched_info_t *inf;
@@ -83,159 +59,93 @@ void sched_renumber(const ir_node *block)
}
}
int sched_skip_cf_predicator(const ir_node *irn, void *data)
{
(void)data;
return is_cfop(irn);
}
int sched_skip_phi_predicator(const ir_node *irn, void *data)
{
(void) data;
return is_Phi(irn);
}
/* Skip nodes in a schedule. */
ir_node *sched_skip(ir_node *from, int forward, sched_predicator_t *predicator, void *data)
{
const ir_node *bl = get_block_const(from);
ir_node *curr;
if (forward) {
if (is_Block(from))
from = sched_next(from);
for (curr = from; curr != bl && predicator(curr, data); curr = sched_next(curr)) {
static inline void sched_set_time_stamp(const ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
const sched_info_t *prev_info = get_irn_sched_info(info->prev);
const sched_info_t *next_info = get_irn_sched_info(info->next);
sched_timestep_t before_ts = prev_info->time_step;
sched_timestep_t after_ts = next_info->time_step;
/*
* If we are the last, we can give us a big time step,
* else we have to compute our time step from our
* neighbours.
*/
if(before_ts >= after_ts) {
info->time_step = before_ts + SCHED_INITIAL_GRANULARITY;
/* overflow? */
if (info->time_step <= before_ts) {
sched_renumber(get_nodes_block(irn));
}
} else {
if (is_Block(from))
from = sched_prev(from);
for (curr = from; curr != bl && predicator(curr, data); curr = sched_prev(curr)) {
}
}
return curr;
}
//---------------------------------------------------------------------------
typedef struct remove_dead_nodes_env_t_ {
bitset_t *reachable;
ir_graph *irg;
be_lv_t *lv;
} remove_dead_nodes_env_t;
/**
* Post-walker: remember all visited nodes in a bitset.
*/
static void mark_dead_nodes_walker(ir_node *node, void *data)
{
remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
bitset_set(env->reachable, get_irn_idx(node));
}
/**
* Post-block-walker:
* Walk through the schedule of every block and remove all dead nodes from it.
*/
static void remove_dead_nodes_walker(ir_node *block, void *data)
{
remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
ir_node *node, *next;
for (node = sched_first(block); ! sched_is_end(node); node = next) {
/* get next node now, as after calling sched_remove it will be invalid */
next = sched_next(node);
if (bitset_is_set(env->reachable, get_irn_idx(node)))
continue;
if (env->lv)
be_liveness_remove(env->lv, node);
sched_remove(node);
kill_node(node);
sched_timestep_t ts = (before_ts + after_ts) / 2;
/*
* If the resolution went out, we have to renumber
* this block.
*/
if(ts == before_ts || ts == after_ts)
sched_renumber(get_nodes_block(irn));
else
info->time_step = ts;
}
}
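/*
 * Worked example (illustrative): with SCHED_INITIAL_GRANULARITY = 1 << 14,
 * appending nodes at the end of an empty block yields the steps 16384, 32768,
 * 49152, ...  Inserting between neighbours with steps 16384 and 32768 assigns
 * (16384 + 32768) / 2 = 24576.  Once two neighbours have adjacent steps
 * (e.g. 24576 and 24577) no midpoint is left and sched_renumber() has to
 * reassign the steps of the whole block.
 */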
void be_remove_dead_nodes_from_schedule(ir_graph *irg)
void sched_add_before(ir_node *before, ir_node *irn)
{
remove_dead_nodes_env_t env;
env.reachable = bitset_alloca(get_irg_last_idx(irg));
env.lv = be_get_irg_liveness(irg);
env.irg = irg;
sched_info_t *info = get_irn_sched_info(irn);
ir_node *next = before;
sched_info_t *next_info = get_irn_sched_info(next);
ir_node *prev = next_info->prev;
sched_info_t *prev_info = get_irn_sched_info(prev);
assert(sched_is_scheduled(before));
assert(!sched_is_scheduled(irn));
assert(!is_Proj(before));
assert(!is_Proj(irn));
// mark all reachable nodes
irg_walk_graph(irg, mark_dead_nodes_walker, NULL, &env);
// walk schedule and remove non-marked nodes
irg_block_walk_graph(irg, remove_dead_nodes_walker, NULL, &env);
info->prev = prev;
info->next = next;
prev_info->next = irn;
next_info->prev = irn;
sched_set_time_stamp(irn);
}
int (sched_get_time_step)(const ir_node *node)
void sched_add_after(ir_node *after, ir_node *irn)
{
return _sched_get_time_step(node);
}
sched_info_t *info = get_irn_sched_info(irn);
ir_node *prev = after;
sched_info_t *prev_info = get_irn_sched_info(prev);
ir_node *next = prev_info->next;
sched_info_t *next_info = get_irn_sched_info(next);
assert(sched_is_scheduled(after));
assert(!sched_is_scheduled(irn));
assert(!is_Proj(after));
assert(!is_Proj(irn));
int (sched_has_next)(const ir_node *node)
{
return _sched_has_next(node);
info->prev = prev;
info->next = next;
prev_info->next = irn;
next_info->prev = irn;
sched_set_time_stamp(irn);
}
int (sched_has_prev)(const ir_node *node)
void sched_remove(ir_node *irn)
{
return _sched_has_prev(node);
}
sched_info_t *info = get_irn_sched_info(irn);
ir_node *prev = info->prev;
ir_node *next = info->next;
sched_info_t *prev_info = get_irn_sched_info(prev);
sched_info_t *next_info = get_irn_sched_info(next);
assert(sched_is_scheduled(irn));
ir_node *(sched_next)(const ir_node *node)
{
return _sched_next(node);
prev_info->next = next;
next_info->prev = prev;
info->next = NULL;
info->prev = NULL;
}
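/*
 * Sketch (illustrative, hypothetical helper): a node can be replaced in the
 * schedule by inserting the replacement next to it before removing it, so the
 * position in the block is preserved. Assumes new_node is not yet scheduled
 * and that neither node is a Proj (see the asserts in sched_add_after()).
 */
static void sched_replace_sketch(ir_node *old_node, ir_node *new_node)
{
	sched_add_after(old_node, new_node);
	sched_remove(old_node);
}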
ir_node *(sched_prev)(const ir_node *node)
{
return _sched_prev(node);
}
int (sched_is_scheduled)(const ir_node *node)
{
return _sched_is_scheduled(node);
}
ir_node *(sched_first)(const ir_node *node)
{
return _sched_first(node);
}
ir_node *(sched_last)(const ir_node *node)
{
return _sched_last(node);
}
void (sched_add_after)(ir_node *after, ir_node *node)
{
_sched_add_after(after, node);
}
void (sched_add_before)(ir_node *before, ir_node *node)
{
_sched_add_before(before, node);
}
void (sched_init_block)(ir_node *block)
{
_sched_init_block(block);
}
void (sched_remove)(ir_node *node)
{
_sched_remove(node);
}
void (sched_reset)(ir_node *node)
{
_sched_reset(node);
}
int (sched_comes_after)(const ir_node *n1, const ir_node *n2)
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched);
void be_init_sched(void)
{
return _sched_comes_after(n1, n2);
}
@@ -19,8 +19,9 @@
/**
* @file
* @brief Scheduling utilities for nodes in Blocks and Blocks.
* @author Sebastian Hack
 * @brief Data structures for scheduling nodes in basic blocks.
* (This file does not contain the scheduling algorithms)
* @author Sebastian Hack, Matthias Braun
* @version $Id$
*/
#ifndef FIRM_BE_BESCHED_H
@@ -35,117 +36,42 @@
#include "beinfo.h"
#include "beutil.h"
void be_sched_dump(FILE *f, ir_graph *irg);
/**
 * Returns the time step of a node. Each node in a block has a timestep
 * unique to that block. A node scheduled before another node has a lower
 * timestep than that node.
*/
int sched_get_time_step(const ir_node *irn);
int sched_has_next(const ir_node *irn);
int sched_has_prev(const ir_node *irn);
int sched_is_scheduled(const ir_node *irn);
ir_node *sched_next(const ir_node *irn);
ir_node *sched_prev(const ir_node *irn);
ir_node *sched_first(const ir_node *block);
ir_node *sched_last(const ir_node *block);
void sched_add_before(ir_node *before, ir_node *irn);
void sched_add_after(ir_node *after, ir_node *irn);
void sched_init_block(ir_node *block);
void sched_reset(ir_node *node);
int sched_comes_after(const ir_node *n1, const ir_node *n2);
void sched_remove(ir_node *irn);
#define sched_is_end(irn) is_Block(irn)
#define sched_is_begin(irn) is_Block(irn)
#define sched_foreach_from(from, irn) \
for(irn = from; !sched_is_end(irn); irn = sched_next(irn))
#define sched_foreach_reverse_from(from, irn) \
for(irn = from; !sched_is_begin(irn); irn = sched_prev(irn))
/**
* A shorthand macro for iterating over a schedule.
* @param block The block.
 * @param irn An ir node pointer used as an iterator.
*/
#define sched_foreach(block,irn) \
sched_foreach_from(sched_first(block), irn)
/**
* A shorthand macro for reversely iterating over a schedule.
* @param block The block.
 * @param irn An ir node pointer used as an iterator.
*/
#define sched_foreach_reverse(block,irn) \
sched_foreach_reverse_from(sched_last(block), irn)
/**
* A shorthand macro for iterating over all Phi nodes of a schedule.
* @param block The block.
 * @param phi An ir node pointer used as an iterator.
*/
#define sched_foreach_Phi(block,phi) \
for (phi = sched_first(block); is_Phi(phi); phi = sched_next(phi))
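/*
 * Iteration sketch (illustrative, hypothetical helper): count the Phi nodes
 * at the beginning of a block's schedule using the macros above.
 */
static inline int count_scheduled_phis(const ir_node *block)
{
	ir_node *phi;
	int      count = 0;
	sched_foreach_Phi(block, phi)
		++count;
	return count;
}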
/**
 * Removes dead nodes from the schedule.
* @param irg the graph
*/
void be_remove_dead_nodes_from_schedule(ir_graph *irg);
#define SCHED_INITIAL_GRANULARITY (1 << 14)
#define get_irn_sched_info(irn) (&be_get_info(skip_Proj_const(irn))->sched_info)
static sched_info_t *get_irn_sched_info(const ir_node *node)
{
return &be_get_info(skip_Proj_const(node))->sched_info;
}
/**
 * Check if the node is scheduled.
 * @param irn The node.
 * @return true if the node is scheduled, false if not.
*/
static inline int _sched_is_scheduled(const ir_node *irn)
static inline bool sched_is_scheduled(const ir_node *irn)
{
return get_irn_sched_info(irn)->next != NULL;
}
/**
* Get the time step of an irn in a schedule.
 * Returns the time step of a node. Each node in a block has a timestep
 * unique to that block. A node scheduled before another node has a lower
 * timestep than that node.
* @param irn The node.
* @return The time step in the schedule.
*/
static inline int _sched_get_time_step(const ir_node *irn)
static inline sched_timestep_t sched_get_time_step(const ir_node *irn)
{
assert(_sched_is_scheduled(irn));
assert(sched_is_scheduled(irn));
return get_irn_sched_info(irn)->time_step;
}
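/*
 * Sketch (illustrative, hypothetical helper): time steps give a cheap
 * "scheduled before" test for two nodes of the same block, similar in spirit
 * to what sched_comes_after() provides.
 */
static inline bool comes_before_sketch(const ir_node *a, const ir_node *b)
{
	assert(get_nodes_block(a) == get_nodes_block(b));
	return sched_get_time_step(a) < sched_get_time_step(b);
}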
/**
* Checks, if a node is to appear in a schedule. Such nodes either
* consume real data (mode datab) or produce such.
* @param irn The node to check for.
* @return 1, if the node consumes/produces data, false if not.
*/
static inline bool to_appear_in_schedule(const ir_node *irn)
static inline bool sched_is_end(const ir_node *node)
{
return is_Block(node);
}
static inline bool sched_is_begin(const ir_node *node)
{
switch(get_irn_opcode(irn)) {
case iro_Anchor:
case iro_Bad:
case iro_Block:
case iro_Confirm:
case iro_Dummy:
case iro_End:
case iro_NoMem:
case iro_Pin:
case iro_Proj:
case iro_Sync:
case iro_Unknown:
return false;
case iro_Phi:
return mode_is_data(get_irn_mode(irn));
default:
return ! (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled);
}
return is_Block(node);
}
/**
@@ -153,7 +79,7 @@ static inline bool to_appear_in_schedule(const ir_node *irn)
* @param irn The ir node.
 * @return true if the node has a scheduling successor, false if not.
*/
static inline int _sched_has_next(const ir_node *irn)
static inline bool sched_has_next(const ir_node *irn)
{
const sched_info_t *info = get_irn_sched_info(irn);
const ir_node *block = is_Block(irn) ? irn : get_nodes_block(irn);
@@ -165,7 +91,7 @@ static inline int _sched_has_next(const ir_node *irn)
* @param irn The ir node.
 * @return true if the node has a scheduling predecessor, false if not.
*/
static inline int _sched_has_prev(const ir_node *irn)
static inline bool sched_has_prev(const ir_node *irn)
{
const sched_info_t *info = get_irn_sched_info(irn);
const ir_node *block = is_Block(irn) ? irn : get_nodes_block(irn);
@@ -177,7 +103,7 @@ static inline int _sched_has_prev(const ir_node *irn)
* @param irn The node.
* @return The next ir node in the schedule or the block, if the node has no next node.
*/
static inline ir_node *_sched_next(const ir_node *irn)
static inline ir_node *sched_next(const ir_node *irn)
{
const sched_info_t *info = get_irn_sched_info(irn);
return info->next;
@@ -189,7 +115,7 @@ static inline ir_node *_sched_next(const ir_node *irn)
 * @return The previous ir node in the schedule or the block, if the node has
 *         no predecessor.
*/
static inline ir_node *_sched_prev(const ir_node *irn)
static inline ir_node *sched_prev(const ir_node *irn)
{
const sched_info_t *info = get_irn_sched_info(irn);
return info->prev;
@@ -201,10 +127,10 @@ static inline ir_node *_sched_prev(const ir_node *irn)
* @return The first node in the schedule or the block itself
* if there is no node in the schedule.
*/
static inline ir_node *_sched_first(const ir_node *block)
static inline ir_node *sched_first(const ir_node *block)
{
assert(is_Block(block) && "Need a block here");
return _sched_next(block);
return sched_next(block);
}
/**
@@ -213,49 +139,10 @@ static inline ir_node *_sched_first(const ir_node *block)
* @return The last ir node in a schedule, or the block itself
* if there is no node in the schedule.
*/
static inline ir_node *_sched_last(const ir_node *block)
static inline ir_node *sched_last(const ir_node *block)
{
assert(is_Block(block) && "Need a block here");
return _sched_prev(block);
}
/**
* Reassign the time steps in the schedule.
* @param block The schedule to update.
*/
void sched_renumber(const ir_node *block);
static inline void _sched_set_time_stamp(const ir_node *irn)
{
sched_info_t *info = get_irn_sched_info(irn);
const sched_info_t *prev_info = get_irn_sched_info(info->prev);
const sched_info_t *next_info = get_irn_sched_info(info->next);
sched_timestep_t before_ts = prev_info->time_step;
sched_timestep_t after_ts = next_info->time_step;
/*
* If we are the last, we can give us a big time step,
* else we have to compute our time step from our
* neighbours.
*/
if(before_ts >= after_ts) {
info->time_step = before_ts + SCHED_INITIAL_GRANULARITY;
/* overflow? */
if (info->time_step <= before_ts) {
sched_renumber(get_nodes_block(irn));
}
} else {