Commit 45449d34 authored by Matthias Braun's avatar Matthias Braun
Browse files

cleanup, use C99

parent 4fb44f3d
This diff is collapsed.
......@@ -44,9 +44,9 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
* Scheduling environment for the whole graph.
*/
/**
 * Scheduling environment for the whole graph.
 *
 * Holds the per-graph state of the list scheduler: which nodes are already
 * scheduled and the pluggable node-selection strategy.
 */
typedef struct sched_env_t {
	unsigned                    *scheduled;    /**< bitset of already scheduled nodes */
	const list_sched_selector_t *selector;     /**< The node selector. */
	void                        *selector_env; /**< A pointer to give to the selector. */
} sched_env_t;
/**
......@@ -117,8 +117,6 @@ static void node_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
*/
static void try_make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
int i, n;
/* we schedule one block at a time, so no need to consider users in other
* blocks */
if (is_Block(irn) || get_nodes_block(irn) != env->block)
......@@ -126,14 +124,13 @@ static void try_make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
if (is_Phi(irn) || is_End(irn))
return;
/* check if all operands are already available */
n = get_irn_ins_or_deps(irn);
for (i = 0; i < n; ++i) {
for (int i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
ir_node *op = get_irn_in_or_dep(irn, i);
/* If the operand is local to the scheduled block and not yet
* scheduled, this nodes cannot be made ready, so exit. */
if (get_nodes_block(op) == env->block
&& !is_already_scheduled(env->sched_env, op))
&& !is_already_scheduled(env->sched_env, op))
return;
}
......@@ -193,19 +190,18 @@ static void add_to_sched(block_sched_env_t *env, ir_node *irn)
*/
static void list_sched_block(ir_node *block, void *env_ptr)
{
sched_env_t *env = (sched_env_t*)env_ptr;
sched_env_t *env = (sched_env_t*)env_ptr;
const list_sched_selector_t *selector = env->selector;
block_sched_env_t be;
ir_nodeset_t *cands = &be.cands;
/* Initialize the block's list head that will hold the schedule. */
sched_init_block(block);
/* Initialize the block scheduling environment */
block_sched_env_t be;
be.block = block;
be.selector = selector;
be.sched_env = env;
ir_nodeset_t *cands = &be.cands;
ir_nodeset_init_size(cands, get_irn_n_edges(block));
DB((dbg, LEVEL_1, "scheduling %+F\n", block));
......@@ -249,19 +245,16 @@ static void list_sched_block(ir_node *block, void *env_ptr)
/* List schedule a graph. */
void be_list_sched_graph(ir_graph *irg, const list_sched_selector_t *selector)
{
int num_nodes;
sched_env_t env;
/* Matze: This is very slow, we should avoid it to improve backend speed,
* we just have to make sure that we have no dangling out-edges at this
* point...
*/
* point... */
edges_deactivate(irg);
edges_activate(irg);
num_nodes = get_irg_last_idx(irg);
unsigned num_nodes = get_irg_last_idx(irg);
/* initialize environment for list scheduler */
sched_env_t env;
memset(&env, 0, sizeof(env));
env.selector = selector;
env.scheduled = rbitset_malloc(num_nodes);
......
......@@ -31,11 +31,10 @@
static void sched_renumber(ir_node *const block)
{
sched_info_t *inf;
sched_timestep_t step = SCHED_INITIAL_GRANULARITY;
sched_foreach(block, irn) {
inf = get_irn_sched_info(irn);
sched_info_t *inf = get_irn_sched_info(irn);
inf->time_step = step;
step += SCHED_INITIAL_GRANULARITY;
}
......
......@@ -20,15 +20,15 @@
#include "util.h"
#include "array.h"
// XXX there is no one time init for schedulers
//#define NORMAL_DBG
#include "irprintf.h"
//#define NORMAL_DBG
/** An instance of the normal scheduler. */
/** An instance of the normal scheduler. */
typedef struct instance_t {
	ir_graph       *irg;       /**< the IR graph of this instance */
	struct obstack  obst;      /**< obstack for temporary data */
	ir_node        *curr_list; /**< current block schedule list */
} instance_t;
static int must_be_scheduled(const ir_node* const irn)
......@@ -39,15 +39,12 @@ static int must_be_scheduled(const ir_node* const irn)
static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set)
{
instance_t* inst = (instance_t*)block_env;
ir_node* irn;
ir_node* next;
ir_node* last = NULL;
for (irn = inst->curr_list; irn != NULL; last = irn, irn = next) {
instance_t *inst = (instance_t*)block_env;
for (ir_node *irn = inst->curr_list, *last = NULL, *next; irn != NULL;
last = irn, irn = next) {
next = (ir_node*)get_irn_link(irn);
if (ir_nodeset_contains(ready_set, irn)) {
#if defined NORMAL_DBG
#ifdef NORMAL_DBG
ir_fprintf(stderr, "scheduling %+F\n", irn);
#endif
if (last == NULL)
......@@ -63,40 +60,38 @@ static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set)
/** A node together with its scheduling cost, used for sorting. */
typedef struct irn_cost_pair {
	ir_node  *irn;  /**< the node */
	unsigned  cost; /**< scheduling cost of the node (higher = schedule earlier) */
} irn_cost_pair;
/**
 * qsort comparison callback ordering irn_cost_pair entries by descending
 * cost; ties are broken by ascending node index so the order is total and
 * deterministic.
 *
 * @param a  pointer to the first irn_cost_pair
 * @param b  pointer to the second irn_cost_pair
 * @return   <0 / 0 / >0 per the usual qsort contract
 */
static int cost_cmp(const void *a, const void *b)
{
	const irn_cost_pair *const a1 = (const irn_cost_pair*)a;
	const irn_cost_pair *const b1 = (const irn_cost_pair*)b;
	/* cost is unsigned: cast to int before subtracting to get a signed
	 * comparison result (costs are assumed small enough not to overflow) */
	int ret = (int)b1->cost - (int)a1->cost;
	if (ret == 0)
		ret = (int)get_irn_idx(a1->irn) - (int)get_irn_idx(b1->irn);
#ifdef NORMAL_DBG
	ir_fprintf(stderr, "cost %+F %s %+F\n", a1->irn,
	           ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#endif
	return ret;
}
/**
 * Per-node scheduling data: the root flag plus one cost entry per operand
 * (allocated with a flexible array member, sized by the node's arity).
 */
typedef struct flag_and_cost {
	bool          no_root; /**< true if some user in the same block marked this node as non-root */
	irn_cost_pair costs[]; /**< cost pairs for the node's operands */
} flag_and_cost;
#define get_irn_fc(irn) ((flag_and_cost*)get_irn_link(irn))
#define set_irn_fc(irn, fc) set_irn_link(irn, fc)
static int count_result(const ir_node* irn)
static unsigned count_result(const ir_node *irn)
{
const ir_mode* mode = get_irn_mode(irn);
const ir_mode *mode = get_irn_mode(irn);
if (mode == mode_M || mode == mode_X)
return 0;
if (mode == mode_T)
return 1;
......@@ -107,53 +102,41 @@ static int count_result(const ir_node* irn)
return 1;
}
/* TODO high cost for store trees
*/
static int normal_tree_cost(ir_node* irn, instance_t *inst)
static unsigned normal_tree_cost(ir_node *irn, instance_t *inst)
{
flag_and_cost* fc;
int arity;
ir_node* last;
int n_res;
int cost;
int n_op_res = 0;
if (be_is_Keep(irn))
return 0;
if (is_Proj(irn)) {
if (is_Proj(irn))
return normal_tree_cost(get_Proj_pred(irn), inst);
}
arity = get_irn_arity(irn);
fc = get_irn_fc(irn);
int arity = get_irn_arity(irn);
flag_and_cost *fc = get_irn_fc(irn);
if (fc == NULL) {
irn_cost_pair* costs;
ir_node* block = get_nodes_block(irn);
ir_node *block = get_nodes_block(irn);
fc = OALLOCF(&inst->obst, flag_and_cost, costs, arity);
fc->no_root = 0;
costs = fc->costs;
fc->no_root = false;
irn_cost_pair *costs = fc->costs;
foreach_irn_in(irn, i, pred) {
unsigned cost;
if (is_Phi(irn) || get_irn_mode(pred) == mode_M) {
cost = 0;
} else if (get_nodes_block(pred) != block) {
cost = 1;
} else {
flag_and_cost* pred_fc;
ir_node* real_pred;
cost = normal_tree_cost(pred, inst);
if (!arch_irn_is_ignore(pred)) {
real_pred = (is_Proj(pred) ? get_Proj_pred(pred) : pred);
pred_fc = get_irn_fc(real_pred);
pred_fc->no_root = 1;
#if defined NORMAL_DBG
ir_fprintf(stderr, "%+F says that %+F is no root\n", irn, real_pred);
ir_node *real_pred = is_Proj(pred)
? get_Proj_pred(pred) : pred;
flag_and_cost *pred_fc = get_irn_fc(real_pred);
pred_fc->no_root = true;
#ifdef NORMAL_DBG
ir_fprintf(stderr, "%+F says that %+F is no root\n", irn,
real_pred);
#endif
}
}
......@@ -166,14 +149,14 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
set_irn_link(irn, fc);
}
cost = 0;
last = 0;
unsigned cost = 0;
unsigned n_op_res = 0;
ir_node *last = 0;
for (int i = 0; i < arity; ++i) {
ir_node* op = fc->costs[i].irn;
ir_mode* mode;
ir_node *op = fc->costs[i].irn;
if (op == last)
continue;
mode = get_irn_mode(op);
ir_mode *mode = get_irn_mode(op);
if (mode == mode_M)
continue;
if (arch_irn_is_ignore(op))
......@@ -182,22 +165,21 @@ static int normal_tree_cost(ir_node* irn, instance_t *inst)
last = op;
++n_op_res;
}
n_res = count_result(irn);
unsigned n_res = count_result(irn);
cost = MAX(n_res, cost);
#if defined NORMAL_DBG
ir_fprintf(stderr, "reguse of %+F is %d\n", irn, cost);
#ifdef NORMAL_DBG
ir_fprintf(stderr, "reguse of %+F is %u\n", irn, cost);
#endif
return cost;
}
static void normal_cost_walker(ir_node* irn, void* env)
static void normal_cost_walker(ir_node *irn, void *env)
{
instance_t *inst = (instance_t*)env;
#if defined NORMAL_DBG
#ifdef NORMAL_DBG
ir_fprintf(stderr, "cost walking node %+F\n", irn);
#endif
if (is_Block(irn)) {
......@@ -210,45 +192,43 @@ static void normal_cost_walker(ir_node* irn, void* env)
}
/**
 * Graph-walker callback collecting the scheduling roots of each block.
 *
 * A node is a root if it is a Keep or if no other node in the same block
 * uses it (its flag_and_cost no_root flag was never set). Roots are
 * appended to the per-block array stored in the block's link field.
 *
 * @param irn  the visited node
 * @param env  unused walker environment
 */
static void collect_roots(ir_node *irn, void *env)
{
	(void)env;

	if (!must_be_scheduled(irn))
		return;

	bool is_root = be_is_Keep(irn) || !get_irn_fc(irn)->no_root;
#ifdef NORMAL_DBG
	ir_fprintf(stderr, "%+F is %sroot\n", irn, is_root ? "" : "no ");
#endif

	if (is_root) {
		ir_node  *block = get_nodes_block(irn);
		ir_node **roots = (ir_node**)get_irn_link(block);
		ARR_APP1(ir_node*, roots, irn);
		/* ARR_APP1 may reallocate: store the (possibly moved) array back */
		set_irn_link(block, roots);
	}
}
static ir_node** sched_node(ir_node** sched, ir_node* irn)
static ir_node** sched_node(ir_node**sched, ir_node *irn)
{
if (irn_visited_else_mark(irn)) return sched;
if (irn_visited_else_mark(irn))
return sched;
if (!is_Phi(irn) && !be_is_Keep(irn)) {
ir_node* block = get_nodes_block(irn);
int arity = get_irn_arity(irn);
flag_and_cost* fc = get_irn_fc(irn);
irn_cost_pair* irns = fc->costs;
int i;
for (i = 0; i < arity; ++i) {
ir_node* pred = irns[i].irn;
if (get_nodes_block(pred) != block) continue;
if (get_irn_mode(pred) == mode_M) continue;
if (is_Proj(pred)) pred = get_Proj_pred(pred);
ir_node *block = get_nodes_block(irn);
flag_and_cost *fc = get_irn_fc(irn);
irn_cost_pair *irns = fc->costs;
for (int i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
ir_node *pred = irns[i].irn;
if (get_nodes_block(pred) != block)
continue;
if (get_irn_mode(pred) == mode_M)
continue;
if (is_Proj(pred))
pred = get_Proj_pred(pred);
sched = sched_node(sched, pred);
}
}
......@@ -268,7 +248,7 @@ static int root_cmp(const void* a, const void* b)
} else if (is_irn_forking(b1->irn) && !is_irn_forking(a1->irn)) {
ret = -1;
} else {
ret = b1->cost - a1->cost;
ret = (int)b1->cost - (int)a1->cost;
if (ret == 0) {
/* place live-out nodes later */
ret = (count_result(a1->irn) != 0) - (count_result(b1->irn) != 0);
......@@ -278,89 +258,77 @@ static int root_cmp(const void* a, const void* b)
}
}
}
#if defined NORMAL_DBG
ir_fprintf(stderr, "root %+F %s %+F\n", a1->irn, ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#ifdef NORMAL_DBG
ir_fprintf(stderr, "root %+F %s %+F\n", a1->irn,
ret < 0 ? "<" : ret > 0 ? ">" : "=", b1->irn);
#endif
return ret;
}
/**
 * Block-walker callback producing the schedule of one block.
 *
 * Fetches the block's collected roots, sorts them by height (root_cmp),
 * then emits each root's operand tree via sched_node(). The resulting
 * schedule array replaces the root array in the block's link field; the
 * root array is freed here.
 *
 * @param block  the block to schedule
 * @param env    the ir_heights_t of the graph (passed as void*)
 */
static void normal_sched_block(ir_node *block, void *env)
{
	ir_node     **roots   = (ir_node**)get_irn_link(block);
	ir_heights_t *heights = (ir_heights_t*)env;

#ifdef NORMAL_DBG
	ir_fprintf(stderr, "sched walking block %+F\n", block);
#endif

	int const root_count = ARR_LEN(roots);
	if (root_count == 0) {
#ifdef NORMAL_DBG
		fprintf(stderr, "has no roots\n");
#endif
		return;
	}

	irn_cost_pair *root_costs = ALLOCAN(irn_cost_pair, root_count);
	for (int i = 0; i < root_count; ++i) {
		root_costs[i].irn  = roots[i];
		root_costs[i].cost = get_irn_height(heights, roots[i]);
#ifdef NORMAL_DBG
		ir_fprintf(stderr, "height of %+F is %u\n", roots[i],
		           root_costs[i].cost);
#endif
	}
	QSORT(root_costs, root_count, root_cmp);

#ifdef NORMAL_DBG
	ir_fprintf(stderr, "Root Scheduling of %+F:\n", block);
	for (int i = 0, n = root_count; i < n; ++i) {
		ir_fprintf(stderr, "  %+F\n", root_costs[i].irn);
	}
	fprintf(stderr, "\n");
#endif

	ir_node **sched = NEW_ARR_F(ir_node*, 0);
	for (int i = 0; i < root_count; ++i) {
		ir_node *irn = root_costs[i].irn;
		assert(must_be_scheduled(irn));
		sched = sched_node(sched, irn);
	}
	set_irn_link(block, sched);
	DEL_ARR_F(roots);

#ifdef NORMAL_DBG
	ir_fprintf(stderr, "Scheduling of %+F:\n", block);
	for (int i = 0, n = ARR_LEN(sched); i < n; ++i) {
		ir_fprintf(stderr, "  %+F\n", sched[i]);
	}
	fprintf(stderr, "\n");
#endif
}
static void *normal_init_graph(ir_graph *irg)
{
instance_t *inst = XMALLOC(instance_t);
ir_heights_t *heights;
be_clear_links(irg);
instance_t *inst = XMALLOC(instance_t);
obstack_init(&inst->obst);
inst->irg = irg;
inst->irg = irg;
heights = heights_new(irg);
ir_heights_t *heights = heights_new(irg);
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
irg_walk_graph(irg, normal_cost_walker, NULL, inst);
......@@ -377,15 +345,14 @@ static void *normal_init_graph(ir_graph *irg)
static void *normal_init_block(void *graph_env, ir_node *block)
{
instance_t* inst = (instance_t*)graph_env;
ir_node** sched = (ir_node**)get_irn_link(block);
ir_node* first = NULL;
int i;
instance_t *inst = (instance_t*)graph_env;
ir_node **sched = (ir_node**)get_irn_link(block);
ir_node *first = NULL;
/* turn into a list, so we can easily remove nodes.
The link field is used anyway. */
for (i = ARR_LEN(sched) - 1; i >= 0; --i) {
ir_node* irn = sched[i];
for (int i = ARR_LEN(sched); i-- > 0; ) {
ir_node *irn = sched[i];
if (!is_cfop(irn)) {
set_irn_link(irn, first);
first = irn;
......
......@@ -21,18 +21,18 @@
*/
static ir_node *random_select(void *block_env, ir_nodeset_t *ready_set)
{
int only_branches_left = 1;
(void)block_env;
bool only_branches_left = true;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
if (!is_cfop(irn)) {
only_branches_left = 0;
only_branches_left = false;
break;
}
}
ir_node *rand_node = NULL;
ir_node *rand_node;
if (only_branches_left) {
/* at last: schedule branches */
rand_node = ir_nodeset_first(ready_set);
......
......@@ -10,14 +10,14 @@
*/
#include <time.h>
#include "../adt/util.h"
#include "irnode_t.h"
#include "irgwalk.h"
#include "irhooks.h"
#include "error.h"
#include "execfreq.h"
#include "firmstat_t.h"
#include "error.h"
#include "irgwalk.h"
#include "irhooks.h"
#include "irnode_t.h"
#include "statev_t.h"
#include "util.h"
#include "bearch.h"
#include "beirg.h"
......@@ -26,15 +26,13 @@
#include "besched.h"
#include "benode.h"
/** Walker environment for collecting register-pressure statistics. */
typedef struct pressure_walker_env_t pressure_walker_env_t;
struct pressure_walker_env_t {
	ir_graph *irg;          /**< the graph being analysed */
	be_lv_t  *lv;           /**< liveness information of the graph */
	double    insn_count;   /**< number of instructions seen */
	double    regpressure;  /**< accumulated register pressure */
	unsigned  max_pressure; /**< maximum pressure observed in any block */
	const arch_register_class_t *cls; /**< register class under analysis */
};
......@@ -42,13 +40,10 @@ static void check_reg_pressure_class(pressure_walker_env_t *env,
ir_node *block,
const arch_register_class_t *cls)
{
ir_graph *irg = env->irg;
ir_nodeset_t live_nodes;
size_t max_live;
ir_nodeset_init(&live_nodes);
be_liveness_end_of_block(env->lv, cls, block, &live_nodes);
max_live = ir_nodeset_size(&live_nodes);
unsigned max_live = ir_nodeset_size(&live_nodes);
env->regpressure += max_live;
sched_foreach_reverse(block, irn) {
......@@ -67,7 +62,7 @@ static void check_reg_pressure_class(pressure_walker_env_t *env,
if (max_live > env->max_pressure)
env->max_pressure = max_live;
stat_be_block_regpressure(irg, block, max_live, cls->name);
stat_be_block_regpressure(env->irg, block, max_live, cls->name);
ir_nodeset_destroy(&live_nodes);
}
......@@ -80,28 +75,23 @@ static void stat_reg_pressure_block(ir_node *block, void *data)
void be_do_stat_reg_pressure(ir_graph *irg, const arch_register_class_t *cls)
{
pressure_walker_env_t env;
double average_pressure;
be_assure_live_sets(irg);
pressure_walker_env_t env;
env.irg = irg;
env.insn_count = 0;
env.max_pressure = 0;
env.regpressure = 0;
be_assure_live_sets(irg);
env.lv = be_get_irg_liveness(irg);
env.cls = cls;
/* Collect register pressure information for each block */