Commit 7c767284 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

Remove the unused parameter const arch_env_t *env from arch_irn_classify() and arch_irn_class_is().

[r22702]
parent 0dad4e22
......@@ -232,10 +232,9 @@ void arch_set_irn_register(ir_node *irn, const arch_register_t *reg)
ops->set_irn_reg(irn, reg);
}
extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn)
arch_irn_class_t arch_irn_classify(const ir_node *irn)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
(void)env; // TODO remove parameter
return ops->classify(irn);
}
......
......@@ -217,13 +217,12 @@ void arch_set_irn_register(ir_node *irn, const arch_register_t *reg);
/**
 * Classify a node.
 * @param irn The node.
 * @return A classification of the node.
 */
arch_irn_class_t arch_irn_classify(const ir_node *irn);

/** Test whether the classification of @p irn contains the class
 *  arch_irn_class_<irn_class> (token-pasted onto the prefix). */
#define arch_irn_class_is(irn, irn_class) ((arch_irn_classify(irn) & arch_irn_class_ ## irn_class) != 0)
/**
* Get the flags of a node.
......
......@@ -228,7 +228,7 @@ int co_is_optimizable_root(const copy_opt_t *co, ir_node *irn) {
return 0;
req = arch_get_register_req(irn, -1);
if (is_Reg_Phi(irn) || is_Perm_Proj(co->aenv, irn) || is_2addr_code(req))
if (is_Reg_Phi(irn) || is_Perm_Proj(irn) || is_2addr_code(req))
return 1;
return 0;
......@@ -438,7 +438,7 @@ static void co_collect_units(ir_node *irn, void *env) {
}
unit->nodes = XREALLOC(unit->nodes, ir_node*, unit->node_count);
unit->costs = XREALLOC(unit->costs, int, unit->node_count);
} else if (is_Perm_Proj(co->aenv, irn)) {
} else if (is_Perm_Proj(irn)) {
/* Proj of a perm with corresponding arg */
assert(!nodes_interfere(co->cenv, irn, get_Perm_src(irn)));
unit->nodes = XMALLOCN(ir_node*, 2);
......@@ -783,8 +783,7 @@ static void build_graph_walker(ir_node *irn, void *env) {
ir_node *arg = get_irn_n(irn, pos);
add_edges(co, irn, arg, co->get_costs(co, irn, arg, pos));
}
}
else if (is_Perm_Proj(co->aenv, irn)) { /* Perms */
} else if (is_Perm_Proj(irn)) { /* Perms */
ir_node *arg = get_Perm_src(irn);
add_edges(co, irn, arg, co->get_costs(co, irn, arg, 0));
}
......
......@@ -67,9 +67,9 @@ struct _copy_opt_t {
/** A Phi over a data mode, i.e. one that carries register values. */
#define is_Reg_Phi(irn)     (is_Phi(irn) && mode_is_data(get_irn_mode(irn)))
/** The argument of the Perm that corresponds to this Proj. */
#define get_Perm_src(irn)   (get_irn_n(get_Proj_pred(irn), get_Proj_proj(irn)))
/** True if the node is classified as a register permutation. */
#define is_Perm(irn)        (arch_irn_classify(irn) == arch_irn_class_perm)
/** True if the node is a Proj of a Perm node. */
#define is_Perm_Proj(irn)   (is_Proj(irn) && is_Perm(get_Proj_pred(irn)))
static INLINE int is_2addr_code(const arch_register_req_t *req)
{
......
......@@ -139,8 +139,10 @@ void copystat_reset(void) {
/**
* Collect general data
*/
static void irg_stat_walker(ir_node *node, void *env) {
arch_env_t *arch_env = env;
static void irg_stat_walker(ir_node *node, void *env)
{
(void)env;
curr_vals[I_ALL_NODES]++; /* count all nodes */
if (is_Block(node)) /* count all blocks */
......@@ -149,14 +151,15 @@ static void irg_stat_walker(ir_node *node, void *env) {
if (is_Reg_Phi(node)) /* collect phis */
ir_nodeset_insert(all_phi_nodes, node);
if (is_Perm_Proj(arch_env, node))
if (is_Perm_Proj(node))
ir_nodeset_insert(all_copy_nodes, node);
/* TODO: Add 2-Addr-Code nodes */
}
/**
 * Walk @p irg collecting copy statistics and remember it as the last
 * processed graph.
 *
 * @param irg  The graph to gather statistics for.
 */
static void copystat_collect_irg(ir_graph *irg)
{
	irg_walk_graph(irg, irg_stat_walker, NULL, NULL);
	last_irg = irg;
}
......@@ -289,14 +292,13 @@ static void stat_phi_class(be_chordal_env_t *chordal_env, ir_node **pc) {
static void copystat_collect_cls(be_chordal_env_t *cenv) {
ir_graph *irg = cenv->irg;
arch_env_t *aenv = cenv->birg->main_env->arch_env;
ir_node *n, **pc;
phi_classes_t *pc_obj;
pset *all_phi_classes;
ir_nodeset_iterator_t iter;
copystat_reset();
copystat_collect_irg(irg, aenv);
copystat_collect_irg(irg);
/* compute the Phi classes of the collected Phis */
pc_obj = phi_class_new_from_set(cenv->irg, all_phi_nodes, 0);
......
......@@ -105,9 +105,10 @@ void sched_renumber(const ir_node *block)
}
}
/**
 * Predicate for sched_skip(): matches control flow nodes.
 *
 * @param irn   The node to test.
 * @param data  Unused callback environment.
 * @return      Non-zero if @p irn is classified as a branch.
 */
int sched_skip_cf_predicator(const ir_node *irn, void *data)
{
	(void)data;
	return arch_irn_class_is(irn, branch);
}
int sched_skip_phi_predicator(const ir_node *irn, void *data) {
......
......@@ -72,7 +72,7 @@ static ir_node *normal_select(void *block_env, ir_nodeset_t *ready_set,
for (; sched_count-- != 0; ++sched) {
ir_node* irn = *sched;
if (ir_nodeset_contains(ready_set, irn) &&
!arch_irn_class_is(cur_arch_env, irn, branch)) {
!arch_irn_class_is(irn, branch)) {
#if defined NORMAL_DBG
ir_fprintf(stderr, "scheduling %+F\n", irn);
#endif
......
......@@ -41,15 +41,15 @@ static ir_node *random_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
{
ir_nodeset_iterator_t iter;
const arch_env_t *arch_env = block_env;
ir_node *irn = NULL;
int only_branches_left = 1;
(void) live_set;
(void)block_env;
(void)live_set;
/* assure that branches and constants are executed last */
ir_nodeset_iterator_init(&iter, ready_set);
while( (irn = ir_nodeset_iterator_next(&iter)) != NULL) {
if (! arch_irn_class_is(arch_env, irn, branch)) {
if (!arch_irn_class_is(irn, branch)) {
only_branches_left = 0;
break;
}
......@@ -71,7 +71,7 @@ static ir_node *random_select(void *block_env, ir_nodeset_t *ready_set,
}
++i;
}
} while(arch_irn_class_is(arch_env, irn, branch));
} while (arch_irn_class_is(irn, branch));
}
return irn;
......@@ -79,18 +79,20 @@ static ir_node *random_select(void *block_env, ir_nodeset_t *ready_set,
/**
 * Per-graph initialisation of the random selector: seed the RNG.
 *
 * @param vtab  Unused selector vtable.
 * @param birg  Unused backend irg.
 * @return      NULL; the selector keeps no per-graph state.
 */
static void *random_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
{
	(void)vtab;
	(void)birg;

	/* Using time(NULL) as a seed here gives really random results,
	   but is NOT deterministic which makes debugging impossible.
	   Moreover no-one want non-deterministic compilers ... */
	srand(0x4711);
	return NULL;
}
/**
 * Per-block initialisation of the random selector.
 *
 * @param graph_env  Unused per-graph environment.
 * @param block      Unused block.
 * @return           NULL; the selector keeps no per-block state.
 */
static void *random_init_block(void *graph_env, ir_node *block)
{
	(void)graph_env;
	(void)block;
	return NULL;
}
const list_sched_selector_t random_selector = {
......
......@@ -294,7 +294,7 @@ static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set,
Ignore branch instructions for the time being.
They should only be scheduled if there is nothing else.
*/
if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
if (!arch_irn_class_is(irn, branch)) {
int costs = reg_pr_costs(env, irn);
if (costs <= curr_cost) {
res = irn;
......
......@@ -428,7 +428,7 @@ static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
sched_timestep_t d;
if (arch_irn_class_is(env->arch_env, curr, branch)) {
if (arch_irn_class_is(curr, branch)) {
/* assure, that branches can be executed last */
d = 0;
}
......@@ -526,13 +526,14 @@ static void trace_free(void *data) {
/**
* Simple selector. Just assure that jumps are scheduled last.
*/
static ir_node *basic_selection(const arch_env_t *arch_env, ir_nodeset_t *ready_set) {
static ir_node *basic_selection(ir_nodeset_t *ready_set)
{
ir_node *irn = NULL;
ir_nodeset_iterator_t iter;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
if (! arch_irn_class_is(arch_env, irn, branch)) {
if (!arch_irn_class_is(irn, branch)) {
return irn;
}
}
......@@ -584,7 +585,7 @@ static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set, ir_node
if (cnt == 1) {
irn = get_nodeset_node(&ecands);
if (arch_irn_class_is(env->arch_env, irn, branch)) {
if (arch_irn_class_is(irn, branch)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
......@@ -592,12 +593,12 @@ static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set, ir_node
}
else if (cnt > 1) {
DB((env->dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
irn = basic_selection(env->arch_env, &ecands);
irn = basic_selection(&ecands);
}
else {
force_mcands:
DB((env->dbg, LEVEL_3, "\tmcand = %d\n", ir_nodeset_size(&mcands)));
irn = basic_selection(env->arch_env, &mcands);
irn = basic_selection(&mcands);
}
}
......@@ -660,7 +661,7 @@ static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns, ir_nodeset_t
/* priority based selection, heuristic inspired by mueller diss */
foreach_ir_nodeset(ns, irn, iter) {
/* make sure that branches are scheduled last */
if (! arch_irn_class_is(trace_env->arch_env, irn, branch)) {
if (!arch_irn_class_is(irn, branch)) {
int rdiff = get_irn_reg_diff(trace_env, irn);
int sign = rdiff < 0;
int chg = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
......@@ -700,7 +701,7 @@ static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns, ir_nodeset_t
DBG((trace_env->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
}
else {
cand = basic_selection(trace_env->arch_env, ns);
cand = basic_selection(ns);
}
return cand;
......
......@@ -45,14 +45,14 @@
static ir_node *trivial_select(void *block_env, ir_nodeset_t *ready_set,
ir_nodeset_t *live_set)
{
const arch_env_t *arch_env = block_env;
ir_node *irn = NULL;
ir_nodeset_iterator_t iter;
(void) live_set;
ir_node *irn;
ir_nodeset_iterator_t iter;
(void)block_env;
(void)live_set;
/* assure that branches and constants are executed last */
foreach_ir_nodeset(ready_set, irn, iter) {
if (! arch_irn_class_is(arch_env, irn, branch)) {
if (!arch_irn_class_is(irn, branch)) {
return irn;
}
}
......@@ -66,14 +66,16 @@ static ir_node *trivial_select(void *block_env, ir_nodeset_t *ready_set,
/**
 * Per-graph initialisation of the trivial selector.
 *
 * @param vtab  Unused selector vtable.
 * @param birg  Unused backend irg.
 * @return      NULL; the selector keeps no per-graph state.
 */
static void *trivial_init_graph(const list_sched_selector_t *vtab, const be_irg_t *birg)
{
	(void)vtab;
	(void)birg;
	return NULL;
}
/**
 * Per-block initialisation of the trivial selector.
 *
 * @param graph_env  Unused per-graph environment.
 * @param block      Unused block.
 * @return           NULL; the selector keeps no per-block state.
 */
static void *trivial_init_block(void *graph_env, ir_node *block)
{
	(void)graph_env;
	(void)block;
	return NULL;
}
const list_sched_selector_t trivial_selector = {
......
......@@ -814,7 +814,6 @@ void be_assign_entities(be_fec_env_t *env)
static void collect_spills_walker(ir_node *node, void *data)
{
be_fec_env_t *env = data;
const arch_env_t *arch_env = env->arch_env;
const ir_mode *mode;
const arch_register_class_t *cls;
int align;
......@@ -823,12 +822,12 @@ static void collect_spills_walker(ir_node *node, void *data)
if (is_Proj(node))
return;
if (!arch_irn_class_is(arch_env, node, reload))
if (!arch_irn_class_is(node, reload))
return;
mode = get_irn_mode(node);
cls = arch_get_irn_reg_class(node, -1);
align = arch_env_get_reg_class_alignment(arch_env, cls);
align = arch_env_get_reg_class_alignment(env->arch_env, cls);
be_node_needs_frame_entity(env, node, mode, align);
}
......
......@@ -158,8 +158,7 @@ double be_estimate_irg_costs(ir_graph *irg, const arch_env_t *arch_env, ir_exec_
static const arch_env_t *arch_env;
static be_node_stats_t *stats;
static be_node_stats_t *stats;
static void node_stat_walker(ir_node *irn, void *data)
{
......@@ -173,7 +172,7 @@ static void node_stat_walker(ir_node *irn, void *data)
(*stats)[BE_STAT_PHIS]++;
}
} else {
arch_irn_class_t classify = arch_irn_classify(arch_env, irn);
arch_irn_class_t classify = arch_irn_classify(irn);
if(classify & arch_irn_class_spill)
(*stats)[BE_STAT_SPILLS]++;
......@@ -190,8 +189,7 @@ static void node_stat_walker(ir_node *irn, void *data)
void be_collect_node_stats(be_node_stats_t *new_stats, be_irg_t *birg)
{
arch_env = birg->main_env->arch_env;
stats = new_stats;
stats = new_stats;
memset(stats, 0, sizeof(*stats));
irg_walk_graph(birg->irg, NULL, node_stat_walker, NULL);
......
......@@ -527,13 +527,12 @@ static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *rel
*/
static void collect_spills_walker(ir_node *node, void *data) {
be_verify_spillslots_env_t *env = data;
const arch_env_t *arch_env = env->arch_env;
/* @@@ ia32_classify returns classification of Proj_pred :-/ */
if(is_Proj(node))
return;
if(arch_irn_class_is(arch_env, node, reload)) {
if (arch_irn_class_is(node, reload)) {
ir_node *spill = get_memory_edge(node);
ir_entity *ent;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment