Commit a4cf97f9 authored by Christian Würdig's avatar Christian Würdig
Browse files

irn classify is now a mask

added convenience macro for checking against a certain irn class
parent 62f064cf
......@@ -177,17 +177,17 @@ extern char *arch_register_req_format(char *buf, size_t len, const arch_register
* Certain node classes which are relevant for the register allocator.
*/
typedef enum _arch_irn_class_t {
arch_irn_class_normal,
arch_irn_class_spill,
arch_irn_class_reload,
arch_irn_class_copy,
arch_irn_class_perm,
arch_irn_class_branch,
arch_irn_class_call,
arch_irn_class_const,
arch_irn_class_load,
arch_irn_class_store,
arch_irn_class_stackparam,
arch_irn_class_normal = 1 << 0,
arch_irn_class_spill = 1 << 1,
arch_irn_class_reload = 1 << 2,
arch_irn_class_copy = 1 << 3,
arch_irn_class_perm = 1 << 4,
arch_irn_class_branch = 1 << 5,
arch_irn_class_call = 1 << 6,
arch_irn_class_const = 1 << 7,
arch_irn_class_load = 1 << 8,
arch_irn_class_store = 1 << 9,
arch_irn_class_stackparam = 1 << 10,
} arch_irn_class_t;
/**
......@@ -456,6 +456,8 @@ extern void arch_set_irn_register(const arch_env_t *env, ir_node *irn,
*/
extern arch_irn_class_t arch_irn_classify(const arch_env_t *env, const ir_node *irn);
#define arch_irn_class_is(env, irn, irn_class) ((arch_irn_classify(env, irn) & arch_irn_class_ ## irn_class) != 0)
/**
* Get the flags of a node.
* @param env The architecture environment.
......
......@@ -102,7 +102,7 @@ static ir_node *trivial_select(void *block_env, nodeset *ready_set)
for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);
if (irn_class != arch_irn_class_branch && (const_last ? (irn_class != arch_irn_class_const) : 1)) {
if (! arch_irn_class_is(arch_env, irn, branch) && (const_last ? (! arch_irn_class_is(arch_env, irn, const)) : 1)) {
nodeset_break(ready_set);
return irn;
}
......@@ -111,7 +111,7 @@ static ir_node *trivial_select(void *block_env, nodeset *ready_set)
/* assure that constants are executed before branches */
if (const_last) {
for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
if (! arch_irn_class_is(arch_env, irn, branch)) {
nodeset_break(ready_set);
return irn;
}
......@@ -371,7 +371,7 @@ static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
Ignore branch instructions for the time being.
They should only be scheduled if there is nothing else.
*/
if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
int costs = reg_pr_costs(env, irn);
if (costs <= curr_cost) {
res = irn;
......@@ -777,7 +777,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
FIRM_DBG_REGISTER(xxxdbg, "firm.be.sched");
// firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
// firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
if (selector->init_block)
be.selector_block_env = selector->init_block(env->selector_env, block);
......@@ -807,12 +807,12 @@ static void list_sched_block(ir_node *block, void *env_ptr)
root = preord;
/* Third step: calculate the Delay. Note that our
* list is now in pre-order, starting at root
*/
* list is now in pre-order, starting at root
*/
for (curr = root; curr; curr = get_irn_link(curr)) {
sched_timestep_t d;
if (arch_irn_classify(env->arch_env, curr) == arch_irn_class_branch) {
if (arch_irn_class_is(env->arch_env, curr, branch)) {
/* assure, that branches can be executed last */
d = 0;
}
......@@ -851,7 +851,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
if (is_Phi(irn)) {
/* Phi functions are scheduled immediately, since they only transfer
* data flow from the predecessors to this block. */
* data flow from the predecessors to this block. */
/* Increase the time step. */
be.curr_time += get_irn_etime(&be, irn);
......@@ -867,7 +867,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
}
else {
/* Other nodes must have all operands in other blocks to be made
* ready */
* ready */
int ready = 1;
/* Check, if the operands of a node are not local to this block */
......@@ -907,10 +907,10 @@ static void list_sched_block(ir_node *block, void *env_ptr)
/* calculate mcands and ecands */
foreach_nodeset(be.cands, irn) {
if (be_is_Keep(irn)) {
nodeset_break(be.cands);
break;
}
if (be_is_Keep(irn)) {
nodeset_break(be.cands);
break;
}
if (get_irn_delay(&be, irn) == max_delay) {
nodeset_insert(mcands, irn);
if (get_irn_etime(&be, irn) <= be.curr_time)
......@@ -918,42 +918,39 @@ static void list_sched_block(ir_node *block, void *env_ptr)
}
}
if (irn) {
/* Keeps must be immediately scheduled */
}
else {
DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
/* select a node to be scheduled and check if it was ready */
if (nodeset_count(mcands) == 1) {
DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
irn = nodeset_first(mcands);
}
else {
int cnt = nodeset_count(ecands);
if (cnt == 1) {
arch_irn_class_t irn_class;
irn = nodeset_first(ecands);
irn_class = arch_irn_classify(env->arch_env, irn);
if (irn_class == arch_irn_class_branch) {
if (irn) {
/* Keeps must be immediately scheduled */
}
else {
DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
/* select a node to be scheduled and check if it was ready */
if (nodeset_count(mcands) == 1) {
DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
irn = nodeset_first(mcands);
}
else {
int cnt = nodeset_count(ecands);
if (cnt == 1) {
irn = nodeset_first(ecands);
if (arch_irn_class_is(env->arch_env, irn, branch)) {
/* BEWARE: don't select a JUMP if others are still possible */
goto force_mcands;
}
DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));
}
else if (cnt > 1) {
DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
irn = select_node_heuristic(&be, ecands);
}
else {
DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));
}
else if (cnt > 1) {
DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
irn = select_node_heuristic(&be, ecands);
}
else {
force_mcands:
DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
irn = select_node_heuristic(&be, mcands);
}
}
}
DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
irn = select_node_heuristic(&be, mcands);
}
}
}
del_nodeset(mcands);
del_nodeset(ecands);
......
......@@ -36,7 +36,7 @@
#endif
#undef is_Perm
#define is_Perm(arch_env, irn) (arch_irn_classify(arch_env, irn) == arch_irn_class_perm)
#define is_Perm(arch_env, irn) (arch_irn_class_is(arch_env, irn, perm))
/* associates op with its copy and CopyKeep */
typedef struct {
......
......@@ -403,7 +403,7 @@ static void dump_affinities_walker(ir_node *irn, void *env) {
vi1 = be_get_var_info(irn);
/* copies have affinities */
if (arch_irn_classify(raenv->aenv, irn) == arch_irn_class_copy) {
if (arch_irn_class_is(raenv->aenv, irn, copy)) {
ir_node *other = be_get_Copy_op(irn);
if (! arch_irn_is(raenv->aenv, other, ignore)) {
......
......@@ -183,7 +183,7 @@ int sched_verify_irg(ir_graph *irg)
int sched_skip_cf_predicator(const ir_node *irn, void *data) {
arch_env_t *ae = data;
return arch_irn_classify(ae, irn) == arch_irn_class_branch;
return arch_irn_class_is(ae, irn, branch);
}
int sched_skip_phi_predicator(const ir_node *irn, void *data) {
......
......@@ -462,7 +462,7 @@ static void Cli_ents(int start, int end, int nr, void *env) {
cli->ents = pset_new_ptr_default();
foreach_pset(surrounder, ent) {pset_insert_ptr(cli->ents,ent);}
pmap_insert(spi->Cli_ents, nr, cli);
pmap_insert(spi->Cli_ents, (void *)nr, cli);
del_pset(surrounder);
}
......
......@@ -35,7 +35,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
#define get_reg(irn) arch_get_irn_register(get_chordal_arch(chordal_env), irn)
#define set_reg(irn, reg) arch_set_irn_register(get_chordal_arch(chordal_env), irn, reg)
#define is_Perm(irn) (arch_irn_classify(arch_env, irn) == arch_irn_class_perm)
#define is_Perm(irn) (arch_irn_class_is(arch_env, irn, perm))
#define get_reg_cls(irn) (arch_get_irn_reg_class(arch_env, irn, -1))
#define is_curr_reg_class(irn) (get_reg_cls(p) == chordal_env->cls)
......
......@@ -169,9 +169,10 @@ void be_do_stat_permcycle(const char *class_name, ir_node *perm, ir_node *block,
* Updates nodes statistics.
*/
static void do_nodes_stat(ir_node *irn, void *env) {
be_stat_phase_t *phase = env;
ir_mode *mode;
opcode opc;
be_stat_phase_t *phase = env;
ir_mode *mode;
opcode opc;
arch_irn_class_t irn_class;
if (is_Block(irn))
return;
......@@ -204,23 +205,17 @@ static void do_nodes_stat(ir_node *irn, void *env) {
else if (opc == iro_Store)
phase->num_store++;
switch (arch_irn_classify(phase->arch_env, irn)) {
case arch_irn_class_spill:
phase->num_spill++;
break;
case arch_irn_class_reload:
phase->num_reload++;
break;
case arch_irn_class_stackparam:
case arch_irn_class_load:
phase->num_load++;
break;
case arch_irn_class_store:
phase->num_store++;
break;
default:
break;
}
irn_class = arch_irn_classify(phase->arch_env, irn);
if (irn_class & arch_irn_class_spill)
phase->num_spill++;
else if (irn_class & arch_irn_class_reload)
phase->num_reload++;
else if (irn_class & arch_irn_class_stackparam)
phase->num_load++;
else if (irn_class & arch_irn_class_load)
phase->num_load++;
else if (irn_class & arch_irn_class_store)
phase->num_store++;
}
/**
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment