Commit c3ce3973 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

bearch: Use arch_register_req_is().

parent 256f62cc
......@@ -186,7 +186,7 @@ bool arch_reg_is_allocatable(const arch_register_req_t *req,
return false;
if (reg->type & arch_register_type_virtual)
return true;
if (req->type & arch_register_req_type_limited)
if (arch_register_req_is(req, limited))
return rbitset_is_set(req->limited, reg->index);
return true;
}
......
......@@ -570,16 +570,14 @@ struct arch_env_t {
/**
 * Check whether the register requirement of @p irn carries the "ignore"
 * flag, i.e. the node is to be skipped by register allocation.
 * NOTE(review): the scraped diff left both the old bitmask test and its
 * arch_register_req_is() replacement in place; only the new form is kept.
 */
static inline bool arch_irn_is_ignore(const ir_node *irn)
{
	const arch_register_req_t *req = arch_get_irn_register_req(irn);
	return arch_register_req_is(req, ignore);
}
/**
 * Check whether @p node should be considered by the register allocator
 * for register class @p cls: its requirement must name that class and
 * must not carry the "ignore" flag.
 * NOTE(review): the scraped diff left both the old multi-line return and
 * its one-line replacement in place; only the new form is kept.
 */
static inline bool arch_irn_consider_in_reg_alloc(
		const arch_register_class_t *cls, const ir_node *node)
{
	const arch_register_req_t *req = arch_get_irn_register_req(node);
	return req->cls == cls && !arch_register_req_is(req, ignore);
}
/**
......@@ -607,11 +605,11 @@ static inline bool arch_irn_consider_in_reg_alloc(
} \
} while (0)
/*
 * Iterate over the definitions of @p node belonging to register class
 * @p ccls, executing @p code for each, while skipping definitions whose
 * requirement (req_) carries the "ignore" flag.
 * NOTE(review): the scraped diff contained the macro twice (old and new
 * version back to back), which would be an invalid redefinition; only
 * the new arch_register_req_is() form is kept.
 */
#define be_foreach_definition(node, ccls, value, code) \
	be_foreach_definition_(node, ccls, value, \
		if (arch_register_req_is(req_, ignore)) \
			continue; \
		code \
	)
static inline const arch_register_class_t *arch_get_irn_reg_class(
......
......@@ -70,11 +70,8 @@ static unsigned check_alignment_constraints(ir_node *node)
{
const arch_register_req_t *req = arch_get_irn_register_req(node);
// For larger than 1 variables, support only aligned constraints
assert(((!(req->type & arch_register_req_type_aligned)
&& req->width == 1)
|| (req->type & arch_register_req_type_aligned))
&& "Unaligned large (width > 1) variables not supported");
return (req->type & arch_register_req_type_aligned) && req->width > 1;
assert((arch_register_req_is(req, aligned) || req->width == 1) && "Unaligned large (width > 1) variables not supported");
return arch_register_req_is(req, aligned) && req->width > 1;
}
static void make_color_var_name(char *buf, size_t buf_size,
......
......@@ -323,7 +323,8 @@ static void int_comp_rec(be_ifg_t *ifg, ir_node *n, bitset_t *seen)
if (bitset_is_set(seen, get_irn_idx(m)))
continue;
if (arch_get_irn_register_req(m)->type & arch_register_req_type_ignore)
arch_register_req_t const *const req = arch_get_irn_register_req(m);
if (arch_register_req_is(req, ignore))
continue;
bitset_set(seen, get_irn_idx(m));
......@@ -344,7 +345,8 @@ static int int_component_stat(ir_graph *irg, be_ifg_t *ifg)
if (bitset_is_set(seen, get_irn_idx(n)))
continue;
if (arch_get_irn_register_req(n)->type & arch_register_req_type_ignore)
arch_register_req_t const *const req = arch_get_irn_register_req(n);
if (arch_register_req_is(req, ignore))
continue;
++n_comp;
......
......@@ -518,7 +518,7 @@ void be_liveness_transfer(const arch_register_class_t *cls,
continue;
ir_node *op = get_irn_n(node, i);
const arch_register_req_t *op_req = arch_get_irn_register_req(op);
if (op_req->type & arch_register_req_type_ignore)
if (arch_register_req_is(op_req, ignore))
continue;
ir_nodeset_insert(nodeset, op);
}
......
......@@ -983,9 +983,8 @@ static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
/* do a naive linear search... */
for (i = 0; i < (int)n_outs; ++i) {
const arch_register_req_t *out_req
= arch_get_irn_register_req_out(start, i);
if (! (out_req->type & arch_register_req_type_limited))
arch_register_req_t const *const out_req = arch_get_irn_register_req_out(start, i);
if (!arch_register_req_is(out_req, limited))
continue;
if (out_req->cls != reg->reg_class)
continue;
......@@ -1020,7 +1019,7 @@ int be_find_return_reg_input(ir_node *ret, const arch_register_t *reg)
/* do a naive linear search... */
for (i = 0; i < arity; ++i) {
const arch_register_req_t *req = arch_get_irn_register_req_in(ret, i);
if (! (req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
if (req->cls != reg->reg_class)
continue;
......
......@@ -257,13 +257,13 @@ static void check_defs(const ir_nodeset_t *live_nodes, float weight,
ir_node *node)
{
const arch_register_req_t *req = arch_get_irn_register_req(node);
if (req->type & arch_register_req_type_limited) {
if (arch_register_req_is(req, limited)) {
const unsigned *limited = req->limited;
float penalty = weight * DEF_FACTOR;
give_penalties_for_limits(live_nodes, penalty, limited, node);
}
if (req->type & arch_register_req_type_should_be_same) {
if (arch_register_req_is(req, should_be_same)) {
ir_node *insn = skip_Proj(node);
allocation_info_t *info = get_allocation_info(node);
int arity = get_irn_arity(insn);
......@@ -344,7 +344,7 @@ static void analyze_block(ir_node *block, void *data)
const arch_register_req_t *req
= arch_get_irn_register_req_in(node, i);
if (!(req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
......@@ -361,7 +361,7 @@ static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
const arch_register_req_t *req = arch_get_irn_register_req(node);
/* should be same constraint? */
if (req->type & arch_register_req_type_should_be_same) {
if (arch_register_req_is(req, should_be_same)) {
const ir_node *insn = skip_Proj_const(node);
int arity = get_irn_arity(insn);
unsigned node_idx = get_irn_idx(node);
......@@ -697,12 +697,12 @@ static void assign_reg(const ir_node *block, ir_node *node,
}
/* ignore reqs must be preassigned */
assert (! (req->type & arch_register_req_type_ignore));
assert(!arch_register_req_is(req, ignore));
/* give should_be_same boni */
allocation_info_t *info = get_allocation_info(node);
ir_node *in_node = skip_Proj(node);
if (req->type & arch_register_req_type_should_be_same) {
if (arch_register_req_is(req, should_be_same)) {
float weight = (float)get_block_execfreq(block);
int arity = get_irn_arity(in_node);
......@@ -738,7 +738,7 @@ static void assign_reg(const ir_node *block, ir_node *node,
DB((dbg, LEVEL_2, "\n"));
const unsigned *allowed_regs = normal_regs;
if (req->type & arch_register_req_type_limited) {
if (arch_register_req_is(req, limited)) {
allowed_regs = req->limited;
}
......@@ -750,8 +750,7 @@ static void assign_reg(const ir_node *block, ir_node *node,
continue;
/* alignment constraint? */
if (width > 1) {
if ((req->type & arch_register_req_type_aligned)
&& (final_reg_index % width) != 0)
if (arch_register_req_is(req, aligned) && (final_reg_index % width) != 0)
continue;
bool fine = true;
for (unsigned r0 = r+1; r0 < r+width; ++r0) {
......@@ -1042,7 +1041,7 @@ static void solve_lpp(ir_nodeset_t *live_nodes, ir_node *node,
continue;
const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
if (!(req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
......@@ -1180,13 +1179,13 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
double_width = true;
const arch_register_t *reg = arch_get_irn_register(op);
unsigned reg_index = reg->index;
if (req->type & arch_register_req_type_aligned) {
if (arch_register_req_is(req, aligned)) {
if (!is_aligned(reg_index, req->width)) {
good = false;
continue;
}
}
if (!(req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
......@@ -1203,7 +1202,7 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
(void)value;
if (req_->width > 1)
double_width = true;
if (! (req_->type & arch_register_req_type_limited))
if (!arch_register_req_is(req_, limited))
continue;
if (live_through_regs == NULL) {
live_through_regs = rbitset_alloca(n_regs);
......@@ -1262,7 +1261,7 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
continue;
const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
if (!(req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
const unsigned *limited = req->limited;
......@@ -1571,7 +1570,7 @@ static void allocate_coalesce_block(ir_node *block, void *data)
if (req->cls != cls)
continue;
if (arch_register_req_is(req, ignore)) {
allocation_info_t *info = get_allocation_info(node);
info->current_value = node;
......
......@@ -116,7 +116,8 @@ static int count_result(const ir_node* irn)
if (mode == mode_T)
return 1;
if (arch_get_irn_register_req(irn)->type & arch_register_req_type_ignore)
arch_register_req_t const *const req = arch_get_irn_register_req(irn);
if (arch_register_req_is(req, ignore))
return 0;
return 1;
......
......@@ -88,7 +88,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
rbitset_is_set(birg->allocatable_regs, reg->global_index))
continue;
if (! (req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
if (rbitset_is_set(req->limited, reg->index))
continue;
......@@ -111,7 +111,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
if (req->cls != cls)
continue;
if (! (req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
in = get_irn_n(node, i);
......@@ -125,7 +125,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
req2 = arch_get_irn_register_req_in(node, i2);
if (req2->cls != cls)
continue;
if (! (req2->type & arch_register_req_type_limited))
if (!arch_register_req_is(req2, limited))
continue;
in2 = get_irn_n(node, i2);
......@@ -151,7 +151,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
/* collect all registers occurring in out constraints. */
be_foreach_definition(node, cls, def,
(void)def;
if (! (req_->type & arch_register_req_type_limited))
if (!arch_register_req_is(req_, limited))
continue;
if (def_constr == NULL) {
def_constr = rbitset_alloca(cls->n_regs);
......@@ -183,7 +183,7 @@ static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
req = arch_get_irn_register_req_in(node, i);
if (req->cls != cls)
continue;
if (!(req->type & arch_register_req_type_limited))
if (!arch_register_req_is(req, limited))
continue;
in = get_irn_n(node, i);
......
......@@ -388,8 +388,7 @@ static void ssa_destruction_check_walker(ir_node *bl, void *data)
for (i = 0, max = get_irn_arity(phi); i < max; ++i) {
ir_node *arg = get_irn_n(phi, i);
const arch_register_req_t *req = arch_get_irn_register_req(arg);
if (req->type & arch_register_req_type_ignore)
if (arch_register_req_is(req, ignore))
continue;
arg_reg = arch_get_irn_register(arg);
......
......@@ -405,15 +405,13 @@ static int ia32_possible_memory_operand(const ir_node *irn, unsigned int i)
case ia32_am_binary:
switch (i) {
case n_ia32_binary_left: {
const arch_register_req_t *req;
if (!is_ia32_commutative(irn))
return 0;
/* we can't swap left/right for limited registers
* (As this (currently) breaks constraint handling copies)
*/
req = arch_get_irn_register_req_in(irn, n_ia32_binary_left);
if (req->type & arch_register_req_type_limited)
* (As this (currently) breaks constraint handling copies) */
arch_register_req_t const *const req = arch_get_irn_register_req_in(irn, n_ia32_binary_left);
if (arch_register_req_is(req, limited))
return 0;
break;
}
......
......@@ -393,8 +393,8 @@ static bool can_match(const arch_register_req_t *in,
{
if (in->cls != out->cls)
return false;
if ( (in->type & arch_register_req_type_limited) == 0
|| (out->type & arch_register_req_type_limited) == 0 )
if (!arch_register_req_is(in, limited) ||
!arch_register_req_is(out, limited))
return true;
return (*in->limited & *out->limited) != 0;
......@@ -617,7 +617,7 @@ ir_node *ia32_gen_ASM(ir_node *node)
}
/* add a new (dummy) input which occupies the register */
assert(outreq->type & arch_register_req_type_limited);
assert(arch_register_req_is(outreq, limited));
in_reg_reqs[n_ins] = outreq;
in[n_ins] = new_bd_ia32_ProduceVal(NULL, block);
++n_ins;
......@@ -659,7 +659,7 @@ ir_node *ia32_gen_ASM(ir_node *node)
}
/* add a new (dummy) output which occupies the register */
assert(inreq->type & arch_register_req_type_limited);
assert(arch_register_req_is(inreq, limited));
out_reg_reqs[out_arity] = inreq;
++out_arity;
}
......
......@@ -5511,15 +5511,13 @@ static ir_node *gen_Proj_be_Call(ir_node *node)
int const n_outs = arch_get_irn_n_outs(new_call);
int i;
assert(proj >= pn_be_Call_first_res);
assert(req->type & arch_register_req_type_limited);
assert(proj >= pn_be_Call_first_res);
assert(arch_register_req_is(req, limited));
for (i = 0; i < n_outs; ++i) {
arch_register_req_t const *const new_req
= arch_get_irn_register_req_out(new_call, i);
if (!(new_req->type & arch_register_req_type_limited) ||
new_req->cls != req->cls ||
arch_register_req_t const *const new_req = arch_get_irn_register_req_out(new_call, i);
if (!arch_register_req_is(new_req, limited) ||
new_req->cls != req->cls ||
*new_req->limited != *req->limited)
continue;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment