Commit f2c2e45e authored by Matthias Braun's avatar Matthias Braun
Browse files

cleanup backend: make pre_spill_prepare_constraint independent of chordal...

cleanup backend: make pre_spill_prepare_constraint independent of chordal allocator structures (and rework it a bit in the process)

[r26304]
parent 1e8792d4
......@@ -223,149 +223,6 @@ static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
return be_scan_insn(&ie, irn);
}
/**
 * Insert Copy nodes around a (possibly) constrained instruction so the
 * register allocator can always fulfill the constraints:
 *  - copies for inputs precolored with an ignore register that does not
 *    satisfy a limited input constraint,
 *  - copies for a value that occurs in several differently-constrained
 *    input operands,
 *  - copies for constrained inputs that live through the node and whose
 *    allowed registers overlap the output constraints.
 *
 * @param env  the chordal environment (class, obstack, birg, liveness)
 * @param irn  the instruction to prepare
 * @return the next instruction in the schedule (as recorded by the insn
 *         scan, so newly inserted copies are not re-visited)
 */
static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
{
	bitset_t *tmp = bitset_alloca(env->cls->n_regs);
	bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
	ir_node *bl = get_nodes_block(irn);
	const be_irg_t *birg = env->birg;
	be_lv_t *lv = birg->lv;
	be_insn_t *insn;
	ir_node *next_insn;
	int i, j;

	/* Copy inputs precolored with an ignore register (but not a joker such
	 * as unknown/noreg) that cannot satisfy a limited constraint. */
	for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
		ir_node *op = get_irn_n(irn, i);
		ir_node *copy;
		const arch_register_t *reg;
		const arch_register_req_t *req;

		req = arch_get_register_req(irn, i);
		if (req->cls != env->cls)
			continue;
		reg = arch_get_irn_register(op);
		if (reg == NULL || !arch_register_type_is(reg, ignore))
			continue;
		if (arch_register_type_is(reg, joker))
			continue;
		if (!arch_register_req_is(req, limited))
			continue;
		/* the precolored register is among the allowed ones: no copy needed */
		if (rbitset_is_set(req->limited, reg->index))
			continue;

		copy = be_new_Copy(env->cls, bl, op);
		be_stat_ev("constr_copy", 1);
		sched_add_before(irn, copy);
		set_irn_n(irn, i, copy);
		DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));
	}

	insn = chordal_scan_insn(env, irn);
	if (!insn->has_constraints)
		goto end;

	/* insert copies for nodes that occur constrained more than once. */
	for (i = insn->use_start; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];
		if (!op->has_constraints)
			continue;
		for (j = i + 1; j < insn->n_ops; ++j) {
			ir_node *copy;
			be_operand_t *a_op = &insn->ops[j];
			if (a_op->carrier != op->carrier || !a_op->has_constraints)
				continue;
			/* if the constraint is the same, no copy is necessary
			 * TODO generalise unequal but overlapping constraints */
			if (a_op->req == op->req)
				continue;
			/* a Copy already decouples the operands */
			if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
				continue;
			copy = be_new_Copy(env->cls, bl, op->carrier);
			be_stat_ev("constr_copy", 1);
			sched_add_before(insn->irn, copy);
			set_irn_n(insn->irn, a_op->pos, copy);
			DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
		}
	}

	/* collect all registers occurring in out constraints. */
	for (i = 0; i < insn->use_start; ++i) {
		be_operand_t *op = &insn->ops[i];
		if (op->has_constraints)
			bitset_or(def_constr, op->regs);
	}

	/*
	 * insert copies for all constrained arguments living through the node
	 * and being constrained to a register which also occurs in out constraints.
	 */
	for (i = insn->use_start; i < insn->n_ops; ++i) {
		ir_node *copy;
		be_operand_t *op = &insn->ops[i];

		bitset_copy(tmp, op->regs);
		bitset_and(tmp, def_constr);

		/*
		 * Check, if
		 * 1) the operand is constrained.
		 * 2) lives through the node.
		 * 3) is constrained to a register occurring in out constraints.
		 */
		if (!op->has_constraints ||
				!values_interfere(birg, insn->irn, op->carrier) ||
				bitset_is_empty(tmp))
			continue;

		/*
		 * only create the copy if the operand is no copy.
		 * this is necessary since the assure constraints phase inserts
		 * Copies and Keeps for operands which must be different from the
		 * results. Additional copies here would destroy this.
		 */
		if (be_is_Copy(get_irn_n(insn->irn, op->pos)))
			continue;

		copy = be_new_Copy(env->cls, bl, op->carrier);
		sched_add_before(insn->irn, copy);
		set_irn_n(insn->irn, op->pos, copy);
		DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
		be_liveness_update(lv, op->carrier);
	}

end:
	/* BUGFIX: read the successor BEFORE freeing.  obstack_free() releases
	 * the insn record itself, so the old "return insn->next_insn" after the
	 * free read freed memory (use-after-free). */
	next_insn = insn->next_insn;
	obstack_free(env->obst, insn);
	return next_insn;
}
/**
 * Block walker: run prepare_constr_insn() over every scheduled instruction
 * of the block.  prepare_constr_insn() returns the next instruction, which
 * lets the walk skip over the copies it inserts.
 */
static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
{
	be_chordal_env_t *chordal_env = data;
	ir_node *cur = sched_first(bl);

	while (!sched_is_end(cur)) {
		/* advances past any Copy nodes inserted for cur */
		cur = prepare_constr_insn(chordal_env, cur);
	}
}
/**
 * Prepare all constrained instructions of the graph for spilling/allocation
 * by walking every block and inserting the required Copy nodes.
 *
 * @param cenv  the chordal environment of the graph to prepare
 */
void be_pre_spill_prepare_constr(be_chordal_env_t *cenv)
{
	ir_graph *irg = cenv->irg;
	irg_block_walk_graph(irg, pre_spill_prepare_constr_walker, NULL, cenv);
}
static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
{
const be_chordal_env_t *env = alloc_env->chordal_env;
......@@ -389,7 +246,7 @@ static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t
if (op->partner != NULL)
continue;
if (values_interfere(env->birg, op->irn, op->carrier))
if (be_values_interfere(env->birg->lv, op->irn, op->carrier))
continue;
bitset_clear_all(bs);
......@@ -503,7 +360,6 @@ static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
be_insn_t *insn = chordal_scan_insn(env, irn);
ir_node *res = insn->next_insn;
int be_silent = *silent;
be_irg_t *birg = env->birg;
bipartite_t *bp;
if (insn->pre_colored) {
......@@ -611,7 +467,8 @@ static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
assert(is_Proj(proj));
if (!values_interfere(birg, proj, irn) || pmap_contains(partners, proj))
if (!be_values_interfere(env->birg->lv, proj, irn)
|| pmap_contains(partners, proj))
continue;
/* don't insert a node twice */
......
......@@ -238,7 +238,7 @@ static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);
BE_TIMER_PUSH(t_ra_constr);
be_pre_spill_prepare_constr(chordal_env);
be_pre_spill_prepare_constr(chordal_env->birg, chordal_env->cls);
BE_TIMER_POP(t_ra_constr);
dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "-constr-pre", dump_ir_block_graph_sched);
......
......@@ -121,8 +121,6 @@ struct be_ra_chordal_opts_t {
char ilp_solver[128];
};
void be_pre_spill_prepare_constr(be_chordal_env_t *cenv);
void check_for_memory_operands(ir_graph *irg);
#endif /* FIRM_BE_BECHORDAL_T_H */
......@@ -93,7 +93,7 @@ static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a,
if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
else
return values_interfere(env->birg, a, b);
return be_values_interfere(env->birg->lv, a, b);
}
static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
......
......@@ -167,7 +167,7 @@ static int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const
if (env->ifg)
return be_ifg_connected(env->ifg, a, b);
else
return values_interfere(env->birg, a, b);
return be_values_interfere(env->birg->lv, a, b);
}
......
......@@ -244,7 +244,7 @@ static void stat_phi_node(be_chordal_env_t *chordal_env, ir_node *phi)
static void stat_copy_node(be_chordal_env_t *chordal_env, ir_node *root) {
curr_vals[I_CPY_CNT]++;
curr_vals[I_COPIES_MAX]++;
if (values_interfere(chordal_env->birg, root, get_Perm_src(root))) {
if (be_values_interfere(chordal_env->birg->lv, root, get_Perm_src(root))) {
curr_vals[I_COPIES_IF]++;
assert(0 && "A Perm pair (in/out) should never interfere!");
}
......@@ -280,7 +280,7 @@ static void stat_phi_class(be_chordal_env_t *chordal_env, ir_node **pc) {
curr_vals[I_CLS_IF_MAX] += size * (size - 1) / 2;
for (if_free = 1, i = 0; i < size - 1; ++i)
for (o = i + 1; o < size; ++o)
if (values_interfere(chordal_env->birg, pc[i], pc[o])) {
if (be_values_interfere(chordal_env->birg->lv, pc[i], pc[o])) {
if_free = 0;
curr_vals[I_CLS_IF_CNT]++;
}
......
......@@ -60,7 +60,7 @@ static void ifg_std_free(void *self)
/**
 * Standard IFG implementation of the "connected" query: two nodes are
 * connected in the interference graph iff their values interfere according
 * to the liveness information.
 *
 * @return non-zero if a and b interfere, 0 otherwise.
 */
static int ifg_std_connected(const void *self, const ir_node *a, const ir_node *b)
{
	const ifg_std_t *ifg = self;
	/* BUGFIX: the body contained both the pre-refactor
	 * values_interfere(birg, ...) call and the new liveness-based call,
	 * making the second return unreachable dead code.  Keep only the
	 * be_values_interfere() variant this change migrates to. */
	return be_values_interfere(ifg->env->birg->lv, a, b);
}
typedef struct _nodes_iter_t {
......
......@@ -109,7 +109,7 @@ static inline int _value_strictly_dominates(const ir_node *a, const ir_node *b)
* @param b The second value.
* @return 1, if a and b interfere, 0 if not.
*/
static inline int _lv_values_interfere(const be_lv_t *lv, const ir_node *a, const ir_node *b)
static inline int be_values_interfere(const be_lv_t *lv, const ir_node *a, const ir_node *b)
{
int a2b = _value_dominates(a, b);
int b2a = _value_dominates(b, a);
......@@ -265,7 +265,6 @@ static inline int _be_lv_chk_after_irn(const be_irg_t *birg, const ir_node *irn,
#define value_dominates_intrablock(a, b) _value_dominates_intrablock(a, b)
#define value_dominates(a, b) _value_dominates(a, b)
#define values_interfere(birg, a, b) _lv_values_interfere(be_get_birg_liveness(birg), a, b)
#define dominates_use(a, e) _dominates_use(a, e)
#define strictly_dominates_use(a, e) _strictly_dominates_use(a, e)
#define be_lv_chk_before_irn(birg, a, b) _be_lv_chk_before_irn(birg, a, b)
......
......@@ -882,7 +882,7 @@ static int push_through_perm(ir_node *perm, lower_env_t *env)
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
ir_node *op = get_irn_n(irn, i);
if (arch_irn_consider_in_reg_alloc(cls, op) &&
!values_interfere(env->birg, op, one_proj)) {
!be_values_interfere(env->birg->lv, op, one_proj)) {
frontier = irn;
goto found_front;
}
......
......@@ -812,17 +812,8 @@ static void be_straight_alloc_cls(void)
*/
static void spill(void)
{
/* TODO: rewrite pre_spill_prepare to work without chordal_env... */
be_chordal_env_t cenv;
memset(&cenv, 0, sizeof(cenv));
cenv.obst = &obst;
cenv.irg = irg;
cenv.birg = birg;
cenv.cls = cls;
cenv.ignore_colors = ignore_regs;
/* make sure all nodes show their real register pressure */
be_pre_spill_prepare_constr(&cenv);
be_pre_spill_prepare_constr(birg, cls);
/* spill */
be_do_spill(birg, cls);
......
......@@ -43,6 +43,4 @@ void be_register_allocator(const char *name, be_ra_t *allocator);
*/
void be_allocate_registers(be_irg_t *birg);
int (values_interfere)(const be_irg_t *birg, const ir_node *a, const ir_node *b);
#endif /* FIRM_BE_BERA_H */
#endif
......@@ -27,14 +27,237 @@
#include "config.h"
#include "irtools.h"
#include "debug.h"
#include "iredges_t.h"
#include "adt/raw_bitset.h"
#include "statev.h"
#include "irgwalk.h"
#include "bespilloptions.h"
#include "bemodule.h"
#include "be.h"
#include "belive_t.h"
#include "beirg_t.h"
#include "bearch.h"
#include "benode_t.h"
#include "besched_t.h"
#include "bera.h"
#include "beintlive_t.h"
#include "lc_opts.h"
#include "lc_opts_enum.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** Walker environment for be_pre_spill_prepare_constr(). */
typedef struct be_pre_spill_env_t {
	be_irg_t *birg;                   /**< the graph whose schedule is rewritten */
	const arch_register_class_t *cls; /**< the register class being prepared */
} be_pre_spill_env_t;
/**
 * Insert Copy nodes around a constrained instruction so the register
 * constraints can always be fulfilled later:
 *  - copies for inputs precolored with an ignore register that does not
 *    satisfy a limited input constraint,
 *  - copies when the same value feeds several inputs with different
 *    limited constraints,
 *  - copies for constrained inputs that interfere with the node itself and
 *    whose allowed registers overlap the output constraints.
 *
 * Works purely on register requirements; independent of the chordal
 * allocator structures (no be_insn scan needed).
 */
static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
{
	const arch_register_class_t *cls = env->cls;
	ir_node *block = get_nodes_block(node);
	const be_irg_t *birg = env->birg;
	be_lv_t *lv = birg->lv;
	/* raw bitsets over cls->n_regs bits; allocated lazily on the stack
	 * via rbitset_alloca only when actually needed */
	unsigned *tmp = NULL;
	unsigned *def_constr = NULL;
	int arity = get_irn_arity(node);
	int i, i2;

	/* Insert a copy for constraint inputs attached to a value which can't
	 * fulfill the constraint
	 * (typical example: stack pointer as input to copyb)
	 * TODO: This really just checks precolored registers at the moment and
	 * ignores the general case of not matching in/out constraints
	 */
	for (i = 0; i < arity; ++i) {
		ir_node *op = get_irn_n(node, i);
		ir_node *copy;
		const arch_register_t *reg;
		const arch_register_req_t *req;

		req = arch_get_register_req(node, i);
		if (req->cls != cls)
			continue;
		reg = arch_get_irn_register(op);
		if (reg == NULL)
			continue;

		/* precolored with an ignore register (which is not a joker like
		   unknown/noreg) */
		if (arch_register_type_is(reg, joker)
			|| !arch_register_type_is(reg, ignore))
			continue;

		if (! (req->type & arch_register_req_type_limited))
			continue;
		/* the precolored register satisfies the constraint: nothing to do */
		if (rbitset_is_set(req->limited, reg->index))
			continue;

		copy = be_new_Copy(cls, block, op);
		stat_ev_int("constr_copy", 1);
		sched_add_before(node, copy);
		set_irn_n(node, i, copy);
		DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, node, i));
	}

	/* insert copies for nodes that occur constrained more than once. */
	for (i = 0; i < arity; ++i) {
		ir_node *in;
		ir_node *copy;
		const arch_register_req_t *req;

		req = arch_get_register_req(node, i);
		if (req->cls != cls)
			continue;
		if (! (req->type & arch_register_req_type_limited))
			continue;

		in = get_irn_n(node, i);
		if (!arch_irn_consider_in_reg_alloc(cls, in))
			continue;

		/* look for a later input carrying the same value but with a
		 * different limited constraint; such a pair cannot share one
		 * register, so decouple it with a copy */
		for (i2 = i + 1; i2 < arity; ++i2) {
			ir_node *in2;
			const arch_register_req_t *req2;

			req2 = arch_get_register_req(node, i2);
			if (req2->cls != cls)
				continue;
			if (! (req2->type & arch_register_req_type_limited))
				continue;

			in2 = get_irn_n(node, i2);
			if (in2 != in)
				continue;

			/* if the constraint is the same, no copy is necessary
			 * TODO generalise unequal but overlapping constraints */
			if (rbitset_equal(req->limited, req2->limited, cls->n_regs))
				continue;

#if 0
			/* Matze: looks fishy to me disabled it for now */
			if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
				continue;
#endif

			copy = be_new_Copy(cls, block, in);
			stat_ev_int("constr_copy", 1);
			sched_add_before(node, copy);
			set_irn_n(node, i2, copy);
			DBG((dbg, LEVEL_3,
			     "inserting multiple constr copy %+F for %+F pos %d\n",
			     copy, node, i2));
		}
	}

	/* collect all registers occurring in out constraints. */
	if (get_irn_mode(node) == mode_T) {
		/* multi-result node: gather constraints from all Proj users */
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			const arch_register_req_t *req = arch_get_register_req_out(proj);
			if (! (req->type & arch_register_req_type_limited))
				continue;

			if (def_constr == NULL) {
				rbitset_alloca(def_constr, cls->n_regs);
			}
			rbitset_or(def_constr, req->limited, cls->n_regs);
		}
	} else {
		const arch_register_req_t *req = arch_get_register_req_out(node);
		if (req->type & arch_register_req_type_limited) {
			rbitset_alloca(def_constr, cls->n_regs);
			rbitset_or(def_constr, req->limited, cls->n_regs);
		}
	}

	/* no output constraints => we're good */
	if (def_constr == NULL) {
		return;
	}

	/*
	 * insert copies for all constrained arguments living through the node
	 * and being constrained to a register which also occurs in out constraints.
	 */
	rbitset_alloca(tmp, cls->n_regs);
	for (i = 0; i < arity; ++i) {
		const arch_register_req_t *req;
		ir_node *in;
		ir_node *copy;

		/*
		 * Check, if
		 * 1) the operand is constrained.
		 * 2) lives through the node.
		 * 3) is constrained to a register occurring in out constraints.
		 */
		req = arch_get_register_req(node, i);
		if (req->cls != cls)
			continue;
		if (! (req->type & arch_register_req_type_limited))
			continue;

		in = get_irn_n(node, i);
		if (!arch_irn_consider_in_reg_alloc(cls, in))
			continue;
		/* interference of node and input means the input lives through */
		if (!be_values_interfere(lv, node, in))
			continue;

		rbitset_copy(tmp, req->limited, cls->n_regs);
		rbitset_and(tmp, def_constr, cls->n_regs);
		if (rbitset_is_empty(tmp, cls->n_regs))
			continue;

		/*
		 * only create the copy if the operand is no copy.
		 * this is necessary since the assure constraints phase inserts
		 * Copies and Keeps for operands which must be different from the
		 * results. Additional copies here would destroy this.
		 */
		if (be_is_Copy(in))
			continue;

		copy = be_new_Copy(cls, block, in);
		sched_add_before(node, copy);
		set_irn_n(node, i, copy);
		DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
		     copy, node, i));
		be_liveness_update(lv, in);
	}
}
/**
 * Block walker: prepare every scheduled instruction of the block for the
 * constraint handling by inserting the necessary Copy nodes.
 */
static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
{
	be_pre_spill_env_t *walk_env = data;
	ir_node *irn;

	sched_foreach(block, irn) {
		prepare_constr_insn(walk_env, irn);
	}
}
/**
 * Add copies so that constraints needing additional registers induce the
 * corresponding register pressure before spilling (chordal-env independent
 * entry point).
 *
 * @param birg  the graph to prepare
 * @param cls   the register class to consider
 */
void be_pre_spill_prepare_constr(be_irg_t *birg,
                                 const arch_register_class_t *cls)
{
	be_pre_spill_env_t walk_env;

	/* both fields are assigned explicitly, so no memset is required */
	walk_env.birg = birg;
	walk_env.cls  = cls;

	irg_block_walk_graph(birg->irg, pre_spill_prepare_constr_walker, NULL,
	                     &walk_env);
}
/* spill option defaults (adjustable via the lc_opt table below) */
int be_coalesce_spill_slots = 1; /* merge spill slots of non-interfering values */
int be_do_remats = 1;            /* rematerialize values instead of reloading where possible */
......@@ -70,6 +293,8 @@ void be_init_spilloptions(void)
lc_opt_add_table(spill_grp, be_spill_options);
be_add_module_list_opt(spill_grp, "spiller", "spill algorithm",
&spillers, (void**) &selected_spiller);
FIRM_DBG_REGISTER(dbg, "firm.be.spillprepare");
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spilloptions);
......@@ -63,4 +63,12 @@ void be_register_spiller(const char *name, be_spiller_t *spiller);
*/
void be_do_spill(be_irg_t *birg, const arch_register_class_t *cls);
/**
* Adds additional copies, so constraints needing additional registers to be
* solved correctly induce the additional register pressure.
*/
void be_pre_spill_prepare_constr(be_irg_t *birg,
const arch_register_class_t *cls);
#endif
......@@ -258,7 +258,7 @@ static void set_regs_or_place_dupls_walker(ir_node *bl, void *data) {
DBG((dbg, LEVEL_1, " for %+F(%s) -- %+F(%s)\n", phi, phi_reg->name, arg, arg_reg->name));
if (values_interfere(chordal_env->birg, phi, arg)) {
if (be_values_interfere(lv, phi, arg)) {
/*
Insert a duplicate in arguments block,
make it the new phi arg,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment