Commit 3619ae08 authored by Christoph Mallon


Replace the sequence be_peephole_before_exchange() + sched_remove() + exchange() + be_peephole_new_node() with the new function be_peephole_exchange().

[r20788]
parent c1c50386
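
For orientation, this is the call-site pattern being collapsed. The names node and replacement are placeholders standing in for the concrete old and new nodes of each peephole handler; the real instances appear in the hunks below.

/* before: four separate steps at every call site */
be_peephole_before_exchange(node, replacement);
sched_remove(node);
exchange(node, replacement);
be_peephole_after_exchange(replacement);

/* after: one helper performs the same sequence */
be_peephole_exchange(node, replacement);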
@@ -245,10 +245,7 @@ static void peephole_be_Spill(ir_node *node) {
panic("peephole_be_Spill: spill not supported for this mode");
}
be_peephole_before_exchange(node, store);
sched_remove(node);
exchange(node, store);
be_peephole_after_exchange(store);
be_peephole_exchange(node, store);
}
/**
@@ -305,10 +302,7 @@ static void peephole_be_Reload(ir_node *node) {
panic("peephole_be_Reload: reload not supported for this mode");
}
be_peephole_before_exchange(node, proj);
sched_remove(node);
exchange(node, proj);
be_peephole_after_exchange(proj);
be_peephole_exchange(node, proj);
}
/**
......
@@ -125,47 +125,20 @@ static void set_uses(ir_node *node)
}
}
void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
{
const arch_register_t *reg;
const arch_register_class_t *cls;
unsigned reg_idx;
unsigned cls_idx;
DBG((dbg, LEVEL_1, "About to exchange %+F with %+F\n", old_node, new_node));
if (old_node == current_node) {
if (is_Proj(new_node)) {
current_node = get_Proj_pred(new_node);
} else {
current_node = new_node;
}
}
if (!mode_is_data(get_irn_mode(old_node)))
return;
reg = arch_get_irn_register(arch_env, old_node);
if (reg == NULL) {
panic("No register assigned at %+F\n", old_node);
}
cls = arch_register_get_class(reg);
reg_idx = arch_register_get_index(reg);
cls_idx = arch_register_class_index(cls);
if (register_values[cls_idx][reg_idx] == old_node) {
register_values[cls_idx][reg_idx] = new_node;
}
be_liveness_remove(lv, old_node);
}
void be_peephole_after_exchange(ir_node *new_node)
void be_peephole_new_node(ir_node *const nw)
{
be_liveness_introduce(lv, new_node);
be_liveness_introduce(lv, nw);
}
void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_node)
/**
* must be called from peephole optimisations before a node will be killed
* and its users will be redirected to new_node,
* so bepeephole can update its internal state.
*
* Note: killing a node and rewiring is only allowed if new_node produces
* the same registers as old_node.
*/
void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node)
{
const arch_register_t *reg;
const arch_register_class_t *cls;
@@ -174,9 +147,9 @@ void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_
DBG((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));
if (old_node == current_node) {
/* current_node will be killed. Its scheduling predecessor
must be processed next. */
if (current_node == old_node) {
/* next node to be processed will be killed. Its scheduling predecessor
* must be processed next. */
prev_node = sched_prev(current_node);
}
@@ -201,6 +174,14 @@ void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_
be_liveness_remove(lv, old_node);
}
void be_peephole_exchange(ir_node *const old, ir_node *const nw)
{
be_peephole_before_exchange(old, nw);
sched_remove(old);
exchange(old, nw);
be_peephole_new_node(nw);
}
/**
* block-walker: run peephole optimization on the given block.
*/
@@ -230,11 +211,12 @@ static void process_block(ir_node *block, void *data)
/* walk the block from last insn to the first */
current_node = sched_last(block);
for( ; !sched_is_begin(current_node);
current_node = prev_node != NULL ? prev_node : sched_prev(current_node)) {
current_node = prev_node != NULL ? prev_node : sched_prev(current_node)) {
ir_op *op;
ir_node *last;
peephole_opt_func peephole_node;
assert(!is_Bad(current_node));
prev_node = NULL;
if (is_Phi(current_node))
break;
@@ -341,14 +323,7 @@ ir_node *be_peephole_IncSP_IncSP(ir_node *node)
/* add node offset to pred and remove our IncSP */
be_set_IncSP_offset(pred, offs);
be_peephole_before_exchange_and_kill(node, pred);
/* rewire dependency/data edges */
edges_reroute_kind(node, pred, EDGE_KIND_DEP, current_ir_graph);
edges_reroute(node, pred, current_ir_graph);
sched_remove(node);
be_kill_node(node);
be_peephole_exchange(node, pred);
return pred;
}
......
@@ -52,26 +52,15 @@ static INLINE ir_node *be_peephole_get_reg_value(const arch_register_t *reg)
typedef void (*peephole_opt_func) (ir_node *node);
/**
* must be called from peephole optimisations before a node is exchanged,
* so bepeephole can update its internal state.
*/
void be_peephole_before_exchange(const ir_node *old_node, ir_node *new_node);
/**
* must be called from peephole optimisations after a node is exchanged,
* so bepeephole can update its internal state.
*/
void be_peephole_after_exchange(ir_node *new_node);
* Notify the peephole phase about a newly added node, so it can update its
* internal state. This is not needed for the new node when
* be_peephole_exchange() is used. */
void be_peephole_new_node(ir_node *nw);
/**
* must be called from peephole optimisations before a node will be killed
* and its users will be redirected to new_node,
* so bepeephole can update its internal state.
*
* Note: killing a node and rewiring is only allowed if new_node produces
* the same registers as old_node.
*/
void be_peephole_before_exchange_and_kill(const ir_node *old_node, ir_node *new_node);
* When doing peephole optimisation, use this function instead of plain
* exchange(), so the peephole phase can update its internal state. */
void be_peephole_exchange(ir_node *old, ir_node *nw);
/**
* Tries to optimize a be_IncSP node with its previous IncSP node.
......
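
With these declarations in place, a peephole handler only needs a single call when it swaps one node for another. The following is a minimal sketch, not code from this commit: make_cheaper_node() is a hypothetical helper, and the register_peephole_optimisation() hook mentioned at the end is assumed to be the usual registration function from bepeephole.h rather than something shown in this diff.

static void peephole_example(ir_node *node)
{
	/* hypothetical: build a replacement that produces the same registers */
	ir_node *replacement = make_cheaper_node(node);
	if (replacement == NULL)
		return;

	sched_add_before(node, replacement);
	/* notifies the peephole state, unschedules node, rewires its users to
	 * replacement and announces the new value, all in one step */
	be_peephole_exchange(node, replacement);
}

/* assumed registration, e.g. in the backend's peephole setup:
 *   register_peephole_optimisation(op_be_IncSP, peephole_example);
 */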
@@ -154,12 +154,8 @@ static ir_node *turn_into_mode_t(ir_node *node)
reg = arch_get_irn_register(arch_env, node);
arch_set_irn_register(arch_env, res_proj, reg);
be_peephole_before_exchange(node, res_proj);
sched_add_before(node, new_node);
sched_remove(node);
exchange(node, res_proj);
be_peephole_after_exchange(res_proj);
be_peephole_exchange(node, res_proj);
return new_node;
}
@@ -232,10 +228,7 @@ static void peephole_ia32_Test(ir_node *node)
assert(get_irn_mode(node) != mode_T);
be_peephole_before_exchange(node, flags_proj);
exchange(node, flags_proj);
sched_remove(node);
be_peephole_after_exchange(flags_proj);
be_peephole_exchange(node, flags_proj);
}
/**
@@ -732,10 +725,7 @@ static void peephole_be_IncSP(ir_node *node)
}
}
be_peephole_before_exchange(node, stack);
sched_remove(node);
exchange(node, stack);
be_peephole_after_exchange(stack);
be_peephole_exchange(node, stack);
}
/**
@@ -778,10 +768,7 @@ static void peephole_ia32_Const(ir_node *node)
sched_add_before(node, produceval);
sched_add_before(node, xor);
be_peephole_before_exchange(node, xor);
exchange(node, xor);
sched_remove(node);
be_peephole_after_exchange(xor);
be_peephole_exchange(node, xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
@@ -986,11 +973,8 @@ exchange:
DBG_OPT_LEA2ADD(node, res);
/* exchange the Add and the LEA */
be_peephole_before_exchange(node, res);
sched_add_before(node, res);
sched_remove(node);
exchange(node, res);
be_peephole_after_exchange(res);
be_peephole_exchange(node, res);
}
/**
@@ -1041,7 +1025,7 @@ static void peephole_ia32_Imul_split(ir_node *imul) {
res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
arch_set_irn_register(arch_env, res, reg);
be_peephole_after_exchange(res);
be_peephole_new_node(res);
set_irn_n(imul, n_ia32_IMul_mem, mem);
noreg = get_irn_n(imul, n_ia32_IMul_left);
......
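
The Imul_split hunk above is the one place in this diff that still calls be_peephole_new_node() directly: a Load and its result Proj are scheduled in front of the IMul rather than exchanged for it, so no be_peephole_exchange() call is involved and the new value has to be announced by hand. An annotated restatement of that pattern, using the names from the hunk (the construction of load itself is not shown here):

res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
arch_set_irn_register(arch_env, res, reg);
/* nothing is exchanged, so tell the peephole phase about the new value
 * explicitly to keep its liveness bookkeeping correct */
be_peephole_new_node(res);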