Commit ca3c5f44 authored by Matthias Braun

optimize_graph_df: iterate marking of unreachable blocks to guarantee that all unreachable code is killed
parent dc4ccc6c
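
In short: optimize_graph_df used to mark unreachable blocks once, before the data-flow optimization ran. Since optimization can cut off control flow and thereby create new unreachable blocks, dead-block elimination now runs inside the main loop and is repeated until no new work appears. A condensed sketch of the new loop (mirroring the optimize_graph_df hunk further down; simplified, without the surrounding edge/state and CSE bookkeeping):

    /* irg and waitq come from the enclosing optimize_graph_df */
    do {
        /* drain the wait queue: optimize nodes until no work is left */
        while (!pdeq_empty(waitq)) {
            ir_node *n = (ir_node*)pdeq_getl(waitq);
            opt_walker(n, waitq);
        }
        /* optimization may have cut off control flow: recompute dominance
         * information and kill blocks that became unreachable; their users
         * are re-enqueued, which may require another round */
        set_irg_outs_inconsistent(irg);
        compute_doms(irg);
        irg_block_walk_graph(irg, NULL, kill_dead_blocks, waitq);
    } while (!pdeq_empty(waitq));
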
@@ -135,12 +135,12 @@ void dump_irnode_to_file(FILE *F, ir_node *n)
fprintf(F, " Label: %lu\n", get_entity_label(get_Block_entity(n)));
fprintf(F, " block visited: %ld\n", get_Block_block_visited(n));
fprintf(F, " block marked: %u\n", get_Block_mark(n));
if (get_irg_dom_state(get_irn_irg(n)) != dom_none) {
if (get_irg_dom_state(get_irn_irg(n)) == dom_consistent) {
fprintf(F, " dom depth %d\n", get_Block_dom_depth(n));
fprintf(F, " domtree pre num %d\n", get_Block_dom_tree_pre_num(n));
fprintf(F, " max subtree pre num %d\n", get_Block_dom_max_subtree_pre_num(n));
}
if (get_irg_postdom_state(get_irn_irg(n)) != dom_none) {
if (get_irg_postdom_state(get_irn_irg(n)) == dom_consistent) {
fprintf(F, " pdom depth %d\n", get_Block_postdom_depth(n));
fprintf(F, " pdomtree pre num %d\n", get_Block_pdom_tree_pre_num(n));
fprintf(F, " max pdomsubtree pre num %d\n", get_Block_pdom_max_subtree_pre_num(n));
......
@@ -100,12 +100,35 @@ void local_optimize_node(ir_node *n)
current_ir_graph = rem;
}
/**
* Enqueue all users of a node to a wait queue.
* Handles mode_T nodes.
*/
static void enqueue_users(ir_node *n, pdeq *waitq)
{
const ir_edge_t *edge;
foreach_out_edge(n, edge) {
ir_node *succ = get_edge_src_irn(edge);
if (get_irn_link(succ) != waitq) {
pdeq_putr(waitq, succ);
set_irn_link(succ, waitq);
}
if (get_irn_mode(succ) == mode_T) {
/* A mode_T node has Proj's. Because most optimizations
run on the Proj's we have to enqueue them also. */
enqueue_users(succ, waitq);
}
}
}
/**
* Block-Walker: uses dominance depth to mark dead blocks.
*/
static void kill_dead_blocks(ir_node *block, void *env)
{
(void) env;
pdeq *waitq = (pdeq*) env;
if (get_Block_dom_depth(block) < 0) {
/*
@@ -113,6 +136,7 @@ static void kill_dead_blocks(ir_node *block, void *env)
* the End block, i.e. it is always reachable from Start
*/
ir_graph *irg = get_irn_irg(block);
enqueue_users(block, waitq);
exchange(block, get_irg_bad(irg));
}
}
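
kill_dead_blocks now receives the wait queue as its walker environment so that the users of a killed block are re-enqueued and revisited, in the same way the data-flow walker re-enqueues the users of any node it improves. A minimal sketch of such a walker, assuming optimize_in_place_2() as the per-node optimization entry point (that helper name is an assumption, not part of this diff):

    static void opt_walker(ir_node *n, void *env)
    {
        pdeq    *waitq     = (pdeq*)env;
        /* assumed per-node optimizer; any local optimization entry point works */
        ir_node *optimized = optimize_in_place_2(n);

        if (optimized != n) {
            /* the node changed, so its users may be optimizable now as well */
            enqueue_users(n, waitq);
            exchange(n, optimized);
        }
    }
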
@@ -128,29 +152,6 @@ void local_optimize_graph(ir_graph *irg)
current_ir_graph = rem;
}
/**
* Enqueue all users of a node to a wait queue.
* Handles mode_T nodes.
*/
static void enqueue_users(ir_node *n, pdeq *waitq)
{
const ir_edge_t *edge;
foreach_out_edge(n, edge) {
ir_node *succ = get_edge_src_irn(edge);
if (get_irn_link(succ) != waitq) {
pdeq_putr(waitq, succ);
set_irn_link(succ, waitq);
}
if (get_irn_mode(succ) == mode_T) {
/* A mode_T node has Proj's. Because most optimizations
run on the Proj's we have to enqueue them also. */
enqueue_users(succ, waitq);
}
}
}
/**
* Data flow optimization walker.
* Optimizes all nodes and enqueue its users
@@ -181,7 +182,6 @@ int optimize_graph_df(ir_graph *irg)
current_ir_graph = irg;
state = edges_assure(irg);
assure_doms(irg);
/* Clean the value_table in irg for the CSE. */
new_identities(irg);
@@ -191,11 +191,8 @@ int optimize_graph_df(ir_graph *irg)
}
/* The following enables unreachable code elimination (=Blocks may be
* Bad). We cannot enable it in global_cse nodes since we can't
* determine a nodes block there and therefore can't remove all code
* in unreachable blocks */
* Bad). */
set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
/* invalidate info */
set_irg_outs_inconsistent(irg);
@@ -211,11 +208,17 @@ int optimize_graph_df(ir_graph *irg)
* so if it's not empty, the graph has been changed */
changed = !pdeq_empty(waitq);
/* finish the wait queue */
while (! pdeq_empty(waitq)) {
ir_node *n = (ir_node*)pdeq_getl(waitq);
opt_walker(n, waitq);
}
do {
/* finish the wait queue */
while (! pdeq_empty(waitq)) {
ir_node *n = (ir_node*)pdeq_getl(waitq);
opt_walker(n, waitq);
}
/* kill newly generated unreachable code */
set_irg_outs_inconsistent(irg);
compute_doms(irg);
irg_block_walk_graph(irg, NULL, kill_dead_blocks, waitq);
} while (! pdeq_empty(waitq));
del_pdeq(waitq);
@@ -226,7 +229,7 @@ int optimize_graph_df(ir_graph *irg)
/* Finally kill BAD and doublets from the keep alives.
Doing this AFTER edges where deactivated saves cycles */
end = get_irg_end(irg);
end = get_irg_end(irg);
remove_End_Bads_and_doublets(end);
clear_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
......
@@ -6284,28 +6284,7 @@ static ir_node *gigo(ir_node *node)
return get_irg_bad(irg);
}
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
blocks predecessors is dead. */
if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor
&& op != op_Sync && op != op_End) {
ir_graph *irg = get_irn_irg(node);
int irn_arity = get_irn_arity(node);
int i;
for (i = 0; i < irn_arity; i++) {
ir_node *pred = get_irn_n(node, i);
if (is_Bad(pred)) {
/* be careful not to kill cfopts too early or we might violate
* the 1 cfop per block property */
if (!is_cfop(node)
|| is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
return get_irg_bad(irg);
}
}
}
return node;
return false;
}
/**
......