Commit 43aca1df authored by Matthias Braun

remove MacroBlock concept

[r28020]
parent 8cb87306
@@ -4218,20 +4218,6 @@ FIRM_API ir_node *new_Dummy(ir_mode *mode);
FIRM_API ir_node *new_d_immBlock(dbg_info *db);
FIRM_API ir_node *new_immBlock(void);
/** Create an immature PartBlock.
*
* An immature block has only one Block or PartBlock predecessor.
* A PartBlock forms together with one Block and possibly other
* PartBlocks a MacroBlock.
*
* Adds the PartBlock to the graph in current_ir_graph. Does set
* current_block. Can be used with automatic Phi node construction.
* This constructor can only be used if the graph is in
* state_building.
*/
FIRM_API ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp);
FIRM_API ir_node *new_immPartBlock(ir_node *pred_jmp);
/** Add a control flow edge to an immature block. */
FIRM_API void add_immBlock_pred(ir_node *immblock, ir_node *jmp);
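For context, a minimal caller-side sketch of the immature-Block API that survives this commit (not part of the diff; mature_immBlock() is assumed to be the usual finalizer from ircons.h, and then_jmp/else_jmp are hypothetical Jmp nodes of two branches):

	ir_node *join = new_immBlock();     /* arity of the in-array still unknown */
	add_immBlock_pred(join, then_jmp);  /* one call per incoming control flow edge */
	add_immBlock_pred(join, else_jmp);
	mature_immBlock(join);              /* fixes the in-array; no further predecessors */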
@@ -333,10 +333,8 @@ typedef enum {
ir_dump_flag_node_addresses = 1U << 14,
/** dump all anchor nodes, even the unused ones */
ir_dump_flag_all_anchors = 1U << 15,
- /** dumps macroblock edges from every block to its macroblock */
- ir_dump_flag_macroblock_edges = 1U << 16,
/** dumps marked blocks with an asterisk in the label */
- ir_dump_flag_show_marks = 1U << 17,
+ ir_dump_flag_show_marks = 1U << 16,
/** turns off dumping of constant entity values in typegraphs */
ir_dump_flag_no_entity_values = 1U << 20,
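Removing the macroblock flag renumbers ir_dump_flag_show_marks from bit 17 to bit 16; callers that OR the enum values together are unaffected. A hedged usage sketch (ir_add_dump_flags() and dump_ir_graph() assumed from irdump.h, irg some ir_graph* in scope):

	ir_add_dump_flags(ir_dump_flag_show_marks | ir_dump_flag_all_anchors);
	dump_ir_graph(irg, "marked");  /* dump the graph with marks shown */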
@@ -55,7 +55,6 @@ FIRM_API void turn_into_tuple(ir_node *node, int arity);
* Further it collects all Proj nodes in a list of the node producing
* the tuple. In case of nested tuples the Projs are collected in the
* node producing the outermost Tuple.
* All partBlocks are linked to their macroblock header.
* All other link fields are cleared afterwards.
*/
FIRM_API void collect_phiprojs(ir_graph *irg);
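A short sketch of how a pass consumes the lists that collect_phiprojs() builds (get_Block_phis() and get_Phi_next() assumed from irnode.h; block is some Block of irg):

	ir_node *phi;
	collect_phiprojs(irg);
	for (phi = get_Block_phis(block); phi != NULL; phi = get_Phi_next(phi)) {
		/* every Phi of block is visited exactly once */
	}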
@@ -67,10 +66,11 @@ FIRM_API void collect_phiprojs(ir_graph *irg);
* (old_block) of node. Moves node and its predecessors from old_block to
* new_block. Moves all Projs that depend on moved nodes and are in old_block
* to new_block. Moves all Phi nodes from old_block to new_block. To achieve
- * this the routine assumes that all Phi nodes are in the Phi list (see get_Block_phis())
- * of old_block. Further it assumes that all Proj nodes are accessible by the link field
- * of the nodes producing the Tuple and all partBlocks are linked to their MacroBlock header.
- * This can be established by collect_phiprojs(). part_block() conserves this property.
+ * this the routine assumes that all Phi nodes are in the Phi list (see
+ * get_Block_phis()) of old_block.
+ * Further it assumes that all Proj nodes are accessible by the link field of
+ * the nodes producing the Tuple. This can be established by collect_phiprojs().
+ * part_block() conserves this property.
* Adds a Jmp node to new_block that jumps to old_block.
*
* @param node The node at which to break the block
@@ -251,19 +251,6 @@ FIRM_API ir_node *get_nodes_block (const ir_node *node);
/** Sets the Block of a node. */
FIRM_API void set_nodes_block (ir_node *node, ir_node *block);
/**
* Return the MacroBlock the node belongs to. This is only
* possible for pinned nodes or if the graph is in pinned state.
* Otherwise the MacroBlock may be incorrect. This condition is
* now checked by an assertion.
*
* This works for all except Block. It can return Blocks or the Bad node.
*
* To express the difference from access routines that work for all
* nodes, we use the infix "nodes" and do not name this function
* get_irn_MacroBlock(). */
FIRM_API ir_node *get_nodes_MacroBlock(const ir_node *node);
/** Test whether arbitrary node is frame pointer.
*
* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
@@ -336,12 +323,6 @@ FIRM_API int Block_block_visited(const ir_node *node);
FIRM_API ir_extblk *get_Block_extbb(const ir_node *block);
/** Sets the extended basic block a block belongs to. */
FIRM_API void set_Block_extbb(ir_node *block, ir_extblk *extblk);
/** Get the Macro Block header of a (sub-) block. */
FIRM_API ir_node *get_Block_MacroBlock(const ir_node *block);
/** Set the Macro Block header of a (sub-) block. */
FIRM_API void set_Block_MacroBlock(ir_node *block, ir_node *mbh);
/** Get the Macro Block header of a node. */
FIRM_API ir_node *get_irn_MacroBlock(const ir_node *n);
/** Returns the ir_graph this Block belongs to. */
FIRM_API ir_graph *get_Block_irg(const ir_node *block);
/** Returns non-zero if the block has an entity assigned */
@@ -207,12 +207,6 @@ static void remove_empty_block(ir_node *block)
if (node == jump)
continue;
if (is_Block(node)) {
/* a Block->Block edge: This should be the MacroBlock
edge, ignore it. */
assert(get_Block_MacroBlock(node) == block && "Wrong Block->Block edge");
continue;
}
/* we simply kill Pins, because there are some strange interactions
 * with jump threading, which produces PhiMs with Pins; we simply
 * kill the Pins here, everything is scheduled anyway */
@@ -439,11 +439,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
if (code == iro_End) {
/* Skip the end node because of keep-alive edges. */
continue;
} else if (code == iro_Block) {
/* A Block-Block edge. This should be the MacroBlock
* edge, ignore it. */
assert(get_Block_MacroBlock(irn) == block && "Block-Block edge found");
continue;
}
users = get_irn_n_edges(irn);
@@ -414,12 +414,6 @@ static void trace_preprocess_block(trace_env_t *env, ir_node *block)
foreach_out_edge(block, edge) {
ir_node *succ = get_edge_src_irn(edge);
if (is_Block(succ)) {
/* A Block-Block edge. This should be the MacroBlock
* edge, ignore it. */
assert(get_Block_MacroBlock(succ) == block && "Block-Block edge found");
continue;
}
if (is_Anchor(succ)) {
/* ignore a keep alive edge */
continue;
@@ -374,7 +374,6 @@ static ir_node *gen_Block(ir_node *node)
{
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *macroblock = get_Block_MacroBlock(node);
ir_node *block;
block = new_ir_node(dbgi, irg, NULL, get_irn_op(node), get_irn_mode(node),
@@ -382,14 +381,6 @@ static ir_node *gen_Block(ir_node *node)
copy_node_attr(irg, node, block);
block->node_nr = node->node_nr;
if (node == macroblock) {
/* this node is a macroblock header */
set_Block_MacroBlock(block, block);
} else {
macroblock = be_transform_node(macroblock);
set_Block_MacroBlock(block, macroblock);
}
/* put the preds in the worklist */
be_enqueue_preds(node);
@@ -107,12 +107,9 @@ void copy_irn_to_irg(ir_node *n, ir_graph *irg)
copy_node_attr(irg, n, nn);
set_irn_link(n, nn);
- /* fix the irg for blocks */
- if (is_Block(nn)) {
+ /* fix the irg for nodes containing a reference to it */
+ if (ir_has_irg_ref(nn)) {
nn->attr.block.irg.irg = irg;
- /* we cannot allow blocks WITHOUT macroblock input */
- set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
}
}
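A hedged caller-side sketch of this helper: as the set_irn_link(n, nn) above shows, the copy is left in the link field of the original node.

	copy_irn_to_irg(n, other_irg);             /* other_irg: hypothetical target graph */
	ir_node *nn = (ir_node*)get_irn_link(n);   /* fetch the fresh copy */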
@@ -144,9 +141,6 @@ ir_node *irn_copy_into_irg(const ir_node *node, ir_graph *irg)
/* copy the attributes */
copy_node_attr(irg, node, res);
if (op == op_Block) {
set_Block_MacroBlock(res, get_Block_MacroBlock(node));
}
/* duplicate dependency edges */
n_deps = get_irn_deps(node);
@@ -176,15 +170,7 @@ void irn_rewire_inputs(ir_node *node)
new_node = get_new_node(node);
- if (is_Block(node)) {
- /* copy the macro block header */
- ir_node *mbh = get_Block_MacroBlock(node);
- /* get the macro block header */
- ir_node *nmbh = get_new_node(mbh);
- assert(nmbh != NULL);
- set_Block_MacroBlock(new_node, nmbh);
- } else {
+ if (!is_Block(node)) {
ir_node *block = get_nodes_block(node);
ir_node *new_block = get_new_node(block);
set_nodes_block(new_node, new_block);
@@ -933,19 +933,14 @@ ir_node *new_d_immBlock(dbg_info *db)
/* creates a new dynamic in-array, as the length of in is -1 */
res = new_ir_node(db, current_ir_graph, NULL, op_Block, mode_BB, -1, NULL);
/* macroblock head */
res->in[0] = res;
res->attr.block.is_matured = 0;
res->attr.block.is_dead = 0;
res->attr.block.is_mb_head = 1;
res->attr.block.irg.irg = current_ir_graph;
res->attr.block.backedge = NULL;
res->attr.block.in_cg = NULL;
res->attr.block.cg_backedge = NULL;
res->attr.block.extblk = NULL;
res->attr.block.region = NULL;
res->attr.block.mb_depth = 0;
res->attr.block.entity = NULL;
set_Block_block_visited(res, 0);
@@ -966,27 +961,6 @@ ir_node *new_immBlock(void)
return new_d_immBlock(NULL);
} /* new_immBlock */
/* immature PartBlock with its predecessors */
ir_node *new_d_immPartBlock(dbg_info *db, ir_node *pred_jmp)
{
ir_node *res = new_d_immBlock(db);
ir_node *blk = get_nodes_block(pred_jmp);
res->in[0] = blk->in[0];
assert(res->in[0] != NULL);
add_immBlock_pred(res, pred_jmp);
res->attr.block.is_mb_head = 0;
res->attr.block.mb_depth = blk->attr.block.mb_depth + 1;
return res;
} /* new_d_immPartBlock */
ir_node *new_immPartBlock(ir_node *pred_jmp)
{
return new_d_immPartBlock(NULL, pred_jmp);
} /* new_immPartBlock */
/* add an edge to a jmp/control flow node */
void add_immBlock_pred(ir_node *block, ir_node *jmp)
{
@@ -994,7 +968,6 @@ void add_immBlock_pred(ir_node *block, ir_node *jmp)
assert(is_Block(block) && "Error: Must be a Block");
assert(!block->attr.block.is_matured && "Error: Block already matured!\n");
assert(block->attr.block.is_mb_head && "Error: Cannot add a predecessor to a PartBlock");
assert(is_ir_node(jmp));
ARR_APP1(ir_node *, block->in, jmp);
@@ -1492,16 +1492,6 @@ static void dump_ir_data_edges(FILE *F, ir_node *n)
print_edge_vcgattr(F, n, i);
fprintf(F, "}\n");
}
if ((flags & ir_dump_flag_macroblock_edges) && is_Block(n)) {
ir_node *mb = get_Block_MacroBlock(n);
fprintf(F, "edge: {sourcename: \"");
PRINT_NODEID(n);
fprintf(F, "\" targetname: \"");
PRINT_NODEID(mb);
fprintf(F, "\" label: \"mb\" " MACROBLOCK_EDGE_ATTR);
fprintf(F, "}\n");
}
}
/**
@@ -103,7 +103,6 @@ typedef enum {
#define KEEP_ALIVE_DF_EDGE_ATTR "class:20 priority:10 color:purple"
#define ANCHOR_EDGE_ATTR "class:20 priority:60 color:purple linestyle:dotted"
#define OUT_EDGE_ATTR "class:21 priority:10 color:gold linestyle:dashed"
#define MACROBLOCK_EDGE_ATTR "class:22 priority:10 color:green linestyle:dashed"
#define BACK_EDGE_ATTR "linestyle:dashed "
@@ -133,7 +133,6 @@ void dump_irnode_to_file(FILE *F, ir_node *n)
case iro_Block: {
if (has_Block_entity(n))
fprintf(F, " Label: %lu\n", get_entity_label(get_Block_entity(n)));
ir_fprintf(F, " macro Block: %+F\n", get_Block_MacroBlock(n));
fprintf(F, " block visited: %ld\n", get_Block_block_visited(n));
fprintf(F, " block marked: %u\n", get_Block_mark(n));
if (get_irg_dom_state(get_irn_irg(n)) != dom_none) {
@@ -472,17 +472,13 @@ void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt, ir
}
if (edges_activated_kind(irg, EDGE_KIND_BLOCK) && is_Block(src)) {
- if (pos == -1) {
- /* a MacroBlock edge: ignore it here */
- } else {
- ir_node *bl_old = old_tgt ? get_nodes_block(skip_Proj(old_tgt)) : NULL;
- ir_node *bl_tgt = NULL;
+ ir_node *bl_old = old_tgt ? get_nodes_block(skip_Proj(old_tgt)) : NULL;
+ ir_node *bl_tgt = NULL;
- if (tgt)
- bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt));
+ if (tgt)
+ bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(skip_Proj(tgt));
- edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
- }
+ edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
}
}
@@ -119,8 +119,7 @@ void exchange(ir_node *old, ir_node *nw)
/**
* Walker: links all Phi nodes to their Blocks' lists,
- * all Proj nodes to their predecessors and all
- * partBlocks to their MacroBlock header.
+ * all Proj nodes to their predecessors.
*/
static void collect_phiprojs_walker(ir_node *n, void *env)
{
@@ -138,13 +137,6 @@ static void collect_phiprojs_walker(ir_node *n, void *env)
set_irn_link(n, get_irn_link(pred));
set_irn_link(pred, n);
} else if (is_Block(n)) {
ir_node *mbh = get_Block_MacroBlock(n);
if (mbh != n) {
set_irn_link(n, get_irn_link(mbh));
set_irn_link(mbh, n);
}
}
}
@@ -194,8 +186,8 @@ static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl)
void part_block(ir_node *node)
{
- ir_node *new_block, *old_block, *mbh;
- ir_node *phi, *jmp, *next, *block;
+ ir_node *new_block, *old_block;
+ ir_node *phi, *jmp;
ir_graph *rem = current_ir_graph;
/* Turn off optimizations so that blocks are not merged again. */
@@ -206,18 +198,9 @@ void part_block(ir_node *node)
/* Transform the control flow */
old_block = get_nodes_block(node);
mbh = get_Block_MacroBlock(old_block);
new_block = new_Block(get_Block_n_cfgpreds(old_block),
get_Block_cfgpred_arr(old_block));
if (mbh != old_block) {
/* we are splitting a partBlock */
set_Block_MacroBlock(new_block, mbh);
} else {
/* we are splitting a header: this creates a new header */
set_Block_MacroBlock(new_block, new_block);
}
/* create a jump from new_block to old_block, which is now the lower one */
jmp = new_r_Jmp(new_block);
set_irn_in(old_block, 1, &jmp);
@@ -234,70 +217,10 @@ void part_block(ir_node *node)
phi = get_Phi_next(phi);
}
/* rewire partBlocks: This is necessary, because old_block is a new MacroBlock
header now */
if (mbh != old_block) {
ir_node *list = NULL;
/* move blocks from mbh to old_block if old_block dominates them */
block = get_irn_link(mbh);
/* mbh's list will be rebuilt */
set_irn_link(mbh, NULL);
/* old_block is a new mbh */
set_Block_MacroBlock(old_block, old_block);
/* note that we must splice the list of partBlocks here */
for (; block != NULL; block = next) {
ir_node *curr = block;
assert(is_Block(curr));
next = get_irn_link(block);
if (block == old_block) {
/* this effectively removes old_block from mbh's list */
continue;
}
assert(get_Block_MacroBlock(curr) == mbh);
for (;;) {
if (curr == old_block) {
/* old_block dominates the block, so old_block will be
the new macro block header */
set_Block_MacroBlock(block, old_block);
set_irn_link(block, list);
list = block;
break;
}
if (curr == mbh) {
/* leave it in the mbh */
set_irn_link(block, get_irn_link(mbh));
set_irn_link(mbh, block);
break;
}
assert(get_Block_n_cfgpreds(curr) == 1);
curr = get_Block_cfgpred_block(curr, 0);
}
}
/* beware: do NOT directly manipulate old_block's list, as old_block is
in mbh's list and this would destroy the list! */
set_irn_link(old_block, list);
/* finally add new_block to mbh's list */
set_irn_link(new_block, get_irn_link(mbh));
set_irn_link(mbh, new_block);
} else {
/* old_block is the mbh, as well as new_block */
set_Block_MacroBlock(new_block, new_block);
}
set_optimize(rem_opt);
current_ir_graph = rem;
}
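The caller-side protocol of part_block() is unchanged by this simplification; a hedged sketch (node assumed to sit in some block of its graph):

	collect_phiprojs(get_irn_irg(node));  /* establish the Phi/Proj lists part_block() relies on */
	part_block(node);                     /* node and its predecessors move into the new upper block */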
/* kill a node by setting its predecessors to Bad and finally exchanging the node for Bad itself. */
void kill_node(ir_node *node)
{
ir_graph *irg = get_irn_irg(node);
@@ -519,13 +519,6 @@ void set_nodes_block(ir_node *node, ir_node *block)
set_irn_n(node, -1, block);
}
/* this works for all except Block */
ir_node *get_nodes_MacroBlock(const ir_node *node)
{
assert(node->op != op_Block);
return get_Block_MacroBlock(get_irn_n(node, -1));
}
/* Test whether arbitrary node is frame pointer, i.e. Proj(pn_Start_P_frame_base)
* from Start. If so returns frame type, else Null. */
ir_type *is_frame_pointer(const ir_node *n)
@@ -648,39 +641,6 @@ void set_Block_extbb(ir_node *block, ir_extblk *extblk)
block->attr.block.extblk = extblk;
}
/* Returns the macro block header of a block.*/
ir_node *get_Block_MacroBlock(const ir_node *block)
{
ir_node *mbh;
assert(is_Block(block));
mbh = get_irn_n(block, -1);
/* once macro block header is respected by all optimizations,
this assert can be removed */
assert(mbh != NULL);
return mbh;
}
/* Sets the macro block header of a block. */
void set_Block_MacroBlock(ir_node *block, ir_node *mbh)
{
assert(is_Block(block));
mbh = skip_Id(mbh);
assert(is_Block(mbh));
set_irn_n(block, -1, mbh);
}
/* returns the macro block header of a node. */
ir_node *get_irn_MacroBlock(const ir_node *n)
{
if (! is_Block(n)) {
n = get_nodes_block(n);
/* if the Block is Bad, do NOT try to get its MB, it will fail. */
if (is_Bad(n))
return (ir_node *)n;
}
return get_Block_MacroBlock(n);
}
/* returns the graph of a Block. */
ir_graph *(get_Block_irg)(const ir_node *block)
{
@@ -1670,14 +1670,13 @@ static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
ret_tuple = 1;
else if (is_Bound(pred)) {
/*
- * idx was Bounds checked in the same MacroBlock previously,
- * it is still valid if lower <= pred_lower && pred_upper <= upper.
+ * idx was Bounds checked previously, it is still valid if
+ * lower <= pred_lower && pred_upper <= upper.
*/
ir_node *lower = get_Bound_lower(bound);
ir_node *upper = get_Bound_upper(bound);
if (get_Bound_lower(pred) == lower &&
- get_Bound_upper(pred) == upper &&
- get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
+ get_Bound_upper(pred) == upper) {
/*
* One could expect that we simply return the previous
* Bound here. However, this would be wrong, as we could
@@ -4830,14 +4829,13 @@ static ir_node *transform_node_Proj_Bound(ir_node *proj)
ret_tuple = 1;
else if (is_Bound(pred)) {
/*
- * idx was Bounds checked in the same MacroBlock previously,
- * it is still valid if lower <= pred_lower && pred_upper <= upper.
+ * idx was Bounds checked previously, it is still valid if
+ * lower <= pred_lower && pred_upper <= upper.
*/
ir_node *lower = get_Bound_lower(bound);
ir_node *upper = get_Bound_upper(bound);
if (get_Bound_lower(pred) == lower &&
- get_Bound_upper(pred) == upper &&
- get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
+ get_Bound_upper(pred) == upper) {
/*
* One could expect that we simply return the previous
* Bound here. However, this would be wrong, as we could
@@ -6432,13 +6430,17 @@ int identities_cmp(const void *elt, const void *key)
if (irn_arity_a != get_irn_arity(b))
return 1;
+ /* blocks are never the same */
+ if (is_Block(a))
+ return 1;
if (get_irn_pinned(a) == op_pin_state_pinned) {
/* for pinned nodes, the block inputs must be equal */
if (get_irn_n(a, -1) != get_irn_n(b, -1))
return 1;
} else if (! get_opt_global_cse()) {
- /* for block-local CSE both nodes must be in the same MacroBlock */
- if (get_irn_MacroBlock(a) != get_irn_MacroBlock(b))
+ /* for block-local CSE both nodes must be in the same Block */
+ if (get_nodes_block(a) != get_nodes_block(b))
return 1;
}
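The simplified rule can be restated in one line (a hedged illustration, not code from this commit; a and b are the candidate nodes):

	/* without global CSE, floating nodes may only unify within one Block */
	int cse_ok = get_opt_global_cse() || get_nodes_block(a) == get_nodes_block(b);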
@@ -6507,54 +6509,6 @@ void ir_normalize_node(ir_node *n)
}
} /* ir_normalize_node */
/**
* Update the nodes after a match in the value table. If both nodes have
* the same MacroBlock but different Blocks, we must ensure that the node
* with the dominating Block (the node that is nearer to the MacroBlock
* header) is stored in the table.
* Because a MacroBlock has only one "non-exception" control flow, we don't
* need dominance info here: we know that one block must dominate the other,
* and following the only block input will find it.
*/
static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node)
{
ir_node *known_blk, *new_block, *block, *mbh;
if (get_opt_global_cse()) {
/* Block inputs are meaningless */
return;
}
known_blk = get_irn_n(known_irn, -1);
new_block = get_irn_n(new_ir_node, -1);
if (known_blk == new_block) {
/* already in the same block */
return;
}
/*
* We expect the typical case produced while building the graph. In that
* case, the known_irn is already the upper one, so checking this should be faster.
*/
block = new_block;
mbh = get_Block_MacroBlock(new_block);
for (;;) {
if (block == known_blk) {
/* ok, we have found it: known_block dominates new_block as expected */
return;
}
if (block == mbh) {
/*
* We have reached the MacroBlock header without finding
* the known_block, so new_block must dominate known_block.
* Update known_irn.
*/
set_irn_n(known_irn, -1, new_block);
return;
}
assert(get_Block_n_cfgpreds(block) == 1);
block = get_Block_cfgpred_block(block, 0);
}
} /* update_known_irn */
/*
* Return the canonical node computing the same value as n.
* Looks up the node in a hash table, enters it in the table
@@ -6579,8 +6533,6 @@ ir_node *identify_remember(ir_node *n)
nn = pset_insert(value_table, n, ir_node_hash(n));
if (nn != n) {
update_known_irn(nn, n);
/* n is reachable again */
edges_node_revival(nn, get_irn_irg(nn));
}
@@ -6589,9 +6541,9 @@ ir_node *identify_remember(ir_node *n)
} /* identify_remember */
/**
- * During construction we set the op_pin_state_pinned flag in the graph right when the
- * optimization is performed. The flag turning on procedure global cse could
- * be changed between two allocations. This way we are safe.
+ * During construction we set the op_pin_state_pinned flag in the graph right
+ * when the optimization is performed. The flag turning on procedure global
+ * cse could be changed between two allocations. This way we are safe.
*
* @param n The node to lookup
*/
@@ -6600,7 +6552,7 @@ static inline ir_node *identify_cons(ir_node *n)