Commit e51bd960 authored by Michael Beck's avatar Michael Beck
Browse files

BugFix: find_constant_entity() now checks global entities to be constant

Memory disambiguator calls added

[r8502]
parent e059f092
......@@ -5,7 +5,7 @@
* Author: Michael Beck
* Created:
* CVS-ID: $Id$
* Copyright: (c) 1998-2004 Universität Karlsruhe
* Copyright: (c) 1998-2007 Universität Karlsruhe
* Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
*/
#ifdef HAVE_CONFIG_H
......@@ -39,6 +39,7 @@
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
......@@ -108,30 +109,25 @@ static ldst_info_t *get_ldst_info(ir_node *node, walk_env_t *env) {
if (! info) {
info = obstack_alloc(&env->obst, sizeof(*info));
memset(info, 0, sizeof(*info));
set_irn_link(node, info);
}
return info;
}
} /* get_ldst_info */
/**
* get the Block info of a node
*/
static block_info_t *get_block_info(ir_node *node, walk_env_t *env)
{
static block_info_t *get_block_info(ir_node *node, walk_env_t *env) {
block_info_t *info = get_irn_link(node);
if (! info) {
info = obstack_alloc(&env->obst, sizeof(*info));
memset(info, 0, sizeof(*info));
set_irn_link(node, info);
}
return info;
}
} /* get_block_info */
/**
* update the projection info for a Load/Store
......@@ -151,7 +147,7 @@ static unsigned update_projs(ldst_info_t *info, ir_node *proj)
info->projs[nr] = proj;
return 0;
}
}
} /* update_projs */
/**
* update the exception block info for a Load/Store node.
......@@ -167,7 +163,7 @@ static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
info->exc_block = block;
info->exc_idx = pos;
return 0;
}
} /* update_exc */
/** Return the number of uses of an address node */
#define get_irn_n_uses(adr) get_irn_n_edges(adr)
......@@ -213,8 +209,7 @@ static void collect_nodes(ir_node *node, void *env)
wenv->changes |= DF_CHANGED;
set_nodes_block(node, pred_blk);
}
}
else if (op == op_Store) {
} else if (op == op_Store) {
ldst_info = get_ldst_info(pred, wenv);
wenv->changes |= update_projs(ldst_info, node);
......@@ -237,8 +232,7 @@ static void collect_nodes(ir_node *node, void *env)
set_nodes_block(node, pred_blk);
}
}
}
else if (op == op_Block) {
} else if (op == op_Block) {
int i;
for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
......@@ -266,10 +260,14 @@ static void collect_nodes(ir_node *node, void *env)
}
}
}
}
} /* collect_nodes */
/**
* Returns an entity if the address ptr points to a constant one.
*
* @param ptr the address
*
* @return an entity or NULL
*/
static ir_entity *find_constant_entity(ir_node *ptr)
{
......@@ -277,9 +275,11 @@ static ir_entity *find_constant_entity(ir_node *ptr)
ir_op *op = get_irn_op(ptr);
if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
return get_SymConst_entity(ptr);
}
else if (op == op_Sel) {
ir_entity *ent = get_SymConst_entity(ptr);
if (variability_constant == get_entity_variability(ent))
return ent;
return NULL;
} else if (op == op_Sel) {
ir_entity *ent = get_Sel_entity(ptr);
ir_type *tp = get_entity_owner(ent);
......@@ -289,9 +289,6 @@ static ir_entity *find_constant_entity(ir_node *ptr)
(get_entity_n_overwrittenby(ent) != 0) ) )
return NULL;
if (variability_constant == get_entity_variability(ent))
return ent;
if (is_Array_type(tp)) {
/* check bounds */
int i, n;
......@@ -323,13 +320,15 @@ static ir_entity *find_constant_entity(ir_node *ptr)
}
}
if (variability_constant == get_entity_variability(ent))
return ent;
/* try next */
ptr = get_Sel_ptr(ptr);
}
else
} else
return NULL;
}
}
} /* find_constant_entity */
/**
* Return the Selection index of a Sel node from dimension n
......@@ -338,7 +337,7 @@ static long get_Sel_array_index_long(ir_node *n, int dim) {
ir_node *index = get_Sel_index(n, dim);
assert(get_irn_op(index) == op_Const);
return get_tarval_long(get_Const_tarval(index));
}
} /* get_Sel_array_index_long */
/**
* Returns the accessed component graph path for an
......@@ -361,8 +360,7 @@ static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
assert(get_SymConst_kind(ptr) == symconst_addr_ent);
root = get_SymConst_entity(ptr);
res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
}
else {
} else {
assert(get_irn_op(ptr) == op_Sel);
/* it's a Sel, go up until we find the root */
res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
......@@ -379,14 +377,14 @@ static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
}
}
return res;
}
} /* rec_get_accessed_path */
/** Returns an access path or NULL. The access path is only
* valid, if the graph is in phase_high and _no_ address computation is used.
*/
static compound_graph_path *get_accessed_path(ir_node *ptr) {
return rec_get_accessed_path(ptr, 0);
}
} /* get_accessed_path */
/* forward */
static void reduce_adr_usage(ir_node *ptr);
......@@ -410,7 +408,7 @@ static void handle_load_update(ir_node *load) {
exchange(load, new_Bad());
reduce_adr_usage(ptr);
}
}
} /* handle_load_update */
/**
* A Use of an address node is vanished. Check if this was a Proj
......@@ -421,9 +419,8 @@ static void reduce_adr_usage(ir_node *ptr) {
if (get_irn_n_edges(ptr) <= 0) {
/* this Proj is dead now */
ir_node *pred = get_Proj_pred(ptr);
opcode code = get_irn_opcode(pred);
if (code == iro_Load) {
if (is_Load(pred)) {
ldst_info_t *info = get_irn_link(pred);
info->projs[get_Proj_proj(ptr)] = NULL;
......@@ -432,18 +429,19 @@ static void reduce_adr_usage(ir_node *ptr) {
}
}
}
}
} /* reduce_adr_usage */
/**
* Follow the memory chain as long as there are only Loads
* and try to replace current Load or Store by a previous one.
* and alias-free Stores and try to replace current Load or Store
* by a previous one.
* Note that in unreachable loops it might happen that we reach
* load again, as well as we can fall into a cycle.
* We break such cycles using a special visited flag.
*
* INC_MASTER() must be called before diving into
*/
static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
unsigned res = 0;
ldst_info_t *info = get_irn_link(load);
ir_node *pred;
......@@ -451,7 +449,7 @@ static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
ir_node *mem = get_Load_mem(load);
ir_mode *load_mode = get_Load_mode(load);
for (pred = curr; load != pred; pred = skip_Proj(get_Load_mem(pred))) {
for (pred = curr; load != pred; ) {
ldst_info_t *pred_info = get_irn_link(pred);
/*
......@@ -460,7 +458,6 @@ static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
* This is only true in strongly typed languages, not in C where the following
* is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
*/
if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
get_irn_mode(get_Store_value(pred)) == load_mode) {
/*
......@@ -496,8 +493,7 @@ static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
reduce_adr_usage(ptr);
return res | DF_CHANGED;
}
}
else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
} else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
get_Load_mode(pred) == load_mode) {
/*
* a Load after a Load -- a read after read.
......@@ -518,8 +514,7 @@ static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
if (info->projs[pn_Load_M])
exchange(info->projs[pn_Load_M], mem);
}
else {
} else {
if (info->projs[pn_Load_res]) {
set_Proj_pred(info->projs[pn_Load_res], pred);
set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
......@@ -544,9 +539,23 @@ static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
}
}
if (get_irn_op(pred) == op_Store) {
/* check if we can pass thru this store */
ir_alias_relation rel = get_alias_relation(
current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, load_mode, opt_non_opt);
/* if there might be an alias, we cannot pass this Store */
if (rel != no_alias)
break;
pred = skip_Proj(get_Store_mem(pred));
} else if (get_irn_op(pred) == op_Load) {
pred = skip_Proj(get_Load_mem(pred));
} else {
/* follow only Load chains */
if (get_irn_op(pred) != op_Load)
break;
}
/* check for cycles */
if (NODE_VISITED(pred_info))
......@@ -559,17 +568,19 @@ static unsigned follow_Load_chain(ir_node *load, ir_node *curr) {
/* handle all Sync predecessors */
for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
res |= follow_Load_chain(load, skip_Proj(get_Sync_pred(pred, i)));
res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
if (res)
break;
}
}
return res;
}
} /* follow_Mem_chain */
/**
* optimize a Load
*
* @param load the Load node
*/
static unsigned optimize_load(ir_node *load)
{
......@@ -597,6 +608,7 @@ static unsigned optimize_load(ir_node *load)
if (is_Sel(ptr)) {
ir_node *mem = get_Sel_mem(ptr);
/* FIXME: works with the current FE, but better use the base */
if (get_irn_op(skip_Proj(mem)) == op_Alloc) {
/* ok, check the types */
ir_entity *ent = get_Sel_entity(ptr);
......@@ -612,8 +624,7 @@ static unsigned optimize_load(ir_node *load)
res |= CF_CHANGED;
}
}
}
else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
} else if ((get_irn_op(skip_Proj(ptr)) == op_Alloc) ||
((get_irn_op(ptr) == op_Cast) && (get_irn_op(skip_Proj(get_Cast_op(ptr))) == op_Alloc))) {
/* simple case: a direct load after an Alloc. Firm Alloc throws
* an exception in case of out-of-memory. So, there is no way for an
......@@ -626,7 +637,7 @@ static unsigned optimize_load(ir_node *load)
}
}
/* the mem of the Load. Must still be returned after optimization */
/* The mem of the Load. Must still be returned after optimization. */
mem = get_Load_mem(load);
if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
......@@ -701,8 +712,7 @@ static unsigned optimize_load(ir_node *load)
exchange(load, new_Bad());
reduce_adr_usage(ptr);
return res;
}
else if (variability_constant == get_entity_variability(ent)) {
} else if (variability_constant == get_entity_variability(ent)) {
compound_graph_path *path = get_accessed_path(ptr);
if (path) {
......@@ -738,8 +748,7 @@ static unsigned optimize_load(ir_node *load)
exchange(load, new_Bad());
reduce_adr_usage(ptr);
return res;
}
else {
} else {
/* We cannot determine a correct access path. E.g., in jack, we load
a byte from an object to generate an exception. Happens in test program
Reflectiontest.
......@@ -766,16 +775,16 @@ static unsigned optimize_load(ir_node *load)
* We break such cycles using a special visited flag.
*/
INC_MASTER();
res = follow_Load_chain(load, skip_Proj(mem));
res = follow_Mem_chain(load, skip_Proj(mem));
return res;
}
} /* optimize_load */
/**
* follow the memory chain as long as there are only Loads.
* follow the memory chain as long as there are only Loads and alias-free Stores.
*
* INC_MASTER() must be called before diving into
*/
static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
unsigned res = 0;
ldst_info_t *info = get_irn_link(store);
ir_node *pred;
......@@ -785,7 +794,7 @@ static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
ir_mode *mode = get_irn_mode(value);
ir_node *block = get_nodes_block(store);
for (pred = curr; pred != store; pred = skip_Proj(get_Load_mem(pred))) {
for (pred = curr; pred != store;) {
ldst_info_t *pred_info = get_irn_link(pred);
/*
......@@ -809,8 +818,7 @@ static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
reduce_adr_usage(ptr);
return DF_CHANGED;
}
}
else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
} else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
value == pred_info->projs[pn_Load_res]) {
/*
* a Store of a value after a Load -- a write after read.
......@@ -825,9 +833,23 @@ static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
}
}
if (get_irn_op(pred) == op_Store) {
/* check if we can pass thru this store */
ir_alias_relation rel = get_alias_relation(
current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, mode, opt_non_opt);
/* if there might be an alias, we cannot pass this Store */
if (rel != no_alias)
break;
pred = skip_Proj(get_Store_mem(pred));
} else if (get_irn_op(pred) == op_Load) {
pred = skip_Proj(get_Load_mem(pred));
} else {
/* follow only Load chains */
if (get_irn_op(pred) != op_Load)
break;
}
/* check for cycles */
if (NODE_VISITED(pred_info))
......@@ -840,19 +862,20 @@ static unsigned follow_Load_chain_for_Store(ir_node *store, ir_node *curr) {
/* handle all Sync predecessors */
for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
res |= follow_Load_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
if (res)
break;
}
}
return res;
}
} /* follow_Mem_chain_for_Store */
/**
* optimize a Store
*
* @param store the Store node
*/
static unsigned optimize_store(ir_node *store)
{
static unsigned optimize_store(ir_node *store) {
ir_node *ptr, *mem;
if (get_Store_volatility(store) == volatility_is_volatile)
......@@ -860,8 +883,8 @@ static unsigned optimize_store(ir_node *store)
ptr = get_Store_ptr(store);
/* Check, if the address of this load is used more than once.
* If not, this load cannot be removed in any case. */
/* Check, if the address of this Store is used more than once.
* If not, this Store cannot be removed in any case. */
if (get_irn_n_uses(ptr) <= 1)
return 0;
......@@ -869,8 +892,8 @@ static unsigned optimize_store(ir_node *store)
/* follow the memory chain as long as there are only Loads */
INC_MASTER();
return follow_Load_chain_for_Store(store, skip_Proj(mem));
}
return follow_Mem_chain_for_Store(store, skip_Proj(mem));
} /* optimize_store */
/**
* walker, optimizes Phi after Stores to identical places:
......@@ -1065,13 +1088,12 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
exchange(phi, projM);
return res | DF_CHANGED;
}
} /* optimize_phi */
/**
* walker, do the optimizations
*/
static void do_load_store_optimize(ir_node *n, void *env)
{
static void do_load_store_optimize(ir_node *n, void *env) {
walk_env_t *wenv = env;
switch (get_irn_opcode(n)) {
......@@ -1090,13 +1112,12 @@ static void do_load_store_optimize(ir_node *n, void *env)
default:
;
}
}
} /* do_load_store_optimize */
/*
* do the load store optimization
*/
void optimize_load_store(ir_graph *irg)
{
void optimize_load_store(ir_graph *irg) {
walk_env_t env;
assert(get_irg_phase_state(irg) != phase_building);
......@@ -1111,6 +1132,11 @@ void optimize_load_store(ir_graph *irg)
/* for Phi optimization post-dominators are needed ... */
assure_postdoms(irg);
if (get_opt_alias_analysis()) {
assure_irg_address_taken_computed(irg);
assure_irp_globals_address_taken_computed();
}
obstack_init(&env.obst);
env.changes = 0;
......@@ -1134,4 +1160,4 @@ void optimize_load_store(ir_graph *irg)
have Bad() predecessors. */
set_irg_doms_inconsistent(irg);
}
}
} /* optimize_load_store */
......@@ -5,7 +5,7 @@
* Author: Michael Beck
* Created:
* CVS-ID: $Id$
* Copyright: (c) 1998-2004 Universität Karlsruhe
* Copyright: (c) 1998-2007 Universität Karlsruhe
* Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
*/
......@@ -23,7 +23,7 @@
/** Load/Store optimization.
*
* Removes redundand non-volatile Loads and Stores.
* Removes redundant non-volatile Loads and Stores.
* May introduce Bad nodes if exceptional control flow
* is removed. The following cases are optimized:
*
......@@ -31,11 +31,11 @@
* is removed.
*
* Load after Store: A Load after a Store is removed, if
* the Load doesn't have an exception handler or is in
* the Load doesn't have an exception handler OR is in
* the same block as the Store.
*
* Load after Load: A Load after a Load is removed, if the
* Load doesn't have an exception handler or is in the
* Load doesn't have an exception handler OR is in the
* same block as the previous Load.
*
* Store before Store: A Store immediately before another
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment