Commit 9732efd1 authored by Matthias Braun

remove non-strict exception mode

[r28017]
parent c9b4f410
@@ -228,23 +228,6 @@ FIRM_API void set_opt_normalize(int value);
*/
FIRM_API void set_opt_allow_conv_b(int value);
/** Enable/Disable precise exception context.
*
* If enabled, all exceptions form a barrier for values, as in the
* following example:
*
* @code
* a = 1;
* b = 3 / 0;
* a = 2;
* @endcode
*
 * If precise exception handling is enabled, an exception handler sees a == 1,
* else it might see a == 2.
* Enable this for languages with strict exception order like Java.
*/
FIRM_API void set_opt_precise_exc_context(int value);
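Before its removal, a frontend for a language with strict exception order (such as Java) would switch this option on once during setup. A minimal sketch, assuming the public libfirm/firm.h umbrella header; frontend_set_exception_mode() is a hypothetical helper name:

#include <libfirm/firm.h>

/* hypothetical frontend helper: make every potentially trapping operation a
 * barrier for values, so the handler in the example above sees a == 1 */
static void frontend_set_exception_mode(void)
{
	set_opt_precise_exc_context(1);
}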
/** Enable/Disable Alias analysis.
*
* If enabled, memory disambiguation by alias analysis is used.
@@ -619,152 +619,6 @@ static ir_node *get_r_value_internal(ir_node *block, int pos, ir_mode *mode);
static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin, int ins);
/**
* Construct a new frag_array for node n.
* Copy the content from the current graph_arr of the corresponding block:
* this is the current state.
* Set ProjM(n) as current memory state.
* Further the last entry in frag_arr of current block points to n. This
* constructs a chain block->last_frag_op-> ... first_frag_op of all frag ops in the block.
*/
static inline ir_node **new_frag_arr(ir_node *n)
{
ir_node **arr;
int opt;
arr = NEW_ARR_D (ir_node *, current_ir_graph->obst, current_ir_graph->n_loc);
memcpy(arr, current_ir_graph->current_block->attr.block.graph_arr,
sizeof(ir_node *)*current_ir_graph->n_loc);
/* turn off optimization before allocating Proj nodes, as the node n is not
finished yet. */
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (is_Call(n)) {
arr[0] = new_Proj(n, mode_M, pn_Call_M);
} else if (is_CopyB(n)) {
arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
} else {
assert((pn_Quot_M == pn_DivMod_M) &&
(pn_Quot_M == pn_Div_M) &&
(pn_Quot_M == pn_Mod_M) &&
(pn_Quot_M == pn_Load_M) &&
(pn_Quot_M == pn_Store_M) &&
(pn_Quot_M == pn_Alloc_M) &&
(pn_Quot_M == pn_Bound_M));
arr[0] = new_Proj(n, mode_M, pn_Alloc_M);
}
set_optimize(opt);
current_ir_graph->current_block->attr.block.graph_arr[current_ir_graph->n_loc-1] = n;
return arr;
} /* new_frag_arr */
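The chain described above, sketched for a block that constructs a Load l1 followed by a Call c1 (names are illustrative; the state shown is after both nodes received their frag arrays):

/* block->attr.block.graph_arr[n_loc-1] == c1    last fragile op in the block
 * get_frag_arr(c1)[n_loc-1]            == l1    previous fragile op
 * get_frag_arr(l1)[n_loc-1]            == NULL  first fragile op, end of chain
 */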
/**
* Returns the frag_arr from a node.
*/
static inline ir_node **get_frag_arr(ir_node *n)
{
switch (get_irn_opcode(n)) {
case iro_Call:
return n->attr.call.exc.frag_arr;
case iro_Alloc:
return n->attr.alloc.exc.frag_arr;
case iro_Load:
return n->attr.load.exc.frag_arr;
case iro_Store:
return n->attr.store.exc.frag_arr;
default:
return n->attr.except.frag_arr;
}
} /* get_frag_arr */
static void set_frag_value(ir_node **frag_arr, int pos, ir_node *val)
{
#ifdef DEBUG_libfirm
int i;
for (i = 1024; i >= 0; --i)
#else
for (;;)
#endif
{
if (frag_arr[pos] == NULL)
frag_arr[pos] = val;
if (frag_arr[current_ir_graph->n_loc - 1] != NULL) {
ir_node **arr = get_frag_arr(frag_arr[current_ir_graph->n_loc - 1]);
assert(arr != frag_arr && "Endless recursion detected");
frag_arr = arr;
} else
return;
}
assert(!"potential endless recursion in set_frag_value");
} /* set_frag_value */
static ir_node *get_r_frag_value_internal(ir_node *block, ir_node *cfOp,
int pos, ir_mode *mode)
{
ir_node *res;
ir_node **frag_arr;
assert(is_fragile_op(cfOp) && !is_Bad(cfOp));
frag_arr = get_frag_arr(cfOp);
res = frag_arr[pos];
if (res == NULL) {
if (block->attr.block.graph_arr[pos] != NULL) {
/* There was a set_value() after the cfOp and no get_value() before that
set_value(). We must build a Phi node now. */
if (block->attr.block.is_matured) {
int ins = get_irn_arity(block);
ir_node **nin;
NEW_ARR_A(ir_node *, nin, ins);
res = phi_merge(block, pos, mode, nin, ins);
} else {
res = new_rd_Phi0(current_ir_graph, block, mode);
res->attr.phi.u.pos = pos;
res->attr.phi.next = block->attr.block.phis;
block->attr.block.phis = res;
}
assert(res != NULL);
/* It's a Phi, we can write this into all graph_arrs with NULL */
set_frag_value(block->attr.block.graph_arr, pos, res);
} else {
res = get_r_value_internal(block, pos, mode);
set_frag_value(block->attr.block.graph_arr, pos, res);
}
}
return res;
} /* get_r_frag_value_internal */
/**
* Check whether a control flownode cf_pred represents an exception flow.
*
* @param cf_pred the control flow node
* @param prev_cf_op if cf_pred is a Proj, the predecessor node, else equal to cf_pred
*/
static int is_exception_flow(ir_node *cf_pred, ir_node *prev_cf_op)
{
/*
 * Note: all projections from a Raise are "exceptional control flow", but we handle
 * them like a normal Jmp, because there is no "regular" one.
 * That's why Raise is not a "fragile_op"!
*/
if (is_fragile_op(prev_cf_op)) {
if (is_Proj(cf_pred)) {
if (get_Proj_proj(cf_pred) == pn_Generic_X_regular) {
/* the regular control flow, NO exception */
return 0;
}
assert(get_Proj_proj(cf_pred) == pn_Generic_X_except);
return 1;
}
/* Hmm, exception but not a Proj? */
panic("unexpected condition: fragile op without a proj");
}
return 0;
} /* is_exception_flow */
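For illustration, the two control-flow exits the check distinguishes; a minimal sketch inside the construction code, assuming div is an already constructed fragile node (e.g. a Div) and using the generic Proj numbers tested above:

/* the two mode_X Projs of a fragile op `div` */
ir_node *x_reg = new_Proj(div, mode_X, pn_Generic_X_regular);  /* regular flow   */
ir_node *x_exc = new_Proj(div, mode_X, pn_Generic_X_except);   /* exception flow */
/* is_exception_flow(x_reg, div) yields 0, is_exception_flow(x_exc, div) yields 1 */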
/**
* Computes the predecessors for the real phi node, and then
* allocates and returns this node. The routine called to allocate the
@@ -784,17 +638,17 @@ static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin,
ir_graph *irg = current_ir_graph;
if (block == get_irg_start_block(irg)) {
/* Collapsing to Bad tarvals is not a good idea.
   So we call a user-supplied routine here that deals with this
   case as appropriate for the given language. Unfortunately the
   only help we can give here is the position.
   Even if all variables are defined before use, it can happen that
   we get to the start block if a Cond has been replaced by a tuple
   (bad, jmp). In this case we call the function needlessly,
   possibly reporting a non-existent error.
   However, this SHOULD NOT HAPPEN, as bad control flow nodes are
   intercepted before recurring.
*/
if (default_initialize_local_variable != NULL) {
ir_node *rem = get_cur_block();
@@ -802,21 +656,13 @@ static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin,
set_cur_block(block);
block->attr.block.graph_arr[pos] = default_initialize_local_variable(irg, mode, pos - 1);
set_cur_block(rem);
}
else
} else {
block->attr.block.graph_arr[pos] = new_Unknown(mode);
/* We don't need to care about exception ops in the start block.
There are none by definition. */
}
return block->attr.block.graph_arr[pos];
} else {
phi0 = new_rd_Phi0(irg, block, mode);
block->attr.block.graph_arr[pos] = phi0;
if (get_opt_precise_exc_context()) {
/* Set graph_arr for fragile ops. Here, too, we should break recursion.
We could choose a cyclic path through a cfop, but the recursion would
break at some point. */
set_frag_value(block->attr.block.graph_arr, pos, phi0);
}
}
}
@@ -836,11 +682,7 @@ static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin,
prevBlock = prevCfOp->in[0]; /* go past control flow op to prev block */
assert(prevBlock);
if (!is_Bad(prevBlock)) {
if (get_opt_precise_exc_context() && is_exception_flow(cf_pred, prevCfOp)) {
assert(get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode));
nin[i-1] = get_r_frag_value_internal(prevBlock, prevCfOp, pos, mode);
} else
nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
nin[i-1] = get_r_value_internal(prevBlock, pos, mode);
} else {
nin[i-1] = new_Bad();
}
@@ -872,8 +714,6 @@ static ir_node *phi_merge(ir_node *block, int pos, ir_mode *mode, ir_node **nin,
if (phi0 != NULL) {
exchange(phi0, res);
block->attr.block.graph_arr[pos] = res;
/* Don't call set_frag_value() here, as it does not overwrite. It does not
matter; this is only an optimization. */
}
return res;
@@ -1046,26 +886,6 @@ ir_node *new_d_defaultProj(dbg_info *db, ir_node *arg, long max_proj)
return res;
} /* new_d_defaultProj */
/**
* Allocate a frag array for a node if the current graph state is phase_building.
*
* @param irn the node for which the frag array should be allocated
 * @param op the opcode of the (original) node; if it does not match the opcode
 * of irn, nothing is done
 * @param frag_store the address of the frag store in the irn attributes; if this
 * address already contains a value != NULL, nothing is done
*/
void firm_alloc_frag_arr(ir_node *irn, ir_op *op, ir_node ***frag_store)
{
if (get_opt_precise_exc_context()) {
if ((current_ir_graph->phase_state == phase_building) &&
(get_irn_op(irn) == op) && /* Could be optimized away. */
!*frag_store) /* Could be a cse where the arr is already set. */ {
*frag_store = new_frag_arr(irn);
}
}
} /* firm_alloc_frag_arr */
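Typical call sites are the node constructors for fragile ops; a minimal sketch mirroring the generated new_d_* constructors (see the template hunk near the end of this commit), with res standing for a freshly allocated Load node:

/* inside a constructor for a fragile op, right before res is returned */
firm_alloc_frag_arr(res, op_Load, &res->attr.load.exc.frag_arr);
/* does nothing unless the graph is in phase_building, res really is a Load,
   and the frag_arr slot is still NULL */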
ir_node *new_d_simpleSel(dbg_info *db, ir_node *store, ir_node *objptr, ir_entity *ent)
/* GL: objptr was called frame before. Frame was a bad choice for the name
as the operand could as well be a pointer to a dynamic object. */
@@ -74,9 +74,6 @@ I_FLAG(normalize , 12, ON)
I_FLAG(allow_conv_b , 13, ON)
/** precise exception context */
I_FLAG(precise_exc_context , 15, OFF)
/** Optimize cast nodes. */
E_FLAG(suppress_downcast_optimization , 22, OFF)
@@ -130,18 +130,11 @@ void irg_set_nloc(ir_graph *res, int n_loc)
{
assert(res->phase_state == phase_building);
if (get_opt_precise_exc_context()) {
res->n_loc = n_loc + 1 + 1; /* number of local variables that are never
dereferenced in this graph plus one for
the store plus one for links to fragile
operations. n_loc is not the number of
parameters to the procedure! */
} else {
res->n_loc = n_loc + 1; /* number of local variables that are never
dereferenced in this graph plus one for
the store. This is not the number of parameters
to the procedure! */
}
res->n_loc = n_loc + 1; /* number of local variables that are never
dereferenced in this graph plus one for
the store. This is not the number of
parameters to the procedure! */
if (res->loc_descriptions) {
xfree(res->loc_descriptions);
res->loc_descriptions = NULL;
@@ -677,10 +670,7 @@ ir_type *get_irg_value_param_type(ir_graph *irg)
int get_irg_n_locs(ir_graph *irg)
{
if (get_opt_precise_exc_context())
return irg->n_loc - 1 - 1;
else
return irg->n_loc - 1;
return irg->n_loc - 1;
}
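A short worked example of the slot arithmetic above, for a graph irg with three local variables (the numbers follow directly from the code shown):

irg_set_nloc(irg, 3);
/* strict mode (after this commit):  irg->n_loc == 3 + 1     == 4 */
/* removed non-strict mode:          irg->n_loc == 3 + 1 + 1 == 5 */
/* get_irg_n_locs(irg) returns 3 in both cases                    */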
/* Returns the obstack associated with the graph. */
@@ -31,66 +31,21 @@
#include "irgwalk.h"
/**
 * Post-walker: prepare the graph nodes for a new SSA construction cycle by
 * allocating new arrays.
 */
static void prepare_nodes(ir_node *irn, void *env)
{
(void)env;
switch (get_irn_opcode(irn)) {
case iro_Block:
if (is_Block(irn)) {
unsigned n_loc = current_ir_graph->n_loc;
struct obstack *obst = current_ir_graph->obst;
/* reset mature flag */
irn->attr.block.is_matured = 0;
irn->attr.block.graph_arr = NEW_ARR_D(ir_node *, current_ir_graph->obst,
current_ir_graph->n_loc);
memset(irn->attr.block.graph_arr, 0, sizeof(ir_node *) * current_ir_graph->n_loc);
irn->attr.block.graph_arr = NEW_ARR_D(ir_node *, obst, n_loc);
memset(irn->attr.block.graph_arr, 0, sizeof(ir_node*) * n_loc);
irn->attr.block.phis = NULL;
break;
/* note that the frag array must be cleared first, else firm_alloc_frag_arr()
will not allocate new memory. */
case iro_Quot:
irn->attr.except.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Quot, &irn->attr.except.frag_arr);
break;
case iro_DivMod:
irn->attr.except.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_DivMod, &irn->attr.except.frag_arr);
break;
case iro_Div:
irn->attr.except.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Div, &irn->attr.except.frag_arr);
break;
case iro_Mod:
irn->attr.except.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Mod, &irn->attr.except.frag_arr);
break;
case iro_Call:
irn->attr.call.exc.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Call, &irn->attr.call.exc.frag_arr);
break;
case iro_Load:
irn->attr.load.exc.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Load, &irn->attr.load.exc.frag_arr);
break;
case iro_Store:
irn->attr.store.exc.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Store, &irn->attr.store.exc.frag_arr);
break;
case iro_Alloc:
irn->attr.alloc.exc.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Alloc, &irn->attr.alloc.exc.frag_arr);
break;
case iro_CopyB:
irn->attr.copyb.exc.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_CopyB, &irn->attr.copyb.exc.frag_arr);
break;
case iro_Bound:
irn->attr.bound.exc.frag_arr = NULL;
firm_alloc_frag_arr(irn, op_Bound, &irn->attr.bound.exc.frag_arr);
break;
default:
break;
}
}
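prepare_nodes is meant to run as a post-walker over the whole graph before SSA construction is restarted; a minimal sketch, assuming the standard irg_walk_graph() interface from the included irgwalk.h (prepare_graph is a hypothetical wrapper name):

/* apply prepare_nodes to every node of irg as a post-walker */
static void prepare_graph(ir_graph *irg)
{
	current_ir_graph = irg;   /* the walker callback relies on current_ir_graph */
	irg_walk_graph(irg, NULL, prepare_nodes, NULL);
}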
@@ -191,7 +191,6 @@ typedef struct {
op_pin_state pin_state; /**< the pin state for operations that might generate an exception:
if it is known that no exception will be generated, this can be set to
op_pin_state_floats. */
struct ir_node **frag_arr; /**< For Phi node construction in case of exception */
} except_attr;
/** Call attributes. */
@@ -340,9 +340,6 @@ ir_node *new_d_{{node.constrname}}(
{{node|curblock}}
{{node|nodearguments}}
{% endfilter %});
{%- if "fragile" in node.flags %}
firm_alloc_frag_arr(res, op_{{node.name}}, &res->attr.except.frag_arr);
{%- endif %}
return res;
}