Commit bc350520 authored by Matthias Braun's avatar Matthias Braun
Browse files

remove the concept of M_except, we always use the normal M proj now

[r26778]
parent e669cdc1
......@@ -70,7 +70,7 @@
* Some projection numbers must be always equal to support automatic phi construction
*/
enum pn_generic {
pn_Generic_M_regular = 0, /**< The memory result. */
pn_Generic_M = 0, /**< The memory result. */
pn_Generic_X_regular = 1, /**< Execution result if no exception occurred. */
pn_Generic_X_except = 2, /**< The control flow result branching to the exception handler */
pn_Generic_other = 3 /**< First free projection number */
......@@ -596,17 +596,14 @@ void set_Sel_entity (ir_node *node, ir_entity *ent);
* Projection numbers for result of Call node: use for Proj nodes!
*/
/* NOTE(review): the diff rendering lost its +/- markers, leaving the old and
 * new enumerator lines interleaved; this is the post-commit state, in which
 * pn_Call_M replaces pn_Call_M_regular and pn_Call_M_except is removed. */
typedef enum {
	pn_Call_M                = pn_Generic_M,         /**< The memory result. */
	pn_Call_X_regular        = pn_Generic_X_regular, /**< The control flow result when no exception occurs. */
	pn_Call_X_except         = pn_Generic_X_except,  /**< The control flow result branching to the exception handler. */
	pn_Call_T_result         = pn_Generic_other,     /**< The tuple containing all (0, 1, 2, ...) results. */
	pn_Call_P_value_res_base,                        /**< A pointer to the memory region containing copied results
	                                                      passed by value (for compound result types). */
	pn_Call_max                                      /**< number of projections from a Call */
} pn_Call; /* Projection numbers for Call. */
/** Retrieve the memory input of a Call. */
ir_node *get_Call_mem(const ir_node *node);
......@@ -669,9 +666,9 @@ void remove_Call_callee_arr(ir_node *node);
* Projection numbers for result of Builtin node: use for Proj nodes!
*/
typedef enum {
pn_Builtin_M = pn_Generic_M_regular, /**< The memory result. */
pn_Builtin_1_result = pn_Generic_other, /**< first result. */
pn_Builtin_max /**< number of projections from a Builtin */
pn_Builtin_M = pn_Generic_M, /**< The memory result. */
pn_Builtin_1_result = pn_Generic_other, /**< first result. */
pn_Builtin_max /**< number of projections from a Builtin */
} pn_Builtin; /* Projection numbers for Builtin. */
ir_node *get_Builtin_mem(const ir_node *node);
......@@ -762,7 +759,7 @@ void set_Quot_resmode(ir_node *node, ir_mode *mode);
* Projection numbers for Quot: use for Proj nodes!
*/
typedef enum {
pn_Quot_M = pn_Generic_M_regular, /**< Memory result. */
pn_Quot_M = pn_Generic_M, /**< Memory result. */
pn_Quot_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Quot_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Quot_res = pn_Generic_other, /**< Result of computation. */
......@@ -782,7 +779,7 @@ void set_DivMod_resmode(ir_node *node, ir_mode *mode);
* Projection numbers for DivMod: use for Proj nodes!
*/
typedef enum {
pn_DivMod_M = pn_Generic_M_regular, /**< Memory result. */
pn_DivMod_M = pn_Generic_M, /**< Memory result. */
pn_DivMod_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_DivMod_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_DivMod_res_div = pn_Generic_other, /**< Result of computation a / b. */
......@@ -805,7 +802,7 @@ void set_Div_no_remainder(ir_node *node, int no_remainder);
* Projection numbers for Div: use for Proj nodes!
*/
typedef enum {
pn_Div_M = pn_Generic_M_regular, /**< Memory result. */
pn_Div_M = pn_Generic_M, /**< Memory result. */
pn_Div_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Div_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Div_res = pn_Generic_other, /**< Result of computation. */
......@@ -825,7 +822,7 @@ void set_Mod_resmode(ir_node *node, ir_mode *mode);
* Projection numbers for Mod: use for Proj nodes!
*/
typedef enum {
pn_Mod_M = pn_Generic_M_regular, /**< Memory result. */
pn_Mod_M = pn_Generic_M, /**< Memory result. */
pn_Mod_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Mod_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Mod_res = pn_Generic_other, /**< Result of computation. */
......@@ -966,7 +963,7 @@ void set_memop_ptr(ir_node *node, ir_node *ptr);
* Projection numbers for Load: use for Proj nodes!
*/
typedef enum {
pn_Load_M = pn_Generic_M_regular, /**< Memory result. */
pn_Load_M = pn_Generic_M, /**< Memory result. */
pn_Load_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Load_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Load_res = pn_Generic_other, /**< Result of load operation. */
......@@ -988,7 +985,7 @@ void set_Load_align(ir_node *node, ir_align align);
* Projection numbers for Store: use for Proj nodes!
*/
typedef enum {
pn_Store_M = pn_Generic_M_regular, /**< Memory result. */
pn_Store_M = pn_Generic_M, /**< Memory result. */
pn_Store_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Store_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Store_max = pn_Generic_other /**< number of projections from a Store */
......@@ -1009,7 +1006,7 @@ void set_Store_align(ir_node *node, ir_align align);
* Projection numbers for Alloc: use for Proj nodes!
*/
typedef enum {
pn_Alloc_M = pn_Generic_M_regular, /**< Memory result. */
pn_Alloc_M = pn_Generic_M, /**< Memory result. */
pn_Alloc_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Alloc_X_except = pn_Generic_X_except, /**< Execution result if exception occurred. */
pn_Alloc_res = pn_Generic_other, /**< Result of allocation. */
......@@ -1095,12 +1092,10 @@ void set_Mux_true(ir_node *node, ir_node *ir_true);
* Projection numbers for result of CopyB node: use for Proj nodes!
*/
/* NOTE(review): interleaved diff lines resolved to the post-commit state:
 * pn_CopyB_M_except is gone and pn_CopyB_max now equals pn_Generic_other. */
typedef enum {
	pn_CopyB_M_regular = pn_Generic_M,         /**< The memory result. */
	pn_CopyB_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
	pn_CopyB_X_except  = pn_Generic_X_except,  /**< The control flow result branching to the exception handler */
	pn_CopyB_max       = pn_Generic_other      /**< number of projections from a CopyB */
} pn_CopyB; /* Projection numbers for CopyB. */
#define pn_CopyB_M pn_CopyB_M_regular
......@@ -1117,12 +1112,10 @@ void set_CopyB_type(ir_node *node, ir_type *data_type);
* Projection numbers for result of InstOf node: use for Proj nodes!
*/
/* NOTE(review): interleaved diff lines resolved to the post-commit state:
 * pn_InstOf_M_except is removed; pn_InstOf_max follows pn_InstOf_res. */
typedef enum {
	pn_InstOf_M_regular = pn_Generic_M,         /**< The memory result. */
	pn_InstOf_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
	pn_InstOf_X_except  = pn_Generic_X_except,  /**< The control flow result branching to the exception handler */
	pn_InstOf_res       = pn_Generic_other,     /**< The checked object pointer. */
	pn_InstOf_max                               /**< number of projections from an InstOf */
} pn_InstOf;
#define pn_InstOf_M pn_InstOf_M_regular
......@@ -1139,7 +1132,7 @@ void set_InstOf_obj(ir_node *node, ir_node *obj);
* Projection numbers for Raise.
*/
/* NOTE(review): old and new diff lines were interleaved; post-commit version
 * initializes pn_Raise_M from pn_Generic_M. */
typedef enum {
	pn_Raise_M = pn_Generic_M,         /**< The Memory result. */
	pn_Raise_X = pn_Generic_X_regular, /**< The control flow to the exception handler. */
	pn_Raise_max                       /**< number of projections from a Raise */
} pn_Raise; /* Projection numbers for Raise. */
......@@ -1153,7 +1146,7 @@ void set_Raise_exo_ptr(ir_node *node, ir_node *exoptr);
* Projection numbers for result of Bound node: use for Proj nodes!
*/
typedef enum {
pn_Bound_M = pn_Generic_M_regular, /**< The memory result. */
pn_Bound_M = pn_Generic_M, /**< The memory result. */
pn_Bound_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
pn_Bound_X_except = pn_Generic_X_except, /**< The control flow result branching to the exception handler */
pn_Bound_res = pn_Generic_other, /**< The checked index. */
......
......@@ -273,9 +273,9 @@ enum {
* Projection numbers for result of be_Call node: use for Proj nodes!
*/
/* NOTE(review): interleaved diff lines resolved; pn_be_Call_M_regular now
 * aliases pn_Call_M (the old pn_Call_M_regular name was removed upstream). */
typedef enum {
	pn_be_Call_M_regular = pn_Call_M,   /**< The memory result of a be_Call. */
	pn_be_Call_sp        = pn_Call_max,
	pn_be_Call_first_res                /**< The first result proj number of a be_Call. */
} pn_be_Call;
/**
......
......@@ -130,7 +130,7 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
/* should not happen here */
edges_reroute(proj, bad, irg);
break;
case pn_Call_M_except:
case pn_Call_M:
/* should not happen here */
edges_reroute(proj, nomem, irg);
break;
......@@ -154,7 +154,6 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
}
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, nomem);
/*
* Beware:
* We do not check here if this call really has exception and regular Proj's.
......@@ -167,10 +166,10 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
jmp = new_r_Jmp(block);
set_opt_cse(old_cse);
set_Tuple_pred(call, pn_Call_M, nomem);
set_Tuple_pred(call, pn_Call_X_regular, jmp);
set_Tuple_pred(call, pn_Call_X_except, bad);
set_Tuple_pred(call, pn_Call_T_result, res);
set_Tuple_pred(call, pn_Call_M_except, nomem);
set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
}
}
......
......@@ -90,7 +90,7 @@ static ir_node *own_gen_convert_call(ppc32_transform_env_t *env, ir_node *op, co
callee = new_rd_SymConst_addr_ent(env->dbg, env->irg, mode_P_code, method_ent, method_type);
call = new_rd_Call(env->dbg, env->block, memory, callee, 1, in, method_type);
call_results = new_rd_Proj(env->dbg, env->block, call, mode_T, pn_Call_T_result);
memory = new_rd_Proj(env->dbg, env->block, call, mode_M, pn_Call_M_regular);
memory = new_rd_Proj(env->dbg, env->block, call, mode_M, pn_Call_M);
return new_rd_Proj(env->dbg, env->block, call_results, to_mode, 0);
}
......
......@@ -89,7 +89,7 @@ void instrument_initcall(ir_graph *irg, ir_entity *ent) {
adr = new_r_SymConst(irg, mode_P_code, sym, symconst_addr_ent);
call = new_r_Call(first_block, get_irg_no_mem(irg), adr, 0, NULL, get_entity_type(ent));
new_mem = new_r_Proj(first_block, call, mode_M, pn_Call_M_regular);
new_mem = new_r_Proj(first_block, call, mode_M, pn_Call_M);
initial_mem = get_irg_initial_mem(irg);
edges_reroute(initial_mem, new_mem, irg);
......
......@@ -786,9 +786,9 @@ static inline ir_node **new_frag_arr(ir_node *n) {
opt = get_opt_optimize(); set_optimize(0);
/* Here we rely on the fact that all frag ops have Memory as first result! */
if (is_Call(n)) {
arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
arr[0] = new_Proj(n, mode_M, pn_Call_M);
} else if (is_CopyB(n)) {
arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
} else {
assert((pn_Quot_M == pn_DivMod_M) &&
(pn_Quot_M == pn_Div_M) &&
......
......@@ -946,11 +946,10 @@ static const pns_lookup_t cond_lut[] = {
/** the lookup table for Proj(Call) names */
static const pns_lookup_t call_lut[] = {
#define X(a) { pn_Call_##a, #a }
X(M_regular),
X(M),
X(X_regular),
X(X_except),
X(T_result),
X(M_except),
X(P_value_res_base)
#undef X
};
......@@ -1031,7 +1030,6 @@ static const pns_lookup_t copyb_lut[] = {
X(M),
X(X_regular),
X(X_except),
X(M_except)
#undef X
};
......@@ -1042,7 +1040,6 @@ static const pns_lookup_t instof_lut[] = {
X(X_regular),
X(X_except),
X(res),
X(M_except),
#undef X
};
......
......@@ -2752,7 +2752,7 @@ ir_node *get_fragile_op_mem(ir_node *node) {
case iro_Alloc :
case iro_Bound :
case iro_CopyB :
return get_irn_n(node, pn_Generic_M_regular);
return get_irn_n(node, pn_Generic_M);
case iro_Bad :
case iro_Unknown:
return node;
......
......@@ -1596,7 +1596,6 @@ static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) {
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(current_ir_graph);
......@@ -4654,7 +4653,6 @@ static ir_node *transform_node_Proj_CopyB(ir_node *proj) {
DBG_OPT_EXC_REM(proj);
proj = new_r_Jmp(get_nodes_block(copyb));
break;
case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(get_irn_irg(proj));
......
......@@ -242,7 +242,7 @@ gen_initializer_irg(ir_entity *ent_filename, ir_entity *bblock_id, ir_entity *bb
ins[3] = new_r_Const_long(irg, mode_Iu, n_blocks);
call = new_r_Call(bb, get_irg_initial_mem(irg), symconst, 4, ins, init_type);
ret = new_r_Return(bb, new_r_Proj(bb, call, mode_M, pn_Call_M_regular), 0, NULL);
ret = new_r_Return(bb, new_r_Proj(bb, call, mode_M, pn_Call_M), 0, NULL);
mature_immBlock(bb);
add_immBlock_pred(get_irg_end_block(irg), ret);
......
......@@ -359,11 +359,10 @@ static int verify_node_Proj_InstOf(ir_node *n, ir_node *p) {
ASSERT_AND_RET_DBG(
(
(proj == pn_InstOf_M_regular && mode == mode_M) ||
(proj == pn_InstOf_M && mode == mode_M) ||
(proj == pn_InstOf_X_regular && mode == mode_X) ||
(proj == pn_InstOf_X_except && mode == mode_X) ||
(proj == pn_InstOf_res && mode_is_reference(mode)) ||
(proj == pn_InstOf_M_except && mode == mode_M)
(proj == pn_InstOf_res && mode_is_reference(mode))
),
"wrong Proj from InstOf", 0,
show_proj_failure(p);
......@@ -380,11 +379,10 @@ static int verify_node_Proj_Call(ir_node *n, ir_node *p) {
ASSERT_AND_RET_DBG(
(
(proj == pn_Call_M_regular && mode == mode_M) ||
(proj == pn_Call_M && mode == mode_M) ||
(proj == pn_Call_X_regular && mode == mode_X) ||
(proj == pn_Call_X_except && mode == mode_X) ||
(proj == pn_Call_T_result && mode == mode_T) ||
(proj == pn_Call_M_except && mode == mode_M) ||
(proj == pn_Call_P_value_res_base && mode_is_reference(mode))
),
"wrong Proj from Call", 0,
......@@ -399,7 +397,7 @@ static int verify_node_Proj_Call(ir_node *n, ir_node *p) {
ASSERT_AND_RET(
!is_NoMem(get_Call_mem(n)),
"Exception Proj from FunctionCall", 0);
else if (proj == pn_Call_M_regular || proj == pn_Call_M_except)
else if (proj == pn_Call_M)
ASSERT_AND_RET(
(!is_NoMem(get_Call_mem(n)) || 1),
"Memory Proj from FunctionCall", 0);
......@@ -781,10 +779,9 @@ static int verify_node_Proj_CopyB(ir_node *n, ir_node *p) {
ASSERT_AND_RET_DBG(
(
(proj == pn_CopyB_M_regular && mode == mode_M) ||
(proj == pn_CopyB_M && mode == mode_M) ||
(proj == pn_CopyB_X_regular && mode == mode_X) ||
(proj == pn_CopyB_X_except && mode == mode_X) ||
(proj == pn_CopyB_M_except && mode == mode_M)
(proj == pn_CopyB_X_except && mode == mode_X)
),
"wrong Proj from CopyB", 0,
show_proj_failure(p);
......
......@@ -566,8 +566,7 @@ static void add_hidden_param(ir_graph *irg, int n_com, ir_node **ins, cl_entry *
/* get rid of the CopyB */
turn_into_tuple(p, pn_CopyB_max);
set_Tuple_pred(p, pn_CopyB_M_regular, mem);
set_Tuple_pred(p, pn_CopyB_M_except, get_irg_bad(irg));
set_Tuple_pred(p, pn_CopyB_M, mem);
set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
set_Tuple_pred(p, pn_CopyB_X_except, get_irg_bad(irg));
++n_args;
......@@ -795,7 +794,7 @@ static void transform_irg(const lower_params_t *lp, ir_graph *irg)
pred,
tp
);
mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M);
}
}
if (lp->flags & LF_RETURN_HIDDEN) {
......
......@@ -118,10 +118,9 @@ static void lower_copyb_nodes(ir_node *irn, unsigned mode_bytes) {
}
turn_into_tuple(irn, pn_CopyB_max);
set_Tuple_pred(irn, pn_CopyB_M_regular, mem);
set_Tuple_pred(irn, pn_CopyB_M, mem);
set_Tuple_pred(irn, pn_CopyB_X_regular, get_irg_bad(irg));
set_Tuple_pred(irn, pn_CopyB_X_except, get_irg_bad(irg));
set_Tuple_pred(irn, pn_CopyB_M_except, get_irg_bad(irg));
}
/**
......
......@@ -575,7 +575,7 @@ static void lower_Div(ir_node *node, ir_mode *mode, lower_env_t *env) {
case pn_Div_M: /* Memory result. */
/* reroute to the call */
set_Proj_pred(proj, call);
set_Proj_proj(proj, pn_Call_M_except);
set_Proj_proj(proj, pn_Call_M);
break;
case pn_Div_X_except: /* Execution result if exception occurred. */
/* reroute to the call */
......@@ -652,7 +652,7 @@ static void lower_Mod(ir_node *node, ir_mode *mode, lower_env_t *env) {
case pn_Mod_M: /* Memory result. */
/* reroute to the call */
set_Proj_pred(proj, call);
set_Proj_proj(proj, pn_Call_M_except);
set_Proj_proj(proj, pn_Call_M);
break;
case pn_Mod_X_except: /* Execution result if exception occurred. */
/* reroute to the call */
......@@ -755,7 +755,7 @@ static void lower_DivMod(ir_node *node, ir_mode *mode, lower_env_t *env) {
case pn_DivMod_M: /* Memory result. */
/* reroute to the first call */
set_Proj_pred(proj, callDiv ? callDiv : (callMod ? callMod : mem));
set_Proj_proj(proj, pn_Call_M_except);
set_Proj_proj(proj, pn_Call_M);
break;
case pn_DivMod_X_except: /* Execution result if exception occurred. */
/* reroute to the first call */
......
......@@ -225,11 +225,10 @@ static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg
irn = new_r_Tuple(block, 1, &irn);
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, mem);
set_Tuple_pred(call, pn_Call_M, mem);
set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
set_Tuple_pred(call, pn_Call_T_result, irn);
set_Tuple_pred(call, pn_Call_M_except, mem);
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
} /* replace_call */
......@@ -1101,7 +1100,7 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
for (i = 0; i < n_proj; ++i)
set_Tuple_pred(node, i, new_r_Bad(irg));
if (rt->mem_proj_nr >= 0)
set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M_regular));
set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M));
if (!is_NoMem(mem)) {
/* Exceptions can only be handled with real memory */
if (rt->regular_proj_nr >= 0)
......@@ -1109,7 +1108,7 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
if (rt->exc_proj_nr >= 0)
set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(bl, call, mode_X, pn_Call_X_except));
if (rt->exc_mem_proj_nr >= 0)
set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M_except));
set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M));
}
if (rt->res_proj_nr >= 0)
......
......@@ -485,11 +485,10 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
mem = get_Call_mem(call);
blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, mem);
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_M_except, mem);
set_Tuple_pred(call, pn_Call_M, mem);
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
++env->nr_deads;
......
......@@ -154,10 +154,9 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
case pn_Call_M_regular:
case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
......@@ -222,14 +221,13 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
assert(get_irn_mode(mem) == mode_M);
switch (get_Proj_proj(proj)) {
case pn_Call_M_regular: {
case pn_Call_M: {
/* in dead code there might be cycles where proj == mem */
if (proj != mem)
exchange(proj, mem);
break;
}
case pn_Call_X_except:
case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
......@@ -320,10 +318,9 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
case pn_Call_M_regular:
case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
......@@ -368,7 +365,6 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
/* kill any exception flow */
switch (get_Proj_proj(proj)) {
case pn_Call_X_except:
case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
......
......@@ -837,9 +837,8 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
}
/* How the exception control flow of an inlined Call is represented.
 * NOTE(review): the stripped diff interleaved the old three-state enum
 * (exc_handler/exc_to_end/exc_no_handler) with the new two-state one,
 * duplicating enumerator names (invalid C). The commit removed exc_to_end;
 * this is the post-commit two-state enum. */
enum exc_mode {
	exc_handler,   /**< There is a handler. */
	exc_no_handler /**< Exception handling not represented. */
};
/* Inlines a method at the given call site. */
......@@ -853,6 +852,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
ir_node **args_in;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
int n_mem_phi;
enum exc_mode exc_handling;
ir_type *called_frame, *curr_frame, *mtp, *ctp;
ir_entity *ent;
......@@ -964,18 +964,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
for the Call node, or do we branch directly to End on an exception?
exc_handling:
0 There is a handler.
1 Branches to End.
2 Exception handling not represented in Firm. -- */
{
ir_node *proj, *Mproj = NULL, *Xproj = NULL;
ir_node *Xproj = NULL;
ir_node *proj;
for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
long proj_nr = get_Proj_proj(proj);
if (proj_nr == pn_Call_X_except) Xproj = proj;
if (proj_nr == pn_Call_M_except) Mproj = proj;
}
if (Mproj) { assert(Xproj); exc_handling = exc_handler; } /* Mproj */
else if (Xproj) { exc_handling = exc_to_end; } /* !Mproj && Xproj */
else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
}
/* create the argument tuple */
......@@ -1123,16 +1120,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
Add Phi node if there was more than one Return. -- */
turn_into_tuple(post_call, pn_Call_max);
/* First the Memory-Phi */
n_ret = 0;
n_mem_phi = 0;
for (i = 0; i < arity; i++) {
ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_mem(ret);
n_ret++;
cf_pred[n_mem_phi++] = get_Return_mem(ret);
}
/* memory output for some exceptions is directly connected to End */
if (is_Call(ret)) {
cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
} else if (is_fragile_op(ret)) {
/* We rely that all cfops have the memory output at the same position. */
cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
} else if (is_Raise(ret)) {
cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
}
}
phi = new_Phi(n_ret, cf_pred, mode_M);
set_Tuple_pred(call, pn_Call_M_regular, phi);
phi = new_Phi(n_mem_phi, cf_pred, mode_M);
set_Tuple_pred(call, pn_Call_M, phi);
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_block(phi) == post_bl) {
set_irn_link(phi, get_irn_link(post_bl));
......@@ -1178,15 +1183,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
/* Finally the exception control flow.
We have two (three) possible situations:
First if the Call branches to an exception handler: We need to add a Phi node to
We have two possible situations:
First if the Call branches to an exception handler:
We need to add a Phi node to
collect the memory containing the exception objects. Further we need
to add another block to get a correct representation of this Phi. To
this block we add a Jmp that resolves into the X output of the Call
when the Call is turned into a tuple.
Second the Call branches to End, the exception is not handled. Just
add all inlined exception branches to the End node.
Third: there is no Exception edge at all. Handle as case two. */
Second: There is no exception edge. Just add all inlined exception
branches to the End node.
*/
if (exc_handling == exc_handler) {
n_exc = 0;
for (i = 0; i < arity; i++) {
......@@ -1201,29 +1207,9 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
if (n_exc > 0) {
ir_node *block = new_Block(n_exc, cf_pred);
set_cur_block(block);
set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
/* The Phi for the memories with the exception objects */
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
ret = skip_Proj(get_Block_cfgpred(end_bl, i));
if (is_Call(ret)) {
cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
n_exc++;
} else if (is_fragile_op(ret)) {
/* We rely that all cfops have the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
n_exc++;
} else if (is_Raise(ret)) {
cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
n_exc++;
}
}
set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
} else {
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
} else {
ir_node *main_end_bl;
......@@ -1251,7 +1237,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);