Commit cd4e22d1 authored by Michael Beck's avatar Michael Beck
Browse files

Added new Proj_X_regular for all nodes producing a Proj_X_exc to support the new macro blocks

[r14008]
parent 975a2349
......@@ -63,6 +63,9 @@ void ia32_handle_intrinsics(void) {
#define BINOP_Right_Low 2
#define BINOP_Right_High 3
/**
* Replace a call by a tuple of l_res, h_res.
*/
static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph *irg, ir_node *block) {
ir_node *res, *in[2];
......@@ -72,6 +75,7 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, get_irg_no_mem(irg));
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, block));
set_Tuple_pred(call, pn_Call_X_except, get_irg_bad(irg));
set_Tuple_pred(call, pn_Call_T_result, res);
set_Tuple_pred(call, pn_Call_M_except, get_irg_no_mem(irg));
......
......@@ -45,7 +45,7 @@
* This is useful if a node returning a tuple is removed, but the Projs
* extracting values from the tuple are not available.
*/
void turn_into_tuple (ir_node *node, int arity)
void turn_into_tuple(ir_node *node, int arity)
{
assert(node);
set_irn_op(node, op_Tuple);
......
......@@ -30,7 +30,7 @@
/** Exchanges two nodes by conserving edges leaving old (i.e.,
pointers pointing to old). Turns the old node into an Id. */
void exchange (ir_node *old, ir_node *nw);
void exchange(ir_node *old, ir_node *nw);
/** Turns a node into a "useless" Tuple.
*
......@@ -43,7 +43,7 @@ void exchange (ir_node *old, ir_node *nw);
* @param node The node to be turned into a tuple.
* @param arity The number of values formed into a Tuple.
*/
void turn_into_tuple (ir_node *node, int arity);
void turn_into_tuple(ir_node *node, int arity);
/** Walks over the passed ir graph and collects all Phi nodes as a
* list built with the link field in their corresponding block.
......@@ -73,4 +73,4 @@ void collect_phiprojs(ir_graph *irg);
*/
void part_block(ir_node *node);
#endif
#endif /* FIRM_IR_IRGMOD_H */
......@@ -1114,8 +1114,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
res_pred = xmalloc (n_res * sizeof(*res_pred));
cf_pred = xmalloc (arity * sizeof(*res_pred));
res_pred = xmalloc(n_res * sizeof(*res_pred));
cf_pred = xmalloc(arity * sizeof(*res_pred));
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
......@@ -1141,7 +1141,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
/* -- Build a Tuple for all results of the method.
Add Phi node if there was more than one Return. -- */
turn_into_tuple(post_call, 4);
turn_into_tuple(post_call, 4); /* FIXME: is the 4 correct here? */
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
......
......@@ -131,7 +131,7 @@ int add_irn_dep(ir_node *node, ir_node *dep);
/**
* Copy all dependencies from a node to another.
* @param tgt The node which sould be enriched.
* @param tgt The node which should be enriched.
* @param src The node whose dependencies shall be copied.
*/
void add_irn_deps(ir_node *tgt, ir_node *src);
......@@ -378,20 +378,20 @@ int Block_block_visited(const ir_node *node);
* predecessors are removed, the node has the same predecessors in
* both views.
* @@@ Maybe better: arity is zero if no cg preds. */
void set_Block_cg_cfgpred_arr(ir_node * node, int arity, ir_node ** in);
void set_Block_cg_cfgpred(ir_node * node, int pos, ir_node * pred);
void set_Block_cg_cfgpred_arr(ir_node *node, int arity, ir_node **in);
void set_Block_cg_cfgpred(ir_node *node, int pos, ir_node *pred);
/* @@@ not supported */
ir_node **get_Block_cg_cfgpred_arr(ir_node * node);
ir_node **get_Block_cg_cfgpred_arr(ir_node *node);
/** Returns the number of interprocedural predecessors. 0 if none. */
int get_Block_cg_n_cfgpreds(ir_node * node);
int get_Block_cg_n_cfgpreds(ir_node *node);
/** Return the interprocedural predecessor at position pos. */
ir_node *get_Block_cg_cfgpred(ir_node * node, int pos);
/* frees the memory. */
void remove_Block_cg_cfgpred_arr(ir_node * node);
ir_node *get_Block_cg_cfgpred(ir_node *node, int pos);
/** Frees the memory allocated for interprocedural predecessors. */
void remove_Block_cg_cfgpred_arr(ir_node *node);
/** returns the extended basic block a block belongs to */
/** Returns the extended basic block a block belongs to. */
ir_extblk *get_Block_extbb(const ir_node *block);
/** sets the extended basic block a block belongs to */
/** Sets the extended basic block a block belongs to. */
void set_Block_extbb(ir_node *block, ir_extblk *extblk);
/** Return the number of Keep alive node. */
......@@ -471,6 +471,9 @@ int get_Return_n_ress(ir_node *node);
ir_node *get_Return_res(ir_node *node, int pos);
void set_Return_res(ir_node *node, int pos, ir_node *res);
/**
* Possible classes for constant classification.
*/
typedef enum {
CNST_NULL = 0, /**< The node is a const(0). */
CNST_ONE = +1, /**< The node is a const(1). */
......@@ -594,13 +597,14 @@ void set_Sel_entity (ir_node *node, ir_entity *ent);
*/
typedef enum {
pn_Call_M_regular = 0, /**< The memory result. */
pn_Call_X_except = 1, /**< The control flow result branching to the exception handler */
pn_Call_T_result = 2, /**< The tuple containing all (0, 1, 2, ...) results */
pn_Call_M_except = 3, /**< The memory result in case the called method terminated with
an exception */
pn_Call_P_value_res_base = 4,/**< A pointer to the memory region containing copied results
pn_Call_X_regular = 1, /**< The control flow result when no exception occurs. */
pn_Call_X_except = 2, /**< The control flow result branching to the exception handler. */
pn_Call_T_result = 3, /**< The tuple containing all (0, 1, 2, ...) results. */
pn_Call_M_except = 4, /**< The memory result in case the called method terminated with
an exception. */
pn_Call_P_value_res_base = 5,/**< A pointer to the memory region containing copied results
passed by value (for compound result types). */
pn_Call_max = 5 /**< number of projections from a Call */
pn_Call_max = 6 /**< number of projections from a Call */
} pn_Call; /* Projection numbers for Call. */
#define pn_Call_M pn_Call_M_regular
......@@ -700,7 +704,8 @@ void set_Quot_resmode(ir_node *node, ir_mode *mode);
* Projection numbers for Quot: use for Proj nodes!
*/
typedef enum {
pn_Quot_M, /**< Memory result. */
pn_Quot_M, /**< Memory result. */
pn_Quot_X_regular, /**< Execution result if no exception occurred. */
pn_Quot_X_except, /**< Execution result if exception occurred. */
pn_Quot_res, /**< Result of computation. */
pn_Quot_max /**< number of projections from a Quot */
......@@ -719,7 +724,8 @@ void set_DivMod_resmode(ir_node *node, ir_mode *mode);
* Projection numbers for DivMod: use for Proj nodes!
*/
typedef enum {
pn_DivMod_M, /**< Memory result. */
pn_DivMod_M, /**< Memory result. */
pn_DivMod_X_regular, /**< Execution result if no exception occurred. */
pn_DivMod_X_except, /**< Execution result if exception occurred. */
pn_DivMod_res_div, /**< Result of computation a / b. */
pn_DivMod_res_mod, /**< Result of computation a % b. */
......@@ -739,7 +745,8 @@ void set_Div_resmode(ir_node *node, ir_mode *mode);
* Projection numbers for Div: use for Proj nodes!
*/
typedef enum {
pn_Div_M, /**< Memory result. */
pn_Div_M, /**< Memory result. */
pn_Div_X_regular, /**< Execution result if no exception occurred. */
pn_Div_X_except, /**< Execution result if exception occurred. */
pn_Div_res, /**< Result of computation. */
pn_Div_max /**< number of projections from a Div */
......@@ -759,6 +766,7 @@ void set_Mod_resmode(ir_node *node, ir_mode *mode);
*/
typedef enum {
pn_Mod_M, /**< Memory result. */
pn_Mod_X_regular, /**< Execution result if no exception occurred. */
pn_Mod_X_except, /**< Execution result if exception occurred. */
pn_Mod_res, /**< Result of computation. */
pn_Mod_max /**< number of projections from a Mod */
......@@ -921,7 +929,8 @@ void set_memop_ptr(ir_node *node, ir_node *ptr);
* Projection numbers for Load: use for Proj nodes!
*/
typedef enum {
pn_Load_M, /**< Memory result. */
pn_Load_M, /**< Memory result. */
pn_Load_X_regular, /**< Execution result if no exception occurred. */
pn_Load_X_except, /**< Execution result if exception occurred. */
pn_Load_res, /**< Result of load operation. */
pn_Load_max /**< number of projections from a Load */
......@@ -940,7 +949,8 @@ void set_Load_volatility(ir_node *node, ir_volatility volatility);
* Projection numbers for Store: use for Proj nodes!
*/
typedef enum {
pn_Store_M, /**< Memory result. */
pn_Store_M, /**< Memory result. */
pn_Store_X_regular, /**< Execution result if no exception occurred. */
pn_Store_X_except, /**< Execution result if exception occurred. */
pn_Store_max /**< number of projections from a Store */
} pn_Store; /* Projection numbers for Store. */
......@@ -959,6 +969,7 @@ void set_Store_volatility(ir_node *node, ir_volatility volatility);
*/
typedef enum {
pn_Alloc_M, /**< Memory result. */
pn_Alloc_X_regular, /**< Execution result if no exception occurred. */
pn_Alloc_X_except, /**< Execution result if exception occurred. */
pn_Alloc_res, /**< Result of allocation. */
pn_Alloc_max /**< number of projections from an Alloc */
......@@ -1055,10 +1066,11 @@ int get_Psi_n_conds(ir_node *node);
*/
typedef enum {
pn_CopyB_M_regular = 0, /**< The memory result. */
pn_CopyB_X_except = 1, /**< The control flow result branching to the exception handler */
pn_CopyB_M_except = 2, /**< The memory result in case the runtime function terminated with
pn_CopyB_X_regular = 1, /**< Execution result if no exception occurred. */
pn_CopyB_X_except = 2, /**< The control flow result branching to the exception handler */
pn_CopyB_M_except = 3, /**< The memory result in case the runtime function terminated with
an exception */
pn_CopyB_max = 3 /**< number of projections from a CopyB */
pn_CopyB_max = 4 /**< number of projections from a CopyB */
} pn_CopyB; /* Projection numbers for CopyB. */
#define pn_CopyB_M pn_CopyB_M_regular
......@@ -1076,11 +1088,12 @@ void set_CopyB_type(ir_node *node, ir_type *data_type);
*/
typedef enum {
pn_InstOf_M_regular = 0, /**< The memory result. */
pn_InstOf_X_except = 1, /**< The control flow result branching to the exception handler */
pn_InstOf_res = 2, /**< The checked object pointer. */
pn_InstOf_M_except = 3, /**< The memory result in case the runtime function terminated with
pn_InstOf_X_regular = 1, /**< Execution result if no exception occurred. */
pn_InstOf_X_except = 2, /**< The control flow result branching to the exception handler */
pn_InstOf_res = 3, /**< The checked object pointer. */
pn_InstOf_M_except = 4, /**< The memory result in case the runtime function terminated with
an exception */
pn_InstOf_max = 4 /**< number of projections from an InstOf */
pn_InstOf_max = 5 /**< number of projections from an InstOf */
} pn_InstOf;
#define pn_InstOf_M pn_InstOf_M_regular
......@@ -1111,9 +1124,10 @@ void set_Raise_exo_ptr(ir_node *node, ir_node *exoptr);
*/
typedef enum {
pn_Bound_M = 0, /**< The memory result. */
pn_Bound_X_except = 1, /**< The control flow result branching to the exception handler */
pn_Bound_res = 2, /**< The checked index. */
pn_Bound_max = 3 /**< number of projections from a Bound */
pn_Bound_X_regular = 1, /**< Execution result if no exception occurred. */
pn_Bound_X_except = 2, /**< The control flow result branching to the exception handler */
pn_Bound_res = 3, /**< The checked index. */
pn_Bound_max = 4 /**< number of projections from a Bound */
} pn_Bound;
/** Returns the memory input of a Bound operation. */
......
......@@ -1051,10 +1051,12 @@ static ir_node *equivalent_node_Div(ir_node *n) {
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_Div_res, a);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_Div_res, a);
}
return n;
} /* equivalent_node_Div */
......@@ -1070,10 +1072,12 @@ static ir_node *equivalent_node_Quot(ir_node *n) {
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
/* Turn Quot into a tuple (mem, bad, a) */
ir_node *mem = get_Quot_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_Quot_res, a);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_Quot_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_Quot_res, a);
}
return n;
} /* equivalent_node_Quot */
......@@ -1082,20 +1086,22 @@ static ir_node *equivalent_node_Quot(ir_node *n) {
* Optimize a / 1 = a.
*/
static ir_node *equivalent_node_DivMod(ir_node *n) {
ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
/* Div is not commutative. */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn DivMod into a tuple (mem, bad, a, 0) */
ir_node *a = get_DivMod_left(n);
ir_node *mem = get_Div_mem(n);
ir_mode *mode = get_irn_mode(b);
ir_node *blk = get_nodes_block(n);
ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_DivMod_res_div, a);
set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_DivMod_res_div, a);
set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
}
return n;
} /* equivalent_node_DivMod */
......@@ -1520,8 +1526,10 @@ static ir_node *equivalent_node_CopyB(ir_node *n) {
if (a == b) {
/* Turn CopyB into a tuple (mem, bad, bad) */
ir_node *mem = get_CopyB_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_CopyB_max);
set_Tuple_pred(n, pn_CopyB_M, mem);
set_Tuple_pred(n, pn_CopyB_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
}
......@@ -1565,10 +1573,12 @@ static ir_node *equivalent_node_Bound(ir_node *n) {
if (ret_tuple) {
/* Turn Bound into a tuple (mem, bad, idx) */
ir_node *mem = get_Bound_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Bound_max);
set_Tuple_pred(n, pn_Bound_M, mem);
set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_Bound_res, idx);
set_Tuple_pred(n, pn_Bound_M, mem);
set_Tuple_pred(n, pn_Bound_X_regular, new_r_Jmp(current_ir_graph, blk)); /* no exception */
set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_Bound_res, idx);
}
return n;
} /* equivalent_node_Bound */
......@@ -2137,11 +2147,13 @@ static ir_node *transform_node_Div(ir_node *n) {
if (value != n) {
/* Turn Div into a tuple (mem, bad, value) */
ir_node *mem = get_Div_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_except, new_Bad());
set_Tuple_pred(n, pn_Div_res, value);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_Div_X_except, new_Bad());
set_Tuple_pred(n, pn_Div_res, value);
}
return n;
} /* transform_node_Div */
......@@ -2165,11 +2177,13 @@ static ir_node *transform_node_Mod(ir_node *n) {
if (value != n) {
/* Turn Mod into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
set_Tuple_pred(n, pn_Mod_res, value);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
} /* transform_node_Mod */
......@@ -2222,10 +2236,12 @@ static ir_node *transform_node_DivMod(ir_node *n) {
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_DivMod_res_div, a);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_DivMod_res_div, a);
set_Tuple_pred(n, pn_DivMod_res_mod, b);
}
......
......@@ -344,7 +344,7 @@ static void find_allocation_calls(ir_node *call, void *ctx)
*/
static void transform_allocs(ir_graph *irg, walk_env_t *env)
{
ir_node *alloc, *next, *mem, *sel, *size;
ir_node *alloc, *next, *mem, *sel, *size, *blk;
ir_type *ftp, *atp, *tp;
ir_entity *ent;
char name[128];
......@@ -358,8 +358,10 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));
mem = get_Alloc_mem(alloc);
blk = get_nodes_block(alloc);
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
++env->nr_deads;
......@@ -404,6 +406,7 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
set_Tuple_pred(alloc, pn_Alloc_res, sel);
......@@ -438,7 +441,7 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
*/
static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
{
ir_node *call, *next, *mem, *size;
ir_node *call, *next, *mem, *size, *blk;
ir_type *ftp, *atp, *tp;
/* kill all dead allocs */
......@@ -448,8 +451,10 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));
mem = get_Call_mem(call);
blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
set_Tuple_pred(call, pn_Call_M_regular, mem);
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, blk));
set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
set_Tuple_pred(call, pn_Call_M_except, mem);
......
......@@ -530,9 +530,10 @@ static void topologic_walker(ir_node *node, void *ctx)
val = new_d_Conv(get_irn_dbg_info(node), val, mode);
turn_into_tuple(node, pn_Load_max);
set_Tuple_pred(node, pn_Load_M, mem);
set_Tuple_pred(node, pn_Load_res, val);
set_Tuple_pred(node, pn_Load_X_except, new_Bad());
set_Tuple_pred(node, pn_Load_M, mem);
set_Tuple_pred(node, pn_Load_res, val);
set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(current_ir_graph, block));
set_Tuple_pred(node, pn_Load_X_except, new_Bad());
} else {
l = obstack_alloc(&env->obst, sizeof(*l));
l->node = node;
......@@ -565,10 +566,12 @@ static void topologic_walker(ir_node *node, void *ctx)
value_arr[vnum] = val;
mem = get_Store_mem(node);
block = get_nodes_block(node);
turn_into_tuple(node, pn_Store_max);
set_Tuple_pred(node, pn_Store_M, mem);
set_Tuple_pred(node, pn_Store_X_except, new_Bad());
set_Tuple_pred(node, pn_Store_M, mem);
set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(current_ir_graph, block));
set_Tuple_pred(node, pn_Store_X_except, new_Bad());
} else if (op == op_Phi && get_irn_mode(node) == mode_M) {
/*
* found a memory Phi: Here, we must create new Phi nodes
......@@ -695,16 +698,18 @@ static void fix_loads(env_t *env)
val = new_Unknown(env->modes[l->vnum]);
}
mem = get_Load_mem(load);
/* Beware: A Load can contain a hidden conversion in Firm.
Handle this here. */
mode = get_Load_mode(load);
if (mode != get_irn_mode(val))
val = new_d_Conv(get_irn_dbg_info(load), val, mode);
mem = get_Load_mem(load);
turn_into_tuple(load, pn_Load_max);
set_Tuple_pred(load, pn_Load_M, mem);
set_Tuple_pred(load, pn_Load_res, val);
set_Tuple_pred(load, pn_Load_X_except, new_r_Jmp(current_ir_graph, block));
set_Tuple_pred(load, pn_Load_X_except, new_Bad());
}
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment