Commit b9a1bfdb authored by Christoph Mallon

irgmod: Pass the new inputs to turn_into_tuple() instead of initialising them with Bad and setting them afterwards.
parent 351e5ae6
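
In short: turn_into_tuple() previously set every input of the new Tuple to Bad and relied on each caller to patch the real predecessors in afterwards via set_Tuple_pred(). After this commit the caller builds the complete input array first and hands it over. A minimal before/after sketch of the calling convention, using the pn_Call_* projection numbers from the hunks below (`call`, `nomem`, `jmp`, `res` and `irg` stand for nodes the caller already has):

	/* Before: the tuple starts out all-Bad, inputs are patched one by one. */
	turn_into_tuple(call, pn_Call_max+1);
	set_Tuple_pred(call, pn_Call_M,         nomem);
	set_Tuple_pred(call, pn_Call_X_regular, jmp);
	set_Tuple_pred(call, pn_Call_X_except,  new_r_Bad(irg, mode_X));
	set_Tuple_pred(call, pn_Call_T_result,  res);

	/* After: C99 designated initializers index the array by projection
	 * number; the finished array is passed in a single call. */
	ir_node *const in[] = {
		[pn_Call_M]         = nomem,
		[pn_Call_T_result]  = res,
		[pn_Call_X_regular] = jmp,
		[pn_Call_X_except]  = new_r_Bad(irg, mode_X),
	};
	turn_into_tuple(call, ARRAY_SIZE(in), in);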
@@ -47,7 +47,7 @@ FIRM_API void exchange(ir_node *old, ir_node *nw);
  * @param node The node to be turned into a tuple.
  * @param arity The number of values formed into a Tuple.
  */
-FIRM_API void turn_into_tuple(ir_node *node, int arity);
+FIRM_API void turn_into_tuple(ir_node *node, int arity, ir_node *const in[]);
 /** Walks over the passed IR graph and collects all Phi nodes as a
  * list in their corresponding block (using get_Block_phis() API).
...
@@ -85,7 +85,7 @@ FIRM_API ir_node *get_irn_n(const ir_node *node, int n);
  * This function is necessary to adjust in arrays of blocks, calls and phis.
  * "in" must contain all predecessors except the block that are required for
  * the nodes opcode. */
-FIRM_API void set_irn_in(ir_node *node, int arity, ir_node *in[]);
+FIRM_API void set_irn_in(ir_node *node, int arity, ir_node *const in[]);
 /**
  * Add an artificial dependency to the node.
...
@@ -35,6 +35,7 @@
 #include "lower_dw.h"
 #include "array.h"
 #include "error.h"
+#include "util.h"
 #include "ia32_new_nodes.h"
 #include "bearch_ia32_t.h"
@@ -162,11 +163,13 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
 		jmp = new_r_Jmp(block);
 		set_opt_cse(old_cse);
-		turn_into_tuple(call, pn_Call_max+1);
-		set_Tuple_pred(call, pn_Call_M, nomem);
-		set_Tuple_pred(call, pn_Call_X_regular, jmp);
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
-		set_Tuple_pred(call, pn_Call_T_result, res);
+		ir_node *const in[] = {
+			[pn_Call_M]         = nomem,
+			[pn_Call_T_result]  = res,
+			[pn_Call_X_regular] = jmp,
+			[pn_Call_X_except]  = new_r_Bad(irg, mode_X),
+		};
+		turn_into_tuple(call, ARRAY_SIZE(in), in);
 	}
 }
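
ARRAY_SIZE comes from the newly included "util.h" (hence the #include additions scattered through this commit). Its definition is not part of the diff; presumably it is the conventional element-count macro:

	/* Presumed definition in util.h (not shown in this commit). */
	#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

Because the array length is derived from the initializer, ARRAY_SIZE(in) equals the highest designated index plus one, which replaces the hand-written pn_*_max+1 arities.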
...
@@ -36,17 +36,8 @@
 #include "irtools.h"
 #include "error.h"
-void turn_into_tuple(ir_node *node, int arity)
+void turn_into_tuple(ir_node *const node, int const arity, ir_node *const *const in)
 {
-	ir_graph *irg = get_irn_irg(node);
-	ir_node **in = ALLOCAN(ir_node*, arity);
-	ir_node *bad = new_r_Bad(irg, mode_ANY);
-	int i;
-	/* construct a new in array, with every input being bad */
-	for (i = 0; i < arity; ++i) {
-		in[i] = bad;
-	}
 	set_irn_in(node, arity, in);
 	set_irn_op(node, op_Tuple);
 }
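
After the patch the whole function reduces to forwarding the caller's array; reconstructed from the hunk above:

	void turn_into_tuple(ir_node *const node, int const arity,
	                     ir_node *const *const in)
	{
		set_irn_in(node, arity, in);  /* replace all predecessors at once */
		set_irn_op(node, op_Tuple);   /* retype the node in place */
	}

The Bad-prefill is gone entirely: the one caller that really needs placeholder inputs now creates them explicitly (see i_mapper_RuntimeCall below).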
...
@@ -161,7 +161,7 @@ ir_node **get_irn_in(const ir_node *node)
 	return node->in;
 }
-void set_irn_in(ir_node *node, int arity, ir_node **in)
+void set_irn_in(ir_node *const node, int const arity, ir_node *const *const in)
 {
 	int i;
 	ir_node *** pOld_in;
...
@@ -3085,11 +3085,13 @@ make_tuple:
 		/* skip a potential Pin */
 		mem = skip_Pin(mem);
-		turn_into_tuple(n, pn_Div_max+1);
-		set_Tuple_pred(n, pn_Div_M, mem);
-		set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
-		set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X));
-		set_Tuple_pred(n, pn_Div_res, value);
+		ir_node *const in[] = {
+			[pn_Div_M]         = mem,
+			[pn_Div_res]       = value,
+			[pn_Div_X_regular] = new_r_Jmp(blk),
+			[pn_Div_X_except]  = new_r_Bad(irg, mode_X),
+		};
+		turn_into_tuple(n, ARRAY_SIZE(in), in);
 	}
 	return n;
 }
@@ -3177,11 +3179,13 @@ make_tuple:
 		/* skip a potential Pin */
 		mem = skip_Pin(mem);
-		turn_into_tuple(n, pn_Mod_max+1);
-		set_Tuple_pred(n, pn_Mod_M, mem);
-		set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
-		set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X));
-		set_Tuple_pred(n, pn_Mod_res, value);
+		ir_node *const in[] = {
+			[pn_Mod_M]         = mem,
+			[pn_Mod_res]       = value,
+			[pn_Mod_X_regular] = new_r_Jmp(blk),
+			[pn_Mod_X_except]  = new_r_Bad(irg, mode_X),
+		};
+		turn_into_tuple(n, ARRAY_SIZE(in), in);
 	}
 	return n;
 }
@@ -3197,7 +3201,6 @@ static ir_node *transform_node_Cond(ir_node *n)
 	ir_node *a = get_Cond_selector(n);
 	ir_graph *irg = get_irn_irg(n);
 	ir_tarval *ta;
-	ir_node *jmp;
 	/* we need block info which is not available in floating irgs */
 	if (get_irg_pinned(irg) == op_pin_state_floats)
@@ -3213,16 +3216,15 @@ static ir_node *transform_node_Cond(ir_node *n)
 	if (ta != tarval_bad) {
 		/* It's branching on a boolean constant.
 		   Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
-		ir_node *blk = get_nodes_block(n);
-		jmp = new_r_Jmp(blk);
-		turn_into_tuple(n, pn_Cond_max+1);
-		if (ta == tarval_b_true) {
-			set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
-			set_Tuple_pred(n, pn_Cond_true, jmp);
-		} else {
-			set_Tuple_pred(n, pn_Cond_false, jmp);
-			set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
-		}
+		ir_node *const blk = get_nodes_block(n);
+		ir_node *const jmp = new_r_Jmp(blk);
+		ir_node *const bad = new_r_Bad(irg, mode_X);
+		bool const cond = ta == tarval_b_true;
+		ir_node *const in[] = {
+			[pn_Cond_false] = cond ? bad : jmp,
+			[pn_Cond_true]  = cond ? jmp : bad,
+		};
+		turn_into_tuple(n, ARRAY_SIZE(in), in);
 		clear_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
 	}
 	return n;
...
@@ -34,6 +34,7 @@
 #include "irgwalk.h"
 #include "iroptimize.h"
 #include "error.h"
+#include "util.h"
 static pmap *entities;
 static bool dont_lower[ir_bk_last+1];
@@ -112,9 +113,11 @@ static void replace_with_call(ir_node *node)
 	call_ress = new_r_Proj(call, mode_T, pn_Call_T_result);
 	call_res = new_r_Proj(call_ress, res_mode, 0);
-	turn_into_tuple(node, 2);
-	set_irn_n(node, pn_Builtin_M, call_mem);
-	set_irn_n(node, pn_Builtin_max+1, call_res);
+	ir_node *const in[] = {
+		[pn_Builtin_M]       = call_mem,
+		[pn_Builtin_max + 1] = call_res,
+	};
+	turn_into_tuple(node, ARRAY_SIZE(in), in);
 }
 static void lower_builtin(ir_node *node, void *env)
@@ -132,10 +135,11 @@ static void lower_builtin(ir_node *node, void *env)
 	case ir_bk_prefetch: {
 		/* just remove it */
 		ir_node *mem = get_Builtin_mem(node);
-		turn_into_tuple(node, 1);
-		set_irn_n(node, pn_Builtin_M, mem);
+		ir_node *const in[] = { mem };
+		turn_into_tuple(node, ARRAY_SIZE(in), in);
 		break;
 	}
 	case ir_bk_ffs:
 	case ir_bk_clz:
 	case ir_bk_ctz:
...
@@ -41,6 +41,7 @@
 #include "array_t.h"
 #include "pmap.h"
 #include "error.h"
+#include "util.h"
 static pmap *pointer_types;
 static pmap *lowered_mtps;
@@ -541,13 +542,15 @@ static void add_hidden_param(ir_graph *irg, size_t n_com, ir_node **ins,
 			/* get rid of the CopyB */
 			if (ir_throws_exception(p)) {
-				turn_into_tuple(p, pn_CopyB_max+1);
-				set_Tuple_pred(p, pn_CopyB_M, mem);
-				set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(block));
-				set_Tuple_pred(p, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
+				ir_node *const in[] = {
+					[pn_CopyB_M]         = mem,
+					[pn_CopyB_X_regular] = new_r_Jmp(block),
+					[pn_CopyB_X_except]  = new_r_Bad(irg, mode_X),
+				};
+				turn_into_tuple(p, ARRAY_SIZE(in), in);
 			} else {
-				turn_into_tuple(p, pn_CopyB_M+1);
-				set_Tuple_pred(p, pn_CopyB_M, mem);
+				ir_node *const in[] = { mem };
+				turn_into_tuple(p, ARRAY_SIZE(in), in);
 			}
 			++n_args;
 		}
...
@@ -35,6 +35,7 @@
 #include "irgmod.h"
 #include "error.h"
 #include "be.h"
+#include "util.h"
 typedef struct entry entry_t;
 struct entry {
@@ -155,10 +156,13 @@ static void lower_small_copyb_node(ir_node *irn)
 		mode_bytes /= 2;
 	}
-	turn_into_tuple(irn, pn_CopyB_max+1);
-	set_Tuple_pred(irn, pn_CopyB_M, mem);
-	set_Tuple_pred(irn, pn_CopyB_X_regular, new_r_Bad(irg, mode_X));
-	set_Tuple_pred(irn, pn_CopyB_X_except, new_r_Bad(irg, mode_X));
+	ir_node *const bad = new_r_Bad(irg, mode_X);
+	ir_node *const in[] = {
+		[pn_CopyB_M]         = mem,
+		[pn_CopyB_X_regular] = bad,
+		[pn_CopyB_X_except]  = bad,
+	};
+	turn_into_tuple(irn, ARRAY_SIZE(in), in);
 }
 static ir_type *get_memcpy_methodtype(void)
@@ -212,8 +216,8 @@ static void lower_large_copyb_node(ir_node *irn)
 	call = new_rd_Call(dbgi, block, mem, symconst, 3, in, call_tp);
 	call_mem = new_r_Proj(call, mode_M, pn_Call_M);
-	turn_into_tuple(irn, 1);
-	set_irn_n(irn, pn_CopyB_M, call_mem);
+	ir_node *const tuple_in[] = { call_mem };
+	turn_into_tuple(irn, ARRAY_SIZE(tuple_in), tuple_in);
 }
 static void lower_copyb_node(ir_node *irn)
...
@@ -2647,9 +2647,11 @@ static void lower_reduce_builtin(ir_node *builtin, ir_mode *mode)
 			panic("unexpected builtin");
 		}
-		turn_into_tuple(builtin, 2);
-		set_irn_n(builtin, pn_Builtin_M, mem);
-		set_irn_n(builtin, pn_Builtin_max+1, res);
+		ir_node *const in[] = {
+			[pn_Builtin_M]       = mem,
+			[pn_Builtin_max + 1] = res,
+		};
+		turn_into_tuple(builtin, ARRAY_SIZE(in), in);
 	}
 }
...
@@ -203,25 +203,31 @@ static void replace_call(ir_node *irn, ir_node *call, ir_node *mem,
 	ir_node *rest = new_r_Tuple(block, 1, &irn);
 	if (ir_throws_exception(call)) {
-		turn_into_tuple(call, pn_Call_max+1);
 		if (reg_jmp == NULL) {
 			reg_jmp = new_r_Jmp(block);
 		}
 		if (exc_jmp == NULL) {
 			exc_jmp = new_r_Bad(irg, mode_X);
 		}
-		set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
-		set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
+		ir_node *const in[] = {
+			[pn_Call_M]         = mem,
+			[pn_Call_T_result]  = rest,
+			[pn_Call_X_regular] = reg_jmp,
+			[pn_Call_X_except]  = exc_jmp,
+		};
+		turn_into_tuple(call, ARRAY_SIZE(in), in);
 	} else {
 		assert(reg_jmp == NULL);
 		assert(exc_jmp == NULL);
-		turn_into_tuple(call, pn_Call_T_result+1);
+		assert(pn_Call_M <= pn_Call_T_result);
+		assert(pn_Call_X_regular > pn_Call_T_result);
+		assert(pn_Call_X_except > pn_Call_T_result);
+		ir_node *const in[] = {
+			[pn_Call_M]        = mem,
+			[pn_Call_T_result] = rest,
+		};
+		turn_into_tuple(call, ARRAY_SIZE(in), in);
 	}
-	set_Tuple_pred(call, pn_Call_M, mem);
-	set_Tuple_pred(call, pn_Call_T_result, rest);
 }
 int i_mapper_abs(ir_node *call, void *ctx)
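
A subtlety in the non-throwing branch above: with designated initializers the highest index used determines the array length, so `in` only reaches up to pn_Call_T_result, reproducing the old pn_Call_T_result+1 arity. The three new asserts record the assumption this relies on, namely that the memory and result projections come before the two control-flow projections. A one-line illustration of the length rule:

	int a[] = { [4] = 1 };  /* highest designated index is 4, so 5 elements */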
@@ -1202,23 +1208,28 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
 	if (n_proj > 0) {
 		n_proj += n_res - 1;
 		/* we are ready */
-		turn_into_tuple(node, n_proj);
+		ir_node **const in = ALLOCAN(ir_node*, n_proj);
+		ir_node *const bad = new_r_Bad(irg, mode_ANY);
+		for (i = 0; i != n_proj; ++i) {
+			in[i] = bad;
+		}
 		if (rt->mem_proj_nr >= 0)
-			set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
+			in[rt->mem_proj_nr] = new_r_Proj(call, mode_M, pn_Call_M);
 		if (throws_exception) {
-			set_Tuple_pred(node, op->pn_x_regular, new_r_Proj(call, mode_X, pn_Call_X_regular));
-			set_Tuple_pred(node, op->pn_x_except, new_r_Proj(call, mode_X, pn_Call_X_except));
+			in[op->pn_x_regular] = new_r_Proj(call, mode_X, pn_Call_X_regular);
+			in[op->pn_x_except]  = new_r_Proj(call, mode_X, pn_Call_X_except);
 		}
 		if (rt->res_proj_nr >= 0) {
 			for (i = 0; i < n_res; ++i) {
 				ir_mode *mode = get_type_mode(get_method_res_type(mtp, i));
 				ir_node *proj = new_r_Proj(res_proj, mode, i);
-				set_Tuple_pred(node, rt->res_proj_nr + i, proj);
+				in[rt->res_proj_nr + i] = proj;
 			}
 		}
+		turn_into_tuple(node, n_proj, in);
 		return 1;
 	} else {
 		/* only one return value supported */
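
i_mapper_RuntimeCall is the one call site whose tuple arity is only known at run time, so the Bad-prefill that turn_into_tuple() used to perform moves to the caller: the array is stack-allocated with ALLOCAN, filled with Bad, and the known projections are overwritten before the single turn_into_tuple() call at the end.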
...
@@ -38,6 +38,7 @@
 #include "irprintf.h"
 #include "debug.h"
 #include "error.h"
+#include "util.h"
 /**
  * walker environment
@@ -383,10 +384,12 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 			mem = get_Alloc_mem(alloc);
 			blk = get_nodes_block(alloc);
-			turn_into_tuple(alloc, pn_Alloc_max+1);
-			set_Tuple_pred(alloc, pn_Alloc_M, mem);
-			set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
-			set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
+			ir_node *const in[] = {
+				[pn_Alloc_M]         = mem,
+				[pn_Alloc_X_regular] = new_r_Jmp(blk),
+				[pn_Alloc_X_except]  = new_r_Bad(irg, mode_X),
+			};
+			turn_into_tuple(alloc, ARRAY_SIZE(in), in);
 			++env->nr_deads;
 		}
@@ -428,11 +431,13 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 			sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
 			mem = get_Alloc_mem(alloc);
-			turn_into_tuple(alloc, pn_Alloc_max+1);
-			set_Tuple_pred(alloc, pn_Alloc_M, mem);
-			set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
-			set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
-			set_Tuple_pred(alloc, pn_Alloc_res, sel);
+			ir_node *const in[] = {
+				[pn_Alloc_M]         = mem,
+				[pn_Alloc_res]       = sel,
+				[pn_Alloc_X_regular] = new_r_Jmp(blk),
+				[pn_Alloc_X_except]  = new_r_Bad(irg, mode_X),
+			};
+			turn_into_tuple(alloc, ARRAY_SIZE(in), in);
 			++env->nr_removed;
 		}
@@ -470,11 +475,13 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
 			mem = get_Call_mem(call);
 			blk = get_nodes_block(call);
-			turn_into_tuple(call, pn_Call_max+1);
-			set_Tuple_pred(call, pn_Call_M, mem);
-			set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
-			set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
-			set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
+			ir_node *const in[] = {
+				[pn_Call_M]         = mem,
+				[pn_Call_T_result]  = new_r_Bad(irg, mode_T),
+				[pn_Call_X_regular] = new_r_Jmp(blk),
+				[pn_Call_X_except]  = new_r_Bad(irg, mode_X),
+			};
+			turn_into_tuple(call, ARRAY_SIZE(in), in);
 			++env->nr_deads;
 		}
...
@@ -304,7 +304,7 @@ static void copy_frame_entities(ir_graph *from, ir_graph *to)
 }
 /* Inlines a method at the given call site. */
-int inline_method(ir_node *call, ir_graph *called_graph)
+int inline_method(ir_node *const call, ir_graph *called_graph)
 {
 	/* we cannot inline some types of calls */
 	if (! can_inline(call, called_graph))
@@ -381,7 +381,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	in[pn_Start_P_frame_base] = get_irg_frame(irg);
 	in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
 	ir_node *pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
-	ir_node *post_call = call;
 	/* --
 	   The new block gets the ins of the old block, pre_call and all its
@@ -466,7 +465,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	/* build a Tuple for all results of the method.
 	 * add Phi node if there was more than one Return. */
-	turn_into_tuple(post_call, pn_Call_max+1);
 	/* First the Memory-Phi */
 	int n_mem_phi = 0;
 	for (int i = 0; i < arity; i++) {
@@ -484,14 +482,14 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 			cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 1);
 		}
 	}
-	ir_node *phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
-	set_Tuple_pred(call, pn_Call_M, phi);
+	ir_node *const call_mem = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
 	/* Conserve Phi-list for further inlinings -- but might be optimized */
-	if (get_nodes_block(phi) == post_bl) {
-		set_irn_link(phi, get_irn_link(post_bl));
-		set_irn_link(post_bl, phi);
+	if (get_nodes_block(call_mem) == post_bl) {
+		set_irn_link(call_mem, get_irn_link(post_bl));
+		set_irn_link(post_bl, call_mem);
 	}
 	/* Now the real results */
+	ir_node *call_res;
 	if (n_res > 0) {
 		for (int j = 0; j < n_res; j++) {
 			ir_type *res_type = get_method_res_type(ctp, j);
@@ -509,11 +507,9 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 					n_ret++;
 				}
 			}
-			if (n_ret > 0) {
-				phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
-			} else {
-				phi = new_r_Bad(irg, res_mode);
-			}
+			ir_node *const phi = n_ret > 0
+				? new_r_Phi(post_bl, n_ret, cf_pred, res_mode)
+				: new_r_Bad(irg, res_mode);
 			res_pred[j] = phi;
 			/* Conserve Phi-list for further inlinings -- but might be optimized */
 			if (get_nodes_block(phi) == post_bl) {
@@ -521,13 +517,12 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 				set_Block_phis(post_bl, phi);
 			}
 		}
-		ir_node *result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
-		set_Tuple_pred(call, pn_Call_T_result, result_tuple);
+		call_res = new_r_Tuple(post_bl, n_res, res_pred);
 	} else {
-		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
+		call_res = new_r_Bad(irg, mode_T);
 	}
 	/* handle the regular call */
-	set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
+	ir_node *const call_x_reg = new_r_Jmp(post_bl);
 	/* Finally the exception control flow.
 	   We have two possible situations:
@@ -540,6 +535,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	   Second: There is no exception edge. Just add all inlined exception
 	   branches to the End node.
 	 */
+	ir_node *call_x_exc;
 	if (exc_handling == exc_handler) {
 		int n_exc = 0;
 		for (int i = 0; i < arity; i++) {
@@ -553,13 +549,13 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		if (n_exc > 0) {
 			if (n_exc == 1) {
 				/* simple fix */
-				set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
+				call_x_exc = cf_pred[0];
 			} else {
 				ir_node *block = new_r_Block(irg, n_exc, cf_pred);
-				set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
+				call_x_exc = new_r_Jmp(block);
 			}
 		} else {
-			set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+			call_x_exc = new_r_Bad(irg, mode_X);
 		}
 	} else {
 		/* assert(exc_handling == 1 || no exceptions. ) */
@@ -582,12 +578,20 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		for (int i = 0; i < n_exc; ++i)
 			end_preds[main_end_bl_arity + i] = cf_pred[i];
 		set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+		call_x_exc = new_r_Bad(irg, mode_X);
 		free(end_preds);
 	}
 	free(res_pred);
 	free(cf_pred);
+	ir_node *const call_in[] = {
+		[pn_Call_M]         = call_mem,
+		[pn_Call_T_result]  = call_res,
+		[pn_Call_X_regular] = call_x_reg,
+		[pn_Call_X_except]  = call_x_exc,
+	};
+	turn_into_tuple(call, ARRAY_SIZE(call_in), call_in);
 	/* -- Turn CSE back on. -- */
 	set_optimize(rem_opt);
 	current_ir_graph = rem;
...
@@ -594,11 +594,13 @@ static void walker(ir_node *node, void *ctx)
 			val = new_rd_Conv(get_irn_dbg_info(node), block, val, mode);
 			mem = get_Load_mem(node);
-			turn_into_tuple(node, pn_Load_max+1);
-			set_Tuple_pred(node, pn_Load_M, mem);
-			set_Tuple_pred(node, pn_Load_res, val);
-			set_Tuple_pred(node, pn_Load_X_regular, new_r_Jmp(block));
-			set_Tuple_pred(node, pn_Load_X_except, new_r_Bad(irg, mode_X));
+			ir_node *const in[] = {
+				[pn_Load_M]         = mem,
+				[pn_Load_res]       = val,
+				[pn_Load_X_regular] = new_r_Jmp(block),
+				[pn_Load_X_except]  = new_r_Bad(irg, mode_X),
+			};
+			turn_into_tuple(node, ARRAY_SIZE(in), in);
 		} else if (is_Store(node)) {
 			DB((dbg, SET_LEVEL_3, " checking %+F for replacement ", node));
@@ -631,10 +633,12 @@ static void walker(ir_node *node, void *ctx)
 			set_value(vnum, val);
 			mem = get_Store_mem(node);
-			turn_into_tuple(node, pn_Store_max+1);
-			set_Tuple_pred(node, pn_Store_M, mem);
-			set_Tuple_pred(node, pn_Store_X_regular, new_r_Jmp(block));
-			set_Tuple_pred(node, pn_Store_X_except, new_r_Bad(irg, mode_X));
+			ir_node *const in[] = {
+				[pn_Store_M]         = mem,
+				[pn_Store_X_regular] = new_r_Jmp(block),
+				[pn_Store_X_except]  = new_r_Bad(irg, mode_X),
+			};
+			turn_into_tuple(node, ARRAY_SIZE(in), in);
 		}
 	}
...
@@ -45,6 +45,7 @@
 #include "irhooks.h"
 #include "ircons_t.h"
 #include "irpass.h"
+#include "util.h"
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
@@ -322,11 +323,13 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 		/* create a new tuple for the return values */
 		tuple = new_r_Tuple(block, env->n_ress, in);
-		turn_into_tuple(call, pn_Call_max+1);
-		set_Tuple_pred(call, pn_Call_M, mem);
-		set_Tuple_pred(call, pn_Call_X_regular, jmp);
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
-		set_Tuple_pred(call, pn_Call_T_result, tuple);
+		ir_node *const in[] = {
+			[pn_Call_M]         = mem,
+			[pn_Call_T_result]  = tuple,
+			[pn_Call_X_regular] = jmp,
+			[pn_Call_X_except]  = new_r_Bad(irg, mode_X),
+		};
+		turn_into_tuple(call, ARRAY_SIZE(in), in);
 		for (i = 0; i < env->n_ress; ++i) {
 			ir_node *res = get_Return_res(p, i);
...