lower_dw.c 80.2 KB
Newer Older
Christian Würdig's avatar
Christian Würdig committed
1
/*
2
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
Christian Würdig's avatar
Christian Würdig committed
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

Michael Beck's avatar
Michael Beck committed
20
21
/**
 * @file
yb9976's avatar
yb9976 committed
22
 * @brief   Lower double word operations, i.e. 64bit -> 32bit, 32bit -> 16bit etc.
Michael Beck's avatar
Michael Beck committed
23
24
25
 * @date    8.10.2004
 * @author  Michael Beck
 * @version $Id$
26
 */
Matthias Braun's avatar
Matthias Braun committed
27
#include "config.h"
28

29
#include <string.h>
30
#include <stdlib.h>
31
#include <stdbool.h>
32
33
#include <assert.h>

34
#include "error.h"
35
#include "lowering.h"
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "irgmod.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "firmstat.h"
#include "irgwalk.h"
#include "ircons.h"
#include "irflag.h"
#include "irtools.h"
#include "debug.h"
#include "set.h"
#include "pmap.h"
#include "pdeq.h"
Christian Würdig's avatar
Christian Würdig committed
54
#include "irdump.h"
55
#include "array_t.h"
Michael Beck's avatar
Michael Beck committed
56
#include "irpass_t.h"
57
#include "lower_dw.h"
58

Michael Beck's avatar
Michael Beck committed
59
/** A map from (op, imode, omode) to Intrinsic functions entities. */
static set *intrinsic_fkt;

/** A map from (imode, omode) to conv function types. */
static set *conv_types;

/** A map from a method type to its lowered type. */
static pmap *lowered_type;

/** The types for the binop and unop intrinsics. */
static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *tp_s, *tp_u;

/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/**
 * An entry in the (op, imode, omode) -> entity map.
 */
typedef struct op_mode_entry {
	const ir_op   *op;    /**< the op */
	const ir_mode *imode; /**< the input mode */
	const ir_mode *omode; /**< the output mode */
	ir_entity     *ent;   /**< the associated entity of this (op, imode, omode) triple */
} op_mode_entry_t;

/**
 * An entry in the (imode, omode) -> tp map.
 */
typedef struct conv_tp_entry {
	const ir_mode *imode; /**< the input mode */
	const ir_mode *omode; /**< the output mode */
	ir_type       *mtd;   /**< the associated method type of this (imode, omode) pair */
} conv_tp_entry_t;

/** Flags accumulated in lower_dw_env_t::flags while processing a graph. */
enum lower_flags {
	MUST_BE_LOWERED = 1,  /**< graph must be lowered */
	CF_CHANGED      = 2,  /**< control flow was changed */
};

/**
 * The lower environment.
 */
typedef struct lower_dw_env_t {
	lower64_entry_t **entries;     /**< entries per node */
	ir_graph      *irg;
	struct obstack obst;           /**< an obstack holding the temporary data */
	ir_type   *l_mtp;              /**< lowered method type of the current method */
	ir_tarval *tv_mode_bytes;      /**< a tarval containing the number of bytes in the lowered modes */
	ir_tarval *tv_mode_bits;       /**< a tarval containing the number of bits in the lowered modes */
	pdeq      *waitq;              /**< a wait queue of all nodes that must be handled later */
	ir_node  **lowered_phis;       /**< list of lowered phis */
	ir_mode   *high_signed;        /**< doubleword signed type */
	ir_mode   *high_unsigned;      /**< doubleword unsigned type */
	ir_mode   *low_signed;         /**< word signed type */
	ir_mode   *low_unsigned;       /**< word unsigned type */
	ident     *first_id;           /**< .l for little and .h for big endian */
	ident     *next_id;            /**< .h for little and .l for big endian */
	const lwrdw_param_t *params;   /**< transformation parameter */
	unsigned flags;                /**< some flags */
	unsigned n_entries;            /**< number of entries */
	ir_type  *value_param_tp;      /**< the old value param type */
} lower_dw_env_t;

/* The single file-global lowering environment, valid during one lowering run. */
static lower_dw_env_t *env;

static void lower_node(ir_node *node);
static bool mtp_must_be_lowered(ir_type *mtp);
126

Michael Beck's avatar
Michael Beck committed
127
128
129
/**
 * Create a method type for a Conv emulation from imode to omode.
 */
130
static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode)
131
{
Michael Beck's avatar
Michael Beck committed
132
133
134
135
136
137
138
	conv_tp_entry_t key, *entry;
	ir_type *mtd;

	key.imode = imode;
	key.omode = omode;
	key.mtd   = NULL;

139
	entry = (conv_tp_entry_t*)set_insert(conv_types, &key, sizeof(key), HASH_PTR(imode) ^ HASH_PTR(omode));
Michael Beck's avatar
Michael Beck committed
140
141
142
	if (! entry->mtd) {
		int n_param = 1, n_res = 1;

143
		if (imode == env->high_signed || imode == env->high_unsigned)
Michael Beck's avatar
Michael Beck committed
144
			n_param = 2;
145
		if (omode == env->high_signed || omode == env->high_unsigned)
Michael Beck's avatar
Michael Beck committed
146
147
148
			n_res = 2;

		/* create a new one */
149
		mtd = new_type_method(n_param, n_res);
Michael Beck's avatar
Michael Beck committed
150
151
152

		/* set param types and result types */
		n_param = 0;
153
		if (imode == env->high_signed) {
154
			set_method_param_type(mtd, n_param++, tp_u);
Michael Beck's avatar
Michael Beck committed
155
			set_method_param_type(mtd, n_param++, tp_s);
156
		} else if (imode == env->high_unsigned) {
Michael Beck's avatar
Michael Beck committed
157
158
			set_method_param_type(mtd, n_param++, tp_u);
			set_method_param_type(mtd, n_param++, tp_u);
Michael Beck's avatar
BugFix:    
Michael Beck committed
159
		} else {
160
			ir_type *tp = get_type_for_mode(imode);
Michael Beck's avatar
Michael Beck committed
161
			set_method_param_type(mtd, n_param++, tp);
162
		}
Michael Beck's avatar
Michael Beck committed
163
164

		n_res = 0;
165
		if (omode == env->high_signed) {
166
			set_method_res_type(mtd, n_res++, tp_u);
Michael Beck's avatar
Michael Beck committed
167
			set_method_res_type(mtd, n_res++, tp_s);
168
		} else if (omode == env->high_unsigned) {
Michael Beck's avatar
Michael Beck committed
169
170
			set_method_res_type(mtd, n_res++, tp_u);
			set_method_res_type(mtd, n_res++, tp_u);
Michael Beck's avatar
BugFix:    
Michael Beck committed
171
		} else {
172
			ir_type *tp = get_type_for_mode(omode);
Michael Beck's avatar
Michael Beck committed
173
			set_method_res_type(mtd, n_res++, tp);
174
		}
Michael Beck's avatar
Michael Beck committed
175
		entry->mtd = mtd;
Michael Beck's avatar
BugFix:    
Michael Beck committed
176
	} else {
Michael Beck's avatar
Michael Beck committed
177
		mtd = entry->mtd;
178
	}
Michael Beck's avatar
Michael Beck committed
179
	return mtd;
180
}
Michael Beck's avatar
Michael Beck committed
181

182
183
184
185
186
187
188
189
/**
 * Add an additional control flow input to a block.
 * Patch all Phi nodes. The new Phi inputs are copied from
 * old input number nr.
 */
static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
{
	int const        arity = get_irn_arity(block);
	ir_node        **in;
	const ir_edge_t *edge;
	int              pos;

	assert(nr < arity);

	/* scratch array for the extended predecessor lists (allocated on stack) */
	NEW_ARR_A(ir_node *, in, arity + 1);

	/* append cf to the block's control-flow predecessors */
	for (pos = 0; pos < arity; ++pos)
		in[pos] = get_irn_n(block, pos);
	in[arity] = cf;
	set_irn_in(block, arity + 1, in);

	/* each Phi duplicates the value of old input nr for the new edge */
	foreach_out_edge(block, edge) {
		ir_node *phi = get_edge_src_irn(edge);
		if (!is_Phi(phi))
			continue;

		for (pos = 0; pos < arity; ++pos)
			in[pos] = get_irn_n(phi, pos);
		in[arity] = in[nr];
		set_irn_in(phi, arity + 1, in);
	}
}
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227

/**
 * Add an additional control flow input to a block.
 * Patch all Phi nodes. The new Phi inputs are copied from
 * the old input that is fed by tmpl.
 */
static void add_block_cf_input(ir_node *block, ir_node *tmpl, ir_node *cf)
{
	int const arity = get_irn_arity(block);
	int       nr    = 0;
	int       pos;

	/* locate the predecessor position occupied by tmpl */
	for (pos = 0; pos < arity; ++pos) {
		if (get_irn_n(block, pos) == tmpl) {
			nr = pos;
			break;
		}
	}
	assert(pos < arity);
	add_block_cf_input_nr(block, nr, cf);
}
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251

/**
 * Return the "operational" mode of a Firm node, i.e. the mode of the value
 * the operation actually works on.  This differs from get_irn_mode() for
 * memory operations (mode_T nodes) and comparisons (mode_b).
 */
static ir_mode *get_irn_op_mode(ir_node *node)
{
	switch (get_irn_opcode(node)) {
	case iro_Load:  return get_Load_mode(node);
	case iro_Store: return get_irn_mode(get_Store_value(node));
	case iro_Div:   return get_irn_mode(get_Div_left(node));
	case iro_Mod:   return get_irn_mode(get_Mod_left(node));
	case iro_Cmp:   return get_irn_mode(get_Cmp_left(node));
	default:        return get_irn_mode(node);
	}
}
254
255

/**
256
257
 * Walker, prepare the node links and determine which nodes need to be lowered
 * at all.
258
 */
259
static void prepare_links(ir_node *node)
260
{
261
262
	ir_mode         *mode = get_irn_op_mode(node);
	lower64_entry_t *link;
263

264
	if (mode == env->high_signed || mode == env->high_unsigned) {
265
		unsigned idx = get_irn_idx(node);
266
		/* ok, found a node that will be lowered */
267
		link = OALLOCZ(&env->obst, lower64_entry_t);
268

269
		if (idx >= env->n_entries) {
Michael Beck's avatar
Michael Beck committed
270
			/* enlarge: this happens only for Rotl nodes which is RARELY */
271
			unsigned old   = env->n_entries;
272
			unsigned n_idx = idx + (idx >> 3);
Michael Beck's avatar
Michael Beck committed
273

274
			ARR_RESIZE(lower64_entry_t *, env->entries, n_idx);
275
276
			memset(&env->entries[old], 0, (n_idx - old) * sizeof(env->entries[0]));
			env->n_entries = n_idx;
Michael Beck's avatar
Michael Beck committed
277
		}
278
279
		env->entries[idx] = link;
		env->flags |= MUST_BE_LOWERED;
280
	} else if (is_Conv(node)) {
281
282
283
284
		/* Conv nodes have two modes */
		ir_node *pred = get_Conv_op(node);
		mode = get_irn_mode(pred);

285
		if (mode == env->high_signed || mode == env->high_unsigned) {
Michael Beck's avatar
BugFix:    
Michael Beck committed
286
			/* must lower this node either but don't need a link */
287
			env->flags |= MUST_BE_LOWERED;
288
		}
289
		return;
290
291
	}
}
292

293
/**
 * Return the lowering entry (the low/high replacement words) registered for
 * @p node by prepare_links.  The node's index must be inside the entry table.
 */
lower64_entry_t *get_node_entry(ir_node *node)
{
	unsigned idx = get_irn_idx(node);
	assert(idx < env->n_entries);
	return env->entries[idx];
}

300
/**
 * Record the two word-sized replacement nodes for the double-word node
 * @p old in its lowering entry.
 */
void ir_set_dw_lowered(ir_node *old, ir_node *new_low, ir_node *new_high)
{
	lower64_entry_t *const entry = get_node_entry(old);

	entry->low_word  = new_low;
	entry->high_word = new_high;
}

307
308
309
310
311
/** Return the word-sized unsigned mode used for the lowered low words. */
ir_mode *ir_get_low_unsigned_mode(void)
{
	return env->low_unsigned;
}

312
313
314
/**
 * Translate a Constant: create two.
 */
315
static void lower_Const(ir_node *node, ir_mode *mode)
316
{
Matthias Braun's avatar
Matthias Braun committed
317
318
319
320
321
322
323
324
325
	ir_graph  *irg      = get_irn_irg(node);
	dbg_info  *dbg      = get_irn_dbg_info(node);
	ir_mode   *low_mode = env->low_unsigned;
	ir_tarval *tv       = get_Const_tarval(node);
	ir_tarval *tv_l     = tarval_convert_to(tv, low_mode);
	ir_node   *res_low  = new_rd_Const(dbg, irg, tv_l);
	ir_tarval *tv_shrs  = tarval_shrs(tv, env->tv_mode_bits);
	ir_tarval *tv_h     = tarval_convert_to(tv_shrs, mode);
	ir_node   *res_high = new_rd_Const(dbg, irg, tv_h);
326

327
	ir_set_dw_lowered(node, res_low, res_high);
328
}
329
330
331
332

/**
 * Translate a Load: create two word-sized Loads.
 *
 * The address of the second load is the original address plus the word size;
 * which of the two addresses reads the low word depends on endianness.  The
 * second load is memory-chained behind the first, and the original Load's
 * Projs are rerouted onto the new pair.
 */
static void lower_Load(ir_node *node, ir_mode *mode)
{
	ir_mode    *low_mode = env->low_unsigned;
	ir_graph   *irg = get_irn_irg(node);
	ir_node    *adr = get_Load_ptr(node);
	ir_node    *mem = get_Load_mem(node);
	ir_node    *low;
	ir_node    *high;
	ir_node    *proj_m;
	dbg_info   *dbg;
	ir_node    *block = get_nodes_block(node);
	ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
	                         ? cons_volatile : cons_none;
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* compute the two word addresses: little endian keeps the low word at
	 * the original address, big endian keeps the high word there */
	if (env->params->little_endian) {
		low  = adr;
		high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
	} else {
		low  = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
		high = adr;
	}

	/* create two loads; the high load consumes the low load's memory Proj */
	dbg    = get_irn_dbg_info(node);
	low    = new_rd_Load(dbg, block, mem,  low,  low_mode, volatility);
	proj_m = new_r_Proj(low, mode_M, pn_Load_M);
	high   = new_rd_Load(dbg, block, proj_m, high, mode, volatility);

	/* reroute the original Projs onto the new Load pair */
	foreach_out_edge_safe(node, edge, next) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch (get_Proj_proj(proj)) {
		case pn_Load_M:         /* Memory result. */
			/* put it to the second one */
			set_Proj_pred(proj, high);
			break;
		case pn_Load_X_except:  /* Execution result if exception occurred. */
			/* put it to the first one */
			set_Proj_pred(proj, low);
			break;
		case pn_Load_res: {       /* Result of load operation. */
			ir_node *res_low  = new_r_Proj(low,  low_mode, pn_Load_res);
			ir_node *res_high = new_r_Proj(high, mode,     pn_Load_res);
			ir_set_dw_lowered(proj, res_low, res_high);
			break;
		}
		default:
			assert(0 && "unexpected Proj number");
		}
		/* mark this proj: we have handled it already, otherwise we might fall
		 * into out new nodes. */
		mark_irn_visited(proj);
	}
}
391
392
393
394

/**
 * Translate a Store: create two word-sized Stores.
 *
 * If the stored value's lowering entry is not filled in yet, the node is
 * queued on env->waitq and retried later.  Otherwise the two word stores
 * are built (address layout as in lower_Load) and the original Store's
 * Projs are rerouted.
 */
static void lower_Store(ir_node *node, ir_mode *mode)
{
	ir_graph              *irg;
	ir_node               *block, *adr, *mem;
	ir_node               *low, *high, *proj_m;
	dbg_info              *dbg;
	ir_node               *value = get_Store_value(node);
	const lower64_entry_t *entry = get_node_entry(value);
	ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
	                           ? cons_volatile : cons_none;
	const ir_edge_t *edge;
	const ir_edge_t *next;
	(void) mode;

	assert(entry);

	if (! entry->low_word) {
		/* not ready yet, wait */
		pdeq_putr(env->waitq, node);
		return;
	}

	irg = get_irn_irg(node);
	adr = get_Store_ptr(node);
	mem = get_Store_mem(node);
	block = get_nodes_block(node);

	/* word addresses: low word at the base address on little endian,
	 * high word at the base address on big endian */
	if (env->params->little_endian) {
		low  = adr;
		high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
	} else {
		low  = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
		high = adr;
	}

	/* create two Stores, memory-chained through the first one's Proj */
	dbg    = get_irn_dbg_info(node);
	low    = new_rd_Store(dbg, block, mem, low,  entry->low_word, volatility);
	proj_m = new_r_Proj(low, mode_M, pn_Store_M);
	high   = new_rd_Store(dbg, block, proj_m, high, entry->high_word, volatility);

	/* reroute the original Projs onto the new Store pair */
	foreach_out_edge_safe(node, edge, next) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch (get_Proj_proj(proj)) {
		case pn_Store_M:         /* Memory result. */
			/* put it to the second one */
			set_Proj_pred(proj, high);
			break;
		case pn_Store_X_except:  /* Execution result if exception occurred. */
			/* put it to the first one */
			set_Proj_pred(proj, low);
			break;
		default:
			assert(0 && "unexpected Proj number");
		}
		/* mark this proj: we have handled it already, otherwise we might fall into
		 * out new nodes. */
		mark_irn_visited(proj);
	}
}
458
459
460

/**
 * Return a node containing the address of the intrinsic emulation function.
Michael Beck's avatar
Michael Beck committed
461
462
463
464
465
466
 *
 * @param method  the method type of the emulation function
 * @param op      the emulated ir_op
 * @param imode   the input mode of the emulated opcode
 * @param omode   the output mode of the emulated opcode
 * @param env     the lower environment
467
 */
Michael Beck's avatar
Michael Beck committed
468
static ir_node *get_intrinsic_address(ir_type *method, ir_op *op,
469
                                      ir_mode *imode, ir_mode *omode)
470
{
471
	symconst_symbol sym;
472
	ir_entity *ent;
473
474
	op_mode_entry_t key, *entry;

Michael Beck's avatar
Michael Beck committed
475
476
477
478
	key.op    = op;
	key.imode = imode;
	key.omode = omode;
	key.ent   = NULL;
479

480
	entry = (op_mode_entry_t*)set_insert(intrinsic_fkt, &key, sizeof(key),
Michael Beck's avatar
Michael Beck committed
481
				HASH_PTR(op) ^ HASH_PTR(imode) ^ (HASH_PTR(omode) << 8));
482
483
	if (! entry->ent) {
		/* create a new one */
Michael Beck's avatar
Michael Beck committed
484
		ent = env->params->create_intrinsic(method, op, imode, omode, env->params->ctx);
485
486
487

		assert(ent && "Intrinsic creator must return an entity");
		entry->ent = ent;
Michael Beck's avatar
BugFix:    
Michael Beck committed
488
	} else {
489
		ent = entry->ent;
490
	}
491
	sym.entity_p = ent;
492
	return new_r_SymConst(env->irg, mode_P_code, sym, symconst_addr_ent);
493
}
494
495
496
497
498
499

/**
 * Translate a Div.
 *
 * Create an intrinsic Call.  The two double-word operands are passed as
 * four word arguments (low word first on little endian targets, high word
 * first otherwise); the Div's Projs are rerouted onto the Call.
 */
static void lower_Div(ir_node *node, ir_mode *mode)
{
	ir_node         *left   = get_Div_left(node);
	ir_node         *right  = get_Div_right(node);
	ir_node         *block  = get_nodes_block(node);
	dbg_info        *dbgi   = get_irn_dbg_info(node);
	ir_type         *mtp    = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
	ir_mode         *opmode = get_irn_op_mode(node);
	ir_node         *addr
	    = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
	ir_node         *in[4];
	ir_node         *call;
	ir_node         *resproj;
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* word order of the call arguments follows the target endianness */
	if (env->params->little_endian) {
		in[0] = get_lowered_low(left);
		in[1] = get_lowered_high(left);
		in[2] = get_lowered_low(right);
		in[3] = get_lowered_high(right);
	} else {
		in[0] = get_lowered_high(left);
		in[1] = get_lowered_low(left);
		in[2] = get_lowered_high(right);
		in[3] = get_lowered_low(right);
	}
	call    = new_rd_Call(dbgi, block, get_Div_mem(node), addr, 4, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
	set_irn_pinned(call, get_irn_pinned(node));

	/* reroute the Div Projs onto the corresponding Call Projs */
	foreach_out_edge_safe(node, edge, next) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch (get_Proj_proj(proj)) {
		case pn_Div_M:         /* Memory result. */
			/* reroute to the call */
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_M);
			break;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_regular);
			break;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_except);
			break;
		case pn_Div_res:
			/* the quotient comes back as two word Projs of the Call result */
			if (env->params->little_endian) {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
				ir_node *res_high = new_r_Proj(resproj, mode,              1);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
				ir_node *res_high = new_r_Proj(resproj, mode,              0);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
			break;
		default:
			assert(0 && "unexpected Proj number");
		}
		/* mark this proj: we have handled it already, otherwise we might fall into
		 * out new nodes. */
		mark_irn_visited(proj);
	}
}
569
570
571
572
573
574

/**
 * Translate a Mod.
 *
 * Create an intrinsic Call.
 */
575
static void lower_Mod(ir_node *node, ir_mode *mode)
576
{
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
	ir_node         *left   = get_Mod_left(node);
	ir_node         *right  = get_Mod_right(node);
	dbg_info        *dbgi   = get_irn_dbg_info(node);
	ir_node         *block  = get_nodes_block(node);
	ir_type         *mtp    = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
	ir_mode         *opmode = get_irn_op_mode(node);
	ir_node         *addr
	    = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
	ir_node         *in[4];
	ir_node         *call;
	ir_node         *resproj;
	const ir_edge_t *edge;
	const ir_edge_t *next;

	if (env->params->little_endian) {
		in[0] = get_lowered_low(left);
		in[1] = get_lowered_high(left);
		in[2] = get_lowered_low(right);
		in[3] = get_lowered_high(right);
	} else {
		in[0] = get_lowered_high(left);
		in[1] = get_lowered_low(left);
		in[2] = get_lowered_high(right);
		in[3] = get_lowered_low(right);
	}
	call    = new_rd_Call(dbgi, block, get_Mod_mem(node), addr, 4, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
604
605
	set_irn_pinned(call, get_irn_pinned(node));

606
607
608
609
610
	foreach_out_edge_safe(node, edge, next) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

611
612
613
614
		switch (get_Proj_proj(proj)) {
		case pn_Mod_M:         /* Memory result. */
			/* reroute to the call */
			set_Proj_pred(proj, call);
615
			set_Proj_proj(proj, pn_Call_M);
616
			break;
617
618
619
620
621
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_regular);
			break;
		case pn_Mod_X_except:
622
623
624
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_except);
			break;
625
626
627
628
629
630
631
632
633
634
		case pn_Mod_res:
			if (env->params->little_endian) {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
				ir_node *res_high = new_r_Proj(resproj, mode,              1);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
				ir_node *res_high = new_r_Proj(resproj, mode,              0);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
635
636
637
			break;
		default:
			assert(0 && "unexpected Proj number");
638
		}
639
640
		/* mark this proj: we have handled it already, otherwise we might fall
		 * into out new nodes. */
641
		mark_irn_visited(proj);
642
643
	}
}
644
645

/**
646
 * Translate a binop.
647
648
649
 *
 * Create an intrinsic Call.
 */
650
static void lower_binop(ir_node *node, ir_mode *mode)
651
{
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
	ir_node  *left  = get_binop_left(node);
	ir_node  *right = get_binop_right(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(block);
	ir_type  *mtp   = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
	ir_node  *addr  = get_intrinsic_address(mtp, get_irn_op(node), mode, mode);
	ir_node  *in[4];
	ir_node  *call;
	ir_node  *resproj;

	if (env->params->little_endian) {
		in[0] = get_lowered_low(left);
		in[1] = get_lowered_high(left);
		in[2] = get_lowered_low(right);
		in[3] = get_lowered_high(right);
	} else {
		in[0] = get_lowered_high(left);
		in[1] = get_lowered_low(left);
		in[2] = get_lowered_high(right);
		in[3] = get_lowered_low(right);
	}
	call    = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
676
	set_irn_pinned(call, get_irn_pinned(node));
677
678
679
680
681
682
683
684
685
686

	if (env->params->little_endian) {
		ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
		ir_node *res_high = new_r_Proj(resproj, mode,              1);
		ir_set_dw_lowered(node, res_low, res_high);
	} else {
		ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
		ir_node *res_high = new_r_Proj(resproj, mode,              0);
		ir_set_dw_lowered(node, res_low, res_high);
	}
687
}
688

689
/**
 * Return node converted to dest_mode (creating a Conv in block), or node
 * itself if it already has that mode.
 */
static ir_node *create_conv(ir_node *block, ir_node *node, ir_mode *dest_mode)
{
	if (get_irn_mode(node) != dest_mode)
		return new_r_Conv(block, node, dest_mode);
	return node;
}
Christian Würdig's avatar
Christian Würdig committed
695

Michael Beck's avatar
Michael Beck committed
696
/**
 * Moves node and all predecessors of node from from_bl to to_bl.
 * Does not move predecessors of Phi nodes (or block nodes).
 *
 * For mode_T nodes the attached Projs are moved along.  Predecessors that
 * are double-word values additionally drag their already-created low/high
 * replacement nodes with them, since those data dependencies are not
 * explicit in the graph yet.
 */
static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl)
{
	int i, arity;

	/* move this node */
	set_nodes_block(node, to_bl);

	/* move its Projs */
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			if (!is_Proj(proj))
				continue;
			move(proj, from_bl, to_bl);
		}
	}

	/* We must not move predecessors of Phi nodes, even if they are in
	 * from_bl. (because these are values from an earlier loop iteration
	 * which are not predecessors of node here)
	 */
	if (is_Phi(node))
		return;

	/* recursion: pull every predecessor still sitting in from_bl along */
	arity = get_irn_arity(node);
	for (i = 0; i < arity; i++) {
		ir_node *pred      = get_irn_n(node, i);
		ir_mode *pred_mode = get_irn_mode(pred);
		if (get_nodes_block(pred) == from_bl)
			move(pred, from_bl, to_bl);
		if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
			/* also move the implicit low/high replacements of a lowered pred */
			ir_node *pred_low  = get_lowered_low(pred);
			ir_node *pred_high = get_lowered_high(pred);
			if (get_nodes_block(pred_low) == from_bl)
				move(pred_low, from_bl, to_bl);
			if (pred_high != NULL && get_nodes_block(pred_high) == from_bl)
				move(pred_high, from_bl, to_bl);
		}
	}
}
Michael Beck's avatar
Michael Beck committed
742
743

/**
 * Split the block of node: node and its same-block predecessors are moved
 * into a fresh block that inherits the old block's control-flow
 * predecessors and Phis; the old block (the return value) is left without
 * predecessors so the caller can wire new control flow into it.
 *
 * We need a custom version of part_block_edges because during transformation
 * not all data-dependencies are explicit yet if a lowered nodes users are not
 * lowered yet.
 * We can fix this by modifying move to look for such implicit dependencies.
 * Additionally we have to keep the proj_2_block map updated
 */
static ir_node *part_block_dw(ir_node *node)
{
	ir_graph *irg        = get_irn_irg(node);
	ir_node  *old_block  = get_nodes_block(node);
	int       n_cfgpreds = get_Block_n_cfgpreds(old_block);
	ir_node **cfgpreds   = get_Block_cfgpred_arr(old_block);
	ir_node  *new_block  = new_r_Block(irg, n_cfgpreds, cfgpreds);
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* old_block has no predecessors anymore for now */
	set_irn_in(old_block, 0, NULL);

	/* move node and its predecessors to new_block */
	move(node, old_block, new_block);

	/* move Phi nodes to new_block */
	foreach_out_edge_safe(old_block, edge, next) {
		ir_node *phi = get_edge_src_irn(edge);
		if (!is_Phi(phi))
			continue;
		set_nodes_block(phi, new_block);
	}
	return old_block;
}
Michael Beck's avatar
Michael Beck committed
775

776
777
778
779
780
781
/** Constructor signature shared by new_rd_Shr and new_rd_Shrs. */
typedef ir_node* (*new_rd_shr_func)(dbg_info *dbgi, ir_node *block,
                                    ir_node *left, ir_node *right,
                                    ir_mode *mode);

/**
 * Lower a double-word right shift (Shr or Shrs, selected via new_rd_shrs).
 *
 * Splits the block and builds a runtime test on the shift amount: if it is
 * less than the word width, both words contribute to the result; otherwise
 * only the (shifted) high word does.  The two alternatives are merged with
 * Phis in the split-off lower block.  Only works for two-complement,
 * power-of-2, full-modulo-shift modes (checked via panic).
 */
static void lower_shr_helper(ir_node *node, ir_mode *mode,
                             new_rd_shr_func new_rd_shrs)
{
	ir_node  *right         = get_binop_right(node);
	ir_node  *left          = get_binop_left(node);
	ir_mode  *shr_mode      = get_irn_mode(node);
	unsigned  modulo_shift  = get_mode_modulo_shift(shr_mode);
	ir_mode  *low_unsigned  = env->low_unsigned;
	unsigned  modulo_shift2 = get_mode_modulo_shift(mode);
	ir_graph *irg           = get_irn_irg(node);
	ir_node  *left_low      = get_lowered_low(left);
	ir_node  *left_high     = get_lowered_high(left);
	dbg_info *dbgi          = get_irn_dbg_info(node);
	ir_node  *lower_block;
	ir_node  *block;
	ir_node  *cnst;
	ir_node  *and;
	ir_node  *cmp;
	ir_node  *cond;
	ir_node  *proj_true;
	ir_node  *proj_false;
	ir_node  *phi_low;
	ir_node  *phi_high;
	ir_node  *lower_in[2];
	ir_node  *phi_low_in[2];
	ir_node  *phi_high_in[2];

	/* this version is optimized for modulo shift architectures
	 * (and can't handle anything else) */
	if (modulo_shift != get_mode_size_bits(shr_mode)
			|| modulo_shift2<<1 != modulo_shift) {
		panic("Shr lowering only implemented for modulo shift shr operations");
	}
	if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
		panic("Shr lowering only implemented for power-of-2 modes");
	}
	/* without 2-complement the -x instead of (bit_width-x) trick won't work */
	if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
		panic("Shr lowering only implemented for two-complement modes");
	}

	block = get_nodes_block(node);

	/* if the right operand is a 64bit value, we're only interested in the
	 * lower word */
	if (get_irn_mode(right) == env->high_unsigned) {
		right = get_lowered_low(right);
	} else {
		/* shift should never have signed mode on the right */
		assert(get_irn_mode(right) != env->high_signed);
		right = create_conv(block, right, low_unsigned);
	}

	/* split the block; node and its preds go to a new upper block */
	lower_block = part_block_dw(node);
	env->flags |= CF_CHANGED;
	block = get_nodes_block(node);

	/* add a Cmp to test if highest bit is set <=> whether we shift more
	 * than half the word width */
	cnst       = new_r_Const_long(irg, low_unsigned, modulo_shift2);
	and        = new_r_And(block, right, cnst, low_unsigned);
	cnst       = new_r_Const(irg, get_mode_null(low_unsigned));
	cmp        = new_rd_Cmp(dbgi, block, and, cnst, ir_relation_equal);
	cond       = new_rd_Cond(dbgi, block, cmp);
	proj_true  = new_r_Proj(cond, mode_X, pn_Cond_true);
	proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);

	/* the true block => shift_width < 1word */
	{
		/* In theory the low value (for 64bit shifts) is:
		 *    Or(High << (32-x)), Low >> x)
		 * In practice High << 32-x will fail when x is zero (since we have
		 * modulo shift and 32 will be 0). So instead we use:
		 *    Or(High<<1<<~x, Low >> x)
		 */
		ir_node *in[1]        = { proj_true };
		ir_node *block_true   = new_r_Block(irg, ARRAY_SIZE(in), in);
		ir_node *res_high     = new_rd_shrs(dbgi, block_true, left_high,
		                                    right, mode);
		ir_node *shift_low    = new_rd_Shr(dbgi, block_true, left_low, right,
		                                   low_unsigned);
		ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
		                                   low_unsigned);
		ir_node *conv         = create_conv(block_true, left_high,
		                                    low_unsigned);
		ir_node *one          = new_r_Const(irg, get_mode_one(low_unsigned));
		ir_node *carry0       = new_rd_Shl(dbgi, block_true, conv, one,
		                                   low_unsigned);
		ir_node *carry1       = new_rd_Shl(dbgi, block_true, carry0,
		                                   not_shiftval, low_unsigned);
		ir_node *res_low      = new_rd_Or(dbgi, block_true, shift_low, carry1,
		                                  low_unsigned);
		lower_in[0]           = new_r_Jmp(block_true);
		phi_low_in[0]         = res_low;
		phi_high_in[0]        = res_high;
	}

	/* false block => shift_width > 1word */
	{
		/* the low result is the high word shifted by the remaining amount;
		 * the high result is the sign (Shrs) or zero (Shr) */
		ir_node *in[1]       = { proj_false };
		ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
		ir_node *conv        = create_conv(block_false, left_high, low_unsigned);
		ir_node *res_low     = new_rd_shrs(dbgi, block_false, conv, right,
		                                   low_unsigned);
		int      cnsti       = modulo_shift2-1;
		ir_node *cnst2       = new_r_Const_long(irg, low_unsigned, cnsti);
		ir_node *res_high;
		if (new_rd_shrs == new_rd_Shrs) {
			/* arithmetic shift: replicate the sign into the high word */
			res_high = new_rd_shrs(dbgi, block_false, left_high, cnst2, mode);
		} else {
			/* logical shift: the high word becomes zero */
			res_high = new_r_Const(irg, get_mode_null(mode));
		}
		lower_in[1]          = new_r_Jmp(block_false);
		phi_low_in[1]        = res_low;
		phi_high_in[1]       = res_high;
	}

	/* patch lower block */
	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	phi_low  = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
	                     low_unsigned);
	phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
	                     mode);
	ir_set_dw_lowered(node, phi_low, phi_high);
}

/** Lower a double-word Shr by delegating to the shift helper with the
 *  (unsigned) Shr constructor. */
static void lower_Shr(ir_node *node, ir_mode *mode)
{
	lower_shr_helper(node, mode, new_rd_Shr);
}

/** Lower a double-word Shrs by delegating to the shift helper with the
 *  (arithmetic) Shrs constructor. */
static void lower_Shrs(ir_node *node, ir_mode *mode)
{
	lower_shr_helper(node, mode, new_rd_Shrs);
}

/**
 * Lower a double-word Shl.
 *
 * Builds a conditional diamond: the true branch handles shift amounts
 * smaller than one word (shift both halves and carry bits from the low
 * into the high word), the false branch handles shift amounts of at
 * least one word (low word becomes 0, low word shifted into the high
 * word).  Only works for modulo-shift, power-of-2, two's-complement
 * modes (checked via panic below).
 */
static void lower_Shl(ir_node *node, ir_mode *mode)
{
	ir_node  *right         = get_binop_right(node);
	ir_node  *left          = get_binop_left(node);
	ir_mode  *shr_mode      = get_irn_mode(node);
	unsigned  modulo_shift  = get_mode_modulo_shift(shr_mode);
	ir_mode  *low_unsigned  = env->low_unsigned;
	unsigned  modulo_shift2 = get_mode_modulo_shift(mode);
	ir_graph *irg           = get_irn_irg(node);
	ir_node  *left_low      = get_lowered_low(left);
	ir_node  *left_high     = get_lowered_high(left);
	dbg_info *dbgi          = get_irn_dbg_info(node);
	ir_node  *lower_block   = get_nodes_block(node);
	ir_node  *block;
	ir_node  *cnst;
	ir_node  *and;
	ir_node  *cmp;
	ir_node  *cond;
	ir_node  *proj_true;
	ir_node  *proj_false;
	ir_node  *phi_low;
	ir_node  *phi_high;
	ir_node  *lower_in[2];
	ir_node  *phi_low_in[2];
	ir_node  *phi_high_in[2];

	/* this version is optimized for modulo shift architectures
	 * (and can't handle anything else) */
	if (modulo_shift != get_mode_size_bits(shr_mode)
			|| modulo_shift2<<1 != modulo_shift) {
		panic("Shl lowering only implemented for modulo shift shl operations");
	}
	if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
		panic("Shl lowering only implemented for power-of-2 modes");
	}
	/* without 2-complement the -x instead of (bit_width-x) trick won't work */
	if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
		panic("Shl lowering only implemented for two-complement modes");
	}

	/* if the right operand is a 64bit value, we're only interested in the
	 * lower word */
	if (get_irn_mode(right) == env->high_unsigned) {
		right = get_lowered_low(right);
	} else {
		/* shift should never have signed mode on the right */
		assert(get_irn_mode(right) != env->high_signed);
		right = create_conv(lower_block, right, low_unsigned);
	}

	part_block_dw(node);
	env->flags |= CF_CHANGED;
	block = get_nodes_block(node);

	/* add a Cmp to test if highest bit is set <=> whether we shift more
	 * than half the word width */
	cnst       = new_r_Const_long(irg, low_unsigned, modulo_shift2);
	and        = new_r_And(block, right, cnst, low_unsigned);
	cnst       = new_r_Const(irg, get_mode_null(low_unsigned));
	cmp        = new_rd_Cmp(dbgi, block, and, cnst, ir_relation_equal);
	cond       = new_rd_Cond(dbgi, block, cmp);
	proj_true  = new_r_Proj(cond, mode_X, pn_Cond_true);
	proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);

	/* the true block => shift_width < 1word */
	{
		ir_node *in[1]        = { proj_true };
		ir_node *block_true   = new_r_Block(irg, ARRAY_SIZE(in), in);

		ir_node *res_low      = new_rd_Shl(dbgi, block_true, left_low,
		                                   right, low_unsigned);
		ir_node *shift_high   = new_rd_Shl(dbgi, block_true, left_high, right,
		                                   mode);
		ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
		                                   low_unsigned);
		ir_node *conv         = create_conv(block_true, left_low, mode);
		ir_node *one          = new_r_Const(irg, get_mode_one(low_unsigned));
		/* carry = (low >> 1) >> ~shiftval, i.e. the bits shifted out of the
		 * low word, ORed into the shifted high word */
		ir_node *carry0       = new_rd_Shr(dbgi, block_true, conv, one, mode);
		ir_node *carry1       = new_rd_Shr(dbgi, block_true, carry0,
		                                   not_shiftval, mode);
		ir_node *res_high     = new_rd_Or(dbgi, block_true, shift_high, carry1,
		                                  mode);
		lower_in[0]           = new_r_Jmp(block_true);
		phi_low_in[0]         = res_low;
		phi_high_in[0]        = res_high;
	}

	/* false block => shift_width >= 1word */
	{
		ir_node *in[1]       = { proj_false };
		ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
		ir_node *res_low     = new_r_Const(irg, get_mode_null(low_unsigned));
		ir_node *conv        = create_conv(block_false, left_low, mode);
		ir_node *res_high    = new_rd_Shl(dbgi, block_false, conv, right, mode);
		lower_in[1]          = new_r_Jmp(block_false);
		phi_low_in[1]        = res_low;
		phi_high_in[1]       = res_high;
	}

	/* patch lower block */
	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	phi_low  = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
	                     low_unsigned);
	phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
	                     mode);
	ir_set_dw_lowered(node, phi_low, phi_high);
}

/**
Michael Beck's avatar
Michael Beck committed
1025
 * Rebuild Rotl nodes into Or(Shl, Shr) and prepare all nodes.
Michael Beck's avatar
Michael Beck committed
1026
 */
1027
static void prepare_links_and_handle_rotl(ir_node *node, void *data)
1028
{
1029
	(void) data;
Michael Beck's avatar
Michael Beck committed
1030
	if (is_Rotl(node)) {
1031
1032
		ir_mode  *mode = get_irn_op_mode(node);
		ir_node  *right;
1033
		ir_node  *left, *shl, *shr, *ornode, *block, *sub, *c;
1034
1035
1036
1037
1038
		ir_mode  *omode, *rmode;
		ir_graph *irg;
		dbg_info *dbg;
		optimization_state_t state;

1039
1040
		if (mode != env->high_signed && mode != env->high_unsigned) {
			prepare_links(node);
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
			return;
		}

		/* replace the Rotl(x,y) by an Or(Shl(x,y), Shr(x,64-y)) */
		right = get_Rotl_right(node);
		irg   = get_irn_irg(node);
		dbg   = get_irn_dbg_info(node);
		omode = get_irn_mode(node);
		left  = get_Rotl_left(node);
		block = get_nodes_block(node);
		shl   = new_rd_Shl(dbg, block, left, right, omode);
		rmode = get_irn_mode(right);
		c     = new_r_Const_long(irg, rmode, get_mode_size_bits(omode));
		sub   = new_rd_Sub(dbg, block, c, right, rmode);
		shr   = new_rd_Shr(dbg, block, left, sub, omode);

		/* switch optimization off here, or we will get the Rotl back */
		save_optimization_state(&state);
		set_opt_algebraic_simplification(0);
1060
		ornode = new_rd_Or(dbg, block, shl, shr, omode);
1061
1062
		restore_optimization_state(&state);

1063
		exchange(node, ornode);
1064
1065

		/* do lowering on the new nodes */
1066
1067
1068
1069
1070
		prepare_links(shl);
		prepare_links(c);
		prepare_links(sub);
		prepare_links(shr);
		prepare_links(ornode);
1071
		return;
Michael Beck's avatar
Michael Beck committed
1072
	}
1073

1074
	prepare_links(node);
Michael Beck's avatar
Michael Beck committed
1075
}
Michael Beck's avatar
Michael Beck committed
1076

1077
1078
1079
1080
1081
/**
 * Translate an Unop.
 *
 * Create an intrinsic Call.
 */
1082
static void lower_unop(ir_node *node, ir_mode *mode)
1083
{
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
	ir_node  *op       = get_unop_op(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *block    = get_nodes_block(node);
	ir_graph *irg      = get_irn_irg(block);
	ir_type  *mtp      = mode_is_signed(mode) ? unop_tp_s : unop_tp_u;
	ir_op    *irop     = get_irn_op(node);
	ir_node  *addr     = get_intrinsic_address(mtp, irop, mode, mode);
	ir_node  *nomem    = get_irg_no_mem(irg);
	ir_node  *in[2];
	ir_node  *call;
	ir_node  *resproj;

	if (env->params->little_endian) {
		in[0] = get_lowered_low(op);
		in[1] = get_lowered_high(op);
	} else {
		in[0] = get_lowered_high(op);
		in[1] = get_lowered_low(op);
	}
	call    = new_rd_Call(dbgi, block, nomem, addr, 2, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
1105
	set_irn_pinned(call, get_irn_pinned(node));
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115

	if (env->params->little_endian) {
		ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
		ir_node *res_high = new_r_Proj(resproj, mode,              1);
		ir_set_dw_lowered(node, res_low, res_high);
	} else {
		ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
		ir_node *res_high = new_r_Proj(resproj, mode,              0);
		ir_set_dw_lowered(node, res_low, res_high);
	}
1116
}
1117
1118

/**
1119
 * Translate a logical binop.
1120
 *
1121
 * Create two logical binops.
1122
 */
1123
static void lower_binop_logical(ir_node *node, ir_mode *mode,
1124
1125
								ir_node *(*constr_rd)(dbg_info *db, ir_node *block, ir_node *op1, ir_node *op2, ir_mode *mode) )
{
1126
1127
1128
1129
1130
1131
1132
	ir_node               *left        = get_binop_left(node);
	ir_node               *right       = get_binop_right(node);
	const lower64_entry_t *left_entry  = get_node_entry(left);
	const lower64_entry_t *right_entry = get_node_entry(right);
	dbg_info              *dbgi        = get_irn_dbg_info(node);
	ir_node               *block       = get_nodes_block(node);
	ir_node               *res_low
1133
1134
		= constr_rd(dbgi, block, left_entry->low_word, right_entry->low_word,
		            env->low_unsigned);
1135
	ir_node               *res_high
1136
1137
		= constr_rd(dbgi, block, left_entry->high_word, right_entry->high_word,
		            mode);
1138
	ir_set_dw_lowered(node, res_low, res_high);
1139
}
1140

1141
static void lower_And(ir_node *node, ir_mode *mode)
1142
{
1143
	lower_binop_logical(node, mode, new_rd_And);
1144
}
1145

1146
static void lower_Or(ir_node *node, ir_mode *mode)
1147
{
1148
	lower_binop_logical(node, mode, new_rd_Or);
1149
1150
}

1151
static void lower_Eor(ir_node *node, ir_mode *mode)
1152
{
1153
	lower_binop_logical(node, mode, new_rd_Eor);
1154
}
1155
1156
1157
1158
1159
1160

/**
 * Translate a Not.
 *
 * Create two logical Nots.
 */
1161
static void lower_Not(ir_node *node, ir_mode *mode)
1162
{
1163
1164
1165
1166
1167
	ir_node               *op       = get_Not_op(node);
	const lower64_entry_t *op_entry = get_node_entry(op);
	dbg_info              *dbgi     = get_irn_dbg_info(node);
	ir_node               *block    = get_nodes_block(node);
	ir_node               *res_low
1168
		= new_rd_Not(dbgi, block, op_entry->low_word, env->low_unsigned);
1169
	ir_node               *res_high