lower_dw.c 94.9 KB
Newer Older
Christian Würdig's avatar
Christian Würdig committed
1
2
/*
 * This file is part of libFirm.
3
 * Copyright (C) 2012 University of Karlsruhe.
Christian Würdig's avatar
Christian Würdig committed
4
5
 */

Michael Beck's avatar
Michael Beck committed
6
7
/**
 * @file
yb9976's avatar
yb9976 committed
8
 * @brief   Lower double word operations, i.e. 64bit -> 32bit, 32bit -> 16bit etc.
Michael Beck's avatar
Michael Beck committed
9
10
 * @date    8.10.2004
 * @author  Michael Beck
11
 */
12
#include <string.h>
13
#include <stdlib.h>
14
#include <stdbool.h>
15
16
#include <assert.h>

yb9976's avatar
yb9976 committed
17
#include "be.h"
18
#include "error.h"
19
#include "lowering.h"
20
#include "irnode_t.h"
yb9976's avatar
yb9976 committed
21
#include "irnodeset.h"
22
23
24
25
26
27
28
29
30
31
32
33
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "irgmod.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "firmstat.h"
#include "irgwalk.h"
#include "ircons.h"
#include "irflag.h"
yb9976's avatar
yb9976 committed
34
#include "iroptimize.h"
35
36
37
38
#include "debug.h"
#include "set.h"
#include "pmap.h"
#include "pdeq.h"
Christian Würdig's avatar
Christian Würdig committed
39
#include "irdump.h"
40
#include "array_t.h"
41
#include "lower_dw.h"
42

Michael Beck's avatar
Michael Beck committed
43
/** A map from (op, imode, omode) to Intrinsic functions entities. */
44
45
static set *intrinsic_fkt;

Michael Beck's avatar
Michael Beck committed
46
47
48
/** A map from (imode, omode) to conv function types. */
static set *conv_types;

49
50
51
/** A map from a method type to its lowered type. */
static pmap *lowered_type;

yb9976's avatar
yb9976 committed
52
53
54
55
/** A map from a builtin type to its lower and higher type. */
static pmap *lowered_builtin_type_high;
static pmap *lowered_builtin_type_low;

56
/** The types for the binop and unop intrinsics. */
57
static ir_type *binop_tp_u, *binop_tp_s, *unop_tp_u, *unop_tp_s, *tp_s, *tp_u;
58

yb9976's avatar
yb9976 committed
59
60
static ir_nodeset_t created_mux_nodes;

61
62
63
64
/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/**
Michael Beck's avatar
Michael Beck committed
65
 * An entry in the (op, imode, omode) -> entity map.
66
 */
67
typedef struct op_mode_entry {
68
	const ir_op   *op;    /**< the op */
Michael Beck's avatar
Michael Beck committed
69
70
	const ir_mode *imode; /**< the input mode */
	const ir_mode *omode; /**< the output mode */
71
	ir_entity     *ent;   /**< the associated entity of this (op, imode, omode) triple */
72
73
} op_mode_entry_t;

Michael Beck's avatar
Michael Beck committed
74
75
76
/**
 * An entry in the (imode, omode) -> tp map.
 */
77
typedef struct conv_tp_entry {
Michael Beck's avatar
Michael Beck committed
78
79
80
81
82
	const ir_mode *imode; /**< the input mode */
	const ir_mode *omode; /**< the output mode */
	ir_type       *mtd;   /**< the associated method type of this (imode, omode) pair */
} conv_tp_entry_t;

83
enum lower_flags {
84
85
	MUST_BE_LOWERED = 1,  /**< graph must be lowered */
	CF_CHANGED      = 2,  /**< control flow was changed */
86
87
88
89
90
};

/**
 * The lower environment.
 */
91
92
typedef struct lower_dw_env_t {
	lower64_entry_t **entries;     /**< entries per node */
93
	ir_graph      *irg;
94
95
	struct obstack obst;           /**< an obstack holding the temporary data */
	ir_tarval *tv_mode_bytes;      /**< a tarval containing the number of bytes in the lowered modes */
Matthias Braun's avatar
Matthias Braun committed
96
97
98
99
100
101
102
103
	pdeq      *waitq;              /**< a wait queue of all nodes that must be handled later */
	ir_node  **lowered_phis;       /**< list of lowered phis */
	ir_mode   *high_signed;        /**< doubleword signed type */
	ir_mode   *high_unsigned;      /**< doubleword unsigned type */
	ir_mode   *low_signed;         /**< word signed type */
	ir_mode   *low_unsigned;       /**< word unsigned type */
	ident     *first_id;           /**< .l for little and .h for big endian */
	ident     *next_id;            /**< .h for little and .l for big endian */
104
105
106
107
	const lwrdw_param_t *params;   /**< transformation parameter */
	unsigned flags;                /**< some flags */
	unsigned n_entries;            /**< number of entries */
} lower_dw_env_t;
108

109
110
111
static lower_dw_env_t *env;

static void lower_node(ir_node *node);
112

Michael Beck's avatar
Michael Beck committed
113
114
115
/**
 * Create (or fetch the cached) method type for a Conv emulation
 * from imode to omode.
 *
 * A doubleword mode (env->high_signed / env->high_unsigned) is represented
 * by TWO word-sized parameters/results; the word order follows
 * env->params->little_endian (low word first on little endian).
 *
 * @param imode  input mode of the emulated Conv
 * @param omode  output mode of the emulated Conv
 * @return the (possibly shared) method type; cached in the conv_types set
 */
static ir_type *get_conv_type(ir_mode *imode, ir_mode *omode)
{
	conv_tp_entry_t key, *entry;
	ir_type *mtd;

	key.imode = imode;
	key.omode = omode;
	key.mtd   = NULL;

	/* set_insert returns the existing entry if (imode, omode) was seen before */
	entry = set_insert(conv_tp_entry_t, conv_types, &key, sizeof(key), hash_ptr(imode) ^ hash_ptr(omode));
	if (! entry->mtd) {
		/* doubleword operands/results occupy two word slots */
		int n_param = 1, n_res = 1;

		if (imode == env->high_signed || imode == env->high_unsigned)
			n_param = 2;
		if (omode == env->high_signed || omode == env->high_unsigned)
			n_res = 2;

		/* create a new one */
		mtd = new_type_method(n_param, n_res);

		/* set param types and result types */
		n_param = 0;
		if (imode == env->high_signed) {
			/* signed doubleword: the high word is signed, the low word unsigned */
			if (env->params->little_endian) {
				set_method_param_type(mtd, n_param++, tp_u);
				set_method_param_type(mtd, n_param++, tp_s);
			} else {
				set_method_param_type(mtd, n_param++, tp_s);
				set_method_param_type(mtd, n_param++, tp_u);
			}
		} else if (imode == env->high_unsigned) {
			set_method_param_type(mtd, n_param++, tp_u);
			set_method_param_type(mtd, n_param++, tp_u);
		} else {
			/* word-sized (or other) input: one parameter of its own type */
			ir_type *tp = get_type_for_mode(imode);
			set_method_param_type(mtd, n_param++, tp);
		}

		n_res = 0;
		if (omode == env->high_signed) {
			if (env->params->little_endian) {
				set_method_res_type(mtd, n_res++, tp_u);
				set_method_res_type(mtd, n_res++, tp_s);
			} else {
				set_method_res_type(mtd, n_res++, tp_s);
				set_method_res_type(mtd, n_res++, tp_u);
			}
		} else if (omode == env->high_unsigned) {
			set_method_res_type(mtd, n_res++, tp_u);
			set_method_res_type(mtd, n_res++, tp_u);
		} else {
			ir_type *tp = get_type_for_mode(omode);
			set_method_res_type(mtd, n_res++, tp);
		}
		entry->mtd = mtd;
	} else {
		mtd = entry->mtd;
	}
	return mtd;
}
Michael Beck's avatar
Michael Beck committed
177

178
179
180
181
182
183
184
/**
 * Add an additional control flow input to a block.
 * Patch all Phi nodes: the new Phi input is a copy of the existing
 * input number nr.
 *
 * @param block  the block receiving the new predecessor
 * @param nr     index of the existing input to duplicate in the Phis
 * @param cf     the new control flow predecessor
 */
static void add_block_cf_input_nr(ir_node *block, int nr, ir_node *cf)
{
	int       pos;
	int       n_preds = get_Block_n_cfgpreds(block);
	ir_node **in;

	assert(nr < n_preds);

	/* scratch array with room for the extra input (stack allocated) */
	NEW_ARR_A(ir_node *, in, n_preds + 1);

	/* append cf to the block's predecessor list */
	for (pos = 0; pos < n_preds; ++pos)
		in[pos] = get_Block_cfgpred(block, pos);
	in[pos] = cf;
	set_irn_in(block, pos + 1, in);

	/* every Phi of the block gets one more input, duplicated from input nr */
	foreach_out_edge(block, edge) {
		ir_node *phi = get_edge_src_irn(edge);
		if (!is_Phi(phi))
			continue;

		for (pos = 0; pos < n_preds; ++pos)
			in[pos] = get_irn_n(phi, pos);
		in[pos] = in[nr];
		set_irn_in(phi, pos + 1, in);
	}
}
208
209
210
211
212
213
214
215

/**
 * Add an additional control flow input to a block.
 * Patch all Phi nodes: the new Phi inputs are copied from the input
 * that corresponds to the control flow predecessor tmpl.
 *
 * @param block  the block receiving the new predecessor
 * @param tmpl   an existing control flow predecessor serving as template
 * @param cf     the new control flow predecessor
 */
static void add_block_cf_input(ir_node *block, ir_node *tmpl, ir_node *cf)
{
	int       pos;
	int const n_preds  = get_Block_n_cfgpreds(block);
	int       tmpl_pos = 0;

	/* find the input position of tmpl */
	for (pos = 0; pos < n_preds; ++pos) {
		if (get_Block_cfgpred(block, pos) == tmpl) {
			tmpl_pos = pos;
			break;
		}
	}
	assert(pos < n_preds);
	add_block_cf_input_nr(block, tmpl_pos, cf);
}
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246

/**
 * Return the "operational" mode of a Firm node: the mode of the value
 * the node actually works on, which for memory/compare operations differs
 * from the node's own mode (mode_T/mode_b etc.).
 */
static ir_mode *get_irn_op_mode(ir_node *node)
{
	if (is_Load(node))
		return get_Load_mode(node);
	if (is_Store(node))
		return get_irn_mode(get_Store_value(node));
	if (is_Div(node))
		return get_irn_mode(get_Div_left(node));
	if (is_Mod(node))
		return get_irn_mode(get_Mod_left(node));
	if (is_Cmp(node))
		return get_irn_mode(get_Cmp_left(node));
	return get_irn_mode(node);
}
249
250

/**
251
252
 * Walker, prepare the node links and determine which nodes need to be lowered
 * at all.
253
 */
254
static void prepare_links(ir_node *node)
255
{
256
257
	ir_mode         *mode = get_irn_op_mode(node);
	lower64_entry_t *link;
258

259
	if (mode == env->high_signed || mode == env->high_unsigned) {
260
		unsigned idx = get_irn_idx(node);
261
		/* ok, found a node that will be lowered */
262
		link = OALLOCZ(&env->obst, lower64_entry_t);
263

264
		if (idx >= env->n_entries) {
Michael Beck's avatar
Michael Beck committed
265
			/* enlarge: this happens only for Rotl nodes which is RARELY */
266
			unsigned old   = env->n_entries;
267
			unsigned n_idx = idx + (idx >> 3);
Michael Beck's avatar
Michael Beck committed
268

269
			ARR_RESIZE(lower64_entry_t *, env->entries, n_idx);
270
271
			memset(&env->entries[old], 0, (n_idx - old) * sizeof(env->entries[0]));
			env->n_entries = n_idx;
Michael Beck's avatar
Michael Beck committed
272
		}
273
274
		env->entries[idx] = link;
		env->flags |= MUST_BE_LOWERED;
275
	} else if (is_Conv(node)) {
276
277
278
279
		/* Conv nodes have two modes */
		ir_node *pred = get_Conv_op(node);
		mode = get_irn_mode(pred);

280
		if (mode == env->high_signed || mode == env->high_unsigned) {
Michael Beck's avatar
BugFix:    
Michael Beck committed
281
			/* must lower this node either but don't need a link */
282
			env->flags |= MUST_BE_LOWERED;
283
		}
284
		return;
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
	} else if (is_Call(node)) {
		/* Special case:  If the result of the Call is never used, we won't
		 * find a Proj with a mode that potentially triggers MUST_BE_LOWERED
		 * to be set.  Thus, if we see a call, we check its result types and
		 * decide whether MUST_BE_LOWERED has to be set.
		 */
		ir_type *tp = get_Call_type(node);
		size_t   n_res, i;

		n_res = get_method_n_ress(tp);
		for (i = 0; i < n_res; ++i) {
			ir_type *rtp = get_method_res_type(tp, i);

			if (is_Primitive_type(rtp)) {
				ir_mode *rmode = get_type_mode(rtp);

				if (rmode == env->high_signed || rmode == env->high_unsigned) {
					env->flags |= MUST_BE_LOWERED;
				}
			}
		}
306
307
	}
}
308

309
/** Return the lowering entry recorded for node by prepare_links(). */
lower64_entry_t *get_node_entry(ir_node *node)
{
	unsigned const index = get_irn_idx(node);
	assert(index < env->n_entries);
	return env->entries[index];
}

316
/** Record the two word-sized replacement values for the node old. */
void ir_set_dw_lowered(ir_node *old, ir_node *new_low, ir_node *new_high)
{
	lower64_entry_t *const entry = get_node_entry(old);
	entry->low_word  = new_low;
	entry->high_word = new_high;
}

323
324
325
326
327
/** Return the unsigned word mode that doubleword values are split into. */
ir_mode *ir_get_low_unsigned_mode(void)
{
	return env->low_unsigned;
}

328
329
330
/**
 * Translate a Constant: create two.
 */
331
static void lower_Const(ir_node *node, ir_mode *mode)
332
{
Matthias Braun's avatar
Matthias Braun committed
333
334
335
336
337
338
	ir_graph  *irg      = get_irn_irg(node);
	dbg_info  *dbg      = get_irn_dbg_info(node);
	ir_mode   *low_mode = env->low_unsigned;
	ir_tarval *tv       = get_Const_tarval(node);
	ir_tarval *tv_l     = tarval_convert_to(tv, low_mode);
	ir_node   *res_low  = new_rd_Const(dbg, irg, tv_l);
339
	ir_tarval *tv_shrs  = tarval_shrs_unsigned(tv, get_mode_size_bits(low_mode));
Matthias Braun's avatar
Matthias Braun committed
340
341
	ir_tarval *tv_h     = tarval_convert_to(tv_shrs, mode);
	ir_node   *res_high = new_rd_Const(dbg, irg, tv_h);
342

343
	ir_set_dw_lowered(node, res_low, res_high);
344
}
345
346
347
348

/**
 * Translate a Load: create two word-sized Loads.
 *
 * The second Load depends on the memory Proj of the first, so the memory
 * output of the pair is the second Load's; the exception output is taken
 * from the first.  Address order depends on endianness: on little endian
 * the low word lives at the original address, the high word one word above.
 *
 * @param node  the doubleword Load to lower
 * @param mode  the mode of the high word
 */
static void lower_Load(ir_node *node, ir_mode *mode)
{
	ir_mode    *low_mode = env->low_unsigned;
	ir_graph   *irg = get_irn_irg(node);
	ir_node    *adr = get_Load_ptr(node);
	ir_node    *mem = get_Load_mem(node);
	ir_node    *low;
	ir_node    *high;
	ir_node    *proj_m;
	dbg_info   *dbg;
	ir_node    *block = get_nodes_block(node);
	ir_cons_flags volatility = get_Load_volatility(node) == volatility_is_volatile
	                         ? cons_volatile : cons_none;

	/* compute the two word addresses (tv_mode_bytes = size of one word) */
	if (env->params->little_endian) {
		low  = adr;
		high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
	} else {
		low  = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
		high = adr;
	}

	/* create two loads */
	dbg    = get_irn_dbg_info(node);
	low    = new_rd_Load(dbg, block, mem,  low,  low_mode, volatility);
	proj_m = new_r_Proj(low, mode_M, pn_Load_M);
	high   = new_rd_Load(dbg, block, proj_m, high, mode, volatility);

	/* reroute the Projs of the original Load to the new pair */
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch (get_Proj_proj(proj)) {
		case pn_Load_M:         /* Memory result. */
			/* put it to the second one */
			set_Proj_pred(proj, high);
			break;
		case pn_Load_X_except:  /* Execution result if exception occurred. */
			/* put it to the first one */
			set_Proj_pred(proj, low);
			break;
		case pn_Load_res: {       /* Result of load operation. */
			ir_node *res_low  = new_r_Proj(low,  low_mode, pn_Load_res);
			ir_node *res_high = new_r_Proj(high, mode,     pn_Load_res);
			ir_set_dw_lowered(proj, res_low, res_high);
			break;
		}
		default:
			assert(0 && "unexpected Proj number");
		}
		/* mark this proj: we have handled it already, otherwise we might fall
		 * into out new nodes. */
		mark_irn_visited(proj);
	}
}
405
406
407
408

/**
 * Translate a Store: create two word-sized Stores.
 *
 * Requires the stored value to be lowered already; otherwise the node is
 * queued on env->waitq and retried later.  The second Store depends on the
 * memory Proj of the first; memory users are rerouted to the second Store,
 * exception users to the first.
 *
 * @param node  the doubleword Store to lower
 * @param mode  unused (kept for the uniform lowering-function signature)
 */
static void lower_Store(ir_node *node, ir_mode *mode)
{
	ir_graph              *irg;
	ir_node               *block, *adr, *mem;
	ir_node               *low, *high, *proj_m;
	dbg_info              *dbg;
	ir_node               *value = get_Store_value(node);
	const lower64_entry_t *entry = get_node_entry(value);
	ir_cons_flags volatility = get_Store_volatility(node) == volatility_is_volatile
	                           ? cons_volatile : cons_none;
	(void) mode;

	assert(entry);

	if (! entry->low_word) {
		/* not ready yet, wait */
		pdeq_putr(env->waitq, node);
		return;
	}

	irg = get_irn_irg(node);
	adr = get_Store_ptr(node);
	mem = get_Store_mem(node);
	block = get_nodes_block(node);

	/* compute the two word addresses (tv_mode_bytes = size of one word) */
	if (env->params->little_endian) {
		low  = adr;
		high = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
	} else {
		low  = new_r_Add(block, adr, new_r_Const(irg, env->tv_mode_bytes), get_irn_mode(adr));
		high = adr;
	}

	/* create two Stores */
	dbg    = get_irn_dbg_info(node);
	low    = new_rd_Store(dbg, block, mem, low,  entry->low_word, volatility);
	proj_m = new_r_Proj(low, mode_M, pn_Store_M);
	high   = new_rd_Store(dbg, block, proj_m, high, entry->high_word, volatility);

	/* reroute the Projs of the original Store to the new pair */
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch (get_Proj_proj(proj)) {
		case pn_Store_M:         /* Memory result. */
			/* put it to the second one */
			set_Proj_pred(proj, high);
			break;
		case pn_Store_X_except:  /* Execution result if exception occurred. */
			/* put it to the first one */
			set_Proj_pred(proj, low);
			break;
		default:
			assert(0 && "unexpected Proj number");
		}
		/* mark this proj: we have handled it already, otherwise we might fall into
		 * out new nodes. */
		mark_irn_visited(proj);
	}
}
470
471
472

/**
 * Return a node containing the address of the intrinsic emulation function.
 *
 * Entities are cached per (op, imode, omode) triple in the intrinsic_fkt set;
 * a new one is created on demand via the backend callback
 * env->params->create_intrinsic.
 *
 * @param method  the method type of the emulation function
 * @param op      the emulated ir_op
 * @param imode   the input mode of the emulated opcode
 * @param omode   the output mode of the emulated opcode
 * @return a SymConst referencing the entity
 */
static ir_node *get_intrinsic_address(ir_type *method, ir_op *op,
                                      ir_mode *imode, ir_mode *omode)
{
	symconst_symbol sym;
	ir_entity *ent;
	op_mode_entry_t key, *entry;

	key.op    = op;
	key.imode = imode;
	key.omode = omode;
	key.ent   = NULL;

	/* the omode hash is shifted so (op, a, b) and (op, b, a) differ */
	entry = set_insert(op_mode_entry_t, intrinsic_fkt, &key, sizeof(key),
				hash_ptr(op) ^ hash_ptr(imode) ^ (hash_ptr(omode) << 8));
	if (! entry->ent) {
		/* create a new one */
		ent = env->params->create_intrinsic(method, op, imode, omode, env->params->ctx);

		assert(ent && "Intrinsic creator must return an entity");
		entry->ent = ent;
	} else {
		ent = entry->ent;
	}
	sym.entity_p = ent;
	return new_r_SymConst(env->irg, mode_P_code, sym, symconst_addr_ent);
}
506
507
508
509
510
511

/**
 * Translate a Div.
 *
 * Create an intrinsic Call taking the four word halves of both operands
 * (word order per endianness) and reroute the Div's Projs to the Call.
 * The doubleword result is split via two Projs on the Call's result tuple.
 *
 * @param node  the doubleword Div to lower
 * @param mode  the mode of the high word of the result
 */
static void lower_Div(ir_node *node, ir_mode *mode)
{
	ir_node  *left   = get_Div_left(node);
	ir_node  *right  = get_Div_right(node);
	ir_node  *block  = get_nodes_block(node);
	dbg_info *dbgi   = get_irn_dbg_info(node);
	ir_type  *mtp    = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
	ir_mode  *opmode = get_irn_op_mode(node);
	ir_node  *addr   = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
	ir_node  *in[4];
	ir_node  *call;
	ir_node  *resproj;

	/* argument order: (left, right), each as (low, high) on little endian,
	 * (high, low) on big endian */
	if (env->params->little_endian) {
		in[0] = get_lowered_low(left);
		in[1] = get_lowered_high(left);
		in[2] = get_lowered_low(right);
		in[3] = get_lowered_high(right);
	} else {
		in[0] = get_lowered_high(left);
		in[1] = get_lowered_low(left);
		in[2] = get_lowered_high(right);
		in[3] = get_lowered_low(right);
	}
	call    = new_rd_Call(dbgi, block, get_Div_mem(node), addr, 4, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
	set_irn_pinned(call, get_irn_pinned(node));

	/* reroute all Projs of the Div to the corresponding Call Projs */
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

		switch (get_Proj_proj(proj)) {
		case pn_Div_M:         /* Memory result. */
			/* reroute to the call */
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_M);
			break;
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_regular);
			break;
		case pn_Div_X_except:
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_except);
			break;
		case pn_Div_res:
			/* result word order mirrors the argument word order */
			if (env->params->little_endian) {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
				ir_node *res_high = new_r_Proj(resproj, mode,              1);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
				ir_node *res_high = new_r_Proj(resproj, mode,              0);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
			break;
		default:
			assert(0 && "unexpected Proj number");
		}
		/* mark this proj: we have handled it already, otherwise we might fall into
		 * out new nodes. */
		mark_irn_visited(proj);
	}
}
578
579
580
581
582
583

/**
 * Translate a Mod.
 *
 * Create an intrinsic Call.
 */
584
static void lower_Mod(ir_node *node, ir_mode *mode)
585
{
586
587
588
589
590
591
592
593
594
595
	ir_node  *left   = get_Mod_left(node);
	ir_node  *right  = get_Mod_right(node);
	dbg_info *dbgi   = get_irn_dbg_info(node);
	ir_node  *block  = get_nodes_block(node);
	ir_type  *mtp    = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
	ir_mode  *opmode = get_irn_op_mode(node);
	ir_node  *addr   = get_intrinsic_address(mtp, get_irn_op(node), opmode, opmode);
	ir_node  *in[4];
	ir_node  *call;
	ir_node  *resproj;
596
597
598
599
600
601
602
603
604
605
606
607
608
609

	if (env->params->little_endian) {
		in[0] = get_lowered_low(left);
		in[1] = get_lowered_high(left);
		in[2] = get_lowered_low(right);
		in[3] = get_lowered_high(right);
	} else {
		in[0] = get_lowered_high(left);
		in[1] = get_lowered_low(left);
		in[2] = get_lowered_high(right);
		in[3] = get_lowered_low(right);
	}
	call    = new_rd_Call(dbgi, block, get_Mod_mem(node), addr, 4, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
610
611
	set_irn_pinned(call, get_irn_pinned(node));

612
	foreach_out_edge_safe(node, edge) {
613
614
615
616
		ir_node *proj = get_edge_src_irn(edge);
		if (!is_Proj(proj))
			continue;

617
618
619
620
		switch (get_Proj_proj(proj)) {
		case pn_Mod_M:         /* Memory result. */
			/* reroute to the call */
			set_Proj_pred(proj, call);
621
			set_Proj_proj(proj, pn_Call_M);
622
			break;
623
624
625
626
627
		case pn_Div_X_regular:
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_regular);
			break;
		case pn_Mod_X_except:
628
629
630
			set_Proj_pred(proj, call);
			set_Proj_proj(proj, pn_Call_X_except);
			break;
631
632
633
634
635
636
637
638
639
640
		case pn_Mod_res:
			if (env->params->little_endian) {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
				ir_node *res_high = new_r_Proj(resproj, mode,              1);
				ir_set_dw_lowered(proj, res_low, res_high);
			} else {
				ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
				ir_node *res_high = new_r_Proj(resproj, mode,              0);
				ir_set_dw_lowered(proj, res_low, res_high);
			}
641
642
643
			break;
		default:
			assert(0 && "unexpected Proj number");
644
		}
645
646
		/* mark this proj: we have handled it already, otherwise we might fall
		 * into out new nodes. */
647
		mark_irn_visited(proj);
648
649
	}
}
650
651

/**
652
 * Translate a binop.
653
654
655
 *
 * Create an intrinsic Call.
 */
656
static void lower_binop(ir_node *node, ir_mode *mode)
657
{
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
	ir_node  *left  = get_binop_left(node);
	ir_node  *right = get_binop_right(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(block);
	ir_type  *mtp   = mode_is_signed(mode) ? binop_tp_s : binop_tp_u;
	ir_node  *addr  = get_intrinsic_address(mtp, get_irn_op(node), mode, mode);
	ir_node  *in[4];
	ir_node  *call;
	ir_node  *resproj;

	if (env->params->little_endian) {
		in[0] = get_lowered_low(left);
		in[1] = get_lowered_high(left);
		in[2] = get_lowered_low(right);
		in[3] = get_lowered_high(right);
	} else {
		in[0] = get_lowered_high(left);
		in[1] = get_lowered_low(left);
		in[2] = get_lowered_high(right);
		in[3] = get_lowered_low(right);
	}
	call    = new_rd_Call(dbgi, block, get_irg_no_mem(irg), addr, 4, in, mtp);
	resproj = new_r_Proj(call, mode_T, pn_Call_T_result);
682
	set_irn_pinned(call, get_irn_pinned(node));
683
684
685
686
687
688
689
690
691
692

	if (env->params->little_endian) {
		ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 0);
		ir_node *res_high = new_r_Proj(resproj, mode,              1);
		ir_set_dw_lowered(node, res_low, res_high);
	} else {
		ir_node *res_low  = new_r_Proj(resproj, env->low_unsigned, 1);
		ir_node *res_high = new_r_Proj(resproj, mode,              0);
		ir_set_dw_lowered(node, res_low, res_high);
	}
693
}
694

695
/** Return node converted to dest_mode, reusing node itself when the mode
 *  already matches. */
static ir_node *create_conv(ir_node *block, ir_node *node, ir_mode *dest_mode)
{
	ir_mode *mode = get_irn_mode(node);
	return mode == dest_mode ? node : new_r_Conv(block, node, dest_mode);
}
Christian Würdig's avatar
Christian Würdig committed
701

Michael Beck's avatar
Michael Beck committed
702
/**
 * Moves node and all predecessors of node from from_bl to to_bl.
 * Does not move predecessors of Phi nodes (or block nodes).
 *
 * Also follows the implicit data dependencies through the lowering map:
 * if a predecessor has a doubleword mode, its already-created low/high
 * replacement words are moved as well (they are not reachable as explicit
 * predecessors while lowering is still in progress).
 */
static void move(ir_node *node, ir_node *from_bl, ir_node *to_bl)
{
	int i, arity;

	/* move this node */
	set_nodes_block(node, to_bl);

	/* move its Projs */
	if (get_irn_mode(node) == mode_T) {
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			if (!is_Proj(proj))
				continue;
			move(proj, from_bl, to_bl);
		}
	}

	/* We must not move predecessors of Phi nodes, even if they are in
	 * from_bl. (because these are values from an earlier loop iteration
	 * which are not predecessors of node here)
	 */
	if (is_Phi(node))
		return;

	/* recursion: pull every predecessor still sitting in from_bl along */
	arity = get_irn_arity(node);
	for (i = 0; i < arity; i++) {
		ir_node *pred      = get_irn_n(node, i);
		ir_mode *pred_mode = get_irn_mode(pred);
		if (get_nodes_block(pred) == from_bl)
			move(pred, from_bl, to_bl);
		if (pred_mode == env->high_signed || pred_mode == env->high_unsigned) {
			/* implicit dependency: the lowered words of pred (may not be
			 * referenced by any explicit edge yet) */
			ir_node *pred_low  = get_lowered_low(pred);
			ir_node *pred_high = get_lowered_high(pred);
			if (get_nodes_block(pred_low) == from_bl)
				move(pred_low, from_bl, to_bl);
			if (pred_high != NULL && get_nodes_block(pred_high) == from_bl)
				move(pred_high, from_bl, to_bl);
		}
	}
}
Michael Beck's avatar
Michael Beck committed
747
748

/**
 * Split the block of node: a new block takes over all control flow
 * predecessors and Phis of the old block, node and its predecessors are
 * moved into the new block, and the (now predecessor-less) old block is
 * returned so the caller can wire new control flow into it.
 *
 * We need a custom version of part_block_edges because during transformation
 * not all data-dependencies are explicit yet if a lowered nodes users are not
 * lowered yet.
 * We can fix this by modifying move to look for such implicit dependencies.
 */
static ir_node *part_block_dw(ir_node *node)
{
	ir_graph *irg        = get_irn_irg(node);
	ir_node  *old_block  = get_nodes_block(node);
	int       n_cfgpreds = get_Block_n_cfgpreds(old_block);
	ir_node **cfgpreds   = get_Block_cfgpred_arr(old_block);
	ir_node  *new_block  = new_r_Block(irg, n_cfgpreds, cfgpreds);

	/* old_block has no predecessors anymore for now */
	set_irn_in(old_block, 0, NULL);

	/* move node and its predecessors to new_block */
	move(node, old_block, new_block);

	/* move Phi nodes to new_block (they belong to the block that kept the
	 * control flow predecessors) */
	foreach_out_edge_safe(old_block, edge) {
		ir_node *phi = get_edge_src_irn(edge);
		if (!is_Phi(phi))
			continue;
		set_nodes_block(phi, new_block);
	}
	return old_block;
}
Michael Beck's avatar
Michael Beck committed
778

779
780
781
782
783
784
/** Constructor signature shared by new_rd_Shr and new_rd_Shrs, so the same
 *  helper can lower both logical and arithmetic right shifts. */
typedef ir_node* (*new_rd_shr_func)(dbg_info *dbgi, ir_node *block,
                                    ir_node *left, ir_node *right,
                                    ir_mode *mode);

/**
 * Lower a doubleword right shift (Shr or Shrs, selected by new_rd_shrs).
 *
 * Splits the control flow: a Cmp/Cond tests whether the shift amount is
 * less than the word width (the "modulo_shift2" bit of the amount is 0).
 *  - true branch (amount < word width): shift both words; the low result
 *    word receives the bits shifted out of the high word.
 *  - false branch (amount >= word width): the low word is the shifted high
 *    word; the high word is a sign-fill (Shrs) or zero (Shr).
 * The results are merged with two Phis in the (re-wired) lower block.
 *
 * Only works for two's-complement, power-of-2, modulo-shift target modes
 * (panics otherwise).  Sets CF_CHANGED since new blocks are created.
 */
static void lower_shr_helper(ir_node *node, ir_mode *mode,
                             new_rd_shr_func new_rd_shrs)
{
	ir_node  *right         = get_binop_right(node);
	ir_node  *left          = get_binop_left(node);
	ir_mode  *shr_mode      = get_irn_mode(node);
	unsigned  modulo_shift  = get_mode_modulo_shift(shr_mode);
	ir_mode  *low_unsigned  = env->low_unsigned;
	unsigned  modulo_shift2 = get_mode_modulo_shift(mode);
	ir_graph *irg           = get_irn_irg(node);
	ir_node  *left_low      = get_lowered_low(left);
	ir_node  *left_high     = get_lowered_high(left);
	dbg_info *dbgi          = get_irn_dbg_info(node);
	ir_node  *lower_block;
	ir_node  *block;
	ir_node  *cnst;
	ir_node  *andn;
	ir_node  *cmp;
	ir_node  *cond;
	ir_node  *proj_true;
	ir_node  *proj_false;
	ir_node  *phi_low;
	ir_node  *phi_high;
	ir_node  *lower_in[2];
	ir_node  *phi_low_in[2];
	ir_node  *phi_high_in[2];

	/* this version is optimized for modulo shift architectures
	 * (and can't handle anything else) */
	if (modulo_shift != get_mode_size_bits(shr_mode)
			|| modulo_shift2<<1 != modulo_shift) {
		panic("Shr lowering only implemented for modulo shift shr operations");
	}
	if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
		panic("Shr lowering only implemented for power-of-2 modes");
	}
	/* without 2-complement the -x instead of (bit_width-x) trick won't work */
	if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
		panic("Shr lowering only implemented for two-complement modes");
	}

	block = get_nodes_block(node);

	/* if the right operand is a 64bit value, we're only interested in the
	 * lower word */
	if (get_irn_mode(right) == env->high_unsigned) {
		right = get_lowered_low(right);
	} else {
		/* shift should never have signed mode on the right */
		assert(get_irn_mode(right) != env->high_signed);
		right = create_conv(block, right, low_unsigned);
	}

	/* split the block: "block" gets node's predecessors, "lower_block"
	 * will receive the merged result */
	lower_block = part_block_dw(node);
	env->flags |= CF_CHANGED;
	block = get_nodes_block(node);

	/* add a Cmp to test if highest bit is set <=> whether we shift more
	 * than half the word width */
	cnst       = new_r_Const_long(irg, low_unsigned, modulo_shift2);
	andn       = new_r_And(block, right, cnst, low_unsigned);
	cnst       = new_r_Const(irg, get_mode_null(low_unsigned));
	cmp        = new_rd_Cmp(dbgi, block, andn, cnst, ir_relation_equal);
	cond       = new_rd_Cond(dbgi, block, cmp);
	proj_true  = new_r_Proj(cond, mode_X, pn_Cond_true);
	proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);

	/* the true block => shift_width < 1word */
	{
		/* In theory the low value (for 64bit shifts) is:
		 *    Or(High << (32-x)), Low >> x)
		 * In practice High << 32-x will fail when x is zero (since we have
		 * modulo shift and 32 will be 0). So instead we use:
		 *    Or(High<<1<<~x, Low >> x)
		 */
		ir_node *in[1]        = { proj_true };
		ir_node *block_true   = new_r_Block(irg, ARRAY_SIZE(in), in);
		ir_node *res_high     = new_rd_shrs(dbgi, block_true, left_high,
		                                    right, mode);
		ir_node *shift_low    = new_rd_Shr(dbgi, block_true, left_low, right,
		                                   low_unsigned);
		ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
		                                   low_unsigned);
		ir_node *conv         = create_conv(block_true, left_high,
		                                    low_unsigned);
		ir_node *one          = new_r_Const(irg, get_mode_one(low_unsigned));
		ir_node *carry0       = new_rd_Shl(dbgi, block_true, conv, one,
		                                   low_unsigned);
		ir_node *carry1       = new_rd_Shl(dbgi, block_true, carry0,
		                                   not_shiftval, low_unsigned);
		ir_node *res_low      = new_rd_Or(dbgi, block_true, shift_low, carry1,
		                                  low_unsigned);
		lower_in[0]           = new_r_Jmp(block_true);
		phi_low_in[0]         = res_low;
		phi_high_in[0]        = res_high;
	}

	/* false block => shift_width > 1word */
	{
		ir_node *in[1]       = { proj_false };
		ir_node *block_false = new_r_Block(irg, ARRAY_SIZE(in), in);
		ir_node *conv        = create_conv(block_false, left_high, low_unsigned);
		ir_node *res_low     = new_rd_shrs(dbgi, block_false, conv, right,
		                                   low_unsigned);
		int      cnsti       = modulo_shift2-1;
		ir_node *cnst2       = new_r_Const_long(irg, low_unsigned, cnsti);
		ir_node *res_high;
		if (new_rd_shrs == new_rd_Shrs) {
			/* arithmetic shift: fill the high word with the sign bit */
			res_high = new_rd_shrs(dbgi, block_false, left_high, cnst2, mode);
		} else {
			/* logical shift: the high word becomes zero */
			res_high = new_r_Const(irg, get_mode_null(mode));
		}
		lower_in[1]          = new_r_Jmp(block_false);
		phi_low_in[1]        = res_low;
		phi_high_in[1]       = res_high;
	}

	/* patch lower block */
	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	phi_low  = new_r_Phi(lower_block, ARRAY_SIZE(phi_low_in), phi_low_in,
	                     low_unsigned);
	phi_high = new_r_Phi(lower_block, ARRAY_SIZE(phi_high_in), phi_high_in,
	                     mode);
	ir_set_dw_lowered(node, phi_low, phi_high);
}

/**
 * Lower a double-word Shr (logical shift right) node.
 *
 * Delegates to the generic shift-right lowering helper, passing
 * new_rd_Shr as the node constructor so the helper builds unsigned
 * (zero-filling) shifts.
 *
 * @param node  the Shr node to lower
 * @param mode  the mode used for the high word of the result
 */
static void lower_Shr(ir_node *node, ir_mode *mode)
{
	lower_shr_helper(node, mode, new_rd_Shr);
}

/**
 * Lower a double-word Shrs (arithmetic shift right) node.
 *
 * Delegates to the generic shift-right lowering helper, passing
 * new_rd_Shrs as the node constructor; the helper detects this
 * constructor and sign-extends the high word accordingly.
 *
 * @param node  the Shrs node to lower
 * @param mode  the mode used for the high word of the result
 */
static void lower_Shrs(ir_node *node, ir_mode *mode)
{
	lower_shr_helper(node, mode, new_rd_Shrs);
}

static void lower_Shl(ir_node *node, ir_mode *mode)
{
	ir_node  *right         = get_binop_right(node);
	ir_node  *left          = get_binop_left(node);
	ir_mode  *shr_mode      = get_irn_mode(node);
	unsigned  modulo_shift  = get_mode_modulo_shift(shr_mode);
	ir_mode  *low_unsigned  = env->low_unsigned;
	unsigned  modulo_shift2 = get_mode_modulo_shift(mode);
	ir_graph *irg           = get_irn_irg(node);
	ir_node  *left_low      = get_lowered_low(left);
	ir_node  *left_high     = get_lowered_high(left);
	dbg_info *dbgi          = get_irn_dbg_info(node);
	ir_node  *lower_block   = get_nodes_block(node);
	ir_node  *block;
	ir_node  *cnst;
	ir_node  *andn;
	ir_node  *cmp;
	ir_node  *cond;
	ir_node  *proj_true;
	ir_node  *proj_false;
	ir_node  *phi_low;
	ir_node  *phi_high;
	ir_node  *lower_in[2];
	ir_node  *phi_low_in[2];
	ir_node  *phi_high_in[2];

	/* this version is optimized for modulo shift architectures
	 * (and can't handle anything else) */
	if (modulo_shift != get_mode_size_bits(shr_mode)
			|| modulo_shift2<<1 != modulo_shift) {
		panic("Shl lowering only implemented for modulo shift shl operations");
	}
	if (!is_po2(modulo_shift) || !is_po2(modulo_shift2)) {
		panic("Shl lowering only implemented for power-of-2 modes");
	}
	/* without 2-complement the -x instead of (bit_width-x) trick won't work */
	if (get_mode_arithmetic(shr_mode) != irma_twos_complement) {
		panic("Shl lowering only implemented for two-complement modes");
	}

	/* if the right operand is a 64bit value, we're only interested in the
	 * lower word */
	if (get_irn_mode(right) == env->high_unsigned) {
		right = get_lowered_low(right);
	} else {
		/* shift should never have signed mode on the right */
		assert(get_irn_mode(right) != env->high_signed);
		right = create_conv(lower_block, right, low_unsigned);
	}

	part_block_dw(node);
	env->flags |= CF_CHANGED;
	block = get_nodes_block(node);

	/* add a Cmp to test if highest bit is set <=> whether we shift more
	 * than half the word width */
	cnst       = new_r_Const_long(irg, low_unsigned, modulo_shift2);
	andn       = new_r_And(block, right, cnst, low_unsigned);
	cnst       = new_r_Const(irg, get_mode_null(low_unsigned));
	cmp        = new_rd_Cmp(dbgi, block, andn, cnst, ir_relation_equal);
	cond       = new_rd_Cond(dbgi, block, cmp);
	proj_true  = new_r_Proj(cond, mode_X, pn_Cond_true);
	proj_false = new_r_Proj(cond, mode_X, pn_Cond_false);

	/* the true block => shift_width < 1word */
	{
		ir_node *in[1]        = { proj_true };
		ir_node *block_true   = new_r_Block(irg, ARRAY_SIZE(in), in);

		ir_node *res_low      = new_rd_Shl(dbgi, block_true, left_low,
		                                   right, low_unsigned);
		ir_node *shift_high   = new_rd_Shl(dbgi, block_true, left_high, right,
		                                   mode);
		ir_node *not_shiftval = new_rd_Not(dbgi, block_true, right,
		                                   low_unsigned);
		ir_node *conv         = create_conv(block_true, left_low, mode);
		ir_node *one          = new_r_Const(irg, get_mode_one(low_unsigned));
		ir_node *carry0       = new_rd_Shr(dbgi, block_true, conv, one, mode);
		ir_node *carry1       = new_rd_Shr(dbgi, block_true, carry0,
		                                   not_shiftval, mode);
		ir_node *res_high     = new_rd_Or(dbgi, block_true, shift_high, carry1,
		                                  mode);