/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief       be transform helper extracted from the ia32 backend.
 * @author      Matthias Braun, Michael Beck
 * @date        14.06.2007
 */
#include "bearch.h"
13
14
#include "beirg.h"
#include "belive.h"
15
#include "benode.h"
16
#include "betranshlp.h"
17
#include "beutil.h"
18
19
20
21
#include "cgana.h"
#include "debug.h"
#include "execfreq_t.h"
#include "heights.h"
22
#include "irargs_t.h"
23
#include "ircons_t.h"
24
#include "iredges_t.h"
25
26
27
28
29
30
31
32
33
34
#include "irgmod.h"
#include "irgraph_t.h"
#include "irgwalk.h"
#include "irhooks.h"
#include "irnodemap.h"
#include "irnode_t.h"
#include "irop_t.h"
#include "iropt_t.h"
#include "irouts.h"
#include "irtools.h"
35
#include "panic.h"
36
#include "pdeq.h"
37
#include "util.h"
38
#include "vrp.h"
39
40

typedef struct be_transform_env_t {
	pdeq *worklist;  /**< worklist of nodes that still need to be transformed */
} be_transform_env_t;

static be_transform_env_t env;

#ifndef NDEBUG
static void be_set_orig_node_rec(ir_node *const node, char const *const name)
{
	if (!is_Proj(node)) {
		char const **const orig = &be_get_info(node)->orig_node;
		if (*orig)
			return;
		*orig = name;
	}
	foreach_irn_in(node, i, in) {
		be_set_orig_node_rec(in, name);
	}
}

static void be_set_orig_node(ir_node *const new_node, ir_node const *const old_node)
{
	if (!is_Proj(old_node)) {
		ir_graph       *const irg  = get_irn_irg(old_node);
		struct obstack *const obst = be_get_be_obst(irg);
		lc_eoprintf(firm_get_arg_env(), obst, "%+F", old_node);
		obstack_1grow(obst, 0);
		char const *const name = (char const*)obstack_finish(obst);
		be_set_orig_node_rec(new_node, name);
	}
}
#endif

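/**
 * Remember @p new_node as the transformation result of @p old_node and mark
 * the old node as transformed.
 */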
void be_set_transformed_node(ir_node *old_node, ir_node *new_node)
{
	set_irn_link(old_node, new_node);
	mark_irn_visited(old_node);
#ifndef NDEBUG
	be_set_orig_node(new_node, old_node);
#endif
}

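/** Returns true if @p node has already been transformed. */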
bool be_is_transformed(const ir_node *node)
{
	return irn_visited(node);
}

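/**
 * Create the transformed Phi for @p node. The new Phi keeps the old operands
 * for now (they are rewired later by fix_loops(), because Phis may be part of
 * cycles) and gets the register requirement @p req on all inputs and on its
 * output.
 */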
87
88
ir_node *be_transform_phi(ir_node *node, const arch_register_req_t *req)
{
89
	ir_node  *block = be_transform_nodes_block(node);
90
	ir_graph *irg   = get_irn_irg(block);
91
92
93
94
	dbg_info *dbgi  = get_irn_dbg_info(node);

	/* phi nodes allow loops, so we use the old arguments for now
	 * and fix this later */
95
	ir_node **ins   = get_irn_in(node);
96
	int       arity = get_irn_arity(node);
97
	ir_mode  *mode  = req->cls->mode;
98
99
100
101
	ir_node  *phi   = new_ir_node(dbgi, irg, block, op_Phi, mode, arity, ins);
	copy_node_attr(irg, node, phi);

	backend_info_t *info = be_get_info(phi);
102
	info->in_reqs = be_allocate_in_reqs(irg, arity);
103
104
105
106
107
	for (int i = 0; i < arity; ++i) {
		info->in_reqs[i] = req;
	}

	arch_set_irn_register_req_out(phi, 0, req);
108
	be_enqueue_operands(node);
109
110
111
112

	return phi;
}

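/** Register @p func as the transformation function for nodes with opcode @p op. */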
void be_set_transform_function(ir_op *op, be_transform_func func)
{
	/* Shouldn't be assigned twice. */
	assert(!op->ops.generic);
	op->ops.generic = (op_func) func;
}

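/** Register @p func to transform Proj nodes whose predecessor has opcode @p op. */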
void be_set_transform_proj_function(ir_op *op, be_transform_func func)
{
	op->ops.generic1 = (op_func) func;
}

/**
 * Transform helper for blocks.
 */
static ir_node *transform_block(ir_node *node)
{
	ir_node *const block = exact_copy(node);
	block->node_nr = node->node_nr;

	/* put the preds in the worklist */
	be_enqueue_operands(node);

	return block;
}

static ir_node *transform_end(ir_node *node)
{
	/* Do not transform predecessors yet to keep the pre-transform
	 * phase from visiting the whole graph. */
	ir_node *const block   = be_transform_nodes_block(node);
	ir_node *const new_end = exact_copy(node);
	set_nodes_block(new_end, block);

	ir_graph *const irg = get_irn_irg(new_end);
	set_irg_end(irg, new_end);

	be_enqueue_operands(node);

	return new_end;
}

static ir_node *transform_proj(ir_node *node)
{
	ir_node *pred    = get_Proj_pred(node);
	ir_op   *pred_op = get_irn_op(pred);
	be_transform_func *proj_transform
		= (be_transform_func*)pred_op->ops.generic1;
	/* we should have a Proj transformer registered */
#ifdef DEBUG_libfirm
	if (!proj_transform) {
		unsigned const node_pn = get_Proj_num(node);
		if (is_Proj(pred)) {
			unsigned const pred_pn   = get_Proj_num(pred);
			ir_node *const pred_pred = get_Proj_pred(pred);
			panic("no transformer for %+F (%u) -> %+F (%u) -> %+F", node, node_pn, pred, pred_pn, pred_pred);
		} else {
			panic("no transformer for %+F (%u) -> %+F", node, node_pn, pred);
		}
	}
#endif
	return proj_transform(node);
}

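/**
 * Transform a Proj of an ASM node: the memory Proj becomes the last output of
 * the transformed ASM node, all other Projs keep their number.
 */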
static ir_node *transform_Proj_ASM(ir_node *const node)
{
	ir_node *const pred     = get_Proj_pred(node);
	ir_node *const new_pred = be_transform_node(pred);
	ir_mode *const mode     = get_irn_mode(node);
	unsigned const num      = mode == mode_M ? arch_get_irn_n_outs(new_pred) - 1
	                                         : get_Proj_num(node);
	return be_new_Proj(new_pred, num);
}

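/**
 * Duplicate @p node for the transformed graph: transform all operands and the
 * block and create a node of the same kind with the new operands.
 */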
ir_node *be_duplicate_node(ir_node *const node)
{
	int       const arity = get_irn_arity(node);
	ir_node **const ins   = ALLOCAN(ir_node*, arity);
	foreach_irn_in(node, i, in) {
		ins[i] = be_transform_node(in);
	}

	ir_node *const block    = be_transform_nodes_block(node);
	ir_node *const new_node = new_similar_node(node, block, ins);

	new_node->node_nr = node->node_nr;
	return new_node;
}

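/**
 * Transform @p node (unless it was transformed already) by calling the
 * transformation function registered for its opcode; the result is cached.
 */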
ir_node *be_transform_node(ir_node *node)
{
	ir_node *new_node;
	if (be_is_transformed(node)) {
		new_node = (ir_node*)get_irn_link(node);
	} else {
#ifdef DEBUG_libfirm
		set_irn_link(node, NULL);
#endif
		mark_irn_visited(node);

		ir_op             *const op        = get_irn_op(node);
		be_transform_func *const transform = (be_transform_func*)op->ops.generic;
#ifdef DEBUG_libfirm
		if (!transform)
			panic("no transformer for %+F", node);
#endif

		new_node = transform(node);
		be_set_transformed_node(node, new_node);
	}
	assert(new_node);
	return new_node;
}

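/** Returns the transformed block of @p node. */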
ir_node *be_transform_nodes_block(ir_node const *const node)
{
	ir_node *const block = get_nodes_block(node);
	return be_transform_node(block);
}

void be_enqueue_operands(ir_node *node)
{
	/* put the preds in the worklist */
	foreach_irn_in(node, i, pred) {
		pdeq_putr(env.worklist, pred);
	}
}

/**
 * Rewire nodes which are potential loops (like Phis) to avoid endless loops.
 */
static void fix_loops(ir_node *node)
{
	if (irn_visited_else_mark(node))
		return;

	bool changed = false;
	if (! is_Block(node)) {
		ir_node *block     = get_nodes_block(node);
		ir_node *new_block = (ir_node*)get_irn_link(block);

		if (new_block != NULL) {
			set_nodes_block(node, new_block);
			block = new_block;
			changed = true;
		}

		fix_loops(block);
	}

	foreach_irn_in(node, i, pred) {
		ir_node *in = pred;
		ir_node *nw = (ir_node*)get_irn_link(in);

		if (nw != NULL && nw != in) {
			set_irn_n(node, i, nw);
			in = nw;
			changed = true;
		}

		fix_loops(in);
	}

	if (changed) {
		identify_remember(node);
	}
}

/**
 * Transforms all nodes of the graph, starting from the anchors.
 */
static void transform_nodes(ir_graph *irg, arch_pretrans_nodes *pre_transform)
{
	inc_irg_visited(irg);

	env.worklist = new_pdeq();

	ir_node *const old_anchor = irg->anchor;
	ir_node *const new_anchor = new_r_Anchor(irg);
	ir_node *const old_end    = get_irg_end(irg);
	irg->anchor = new_anchor;

	/* Pre-transform all anchors (so they are available in the other transform
	 * functions) and put them into the worklist. */
	foreach_irn_in(old_anchor, i, old) {
		ir_node *const nw = be_transform_node(old);
		set_irn_n(new_anchor, i, nw);
	}

	if (pre_transform)
		pre_transform(irg);

	/* process worklist (this should transform all nodes in the graph) */
	while (!pdeq_empty(env.worklist)) {
		ir_node *node = (ir_node*)pdeq_getl(env.worklist);
		be_transform_node(node);
	}

	/* Fix loops. */
	inc_irg_visited(irg);
	foreach_irn_in_r(new_anchor, i, n) {
		fix_loops(n);
	}

	del_pdeq(env.worklist);
	free_End(old_end);
}

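/**
 * Drive the whole transformation: move the graph to a fresh obstack, reset
 * CSE and VRP information, transform all nodes and invalidate the analysis
 * information that is no longer valid afterwards.
 */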
void be_transform_graph(ir_graph *irg, arch_pretrans_nodes *func)
{
	/* create a new obstack */
	struct obstack old_obst = irg->obst;
	obstack_init(&irg->obst);
	irg->last_node_idx = 0;

	free_vrp_data(irg);

	/* create new value table for CSE */
	new_identities(irg);

	/* do the main transformation */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	transform_nodes(irg, func);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

	/* free the old obstack */
	obstack_free(&old_obst, 0);

	/* most analysis info is wrong after transformation */
	be_invalidate_live_chk(irg);
	confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);

	/* recalculate edges */
	edges_activate(irg);
}

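/**
 * Returns true if the bits of @p node above the size of @p mode are known to
 * be a proper zero/sign extension of the value. Dispatches to the callback
 * registered via be_set_upper_bits_clean_function(); returns false if none is
 * registered for the opcode.
 */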
bool be_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	ir_op *op = get_irn_op(node);
	if (op->ops.generic2 == NULL)
		return false;
	upper_bits_clean_func func = (upper_bits_clean_func)op->ops.generic2;
	return func(node, mode);
}

static bool bit_binop_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	return be_upper_bits_clean(get_binop_left(node), mode)
	    && be_upper_bits_clean(get_binop_right(node), mode);
}

static bool mux_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	return be_upper_bits_clean(get_Mux_true(node), mode)
	    && be_upper_bits_clean(get_Mux_false(node), mode);
}

static bool and_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	if (!mode_is_signed(mode)) {
		return be_upper_bits_clean(get_And_left(node), mode)
		    || be_upper_bits_clean(get_And_right(node), mode);
	} else {
		return bit_binop_upper_bits_clean(node, mode);
	}
}

static bool shr_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	if (mode_is_signed(mode)) {
		return false;
	} else {
		const ir_node *right = get_Shr_right(node);
		if (is_Const(right)) {
			long const val = get_Const_long(right);
			if (val >= 32 - (long)get_mode_size_bits(mode))
				return true;
		}
		return be_upper_bits_clean(get_Shr_left(node), mode);
	}
}

static bool shrs_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	return be_upper_bits_clean(get_Shrs_left(node), mode);
}

static bool const_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	long const val = get_Const_long(node);
	if (mode_is_signed(mode)) {
		long    shifted = val >> (get_mode_size_bits(mode)-1);
		return shifted == 0 || shifted == -1;
	} else {
		unsigned long shifted = (unsigned long)val;
		shifted >>= get_mode_size_bits(mode)-1;
		shifted >>= 1;
		return shifted == 0;
	}
}

static bool conv_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	ir_mode       *dest_mode = get_irn_mode(node);
	const ir_node *op        = get_Conv_op(node);
	ir_mode       *src_mode  = get_irn_mode(op);
	if (mode_is_float(src_mode))
		return true;

	unsigned src_bits  = get_mode_size_bits(src_mode);
	unsigned dest_bits = get_mode_size_bits(dest_mode);
	/* downconvs are a nop */
	if (src_bits >= dest_bits)
		return be_upper_bits_clean(op, mode);
	/* upconvs are fine if src is big enough or if sign matches */
	if (src_bits <= get_mode_size_bits(mode)
		&& mode_is_signed(src_mode) == mode_is_signed(mode))
		return true;
	return false;
}

static bool proj_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	const ir_node *pred = get_Proj_pred(node);
	switch (get_irn_opcode(pred)) {
	case iro_Load: {
		ir_mode *load_mode = get_Load_mode(pred);
		unsigned load_bits = get_mode_size_bits(load_mode);
		if (load_bits > get_mode_size_bits(mode))
			return false;
		if (mode_is_signed(load_mode) != mode_is_signed(mode))
			return false;
		return true;
	}
	default:
		break;
	}
	return false;
}

void be_set_upper_bits_clean_function(ir_op *op, upper_bits_clean_func func)
{
	op->ops.generic2 = (op_func)func;
}

void be_start_transform_setup(void)
{
	ir_clear_opcodes_generic_func();

	be_set_transform_function(op_Block, transform_block);
	be_set_transform_function(op_End,   transform_end);
	be_set_transform_function(op_NoMem, be_duplicate_node);
	be_set_transform_function(op_Pin,   be_duplicate_node);
	be_set_transform_function(op_Proj,  transform_proj);
	be_set_transform_function(op_Sync,  be_duplicate_node);

	be_set_transform_proj_function(op_ASM, transform_Proj_ASM);

	be_set_upper_bits_clean_function(op_And,   and_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Const, const_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Conv,  conv_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Eor,   bit_binop_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Mux,   mux_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Or,    bit_binop_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Proj,  proj_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Shr,   shr_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Shrs,  shrs_upper_bits_clean);
}

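/**
 * Check whether @p irn_or (an Or or Add of two shifts) computes a rotate left
 * of the form (x << c) | (x >> (bits - c)). On success *left is set to x and
 * *right to the left-shift amount.
 */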
bool be_pattern_is_rotl(ir_node const *const irn_or, ir_node **const left,
                        ir_node **const right)
{
	assert(is_Add(irn_or) || is_Or(irn_or));

	ir_mode *mode = get_irn_mode(irn_or);
	if (!mode_is_int(mode))
		return false;

	ir_node *shl = get_binop_left(irn_or);
	ir_node *shr = get_binop_right(irn_or);
	if (is_Shr(shl)) {
		if (!is_Shl(shr))
			return false;

		ir_node *tmp = shl;
		shl = shr;
		shr = tmp;
	} else if (!is_Shl(shl)) {
		return false;
	} else if (!is_Shr(shr)) {
		return false;
	}

	ir_node *x = get_Shl_left(shl);
	if (x != get_Shr_left(shr))
		return false;

	ir_node *c1 = get_Shl_right(shl);
	ir_node *c2 = get_Shr_right(shr);
	if (is_Const(c1) && is_Const(c2)) {
		ir_tarval *tv1 = get_Const_tarval(c1);
		if (!tarval_is_long(tv1))
			return false;

		ir_tarval *tv2 = get_Const_tarval(c2);
		if (!tarval_is_long(tv2))
			return false;

		if (get_tarval_long(tv1) + get_tarval_long(tv2)
		    != (long) get_mode_size_bits(mode))
			return false;

		*left  = x;
		*right = c1;
		return true;
	}

	/* Note: the obvious rot formulation (a << x) | (a >> (32-x)) gets
	 * transformed to (a << x) | (a >> -x) by transform_node_shift_modulo() */
	if (!ir_is_negated_value(c1, c2))
		return false;

	*left  = x;
	*right = c1;
	return true;
}

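/**
 * Lower the (possibly exception-throwing) memory operation @p node to a call
 * of @p runtime_entity: the node's non-memory operands become the call
 * arguments and @p node is turned into a Tuple whose Projs (memory, result
 * and, if the node throws, X_regular/X_except) sit at the given positions.
 */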
void be_map_exc_node_to_runtime_call(ir_node *node, ir_mode *res_mode,
                                     ir_entity *runtime_entity,
                                     long pn_M, long pn_X_regular,
                                     long pn_X_except, long pn_res)
{
	assert(is_memop(node));

	size_t    n_in = get_irn_arity(node)-1;
	ir_node **in   = ALLOCAN(ir_node*, n_in);
	ir_type  *mtp  = get_entity_type(runtime_entity);

	assert(get_method_n_params(mtp) == n_in);
	size_t p = 0;
	foreach_irn_in(node, i, n) {
		if (get_irn_mode(n) == mode_M)
			continue;
		in[p++] = n;
	}
	assert(p == n_in);

	ir_graph *irg   = get_irn_irg(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *addr  = new_r_Address(irg, runtime_entity);
	ir_node  *block = get_nodes_block(node);
	ir_node  *mem   = get_memop_mem(node);
	ir_node  *call  = new_rd_Call(dbgi, block, mem, addr, n_in, in, mtp);
	set_irn_pinned(call, get_irn_pinned(node));
	int throws_exception = ir_throws_exception(node);
	ir_set_throws_exception(call, throws_exception);

	assert(pn_M < 2 && pn_res < 2 && pn_X_regular < 4 && pn_X_except < 4);
	int const         n_proj     = 4;
	int               n_operands = 2;
	ir_node   **const tuple_in   = ALLOCAN(ir_node*, n_proj);
	tuple_in[pn_M] = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	tuple_in[pn_res] = new_r_Proj(ress, res_mode, 0);
	if (throws_exception) {
		tuple_in[pn_X_regular]  = new_r_Proj(call, mode_X, pn_Call_X_regular);
		tuple_in[pn_X_except]   = new_r_Proj(call, mode_X, pn_Call_X_except);
		n_operands             += 2;
	}

	turn_into_tuple(node, n_operands, tuple_in);
}

static ir_heights_t *heights;

/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(const ir_node *n1, const ir_node *n2)
{
	assert(get_nodes_block(n1) == get_nodes_block(n2));
	return heights_reachable_in_block(heights, n1, n2);
}

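/**
 * One recorded stack-pointer change chain: @c before consumes the incoming
 * stack value at input @c pos, @c after produces the new stack value (NULL if
 * the chain produces none).
 */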
struct be_stack_change_t {
	ir_node  *before;
	unsigned  pos;
	ir_node  *after;
};

/**
 * Classical qsort() comparison function behavior:
 *
 *  0 if both elements are equal, neither node depends on the other
 * +1 if first depends on second (first is greater)
 * -1 if second depends on first (second is greater)
 */
static int cmp_stack_dependency(const void *c1, const void *c2)
{
	be_stack_change_t const *const s1 = (be_stack_change_t const*)c1;
	be_stack_change_t const *const s2 = (be_stack_change_t const*)c2;

	/* Sort blockwise. */
	ir_node *const b1  = s1->before;
	ir_node *const b2  = s2->before;
	ir_node *const bl1 = get_nodes_block(b1);
	ir_node *const bl2 = get_nodes_block(b2);
	if (bl1 != bl2)
		return get_irn_idx(bl2) - get_irn_idx(bl1);

	/* If one change chain does not produce a new value, it must be the last. */
	ir_node *const n1 = s1->after;
	if (!n1)
		return 1;
	ir_node *const n2 = s2->after;
	if (!n2)
		return -1;

	/* If one change chain is data dependent on the other, it must come later.
	 * The after nodes cannot be dependent on each other, because they are unused.
	 * So compare after of one with before of the other. */
	if (dependent_on(n1, b2))
		return 1;
	if (dependent_on(n2, b1))
		return -1;

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable.
	 *
	 * Additionally, we need to respect transitive dependencies. Consider a
	 * Call a depending on Call b and an independent Call c.
	 * We MUST NOT order c > a and b > c. */
	unsigned h1 = get_irn_height(heights, b1);
	unsigned h2 = get_irn_height(heights, b2);
	if (h1 < h2)
		return 1;
	if (h1 > h2)
		return -1;
	/* Same height, so use a random (but stable) order */
	return get_irn_idx(n2) - get_irn_idx(n1);
}

void be_stack_init(be_stack_env_t *const env)
{
	env->changes = NEW_ARR_F(be_stack_change_t, 0);
}

void be_stack_record_chain(be_stack_env_t *const env, ir_node *const before, unsigned const pos, ir_node *const after)
{
	assert(!after || get_nodes_block(after) == get_nodes_block(before));

	be_stack_change_t const change = { before, pos, after };
	ARR_APP1(be_stack_change_t, env->changes, change);
	/* FIXME: This should not be necessary, but not keeping the so far unused
	 * stack nodes triggers problems with out edges, because they get deactivated
	 * before be_stack_finish() is called. It should suffice to keep the last
	 * stack producer per block in be_stack_finish(). */
	if (after)
		keep_alive(after);
}

void be_stack_finish(be_stack_env_t *const env)
{
	be_stack_change_t *const changes = env->changes;
	env->changes = NULL;

	unsigned const n_changes = ARR_LEN(changes);
	if (n_changes != 0) {
		/* Order the stack changes according to their data dependencies. */
		ir_graph *const irg = get_irn_irg(changes[0].before);
		heights = heights_new(irg);
		QSORT(changes, n_changes, cmp_stack_dependency);
		heights_free(heights);

		/* Wire the stack change chains within each block, i.e. connect before of
		 * each change to after of its predecessor. */
		ir_node *prev_block = NULL;
		for (unsigned n = n_changes; n-- != 0;) {
			be_stack_change_t const *const c     = &changes[n];
			ir_node                 *const block = get_nodes_block(c->before);
			if (block == prev_block)
				set_irn_n(c[1].before, c[1].pos, c[0].after);
			prev_block = block;
		}
	}

	DEL_ARR_F(changes);
}

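/**
 * Create Stores in the start block which spill the given parameter entities
 * from their argument Projs to the frame; the initial memory is rerouted
 * through the new Stores.
 */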
void be_add_parameter_entity_stores_list(ir_graph *irg, unsigned n_entities,
                                         ir_entity **entities)
{
	if (n_entities == 0)
		return;

	ir_node *const frame       = get_irg_frame(irg);
	ir_node *const initial_mem = get_irg_initial_mem(irg);
	ir_node *const start_block = get_irg_start_block(irg);
	ir_node *const args        = get_irg_args(irg);

	/* all parameter entities left in the frame type require stores.
	 * (The ones passed on the stack have been moved to the arg type) */
	ir_node *first_store = NULL;
	ir_node *mem         = initial_mem;
	for (unsigned i = 0; i < n_entities; ++i) {
		ir_entity *const entity = entities[i];
		ir_type   *const tp     = get_entity_type(entity);
		size_t     const arg    = get_entity_parameter_number(entity);
		ir_node   *const addr   = new_r_Member(start_block, frame, entity);

		if (entity->attr.parameter.is_lowered_doubleword) {
			ir_type *mt          = get_entity_type(get_irg_entity(irg));
			ir_type *param_type0 = get_method_param_type(mt, arg);
			ir_type *param_type1 = get_method_param_type(mt, arg + 1);
			ir_mode *m0          = get_type_mode(param_type0);
			ir_mode *m1          = get_type_mode(param_type1);
			ir_node *val0        = new_r_Proj(args, m0, arg);
			ir_node *val1        = new_r_Proj(args, m1, arg + 1);
			ir_node *store0      = new_r_Store(start_block, mem, addr, val0,
			                                   tp, cons_none);
			ir_node *mem0        = new_r_Proj(store0, mode_M, pn_Store_M);
			size_t   offset      = get_mode_size_bytes(m0);
			ir_mode *mode_ref    = get_irn_mode(addr);
			ir_mode *mode_offs   = get_reference_offset_mode(mode_ref);
			ir_node *cnst        = new_r_Const_long(irg, mode_offs, offset);
			ir_node *next_addr   = new_r_Add(start_block, addr, cnst);
			ir_node *store1      = new_r_Store(start_block, mem0, next_addr, val1,
			                                   tp, cons_none);
			mem = new_r_Proj(store1, mode_M, pn_Store_M);
			if (first_store == NULL)
				first_store = store0;

		} else {
			ir_mode *const mode  = is_compound_type(tp) ? mode_P
			                                            : get_type_mode(tp);
			ir_node *const val   = new_r_Proj(args, mode, arg);
			ir_node *const store = new_r_Store(start_block, mem, addr, val, tp,
			                                   cons_none);
			mem = new_r_Proj(store, mode_M, pn_Store_M);
			if (first_store == NULL)
				first_store = store;
		}
	}

	edges_reroute_except(initial_mem, mem, first_store);
	set_irg_initial_mem(irg, initial_mem);
}

void be_add_parameter_entity_stores(ir_graph *irg)
{
	ir_type    *function_type = get_entity_type(get_irg_entity(irg));
	unsigned    n_parameters  = get_method_n_params(function_type);
	ir_entity **need_stores   = XMALLOCN(ir_entity*, n_parameters);
	unsigned    n_need_stores = 0;
	ir_type    *type          = get_irg_frame_type(irg);


	/* Assume that all parameter entities without an explicit offset set need a
	 * store. */
	for (size_t i = 0, n = get_compound_n_members(type); i < n; ++i) {
		ir_entity *entity = get_compound_member(type, i);
		if (!is_parameter_entity(entity))
			continue;
		if (get_entity_offset(entity) != INVALID_OFFSET)
			continue;

		assert(n_need_stores < n_parameters);
		need_stores[n_need_stores++] = entity;
	}
	be_add_parameter_entity_stores_list(irg, n_need_stores, need_stores);
	free(need_stores);
}

unsigned be_get_n_allocatable_regs(const ir_graph *irg,
                                   const arch_register_class_t *cls)
{
	unsigned *const bs = rbitset_alloca(cls->n_regs);
	be_get_allocatable_regs(irg, cls, bs);
	return rbitset_popcount(bs, cls->n_regs);
}

void be_get_allocatable_regs(ir_graph const *const irg,
                             arch_register_class_t const *const cls,
                             unsigned *const raw_bitset)
{
	be_irg_t *birg             = be_birg_from_irg(irg);
	unsigned *allocatable_regs = birg->allocatable_regs;

	rbitset_clear_all(raw_bitset, cls->n_regs);
	for (unsigned i = 0; i < cls->n_regs; ++i) {
		const arch_register_t *reg = &cls->regs[i];
		if (rbitset_is_set(allocatable_regs, reg->global_index))
			rbitset_set(raw_bitset, i);
	}
}

uint32_t be_get_tv_bits32(ir_tarval *const tv, unsigned const offset)
{
	uint32_t val;
	val  = (uint32_t)get_tarval_sub_bits(tv, offset);
	val |= (uint32_t)get_tarval_sub_bits(tv, offset + 1) <<  8;
	val |= (uint32_t)get_tarval_sub_bits(tv, offset + 2) << 16;
	val |= (uint32_t)get_tarval_sub_bits(tv, offset + 3) << 24;
	return val;
}

static bool mode_needs_gp_reg(ir_mode *const mode)
{
	return get_mode_arithmetic(mode) == irma_twos_complement;
}

ir_node *be_skip_downconv(ir_node *node, bool const single_user)
{
	assert(mode_needs_gp_reg(get_irn_mode(node)));
	for (;;) {
		if (single_user && get_irn_n_edges(node) > 1) {
			/* we only want to skip the conv when we're the only user
			 * (because this test is used in the context of address-mode selection
			 *  and we don't want to use address mode for multiple users) */
			break;
		} else if (is_Conv(node)) {
			ir_node *const op       = get_Conv_op(node);
			ir_mode *const src_mode = get_irn_mode(op);
			if (!mode_needs_gp_reg(src_mode) || get_mode_size_bits(get_irn_mode(node)) > get_mode_size_bits(src_mode))
				break;
			node = op;
		} else {
			break;
		}
	}
	return node;
}

ir_node *be_skip_sameconv(ir_node *node)
{
	assert(mode_needs_gp_reg(get_irn_mode(node)));
	for (;;) {
		if (get_irn_n_edges(node) > 1) {
			/* we only want to skip the conv when we're the only user
			 * (because this test is used in the context of address-mode selection
			 *  and we don't want to use address mode for multiple users) */
			break;
		} else if (is_Conv(node)) {
			ir_node *const op       = get_Conv_op(node);
			ir_mode *const src_mode = get_irn_mode(op);
			if (!mode_needs_gp_reg(src_mode) || get_mode_size_bits(get_irn_mode(node)) != get_mode_size_bits(src_mode))
				break;
			node = op;
		} else {
			break;
		}
	}
	return node;
}

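/**
 * Try to match @p node as an immediate operand: a Const, an Address, a
 * backend Relocation, or an Add of one of the latter two with a Const.
 * On success the matched tarval, entity and relocation kind are returned;
 * TLS entities are rejected.
 */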
bool be_match_immediate(ir_node const *const node, ir_tarval **const tarval_out,
                        ir_entity **const entity_out, unsigned *reloc_kind_out)
{
	unsigned         reloc_kind;
	ir_entity       *entity;
	ir_node   const *cnst;
	if (is_Const(node)) {
		entity     = NULL;
		cnst       = node;
		reloc_kind = 0;
	} else if (is_Address(node)) {
		entity     = get_Address_entity(node);
		cnst       = NULL;
		reloc_kind = 0;
	} else if (be_is_Relocation(node)) {
		entity     = be_get_Relocation_entity(node);
		cnst       = NULL;
		reloc_kind = be_get_Relocation_kind(node);
	} else if (is_Add(node)) {
		ir_node const *l = get_Add_left(node);
		ir_node const *r = get_Add_right(node);
		if (be_is_Relocation(r) || is_Address(r)) {
			ir_node const *tmp = l;
			l = r;
			r = tmp;
		}
		if (!is_Const(r))
			return false;
		cnst = r;
		if (is_Address(l)) {
			entity     = get_Address_entity(l);
			reloc_kind = 0;
		} else if (be_is_Relocation(l)) {
			entity     = be_get_Relocation_entity(l);
			reloc_kind = be_get_Relocation_kind(l);
		} else {
			return false;
		}
	} else {
		return false;
	}

	if (entity && is_tls_entity(entity))
		return false;

	*tarval_out     = cnst ? get_Const_tarval(cnst) : NULL;
	*entity_out     = entity;
	*reloc_kind_out = reloc_kind;
	return true;
}

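/** Create a Sync for the given inputs, or return the single input if @p arity is 1. */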
ir_node *be_make_Sync(ir_node *const block, int const arity, ir_node **const ins)
{
	return
		arity == 1 ? ins[0] :
		new_r_Sync(block, arity, ins);
}