/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief       be transform helper extracted from the ia32 backend.
 * @author      Matthias Braun, Michael Beck
 * @date        14.06.2007
 */
#include "bearch.h"
#include "beirg.h"
#include "belive.h"
#include "benode.h"
#include "betranshlp.h"
#include "beutil.h"
#include "cgana.h"
#include "debug.h"
#include "execfreq_t.h"
#include "heights.h"
#include "ircons_t.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgraph_t.h"
#include "irgwalk.h"
#include "irhooks.h"
#include "irnodemap.h"
#include "irnode_t.h"
#include "irop_t.h"
#include "iropt_t.h"
#include "irouts.h"
#include "irtools.h"
#include "panic.h"
#include "pdeq.h"
#include "util.h"
#include "vrp.h"

typedef struct be_transform_env_t {
	pdeq *worklist;  /**< worklist of nodes that still need to be transformed */
} be_transform_env_t;

static be_transform_env_t env;

void be_set_transformed_node(ir_node *old_node, ir_node *new_node)
{
	set_irn_link(old_node, new_node);
	mark_irn_visited(old_node);
}

bool be_is_transformed(const ir_node *node)
{
	return irn_visited(node);
}

ir_node *be_transform_phi(ir_node *node, const arch_register_req_t *req)
{
	ir_node  *block = be_transform_nodes_block(node);
	ir_graph *irg   = get_irn_irg(block);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	/* phi nodes allow loops, so we use the old arguments for now
	 * and fix this later */
	ir_node **ins   = get_irn_in(node)+1;
	int       arity = get_irn_arity(node);
	ir_mode  *mode  = req->cls->mode;
	ir_node  *phi   = new_ir_node(dbgi, irg, block, op_Phi, mode, arity, ins);
	copy_node_attr(irg, node, phi);

	backend_info_t *info = be_get_info(phi);
	info->in_reqs = be_allocate_in_reqs(irg, arity);
	for (int i = 0; i < arity; ++i) {
		info->in_reqs[i] = req;
	}

	arch_set_irn_register_req_out(phi, 0, req);
	be_enqueue_preds(node);

	return phi;
}

void be_set_transform_function(ir_op *op, be_transform_func func)
{
	/* Shouldn't be assigned twice. */
	assert(!op->ops.generic);
	op->ops.generic = (op_func) func;
}

void be_set_transform_proj_function(ir_op *op, be_transform_func func)
{
	op->ops.generic1 = (op_func) func;
}

/**
 * Transform helper for blocks.
 */
static ir_node *transform_block(ir_node *node)
{
	ir_node *const block = exact_copy(node);
	block->node_nr = node->node_nr;

	/* put the preds in the worklist */
	be_enqueue_preds(node);

	return block;
}

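/**
 * Transform helper for the End node.
 */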
static ir_node *transform_end(ir_node *node)
{
	/* Do not transform the predecessors yet, to keep the pre-transform
	 * phase from visiting the whole graph. */
	ir_node *const block   = be_transform_nodes_block(node);
	ir_node *const new_end = exact_copy(node);
	set_nodes_block(new_end, block);

	ir_graph *const irg = get_irn_irg(new_end);
	set_irg_end(irg, new_end);

	be_enqueue_preds(node);

	return new_end;
}

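/**
 * Transform helper for Proj nodes: dispatches to the Proj transform function
 * registered for the opcode of the Proj's predecessor (ops.generic1).
 */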
static ir_node *transform_proj(ir_node *node)
{
	ir_node *pred    = get_Proj_pred(node);
	ir_op   *pred_op = get_irn_op(pred);
	be_transform_func *proj_transform
		= (be_transform_func*)pred_op->ops.generic1;
	/* we should have a Proj transformer registered */
#ifdef DEBUG_libfirm
	if (!proj_transform) {
		unsigned const node_pn = get_Proj_num(node);
		if (is_Proj(pred)) {
			unsigned const pred_pn   = get_Proj_num(pred);
			ir_node *const pred_pred = get_Proj_pred(pred);
			panic("no transformer for %+F (%u) -> %+F (%u) -> %+F", node, node_pn, pred, pred_pn, pred_pred);
		} else {
			panic("no transformer for %+F (%u) -> %+F", node, node_pn, pred);
		}
	}
#endif
	return proj_transform(node);
}

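/**
 * Transform a Proj of an ASM node. The memory Proj is mapped to the last
 * output of the transformed ASM node, all other Projs keep their number.
 */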
static ir_node *transform_Proj_ASM(ir_node *const node)
{
	ir_node *const pred     = get_Proj_pred(node);
	ir_node *const new_pred = be_transform_node(pred);
	ir_mode *const mode     = get_irn_mode(node);
	unsigned const num      = mode == mode_M
		? arch_get_irn_n_outs(new_pred) - 1
		: get_Proj_num(node);
	return be_new_Proj(new_pred, num);
}

ir_node *be_duplicate_node(ir_node *const node)
{
	int       const arity = get_irn_arity(node);
	ir_node **const ins   = ALLOCAN(ir_node*, arity);
	foreach_irn_in(node, i, in) {
		ins[i] = be_transform_node(in);
	}

	ir_node *const block    = be_transform_nodes_block(node);
	ir_node *const new_node = new_similar_node(node, block, ins);

	new_node->node_nr = node->node_nr;
	return new_node;
}

ir_node *be_transform_node(ir_node *node)
{
	ir_node *new_node;
	if (be_is_transformed(node)) {
		new_node = (ir_node*)get_irn_link(node);
	} else {
#ifdef DEBUG_libfirm
		be_set_transformed_node(node, NULL);
#else
		mark_irn_visited(node);
#endif

		ir_op             *const op        = get_irn_op(node);
		be_transform_func *const transform = (be_transform_func*)op->ops.generic;
#ifdef DEBUG_libfirm
		if (!transform)
			panic("no transformer for %+F", node);
#endif

		new_node = transform(node);
		be_set_transformed_node(node, new_node);
	}
	assert(new_node);
	return new_node;
}

ir_node *be_transform_nodes_block(ir_node const *const node)
{
	ir_node *const block = get_nodes_block(node);
	return be_transform_node(block);
}

void be_enqueue_preds(ir_node *node)
{
	/* put the preds in the worklist */
	foreach_irn_in(node, i, pred) {
		pdeq_putr(env.worklist, pred);
	}
}

/**
 * Rewire nodes which are potential loops (like Phis) to avoid endless loops.
 */
static void fix_loops(ir_node *node)
{
	if (irn_visited_else_mark(node))
		return;

	bool changed = false;
	if (! is_Block(node)) {
		ir_node *block     = get_nodes_block(node);
		ir_node *new_block = (ir_node*)get_irn_link(block);

		if (new_block != NULL) {
			set_nodes_block(node, new_block);
			block = new_block;
			changed = true;
		}

		fix_loops(block);
	}

	foreach_irn_in(node, i, pred) {
		ir_node *in = pred;
		ir_node *nw = (ir_node*)get_irn_link(in);

		if (nw != NULL && nw != in) {
			set_irn_n(node, i, nw);
			in = nw;
			changed = true;
		}

		fix_loops(in);
	}

	if (changed) {
		identify_remember(node);
	}
}

/**
 * Transforms all nodes. Deletes the old obstack and creates a new one.
 */
static void transform_nodes(ir_graph *irg, arch_pretrans_nodes *pre_transform)
{
	inc_irg_visited(irg);

	env.worklist = new_pdeq();

	ir_node *const old_anchor = irg->anchor;
	ir_node *const new_anchor = new_r_Anchor(irg);
	ir_node *const old_end    = get_irg_end(irg);
	irg->anchor = new_anchor;

	/* Pre-transform all anchors (so they are available in the other transform
	 * functions) and put them into the worklist. */
	foreach_irn_in(old_anchor, i, old) {
		ir_node *const nw = be_transform_node(old);
		set_irn_n(new_anchor, i, nw);
	}

	if (pre_transform)
		pre_transform(irg);

	/* process worklist (this should transform all nodes in the graph) */
	while (!pdeq_empty(env.worklist)) {
		ir_node *node = (ir_node*)pdeq_getl(env.worklist);
		be_transform_node(node);
	}

	/* Fix loops. */
	inc_irg_visited(irg);
	foreach_irn_in_r(new_anchor, i, n) {
		fix_loops(n);
	}

	del_pdeq(env.worklist);
	free_End(old_end);
}

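/*
 * A backend typically drives the transformation roughly like this (sketch;
 * gen_Add and my_pretransform are illustrative placeholder names, not
 * functions defined here):
 *
 *   be_start_transform_setup();                   // register generic transformers
 *   be_set_transform_function(op_Add, gen_Add);   // register backend specific ones
 *   be_transform_graph(irg, my_pretransform);     // rewrite the whole graph
 */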
void be_transform_graph(ir_graph *irg, arch_pretrans_nodes *func)
{
	/* create a new obstack */
	struct obstack old_obst = irg->obst;
	obstack_init(&irg->obst);
	irg->last_node_idx = 0;

	free_vrp_data(irg);

	/* create new value table for CSE */
	new_identities(irg);

	/* do the main transformation */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	transform_nodes(irg, func);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

	/* free the old obstack */
	obstack_free(&old_obst, 0);

	/* most analysis info is wrong after transformation */
	be_invalidate_live_chk(irg);
	confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);

	/* recalculate edges */
	edges_activate(irg);
}

bool be_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	ir_op *op = get_irn_op(node);
	if (op->ops.generic2 == NULL)
		return false;
	upper_bits_clean_func func = (upper_bits_clean_func)op->ops.generic2;
	return func(node, mode);
}

static bool bit_binop_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	return be_upper_bits_clean(get_binop_left(node), mode)
	    && be_upper_bits_clean(get_binop_right(node), mode);
}

static bool mux_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	return be_upper_bits_clean(get_Mux_true(node), mode)
	    && be_upper_bits_clean(get_Mux_false(node), mode);
}

static bool and_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	if (!mode_is_signed(mode)) {
		return be_upper_bits_clean(get_And_left(node), mode)
		    || be_upper_bits_clean(get_And_right(node), mode);
	} else {
		return bit_binop_upper_bits_clean(node, mode);
	}
}

static bool shr_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	if (mode_is_signed(mode)) {
		return false;
	} else {
		const ir_node *right = get_Shr_right(node);
		if (is_Const(right)) {
			long const val = get_Const_long(right);
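			/* If at least as many zero bits are shifted in as there are bits
			 * above the mode, the upper bits are clean. The literal 32 here
			 * apparently assumes a 32 bit register width. */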
			if (val >= 32 - (long)get_mode_size_bits(mode))
				return true;
		}
		return be_upper_bits_clean(get_Shr_left(node), mode);
	}
}

static bool shrs_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	return be_upper_bits_clean(get_Shrs_left(node), mode);
}

static bool const_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	long const val = get_Const_long(node);
	if (mode_is_signed(mode)) {
		long    shifted = val >> (get_mode_size_bits(mode)-1);
		return shifted == 0 || shifted == -1;
	} else {
		unsigned long shifted = (unsigned long)val;
		shifted >>= get_mode_size_bits(mode)-1;
		shifted >>= 1;
		return shifted == 0;
	}
}

static bool conv_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	ir_mode       *dest_mode = get_irn_mode(node);
	const ir_node *op        = get_Conv_op(node);
	ir_mode       *src_mode  = get_irn_mode(op);
	if (mode_is_float(src_mode))
		return true;

	unsigned src_bits  = get_mode_size_bits(src_mode);
	unsigned dest_bits = get_mode_size_bits(dest_mode);
	/* downconvs are a nop */
	if (src_bits >= dest_bits)
		return be_upper_bits_clean(op, mode);
	/* upconvs are fine if src is big enough or if sign matches */
	if (src_bits <= get_mode_size_bits(mode)
		&& mode_is_signed(src_mode) == mode_is_signed(mode))
		return true;
	return false;
}

static bool proj_upper_bits_clean(const ir_node *node, ir_mode *mode)
{
	const ir_node *pred = get_Proj_pred(node);
	switch (get_irn_opcode(pred)) {
	case iro_Load: {
		ir_mode *load_mode = get_Load_mode(pred);
		unsigned load_bits = get_mode_size_bits(load_mode);
		if (load_bits > get_mode_size_bits(mode))
			return false;
		if (mode_is_signed(load_mode) != mode_is_signed(mode))
			return false;
		return true;
	}
	default:
		break;
	}
	return false;
}

void be_set_upper_bits_clean_function(ir_op *op, upper_bits_clean_func func)
{
	op->ops.generic2 = (op_func)func;
}

void be_start_transform_setup(void)
{
	ir_clear_opcodes_generic_func();

	be_set_transform_function(op_Block, transform_block);
	be_set_transform_function(op_End,   transform_end);
	be_set_transform_function(op_NoMem, be_duplicate_node);
	be_set_transform_function(op_Pin,   be_duplicate_node);
	be_set_transform_function(op_Proj,  transform_proj);
	be_set_transform_function(op_Sync,  be_duplicate_node);

	be_set_transform_proj_function(op_ASM, transform_Proj_ASM);

	be_set_upper_bits_clean_function(op_And,   and_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Const, const_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Conv,  conv_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Eor,   bit_binop_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Mux,   mux_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Or,    bit_binop_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Proj,  proj_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Shr,   shr_upper_bits_clean);
	be_set_upper_bits_clean_function(op_Shrs,  shrs_upper_bits_clean);
}

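/**
 * Check whether @p irn_or (an Add or Or of a Shl and a Shr of the same value)
 * forms a rotate-left pattern. On success *left is set to the rotated value
 * and *right to the left-shift amount.
 */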
bool be_pattern_is_rotl(ir_node const *const irn_or, ir_node **const left,
                        ir_node **const right)
{
	assert(is_Add(irn_or) || is_Or(irn_or));

	ir_mode *mode = get_irn_mode(irn_or);
	if (!mode_is_int(mode))
		return false;

	ir_node *shl = get_binop_left(irn_or);
	ir_node *shr = get_binop_right(irn_or);
	if (is_Shr(shl)) {
		if (!is_Shl(shr))
			return false;

		ir_node *tmp = shl;
		shl = shr;
		shr = tmp;
	} else if (!is_Shl(shl)) {
		return false;
	} else if (!is_Shr(shr)) {
		return false;
	}

	ir_node *x = get_Shl_left(shl);
	if (x != get_Shr_left(shr))
		return false;

	ir_node *c1 = get_Shl_right(shl);
	ir_node *c2 = get_Shr_right(shr);
	if (is_Const(c1) && is_Const(c2)) {
		ir_tarval *tv1 = get_Const_tarval(c1);
		if (!tarval_is_long(tv1))
			return false;

		ir_tarval *tv2 = get_Const_tarval(c2);
		if (!tarval_is_long(tv2))
			return false;

		if (get_tarval_long(tv1) + get_tarval_long(tv2)
		    != (long) get_mode_size_bits(mode))
			return false;

		*left  = x;
		*right = c1;
		return true;
	}

	/* Note: the obvious rot formulation (a << x) | (a >> (32-x)) gets
	 * transformed to (a << x) | (a >> -x) by transform_node_shift_modulo() */
	if (!ir_is_negated_value(c1, c2))
		return false;

	*left  = x;
	*right = c1;
	return true;
}

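/**
 * Replace the memory operation @p node by a Call to @p runtime_entity and turn
 * @p node into a Tuple, so that its existing Projs with the given numbers
 * (@p pn_M, @p pn_res, @p pn_X_regular, @p pn_X_except) now select the
 * corresponding results of the Call.
 */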
void be_map_exc_node_to_runtime_call(ir_node *node, ir_mode *res_mode,
                                     ir_entity *runtime_entity,
                                     long pn_M, long pn_X_regular,
                                     long pn_X_except, long pn_res)
{
	assert(is_memop(node));

	size_t    n_in = get_irn_arity(node)-1;
	ir_node **in   = ALLOCAN(ir_node*, n_in);
	ir_type  *mtp  = get_entity_type(runtime_entity);

	assert(get_method_n_params(mtp) == n_in);
	size_t p = 0;
	foreach_irn_in(node, i, n) {
		if (get_irn_mode(n) == mode_M)
			continue;
		in[p++] = n;
	}
	assert(p == n_in);

	ir_graph *irg   = get_irn_irg(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *addr  = new_r_Address(irg, runtime_entity);
	ir_node  *block = get_nodes_block(node);
	ir_node  *mem   = get_memop_mem(node);
	ir_node  *call  = new_rd_Call(dbgi, block, mem, addr, n_in, in, mtp);
	set_irn_pinned(call, get_irn_pinned(node));
	int throws_exception = ir_throws_exception(node);
	ir_set_throws_exception(call, throws_exception);

	assert(pn_M < 2 && pn_res < 2 && pn_X_regular < 4 && pn_X_except < 4);
	int const         n_proj     = 4;
	int               n_operands = 2;
	ir_node   **const tuple_in   = ALLOCAN(ir_node*, n_proj);
	tuple_in[pn_M] = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *ress = new_r_Proj(call, mode_T, pn_Call_T_result);
	tuple_in[pn_res] = new_r_Proj(ress, res_mode, 0);
	if (throws_exception) {
		tuple_in[pn_X_regular]  = new_r_Proj(call, mode_X, pn_Call_X_regular);
		tuple_in[pn_X_except]   = new_r_Proj(call, mode_X, pn_Call_X_except);
		n_operands             += 2;
	}

	turn_into_tuple(node, n_operands, tuple_in);
}

static ir_heights_t *heights;

/**
 * Check if a node is somehow data dependent on another one.
 * both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(const ir_node *n1, const ir_node *n2)
{
	assert(get_nodes_block(n1) == get_nodes_block(n2));
	return heights_reachable_in_block(heights, n1, n2);
}

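/**
 * A recorded stack pointer change chain: @p before is the first node of the
 * chain (its input at @p pos is the incoming stack value), @p after is the
 * last node of the chain producing the new stack value, or NULL if the chain
 * produces none.
 */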
struct be_stack_change_t {
	ir_node  *before;
	unsigned  pos;
	ir_node  *after;
};

/**
 * Classical qsort() comparison function behavior:
 *
 *  0 if both elements are equal, neither node depends on the other
 * +1 if first depends on second (first is greater)
 * -1 if second depends on first (second is greater)
 */
static int cmp_stack_dependency(const void *c1, const void *c2)
{
	be_stack_change_t const *const s1 = (be_stack_change_t const*)c1;
	be_stack_change_t const *const s2 = (be_stack_change_t const*)c2;

	/* Sort blockwise. */
	ir_node *const b1  = s1->before;
	ir_node *const b2  = s2->before;
	ir_node *const bl1 = get_nodes_block(b1);
	ir_node *const bl2 = get_nodes_block(b2);
	if (bl1 != bl2)
		return get_irn_idx(bl2) - get_irn_idx(bl1);

	/* If one change chain does not produce a new value, it must be the last. */
	ir_node *const n1 = s1->after;
	if (!n1)
		return 1;
	ir_node *const n2 = s2->after;
	if (!n2)
		return -1;

	/* If one change chain is data dependent on the other, it must come later.
	 * The after nodes cannot be dependent on each other, because they are unused.
	 * So compare after of one with before of the other. */
	if (dependent_on(n1, b2))
		return 1;
	if (dependent_on(n2, b1))
		return -1;

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable.
	 *
	 * Additionally, we need to respect transitive dependencies. Consider a
	 * Call a depending on Call b and an independent Call c.
	 * We MUST NOT order c > a and b > c. */
	unsigned h1 = get_irn_height(heights, b1);
	unsigned h2 = get_irn_height(heights, b2);
	if (h1 < h2)
		return 1;
	if (h1 > h2)
		return -1;
	/* Same height, so use a random (but stable) order */
	return get_irn_idx(n2) - get_irn_idx(n1);
}

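/*
 * Sketch of how the stack fixup helpers below are typically used (the node
 * names are illustrative placeholders):
 *
 *   be_stack_env_t stack_env;
 *   be_stack_init(&stack_env);
 *   // while transforming, record every node chain that changes the stack:
 *   be_stack_record_chain(&stack_env, first_node, stack_input_pos, last_value);
 *   // once all chains are recorded, wire them up per block:
 *   be_stack_finish(&stack_env);
 */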
void be_stack_init(be_stack_env_t *const env)
{
	env->changes = NEW_ARR_F(be_stack_change_t, 0);
}

void be_stack_record_chain(be_stack_env_t *const env, ir_node *const before, unsigned const pos, ir_node *const after)
{
	assert(!after || get_nodes_block(after) == get_nodes_block(before));

	be_stack_change_t const change = { before, pos, after };
	ARR_APP1(be_stack_change_t, env->changes, change);
	/* FIXME: This should not be necessary, but not keeping the so far unused
	 * stack nodes triggers problems with out edges, because they get deactivated
	 * before be_stack_finish() is called. It should suffice to keep the last
	 * stack producer per block in be_stack_finish(). */
	if (after)
		keep_alive(after);
}

void be_stack_finish(be_stack_env_t *const env)
{
	be_stack_change_t *const changes = env->changes;
	env->changes = NULL;

	unsigned const n_changes = ARR_LEN(changes);
	if (n_changes != 0) {
		/* Order the stack changes according to their data dependencies. */
		ir_graph *const irg = get_irn_irg(changes[0].before);
		heights = heights_new(irg);
		QSORT(changes, n_changes, cmp_stack_dependency);
		heights_free(heights);

		/* Wire the stack change chains within each block, i.e. connect before of
		 * each change to after of its predecessor. */
		ir_node *prev_block = NULL;
		for (unsigned n = n_changes; n-- != 0;) {
			be_stack_change_t const *const c     = &changes[n];
			ir_node                 *const block = get_nodes_block(c->before);
			if (block == prev_block)
				set_irn_n(c[1].before, c[1].pos, c[0].after);
			prev_block = block;
		}
	}

	DEL_ARR_F(changes);
}

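/**
 * Create Stores in the start block which write the argument values of all
 * parameter entities of @p type to their places on the frame.
 */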
static void create_stores_for_type(ir_graph *irg, ir_type *type)
{
	ir_node *frame       = get_irg_frame(irg);
	ir_node *initial_mem = get_irg_initial_mem(irg);
	ir_node *mem         = initial_mem;
	ir_node *first_store = NULL;
	ir_node *start_block = get_irg_start_block(irg);
	ir_node *args        = get_irg_args(irg);

	/* all parameter entities left in the frame type require stores.
	 * (The ones passed on the stack have been moved to the arg type) */
	for (size_t i = 0, n = get_compound_n_members(type); i < n; ++i) {
		ir_entity *entity = get_compound_member(type, i);
		ir_type   *tp     = get_entity_type(entity);
		if (!is_parameter_entity(entity))
			continue;

		size_t arg = get_entity_parameter_number(entity);
		if (arg == IR_VA_START_PARAMETER_NUMBER)
			continue;

		ir_node *addr = new_r_Member(start_block, frame, entity);
		if (entity->attr.parameter.doubleword_low_mode != NULL) {
			ir_mode *mode      = entity->attr.parameter.doubleword_low_mode;
			ir_node *val0      = new_r_Proj(args, mode, arg);
			ir_node *val1      = new_r_Proj(args, mode, arg+1);
			ir_node *store0    = new_r_Store(start_block, mem, addr, val0,
			                                 tp, cons_none);
			ir_node *mem0      = new_r_Proj(store0, mode_M, pn_Store_M);
			size_t   offset    = get_mode_size_bits(mode)/8;
			ir_mode *mode_ref  = get_irn_mode(addr);
			ir_mode *mode_offs = get_reference_offset_mode(mode_ref);
			ir_node *cnst      = new_r_Const_long(irg, mode_offs, offset);
			ir_node *next_addr = new_r_Add(start_block, addr, cnst, mode_ref);
			ir_node *store1    = new_r_Store(start_block, mem0, next_addr, val1,
			                                 tp, cons_none);
			mem = new_r_Proj(store1, mode_M, pn_Store_M);
			if (first_store == NULL)
				first_store = store0;
		} else {
			ir_mode *mode  = is_compound_type(tp) ? mode_P : get_type_mode(tp);
			ir_node *val   = new_r_Proj(args, mode, arg);
			ir_node *store = new_r_Store(start_block, mem, addr, val, tp, cons_none);
			mem = new_r_Proj(store, mode_M, pn_Store_M);
			if (first_store == NULL)
				first_store = store;
		}
	}

	if (mem != initial_mem) {
		edges_reroute_except(initial_mem, mem, first_store);
		set_irg_initial_mem(irg, initial_mem);
	}
}

void be_add_parameter_entity_stores(ir_graph *irg)
{
	ir_type           *frame_type   = get_irg_frame_type(irg);
	be_stack_layout_t *layout       = be_get_irg_stack_layout(irg);
	ir_type           *between_type = layout->between_type;

	create_stores_for_type(irg, frame_type);
	if (between_type != NULL)
		create_stores_for_type(irg, between_type);
}

unsigned be_get_n_allocatable_regs(const ir_graph *irg,
                                   const arch_register_class_t *cls)
{
	unsigned *const bs = rbitset_alloca(cls->n_regs);
	be_get_allocatable_regs(irg, cls, bs);
	return rbitset_popcount(bs, cls->n_regs);
}

void be_get_allocatable_regs(ir_graph const *const irg,
                             arch_register_class_t const *const cls,
                             unsigned *const raw_bitset)
{
	be_irg_t *birg             = be_birg_from_irg(irg);
	unsigned *allocatable_regs = birg->allocatable_regs;

	rbitset_clear_all(raw_bitset, cls->n_regs);
	for (unsigned i = 0; i < cls->n_regs; ++i) {
		const arch_register_t *reg = &cls->regs[i];
		if (rbitset_is_set(allocatable_regs, reg->global_index))
			rbitset_set(raw_bitset, i);
	}
}

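/** Return the 32 bits of tarval @p tv starting at byte offset @p offset. */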
uint32_t be_get_tv_bits32(ir_tarval *const tv, unsigned const offset)
{
	uint32_t val;
	val  = (uint32_t)get_tarval_sub_bits(tv, offset);
	val |= (uint32_t)get_tarval_sub_bits(tv, offset + 1) <<  8;
	val |= (uint32_t)get_tarval_sub_bits(tv, offset + 2) << 16;
	val |= (uint32_t)get_tarval_sub_bits(tv, offset + 3) << 24;
	return val;
}

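/** Values of modes with two's complement arithmetic are kept in general purpose registers. */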
static bool mode_needs_gp_reg(ir_mode *const mode)
{
	return get_mode_arithmetic(mode) == irma_twos_complement;
}

ir_node *be_skip_downconv(ir_node *node, bool const single_user)
{
	assert(mode_needs_gp_reg(get_irn_mode(node)));
	for (;;) {
		if (single_user && get_irn_n_edges(node) > 1) {
			/* we only want to skip the conv when we're the only user
			 * (because this test is used in the context of address-mode selection
			 *  and we don't want to use address mode for multiple users) */
			break;
		} else if (is_Conv(node)) {
			ir_node *const op       = get_Conv_op(node);
			ir_mode *const src_mode = get_irn_mode(op);
			if (!mode_needs_gp_reg(src_mode) || get_mode_size_bits(get_irn_mode(node)) > get_mode_size_bits(src_mode))
				break;
			node = op;
		} else {
			break;
		}
	}
	return node;
}

ir_node *be_skip_sameconv(ir_node *node)
{
	assert(mode_needs_gp_reg(get_irn_mode(node)));
	for (;;) {
		if (get_irn_n_edges(node) > 1) {
			/* we only want to skip the conv when we're the only user
			 * (because this test is used in the context of address-mode selection
			 *  and we don't want to use address mode for multiple users) */
			break;
		} else if (is_Conv(node)) {
			ir_node *const op       = get_Conv_op(node);
			ir_mode *const src_mode = get_irn_mode(op);
			if (!mode_needs_gp_reg(src_mode) || get_mode_size_bits(get_irn_mode(node)) != get_mode_size_bits(src_mode))
				break;
			node = op;
		} else {
			break;
		}
	}
	return node;
}

bool be_match_immediate(ir_node const *const node, ir_tarval **const tarval_out,
                        ir_entity **const entity_out, unsigned *reloc_kind_out)
{
	unsigned         reloc_kind;
	ir_entity       *entity;
	ir_node   const *cnst;
	if (is_Const(node)) {
		entity     = NULL;
		cnst       = node;
		reloc_kind = 0;
	} else if (is_Address(node)) {
		entity     = get_Address_entity(node);
		cnst       = NULL;
		reloc_kind = 0;
	} else if (be_is_Relocation(node)) {
		entity     = be_get_Relocation_entity(node);
		cnst       = NULL;
		reloc_kind = be_get_Relocation_kind(node);
	} else if (is_Add(node)) {
		ir_node const *l = get_Add_left(node);
		ir_node const *r = get_Add_right(node);
		/* Normalize so that the Const ends up in l and the
		 * Address/Relocation in r. */
		if (be_is_Relocation(l) || is_Address(l)) {
			ir_node const *tmp = l;
			l = r;
			r = tmp;
		}
		if (!is_Const(l))
			return false;
		cnst = l;
		if (is_Address(r)) {
			entity     = get_Address_entity(r);
			reloc_kind = 0;
		} else if (be_is_Relocation(r)) {
			entity     = be_get_Relocation_entity(r);
			reloc_kind = be_get_Relocation_kind(r);
		} else {
			return false;
		}
	} else {
		return false;
	}

	if (entity && is_tls_entity(entity))
		return false;

	*tarval_out     = cnst ? get_Const_tarval(cnst) : NULL;
	*entity_out     = entity;
	*reloc_kind_out = reloc_kind;
	return true;
}

ir_node *be_make_Sync(ir_node *const block, int const arity, ir_node **const ins)
{
	return
		arity == 1 ? ins[0] :
		new_r_Sync(block, arity, ins);
}