/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Dataflow driven Load/Store optimizations, uses some ideas from
 *          VanDrunen's LEPRE
 * @author  Michael Beck
 */
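
/*
 * Informally, the pass proceeds as follows (see the functions below):
 * all memory operations are collected per block, every distinct address
 * gets a dense ID, and bitset dataflow problems are solved over these
 * IDs, roughly:
 *
 *   Avail_out(B) = GEN(B) | (Avail_in(B) & ~KILL(B))      (forward)
 *   Avail_in(B)  = AND of Avail_out(P) over all predecessors P
 *
 * together with the analogous backward problem over anticipated Load
 * addresses (anticL_in), which drives the PRE-style Load placement.
 */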

#include "util.h"
#include "irnode_t.h"
#include "irflag_t.h"
#include "array.h"
#include "ircons.h"
#include "irdom.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irouts_t.h"
#include "irgopt.h"
#include "iropt.h"
#include "iroptimize.h"
#include "irnodehashmap.h"
#include "raw_bitset.h"
#include "debug.h"
#include "panic.h"
#include "type_t.h"

/* maximum number of output Proj's */
#define MAX_PROJ MAX((unsigned)pn_Load_max, (unsigned)pn_Store_max)

/**
 * Mapping an address to a dense ID.
 */
typedef struct address_entry_t {
	unsigned id;          /**< The ID */
} address_entry;

/**
 * Memop-flags.
 */
enum memop_flags {
	FLAG_KILL_ALL    = 1, /**< KILL all addresses */
	FLAG_KILLED_NODE = 2, /**< this node was killed */
	FLAG_EXCEPTION   = 4, /**< this node has exception flow */
	FLAG_IGNORE      = 8, /**< ignore this node (volatile or other) */
};
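
/*
 * For example, update_Load_memop() below tags a volatile Load with
 * FLAG_IGNORE, update_Call_memop() tags a non-pure Call with
 * FLAG_KILL_ALL, and mark_replace_load() tags a replaced Load with
 * FLAG_KILLED_NODE.
 */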

/**
 * A value: This represents a value stored at a given address in
 * memory. Do not confuse with values from value numbering.
 */
typedef struct value_t value_t;
struct value_t {
	ir_node  *address;    /**< the address of this value */
	ir_node  *value;      /**< the value itself */
	ir_mode  *mode;       /**< the mode of the value */
	ir_type  *type;       /**< the type of the value */
	unsigned id;          /**< address id */
};

/**
 * A memop describes a memory-related operation.
 * These are Loads/Stores and all other ops that might modify
 * memory (Calls, CopyB) or cause exceptions.
 */
typedef struct memop_t memop_t;
struct memop_t {
	value_t  value;      /**< the value of this memop: only defined for Load/Store */
	ir_node  *node;      /**< the memory op itself */
	ir_node  *mem;       /**< the memory FROM this node */
	ir_node  *replace;   /**< the replacement node if this memop is replaced */
	memop_t  *next;      /**< links to the next memory op in the block in forward order. */
	memop_t  *prev;      /**< links to the previous memory op in the block in forward order. */
	unsigned flags;      /**< memop flags */
	ir_node  *projs[MAX_PROJ+1]; /**< Projs of this memory op */
};

/**
 * Additional data for every basic block.
 */
typedef struct block_t block_t;
struct block_t {
	memop_t  *memop_forward;     /**< topologically sorted list of memory ops in this block */
	memop_t  *memop_backward;    /**< last memop in the list */
	unsigned *avail_out;         /**< out-set of available addresses */
	memop_t  **id_2_memop_avail; /**< maps avail address ids to memops */
	unsigned *anticL_in;         /**< in-set of anticipated Load addresses */
	memop_t  **id_2_memop_antic; /**< maps anticipated address ids to memops */
	ir_node  *block;             /**< the associated block */
	block_t  *forward_next;      /**< next block entry for forward iteration */
	block_t  *backward_next;     /**< next block entry for backward iteration */
	memop_t  *avail;             /**< used locally for the avail map */
	memop_t  **trans_results;    /**< caches translated nodes for the antic calculation */
};

/**
 * Metadata for this pass.
 */
typedef struct ldst_env_t {
	struct obstack   obst;             /**< obstack for temporary data */
	ir_nodehashmap_t adr_map;          /**< maps addresses to address_entry records */
	block_t         *forward;          /**< Inverse post-order list of all blocks Start->End */
	block_t         *backward;         /**< Inverse post-order list of all blocks End->Start */
	ir_node         *end_bl;           /**< end block of the current graph */
	unsigned        *curr_set;         /**< current set of addresses */
	memop_t         **curr_id_2_memop; /**< current map of address ids to memops */
	unsigned        curr_adr_id;       /**< number for address mapping */
	unsigned        n_mem_ops;         /**< number of memory operations (Loads/Stores) */
	size_t          rbs_size;          /**< size of all bitsets in bits */
	int             max_cfg_preds;     /**< maximum number of block cfg predecessors */
	int             changed;           /**< Flags for changed graph state */
#ifdef DEBUG_libfirm
	ir_node         **id_2_address;    /**< maps an id to the used address */
#endif
} ldst_env;

/* the one and only environment */
static ldst_env env;

#ifdef DEBUG_libfirm

static firm_dbg_module_t *dbg;

/**
 * Dumps the block list.
 *
 * @param env  the ldst environment
 */
static void dump_block_list(ldst_env *env)
{
	block_t *entry;
	memop_t *op;
	int     i;

	for (entry = env->forward; entry != NULL; entry = entry->forward_next) {
		DB((dbg, LEVEL_2, "%+F {", entry->block));

		i = 0;
		for (op = entry->memop_forward; op != NULL; op = op->next) {
			if (i == 0) {
				DB((dbg, LEVEL_2, "\n\t"));
			}
			DB((dbg, LEVEL_2, "%+F", op->node));
			/* FLAG_KILL_ALL is a single bit, so one test suffices */
			if (op->flags & FLAG_KILL_ALL)
				DB((dbg, LEVEL_2, "X"));
			DB((dbg, LEVEL_2, ", "));

			i = (i + 1) & 3;
		}
		DB((dbg, LEVEL_2, "\n}\n\n"));
	}
}

/**
 * Dumps the current set.
 *
 * @param bl   current block
 * @param s    name of the set
 */
static void dump_curr(block_t *bl, const char *s)
{
	size_t end = env.rbs_size - 1;
	size_t pos;
	int    i;

	DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
	i = 0;
	for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
		memop_t *op = env.curr_id_2_memop[pos];

		if (i == 0) {
			DB((dbg, LEVEL_2, "\n\t"));
		}

		DB((dbg, LEVEL_2, "<%+F, %+F>, ", op->value.address, op->value.value));
		i = (i + 1) & 3;
	}
	DB((dbg, LEVEL_2, "\n}\n"));
}

#else
static void dump_block_list(ldst_env *env)
{
	(void)env;
}

static void dump_curr(block_t *bl, const char *s)
{
	(void)bl;
	(void)s;
}
#endif /* DEBUG_libfirm */

/** Get the block entry for a block node */
static block_t *get_block_entry(const ir_node *block)
{
	assert(is_Block(block));

	return (block_t*)get_irn_link(block);
}

/** Get the memop entry for a memory operation node */
static memop_t *get_irn_memop(const ir_node *irn)
{
	assert(! is_Block(irn));
	return (memop_t*)get_irn_link(irn);
}

/**
 * Walk over the memory edges from definition to users.
 * This ensures that even operations without a memory output are found.
 *
 * @param irn   start node
 * @param pre   pre walker function
 * @param post  post walker function
 * @param ctx   context parameter for the walker functions
 */
static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
{
	ir_mode *mode;

	mark_irn_visited(irn);

	if (pre)
		pre(irn, ctx);

	mode = get_irn_mode(irn);
	if (mode == mode_M) {
		/* every successor uses memory */
		foreach_irn_out_r(irn, i, succ) {
			if (! irn_visited(succ))
				walk_memory(succ, pre, post, ctx);
		}
	} else if (mode == mode_T) {
		/* only some Projs use memory */
		foreach_irn_out_r(irn, i, proj) {
			if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
				walk_memory(proj, pre, post, ctx);
		}
	}
	if (post)
		post(irn, ctx);
}
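
/*
 * Illustration: for a memory chain like
 *
 *     initial_mem -> Store -> Proj M -> Load -> Proj M -> Return
 *
 * walk_memory() follows the mode_M edges (and the memory Projs of
 * mode_T nodes), so every operation on the chain is visited exactly
 * once, including ops that produce no memory result themselves.
 */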

/**
 * Walks over all memory nodes of a graph.
 *
 * @param irg   a graph
 * @param pre   pre walker function
 * @param post  post walker function
 * @param ctx   context parameter for the walker functions
 */
static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post, void *ctx)
{
	inc_irg_visited(irg);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);

	/*
	 * there are two possible sources for memory: initial_mem and nomem
	 * we ignore nomem as this should NOT change the memory
	 */
	walk_memory(get_irg_initial_mem(irg), pre, post, ctx);

	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
}

/**
 * Register an address and allocate a dense (0..n) ID for it.
 *
 * @param adr  the IR-node representing the address
 *
 * @return the allocated id
 */
static unsigned register_address(ir_node *adr)
{
	address_entry *entry;

	/* skip Confirms */
	while (is_Confirm(adr))
		adr = get_Confirm_value(adr);

	entry = ir_nodehashmap_get(address_entry, &env.adr_map, adr);

	if (entry == NULL) {
		/* new address */
		entry = OALLOC(&env.obst, address_entry);

		entry->id = env.curr_adr_id++;
		ir_nodehashmap_insert(&env.adr_map, adr, entry);

		DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
#ifdef DEBUG_libfirm
		ARR_APP1(ir_node *, env.id_2_address, adr);
#endif
	}
	return entry->id;
}

/**
 * Translate an address through a Phi node into a given predecessor
 * block.
 *
 * @param address  the address
 * @param block    the block
 * @param pos      the position of the predecessor in block
 */
static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
{
	if (is_Phi(address) && get_nodes_block(address) == block)
		address = get_Phi_pred(address, pos);
	return address;
}
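
/*
 * Example: if block B has two predecessors and address = Phi(a0, a1)
 * placed in B, then phi_translate(address, B, 1) yields a1, the address
 * as seen on the second incoming edge; any other address is returned
 * unchanged.
 */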

/**
 * Walker: allocate a block entry for every block
 * and register all potential addresses.
 */
static void prepare_blocks(ir_node *irn, void *ctx)
{
	(void)ctx;

	if (is_Block(irn)) {
		block_t *entry = OALLOC(&env.obst, block_t);
		int     n;

		entry->memop_forward    = NULL;
		entry->memop_backward   = NULL;
		entry->avail_out        = NULL;
		entry->id_2_memop_avail = NULL;
		entry->anticL_in        = NULL;
		entry->id_2_memop_antic = NULL;
		entry->block            = irn;
		entry->forward_next     = NULL;
		entry->backward_next    = NULL;
		entry->avail            = NULL;
		entry->trans_results    = NULL;
		set_irn_link(irn, entry);

		set_Block_phis(irn, NULL);

		/* use block marks to track unreachable blocks */
		set_Block_mark(irn, 0);

		n = get_Block_n_cfgpreds(irn);
		if (n > env.max_cfg_preds)
			env.max_cfg_preds = n;
	} else {
		ir_mode *mode = get_irn_mode(irn);

		if (mode_is_reference(mode)) {
			/*
			 * Register ALL possible addresses: this is overkill but
			 * simpler than doing it only for translated addresses
			 * (which would be sufficient at the moment).
			 */
			(void)register_address(irn);
		}
	}
}

/**
 * Post-Walker, link in all Phi's
 */
static void link_phis(ir_node *irn, void *ctx)
{
	(void)ctx;

	if (is_Phi(irn)) {
		ir_node *block = get_nodes_block(irn);
		add_Block_phi(block, irn);
	}
}

/**
 * Block walker: creates the inverse post-order list for the CFG.
 */
static void inverse_post_order(ir_node *block, void *ctx)
{
	block_t *entry = get_block_entry(block);

	(void)ctx;

	/* mark this block IS reachable from start */
	set_Block_mark(block, 1);

	/* create the list in inverse order */
	entry->forward_next = env.forward;
	env.forward         = entry;

	/* remember the first visited (last in list) entry, needed for later */
	if (env.backward == NULL)
		env.backward = entry;
}

/**
 * Block walker: create backward links for the memops of a block.
 */
static void collect_backward(ir_node *block, void *ctx)
{
	block_t *entry = get_block_entry(block);
	memop_t *last, *op;

	(void)ctx;

	/*
	 * Do NOT link in the end block yet. We want it to be
	 * the first in the list. This is NOT guaranteed by the walker
	 * if we have endless loops.
	 */
	if (block != env.end_bl) {
		entry->backward_next = env.backward;

		/* create the list in inverse order */
		env.backward = entry;
	}

	/* create backward links for all memory ops */
	last = NULL;
	for (op = entry->memop_forward; op != NULL; op = op->next) {
		op->prev = last;
		last     = op;
	}
	entry->memop_backward = last;
}

/**
 * Allocate a memop.
 *
 * @param irn  the IR-node representing the memop or NULL
 *             if this is a translated (virtual) memop
 *
 * @return the allocated memop
 */
static memop_t *alloc_memop(ir_node *irn)
{
	memop_t *m = OALLOC(&env.obst, memop_t);

	m->value.address = NULL;
	m->value.value   = NULL;
	m->value.mode    = NULL;
	m->value.type    = NULL;

	m->node          = irn;
	m->mem           = NULL;
	m->replace       = NULL;
	m->next          = NULL;
	m->flags         = 0;

	memset(m->projs, 0, sizeof(m->projs));

	if (irn != NULL)
		set_irn_link(irn, m);
	return m;
}

/**
 * Create a memop for a Phi-replacement.
 *
 * @param op   the memop to clone
 * @param phi  the Phi-node representing the new value
 */
static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
{
	memop_t *m = OALLOC(&env.obst, memop_t);

	m->value         = op->value;
	m->value.value   = phi;

	m->node          = phi;
	m->replace       = NULL;
	m->next          = NULL;
	m->flags         = 0;

	set_irn_link(phi, m);
	return m;
}
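
/*
 * Sketch of the intended use: when the same stored value reaches a block
 * from all predecessors, a Phi merging the per-predecessor values can
 * stand in for a Load of the address; clone_memop_phi() creates the
 * bookkeeping memop for such a Phi replacement.
 */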

static bool is_Call_pure(const ir_node *call)
{
	ir_type *call_tp = get_Call_type(call);
	mtp_additional_properties prop = get_method_additional_properties(call_tp);
	return prop & mtp_property_pure;
}

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		if (is_Address(ptr)) {
			return get_Address_entity(ptr);
		} else if (is_Member(ptr)) {
			ir_entity *entity = get_Member_entity(ptr);
			ir_type   *owner  = get_entity_owner(entity);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(owner) &&
			    ((get_entity_n_overwrites(entity) > 0) ||
			    (get_entity_n_overwrittenby(entity) > 0)))
				return NULL;

			/* try next */
			ptr = get_Member_ptr(ptr);
		} else if (is_Sel(ptr)) {
			ptr = get_Sel_ptr(ptr);
		} else if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else if (get_irn_mode(r) == get_irn_mode(ptr) && is_Const(l))
				ptr = r;
			else
				return NULL;

			/* for now, we support only one addition, reassoc should fold all others */
			if (!is_Address(ptr) && !is_Align(ptr) && !is_Offset(ptr) && !is_Sel(ptr) && !is_Size(ptr))
				return NULL;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
				ptr = l;
			else
				return NULL;
			/* for now, we support only one subtraction, reassoc should fold all others */
			if (!is_Address(ptr) && !is_Align(ptr) && !is_Offset(ptr) && !is_Sel(ptr) && !is_Size(ptr))
				return NULL;
		} else
			return NULL;
	}
}
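
/*
 * Example: for a pointer built as Add(Member(Address E, fld), Const 8)
 * the loop above strips the constant offset and the Member access and
 * returns the entity of Address E, unless a polymorphic class member is
 * involved, in which case NULL is returned.
 */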

/**
 * Mark a Load memop to be replaced by a definition.
 *
 * @param op   the Load memop
 * @param def  the replacing definition
 */
static void mark_replace_load(memop_t *op, ir_node *def)
{
	op->replace = def;
	op->flags |= FLAG_KILLED_NODE;
	env.changed = 1;
}

/**
 * Mark a Store memop to be removed.
 *
 * @param op  the Store memop
 */
static void mark_remove_store(memop_t *op)
{
	op->flags |= FLAG_KILLED_NODE;
	env.changed = 1;
}

/**
 * Update a memop for a Load.
 *
 * @param m  the memop
 */
static void update_Load_memop(memop_t *m)
{
	ir_node   *load = m->node;
	ir_node   *ptr;
	ir_entity *ent;

	if (get_Load_volatility(load) == volatility_is_volatile)
		m->flags |= FLAG_IGNORE;

	ptr = get_Load_ptr(load);

	m->value.address = ptr;
	m->value.type    = get_Load_type(load);

	foreach_irn_out_r(load, i, proj) {
		/* beware of keep edges */
		if (is_End(proj))
			continue;

		unsigned const pn = get_Proj_num(proj);
		m->projs[pn] = proj;
		switch (pn) {
		case pn_Load_res:
			m->value.value = proj;
			m->value.mode  = get_irn_mode(proj);
			break;
		case pn_Load_X_except:
			m->flags |= FLAG_EXCEPTION;
			break;
		case pn_Load_M:
			m->mem = proj;
			break;
		case pn_Load_X_regular:
			break;
		default:
			panic("Unsupported Proj from Load %+F", proj);
		}
	}

	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);

	if (ent != NULL && get_entity_visibility(ent) != ir_visibility_external) {
		/* a static allocation that is not external: there should be NO exception
		 * when loading even if we cannot replace the load itself. */

		/* no exception, clear the m fields as it might be checked later again */
		if (m->projs[pn_Load_X_except]) {
			ir_graph *irg = get_irn_irg(ptr);
			exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
			m->projs[pn_Load_X_except] = NULL;
			m->flags &= ~FLAG_EXCEPTION;
			env.changed = 1;
		}
		if (m->projs[pn_Load_X_regular]) {
			exchange(m->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
			m->projs[pn_Load_X_regular] = NULL;
			env.changed = 1;
		}
	}

	if (m->value.value != NULL && !(m->flags & FLAG_IGNORE)) {
		/* only create an address if this node is NOT killed immediately or ignored */
		m->value.id = register_address(ptr);
		++env.n_mem_ops;
	} else {
		/* no user, KILL it */
		mark_replace_load(m, NULL);
	}
}

/**
 * Update a memop for a Store.
 *
 * @param m  the memop
 */
static void update_Store_memop(memop_t *m)
{
	ir_node *store = m->node;
	ir_node *adr   = get_Store_ptr(store);

	if (get_Store_volatility(store) == volatility_is_volatile) {
		m->flags |= FLAG_IGNORE;
	} else {
		/* only create an address if this node is NOT ignored */
		m->value.id = register_address(adr);
		++env.n_mem_ops;
	}

	m->value.address = adr;

	foreach_irn_out_r(store, i, proj) {
		/* beware of keep edges */
		if (is_End(proj))
			continue;

		unsigned const pn = get_Proj_num(proj);
		m->projs[pn] = proj;
		switch (pn) {
		case pn_Store_X_except:
			m->flags |= FLAG_EXCEPTION;
			break;
		case pn_Store_M:
			m->mem = proj;
			break;
		case pn_Store_X_regular:
			break;
		default:
			panic("Unsupported Proj from Store %+F", proj);
		}
	}
	m->value.value = get_Store_value(store);
	m->value.mode  = get_irn_mode(m->value.value);
}

/**
 * Update a memop for a Call.
 *
 * @param m  the memop
 */
static void update_Call_memop(memop_t *m)
{
	ir_node *call = m->node;
	if (is_Call_pure(call)) {
		m->flags = 0;
	} else {
		m->flags = FLAG_KILL_ALL;
	}

	foreach_irn_out_r(call, i, proj) {
		/* beware of keep edges */
		if (is_End(proj))
			continue;

		switch (get_Proj_num(proj)) {
		case pn_Call_X_except:
			m->flags |= FLAG_EXCEPTION;
			break;
		case pn_Call_M:
			m->mem = proj;
			break;
		}
	}
}

/**
 * Update a memop for a Div.
 *
 * @param m  the memop
 */
static void update_Div_memop(memop_t *m)
{
	ir_node *div = m->node;

	foreach_irn_out_r(div, i, proj) {
		/* beware of keep edges */
		if (is_End(proj))
			continue;

		switch (get_Proj_num(proj)) {
		case pn_Div_X_except:
			m->flags |= FLAG_EXCEPTION;
			break;
		case pn_Div_M:
			m->mem = proj;
			break;
		}
	}
}

/**
 * Update a memop for a Mod.
 *
 * @param m  the memop
 */
static void update_Mod_memop(memop_t *m)
{
	ir_node *mod = m->node;

	foreach_irn_out_r(mod, i, proj) {
		/* beware of keep edges */
		if (is_End(proj))
			continue;

		switch (get_Proj_num(proj)) {
		case pn_Mod_X_except:
			m->flags |= FLAG_EXCEPTION;
			break;
		case pn_Mod_M:
			m->mem = proj;
			break;
		}
	}
}
/**
 * Update a memop for a Phi.
 *
 * @param m  the memop
 */
static void update_Phi_memop(memop_t *m)
{
	/* the Phi is its own mem */
	m->mem = m->node;
}
/**
 * Memory walker: collect all memory ops and build topological lists.
 */
static void collect_memops(ir_node *irn, void *ctx)
{
	memop_t  *op;
	ir_node  *block;
	block_t  *entry;

	(void)ctx;

	if (is_Proj(irn)) {
		/* we can safely ignore ProjM's except the initial memory */
		ir_graph *irg = get_irn_irg(irn);
		if (irn != get_irg_initial_mem(irg))
			return;
	}

	op    = alloc_memop(irn);
	block = get_nodes_block(irn);
	entry = get_block_entry(block);

	if (is_Phi(irn)) {
		update_Phi_memop(op);
		/* Phis must always be placed first */
		op->next = entry->memop_forward;
		entry->memop_forward = op;
		if (entry->memop_backward == NULL)
			entry->memop_backward = op;
	} else {
		switch (get_irn_opcode(irn)) {
		case iro_Load:
			update_Load_memop(op);
			break;
		case iro_Store:
			update_Store_memop(op);
			break;
		case iro_Call:
			update_Call_memop(op);
			break;
		case iro_Sync:
		case iro_Pin:
			op->mem = irn;
			break;
		case iro_Proj:
			/* initial memory */
			op->mem = irn;
			break;
		case iro_Return:
		case iro_End:
			/* we use those only to find the memory edge */
			break;
		case iro_Div:
			update_Div_memop(op);
			break;
		case iro_Mod:
			update_Mod_memop(op);
			break;

		case iro_Builtin:
			/* TODO: handle some builtins */
		default:
			/* unsupported operation */
			op->flags = FLAG_KILL_ALL;
		}

		/* all others should be placed last */
		if (entry->memop_backward == NULL) {
			entry->memop_forward = entry->memop_backward = op;
		} else {
			entry->memop_backward->next = op;
			entry->memop_backward       = op;
		}
	}
}

/**
 * Find an address in the current set.
 *
 * @param value  the value to be searched for
 *
 * @return a memop for the value or NULL if the value does
 *         not exist in the set or cannot be converted into
 *         the requested mode
 */
static memop_t *find_address(const value_t *value)
{
	if (rbitset_is_set(env.curr_set, value->id)) {
		memop_t *res = env.curr_id_2_memop[value->id];

		if (res->value.mode == value->mode)
			return res;
		/* allow hidden casts */
		if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
		    get_mode_arithmetic(value->mode) == irma_twos_complement &&
		    get_mode_size_bits(res->value.mode) == get_mode_size_bits(value->mode))
			return res;
	}
	return NULL;
}

/**
 * Find an address in the avail_out set.
 *
 * @param bl    the block
 * @param id    the address id
 * @param mode  the requested mode
 */
static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode *mode)
{
	if (rbitset_is_set(bl->avail_out, id)) {
		memop_t *res = bl->id_2_memop_avail[id];

		if (res->value.mode == mode)
			return res;
		/* allow hidden casts */
		if (get_mode_arithmetic(res->value.mode) == irma_twos_complement &&
		    get_mode_arithmetic(mode) == irma_twos_complement &&
		    get_mode_size_bits(res->value.mode) == get_mode_size_bits(mode))
			return res;
	}
	return NULL;
}

/**
 * Kill all addresses from the current set.
 */
static void kill_all(void)
{
	rbitset_clear_all(env.curr_set, env.rbs_size);

	/* set sentinel */
	rbitset_set(env.curr_set, env.rbs_size - 1);
}

/**
 * Kill all memops in the current set that might alias the given Store value.
 *
 * @param value  the Store value
 */
static void kill_memops(const value_t *value)
{
	size_t end = env.rbs_size - 1;
	size_t pos;

	for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
		memop_t *op = env.curr_id_2_memop[pos];

		ir_type *value_type = get_type_for_mode(value->mode);
		ir_type *op_type    = get_type_for_mode(op->value.mode);
		/* TODO: determining the access size by the type of the accessed objects
		 * is too conservative... */
		unsigned value_size = get_type_size_bytes(value_type);
		unsigned op_size    = get_type_size_bytes(op_type);

		if (ir_no_alias != get_alias_relation(value->address, value_type, value_size,
		                                      op->value.address, op_type, op_size)) {
			rbitset_clear(env.curr_set, pos);
			env.curr_id_2_memop[pos] = NULL;
			DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
		}
	}
}
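
/*
 * Example: a Store through pointer p evicts a previously recorded Load
 * of *q from the current set unless get_alias_relation() can prove
 * ir_no_alias for the two accesses, e.g. because p and q address
 * distinct global objects.
 */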

/**
 * Add the value of a memop to the current set.
 *
 * @param op  the memory op
 */
static void add_memop(memop_t *op)
{
	rbitset_set(env.curr_set, op->value.id);
	env.curr_id_2_memop[op->value.id] = op;
}

/**
 * Add the value of a memop to the avail_out set.
 *
 * @param bl  the block
 * @param op  the memory op
 */
static void add_memop_avail(block_t *bl, memop_t *op)
{
	rbitset_set(bl->avail_out, op->value.id);
	bl->id_2_memop_avail[op->value.id] = op;
}

/**
 * Check if we can convert a value of one mode to another mode
 * without changing the representation of bits.
 *
 * @param from  the original mode
 * @param to    the destination mode
 */
static int can_convert_to(const ir_mode *from, const ir_mode *to)
{
	if (get_mode_arithmetic(from) == irma_twos_complement &&
	    get_mode_arithmetic(to) == irma_twos_complement &&
	    get_mode_size_bits(from) == get_mode_size_bits(to))
		return 1;
	return 0;
}
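
/*
 * For instance, a 32-bit signed and a 32-bit unsigned integer mode are
 * both twos-complement of equal width, so a value can be reinterpreted
 * by a simple Conv; an int-to-float conversion of the same width would
 * change the meaning of the bits and is rejected.
 */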
/**
 * Add a Conv to the requested mode if needed.
 *
 * @param irn   the IR-node to convert
 * @param mode  the destination mode
 *
 * @return the possible converted node or NULL
 *         if the conversion is not possible
 */
static ir_node *conv_to(ir_node *irn, ir_mode *mode)
{
	ir_mode *other = get_irn_mode(irn);
	if (other != mode) {
		/* different modes: check if conversion is possible without changing the bits */
		if (can_convert_to(other, mode)) {
			ir_node *block = get_nodes_block(irn);
			return new_r_Conv(block, irn, mode);
		}
		/* otherwise not possible ... yet */
		return NULL;
	}
	return irn;
}
/**
 * Update the address of a value if this address was a load result
 * and the load is killed now.
 *
 * @param value  the value whose address is updated
 */
static void update_address(value_t *value)
{
	if (is_Proj(value->address)) {
		ir_node *load = get_Proj_pred(value->address);

		if (is_Load(load)) {
			const memop_t *op = get_irn_memop(load);

			if (op->flags & FLAG_KILLED_NODE)
				value->address = op->replace;
		}
	}
}
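
/*
 * Illustration: if the address was itself loaded, i.e. it is the result
 * Proj of a Load that has meanwhile been replaced, the value's address
 * field is redirected to the replacement so that later set lookups
 * compare against the surviving node.
 */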
/**
 * Do forward dataflow analysis on the given block and calculate the
 * GEN and KILL in the current (avail) set.
 *
 * @param bl  the block
 */
static void calc_gen_kill_avail(block_t *bl)
{
	memop_t *op;
	ir_node *def;

	for (op = bl->memop_forward; op != NULL; op = op->next) {
		switch (get_irn_opcode(op->node)) {
		case iro_Phi:
			/* meet */
			break;
		case iro_Sync:
			/* join */
			break;
		case iro_Load:
			if (! (op->flags & (FLAG_KILLED_NODE|FLAG_IGNORE))) {
				/* do we have this already? */
				memop_t *other;

				update_address(&op->value);
				other = find_address(&op->value);
				if (other != NULL && other != op) {
					def = conv_to(other->value.value, op->value.mode);
					if (def != NULL) {
#ifdef DEBUG_libfirm
						if (is_Store(other->node)) {
							/* RAW */
							DB((dbg, LEVEL_1, "RAW %+F <- %+F(%+F)\n", op->node, def, other->node));
						} else {
							/* RAR */
							DB((dbg, LEVEL_1, "RAR %+F <- %+F(%+F)\n", op->node, def, other->node));
						}
#endif
						mark_replace_load(op, def);
						/* do NOT change the memop table */
						continue;
					}
				}
				/* add this value */
				add_memop(op);
			}
			break;
		case iro_Store:
			if (! (op->flags & FLAG_KILLED_NODE)) {
				/* do we have this store already? */
				memop_t *other;

				update_address(&op->value);
				other = find_address(&op->value);
				if (other != NULL) {
					if (is_Store(other->node)) {
						if (op != other && !(other->flags & FLAG_IGNORE) &&
						    get_nodes_block(other->node) == get_nodes_block(op->node)) {
							/*
							 * For a WAW in the same block we can kick the first Store.
							 * This is a shortcut: we know that the second Store will be
							 * anticipated in any case.
							 */
							DB((dbg, LEVEL_1, "WAW %+F <- %+F\n", other->node, op->node));
							mark_remove_store(other);
							/* FIXME: a Load might get freed due to this killed Store */
						}
					} else if (other->value.value == op->value.value && !(op->flags & FLAG_IGNORE)) {
						/* WAR */
						DB((dbg, LEVEL_1, "WAR %+F <- %+F\n", op->node, other->node));
						mark_remove_store(op);
						/* do NOT change the memop table */
						continue;
					}
				}
				/* KILL all possible aliases */
				kill_memops(&op->value);
				/* add this value */
				add_memop(op);
			}
			break;
		default:
			if (op->flags & FLAG_KILL_ALL)
				kill_all();
		}
	}
}
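
/*
 * Net effect inside one block (cf. the DB messages above):
 *
 *   Load  a; Load  a   ->  second Load replaced        (RAR)
 *   Store a; Load  a   ->  Load replaced by the value  (RAW)
 *   Store a; Store a   ->  first Store removed         (WAW)
 *
 * while any Store that may alias other recorded addresses kills those
 * entries via kill_memops().
 */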

/**
 * Do forward dataflow analysis on a given block to calculate the avail_out set
 * for this block only.
 *
 * @param bl  the block
 */
static void forward_avail(block_t *bl)
{
	/* fill the data from the current block */
	env.curr_id_2_memop = bl->id_2_memop_avail;
	env.curr_set        = bl->avail_out;

	calc_gen_kill_avail(bl);
	dump_curr(bl, "Avail_out");
}

/**
 * Do backward dataflow analysis on a given block to calculate the antic set
 * of Loaded addresses.
 *
 * @param bl  the block
 *
 * @return non-zero if the set has changed since last iteration
 */
static int backward_antic(block_t *bl)
{
	memop_t *op;
	ir_node *block = bl->block;
	int     n = get_Block_n_cfg_outs(block);

	if (n == 1) {
		ir_node  *succ    = get_Block_cfg_out(block, 0);
		block_t  *succ_bl = get_block_entry(succ);
		int      pred_pos = get_Block_cfgpred_pos(succ, block);
		size_t   end      = env.rbs_size - 1;
		size_t   pos;

		kill_all();

		if (bl->trans_results == NULL) {
			/* allocate the translate cache */
			bl->trans_results = OALLOCNZ(&env.obst, memop_t*, env.curr_adr_id);
		}