/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include <string.h>

#include "array.h"
#include "be.h"
#include "dbginfo_t.h"
#include "debug.h"
#include "entity_t.h"
#include "panic.h"
#include "ircons_t.h"
#include "iredges.h"
#include "irflag_t.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgraph_t.h"
#include "irgwalk.h"
#include "irhooks.h"
#include "irmemory.h"
#include "irmode_t.h"
#include "irnode_t.h"
#include "irnodehashmap.h"
#include "iropt_dbg.h"
#include "iropt_t.h"
#include "iroptimize.h"
#include "irtools.h"
#include "set.h"
#include "tv_t.h"
#include "type_t.h"
#include "util.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#define MAX_PROJ MAX(MAX((unsigned)pn_Load_max, (unsigned)pn_Store_max), (unsigned)pn_Call_max)

typedef enum changes_t {
	NO_CHANGES = 0,
	DF_CHANGED = (1 << 0), /**< data flow changed */
	CF_CHANGED = (1 << 1), /**< control flow changed */
	/** nodes have been created but are not reachable. This is a bad hack;
	 * try hard to avoid it! */
	NODES_CREATED = (1 << 2),
} changes_t;

/**
 * walker environment
 */
typedef struct walk_env_t {
	struct obstack obst;    /**< list of all stores */
	changes_t      changes; /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct ldst_info_t {
	ir_node  *projs[MAX_PROJ+1]; /**< list of Proj's of this node */
	ir_node  *exc_block;         /**< the exception block if available */
	int      exc_idx;            /**< predecessor index in exception block */
	unsigned visited;            /**< visited counter for breaking loops */
} ldst_info_t;

typedef struct base_offset_t {
	ir_node *base;
	long     offset;
} base_offset_t;

typedef struct track_load_env_t {
	ir_node      *load;
	base_offset_t base_offset;
	ir_node      *ptr; /* deprecated: alternative representation of
	                      base_offset */
} track_load_env_t;

/**
 * flags for control flow.
 */
typedef enum block_flags_t {
	BLOCK_HAS_COND = (1 << 0), /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = (1 << 1), /**< Block has exceptional control flow */
} block_flags_t;

/**
 * a Block info.
 */
typedef struct block_info_t {
	block_flags_t flags;  /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
	if (info == NULL) {
		info = OALLOCZ(obst, ldst_info_t);
		set_irn_link(node, info);
	}
	return info;
}

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
	block_info_t *info = (block_info_t*)get_irn_link(node);
	if (info == NULL) {
		info = OALLOCZ(obst, block_info_t);
		set_irn_link(node, info);
	}
	return info;
}

/**
 * update the projection info for a Load/Store
 */
static changes_t update_projs(ldst_info_t *info, ir_node *proj)
{
	unsigned nr = get_Proj_num(proj);
	assert(nr <= MAX_PROJ);

	if (info->projs[nr] != NULL) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	} else {
		info->projs[nr] = proj;
		return NO_CHANGES;
	}
}

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static void update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL);
	info->exc_block = block;
	info->exc_idx   = pos;
}

/**
 * walker, collects all Proj/Load/Store/Call/CopyB nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	walk_env_t *wenv   = (walk_env_t *)env;
	unsigned    opcode = get_irn_opcode(node);

	if (opcode == iro_Proj) {
		ir_node *pred = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info_t *ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Proj's to the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			ir_node *blk      = get_nodes_block(node);
			ir_node *pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		for (int i = get_Block_n_cfgpreds(node); i-- > 0; ) {
			bool     is_exc = false;
			ir_node *proj   = get_Block_cfgpred(node, i);
			ir_node *pred   = proj;

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = is_x_except_Proj(proj);
			}
			ir_node      *pred_block = get_nodes_block(pred);
			block_info_t *bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store
			               || opcode == iro_Call)) {
				ldst_info_t *ldst_info = get_ldst_info(pred, &wenv->obst);

				update_exc(ldst_info, node, i);
			}
		}
	} else if (is_memop(node)) {
		/* Just initialize a ldst_info */
		(void) get_ldst_info(node, &wenv->obst);
	}
}

/* forward */
static void reduce_node_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	if (!info->projs[pn_Load_res] && !info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular]) {
			ir_node *jmp = new_r_Jmp(get_nodes_block(load));
			exchange(info->projs[pn_Load_X_regular], jmp);
		}
		kill_node(load);
		reduce_node_usage(ptr);
	}
}

/**
 * A use of a node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_node_usage(ir_node *ptr)
{
	if (!is_Proj(ptr))
		return;
	if (get_irn_n_edges(ptr) > 0)
		return;

	/* this Proj is dead now */
	ir_node *pred = get_Proj_pred(ptr);
	if (is_Load(pred)) {
		ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
		info->projs[get_Proj_num(ptr)] = NULL;

		/* this node lost its result proj, handle that */
		handle_load_update(pred);
	}
}

/**
 * Kill a Load or Store and all other nodes which are not needed after
 * it has been killed.
 */
static void kill_and_reduce_usage(ir_node *node)
{
	ir_node *ptr;
	ir_node *value;
	switch (get_irn_opcode(node)) {
	case iro_Load:
		ptr   = get_Load_ptr(node);
		value = NULL;
		break;
	case iro_Store:
		ptr   = get_Store_ptr(node);
		value = get_Store_value(node);
		break;
	default:
		panic("Cannot handle node %+F", node);
	}

	kill_node(node);
	reduce_node_usage(ptr);
	if (value != NULL) {
		reduce_node_usage(value);
	}
}

static void get_base_and_offset(ir_node *ptr, base_offset_t *base_offset)
{
	/* TODO: long might not be enough, we should probably use some tarval
	 * thingy, or at least detect long overflows and abort */
	long     offset = 0;
	ir_mode *mode   = get_irn_mode(ptr);
	for (;;) {
		if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset += get_Const_long(r);
			ptr     = l;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset -= get_Const_long(r);
			ptr     = l;
		} else if (is_Sel(ptr)) {
			ir_node *index = get_Sel_index(ptr);
			if (!is_Const(index))
				break;

			ir_type *type         = get_Sel_type(ptr);
			ir_type *element_type = get_array_element_type(type);
			if (get_type_state(element_type) != layout_fixed)
				break;

			/* TODO: may overflow here */
			int size = get_type_size_bytes(element_type);
			offset += size * get_Const_long(index);
			ptr = get_Sel_ptr(ptr);
		} else if (is_Member(ptr)) {
			ir_entity *entity = get_Member_entity(ptr);
			ir_type   *owner  = get_entity_owner(entity);
			if (get_type_state(owner) != layout_fixed)
				break;
			offset += get_entity_offset(entity);
			ptr = get_Member_ptr(ptr);
		} else
			break;
	}

	base_offset->offset = offset;
	base_offset->base   = ptr;
}
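
/* Illustrative example (not part of the original code, hypothetical nodes):
 * for a pointer expression
 *   Add(Member(p, f), Const 4)
 * where member f sits at byte offset 8 inside its fixed-layout owner type,
 * the loop above peels the Add and the Member and yields
 *   base_offset->base   = p
 *   base_offset->offset = 12
 * Decomposition stops at the first node that is neither an Add/Sub with a
 * constant right operand nor a Sel/Member into a type with fixed layout.
 */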

/**
 * Checks whether the memory of the first access
 * is contained within the second one.
 */
static bool is_contained_in(
	ir_mode *const load_mode, const base_offset_t *const load_bo,
	ir_mode *const prev_mode, const base_offset_t *const prev_bo)
{
	if (load_bo->base != prev_bo->base)
		return false;

	/* ensure the load value is completely contained in the previous one */
	long delta = load_bo->offset - prev_bo->offset;
	if (delta < 0)
		return false;
	long load_mode_len = get_mode_size_bytes(load_mode);
	long prev_mode_len = get_mode_size_bytes(prev_mode);
	if (delta+load_mode_len > prev_mode_len)
		return false;

	/* simple case: previous value has the same mode */
	if (load_mode == prev_mode)
		return true;

	ir_mode_arithmetic prev_arithmetic = get_mode_arithmetic(prev_mode);
	ir_mode_arithmetic load_arithmetic = get_mode_arithmetic(load_mode);
	return (prev_arithmetic == irma_twos_complement &&
	        load_arithmetic == irma_twos_complement)
	       || (prev_arithmetic != load_arithmetic
	           && load_mode_len == prev_mode_len);
}
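
/* Illustrative example (hypothetical offsets): with prev_bo = (p, 0) for a
 * 4-byte two's-complement access and load_bo = (p, 2) for a 2-byte
 * two's-complement load, delta = 2 and 2 + 2 <= 4, so every byte the load
 * reads was produced by the previous access and the function returns true.
 * A 4-byte load at (p, 2) would fail the size check (2 + 4 > 4), and a load
 * from a different base fails immediately.
 */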


/**
 * This is called for load-after-load and load-after-store.
 * If possible the value of the previous load/store is transformed in a way
 * so the 2nd load can be left out/replaced by arithmetic on the previous
 * value.
 */
static ir_node *transform_previous_value(ir_mode *const load_mode,
	const long load_offset, ir_mode *const prev_mode,
	const long prev_offset, ir_node *const prev_value,
	ir_node *const block)
{
	/* simple case: previous value has the same mode */
	if (load_mode == prev_mode)
		return prev_value;

	/* two's complement values can be transformed with bitops */
	long               load_mode_len   = get_mode_size_bytes(load_mode);
	long               prev_mode_len   = get_mode_size_bytes(prev_mode);
	ir_mode_arithmetic prev_arithmetic = get_mode_arithmetic(prev_mode);
	ir_mode_arithmetic load_arithmetic = get_mode_arithmetic(load_mode);
	if (prev_arithmetic == irma_twos_complement &&
	    load_arithmetic == irma_twos_complement) {
		/* produce a shift to adjust offset delta */
		long           delta = load_offset - prev_offset;
		unsigned const shift = be_get_backend_param()->byte_order_big_endian
			? prev_mode_len - load_mode_len - delta
			: delta;
		ir_node *new_value = prev_value;
		if (shift != 0) {
			ir_graph *const irg   = get_irn_irg(block);
			ir_node  *const cnst  = new_r_Const_long(irg, mode_Iu, shift * 8);
			new_value = new_r_Shr(block, new_value, cnst, prev_mode);
		}

		return new_r_Conv(block, new_value, load_mode);
	} else {
		assert(prev_arithmetic != load_arithmetic && load_mode_len == prev_mode_len);
		return new_r_Bitcast(block, prev_value, load_mode);
	}
}
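
/* Worked example (assumed sizes, not from the original code): suppose a
 * 4-byte two's-complement value was stored at offset 0 and a 1-byte load
 * reads at offset 2, i.e. delta = 2, prev_mode_len = 4, load_mode_len = 1.
 *   - little endian: shift = delta = 2, so the previous value is shifted
 *     right by 16 bits before the Conv extracts the low byte.
 *   - big endian:    shift = 4 - 1 - 2 = 1, i.e. a right shift by 8 bits,
 *     because bytes at higher addresses hold the less significant bits.
 * If the modes merely differ in arithmetic but have equal size, a Bitcast
 * is emitted instead.
 */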

static changes_t replace_load(ir_node *load, ir_node *new_value)
{
	const ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	changes_t res = NO_CHANGES;
	/* no exception */
	if (info->projs[pn_Load_X_except] != NULL) {
		ir_graph *irg = get_irn_irg(load);
		ir_node  *bad = new_r_Bad(irg, mode_X);
		exchange(info->projs[pn_Load_X_except], bad);
		res |= CF_CHANGED;

		assert(info->projs[pn_Load_X_regular] != NULL);
		ir_node *jmp = new_r_Jmp(get_nodes_block(load));
		exchange(info->projs[pn_Load_X_regular], jmp);
	}

	/* Loads without users should already be optimized away */
	assert(info->projs[pn_Load_res] != NULL);
	exchange(info->projs[pn_Load_res], new_value);

	kill_and_reduce_usage(load);
	return res | DF_CHANGED;
}

/**
 * Returns true if @p op is known to be reached through the X_regular proj of
 * @p prev_op, i.e. if @p prev_op cannot throw an exception.
 */
static bool on_regular_path(ir_node *op, ir_node *prev_op)
{
	/* TODO: create a real test, for now we just make sure the previous node
	 * does not throw an exception. */
	(void)op;
	return !is_fragile_op(prev_op) || !ir_throws_exception(prev_op);
}

static changes_t try_load_after_store(track_load_env_t *env, ir_node *store)
{
	ir_node *const load = env->load;
	if (!on_regular_path(load, store))
		return NO_CHANGES;

	ir_node      *store_ptr = get_Store_ptr(store);
	base_offset_t prev_base_offset;
	get_base_and_offset(store_ptr, &prev_base_offset);
	ir_mode       *const load_mode   = get_Load_mode(load);
	ir_mode       *const store_mode  = get_irn_mode(get_Store_value(store));
	base_offset_t *const base_offset = &env->base_offset;

	/* load value completely contained in previous store? */
	if (is_contained_in(load_mode, base_offset, store_mode, &prev_base_offset)) {
		ir_node *const store_value = get_Store_value(store);
		ir_node *const block       = get_nodes_block(load);
		ir_node *const new_value
			= transform_previous_value(load_mode, base_offset->offset, store_mode,
			                           prev_base_offset.offset, store_value, block);
		DBG_OPT_RAW(load, new_value);
		return replace_load(load, new_value);
	}

	return NO_CHANGES;
}

static changes_t try_load_after_load(track_load_env_t *env, ir_node *prev_load)
{
	ir_node *const load = env->load;
	if (!on_regular_path(load, prev_load))
		return NO_CHANGES;

	ldst_info_t *const info       = (ldst_info_t*)get_irn_link(prev_load);
	ir_node     *const prev_value = info->projs[pn_Load_res];
	/* the other load is unused and will get removed later anyway */
	if (prev_value == NULL)
		return NO_CHANGES;

	ir_node *const prev_ptr = get_Load_ptr(prev_load);
	base_offset_t prev_base_offset;
	get_base_and_offset(prev_ptr, &prev_base_offset);
	ir_mode       *const load_mode   = get_Load_mode(load);
	ir_mode       *const prev_mode   = get_Load_mode(prev_load);
	base_offset_t *const base_offset = &env->base_offset;

	/* load value completely contained in previous load? */
	if (is_contained_in(load_mode, base_offset, prev_mode, &prev_base_offset)) {
		ir_node *const block     = get_nodes_block(load);
		ir_node *const new_value
			= transform_previous_value(load_mode, base_offset->offset, prev_mode,
			                           prev_base_offset.offset, prev_value, block);
		DBG_OPT_RAR(prev_load, load);
		return replace_load(load, new_value);
	}

	/* previous load value completely contained in current load? */
	if (get_Load_type(load) == get_Load_type(prev_load) &&
	    is_contained_in(prev_mode, &prev_base_offset, load_mode, base_offset)) {
		ir_graph *const irg   = get_irn_irg(prev_load);
		ir_node  *const block = get_nodes_block(prev_load);

		/* Adapt pointer of previous load. */
		const long        delta         = base_offset->offset - prev_base_offset.offset;
		assert(delta <= 0);
		ir_mode    *const ptr_mode      = get_irn_mode(prev_ptr);
		ir_mode    *const ptr_mode_offs = get_reference_mode_unsigned_eq(ptr_mode);
		ir_node    *const c             = new_r_Const_long(irg, ptr_mode_offs, delta);
		ir_node    *const add           = new_r_Add(block, prev_ptr, c, ptr_mode);
		set_Load_ptr(prev_load, add);

		/* Change mode of previous load. */
		set_Load_mode(prev_load, load_mode);
		ir_node   *const original_proj = info->projs[pn_Load_res];
		ir_node   *const result_proj   = new_r_Proj(prev_load, load_mode, pn_Load_res);
		changes_t        changed       = replace_load(load, result_proj);
		ir_node   *const new_prev_value
			= transform_previous_value(prev_mode, prev_base_offset.offset, load_mode,
			                           base_offset->offset, result_proj, block);
		exchange(original_proj, new_prev_value);
		info->projs[pn_Load_res] = result_proj;
		changed |= DF_CHANGED;
		DBG_OPT_RAR(prev_load, load);
		return changed;
	}

	return NO_CHANGES;
}
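
/* Illustrative example (hypothetical offsets/modes): if a 2-byte Load at
 * (p, 4) is later followed by a 4-byte Load of the same Load type at (p, 4),
 * the second case above widens the earlier Load to 4 bytes (here delta = 0;
 * for a negative delta its pointer is additionally moved back), re-creates
 * its result Proj in the wide mode, replaces the later Load by that Proj and
 * feeds the old narrow users through transform_previous_value.
 */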

static bool try_update_ptr_CopyB(track_load_env_t *env, ir_node *copyb)
{
	ir_node *copyb_dst = get_CopyB_dst(copyb);
	base_offset_t dst_base_offset;
	get_base_and_offset(copyb_dst, &dst_base_offset);
	if (dst_base_offset.base != env->base_offset.base)
		return false;

	/* see if bytes loaded are fully contained in the CopyB */
	if (env->base_offset.offset < dst_base_offset.offset)
		return false;

	ir_type *copyb_type = get_CopyB_type(copyb);
	long     n_copy     = get_type_size_bytes(copyb_type);
	ir_node *copyb_src  = get_CopyB_src(copyb);
	base_offset_t src_base_offset;
	get_base_and_offset(copyb_src, &src_base_offset);

	long     delta     = env->base_offset.offset - dst_base_offset.offset;
	ir_node *load      = env->load;
	ir_mode *load_mode = get_Load_mode(load);
	long     load_size = get_mode_size_bytes(load_mode);
	if (delta + load_size > n_copy)
		return false;

	/* track src input */
	env->base_offset.base   = src_base_offset.base;
	env->base_offset.offset = src_base_offset.offset + delta;

	/*
	 * Everything is OK, we can replace
	 *   ptr = (load_base_ptr + load_offset)
	 * with
	 *   new_load_ptr = (src_base_ptr + src_offset + delta)
	 */
	ir_graph *irg          = get_irn_irg(load);
	ir_node  *block        = get_nodes_block(load);
	ir_mode  *mode_ref     = get_irn_mode(src_base_offset.base);
	ir_mode  *mode_ref_int = get_reference_mode_unsigned_eq(mode_ref);
	ir_node  *cnst         = new_r_Const_long(irg, mode_ref_int,
	                                          src_base_offset.offset + delta);
	ir_node  *new_load_ptr = new_r_Add(block, src_base_offset.base, cnst, mode_P);
	env->ptr = new_load_ptr;
	return true;
}
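
/* Illustrative example (hypothetical sizes): for
 *   CopyB(dst = D, src = S) of a type of size 16
 * followed by a 4-byte Load from D + 8, the checks above see that the loaded
 * bytes lie completely inside the copied block (8 + 4 <= 16). The tracked
 * address is then rewritten to S + 8 (plus whatever offset S itself
 * decomposes into), so the memory-chain walk can keep searching for the
 * value at the CopyB source instead of giving up at the CopyB.
 */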

/* Note on terminology:
 * In the following functions, two types are associated with each Load
 * and Store: load_type/store_type are the types of the actual
 * operations (derived from the modes of the IR nodes), whereas
 * load_objt/store_objt are the types of the objects in memory which are
 * accessed by the Load/Store nodes.
 */

/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again, or that we fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static changes_t follow_load_mem_chain(track_load_env_t *env, ir_node *start)
{
	ir_node *load      = env->load;
	ir_type *load_type = get_Load_type(load);
	unsigned load_size = get_mode_size_bytes(get_Load_mode(load));

	ir_node  *node = start;
	changes_t res  = NO_CHANGES;
	for (;;) {
		ldst_info_t *node_info = (ldst_info_t*)get_irn_link(node);

		if (is_Store(node)) {
			/* first try load-after-store */
			changes_t changes = try_load_after_store(env, node);
			if (changes != NO_CHANGES)
				return changes | res;

			/* check if we can pass through this store */
			const ir_node *ptr        = get_Store_ptr(node);
			const ir_node *value      = get_Store_value(node);
			const ir_type *store_type = get_Store_type(node);
			unsigned       store_size = get_mode_size_bytes(get_irn_mode(value));
			ir_alias_relation rel = get_alias_relation(
				ptr, store_type, store_size,
				env->ptr, load_type, load_size);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			node = skip_Proj(get_Store_mem(node));
		} else if (is_Load(node)) {
			/* try load-after-load */
			changes_t changes = try_load_after_load(env, node);
			if (changes != NO_CHANGES)
				return changes | res;
			/* we can skip any load */
			node = skip_Proj(get_Load_mem(node));
		} else if (is_CopyB(node)) {
			/*
			 * We cannot replace the Load with another
			 * Load from the CopyB's source directly,
			 * because there may be Stores in between,
			 * destroying the source data. However, we can
			 * use the source address from this point
			 * onwards for further optimizations.
			 */
			bool updated = try_update_ptr_CopyB(env, node);
			if (updated) {
				/* Special case: If new_ptr points to
				 * a constant, we *can* replace the
				 * Load immediately.
				 */
				res |= NODES_CREATED;
				ir_mode *load_mode = get_Load_mode(load);
				ir_node *new_value = predict_load(env->ptr, load_mode);
				if (new_value != NULL)
					return replace_load(load, new_value) | res;
			}

			/* check aliasing with the CopyB */
			ir_node *dst  = get_CopyB_dst(node);
			ir_type *type = get_CopyB_type(node);
			unsigned size = get_type_size_bytes(type);
			ir_alias_relation rel = get_alias_relation(
				dst, type, size,
				env->ptr, load_type, load_size);
			/* possible alias => we cannot continue */
			if (rel != ir_no_alias)
				break;
			node = skip_Proj(get_CopyB_mem(node));
		} else if (is_irn_const_memory(node)) {
			node = skip_Proj(get_memop_mem(node));
		} else {
			/* be conservative about any other node and assume aliasing
			 * that changes the loaded value */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(node_info))
			break;
		MARK_NODE(node_info);
	}

	if (is_Sync(node)) {
		/* handle all Sync predecessors */
		foreach_irn_in(node, i, in) {
			ir_node *skipped = skip_Proj(in);
			res |= follow_load_mem_chain(env, skipped);
			if ((res & ~NODES_CREATED) != NO_CHANGES)
				break;
		}
	}
	return res;
}

ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
	ir_mode  *c_mode = get_irn_mode(c);
	ir_mode  *l_mode = get_Load_mode(load);
	ir_node  *block  = get_nodes_block(load);
	dbg_info *dbgi   = get_irn_dbg_info(load);
	ir_node  *res    = duplicate_subgraph(dbgi, c, block);

	if (c_mode != l_mode) {
		/* check if the mode matches OR can be easily converted into it */
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* copy the value from the const code irg and cast it */
			res = new_rd_Conv(dbgi, block, res, l_mode);
		} else {
			return NULL;
		}
	}
	return res;
}

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static changes_t optimize_load(ir_node *load)
{
	const ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	changes_t          res  = NO_CHANGES;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return NO_CHANGES;

	/* the address of the load to be optimized */
	ir_node *ptr = get_Load_ptr(load);

	/* The mem of the Load. Must still be returned after optimization. */
	ir_node *mem = get_Load_mem(load);

	if (info->projs[pn_Load_res] == NULL
	    && info->projs[pn_Load_X_except] == NULL) {
		assert(info->projs[pn_Load_X_regular] == NULL);
		/* the value is never used and we don't care about exceptions, remove */
		exchange(info->projs[pn_Load_M], mem);
		kill_and_reduce_usage(load);
		return res | DF_CHANGED;
	}

	track_load_env_t env;
	get_base_and_offset(ptr, &env.base_offset);
	env.ptr = ptr;

	/* Check, if the base address of this load is used more than once.
	 * If not, we won't find another store/load/CopyB anyway.
	 * TODO: can we miss values with multiple users in between? */
	if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(env.base_offset.base) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * Load again, or that we fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	env.load = load;
	res = follow_load_mem_chain(&env, skip_Proj(mem));
	return res;
}

/**
 * Check whether small is a part of large (starting at same address).
 */
static bool is_partially_same(ir_node *small, ir_node *large)
{
	const ir_mode *sm = get_irn_mode(small);
	const ir_mode *lm = get_irn_mode(large);

	/* FIXME: Check endianness */
	return is_Conv(small) && get_Conv_op(small) == large
	    && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
	    && get_mode_arithmetic(sm) == irma_twos_complement
	    && get_mode_arithmetic(lm) == irma_twos_complement;
}
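
/* Illustrative example (hypothetical modes): with
 *   large = a value in a 32-bit two's-complement mode
 *   small = Conv(large) into a 16-bit two's-complement mode
 * is_partially_same(small, large) returns true: small is the truncated part
 * of large, so storing small over a location that already holds large writes
 * nothing new (modulo the endianness concern in the FIXME above), and the
 * callers below use this to drop one of the two Stores.
 */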

/**
 * follow the memory chain as long as there are only Loads and alias free
 * Stores.
 * INC_MASTER() must be called before dive into
 */
static changes_t follow_store_mem_chain(ir_node *store, ir_node *start,
                                        bool had_split)
{
	changes_t    res   = NO_CHANGES;
	ldst_info_t *info  = (ldst_info_t*)get_irn_link(store);
	ir_node     *ptr   = get_Store_ptr(store);
	ir_node     *mem   = get_Store_mem(store);
	ir_node     *value = get_Store_value(store);
	ir_type     *type  = get_Store_type(store);
	unsigned     size  = get_mode_size_bytes(get_irn_mode(value));
	ir_node     *block = get_nodes_block(store);

	ir_node *node = start;
	while (node != store) {
		ldst_info_t *node_info = (ldst_info_t*)get_irn_link(node);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C where the
		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the size of the mode that is written is bigger than or
		 * equal to the size of the old one, the old value is completely
		 * overwritten and can be killed ...
		 */
		if (is_Store(node) && !had_split && get_Store_ptr(node) == ptr &&
		    get_nodes_block(node) == block) {
			/*
			 * a Store after a Store in the same Block -- a write after write.
			 */

			/*
			 * We may remove the first Store, if the old value is completely
			 * overwritten or the old value is a part of the new value,
			 * and if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(node) != volatility_is_volatile
			    && !node_info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(node);
				ir_mode *predmode  = get_irn_mode(predvalue);
				unsigned predsize  = get_mode_size_bytes(predmode);

				if (predsize <= size || is_partially_same(predvalue, value)) {
					DBG_OPT_WAW(node, store);
					DB((dbg, LEVEL_1, "  killing store %+F (override by %+F)\n",
					    node, store));
					exchange(node_info->projs[pn_Store_M], get_Store_mem(node));
					kill_and_reduce_usage(node);
					return DF_CHANGED;
				}
			}

			/*
			 * We may remove the Store, if the old value already contains
			 * the new value, and if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(store) != volatility_is_volatile
			        && !info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(node);

				if (is_partially_same(value, predvalue)) {
					DBG_OPT_WAW(node, store);
					DB((dbg, LEVEL_1, "  killing store %+F (override by %+F)\n",
					    node, store));
					exchange(info->projs[pn_Store_M], mem);
					kill_and_reduce_usage(store);
					return DF_CHANGED;
				}
			}
		} else if (is_Load(node) && get_Load_ptr(node) == ptr &&
		           value == node_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just loaded from the same address
			 * -- a write after read.
			 * We may remove the Store, if it does not have an exception
			 * handler.
			 */
			if (!info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, node);
				DB((dbg, LEVEL_1,
				    "  killing store %+F (read %+F from same address)\n",
				    store, node));
				exchange(info->projs[pn_Store_M], mem);
				kill_and_reduce_usage(store);
				return DF_CHANGED;
			}
		}

		if (is_Store(node)) {
			/* check if we can pass through this store */
			ir_node *store_ptr   = get_Store_ptr(node);
			ir_node *store_value = get_Store_value(node);
			unsigned store_size  = get_mode_size_bytes(get_irn_mode(store_value));
			ir_type *store_type  = get_Store_type(node);
			ir_alias_relation rel = get_alias_relation(
				store_ptr, store_type, store_size,
				ptr, type, size);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			node = skip_Proj(get_Store_mem(node));
		} else if (is_Load(node)) {
			ir_node *load_ptr  = get_Load_ptr(node);
			ir_type *load_type = get_Load_type(node);
			unsigned load_size = get_mode_size_bytes(get_Load_mode(node));
			ir_alias_relation rel = get_alias_relation(
				load_ptr, load_type, load_size,
				ptr, type, size);
			if (rel != ir_no_alias)
				break;

			node = skip_Proj(get_Load_mem(node));
		} else if (is_CopyB(node)) {
			ir_node *copyb_src  = get_CopyB_src(node);
			ir_type *copyb_type = get_CopyB_type(node);
			unsigned copyb_size = get_type_size_bytes(copyb_type);
			ir_alias_relation src_rel = get_alias_relation(
				copyb_src, copyb_type, copyb_size,
				ptr, type, size);
			if (src_rel != ir_no_alias)
				break;
			ir_node *copyb_dst = get_CopyB_dst(node);
			ir_alias_relation dst_rel = get_alias_relation(
				copyb_dst, copyb_type, copyb_size,
				ptr, type, size);
			if (dst_rel != ir_no_alias)
				break;
		} else {
			/* we can only pass Load, Store and CopyB nodes */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(node_info))
			break;
		MARK_NODE(node_info);
	}

	if (is_Sync(node)) {
		/* handle all Sync predecessors */
		foreach_irn_in(node, i, in) {
			ir_node *skipped = skip_Proj(in);
			res |= follow_store_mem_chain(store, skipped, true);
			if (res != NO_CHANGES)
				break;
		}
	}
	return res;
}

static ir_entity *find_entity(ir_node *ptr)
{
	switch (get_irn_opcode(ptr)) {
	case iro_Address:
		return get_Address_entity(ptr);

	case iro_Offset:
		return get_Offset_entity(ptr);