/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 */
#include <string.h>

#include "array.h"
#include "be.h"
#include "dbginfo_t.h"
#include "debug.h"
#include "entity_t.h"
#include "panic.h"
#include "ircons_t.h"
#include "iredges.h"
#include "irflag_t.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgraph_t.h"
#include "irgwalk.h"
#include "irhooks.h"
#include "irmemory.h"
#include "irmode_t.h"
#include "irnode_t.h"
#include "irnodehashmap.h"
#include "iropt_dbg.h"
#include "iropt_t.h"
#include "iroptimize.h"
#include "irtools.h"
#include "set.h"
#include "tv_t.h"
#include "type_t.h"
#include "util.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#define MAX_PROJ MAX(MAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)

typedef enum changes_t {
	NO_CHANGES = 0,
	DF_CHANGED = (1 << 0), /**< data flow changed */
	CF_CHANGED = (1 << 1), /**< control flow changed */
	/** nodes have been created but are not reachable. This is a bad hack
	 * try hard to avoid it! */
	NODES_CREATED = (1 << 2),
} changes_t;

/**
 * walker environment
 */
typedef struct walk_env_t {
	struct obstack obst;    /**< list of all stores */
	changes_t      changes; /**< a bitmask of graph changes */
} walk_env_t;

/** A Load/Store info. */
typedef struct ldst_info_t {
	ir_node  *projs[MAX_PROJ+1]; /**< list of Proj's of this node */
	ir_node  *exc_block;         /**< the exception block if available */
	int      exc_idx;            /**< predecessor index in exception block */
	unsigned visited;            /**< visited counter for breaking loops */
} ldst_info_t;

typedef struct base_offset_t {
	ir_node *base;
	long     offset;
} base_offset_t;

typedef struct track_load_env_t {
	ir_node      *load;
	base_offset_t base_offset;
	ir_node      *ptr; /* deprecated: alternative representation of
	                      base_offset */
} track_load_env_t;

/**
 * flags for control flow.
 */
typedef enum block_flags_t {
	BLOCK_HAS_COND = (1 << 0), /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = (1 << 1), /**< Block has exceptional control flow */
} block_flags_t;

/**
 * a Block info.
 */
typedef struct block_info_t {
	block_flags_t flags;  /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) (info)->visited >= master_visited
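
/*
 * Usage sketch for the visited counter (illustrative, mirroring what
 * follow_load_mem_chain() and follow_store_mem_chain() do below): every walk
 * over a memory chain starts with INC_MASTER(); nodes seen during that walk
 * are tagged via MARK_NODE(info) and a repeated visit is detected with
 * NODE_VISITED(info), which breaks out of cycles:
 *
 *   INC_MASTER();
 *   for (;;) {
 *       if (NODE_VISITED(node_info))
 *           break;
 *       MARK_NODE(node_info);
 *       node = ...next node in the memory chain...;
 *   }
 */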

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
	ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
	if (info == NULL) {
		info = OALLOCZ(obst, ldst_info_t);
		set_irn_link(node, info);
	}
	return info;
}

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
	block_info_t *info = (block_info_t*)get_irn_link(node);
	if (info == NULL) {
		info = OALLOCZ(obst, block_info_t);
		set_irn_link(node, info);
	}
	return info;
}

/**
 * update the projection info for a Load/Store
 */
static changes_t update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);
	assert(0 <= nr && nr <= MAX_PROJ);

	if (info->projs[nr] != NULL) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	} else {
		info->projs[nr] = proj;
		return NO_CHANGES;
	}
}

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static void update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL);
	info->exc_block = block;
	info->exc_idx   = pos;
}

/**
 * walker, collects all Proj/Load/Store/Call/CopyB nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	walk_env_t *wenv   = (walk_env_t *)env;
	unsigned    opcode = get_irn_opcode(node);

	if (opcode == iro_Proj) {
		ir_node *pred = get_Proj_pred(node);
		opcode = get_irn_opcode(pred);

		if (opcode == iro_Load || opcode == iro_Store || opcode == iro_Call) {
			ldst_info_t *ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			/*
			 * Place the Proj's to the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			ir_node *blk      = get_nodes_block(node);
			ir_node *pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (opcode == iro_Block) {
		for (int i = get_Block_n_cfgpreds(node); i-- > 0; ) {
			bool     is_exc = false;
			ir_node *proj   = get_Block_cfgpred(node, i);
			ir_node *pred   = proj;

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = is_x_except_Proj(proj);
			}
			ir_node      *pred_block = get_nodes_block(pred);
			block_info_t *bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			opcode = get_irn_opcode(pred);
			if (is_exc && (opcode == iro_Load || opcode == iro_Store
			               || opcode == iro_Call)) {
				ldst_info_t *ldst_info = get_ldst_info(pred, &wenv->obst);

				update_exc(ldst_info, node, i);
			}
		}
	} else if (is_memop(node)) {
		/* Just initialize a ldst_info */
		(void) get_ldst_info(node, &wenv->obst);
	}
}

/* forward */
static void reduce_node_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its users.
 */
static void handle_load_update(ir_node *load)
{
	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	if (!info->projs[pn_Load_res] && !info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular]) {
			ir_node *jmp = new_r_Jmp(get_nodes_block(load));
			exchange(info->projs[pn_Load_X_regular], jmp);
		}
		kill_node(load);
		reduce_node_usage(ptr);
	}
}

/**
 * A use of a node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_node_usage(ir_node *ptr)
{
	if (!is_Proj(ptr))
		return;
	if (get_irn_n_edges(ptr) > 0)
		return;

	/* this Proj is dead now */
	ir_node *pred = get_Proj_pred(ptr);
	if (is_Load(pred)) {
		ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
		info->projs[get_Proj_proj(ptr)] = NULL;

		/* this node lost its result proj, handle that */
		handle_load_update(pred);
	}
}

/**
 * Kill a Load or Store and all other nodes which are not needed after
 * it has been killed.
 */
static void kill_and_reduce_usage(ir_node *node)
{
	ir_node *ptr;
	ir_node *value;
	switch (get_irn_opcode(node)) {
	case iro_Load:
		ptr   = get_Load_ptr(node);
		value = NULL;
		break;
	case iro_Store:
		ptr   = get_Store_ptr(node);
		value = get_Store_value(node);
		break;
	default:
		panic("Cannot handle node %+F", node);
	}

	kill_node(node);
	reduce_node_usage(ptr);
	if (value != NULL) {
		reduce_node_usage(value);
	}
}

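/**
 * Decompose the address @p ptr into a base node plus a constant byte offset
 * by walking down Add/Sub/Sel/Member chains with constant operands.
 *
 * Illustrative example: for an entity placed at byte offset 8 inside its
 * owner, ptr = Add(Member(base, entity), Const 4) yields
 * base_offset->base = base and base_offset->offset = 12.
 */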
static void get_base_and_offset(ir_node *ptr, base_offset_t *base_offset)
{
	/* TODO: long might not be enough, we should probably use some tarval
	 * thingy, or at least detect long overflows and abort */
	long     offset = 0;
	ir_mode *mode   = get_irn_mode(ptr);
	for (;;) {
		if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset += get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset -= get_tarval_long(get_Const_tarval(r));
			ptr     = l;
		} else if (is_Sel(ptr)) {
			ir_node *index = get_Sel_index(ptr);
			if (!is_Const(index))
				break;

			ir_type *type         = get_Sel_type(ptr);
			ir_type *element_type = get_array_element_type(type);
			if (get_type_state(element_type) != layout_fixed)
				break;

			/* TODO: may overflow here */
			int size = get_type_size_bytes(element_type);
			offset += size * get_tarval_long(get_Const_tarval(index));
			ptr = get_Sel_ptr(ptr);
		} else if (is_Member(ptr)) {
			ir_entity *entity = get_Member_entity(ptr);
			ir_type   *owner  = get_entity_owner(entity);
			if (get_type_state(owner) != layout_fixed)
				break;
			offset += get_entity_offset(entity);
			ptr = get_Member_ptr(ptr);
		} else
			break;
	}

	base_offset->offset = offset;
	base_offset->base   = ptr;
}

/**
 * This is called for load-after-load and load-after-store.
 * If possible the value of the previous load/store is transformed in a way
 * so the 2nd load can be left out/replaced by arithmetic on the previous
 * value.
 */
static ir_node *transform_previous_value(ir_mode *const load_mode,
	const base_offset_t *const load_bo, ir_mode *const prev_mode,
	const base_offset_t *const prev_bo, ir_node *const prev_value,
	ir_node *const block)
{
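	/*
	 * Worked example (assuming the usual byte sizes): a 32-bit Store wrote
	 * at offset 0 and the current Load reads 8 bits at offset 1, so
	 * delta = 1. On a little endian target the code below produces
	 * Conv(Shr(prev_value, 8)); on a big endian target the shift amount is
	 * (4 - 1 - 1) * 8 = 16 bits instead.
	 */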
	if (load_bo->base != prev_bo->base)
		return NULL;

	/* ensure the load value is completely contained in the previous one */
	long delta = load_bo->offset - prev_bo->offset;
	if (delta < 0)
		return NULL;
	long load_mode_len = get_mode_size_bytes(load_mode);
	long prev_mode_len = get_mode_size_bytes(prev_mode);
	if (delta+load_mode_len > prev_mode_len)
		return NULL;

	/* simple case: previous value has the same mode */
	if (load_mode == prev_mode)
		return prev_value;

	/* two's complement values can be transformed with bitops */
	ir_mode_arithmetic prev_arithmetic = get_mode_arithmetic(prev_mode);
	ir_mode_arithmetic load_arithmetic = get_mode_arithmetic(load_mode);
	if (prev_arithmetic == irma_twos_complement &&
		load_arithmetic == irma_twos_complement) {
		/* produce a shift to adjust offset delta */
		unsigned const shift = be_get_backend_param()->byte_order_big_endian
			? prev_mode_len - load_mode_len - delta
			: delta;
		ir_node *new_value = prev_value;
		if (shift != 0) {
			ir_graph *const irg   = get_Block_irg(block);
			ir_node  *const cnst  = new_r_Const_long(irg, mode_Iu, shift * 8);
			new_value = new_r_Shr(block, new_value, cnst, prev_mode);
		}

		return new_r_Conv(block, new_value, load_mode);
	} else if (prev_arithmetic != load_arithmetic
	           && load_mode_len == prev_mode_len) {
		return new_r_Bitcast(block, prev_value, load_mode);
	}

	/* we would need some kind of bitcast to handle non two's complement values */
	return NULL;
}

static changes_t replace_load(ir_node *load, ir_node *new_value)
{
	const ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	changes_t res = NO_CHANGES;
	/* no exception */
	if (info->projs[pn_Load_X_except] != NULL) {
		ir_graph *irg = get_irn_irg(load);
		ir_node  *bad = new_r_Bad(irg, mode_X);
		exchange(info->projs[pn_Load_X_except], bad);
		res |= CF_CHANGED;

		assert(info->projs[pn_Load_X_regular] != NULL);
		ir_node *jmp = new_r_Jmp(get_nodes_block(load));
		exchange(info->projs[pn_Load_X_regular], jmp);
	}

	/* loads without user should already be optimized away */
	assert(info->projs[pn_Load_res] != NULL);
	exchange(info->projs[pn_Load_res], new_value);

	kill_and_reduce_usage(load);
	return res | DF_CHANGED;
}

/**
 * Conservatively checks that @p op can only be reached through the regular
 * (X_regular) path of @p prev_op. Currently this is only assumed to hold
 * when @p prev_op cannot throw an exception at all.
 */
static bool on_regular_path(ir_node *op, ir_node *prev_op)
{
	/* TODO: create a real test, for now we just make sure the previous node
	 * does not throw an exception. */
	(void)op;
	return !is_fragile_op(prev_op) || !ir_throws_exception(prev_op);
}

static changes_t try_load_after_store(track_load_env_t *env, ir_node *store)
{
	ir_node *const load = env->load;
	if (!on_regular_path(load, store))
		return NO_CHANGES;

	ir_node      *store_ptr = get_Store_ptr(store);
	base_offset_t base_offset;
	get_base_and_offset(store_ptr, &base_offset);

	ir_mode *const load_mode   = get_Load_mode(load);
	ir_mode *const store_mode  = get_irn_mode(get_Store_value(store));
	ir_node *const store_value = get_Store_value(store);
	ir_node *const block       = get_nodes_block(load);

	/* load value completely contained in previous store? */
	ir_node *const new_value
		= transform_previous_value(load_mode, &env->base_offset, store_mode,
		                           &base_offset, store_value, block);
	if (new_value == NULL)
		return NO_CHANGES;

	DBG_OPT_RAW(load, new_value);
	return replace_load(load, new_value);
}

static changes_t try_load_after_load(track_load_env_t *env, ir_node *prev_load)
{
	ir_node *const load = env->load;
	if (!on_regular_path(load, prev_load))
		return NO_CHANGES;

	const ldst_info_t *info = (ldst_info_t*)get_irn_link(prev_load);
	ir_node *const prev_value = info->projs[pn_Load_res];
	/* the other load is unused and will get removed later anyway */
	if (prev_value == NULL)
		return NO_CHANGES;

	ir_node *const prev_ptr = get_Load_ptr(prev_load);
	base_offset_t base_offset;
	get_base_and_offset(prev_ptr, &base_offset);
	ir_mode *const load_mode = get_Load_mode(load);
	ir_mode *const prev_mode = get_Load_mode(prev_load);
	ir_node *const block     = get_nodes_block(load);

	/* load value completely contained in previous load? */
	ir_node *const new_value
		= transform_previous_value(load_mode, &env->base_offset, prev_mode,
		                           &base_offset, prev_value, block);
	if (new_value == NULL)
		return NO_CHANGES;

	DBG_OPT_RAR(prev_load, load);
	return replace_load(load, new_value);
}

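/**
 * The tracked Load reads from memory that is (possibly) written by the
 * CopyB @p copyb. If the loaded bytes are completely contained in the
 * copied block, redirect the tracked address to the corresponding bytes of
 * the CopyB source so the walk can continue looking for their origin.
 *
 * Sketch: for CopyB(dst, src, 16 bytes) and a 4-byte Load from dst+8
 * (delta = 8), the tracked address becomes src+8.
 */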
static bool try_update_ptr_CopyB(track_load_env_t *env, ir_node *copyb)
{
	ir_node *copyb_dst = get_CopyB_dst(copyb);
	base_offset_t dst_base_offset;
	get_base_and_offset(copyb_dst, &dst_base_offset);
	if (dst_base_offset.base != env->base_offset.base)
		return false;

	/* see if bytes loaded are fully contained in the CopyB */
	if (env->base_offset.offset < dst_base_offset.offset)
		return false;

	ir_type *copyb_type = get_CopyB_type(copyb);
	long     n_copy     = get_type_size_bytes(copyb_type);
	ir_node *copyb_src  = get_CopyB_src(copyb);
	base_offset_t src_base_offset;
	get_base_and_offset(copyb_src, &src_base_offset);

	long     delta     = env->base_offset.offset - dst_base_offset.offset;
	ir_node *load      = env->load;
	ir_mode *load_mode = get_Load_mode(load);
	long     load_size = get_mode_size_bytes(load_mode);
	if (delta + load_size > n_copy)
		return false;

	/* track src input */
	env->base_offset.base   = src_base_offset.base;
	env->base_offset.offset = src_base_offset.offset + delta;

	/*
	 * Everything is OK, we can replace
	 *   ptr = (load_base_ptr + load_offset)
	 * with
	 *   new_load_ptr = (src_base_ptr + delta)
	 */
	ir_graph *irg          = get_irn_irg(load);
	ir_node  *block        = get_nodes_block(load);
	ir_mode  *mode_ref     = get_irn_mode(src_base_offset.base);
	ir_mode  *mode_ref_int = get_reference_mode_unsigned_eq(mode_ref);
	ir_node  *cnst         = new_r_Const_long(irg, mode_ref_int,
	                                          src_base_offset.offset + delta);
	ir_node  *new_load_ptr = new_r_Add(block, src_base_offset.base, cnst, mode_P);
	env->ptr = new_load_ptr;
	return true;
}

/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * load again, or that we run into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into the chain.
 */
static changes_t follow_load_mem_chain(track_load_env_t *env, ir_node *start)
{
	ir_node *load      = env->load;
	ir_mode *load_mode = get_Load_mode(load);

	ir_node  *node = start;
	changes_t res  = NO_CHANGES;
	for (;;) {
		ldst_info_t *node_info = (ldst_info_t*)get_irn_link(node);

		if (is_Store(node)) {
			/* first try load-after-store */
			changes_t changes = try_load_after_store(env, node);
			if (changes != NO_CHANGES)
				return changes | res;

			/* check if we can pass through this store */
			const ir_node *ptr       = get_Store_ptr(node);
			const ir_node *value     = get_Store_value(node);
			const ir_mode *mode      = get_irn_mode(value);
			const ir_type *type      = get_type_for_mode(mode);
			const ir_type *load_type = get_type_for_mode(load_mode);
			ir_alias_relation rel = get_alias_relation(ptr, type, env->ptr,
			                                           load_type);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			node = skip_Proj(get_Store_mem(node));
		} else if (is_Load(node)) {
			/* try load-after-load */
			changes_t changes = try_load_after_load(env, node);
			if (changes != NO_CHANGES)
				return changes | res;
			/* we can skip any load */
			node = skip_Proj(get_Load_mem(node));
		} else if (is_CopyB(node)) {
			/*
			 * We cannot replace the Load with another
			 * Load from the CopyB's source directly,
			 * because there may be Stores in between,
			 * destroying the source data. However, we can
			 * use the source address from this point
			 * onwards for further optimizations.
			 */
			bool updated = try_update_ptr_CopyB(env, node);
			if (updated) {
				/* Special case: If new_ptr points to
				 * a constant, we *can* replace the
				 * Load immediately.
				 */
				res |= NODES_CREATED;
				ir_node *new_value = predict_load(env->ptr, load_mode);
				if (new_value != NULL)
					return replace_load(load, new_value) | res;
			}

			/* check aliasing with the CopyB */
			ir_node *dst       = get_CopyB_dst(node);
			ir_type *type      = get_CopyB_type(node);
			ir_type *load_type = get_type_for_mode(load_mode);
			ir_alias_relation rel = get_alias_relation(dst, type, env->ptr,
			                                           load_type);
			/* possible alias => we cannot continue */
			if (rel != ir_no_alias)
				break;
			node = skip_Proj(get_CopyB_mem(node));
		} else if (is_irn_const_memory(node)) {
			node = skip_Proj(get_memop_mem(node));
		} else {
			/* be conservative about any other node and assume aliasing
			 * that changes the loaded value */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(node_info))
			break;
		MARK_NODE(node_info);
	}

	if (is_Sync(node)) {
		/* handle all Sync predecessors */
		foreach_irn_in(node, i, in) {
			ir_node *skipped = skip_Proj(in);
			res |= follow_load_mem_chain(env, skipped);
			if ((res & ~NODES_CREATED) != NO_CHANGES)
				break;
		}
	}
	return res;
}
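
/**
 * Check whether the Load @p load can be replaced by (a copy of) the constant
 * expression @p c. Returns the replacement value, possibly wrapped in a Conv
 * to the Load mode, or NULL if the modes cannot be reinterpreted.
 */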
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
	ir_mode  *c_mode = get_irn_mode(c);
	ir_mode  *l_mode = get_Load_mode(load);
	ir_node  *block  = get_nodes_block(load);
	dbg_info *dbgi   = get_irn_dbg_info(load);
	ir_node  *res    = duplicate_subgraph(dbgi, c, block);

	if (c_mode != l_mode) {
		/* check, if the mode matches OR can be easily converted into it */
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* copy the value from the const code irg and cast it */
			res = new_rd_Conv(dbgi, block, res, l_mode);
		} else {
			return NULL;
		}
	}
	return res;
}

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static changes_t optimize_load(ir_node *load)
{
	const ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
	changes_t          res  = NO_CHANGES;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return NO_CHANGES;

	/* the address of the load to be optimized */
	ir_node *ptr = get_Load_ptr(load);

	/* The mem of the Load. Must still be returned after optimization. */
	ir_node *mem = get_Load_mem(load);

	if (info->projs[pn_Load_res] == NULL
	    && info->projs[pn_Load_X_except] == NULL) {
		assert(info->projs[pn_Load_X_regular] == NULL);
		/* the value is never used and we don't care about exceptions, remove */
		exchange(info->projs[pn_Load_M], mem);
		kill_and_reduce_usage(load);
		return res | DF_CHANGED;
	}

	track_load_env_t env;
	get_base_and_offset(ptr, &env.base_offset);
	env.ptr = ptr;

	/* Check, if the base address of this load is used more than once.
	 * If not, we won't find another store/load/CopyB anyway.
	 * TODO: can we miss values with multiple users in between? */
	if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(env.base_offset.base) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * load again, or that we run into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	env.load = load;
	res = follow_load_mem_chain(&env, skip_Proj(mem));
	return res;
}

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static bool is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}

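/*
 * Illustration for is_partially_same() below: it holds for
 * small = Conv(large) truncating a 32-bit two's complement value to 16 bits.
 * On a little endian target such a Store writes a subset of the bytes that a
 * Store of the full value writes to the same address, which the
 * write-after-write checks in follow_store_mem_chain() exploit.
 */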
/**
 * Check whether small is a part of large (starting at same address).
 */
static bool is_partially_same(ir_node *small, ir_node *large)
{
	const ir_mode *sm = get_irn_mode(small);
	const ir_mode *lm = get_irn_mode(large);

	/* FIXME: Check endianness */
	return is_Conv(small) && get_Conv_op(small) == large
	    && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
	    && get_mode_arithmetic(sm) == irma_twos_complement
	    && get_mode_arithmetic(lm) == irma_twos_complement;
}

/**
 * follow the memory chain as long as there are only Loads and alias free
 * Stores.
 * INC_MASTER() must be called before diving into the chain.
 */
static changes_t follow_store_mem_chain(ir_node *store, ir_node *start,
                                        bool had_split)
{
	changes_t    res   = NO_CHANGES;
	ldst_info_t *info  = (ldst_info_t*)get_irn_link(store);
	ir_node     *ptr   = get_Store_ptr(store);
	ir_node     *mem   = get_Store_mem(store);
	ir_node     *value = get_Store_value(store);
	ir_mode     *mode  = get_irn_mode(value);
	ir_type     *type  = get_type_for_mode(mode);
	ir_node     *block = get_nodes_block(store);

	ir_node *node = start;
	while (node != store) {
		ldst_info_t *node_info = (ldst_info_t*)get_irn_link(node);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C where the
		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the size of the mode that is written is bigger than or
		 * equal to the size of the old one, the old value is completely
		 * overwritten and can be killed ...
		 */
		if (is_Store(node) && !had_split && get_Store_ptr(node) == ptr &&
		    get_nodes_block(node) == block) {
			/*
			 * a Store after a Store in the same Block -- a write after write.
			 */

			/*
			 * We may remove the first Store, if the old value is completely
			 * overwritten or the old value is a part of the new value,
			 * and if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(node) != volatility_is_volatile
			    && !node_info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(node);
				ir_mode *predmode  = get_irn_mode(predvalue);

				if (is_completely_overwritten(predmode, mode)
				    || is_partially_same(predvalue, value)) {
					DBG_OPT_WAW(node, store);
					DB((dbg, LEVEL_1, "  killing store %+F (override by %+F)\n",
					    node, store));
					exchange(node_info->projs[pn_Store_M], get_Store_mem(node));
					kill_and_reduce_usage(node);
					return DF_CHANGED;
				}
			}

			/*
			 * We may remove the Store, if the old value already contains
			 * the new value, and if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(store) != volatility_is_volatile
			        && !info->projs[pn_Store_X_except]) {
				ir_node *predvalue = get_Store_value(node);

				if (is_partially_same(value, predvalue)) {
					DBG_OPT_WAW(node, store);
					DB((dbg, LEVEL_1, "  killing store %+F (override by %+F)\n",
					    node, store));
					exchange(info->projs[pn_Store_M], mem);
					kill_and_reduce_usage(store);
					return DF_CHANGED;
				}
			}
		} else if (is_Load(node) && get_Load_ptr(node) == ptr &&
		           value == node_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value just loaded from the same address
			 * -- a write after read.
			 * We may remove the Store, if it does not have an exception
			 * handler.
			 */
			if (!info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, node);
				DB((dbg, LEVEL_1,
				    "  killing store %+F (read %+F from same address)\n",
				    store, node));
				exchange(info->projs[pn_Store_M], mem);
				kill_and_reduce_usage(store);
				return DF_CHANGED;
			}
		}

		if (is_Store(node)) {
			/* check if we can pass through this store */
			ir_node *store_ptr   = get_Store_ptr(node);
			ir_node *store_value = get_Store_value(node);
			ir_type *store_type  = get_type_for_mode(get_irn_mode(store_value));
			ir_alias_relation rel
				= get_alias_relation(store_ptr, store_type, ptr, type);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			node = skip_Proj(get_Store_mem(node));
		} else if (is_Load(node)) {
			ir_node *load_ptr  = get_Load_ptr(node);
			ir_type *load_type = get_type_for_mode(get_Load_mode(node));
			ir_alias_relation rel
				= get_alias_relation(load_ptr, load_type, ptr, type);
			if (rel != ir_no_alias)
				break;

			node = skip_Proj(get_Load_mem(node));
		} else if (is_CopyB(node)) {
			ir_node *copyb_src  = get_CopyB_src(node);
			ir_type *copyb_type = get_CopyB_type(node);
			ir_alias_relation src_rel
				= get_alias_relation(copyb_src, copyb_type, ptr, type);
			if (src_rel != ir_no_alias)
				break;
			ir_node *copyb_dst = get_CopyB_dst(node);
			ir_alias_relation dst_rel
				= get_alias_relation(copyb_dst, copyb_type, ptr, type);
			if (dst_rel != ir_no_alias)
				break;
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(node_info))
			break;
		MARK_NODE(node_info);
	}

	if (is_Sync(node)) {
		/* handle all Sync predecessors */
		foreach_irn_in(node, i, in) {
			ir_node *skipped = skip_Proj(in);
			res |= follow_store_mem_chain(store, skipped, true);
			if (res != NO_CHANGES)
				break;
		}
	}
	return res;
}
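
/**
 * Try to determine the entity that the address expression @p ptr refers to,
 * e.g. the entity of an Address node or a frame entity addressed through
 * Member/Add/Sub chains. Returns NULL if no entity can be determined.
 */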
static ir_entity *find_entity(ir_node *ptr)
{
	switch (get_irn_opcode(ptr)) {
	case iro_Address:
		return get_Address_entity(ptr);

	case iro_Offset:
		return get_Offset_entity(ptr);

	case iro_Member: {
		ir_node *pred = get_Member_ptr(ptr);
		if (get_irg_frame(get_irn_irg(ptr)) == pred)
			return get_Member_entity(ptr);

		return find_entity(pred);
	}
	case iro_Sub:
	case iro_Add: {
		ir_node *left = get_binop_left(ptr);
		if (mode_is_reference(get_irn_mode(left)))
			return find_entity(left);
		ir_node *right = get_binop_right(ptr);
		if (mode_is_reference(get_irn_mode(right)))
			return find_entity(right);
		return NULL;
	}
	default:
		return NULL;
	}
}

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static changes_t optimize_store(ir_node *store)
{
	if (get_Store_volatility(store) == volatility_is_volatile)
		return NO_CHANGES;

	/* Check, if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	ir_node *ptr = get_Store_ptr(store);
	if (get_irn_n_edges(ptr) <= 1)
		return NO_CHANGES;

	ir_node *mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads */
	INC_MASTER();

	return follow_store_mem_chain(store, skip_Proj(mem), false);
}
Michael Beck's avatar
Michael Beck committed
960

961
962
963
964
/**
 * Checks whether @c ptr of type @c ptr_type lies completely within an
 * object of type @c struct_type starting at @c struct_ptr;
 */
Matthias Braun's avatar
Matthias Braun committed
965
966
static bool ptr_is_in_struct(ir_node *ptr, ir_type *ptr_type,
                             ir_node *struct_ptr, ir_type *struct_type)
967
{
968
969
970
971
972
	base_offset_t base_offset;
	get_base_and_offset(ptr, &base_offset);
	long      ptr_offset    = base_offset.offset;
	base_offset_t struct_offset;
	get_base_and_offset(struct_ptr, &struct_offset);
	unsigned  ptr_size      = get_type_size_bytes(ptr_type);
	unsigned  struct_size   = get_type_size_bytes(struct_type);

	return base_offset.base == struct_offset.base &&
		ptr_offset >= struct_offset.offset &&
		ptr_offset + ptr_size <= struct_offset.offset + struct_size;
}

/**
 * Tries to optimize @c copyb. This function handles the following
 * cases:
 * - A previous Store that lies completely within @c copyb's destination
 *   will be deleted, unless it modifies the @c copyb's source.
 * - If a previous CopyB writes to @c copyb's source, @c copyb will
 *   read from the previous CopyB's source if possible. Cases where
 *   the CopyB nodes are offset against each other are not handled.
 */
static changes_t follow_copyb_mem_chain(ir_node *copyb, ir_node *start,
                                        bool had_split)
{
	changes_t res       = NO_CHANGES;
	ir_node  *src       = get_CopyB_src(copyb);
	ir_node  *dst       = get_CopyB_dst(copyb);
	ir_type  *type      = get_CopyB_type(copyb);
	unsigned  type_size = get_type_size_bytes(type);
	ir_node  *block     = get_nodes_block(copyb);

	ir_node *node = start;
	while (node != copyb) {
		ldst_info_t *node_info = (ldst_info_t*)get_irn_link(node);