/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License Agreement
 * provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Load/Store optimizations.
 * @author  Michael Beck
 * @version $Id$
 */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <string.h>

#include "iroptimize.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "iropt_t.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
#include "array.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
#include "xmalloc.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

#ifdef DO_CACHEOPT
#include "cacheopt/cachesim.h"
#endif

#undef IMAX
#define IMAX(a,b)	((a) > (b) ? (a) : (b))

#define MAX_PROJ	IMAX(pn_Load_max, pn_Store_max)
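
/*
 * MAX_PROJ sizes the projs[] array of ldst_info_t below: the same slot
 * array is used for Load and Store nodes alike, so it must cover the
 * Proj numbers of both (pn_Load_* and pn_Store_*).
 */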

enum changes_t {
	DF_CHANGED = 1,       /**< data flow changed */
	CF_CHANGED = 2,       /**< control flow changed */
};

/**
 * walker environment
 */
typedef struct _walk_env_t {
	struct obstack obst;          /**< obstack used to allocate the per-node infos */
	unsigned changes;             /**< a bitmask of graph changes */
} walk_env_t;

/**
 * flags for Load/Store
 */
enum ldst_flags_t {
	LDST_VISITED = 1              /**< if set, this Load/Store is already visited */
};

/** A Load/Store info. */
typedef struct _ldst_info_t {
	ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
	ir_node  *exc_block;          /**< the exception block if available */
	int      exc_idx;             /**< predecessor index in the exception block */
	unsigned flags;               /**< flags */
	unsigned visited;             /**< visited counter for breaking loops */
} ldst_info_t;

/**
 * flags for control flow.
 */
enum block_flags_t {
	BLOCK_HAS_COND = 1,      /**< Block has conditional control flow */
	BLOCK_HAS_EXC  = 2       /**< Block has exceptional control flow */
};

/**
 * A Block info.
 */
typedef struct _block_info_t {
	unsigned flags;               /**< flags for the block */
} block_info_t;

/** the master visited flag for loop detection. */
static unsigned master_visited = 0;

#define INC_MASTER()       ++master_visited
#define MARK_NODE(info)    (info)->visited = master_visited
#define NODE_VISITED(info) ((info)->visited >= master_visited)
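
/*
 * Typical use (see follow_Mem_chain() below): start a new round with
 * INC_MASTER(), MARK_NODE() every visited ldst_info_t and test
 * NODE_VISITED() to break out when a memory cycle is reached again.
 */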

/**
 * get the Load/Store info of a node
 */
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
	ldst_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_ldst_info */

/**
 * get the Block info of a node
 */
static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
	block_info_t *info = get_irn_link(node);

	if (! info) {
		info = obstack_alloc(obst, sizeof(*info));
		memset(info, 0, sizeof(*info));
		set_irn_link(node, info);
	}
	return info;
}  /* get_block_info */

/**
 * update the projection info for a Load/Store
 */
static unsigned update_projs(ldst_info_t *info, ir_node *proj)
{
	long nr = get_Proj_proj(proj);

	assert(0 <= nr && nr <= MAX_PROJ && "Wrong proj from LoadStore");

	if (info->projs[nr]) {
		/* there is already one, do CSE */
		exchange(proj, info->projs[nr]);
		return DF_CHANGED;
	} else {
		info->projs[nr] = proj;
		return 0;
	}
}  /* update_projs */

/**
 * update the exception block info for a Load/Store node.
 *
 * @param info   the load/store info struct
 * @param block  the exception handler block for this load/store
 * @param pos    the control flow input of the block
 */
static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
{
	assert(info->exc_block == NULL && "more than one exception block found");

	info->exc_block = block;
	info->exc_idx   = pos;
	return 0;
}  /* update_exc */

/** Return the number of uses of an address node */
#define get_irn_n_uses(adr)     get_irn_n_edges(adr)

/**
 * walker, collects all Load/Store/Proj nodes
 *
 * walks from Start -> End
 */
static void collect_nodes(ir_node *node, void *env)
{
	ir_op       *op = get_irn_op(node);
	ir_node     *pred, *blk, *pred_blk;
	ldst_info_t *ldst_info;
	walk_env_t  *wenv = env;

	if (op == op_Proj) {
		ir_node *adr;
		ir_op *op;

		pred = get_Proj_pred(node);
		op   = get_irn_op(pred);

		if (op == op_Load) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			if ((ldst_info->flags & LDST_VISITED) == 0) {
				adr = get_Load_ptr(pred);
				ldst_info->flags |= LDST_VISITED;
			}

			/*
			 * Place the Proj's to the same block as the
			 * predecessor Load. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		} else if (op == op_Store) {
			ldst_info = get_ldst_info(pred, &wenv->obst);

			wenv->changes |= update_projs(ldst_info, node);

			if ((ldst_info->flags & LDST_VISITED) == 0) {
				adr = get_Store_ptr(pred);
				ldst_info->flags |= LDST_VISITED;
			}

			/*
			 * Place the Proj's to the same block as the
			 * predecessor Store. This is always ok and prevents
			 * "non-SSA" form after optimizations if the Proj
			 * is in a wrong block.
			 */
			blk      = get_nodes_block(node);
			pred_blk = get_nodes_block(pred);
			if (blk != pred_blk) {
				wenv->changes |= DF_CHANGED;
				set_nodes_block(node, pred_blk);
			}
		}
	} else if (op == op_Block) {
		int i;

		for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
			ir_node      *pred_block, *proj;
			block_info_t *bl_info;
			int          is_exc = 0;

			pred = proj = get_Block_cfgpred(node, i);

			if (is_Proj(proj)) {
				pred   = get_Proj_pred(proj);
				is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
			}

			/* ignore Bad predecessors, they will be removed later */
			if (is_Bad(pred))
				continue;

			pred_block = get_nodes_block(pred);
			bl_info    = get_block_info(pred_block, &wenv->obst);

			if (is_fragile_op(pred) && is_exc)
				bl_info->flags |= BLOCK_HAS_EXC;
			else if (is_irn_forking(pred))
				bl_info->flags |= BLOCK_HAS_COND;

			if (is_exc && (get_irn_op(pred) == op_Load || get_irn_op(pred) == op_Store)) {
				ldst_info = get_ldst_info(pred, &wenv->obst);

				wenv->changes |= update_exc(ldst_info, node, i);
			}
		}
	}
}  /* collect_nodes */

/**
 * Returns an entity if the address ptr points to a constant one.
 *
 * @param ptr  the address
 *
 * @return an entity or NULL
 */
static ir_entity *find_constant_entity(ir_node *ptr)
{
	for (;;) {
		ir_op *op = get_irn_op(ptr);

		if (op == op_SymConst && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
			ir_entity *ent = get_SymConst_entity(ptr);
			if (variability_constant == get_entity_variability(ent))
				return ent;
			return NULL;
		} else if (op == op_Sel) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type   *tp  = get_entity_owner(ent);

			/* Do not fiddle with polymorphism. */
			if (is_Class_type(get_entity_owner(ent)) &&
				((get_entity_n_overwrites(ent)    != 0) ||
				(get_entity_n_overwrittenby(ent) != 0)   ) )
				return NULL;

			if (is_Array_type(tp)) {
				/* check bounds */
				int i, n;

				for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
					ir_node *bound;
					tarval *tlower, *tupper;
					ir_node *index = get_Sel_index(ptr, i);
					tarval *tv     = computed_value(index);

					/* check if the index is constant */
					if (tv == tarval_bad)
						return NULL;

					bound  = get_array_lower_bound(tp, i);
					tlower = computed_value(bound);
					bound  = get_array_upper_bound(tp, i);
					tupper = computed_value(bound);

					if (tlower == tarval_bad || tupper == tarval_bad)
						return NULL;

					if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
						return NULL;
					if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
						return NULL;

					/* ok, bounds check finished */
				}
			}

			if (variability_constant == get_entity_variability(ent))
				return ent;

			/* try next */
			ptr = get_Sel_ptr(ptr);
		} else
			return NULL;
	}
}  /* find_constant_entity */
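
/*
 * For example, a SymConst(addr_ent) of a global entity with
 * variability_constant yields that entity directly; a Sel chain into a
 * constant object is only accepted if every array index is constant and
 * provably within the bounds checked above.
 */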

/**
 * Return the selection index of Sel node n in dimension dim as a long.
 */
static long get_Sel_array_index_long(ir_node *n, int dim) {
	ir_node *index = get_Sel_index(n, dim);
	assert(get_irn_op(index) == op_Const);
	return get_tarval_long(get_Const_tarval(index));
}  /* get_Sel_array_index_long */

/**
 * Returns the accessed component graph path for a
 * node computing an address.
 *
 * @param ptr    the node computing the address
 * @param depth  current depth in steps upward from the root
 *               of the address
 */
static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
	compound_graph_path *res = NULL;
	ir_entity           *root, *field;
	int                 path_len, pos;

	if (get_irn_op(ptr) == op_SymConst) {
		/* a SymConst. If the depth is 0, this is an access to a global
		 * entity and we don't need a component path; otherwise we at
		 * least know its length.
		 */
		assert(get_SymConst_kind(ptr) == symconst_addr_ent);
		root = get_SymConst_entity(ptr);
		res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
	} else {
		assert(get_irn_op(ptr) == op_Sel);
		/* it's a Sel, go up until we find the root */
		res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);

		/* fill up the step in the path at the current position */
		field    = get_Sel_entity(ptr);
		path_len = get_compound_graph_path_length(res);
		pos      = path_len - depth - 1;
		set_compound_graph_path_node(res, pos, field);

		if (is_Array_type(get_entity_owner(field))) {
			assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
			set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
		}
	}
	return res;
}  /* rec_get_accessed_path */

/** Returns an access path or NULL.  The access path is only
 *  valid if the graph is in phase_high and _no_ address computation is used.
 */
static compound_graph_path *get_accessed_path(ir_node *ptr) {
	return rec_get_accessed_path(ptr, 0);
}  /* get_accessed_path */
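
/*
 * E.g. for an access s.f.g the address is Sel(Sel(SymConst(s), f), g) and
 * the resulting compound graph path is [f, g]; Sels into arrays also
 * record their constant index in the path.
 */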

/* forward */
static void reduce_adr_usage(ir_node *ptr);

/**
 * Update a Load that may have lost its usage.
 */
static void handle_load_update(ir_node *load) {
	ldst_info_t *info = get_irn_link(load);

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return;

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		ir_node *ptr = get_Load_ptr(load);
		ir_node *mem = get_Load_mem(load);

		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);
		if (info->projs[pn_Load_X_regular])
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
	}
}  /* handle_load_update */

/**
 * A use of an address node has vanished. Check if this was a Proj
 * node and update the counters.
 */
static void reduce_adr_usage(ir_node *ptr) {
	if (is_Proj(ptr)) {
		if (get_irn_n_edges(ptr) <= 0) {
			/* this Proj is dead now */
			ir_node *pred = get_Proj_pred(ptr);

			if (is_Load(pred)) {
				ldst_info_t *info = get_irn_link(pred);
				info->projs[get_Proj_proj(ptr)] = NULL;

				/* this node lost its result proj, handle that */
				handle_load_update(pred);
			}
		}
	}
}  /* reduce_adr_usage */

/**
 * Check, if an already existing value of mode old_mode can be converted
 * into the needed one new_mode without loss.
 */
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
	if (old_mode == new_mode)
		return 1;

	/* if both modes are two's complement ones, we can always convert the
	   stored value into the needed one. */
	if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
		  get_mode_arithmetic(old_mode) == irma_twos_complement &&
		  get_mode_arithmetic(new_mode) == irma_twos_complement)
		return 1;
	return 0;
}  /* can_use_stored_value */
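
/*
 * For example, a value stored in mode Is (32 bit signed) may later be
 * loaded in mode Hs (16 bit signed): both are two's complement and the
 * stored mode is at least as wide, so a Conv of the stored value yields
 * the loaded one.
 */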

/**
 * Follow the memory chain as long as there are only Loads
 * and alias free Stores and try to replace the current Load or Store
 * by a previous one.
 * Note that in unreachable loops it might happen that we reach the
 * Load again; we can also fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node *pred;
	ir_node *ptr       = get_Load_ptr(load);
	ir_node *mem       = get_Load_mem(load);
	ir_mode *load_mode = get_Load_mode(load);

	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
		 */
		if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
		    can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
			/*
			 * a Load immediately after a Store -- a read after write.
			 * We may remove the Load if neither the Load nor the Store has an
			 * exception handler OR they are in the same MacroBlock. In the
			 * latter case the Load cannot throw an exception if the previous
			 * Store did not.
			 *
			 * Why do we need to check for a Store exception? If the Store cannot
			 * be executed (ROM), the exception handler might simply jump into
			 * the Load's MacroBlock :-(
			 * We could do a little better if we knew that the exception
			 * handler of the Store jumps directly to the end...
			 */
			if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
			    get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value = get_Store_value(pred);

				DBG_OPT_RAW(load, value);

				/* add a Conv if needed */
				if (get_irn_mode(get_Store_value(pred)) != load_mode) {
					value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange( info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				if (info->projs[pn_Load_res])
					exchange(info->projs[pn_Load_res], value);

				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load if it does not have an exception
			 * handler OR they are in the same MacroBlock. In the latter case
			 * the Load cannot throw an exception if the previous Load did not.
			 *
			 * Here there is no need to check whether the previous Load has an
			 * exception handler, because both would raise exactly the same
			 * exception...
			 */
			if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res | DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (get_irn_op(pred) == op_Load) {
			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				break;
		}
	}

	return res;
}  /* follow_Mem_chain */
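
/*
 * Source level view of the two patterns handled above:
 *
 *     *p = x; y = *p;     -- read after write: y becomes x
 *     y = *p; z = *p;     -- read after read:  z reuses y
 *
 * (with an additional Conv if the Load mode differs from the mode of the
 * value found in the memory chain).
 */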

/**
 * optimize a Load
 *
 * @param load  the Load node
 */
static unsigned optimize_load(ir_node *load)
{
	ldst_info_t *info = get_irn_link(load);
	ir_node *mem, *ptr, *new_node;
	ir_entity *ent;
	unsigned res = 0;

	/* do NOT touch volatile loads for now */
	if (get_Load_volatility(load) == volatility_is_volatile)
		return 0;

	/* the address of the load to be optimized */
	ptr = get_Load_ptr(load);

	/*
	 * Check if we can remove the exception from a Load:
	 * This can be done, if the address is from an Sel(Alloc) and
	 * the Sel type is a subtype of the allocated type.
	 *
	 * This optimizes some often used OO constructs,
	 * like x = new O; x->t;
	 */
	if (info->projs[pn_Load_X_except]) {
		if (is_Sel(ptr)) {
			ir_node *mem = get_Sel_mem(ptr);

			/* FIXME: works with the current FE, but better use the base */
			if (is_Alloc(skip_Proj(mem))) {
				/* ok, check the types */
				ir_entity *ent    = get_Sel_entity(ptr);
				ir_type   *s_type = get_entity_type(ent);
				ir_type   *a_type = get_Alloc_type(mem);

				if (is_SubClass_of(s_type, a_type)) {
					/* ok, condition met: there can't be an exception because
					 * Alloc guarantees that enough memory was allocated */

					exchange(info->projs[pn_Load_X_except], new_Bad());
					info->projs[pn_Load_X_except] = NULL;
					if (info->projs[pn_Load_X_regular]) {
						exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
						info->projs[pn_Load_X_regular] = NULL;
					}
					res |= CF_CHANGED;
				}
			}
		} else if (is_Alloc(skip_Proj(skip_Cast(ptr)))) {
				/* simple case: a direct load after an Alloc. Firm's Alloc throws
				 * an exception in case of out-of-memory. So, there is no way for an
				 * exception in this load.
				 * This code is constructed by the "exception lowering" in the Jack compiler.
				 */
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				if (info->projs[pn_Load_X_regular]) {
					exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					info->projs[pn_Load_X_regular] = NULL;
				}
				res |= CF_CHANGED;
		}
	}

	/* The mem of the Load. Must still be returned after optimization. */
	mem  = get_Load_mem(load);

	if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
		/* a Load whose value is neither used nor exception checked, remove it */
		exchange(info->projs[pn_Load_M], mem);

		if (info->projs[pn_Load_X_regular]) {
			/* should not happen, but if it does, remove it */
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			res |= CF_CHANGED;
		}
		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* Load from a constant polymorphic field, where we can resolve
	   polymorphism. */
	new_node = transform_node_Load(load);
	if (new_node != load) {
		if (info->projs[pn_Load_M]) {
			exchange(info->projs[pn_Load_M], mem);
			info->projs[pn_Load_M] = NULL;
		}
		if (info->projs[pn_Load_X_except]) {
			exchange(info->projs[pn_Load_X_except], new_Bad());
			info->projs[pn_Load_X_except] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_X_regular]) {
			exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
			info->projs[pn_Load_X_regular] = NULL;
			res |= CF_CHANGED;
		}
		if (info->projs[pn_Load_res])
			exchange(info->projs[pn_Load_res], new_node);

		exchange(load, new_Bad());
		reduce_adr_usage(ptr);
		return res | DF_CHANGED;
	}

	/* check if we can determine the entity that will be loaded */
	ent = find_constant_entity(ptr);
	if (ent) {
		if ((allocation_static == get_entity_allocation(ent)) &&
			(visibility_external_allocated != get_entity_visibility(ent))) {
			/* a static allocation that is not external: there should be NO exception
			 * when loading. */

			/* no exception, clear the info field as it might be checked later again */
			if (info->projs[pn_Load_X_except]) {
				exchange(info->projs[pn_Load_X_except], new_Bad());
				info->projs[pn_Load_X_except] = NULL;
				res |= CF_CHANGED;
			}
			if (info->projs[pn_Load_X_regular]) {
				exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
				info->projs[pn_Load_X_regular] = NULL;
				res |= CF_CHANGED;
			}

			if (variability_constant == get_entity_variability(ent)
				&& is_atomic_entity(ent)) {
				/* Might not be atomic after lowering of Sels.  In this
				   case we could also load, but it's more complicated. */
				/* simpler case: we load the content of a constant value:
				 * replace it by the constant itself
				 */

				/* no memory */
				if (info->projs[pn_Load_M]) {
					exchange(info->projs[pn_Load_M], mem);
					res |= DF_CHANGED;
				}
				/* no result :-) */
				if (info->projs[pn_Load_res]) {
					if (is_atomic_entity(ent)) {
						ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));

						DBG_OPT_RC(load, c);
						exchange(info->projs[pn_Load_res], c);
						res |= DF_CHANGED;
					}
				}
				exchange(load, new_Bad());
				reduce_adr_usage(ptr);
				return res;
			} else if (variability_constant == get_entity_variability(ent)) {
				compound_graph_path *path = get_accessed_path(ptr);

				if (path) {
					ir_node *c;

					assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
					/*
					{
						int j;
						for (j = 0; j < get_compound_graph_path_length(path); ++j) {
							ir_entity *node = get_compound_graph_path_node(path, j);
							fprintf(stdout, ".%s", get_entity_name(node));
							if (is_Array_type(get_entity_owner(node)))
								fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
						}
						printf("\n");
					}
					*/

					c = get_compound_ent_value_by_path(ent, path);
					free_compound_graph_path(path);

					/* printf("  cons: "); DDMN(c); */

					if (info->projs[pn_Load_M]) {
						exchange(info->projs[pn_Load_M], mem);
						res |= DF_CHANGED;
					}
					if (info->projs[pn_Load_res]) {
						exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
						res |= DF_CHANGED;
					}
					exchange(load, new_Bad());
					reduce_adr_usage(ptr);
					return res;
				} else {
					/*  We cannot determine a correct access path.  E.g., in jack, we load
					a byte from an object to generate an exception.  Happens in the test program
					Reflectiontest.
					printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
					get_entity_name(get_irg_entity(current_ir_graph)));
					printf("  load: "); DDMN(load);
					printf("  ptr:  "); DDMN(ptr);
					*/
				}
			}
		}
	}

	/* Check, if the address of this load is used more than once.
	 * If not, this load cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return res;

	/*
	 * follow the memory chain as long as there are only Loads
	 * and try to replace the current Load or Store by a previous one.
	 * Note that in unreachable loops it might happen that we reach the
	 * Load again; we can also fall into a cycle.
	 * We break such cycles using a special visited flag.
	 */
	INC_MASTER();
	res = follow_Mem_chain(load, skip_Proj(mem));
	return res;
}  /* optimize_load */
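
/*
 * Summary of the cases tried above, in order: remove a provably impossible
 * exception (address from Sel(Alloc) or Alloc), drop a Load whose value is
 * unused, resolve constant polymorphic accesses, fold Loads from constant
 * entities, and finally search the memory chain for an earlier equivalent
 * access.
 */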

/**
 * Check whether a value of mode new_mode would completely overwrite a value
 * of mode old_mode in memory.
 */
static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
{
	return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
}  /* is_completely_overwritten */
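
/*
 * E.g. a 32 bit store (mode Iu) over an earlier 8 bit store (mode Bu) to
 * the same address overwrites it completely, so the older Store is dead;
 * the other way round it is not.
 */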

/**
 * Follow the memory chain as long as there are only Loads and alias free Stores.
 *
 * INC_MASTER() must be called before diving in.
 */
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(store);
	ir_node *pred;
	ir_node *ptr = get_Store_ptr(store);
	ir_node *mem = get_Store_mem(store);
	ir_node *value = get_Store_value(store);
	ir_mode *mode  = get_irn_mode(value);
	ir_node *block = get_nodes_block(store);
	ir_node *mblk  = get_Block_MacroBlock(block);

	for (pred = curr; pred != store;) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * BEWARE: one might think that checking the modes is useless, because
		 * if the pointers are identical, they refer to the same object.
		 * This is only true in strongly typed languages, not in C, where the
		 * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
		 * However, if the mode that is written has a bigger or equal size than
		 * the old one, the old value is completely overwritten and can be killed ...
		 */
		if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
		    get_nodes_MacroBlock(pred) == mblk &&
		    is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
			/*
			 * a Store after a Store in the same block -- a write after write.
			 * We may remove the first Store, if it does not have an exception handler.
			 *
			 * TODO: What, if both have the same exception handler ???
			 */
			if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
				DBG_OPT_WAW(pred, store);
				exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
				exchange(pred, new_Bad());
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           value == pred_info->projs[pn_Load_res]) {
			/*
			 * a Store of a value after a Load -- a write after read.
			 * We may remove the Store if it does not have an exception handler.
			 */
			if (! info->projs[pn_Store_X_except]) {
				DBG_OPT_WAR(store, pred);
				exchange(info->projs[pn_Store_M], mem);
				exchange(store, new_Bad());
				reduce_adr_usage(ptr);
				return DF_CHANGED;
			}
		}

		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (get_irn_op(pred) == op_Load) {
			pred = skip_Proj(get_Load_mem(pred));
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				break;
		}
	}
	return res;
}  /* follow_Mem_chain_for_Store */
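
/*
 * Again at the source level, the two patterns handled above:
 *
 *     *p = x; *p = y;     -- write after write: the first Store is dead
 *     x = *p; *p = x;     -- write after read:  the Store is redundant
 */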

/**
 * optimize a Store
 *
 * @param store  the Store node
 */
static unsigned optimize_store(ir_node *store) {
	ir_node *ptr, *mem;

	if (get_Store_volatility(store) == volatility_is_volatile)
		return 0;

	ptr = get_Store_ptr(store);

	/* Check, if the address of this Store is used more than once.
	 * If not, this Store cannot be removed in any case. */
	if (get_irn_n_uses(ptr) <= 1)
		return 0;

	mem = get_Store_mem(store);

	/* follow the memory chain as long as there are only Loads */
	INC_MASTER();
	return follow_Mem_chain_for_Store(store, skip_Proj(mem));
}  /* optimize_store */

/**
 * walker, optimizes Phi after Stores to identical places:
 * Does the following optimization:
 * @verbatim
 *
 *   val1   val2   val3          val1  val2  val3
 *    |      |      |               \    |    /
 *  Store  Store  Store              \   |   /
 *      \    |    /                   PhiData
 *       \   |   /                       |
 *        \  |  /                      Store
 *          PhiM
 *
 * @endverbatim
 * This reduces the number of stores and allows for predicated execution.
 * Moves Stores back to the end of a function which may be bad.
 *
 * This is only possible if the predecessor blocks have only one successor.
 */
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
	int i, n;
	ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
	ir_mode *mode;
	ir_node **inM, **inD, **projMs;
	int *idx;
	dbg_info *db = NULL;
	ldst_info_t *info;
	block_info_t *bl_info;
	unsigned res = 0;

	/* Must be a memory Phi */
	if (get_irn_mode(phi) != mode_M)
		return 0;

	n = get_Phi_n_preds(phi);
	if (n <= 0)
		return 0;

	/* must be only one user */
	projM = get_Phi_pred(phi, 0);
	if (get_irn_n_edges(projM) != 1)
		return 0;

	store = skip_Proj(projM);
	old_store = store;
	if (get_irn_op(store) != op_Store)
		return 0;

	block = get_nodes_block(store);

	/* abort on dead blocks */
	if (is_Block_dead(block))
		return 0;

	/* check if the block is post dominated by Phi-block
	   and has no exception exit */
	bl_info = get_irn_link(block);
	if (bl_info->flags & BLOCK_HAS_EXC)
		return 0;

	phi_block = get_nodes_block(phi);
	if (! block_strictly_postdominates(phi_block, block))
		return 0;

	/* this is the address of the store */
	ptr  = get_Store_ptr(store);
	mode = get_irn_mode(get_Store_value(store));
	info = get_irn_link(store);
	exc  = info->exc_block;

	for (i = 1; i < n; ++i) {
		ir_node *pred = get_Phi_pred(phi, i);

		if (get_irn_n_edges(pred) != 1)
			return 0;

		pred = skip_Proj(pred);
		if (!is_Store(pred))
			return 0;

		if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
			return 0;

		info = get_irn_link(pred);

		/* check, if all stores have the same exception flow */
		if (exc != info->exc_block)
			return 0;

		/* abort on dead blocks */
		block = get_nodes_block(pred);
		if (is_Block_dead(block))
			return 0;

		/* check if the block is post dominated by Phi-block
		   and has no exception exit. Note that block must be different from
		   Phi-block, else we would move a Store from end End of a block to its
		   Start... */
		bl_info = get_irn_link(block);
		if (bl_info->flags & BLOCK_HAS_EXC)
			return 0;
		if (block == phi_block || ! block_postdominates(phi_block, block))
			return 0;
	}

	/*
	 * ok, when we are here, we found all predecessors of a Phi that
	 * are Stores to the same address and size. That means whatever
	 * we do before we enter the block of the Phi, we do a Store.
	 * So, we can move the Store to the current block:
	 *
	 *   val1    val2    val3          val1  val2  val3
	 *    |       |       |               \    |    /
	 * | Str | | Str | | Str |             \   |   /
	 *      \     |     /                   PhiData
	 *       \    |    /                       |
	 *        \   |   /                       Str
	 *           PhiM
	 *
	 * Is only allowed if the predecessor blocks have only one successor.
	 */

	NEW_ARR_A(ir_node *, projMs, n);
	NEW_ARR_A(ir_node *, inM, n);
	NEW_ARR_A(ir_node *, inD, n);
	NEW_ARR_A(int, idx, n);

	/* Prepare: Collect all Store nodes.  We must do this
	   first because we otherwise may lose a Store when exchanging its
	   memory Proj.
	 */
	for (i = n - 1; i >= 0; --i) {
		ir_node *store;

		projMs[i] = get_Phi_pred(phi, i);
		assert(is_Proj(projMs[i]));

		store = get_Proj_pred(projMs[i]);
		info  = get_irn_link(store);

		inM[i] = get_Store_mem(store);
		inD[i] = get_Store_value(store);
		idx[i] = info->exc_idx;
	}
	block = get_nodes_block(phi);

	/* second step: create a new memory Phi */
	phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);

	/* third step: create a new data Phi */
	phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);

	/* rewire memory and kill the node */
	for (i = n - 1; i >= 0; --i) {
		ir_node *proj  = projMs[i];

		if (is_Proj(proj)) {
			ir_node *store = get_Proj_pred(proj);
			exchange(proj, inM[i]);
			kill_node(store);
		}
	}

	/* fourth step: create the Store */
	store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
#ifdef DO_CACHEOPT
	co_set_irn_name(store, co_get_irn_ident(old_store));
#endif

	projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);

	info = get_ldst_info(store, &wenv->obst);
	info->projs[pn_Store_M] = projM;

	/* fifth step: repair exception flow */
	if (exc) {
		ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);

		info->projs[pn_Store_X_except] = projX;
		info->exc_block                = exc;
		info->exc_idx                  = idx[0];

		for (i = 0; i < n; ++i) {
			set_Block_cfgpred(exc, idx[i], projX);
		}

		if (n > 1) {
			/* the exception block should be optimized as some inputs are identical now */
		}

		res |= CF_CHANGED;
	}

	/* sixth step: replace old Phi */
	exchange(phi, projM);

	return res | DF_CHANGED;
}  /* optimize_phi */

/**
 * walker, do the optimizations
 */
static void do_load_store_optimize(ir_node *n, void *env) {
	walk_env_t *wenv = env;

	switch (get_irn_opcode(n)) {

	case iro_Load:
		wenv->changes |= optimize_load(n);
		break;

	case iro_Store:
		wenv->changes |= optimize_store(n);
		break;

	case iro_Phi:
		wenv->changes |= optimize_phi(n, wenv);
		break;

	default:
		;
	}
}  /* do_load_store_optimize */

/** A scc. */
typedef struct scc {
	ir_node *head;		/**< the head of the list */
} scc;

/** A node entry. */
typedef struct node_entry {
	unsigned DFSnum;    /**< the DFS number of this node */
	unsigned low;       /**< the low number of this node */
	ir_node  *header;   /**< the header of this node */
	int      in_stack;  /**< flag, set if the node is on the stack */
	ir_node  *next;     /**< link to the next node in the same scc */
	scc      *pscc;     /**< the scc of this node */
	unsigned POnum;     /**< the post order number for blocks */
} node_entry;

/** A loop entry. */
typedef struct loop_env {
	ir_phase ph;           /**< the phase object */
	ir_node  **stack;      /**< the node stack */
	int      tos;          /**< tos index */
	unsigned nextDFSnum;   /**< the current DFS number */
	unsigned POnum;        /**< current post order number */

	unsigned changes;      /**< a bitmask of graph changes */
} loop_env;

/**
 * Gets the node_entry of a node
 */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
	ir_phase   *ph = &env->ph;
	node_entry *e  = phase_get_irn_data(&env->ph, irn);

	if (! e) {
		e = phase_alloc(ph, sizeof(*e));
		memset(e, 0, sizeof(*e));
		phase_set_irn_data(ph, irn, e);
	}
	return e;
}  /* get_irn_ne */

/**
 * Push a node onto the stack.
 *
 * @param env   the loop environment
 * @param n     the node to push
 */
static void push(loop_env *env, ir_node *n) {
	node_entry *e;

	if (env->tos == ARR_LEN(env->stack)) {
		int nlen = ARR_LEN(env->stack) * 2;
		ARR_RESIZE(ir_node *, env->stack, nlen);
	}
	env->stack[env->tos++] = n;
	e = get_irn_ne(n, env);
	e->in_stack = 1;
}  /* push */

/**
 * pop a node from the stack
 *
 * @param env   the loop environment
 *
 * @return  The topmost node
 */
static ir_node *pop(loop_env *env) {
	ir_node *n = env->stack[--env->tos];
	node_entry *e = get_irn_ne(n, env);

	e->in_stack = 0;
	return n;
}  /* pop */

/**
 * Check if irn is a region constant.
 * The block or irn must strictly dominate the header block.
 *
 * @param irn           the node to check
 * @param header_block  the header block of the induction variable
 */
static int is_rc(ir_node *irn, ir_node *header_block) {
	ir_node *block = get_nodes_block(irn);

	return (block != header_block) && block_dominates(block, header_block);
}  /* is_rc */
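
/*
 * E.g. a Const, or an address computed in a block that strictly dominates
 * the loop header, is a region constant for that loop.
 */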

typedef struct phi_entry phi_entry;
struct phi_entry {
	ir_node   *phi;    /**< A phi with a region const memory. */
	int       pos;     /**< The position of the region const memory */
	ir_node   *load;   /**< the newly created load for this phi */
	phi_entry *next;
};

/**
 * Move Loads out of loops if possible
 */
static void move_loads_in_loops(scc *pscc, loop_env *env) {
	ir_node   *phi, *load, *next, *other, *next_other;
	ir_entity *ent;
	int       j;
	phi_entry *phi_list = NULL;

	/* collect all outer memories */
	for (phi = pscc->head; phi != NULL; phi = next) {
		node_entry *ne = get_irn_ne(phi, env);
		next = ne->next;

		/* check all memory Phi's */
		if (! is_Phi(phi) || get_irn_mode(phi) != mode_M)
			continue;

		for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
			ir_node    *pred = get_irn_n(phi, j);
			node_entry *pe   = get_irn_ne(pred, env);

			if (pe->pscc != ne->pscc) {
				/* not in the same SCC, is region const */
				phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));

				pe->phi  = phi;
				pe->pos  = j;
				pe->next = phi_list;
				phi_list = pe;
			}
		}
	}
	/* no Phis no fun */
	if (phi_list == NULL)
		return;

	for (load = pscc->head; load; load = next) {
		ir_mode *load_mode;
		node_entry *ne = get_irn_ne(load, env);
		next = ne->next;

		if (is_Load(load)) {
			ldst_info_t *info = get_irn_link(load);
			ir_node     *ptr = get_Load_ptr(load);

			/* for now, we cannot handle Loads with exceptions */
			if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
				continue;

			/* for now, we can only handle Load(SymConst) */
			if (! is_SymConst(ptr) || get_SymConst_kind(ptr) != symconst_addr_ent)
				continue;
			ent = get_SymConst_entity(ptr);

			load_mode = get_Load_mode(load);
			if (get_entity_address_taken(ent) == ir_address_not_taken) {
				/* Shortcut: if the address is never taken, it is completely alias free */