funccall.c 29.9 KB
Newer Older
Christian Würdig's avatar
Christian Würdig committed
1
/*
Michael Beck's avatar
Michael Beck committed
2
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
Christian Würdig's avatar
Christian Würdig committed
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

Michael Beck's avatar
Michael Beck committed
20
21
22
23
24
/**
 * @file
 * @brief   Optimization of function calls.
 * @author  Michael Beck
 * @version $Id$
25
 */
26
27
#include "config.h"

28
#include "opt_init.h"
29

30
31
32
33
34
35
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "dbginfo_t.h"
#include "irflag_t.h"
36
#include "irloop_t.h"
37
#include "ircons.h"
38
#include "iredges_t.h"
39
#include "irpass_t.h"
40
#include "iroptimize.h"
41
#include "analyze_irg_args.h"
42
#include "irhooks.h"
Matthias Braun's avatar
Matthias Braun committed
43
#include "raw_bitset.h"
44
45
46
#include "debug.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
47
48

/**
 * The walker environment for updating function calls.
 * Filled per-graph by the collector walkers and consumed by the fixup
 * routines; the call lists are singly linked through the nodes' link fields.
 */
typedef struct env_t {
	size_t   n_calls_SymConst;          /**< Number of collected calls with a direct (SymConst/Global) address. */
	size_t   n_calls_Sel;               /**< Number of collected indirect (Sel address) calls with known callees. */
	ir_node  *float_const_call_list;    /**< The list of all floating const function calls that will be changed. */
	ir_node  *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
	ir_node  *pure_call_list;           /**< The list of all pure function calls that will be changed. */
	ir_node  *nothrow_call_list;        /**< The list of all nothrow function calls that will be changed. */
	ir_node  *proj_list;                /**< The list of all potential Proj nodes that must be fixed. */
} env_t;

61
62
63
64
65
66
67
/** Ready IRG's are marked in the ready set. */
static unsigned *ready_set;

/** IRG's that are in progress are marked here. */
static unsigned *busy_set;

/**
 * We misuse the mtp_property_inherited flag as temporary marker here.
 * This is ok, as we cannot set or get it anyway using the
 * get_additional_properties API.
 */
#define mtp_temporary  mtp_property_inherited

74
/**
75
 * Walker: Collect all calls to const and pure functions
76
 * to lists. Collect all Proj(Call) nodes into a Proj list.
77
 */
78
79
static void collect_const_and_pure_calls(ir_node *node, void *env)
{
80
81
82
	env_t     *ctx = (env_t*)env;
	ir_node   *call;
	ir_node   *ptr;
83
	ir_entity *ent;
84
	unsigned  and_prop, or_prop, prop;
85
86
87
88
89
90
91

	if (is_Call(node)) {
		call = node;

		/* set the link to NULL for all non-const/pure calls */
		set_irn_link(call, NULL);
		ptr = get_Call_ptr(call);
92
93
		if (is_Global(ptr)) {
			ent = get_Global_entity(ptr);
94

95
96
			prop = get_entity_additional_properties(ent);
			if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
97
98
99
100
				return;
			++ctx->n_calls_SymConst;
		} else if (get_opt_closed_world() &&
		           is_Sel(ptr) &&
101
		           get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
102
			/* If all possible callees are const functions, we can remove the memory edge. */
103
			size_t i, n_callees = get_Call_n_callees(call);
104
105
106
107
108
109
110
111
			if (n_callees == 0) {
				/* This is kind of strange:  dying code or a Call that will raise an exception
				   when executed as there is no implementation to call.  So better not
				   optimize. */
				return;
			}

			/* note that const function are a subset of pure ones */
112
113
			and_prop = mtp_property_const | mtp_property_pure;
			or_prop  = 0;
114
115
116
117
118
119
			for (i = 0; i < n_callees; ++i) {
				ent = get_Call_callee(call, i);
				if (ent == unknown_entity) {
					/* we don't know which entity is called here */
					return;
				}
120
121
122
123
				prop      = get_entity_additional_properties(ent);
				and_prop &= prop;
				or_prop  &= prop;
				if (and_prop == mtp_no_property)
124
125
					return;
			}
126
			prop = and_prop | (or_prop & mtp_property_has_loop);
127
128
129
130
131
			++ctx->n_calls_Sel;
		} else
			return;

		/* ok, if we get here we found a call to a const or a pure function */
132
		if (prop & mtp_property_pure) {
133
134
135
			set_irn_link(call, ctx->pure_call_list);
			ctx->pure_call_list = call;
		} else {
136
137
138
139
140
141
142
			if (prop & mtp_property_has_loop) {
				set_irn_link(call, ctx->nonfloat_const_call_list);
				ctx->nonfloat_const_call_list = call;
			} else {
				set_irn_link(call, ctx->float_const_call_list);
				ctx->float_const_call_list = call;
			}
143
144
145
146
147
148
149
150
151
152
153
154
		}
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls.
		 */
		call = get_Proj_pred(node);
		if (! is_Call(call))
			return;

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
155
		case pn_Call_M:
156
		case pn_Call_X_except:
157
		case pn_Call_X_regular:
158
159
160
161
162
163
164
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
			break;
		default:
			break;
		}
	}
165
}  /* collect_const_and_pure_calls */
166
167
168

/**
 * Fix the list of collected Calls.  Calls in the float list lose their
 * memory input (preserved in the link field) and become floating; their
 * memory/exception Projs are then rerouted or removed.
 *
 * @param irg  the graph that contained calls to pure functions
 * @param ctx  context
 */
static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
{
	ir_node *call, *next, *mem, *proj;
	int exc_changed = 0;   /* set when exception control flow was rewired */

	/* First step: fix all calls by removing their memory input and let
	 * them floating.
	 * The original memory input is preserved in their link fields. */
	for (call = ctx->float_const_call_list; call != NULL; call = next) {
		next = (ir_node*)get_irn_link(call);
		mem  = get_Call_mem(call);

		/* remember the old memory so the Proj fixup below can reroute to it */
		set_irn_link(call, mem);
		set_Call_mem(call, get_irg_no_mem(irg));

		/*
		 * Unfortunately we cannot simply set the node to 'float'.
		 * There is a reason for that:
		 *
		 * - The call might be inside a loop/if that is NOT entered
		 *   and calls a endless function. Setting the call to float
		 *   would allow to move it out from the loop/if causing this
		 *   function be called even if the loop/if is not entered ...
		 *
		 * This could be fixed using post-dominators for calls and Pin nodes
		 * but need some more analyzes to ensure that a call that potential
		 * never returns is not executed before some code that generates
		 * observable states...
		 */

		/* finally, this call can float */
		set_irn_pinned(call, op_pin_state_floats);
		hook_func_call(irg, call);
	}

	/* Last step: fix all Proj's */
	for (proj = ctx->proj_list; proj != NULL; proj = next) {
		next = (ir_node*)get_irn_link(proj);
		call = get_Proj_pred(proj);
		/* the link holds the old memory input stored in the first loop */
		mem  = (ir_node*)get_irn_link(call);

		/* beware of calls in the pure call list: their link is still the
		 * next list element (a Call), not a memory node */
		if (!mem || is_Call(mem))
			continue;
		assert(get_irn_mode(mem) == mode_M);

		switch (get_Proj_proj(proj)) {
		case pn_Call_M: {
			/* in dead code there might be cycles where proj == mem */
			if (proj != mem)
				exchange(proj, mem);
			 break;
		}
		case pn_Call_X_except:
			/* a const/pure call cannot throw: kill the exception edge */
			exc_changed = 1;
			exchange(proj, new_r_Bad(irg, mode_X));
			break;
		case pn_Call_X_regular: {
			/* regular control flow continues unconditionally */
			ir_node *block = get_nodes_block(call);
			exc_changed = 1;
			exchange(proj, new_r_Jmp(block));
			break;
		}
		default:
			break;
		}
	}

	if (exc_changed) {
		/* control flow changed ... including exception edges */
		clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
		                   | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
	}
}  /* fix_const_call_list */

/**
 * Walker: Collect all calls to nothrow functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 */
252
253
static void collect_nothrow_calls(ir_node *node, void *env)
{
254
	env_t *ctx = (env_t*)env;
255
256
257
258
259
260
261
262
263
264
	ir_node *call, *ptr;
	ir_entity *ent;
	unsigned prop;

	if (is_Call(node)) {
		call = node;

		/* set the link to NULL for all non-const/pure calls */
		set_irn_link(call, NULL);
		ptr = get_Call_ptr(call);
265
266
		if (is_Global(ptr)) {
			ent = get_Global_entity(ptr);
267
268
269
270
271
272
273

			prop = get_entity_additional_properties(ent);
			if ((prop & mtp_property_nothrow) == 0)
				return;
			++ctx->n_calls_SymConst;
		} else if (get_opt_closed_world() &&
		           is_Sel(ptr) &&
274
		           get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
275
			/* If all possible callees are nothrow functions, we can remove the exception edge. */
276
			size_t i, n_callees = get_Call_n_callees(call);
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
			if (n_callees == 0) {
				/* This is kind of strange:  dying code or a Call that will raise an exception
				   when executed as there is no implementation to call.  So better not
				   optimize. */
				return;
			}

			/* note that const function are a subset of pure ones */
			prop = mtp_property_nothrow;
			for (i = 0; i < n_callees; ++i) {
				ent = get_Call_callee(call, i);
				if (ent == unknown_entity) {
					/* we don't know which entity is called here */
					return;
				}
				prop &= get_entity_additional_properties(ent);
				if (prop == mtp_no_property)
					return;
			}
			++ctx->n_calls_Sel;
		} else
			return;

		/* ok, if we get here we found a call to a nothrow function */
		set_irn_link(call, ctx->nothrow_call_list);
		ctx->nothrow_call_list = call;
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls.
		 */
		call = get_Proj_pred(node);
		if (! is_Call(call))
			return;

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
314
		case pn_Call_M:
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
		case pn_Call_X_except:
		case pn_Call_X_regular:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
			break;
		default:
			break;
		}
	}
}  /* collect_nothrow_calls */

/**
 * Fix the list of collected nothrow Calls: mark them and remove the
 * exception control flow of all their Projs.
 *
 * @param irg        the graph that contained calls to pure functions
 * @param call_list  the list of all call sites of const functions
 * @param proj_list  the list of all memory/exception Proj's of this call sites
 */
static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
{
	ir_node *call, *next, *proj;
	int exc_changed = 0;   /* set when exception control flow was removed */

	/* First step: go through the list of calls and mark them. */
	for (call = call_list; call; call = next) {
		next = (ir_node*)get_irn_link(call);

		/* current_ir_graph is in memory anyway, so it's a good marker */
		set_irn_link(call, &current_ir_graph);
		hook_func_call(irg, call);
	}

	/* Second step: Remove all exception Proj's */
	for (proj = proj_list; proj; proj = next) {
		next = (ir_node*)get_irn_link(proj);
		call = get_Proj_pred(proj);

		/* handle only marked calls */
		if (get_irn_link(call) != &current_ir_graph)
			continue;

		/* kill any exception flow */
		switch (get_Proj_proj(proj)) {
		case pn_Call_X_except:
			/* nothrow: the exception edge can never be taken */
			exc_changed = 1;
			exchange(proj, new_r_Bad(irg, mode_X));
			break;
		case pn_Call_X_regular: {
			/* regular control flow continues unconditionally */
			ir_node *block = get_nodes_block(call);
			exc_changed = 1;
			exchange(proj, new_r_Jmp(block));
			break;
		}
		default:
			break;
		}
	}

	/* changes were done ... */
	if (exc_changed) {
		/* ... including exception edges */
		clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
		                   | IR_GRAPH_STATE_CONSISTENT_LOOPINFO);
	}
}  /* fix_nothrow_call_list */
380

381
/* marking: query/update the per-graph analysis state in the index-based
 * ready_set/busy_set bitsets */
#define SET_IRG_READY(irg)  rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg)   rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg)   rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
#define IS_IRG_BUSY(irg)    rbitset_is_set(busy_set, get_irg_idx(irg))

/* forward */
static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
390
391

/**
392
 * Calculate the bigger property of two. Handle the temporary flag right.
393
 */
394
395
static mtp_additional_properties max_property(mtp_additional_properties a,
                                              mtp_additional_properties b)
396
{
397
398
	mtp_additional_properties r;
	mtp_additional_properties t = (a | b) & mtp_temporary;
399
400
	a &= ~mtp_temporary;
	b &= ~mtp_temporary;
401

402
403
404
405
	if (a == mtp_no_property || b == mtp_no_property)
		return mtp_no_property;
	r = a > b ? a : b;
	return r | t;
406
}  /* max_property */
407
408
409
410
411
412

/**
 * Follow the memory chain starting at node and determine
 * the mtp_property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else
 */
static mtp_additional_properties follow_mem_(ir_node *node)
{
	/* start optimistic: a chain with no Loads/impure calls is const */
	mtp_additional_properties mode = mtp_property_const;
	mtp_additional_properties m;
	ir_node  *ptr;
	int i;

	for (;;) {
		if (mode == mtp_no_property)
			return mtp_no_property;

		/* stop on already visited nodes; the initial memory is pre-marked
		 * by the caller so the walk terminates there */
		if (irn_visited_else_mark(node))
			return mode;

		switch (get_irn_opcode(node)) {
		case iro_Proj:
			node = get_Proj_pred(node);
			break;

		case iro_NoMem:
			/* finish here */
			return mode;

		case iro_Phi:
		case iro_Sync:
			/* do a dfs search over all memory predecessors */
			for (i = get_irn_arity(node) - 1; i >= 0; --i) {
				m    = follow_mem_(get_irn_n(node, i));
				mode = max_property(mode, m);
				if (mode == mtp_no_property)
					return mtp_no_property;
			}
			return mode;

		case iro_Load:
			/* Beware volatile Loads are NOT allowed in pure functions. */
			if (get_Load_volatility(node) == volatility_is_volatile)
				return mtp_no_property;
			/* a (non-volatile) Load downgrades const to pure */
			mode = max_property(mode, mtp_property_pure);
			node = get_Load_mem(node);
			break;

		case iro_Call:
			/* A call is only tolerable if its either constant or pure. */
			ptr = get_Call_ptr(node);
			if (is_SymConst_addr_ent(ptr)) {
				ir_entity *ent = get_SymConst_entity(ptr);
				ir_graph  *irg = get_entity_irg(ent);

				if (irg == NULL) {
					/* no graph available: trust the declared properties */
					m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
					mode = max_property(mode, m);
				} else {
					/* we have a graph, analyze it. */
					m = check_const_or_pure_function(irg, /*top=*/0);
					mode = max_property(mode, m);
				}
			} else
				return mtp_no_property;
			node = get_Call_mem(node);
			break;

		default:
			/* any other memory-touching node kills the property */
			return mtp_no_property;
		}
	}
}
483

484
485
486
487
488
/**
 * Follow the memory chain starting at node and determine
 * the mtp_property.
 *
 * @return mtp_property_const if only calls of const functions are detected
Michael Beck's avatar
Michael Beck committed
489
490
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property else
491
 */
492
static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
493
{
494
	mtp_additional_properties m = follow_mem_(node);
495
	return max_property(mode, m);
496
}
497

498
499
/**
 * Check if a graph represents a const or a pure function by following
 * the memory chains of all Returns and kept memory nodes.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call and results may be committed
 *             to the graph's properties
 */
static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
{
	ir_node *end, *endbl;
	int j;
	ir_entity *entity   = get_irg_entity(irg);
	ir_type   *type     = get_entity_type(entity);
	size_t     n_params = get_method_n_params(type);
	size_t     i;
	mtp_additional_properties may_be_const = mtp_property_const;
	mtp_additional_properties prop = get_irg_additional_properties(irg);

	/* libfirm handles aggregate parameters by passing around pointers to
	 * stuff in memory, so if we have compound parameters we are never const */
	for (i = 0; i < n_params; ++i) {
		ir_type *param = get_method_param_type(type, i);
		if (is_compound_type(param)) {
			prop        &= ~mtp_property_const;
			may_be_const = mtp_no_property;
		}
	}

	if (prop & mtp_property_const) {
		/* already marked as a const function */
		return mtp_property_const;
	}
	if (prop & mtp_property_pure) {
		/* already marked as a pure function */
		return mtp_property_pure;
	}

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return mtp_no_property;
	}
	if (IS_IRG_BUSY(irg)) {
		/* We are still evaluate this method.
		 * The function (indirectly) calls itself and thus may not terminate.
		 */
		return mtp_no_property;
	}
	SET_IRG_BUSY(irg);

	end   = get_irg_end(irg);
	endbl = get_nodes_block(end);
	prop  = may_be_const;

	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	inc_irg_visited(irg);
	/* mark the initial mem: recursion of follow_mem() stops here */
	mark_irn_visited(get_irg_initial_mem(irg));

	/* visit every Return */
	for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
		ir_node   *node = get_Block_cfgpred(endbl, j);
		unsigned   code = get_irn_opcode(node);
		ir_node   *mem;

		/* Bad nodes usually do NOT produce anything, so it's ok */
		if (code == iro_Bad)
			continue;

		if (code == iro_Return) {
			mem = get_Return_mem(node);

			/* Bad nodes usually do NOT produce anything, so it's ok */
			if (is_Bad(mem))
				continue;

			/* untouched initial memory needs no analysis */
			if (mem != get_irg_initial_mem(irg))
				prop = max_property(prop, follow_mem(mem, prop));
		} else {
			/* Exception found. Cannot be const or pure. */
			prop = mtp_no_property;
			break;
		}
		if (prop == mtp_no_property)
			break;
	}

	if (prop != mtp_no_property) {
		/* check, if a keep-alive exists */
		for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
			ir_node *kept = get_End_keepalive(end, j);

			/* a kept block indicates an endless loop */
			if (is_Block(kept)) {
				prop = mtp_no_property;
				break;
			}

			if (mode_M != get_irn_mode(kept))
				continue;

			/* kept memory must also be const/pure */
			prop = max_property(prop, follow_mem(kept, prop));
			if (prop == mtp_no_property)
				break;
		}
	}

	if (top) {
		/* Set the property only if we are at top-level. */
		if (prop != mtp_no_property) {
			add_irg_additional_properties(irg, prop);
		}
		SET_IRG_READY(irg);
	}
	CLEAR_IRG_BUSY(irg);
	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
	return prop;
}  /* check_const_or_pure_function */
613
614
615

/**
 * Handle calls to const functions.
616
617
 *
 * @param ctx  context
618
 */
619
620
static void handle_const_Calls(env_t *ctx)
{
Michael Beck's avatar
Michael Beck committed
621
	size_t i, n;
622

623
624
	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel      = 0;
625

626
	/* all calls of const functions can be transformed */
627
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
628
		ir_graph *irg  = get_irp_irg(i);
629

630
631
632
633
		ctx->float_const_call_list    = NULL;
		ctx->nonfloat_const_call_list = NULL;
		ctx->pure_call_list           = NULL;
		ctx->proj_list                = NULL;
Michael Beck's avatar
Michael Beck committed
634
635

		ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
636
		irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
637

Michael Beck's avatar
Michael Beck committed
638
		if (ctx->float_const_call_list != NULL)
639
			fix_const_call_lists(irg, ctx);
Michael Beck's avatar
Michael Beck committed
640
		ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
641
	}
642
643
}  /* handle_const_Calls */

644
645
646
647
648
/**
 * Handle calls to nothrow functions.
 *
 * @param ctx  context
 */
649
650
static void handle_nothrow_Calls(env_t *ctx)
{
Michael Beck's avatar
Michael Beck committed
651
	size_t i, n;
652
653
654
655
656

	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel      = 0;

	/* all calls of const functions can be transformed */
Michael Beck's avatar
Michael Beck committed
657
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
658
659
660
661
		ir_graph *irg  = get_irp_irg(i);

		ctx->nothrow_call_list = NULL;
		ctx->proj_list         = NULL;
Michael Beck's avatar
Michael Beck committed
662
663

		ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
664
665
		irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);

Michael Beck's avatar
Michael Beck committed
666
		if (ctx->nothrow_call_list)
667
			fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
Michael Beck's avatar
Michael Beck committed
668
		ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
669
670
671
672
673
674
675
676
677
	}
}

/**
 * Check, whether a given node represents a return value of
 * a malloc like function (ie, new heap allocated memory).
 *
 * @param node  the node to check
 */
678
679
static int is_malloc_call_result(const ir_node *node)
{
680
681
682
683
	if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
		/* Firm style high-level allocation */
		return 1;
	}
684
	/* TODO: check mtp_malloc */
685
	return 0;
686
}
687
688
689
690

/**
 * Update a property depending on a call property.
 */
691
static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
692
{
693
694
	mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
	mtp_additional_properties r = orig_prop & call_prop;
695
	return r | t;
696
}
697

698
699
700
/**
 * Check if a node is stored anywhere, i.e. whether an alias to it might
 * escape.  Inspects all users (out-edges) of n, recursing through
 * address-computation nodes.
 *
 * @return 1 if n may be stored, 0 otherwise
 */
static int is_stored(const ir_node *n)
{
	const ir_edge_t *edge;
	const ir_node   *ptr;

	foreach_out_edge(n, edge) {
		const ir_node *succ = get_edge_src_irn(edge);

		switch (get_irn_opcode(succ)) {
		case iro_Return:
		case iro_Load:
		case iro_Cmp:
			/* ok */
			break;
		case iro_Store:
			/* storing the value itself creates an alias */
			if (get_Store_value(succ) == n)
				return 1;
			/* ok if its only the address input */
			break;
		case iro_Sel:
		case iro_Cast:
		case iro_Confirm:
			/* address computation: check the users of the result too */
			if (is_stored(succ))
				return 1;
			break;
		case iro_Call:
			ptr = get_Call_ptr(succ);
			if (is_Global(ptr)) {
				ir_entity *ent = get_Global_entity(ptr);
				size_t    i;

				/* we know the called entity */
				for (i = get_Call_n_params(succ); i > 0;) {
					if (get_Call_param(succ, --i) == n) {
						/* n is the i'th param of the call */
						if (get_method_param_access(ent, i) & ptr_access_store) {
							/* n is store in ent */
							return 1;
						}
					}
				}
			} else {
				/* unknown call address */
				return 1;
			}
			break;
		default:
			/* bad, potential alias */
			return 1;
		}
	}
	return 0;
}  /* is_stored */

/**
 * Check that the return value of an irg is not stored anywhere.
 *
 * return ~mtp_property_malloc if return values are stored, ~0 else
 */
static mtp_additional_properties check_stored_result(ir_graph *irg)
{
	ir_node  *end_blk = get_irg_end_block(irg);
	int      i;
	/* optimistic default: mask that clears nothing */
	mtp_additional_properties res = ~mtp_no_property;
	/* is_stored() needs out-edges; remember whether they were already on */
	int      old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);
		size_t  j;

		if (! is_Return(pred))
			continue;
		for (j = get_Return_n_ress(pred); j > 0;) {
			const ir_node *irn = get_Return_res(pred, --j);

			if (is_stored(irn)) {
				/* bad, might create an alias */
				res = ~mtp_property_malloc;
				goto finish;
			}
		}
	}
finish:
	/* restore the edge state we found on entry */
	if (! old_edges)
		edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
	return res;
}
788

789
790
791
792
793
794
/**
 * Check if a graph represents a nothrow or a malloc function by
 * inspecting all predecessors of the end block: Returns are checked
 * for malloc-like results, other (exception) flow for nothrow.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call and non-temporary results
 *             may be committed to the graph's properties
 */
static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
{
	/* start optimistic with both properties; they get cleared on evidence */
	mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
	ir_node                  *end_blk   = get_irg_end_block(irg);
	ir_entity *ent;
	ir_type   *mtp;
	int       i;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return get_irg_additional_properties(irg);
	}
	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluate this method. Be optimistic,
		return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
	}
	SET_IRG_BUSY(irg);

	ent = get_irg_entity(irg);
	mtp = get_entity_type(ent);

	/* a function without results cannot be malloc-like */
	if (get_method_n_ress(mtp) <= 0)
		curr_prop &= ~mtp_property_malloc;

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (is_Return(pred)) {
			if (curr_prop & mtp_property_malloc) {
				size_t j;

				/* check, if malloc is called here */
				for (j = get_Return_n_ress(pred); j > 0;) {
					ir_node *res = get_Return_res(pred, --j);

					/* skip Confirms and Casts */
					res = skip_HighLevel_ops(res);
					/* skip Proj's */
					while (is_Proj(res))
						res = get_Proj_pred(res);
					if (is_malloc_call_result(res)) {
						/* ok, this is a malloc */
					} else if (is_Call(res)) {
						ir_node *ptr = get_Call_ptr(res);

						if (is_Global(ptr)) {
							/* a direct call */
							ir_entity *ent    = get_Global_entity(ptr);
							ir_graph  *callee = get_entity_irg(ent);

							if (callee == irg) {
								/* A self-recursive call. The property did not depend on this call. */
							} else if (callee != NULL) {
								mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
								curr_prop = update_property(curr_prop, prop);
							} else {
								/* no graph: trust the declared properties */
								curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
							}
						} else if (get_opt_closed_world() &&
						           is_Sel(ptr) &&
						           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
							/* check if all possible callees are malloc functions. */
							size_t i, n_callees = get_Call_n_callees(res);
							if (n_callees == 0) {
								/* This is kind of strange:  dying code or a Call that will raise an exception
								   when executed as there is no implementation to call.  So better not
								   optimize. */
								curr_prop &= ~mtp_property_malloc;
								continue;
							}

							for (i = 0; i < n_callees; ++i) {
								ir_entity *ent = get_Call_callee(res, i);
								if (ent == unknown_entity) {
									/* we don't know which entity is called here */
									curr_prop &= ~mtp_property_malloc;
									break;
								}
								if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
									curr_prop &= ~mtp_property_malloc;
									break;
								}
							}
							/* if we pass the for cycle, malloc is still ok */
						} else {
							/* unknown call */
							curr_prop &= ~mtp_property_malloc;
						}
					} else {
						/* unknown return value */
						curr_prop &= ~mtp_property_malloc;
					}
				}
			}
		} else if (curr_prop & mtp_property_nothrow) {
			/* exception flow detected */
			pred = skip_Proj(pred);

			if (is_Call(pred)) {
				ir_node *ptr = get_Call_ptr(pred);

				if (is_Global(ptr)) {
					/* a direct call */
					ir_entity *ent    = get_Global_entity(ptr);
					ir_graph  *callee = get_entity_irg(ent);

					if (callee == irg) {
						/* A self-recursive call. The property did not depend on this call. */
					} else if (callee != NULL) {
						/* Note: we check here for nothrow only, so do NOT reset the malloc property */
						mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
						curr_prop = update_property(curr_prop, prop);
					} else {
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
							curr_prop &= ~mtp_property_nothrow;
					}
				} else if (get_opt_closed_world() &&
				           is_Sel(ptr) &&
				           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
					/* check if all possible callees are nothrow functions. */
					size_t i, n_callees = get_Call_n_callees(pred);
					if (n_callees == 0) {
						/* This is kind of strange:  dying code or a Call that will raise an exception
						   when executed as there is no implementation to call.  So better not
						   optimize. */
						curr_prop &= ~mtp_property_nothrow;
						continue;
					}

					for (i = 0; i < n_callees; ++i) {
						ir_entity *ent = get_Call_callee(pred, i);
						if (ent == unknown_entity) {
							/* we don't know which entity is called here */
							curr_prop &= ~mtp_property_nothrow;
							break;
						}
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
							curr_prop &= ~mtp_property_nothrow;
							break;
						}
					}
					/* if we pass the for cycle, nothrow is still ok */
				} else {
					/* unknown call */
					curr_prop &= ~mtp_property_nothrow;
				}
			} else {
				/* real exception flow possible. */
				curr_prop &= ~mtp_property_nothrow;
			}
		}
		if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
			/* no need to search further */
			break;
		}
	}

	if (curr_prop & mtp_property_malloc) {
		/*
		 * Note that the malloc property means not only return newly allocated
		 * memory, but also that this memory is ALIAS FREE.
		 * To ensure that, we do NOT allow that the returned memory is somewhere
		 * stored.
	     */
		curr_prop &= check_stored_result(irg);
	}

	if (curr_prop != mtp_no_property) {
		if (top || (curr_prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark an optimistic result.
			   Set the property only if we are sure that it does NOT base on
			   temporary results OR if we are at top-level. */
			add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
			SET_IRG_READY(irg);
		}
	}
	if (top)
		SET_IRG_READY(irg);
	CLEAR_IRG_BUSY(irg);
	return curr_prop;
}  /* check_nothrow_or_malloc */

978
979
/**
 * When a function was detected as "const", it might be moved out of loops.
Michael Beck's avatar
Michael Beck committed
980
 * This might be dangerous if the graph can contain endless loops.
981
 */
982
983
static void check_for_possible_endless_loops(ir_graph *irg)
{
984
	ir_loop *root_loop;
985
	assure_loopinfo(irg);
986
987
988

	root_loop = get_irg_loop(irg);
	if (root_loop->flags & loop_outer_loop)
989
		add_irg_additional_properties(irg, mtp_property_has_loop);
990
991
}

992
993
994
/*
 * optimize function calls by handling const functions
 */
995
void optimize_funccalls(void)
996
{
Michael Beck's avatar
Michael Beck committed
997
	size_t i, n;
998
	size_t last_idx;
999
	env_t  ctx;
Michael Beck's avatar
Michael Beck committed
1000
	size_t num_const   = 0;