/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Path-Sensitive Jump Threading
 * @date    10. Sep. 2006
 * @author  Christoph Mallon, Matthias Braun
 * @version $Id$
 */
#include "config.h"

#include "iroptimize.h"

#include <assert.h>
#include "array_t.h"
#include "debug.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgwalk.h"
#include "irnode.h"
#include "irnode_t.h"
#include "iredges.h"
#include "iredges_t.h"
#include "irtools.h"
#include "irgraph.h"
#include "tv.h"
#include "opt_confirms.h"
#include "iropt_dbg.h"
#include "irpass.h"
#include "vrp.h"

#undef AVOID_PHIB

DEBUG_ONLY(static firm_dbg_module_t *dbg);

/**
 * Adds the new predecessor x to node, which must be either a Block or a Phi.
 */
static void add_pred(ir_node* node, ir_node* x)
{
	ir_node** ins;
	int n;
	int i;

	assert(is_Block(node) || is_Phi(node));

	n = get_irn_arity(node);
	NEW_ARR_A(ir_node*, ins, n + 1);
	for (i = 0; i < n; i++)
		ins[i] = get_irn_n(node, i);
	ins[n] = x;
	set_irn_in(node, n + 1, ins);
}

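/* State for the simple SSA reconstruction below: ssa_second_def(_block) hold
 * the alternative definition (and its block) while search_def_and_create_phis()
 * walks backwards from a use, creating Phis where several control flow
 * predecessors join and caching results in the block links/visited flags. */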
static ir_node *ssa_second_def;
static ir_node *ssa_second_def_block;

static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
                                           int first)
{
	int i;
	int n_cfgpreds;
	ir_graph *irg;
	ir_node *phi;
	ir_node **in;

	/* This is needed because we sometimes create Bad nodes */
	if (is_Bad(block))
		return new_Bad();

	/* the other defs can't be marked for cases where a user of the original
	 * value is in the same block as the alternative definition.
	 * In this case we mustn't use the alternative definition.
	 * So we keep a flag that indicates whether we walked at least 1 block
	 * away and may use the alternative definition */
	if (block == ssa_second_def_block && !first) {
		return ssa_second_def;
	}

	/* already processed this block? */
	if (irn_visited(block)) {
		ir_node *value = (ir_node*) get_irn_link(block);
		return value;
	}

	irg = get_irn_irg(block);
	assert(block != get_irg_start_block(irg));

	/* a Block with only 1 predecessor needs no Phi */
	n_cfgpreds = get_Block_n_cfgpreds(block);
	if (n_cfgpreds == 1) {
		ir_node *pred_block = get_Block_cfgpred_block(block, 0);
		ir_node *value      = search_def_and_create_phis(pred_block, mode, 0);

		set_irn_link(block, value);
		mark_irn_visited(block);
		return value;
	}

	/* create a new Phi */
	NEW_ARR_A(ir_node*, in, n_cfgpreds);
	for (i = 0; i < n_cfgpreds; ++i)
		in[i] = new_Unknown(mode);

	phi = new_r_Phi(block, n_cfgpreds, in, mode);
	set_irn_link(block, phi);
	mark_irn_visited(block);

	/* set Phi predecessors */
	for (i = 0; i < n_cfgpreds; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		ir_node *pred_val   = search_def_and_create_phis(pred_block, mode, 0);

		set_irn_n(phi, i, pred_val);
	}

	return phi;
}

/**
 * Given a set of values this function constructs SSA-form for the users of the
 * first value (the users are determined through the out-edges of the value).
 * Uses the irn_visited flags. Works without using the dominance tree.
 */
static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
                          ir_node *second_block, ir_node *second_val)
{
	ir_graph *irg;
	ir_mode *mode;
	const ir_edge_t *edge;
	const ir_edge_t *next;
	/* no need to do anything */
	if (orig_val == second_val)
		return;

	irg = get_irn_irg(orig_val);
	inc_irg_visited(irg);
	mode = get_irn_mode(orig_val);
	set_irn_link(orig_block, orig_val);
	mark_irn_visited(orig_block);

	ssa_second_def_block = second_block;
	ssa_second_def       = second_val;
	/* Only fix the users of the first, i.e. the original node */
	foreach_out_edge_safe(orig_val, edge, next) {
		ir_node *user = get_edge_src_irn(edge);
		int j = get_edge_src_pos(edge);
		ir_node *user_block = get_nodes_block(user);
		ir_node *newval;
		/* ignore keeps */
		if (is_End(user))
			continue;

		DB((dbg, LEVEL_3, ">>> Fixing user %+F (pred %d == %+F)\n", user, j, get_irn_n(user, j)));

		if (is_Phi(user)) {
			ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
			newval = search_def_and_create_phis(pred_block, mode, 1);
		} else {
			newval = search_def_and_create_phis(user_block, mode, 1);
		}

		/* don't fix newly created Phis from the SSA construction */
		if (newval != user) {
			DB((dbg, LEVEL_4, ">>>> Setting input %d of %+F to %+F\n", j, user, newval));
			set_irn_n(user, j, newval);
		}
	}
}

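/**
 * Splits the critical edge at the given predecessor position of block by
 * inserting a new block that contains only a Jmp to block.
 */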
static void split_critical_edge(ir_node *block, int pos)
{
	ir_graph *irg = get_irn_irg(block);
	ir_node *in[1];
	ir_node *new_block;
	ir_node *new_jmp;

	in[0] = get_Block_cfgpred(block, pos);
	new_block = new_r_Block(irg, 1, in);
	new_jmp = new_r_Jmp(new_block);
	set_Block_cfgpred(block, pos, new_jmp);
}

typedef struct jumpthreading_env_t {
	ir_node       *true_block;  /**< the block the threaded jumps should enter directly */
	ir_node       *cmp;         /**< The Compare node that might be partially evaluated */
	pn_Cmp         pnc;         /**< The Compare mode of the Compare node. */
	ir_node       *cnst;        /**< the constant on the right-hand side of the Compare */
	tarval        *tv;          /**< the tarval the Cond selector must evaluate to */
	ir_visited_t   visited_nr;  /**< visited number used to mark already copied nodes */

	ir_node       *cnst_pred;   /**< the block before the constant */
	int            cnst_pos;    /**< the pos to the constant block (needed to
	                                  kill that edge later) */
} jumpthreading_env_t;

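/**
 * Copies the given node into copy_block, recursively copying its predecessors
 * from the same block. Phis are not copied but evaluated: their operand at
 * position j is used instead. The copy is remembered in the node's link field
 * and the node is marked with env->visited_nr.
 */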
static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                                  ir_node *block, ir_node *copy_block, int j,
                                  ir_node *node)
{
	int      i, arity;
	ir_node *copy;

	/* we can evaluate Phis right now, all other nodes get copied */
	if (is_Phi(node)) {
		copy = get_Phi_pred(node, j);
		/* we might have to evaluate a Phi-cascade */
		if (get_irn_visited(copy) >= env->visited_nr) {
			copy = get_irn_link(copy);
		}
	} else {
		copy = exact_copy(node);
		set_nodes_block(copy, copy_block);

		assert(get_irn_mode(copy) != mode_X);

		arity = get_irn_arity(copy);
		for (i = 0; i < arity; ++i) {
			ir_node *pred     = get_irn_n(copy, i);
			ir_node *new_pred;

			if (get_nodes_block(pred) != block)
				continue;

			if (get_irn_visited(pred) >= env->visited_nr) {
				new_pred = get_irn_link(pred);
			} else {
				new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
			}
			DB((dbg, LEVEL_2, ">> Set Pred of %+F to %+F\n", copy, new_pred));
			set_irn_n(copy, i, new_pred);
		}
	}

	set_irn_link(node, copy);
	set_irn_visited(node, env->visited_nr);

	return copy;
}

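/**
 * Copies all data nodes of block into copy_block, evaluating Phis at
 * predecessor position j, and afterwards reconstructs SSA form for the users
 * of the original values.
 */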
static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                         ir_node *copy_block, int j)
{
	const ir_edge_t *edge;

	/* Look at all nodes in the cond_block and copy them into pred */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		ir_node *copy;
		ir_mode *mode;

		if (is_Block(node)) {
			/* Block->Block edge, should be the MacroBlock edge */
			assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
			continue;
		}

		/* ignore control flow */
		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
			continue;
#ifdef AVOID_PHIB
		/* we may not copy mode_b nodes, because this could produce Phis with
		 * mode_b which can't be handled in all backends. Instead we duplicate
		 * the node and move it to its users */
		if (mode == mode_b) {
			const ir_edge_t *edge, *next;
			ir_node *pred;
			int      pn;

			assert(is_Proj(node));

			pred = get_Proj_pred(node);
			pn   = get_Proj_proj(node);

			foreach_out_edge_safe(node, edge, next) {
				ir_node *cmp_copy;
				ir_node *user       = get_edge_src_irn(edge);
				int pos             = get_edge_src_pos(edge);
				ir_node *user_block = get_nodes_block(user);

				if (user_block == block)
					continue;

				cmp_copy = exact_copy(pred);
				set_nodes_block(cmp_copy, user_block);
				copy = new_r_Proj(current_ir_graph, user_block, cmp_copy, mode_b, pn);
				set_irn_n(user, pos, copy);
			}
			continue;
		}
#endif

		copy = copy_and_fix_node(env, block, copy_block, j, node);

		/* we might hit values in blocks that have already been processed by a
		 * recursive find_const_or_confirm()/find_candidate() call */
		assert(get_irn_visited(copy) <= env->visited_nr);
		if (get_irn_visited(copy) >= env->visited_nr) {
			ir_node *prev_copy = get_irn_link(copy);
			if (prev_copy != NULL)
				set_irn_link(node, prev_copy);
		}
	}

	/* fix data-flow (and reconstruct SSA if needed) */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		ir_node *copy_node;
		ir_mode *mode;

		if (is_Block(node)) {
			/* Block->Block edge, should be the MacroBlock edge */
			assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
			continue;
		}
		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
			continue;
#ifdef AVOID_PHIB
		if (mode == mode_b)
			continue;
#endif

		DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));

		copy_node = get_irn_link(node);
		construct_ssa(block, node, copy_block, copy_node);
	}
}

/**
 * returns whether the cmp evaluates to true or false, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param pnc       the compare mode of the Compare
 * @param tv_left   the left tarval
 * @param tv_right  the right tarval
 */
static int eval_cmp_tv(pn_Cmp pnc, tarval *tv_left, tarval *tv_right)
{
	pn_Cmp cmp_result = tarval_cmp(tv_left, tv_right);

	/* tarval_cmp() returns pn_Cmp_False if the values can't be compared */
	if (cmp_result == pn_Cmp_False)
		return -1;
	if ((cmp_result & pnc) != cmp_result)
		return 0;

	return 1;
}

/**
 * returns whether the cmp evaluates to true or false according to vrp
 * information, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param pnc       the compare mode of the Compare
 * @param left      the left node
 * @param right     the right node
 */
static int eval_cmp_vrp(pn_Cmp pnc, ir_node *left, ir_node *right)
{
	pn_Cmp cmp_result = vrp_cmp(left, right);
	/* vrp_cmp() returns pn_Cmp_False if nothing can be deduced */
	if (cmp_result == pn_Cmp_False) {
		return -1;
	}
	if ((cmp_result & pnc) != cmp_result) {
		if ((cmp_result & pnc) != 0) {
			return -1;
		}
		return 0;
	}
	return 1;
}
/**
 * returns whether the cmp evaluates to true or false, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param env      the environment
 * @param cand     the candidate node, either a Const or a Confirm
 */
static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
{
	if (is_Const(cand)) {
		tarval *tv_cand   = get_Const_tarval(cand);
		tarval *tv_cmp    = get_Const_tarval(env->cnst);

		return eval_cmp_tv(env->pnc, tv_cand, tv_cmp);
	} else { /* a Confirm */
		tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->pnc);

		if (res == tarval_bad)
			return -1;
		return res == tarval_b_true;
	}
}

/**
 * Check for Const or Confirm with Const.
 */
static int is_Const_or_Confirm(const ir_node *node)
{
	if (is_Confirm(node))
		node = get_Confirm_bound(node);
	return is_Const(node);
}

/**
 * get the tarval of a Const or of a Confirm with a Const bound
 */
static tarval *get_Const_or_Confirm_tarval(const ir_node *node)
{
	if (is_Confirm(node)) {
		if (get_Confirm_bound(node))
			node = get_Confirm_bound(node);
	}
	return get_Const_tarval(node);
}

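/**
 * Searches, following Phis into the predecessor blocks, for a Const or Confirm
 * for which the Compare stored in env evaluates to true. On success the jump
 * is threaded directly into env->true_block and the block in which the
 * evaluation succeeded is returned, otherwise NULL.
 */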
static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
                                      ir_node *value)
{
	ir_node *block = get_nodes_block(jump);

	if (irn_visited_else_mark(value))
		return NULL;

	if (is_Const_or_Confirm(value)) {
		if (eval_cmp(env, value) <= 0) {
			return NULL;
		}

		DB((
			dbg, LEVEL_1,
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block
		));

		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);

		split_critical_edge(env->true_block, 0);

		/* we need a bigger visited nr when going back */
		env->visited_nr++;

		return block;
	}

	if (is_Phi(value)) {
		int i, arity;

		/* the Phi has to be in the same Block as the Jmp */
		if (get_nodes_block(value) != block) {
			return NULL;
		}

		arity = get_irn_arity(value);
		for (i = 0; i < arity; ++i) {
			ir_node *copy_block;
			ir_node *phi_pred = get_Phi_pred(value, i);
			ir_node *cfgpred  = get_Block_cfgpred(block, i);

			copy_block = find_const_or_confirm(env, cfgpred, phi_pred);
			if (copy_block == NULL)
				continue;

			/* copy duplicated nodes in copy_block and fix SSA */
			copy_and_fix(env, block, copy_block, i);

			if (copy_block == get_nodes_block(cfgpred)) {
				env->cnst_pred = block;
				env->cnst_pos  = i;
			}

			/* return now as we can't process more possibilities in 1 run */
			return copy_block;
		}
	}

	return NULL;
}

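/**
 * Searches, following Phis, for a value that makes the Cond selector constant:
 * either a Const/Confirm equal to env->tv or a Proj of a Cmp against a Const,
 * which is then handed on to find_const_or_confirm(). Returns the block in
 * which the evaluation succeeded or NULL.
 */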
static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                               ir_node *value)
{
	ir_node *block = get_nodes_block(jump);

	if (irn_visited_else_mark(value)) {
		return NULL;
	}

	if (is_Const_or_Confirm(value)) {
		tarval *tv = get_Const_or_Confirm_tarval(value);

		if (tv != env->tv)
			return NULL;

		DB((
			dbg, LEVEL_1,
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block
		));

		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);

		split_critical_edge(env->true_block, 0);

		/* we need a bigger visited nr when going back */
		env->visited_nr++;

		return block;
	}
	if (is_Phi(value)) {
		int i, arity;

		/* the Phi has to be in the same Block as the Jmp */
		if (get_nodes_block(value) != block)
			return NULL;

		arity = get_irn_arity(value);
		for (i = 0; i < arity; ++i) {
			ir_node *copy_block;
			ir_node *phi_pred = get_Phi_pred(value, i);
			ir_node *cfgpred  = get_Block_cfgpred(block, i);

			copy_block = find_candidate(env, cfgpred, phi_pred);
			if (copy_block == NULL)
				continue;

			/* copy duplicated nodes in copy_block and fix SSA */
			copy_and_fix(env, block, copy_block, i);

			if (copy_block == get_nodes_block(cfgpred)) {
				env->cnst_pred = block;
				env->cnst_pos  = i;
			}

			/* return now as we can't process more possibilities in 1 run */
			return copy_block;
		}
	}
	if (is_Proj(value)) {
		ir_node *left;
		ir_node *right;
		int      pnc;
		ir_node *cmp = get_Proj_pred(value);
		if (!is_Cmp(cmp))
			return NULL;

		left  = get_Cmp_left(cmp);
		right = get_Cmp_right(cmp);
		pnc   = get_Proj_proj(value);

		/* we assume that the constant is on the right side, swap left/right
		 * if needed */
		if (is_Const(left)) {
			ir_node *t = left;
			left       = right;
			right      = t;

			pnc        = get_inversed_pnc(pnc);
		}

		if (!is_Const(right))
			return NULL;

		if (get_nodes_block(left) != block) {
			return NULL;
		}

		/* negate condition when we're looking for the false block */
		if (env->tv == tarval_b_false) {
			pnc = get_negated_pnc(pnc, get_irn_mode(right));
		}

		/* (recursively) look if a pred of a Phi is a constant or a Confirm */
		env->cmp  = cmp;
		env->pnc  = pnc;
		env->cnst = right;

		return find_const_or_confirm(env, jump, left);
	}

	return NULL;
}

/**
 * Block-walker: searches for the following construct
 *
 *  Const or Phi with constants
 *           |
 *          Cmp
 *           |
 *         Cond
 *          /
 *       ProjX
 *        /
 *     Block
 */
static void thread_jumps(ir_node* block, void* data)
{
	jumpthreading_env_t env;
	int *changed = data;
	ir_node *selector;
	ir_node *projx;
	ir_node *cond;
	ir_node *copy_block;
	int      selector_evaluated;
	const ir_edge_t *edge, *next;
	ir_node *bad;
	size_t   cnst_pos;

	if (get_Block_n_cfgpreds(block) != 1)
		return;

	projx = get_Block_cfgpred(block, 0);
	if (!is_Proj(projx))
		return;
	assert(get_irn_mode(projx) == mode_X);

	cond = get_Proj_pred(projx);
	if (!is_Cond(cond))
		return;

	selector = get_Cond_selector(cond);
	/* TODO handle switch Conds */
	if (get_irn_mode(selector) != mode_b)
		return;
	/* handle cases that can be immediately evaluated */
	selector_evaluated = -1;
	if (is_Proj(selector)) {
		ir_node *cmp = get_Proj_pred(selector);
		if (is_Cmp(cmp)) {
			ir_node *left  = get_Cmp_left(cmp);
			ir_node *right = get_Cmp_right(cmp);
			if (is_Const(left) && is_Const(right)) {
				int     pnc      = get_Proj_proj(selector);
				tarval *tv_left  = get_Const_tarval(left);
				tarval *tv_right = get_Const_tarval(right);

				selector_evaluated = eval_cmp_tv(pnc, tv_left, tv_right);
			}
			if (selector_evaluated < 0) {
				/* This is only the case if the predecessor nodes are not
				 * constant or the comparison could not be evaluated.
				 * Try with VRP information now.
				 */
				int pnc = get_Proj_proj(selector);

				selector_evaluated = eval_cmp_vrp(pnc, left, right);
			}
		}
	} else if (is_Const_or_Confirm(selector)) {
		tarval *tv = get_Const_or_Confirm_tarval(selector);
		if (tv == tarval_b_true) {
			selector_evaluated = 1;
		} else {
			assert(tv == tarval_b_false);
			selector_evaluated = 0;
		}
	}

	env.cnst_pred = NULL;
	if (get_Proj_proj(projx) == pn_Cond_false) {
		env.tv = tarval_b_false;
		if (selector_evaluated >= 0)
			selector_evaluated = !selector_evaluated;
	} else {
		env.tv = tarval_b_true;
	}

	if (selector_evaluated == 0) {
		bad = new_Bad();
		exchange(projx, bad);
		*changed = 1;
		return;
	} else if (selector_evaluated == 1) {
		dbg_info *dbgi = get_irn_dbg_info(selector);
		ir_node  *jmp  = new_rd_Jmp(dbgi, get_nodes_block(projx));
		DBG_OPT_JUMPTHREADING(projx, jmp);
		exchange(projx, jmp);
		*changed = 1;
		return;
	}

	/* (recursively) look if a pred of a Phi is a constant or a Confirm */
	env.true_block = block;
	inc_irg_visited(current_ir_graph);
	env.visited_nr = get_irg_visited(current_ir_graph);

	copy_block = find_candidate(&env, projx, selector);
	if (copy_block == NULL)
		return;

	/* we have to remove the edge towards the pred as the pred now
	 * jumps into the true_block. We also have to shorten Phis
	 * in our block because of this */
	bad      = new_Bad();
	cnst_pos = env.cnst_pos;

	/* shorten Phis */
	foreach_out_edge_safe(env.cnst_pred, edge, next) {
		ir_node *node = get_edge_src_irn(edge);

		if (is_Phi(node))
			set_Phi_pred(node, cnst_pos, bad);
	}

	set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);

	/* the graph is changed now */
	*changed = 1;
}

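/* Performs jump threading on irg: repeatedly walks all blocks and threads
 * jumps until nothing changes anymore, then invalidates the affected analysis
 * info and cleans up the control flow. */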
void opt_jumpthreading(ir_graph* irg)
{
	int changed, rerun;
	FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");

	DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));

	remove_critical_cf_edges(irg);
	edges_assure(irg);
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

	changed = 0;
	do {
		rerun = 0;
		irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
		changed |= rerun;
	} while (rerun);

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

	if (changed) {
		/* control flow changed, some blocks may become dead */
		set_irg_outs_inconsistent(irg);
		set_irg_doms_inconsistent(irg);
		set_irg_extblk_inconsistent(irg);
		set_irg_loopinfo_inconsistent(irg);
		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);

		/* Dead code might be created. Optimize it away as it is dangerous
		 * to call optimize_df() on dead code. */
		optimize_cf(irg);
	}
}

/* Creates an ir_graph pass for opt_jumpthreading. */
ir_graph_pass_t *opt_jumpthreading_pass(const char *name)
{
	return def_graph_pass(name ? name : "jumpthreading", opt_jumpthreading);
}  /* opt_jumpthreading_pass */