beverify.c 22.8 KB
Newer Older
Christian Würdig's avatar
Christian Würdig committed
1
2
/*
 * This file is part of libFirm.
3
 * Copyright (C) 2012 University of Karlsruhe.
Christian Würdig's avatar
Christian Würdig committed
4
5
 */

Christian Würdig's avatar
Christian Würdig committed
6
7
8
9
10
/**
 * @file
 * @brief       Various verify routines that check a scheduled graph for correctness.
 * @author      Matthias Braun
 * @date        05.05.2006
11
 */
12
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

#include "array.h"
#include "beirg.h"
#include "belistsched.h"
#include "belive.h"
#include "benode.h"
#include "besched.h"
#include "beverify.h"
#include "bitset.h"
#include "iredges_t.h"
#include "irdump_t.h"
#include "irgwalk.h"
#include "irnode_t.h"
#include "irprintf.h"
#include "set.h"
30

31
static bool my_values_interfere(const ir_node *a, const ir_node *b);
32

33
/** Environment for the register pressure verifier walker. */
typedef struct be_verify_register_pressure_env_t_ {
	be_lv_t                     *lv;                  /**< Liveness information. */
	const arch_register_class_t *cls;                 /**< the register class to check for */
	unsigned                    registers_available;  /**< number of available registers */
	bool                        problem_found;        /**< flag indicating if a problem was found */
} be_verify_register_pressure_env_t;

Christian Würdig's avatar
Christian Würdig committed
40
41
42
/**
 * Print all nodes of a pset into a file.
 */
43
44
static void print_living_values(FILE *F, const ir_nodeset_t *live_nodes)
{
Christian Würdig's avatar
Christian Würdig committed
45
	ir_fprintf(F, "\t");
46
	foreach_ir_nodeset(live_nodes, node, iter) {
Christian Würdig's avatar
Christian Würdig committed
47
		ir_fprintf(F, "%+F ", node);
48
	}
Christian Würdig's avatar
Christian Würdig committed
49
	ir_fprintf(F, "\n");
50
51
}

52
/**
 * Emit a verifier warning on stderr, prefixed with the node's block and
 * the linker name of the graph's entity. @p fmt is an ir_printf format.
 */
static void verify_warnf(ir_node const *const node, char const *const fmt, ...)
{
	FILE *const out = stderr;

	ir_graph   *const irg  = get_irn_irg(node);
	char const *const name = get_entity_ld_name(get_irg_entity(irg));
	ir_fprintf(out, "%+F(%s): verify warning: ", get_block_const(node), name);

	va_list args;
	va_start(args, fmt);
	ir_vfprintf(out, fmt, args);
	va_end(args);
	fputc('\n', out);
}

Christian Würdig's avatar
Christian Würdig committed
68
69
70
/**
 * Check if number of live nodes never exceeds the number of available registers.
 */
71
72
static void verify_liveness_walker(ir_node *block, void *data)
{
Christian Würdig's avatar
Christian Würdig committed
73
	be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
74
	ir_nodeset_t live_nodes;
75

Christian Würdig's avatar
Christian Würdig committed
76
	/* collect register pressure info, start with end of a block */
77
	ir_nodeset_init(&live_nodes);
78
	be_liveness_end_of_block(env->lv, env->cls, block,
79
	                         &live_nodes);
Christian Würdig's avatar
Christian Würdig committed
80

81
	unsigned pressure = ir_nodeset_size(&live_nodes);
82
	if (pressure > env->registers_available) {
83
84
		verify_warnf(block, "register pressure too high at end of block (%d/%d):",
			pressure, env->registers_available);
85
		print_living_values(stderr, &live_nodes);
86
		env->problem_found = true;
87
	}
88

89
	sched_foreach_non_phi_reverse(block, irn) {
Sebastian Hack's avatar
Sebastian Hack committed
90
		// print_living_values(stderr, &live_nodes);
91
		be_liveness_transfer(env->cls, irn, &live_nodes);
92

93
		pressure = ir_nodeset_size(&live_nodes);
94

95
		if (pressure > env->registers_available) {
96
97
			verify_warnf(block, "register pressure too high before %+F (%d/%d):",
				irn, pressure, env->registers_available);
98
			print_living_values(stderr, &live_nodes);
99
			env->problem_found = true;
100
101
		}
	}
102
	ir_nodeset_destroy(&live_nodes);
103
104
}

105
/**
 * Verify that register pressure for class @p cls never exceeds the number
 * of allocatable registers anywhere in @p irg.
 * @return true if no problem was found.
 */
bool be_verify_register_pressure(ir_graph *irg, const arch_register_class_t *cls)
{
	be_verify_register_pressure_env_t env = {
		.lv                  = be_liveness_new(irg),
		.cls                 = cls,
		.registers_available = be_get_n_allocatable_regs(irg, cls),
		.problem_found       = false,
	};

	be_liveness_compute_sets(env.lv);
	irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
	be_liveness_free(env.lv);

	return !env.problem_found;
}

120
/*--------------------------------------------------------------------------- */
121

122
/** Environment for the schedule verifier walkers. */
typedef struct be_verify_schedule_env_t_ {
	bool      problem_found; /**< flags indicating a problem */
	bitset_t *scheduled;     /**< bitset of scheduled nodes */
} be_verify_schedule_env_t;

127
128
/**
 * Block walker verifying the schedule of one block:
 *   1. no node is scheduled twice,
 *   2. every scheduled node belongs to this block,
 *   3. timesteps strictly increase along the schedule,
 *   4. nodes flagged arch_irn_flag_not_scheduled do not appear,
 *   5. all Phi nodes come before any non-Phi node,
 *   6. at most one control flow op per block, followed only by (Copy)Keeps,
 *   7. every use is scheduled after its definition,
 *   8. no dead (use-less) node is scheduled,
 *   9. (Copy)Keeps directly follow one of their arguments.
 * Problems are reported via verify_warnf() and flagged in the env.
 */
static void verify_schedule_walker(ir_node *block, void *data)
{
	be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;

	ir_node         *non_phi_found  = NULL;
	ir_node         *cfchange_found = NULL;
	sched_timestep_t last_timestep  = 0;
	sched_foreach(block, node) {
		/* this node is scheduled */
		if (bitset_is_set(env->scheduled, get_irn_idx(node))) {
			/* fixed: the %+F conversion had no matching argument (undefined
			 * behavior in the varargs handling) and "schedule" was a typo */
			verify_warnf(block, "%+F appears to be scheduled twice", node);
			env->problem_found = true;
		}
		bitset_set(env->scheduled, get_irn_idx(node));

		/* Check that scheduled nodes are in the correct block */
		if (get_nodes_block(node) != block) {
			verify_warnf(block, "%+F is in wrong %+F", node, get_nodes_block(node));
			env->problem_found = true;
		}

		/* Check that timesteps are increasing */
		sched_timestep_t timestep = sched_get_time_step(node);
		if (timestep <= last_timestep) {
			verify_warnf(block, "schedule timestep did not increase at %+F", node);
			env->problem_found = true;
		}
		last_timestep = timestep;

		if (arch_get_irn_flags(node) & arch_irn_flag_not_scheduled) {
			verify_warnf(block, "flag_not_scheduled node %+F scheduled anyway", node);
			env->problem_found = true;
		}

		/* Check that phis come before any other node */
		if (!is_Phi(node)) {
			non_phi_found = node;
		} else if (non_phi_found) {
			verify_warnf(block, "%+F scheduled after non-Phi %+F", node, non_phi_found);
			env->problem_found = true;
		}

		/* Check for control flow changing nodes */
		if (is_cfop(node)) {
			/* check, that only one CF operation is scheduled */
			if (cfchange_found != NULL) {
				verify_warnf(block, "additional control flow changing node %+F scheduled after %+F", node, cfchange_found);
				env->problem_found = true;
			} else {
				cfchange_found = node;
			}
		} else if (cfchange_found != NULL) {
			/* keepany isn't a real instruction. */
			if (!be_is_Keep(node)) {
				verify_warnf(block, "%+F scheduled after control flow changing node", node);
				env->problem_found = true;
			}
		}

		/* Check that all uses come before their definitions */
		if (!is_Phi(node)) {
			sched_timestep_t nodetime = sched_get_time_step(node);
			foreach_irn_in(node, i, arg) {
				/* arguments in other blocks or unscheduled arguments are
				 * not constrained by this block's schedule */
				if (get_nodes_block(arg) != block || !sched_is_scheduled(arg))
					continue;

				if (sched_get_time_step(arg) >= nodetime) {
					verify_warnf(block, "%+F used by %+F before it was defined", arg, node);
					env->problem_found = true;
				}
			}
		}

		/* Check that no dead nodes are scheduled */
		if (get_irn_n_edges(node) == 0) {
			verify_warnf(block, "%+F is dead but scheduled", node);
			env->problem_found = true;
		}

		if (be_is_Keep(node) || be_is_CopyKeep(node)) {
			/* at least 1 of the keep arguments has to be its schedule
			 * predecessor */
			ir_node *prev = sched_prev(node);
			while (be_is_Keep(prev) || be_is_CopyKeep(prev))
				prev = sched_prev(prev);

			do {
				foreach_irn_in(node, i, in) {
					if (skip_Proj(in) == prev)
						goto ok;
				}
				prev = sched_prev(prev);
			} while (is_Phi(prev));
			verify_warnf(block, "%+F not scheduled after its pred node", node);
			env->problem_found = true;
ok:;
		}
	}
}
232

233
234
/**
 * Graph walker comparing, for each node, whether it was marked as
 * scheduled by verify_schedule_walker() against whether it is supposed
 * to be scheduled at all.
 */
static void check_schedule(ir_node *node, void *data)
{
	be_verify_schedule_env_t *env = (be_verify_schedule_env_t*)data;

	bool const expected = !arch_is_irn_not_scheduled(node);
	bool const actual   = bitset_is_set(env->scheduled, get_irn_idx(node));
	if (expected == actual)
		return;

	verify_warnf(node, "%+F should%s be scheduled", node, expected ? "" : " not");
	env->problem_found = true;
}

245
/**
 * Verify the schedule of @p irg.
 * @return true if no problem was found.
 */
bool be_verify_schedule(ir_graph *irg)
{
	be_verify_schedule_env_t env;
	env.problem_found = false;
	env.scheduled     = bitset_alloca(get_irg_last_idx(irg));

	/* per-block checks; also marks every scheduled node in env.scheduled */
	irg_block_walk_graph(irg, verify_schedule_walker, NULL, &env);
	/* then verify that exactly the schedulable nodes got scheduled */
	irg_walk_graph(irg, check_schedule, NULL, &env);

	return !env.problem_found;
}
257

258
/*--------------------------------------------------------------------------- */
259

260
/** Associates a spill-producing node with its spill slot entity. */
typedef struct spill_t {
	ir_node *spill;  /**< spill node (also memory Phis and MemPerm Projs are entered) */
	ir_entity *ent;  /**< frame entity serving as the spill slot */
} spill_t;

/** Environment for the spill slot verifier. */
typedef struct {
	set                  *spills;            /**< set of spill_t keyed by spill node */
	ir_node             **reloads;           /**< flexible array of all reloads found */
	bool                  problem_found;     /**< set if any check failed */
	get_frame_entity_func get_frame_entity;  /**< callback to query a node's frame entity */
} be_verify_spillslots_env_t;

272
273
static int cmp_spill(const void* d1, const void* d2, size_t size)
{
274
	(void) size;
275
276
	const spill_t* s1 = (const spill_t*)d1;
	const spill_t* s2 = (const spill_t*)d2;
277
278
279
	return s1->spill != s2->spill;
}

280
281
/** Look up the spill_t entry for @p node, or NULL if none was recorded. */
static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node)
{
	spill_t key;
	key.spill = node;
	return set_find(spill_t, env->spills, &key, sizeof(key), hash_ptr(node));
}

287
288
/** Look up the spill_t entry for @p node, inserting one with entity
 * @p ent if it does not exist yet. */
static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent)
{
	int const hash = hash_ptr(node);
	spill_t key;
	key.spill = node;

	spill_t *entry = set_find(spill_t, env->spills, &key, sizeof(key), hash);
	if (entry == NULL) {
		key.ent = ent;
		entry = set_insert(spill_t, env->spills, &key, sizeof(key), hash);
	}
	return entry;
}

302
303
static ir_node *get_memory_edge(const ir_node *node)
{
304
	ir_node *result = NULL;
305
	foreach_irn_in_r(node, i, arg) {
306
		if (get_irn_mode(arg) == mode_M) {
307
308
309
310
311
312
313
314
			assert(result == NULL);
			result = arg;
		}
	}

	return result;
}

315
static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
316

317
/** Warn if @p node lacks a frame entity (@p ent == NULL). */
static void be_check_entity(ir_node *node, ir_entity *ent)
{
	if (ent != NULL)
		return;
	verify_warnf(node, "%+F should have an entity assigned", node);
}

323
/** Record spill @p node and check that its own frame entity matches the
 * entity @p ent expected by @p reload. */
static void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
{
	ir_entity *slot_ent = env->get_frame_entity(node);
	be_check_entity(node, slot_ent);
	get_spill(env, node, ent);

	if (slot_ent == ent)
		return;

	verify_warnf(node, "spill %+F has different entity than reload %+F", node, reload);
	env->problem_found = true;
}

335
336
/** Record a MemPerm output Proj @p node: check its out-entity against
 * @p ent and recurse into the MemPerm's inputs with their in-entities. */
static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
{
	ir_node  *memperm = get_Proj_pred(node);
	unsigned  out     = get_Proj_num(node);

	ir_entity *out_ent = be_get_MemPerm_out_entity(memperm, out);
	be_check_entity(memperm, out_ent);
	if (out_ent != ent) {
		verify_warnf(node, "MemPerm %+F has different entity than reload %+F", node, reload);
		env->problem_found = true;
	}

	/* already visited? (the entry is inserted before recursing, so this
	 * also terminates cycles) */
	int const hash = hash_ptr(node);
	spill_t key;
	key.spill = node;
	if (set_find(spill_t, env->spills, &key, sizeof(key), hash) != NULL)
		return;

	key.ent = out_ent;
	(void)set_insert(spill_t, env->spills, &key, sizeof(key), hash);

	for (int i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
		ir_node   *const in     = get_irn_n(memperm, i);
		ir_entity *const in_ent = be_get_MemPerm_in_entity(memperm, i);
		collect(env, in, memperm, in_ent);
	}
}

367
368
/** Record a memory Phi @p node and recurse into its arguments, which may
 * themselves be spills/MemPerms/memory Phis. */
static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent)
{
	assert(is_Phi(node));

	/* already visited? (inserted before recursing to terminate Phi cycles) */
	int const hash = hash_ptr(node);
	spill_t key;
	key.spill = node;
	if (set_find(spill_t, env->spills, &key, sizeof(key), hash) != NULL)
		return;

	key.ent = ent;
	(void)set_insert(spill_t, env->spills, &key, sizeof(key), hash);

	/* is 1 of the arguments a spill? */
	foreach_irn_in(node, i, in) {
		collect(env, in, reload, ent);
	}
}

388
389
/** Dispatch on the kind of @p node (spill, MemPerm Proj or memory Phi)
 * and record it; other node kinds are ignored. */
static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
{
	if (arch_irn_is(node, spill)) {
		collect_spill(env, node, reload, ent);
		return;
	}
	if (is_Proj(node)) {
		collect_memperm(env, node, reload, ent);
		return;
	}
	if (is_Phi(node) && get_irn_mode(node) == mode_M)
		collect_memphi(env, node, reload, ent);
}

/**
 * This walker function searches for reloads and collects all the spills
 * and memphis attached to them.
 */
403
404
static void collect_spills_walker(ir_node *node, void *data)
{
405
	be_verify_spillslots_env_t *env = (be_verify_spillslots_env_t*)data;
406

407
	if (arch_irn_is(node, reload)) {
408
		ir_node *spill = get_memory_edge(node);
409
		if (spill == NULL) {
410
			verify_warnf(node, "no spill attached to reload %+F", node);
411
			env->problem_found = true;
412
413
			return;
		}
414
		ir_entity *ent = env->get_frame_entity(node);
415
		be_check_entity(node, ent);
416
417
418
419
420
421

		collect(env, spill, node, ent);
		ARR_APP1(ir_node*, env->reloads, node);
	}
}

422
423
424
425
/** Check pairwise that no two spills sharing the same slot entity have
 * interfering (simultaneously live) values. */
static void check_spillslot_interference(be_verify_spillslots_env_t *env)
{
	int const n_spills = set_count(env->spills);
	spill_t **entries  = ALLOCAN(spill_t*, n_spills);

	/* flatten the set into an array for pairwise iteration */
	int n = 0;
	foreach_set(env->spills, spill_t, entry) {
		entries[n++] = entry;
	}
	assert(n == n_spills);

	for (int i = 0; i < n_spills; ++i) {
		spill_t *const a = entries[i];
		for (int j = i + 1; j < n_spills; ++j) {
			spill_t *const b = entries[j];
			/* only spills sharing a slot entity can conflict */
			if (a->ent != b->ent)
				continue;

			if (my_values_interfere(a->spill, b->spill)) {
				verify_warnf(a->spill, "spillslots for %+F and %+F (in %+F) interfere",
						a->spill, b->spill, get_nodes_block(b->spill));
				env->problem_found = true;
			}
		}
	}
}

451
452
/**
 * Graph walker reporting spills (and MemPerm output Projs) that were
 * never reached from any reload by collect_spills_walker().
 */
static void check_lonely_spills(ir_node *node, void *data)
{
	be_verify_spillslots_env_t *env = (be_verify_spillslots_env_t*)data;

	/* only spill nodes and Projs of MemPerms are of interest here */
	if (arch_irn_is(node, spill)
	    || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
		spill_t *spill = find_spill(env, node);
		if (arch_irn_is(node, spill)) {
			/* a real spill must have a frame entity assigned */
			ir_entity *ent = env->get_frame_entity(node);
			be_check_entity(node, ent);
		}

		/* NOTE(review): unlike every other check in this file, this only
		 * warns and does not set env->problem_found -- confirm whether a
		 * lonely spill is meant to fail verification */
		if (spill == NULL)
			verify_warnf(node, "%+F not connected to a reload", node);
	}
}

468
/**
 * Verify the spill slot assignment of @p irg: entities of spills and
 * reloads must match, and spills sharing a slot must not interfere.
 * @return true if no problem was found.
 */
bool be_verify_spillslots(ir_graph *irg, get_frame_entity_func get_frame_entity)
{
	be_verify_spillslots_env_t env = {
		.spills           = new_set(cmp_spill, 10),
		.reloads          = NEW_ARR_F(ir_node*, 0),
		.problem_found    = false,
		.get_frame_entity = get_frame_entity,
	};

	irg_walk_graph(irg, collect_spills_walker, NULL, &env);
	irg_walk_graph(irg, check_lonely_spills, NULL, &env);

	check_spillslot_interference(&env);

	DEL_ARR_F(env.reloads);
	del_set(env.spills);

	return !env.problem_found;
}

487
/*--------------------------------------------------------------------------- */
488
489
490
491
492
493

/**
 * Check, if two values interfere.
 * @param a The first value.
 * @param b The second value.
 * @return true, if a and b interfere, false if not.
 */
static bool my_values_interfere(const ir_node *a, const ir_node *b)
{
	assert(a != b);
	int a2b = value_strictly_dominates(a, b);
	int b2a = value_strictly_dominates(b, a);

	/* If there is no dominance relation, they do not interfere. */
	if (!a2b && !b2a)
		return false;

	/*
	 * Adjust a and b so, that a dominates b if
	 * a dominates b or vice versa.
	 */
	if (b2a) {
		const ir_node *t = a;
		a = b;
		b = t;
	}

	ir_node *bb = get_nodes_block(b);

	/*
	 * Look at all usages of a.
	 * If there's one usage of a in the block of b, then
	 * we check, if this use is dominated by b, if that's true
	 * a and b interfere. Note that b must strictly dominate the user,
	 * since if b is the last user of in the block, b and a do not
	 * interfere.
	 * Uses of a not in b's block can be disobeyed, because the
	 * check for a being live at the end of b's block is already
	 * performed.
	 */
	foreach_out_edge(a, edge) {
		const ir_node *user = get_edge_src_irn(edge);
		/* b itself using a does not make them interfere */
		if (b == user)
			continue;

		/* the End node is a pseudo-use */
		if (is_End(user))
			continue;

		/* in case of phi arguments we compare with the block the value comes from */
		if (is_Phi(user)) {
			ir_node *phiblock = get_nodes_block(user);
			if (phiblock == bb)
				continue;
			user = get_irn_n(phiblock, get_edge_src_pos(edge));
		}

		if (value_strictly_dominates(b, user))
			return true;
	}

	return false;
}
550

551
/*--------------------------------------------------------------------------- */
552

553
/** Environment for the register allocation verifier. */
typedef struct be_verify_reg_alloc_env_t {
	be_lv_t *lv;             /**< liveness information */
	bool     problem_found;  /**< set if any check failed */
} be_verify_reg_alloc_env_t;

/** Check that @p node has an output register assigned and that it is
 * allocatable under the node's output requirement. */
static void check_output_constraints(be_verify_reg_alloc_env_t *const env, const ir_node *node)
{
	arch_register_req_t const *const req = arch_get_irn_register_req(node);
	/* requirements without a register set need no register assignment */
	if (!req->cls->regs)
		return;

	arch_register_t const *const reg = arch_get_irn_register(node);
	if (reg == NULL) {
		verify_warnf(node, "%+F should have a register assigned", node);
		env->problem_found = true;
		return;
	}
	if (arch_reg_is_allocatable(req, reg))
		return;

	verify_warnf(node, "register %s assigned as output of %+F not allowed (register constraint)", reg->name, node);
	env->problem_found = true;
}

575
/**
 * Verify the input register constraints of @p node: every operand must
 * have a register of the requirement's class and sufficient width
 * assigned, and that register must be allocatable for the input
 * requirement.  For Phi nodes additionally check that every operand uses
 * the same register as the Phi itself (Phis must be NOPs at this point).
 */
static void check_input_constraints(be_verify_reg_alloc_env_t *const env, ir_node *const node)
{
	arch_register_req_t const **const in_reqs = arch_get_irn_register_reqs_in(node);
	if (!in_reqs && get_irn_arity(node) != 0) {
		verify_warnf(node, "%+F has no input requirements", node);
		env->problem_found = true;
		return;
	}

	/* verify input register */
	foreach_irn_in(node, i, pred) {
		if (is_Bad(pred)) {
			verify_warnf(node, "%+F has Bad as input %d", node, i);
			env->problem_found = true;
			continue;
		}

		const arch_register_req_t *req      = arch_get_irn_register_req_in(node, i);
		const arch_register_req_t *pred_req = arch_get_irn_register_req(pred);
		if (req->cls != pred_req->cls) {
			verify_warnf(node, "%+F register class of requirement at input %d and operand differ", node, i);
			env->problem_found = true;
		}

		/* requirements without a register set need no register assignment */
		if (!req->cls->regs)
			continue;

		if (req->width > pred_req->width) {
			verify_warnf(node, "%+F register width of value at input %d too small", node, i);
			env->problem_found = true;
		}

		const arch_register_t *reg = arch_get_irn_register(pred);
		if (reg == NULL) {
			verify_warnf(pred, "%+F should have a register assigned (%+F input constraint)", pred, node);
			env->problem_found = true;
		} else if (!arch_reg_is_allocatable(req, reg)) {
			verify_warnf(node, "register %s as input %d of %+F not allowed (register constraint)", reg->name, i, node);
			env->problem_found = true;
		}
	}

	/* phis should be NOPs at this point, which means all input regs
	 * must be the same as the output reg */
	if (is_Phi(node)) {
		const arch_register_t *reg = arch_get_irn_register(node);
		foreach_irn_in(node, i, pred) {
			const arch_register_t *pred_reg = arch_get_irn_register(pred);

			/* fixed: guard against pred_reg == NULL before dereferencing
			 * ->is_virtual; the "(null)" handling below shows a missing
			 * operand register is possible here */
			if (reg != pred_reg && (pred_reg == NULL || !pred_reg->is_virtual)) {
				const char *pred_name = pred_reg != NULL ? pred_reg->name : "(null)";
				const char *reg_name  = reg != NULL ? reg->name : "(null)";
				verify_warnf(node, "input %d of %+F uses register %s instead of %s", i, node, pred_name, reg_name);
				env->problem_found = true;
			}
		}
	}
}
633

634
635
636
637
638
639
640
641
/** Return true if SSA violations for @p reg are tolerated, i.e. the graph
 * declared it as a non-SSA register. */
static bool ignore_error_for_reg(ir_graph *irg, const arch_register_t *reg)
{
	be_irg_t *const birg = be_birg_from_irg(irg);
	return birg->non_ssa_regs != NULL
	    && rbitset_is_set(birg->non_ssa_regs, reg->global_index);
}

642
/** Mark the register(s) of @p node as occupied in @p registers, warning
 * if another value already occupies one of them. */
static void value_used(be_verify_reg_alloc_env_t *const env, ir_node const **const registers, ir_node const *const block, ir_node const *const node)
{
	const arch_register_t *reg = arch_get_irn_register(node);
	if (reg == NULL || reg->is_virtual)
		return;

	const arch_register_req_t *req = arch_get_irn_register_req(node);
	assert(req->width > 0);

	unsigned const base = reg->global_index;
	for (unsigned offset = 0; offset < req->width; ++offset) {
		ir_node const *const occupant = registers[base + offset];
		if (occupant != NULL && occupant != node
			&& !ignore_error_for_reg(get_irn_irg(block), reg)) {
			verify_warnf(block, "register %s assigned more than once (nodes %+F and %+F)", reg->name, node, occupant);
			env->problem_found = true;
		}
		registers[base + offset] = node;
	}
}

662
/** Handle the definition of @p node while walking backwards: its
 * register(s) must currently be occupied by @p node itself, and become
 * free (NULL) above the definition. */
static void value_def(be_verify_reg_alloc_env_t *const env, ir_node const **const registers, ir_node const *const node)
{
	const arch_register_t *reg = arch_get_irn_register(node);
	if (reg == NULL || reg->is_virtual)
		return;

	const arch_register_req_t *req = arch_get_irn_register_req(node);
	assert(req->width > 0);

	unsigned const base = reg->global_index;
	for (unsigned offset = 0; offset < req->width; ++offset) {
		ir_node const *const occupant = registers[base + offset];

		/* a little cheat, since its so hard to remove all outedges to dead code
		 * in the backend. This particular case should never be a problem. */
		if (occupant == NULL && get_irn_n_edges(node) == 0)
			return;

		if (occupant != node && !ignore_error_for_reg(get_irn_irg(node), reg)) {
			verify_warnf(node, "%+F not registered as value for register %s (but %+F)", node, reg->name, occupant);
			env->problem_found = true;
		}
		registers[base + offset] = NULL;
	}
}

688
689
/**
 * Block walker simulating register contents backwards through the
 * schedule: values live at the block end occupy their registers, each
 * definition releases its register (value_def) and each use (re)occupies
 * it (value_used).  Also checks input/output register constraints of
 * every scheduled node.
 */
static void verify_block_register_allocation(ir_node *block, void *data)
{
	be_verify_reg_alloc_env_t *const env = (be_verify_reg_alloc_env_t*)data;

	/* one slot per global register index, zero-initialized (all free) */
	unsigned        const n_regs    = isa_if->n_registers;
	ir_node const **const registers = ALLOCANZ(ir_node const*, n_regs);

	/* values live at the end of the block occupy their registers */
	be_lv_foreach(env->lv, block, be_lv_state_end, lv_node) {
		value_used(env, registers, block, lv_node);
	}

	sched_foreach_reverse(block, node) {
		be_foreach_value(node, value,
			value_def(env, registers, value);
			check_output_constraints(env, value);
		);

		check_input_constraints(env, node);

		/* process uses. (Phi inputs are no real uses) */
		if (!is_Phi(node)) {
			foreach_irn_in(node, i, use) {
				value_used(env, registers, block, use);
			}
		}
	}

	/* live-in values are defined by predecessor blocks */
	be_lv_foreach(env->lv, block, be_lv_state_in, lv_node) {
		value_def(env, registers, lv_node);
	}

	/* set must be empty now */
	for (unsigned i = 0; i < n_regs; ++i) {
		if (registers[i]) {
			verify_warnf(block, "%+F not live-in and no def found", registers[i]);
			env->problem_found = true;
		}
	}
}

728
bool be_verify_register_allocation(ir_graph *const irg)
729
{