/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief       implementation of the spill/reload placement abstraction layer
 * @author      Daniel Grund, Sebastian Hack, Matthias Braun
 * @date        29.09.2005
 */
#include <stdlib.h>
#include <stdbool.h>

#include "array.h"
#include "bearch.h"
Matthias Braun's avatar
Matthias Braun committed
17
18
19
20
21
#include "bechordal_t.h"
#include "beirg.h"
#include "belive.h"
#include "bemodule.h"
#include "benode.h"
22
23
24
25
26
#include "besched.h"
#include "bespill.h"
#include "bespillutil.h"
#include "bessaconstr.h"
#include "be_t.h"
Matthias Braun's avatar
Matthias Braun committed
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
#include "beutil.h"
#include "debug.h"
#include "entity_t.h"
#include "execfreq.h"
#include "ident_t.h"
#include "irbackedge_t.h"
#include "ircons_t.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irnodehashmap.h"
#include "irnode_t.h"
#include "statev_t.h"
#include "type_t.h"
#include "util.h"
42

43
44
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;)
45
46
47
48
49
50
51
52

/** Cost value treated as "rematerialization impossible". */
#define REMAT_COST_INFINITE  1000

typedef struct reloader_t reloader_t;
/** One place where a spilled value must be made available again. */
struct reloader_t {
	reloader_t *next;             /**< next reload location of the same value */
	ir_node    *reloader;         /**< the reload is inserted before this node */
	int         remat_cost_delta; /**< costs needed for rematerialization,
	                                   compared to placing a reload */
};

/* One (possibly still unmaterialized) spill position for a value. */
typedef struct spill_t spill_t;
struct spill_t {
	spill_t *next;   /**< next spill position for the same value */
	ir_node *after;  /**< spill has to be placed after this node (or earlier) */
	ir_node *spill;  /**< the created spill node; NULL until it is materialized */
};

typedef struct spill_info_t spill_info_t;
struct spill_info_t {
65
66
	ir_node      *to_spill;  /**< the value that should get spilled */
	reloader_t   *reloaders; /**< list of places where the value should get
Matthias Braun's avatar
Matthias Braun committed
67
	                              reloaded */
68
	spill_t      *spills;    /**< list of latest places where spill must be
Matthias Braun's avatar
Matthias Braun committed
69
	                              placed */
70
71
72
73
74
75
	spill_info_t *next;
	spill_info_t *next_mem_phi;
	double        spill_costs; /**< costs needed for spilling the value */
	bool          spilled_phi; /**< true when the whole Phi has been spilled and
	                                will be replaced with a PhiM. false if only
	                                the value of the Phi gets spilled */
76
77
78
79
};

struct spill_env_t {
	ir_graph         *irg;
80
81
82
	ir_nodehashmap_t  spillmap;
	spill_info_t     *spills;
	spill_info_t     *mem_phis;
83
	struct obstack    obst;
84
	regalloc_if_t     regif;
85
86
87
88
89
90
91
92
93
	unsigned          spill_count;
	unsigned          reload_count;
	unsigned          remat_count;
	unsigned          spilled_phi_count;
};

/**
 * Returns spill info for a specific value (the value that is to be spilled)
 */
94
static spill_info_t *get_spillinfo(spill_env_t *env, ir_node *value)
95
{
96
97
98
99
100
101
102
103
104
105
	spill_info_t *info = ir_nodehashmap_get(spill_info_t, &env->spillmap,
	                                        value);
	if (info == NULL) {
		info = OALLOCZ(&env->obst, spill_info_t);
		info->to_spill    = value;
		info->spill_costs = -1;
		ir_nodehashmap_insert(&env->spillmap, value, info);

		info->next = env->spills;
		env->spills = info;
106
107
	}

108
	return info;
109
110
}

111
spill_env_t *be_new_spill_env(ir_graph *irg, const regalloc_if_t *regif)
{
	/* allocate a zeroed environment and initialize its containers */
	spill_env_t *const senv = XMALLOCZ(spill_env_t);
	senv->irg   = irg;
	senv->regif = *regif;
	ir_nodehashmap_init(&senv->spillmap);
	obstack_init(&senv->obst);
	return senv;
}

void be_delete_spill_env(spill_env_t *env)
{
	/* the hashmap only references obstack memory, so destroy it first,
	 * then release all records at once */
	ir_nodehashmap_destroy(&env->spillmap);
	obstack_free(&env->obst, NULL);
	free(env);
}

void be_add_spill(spill_env_t *env, ir_node *to_spill, ir_node *after)
{
	assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
	DB((dbg, LEVEL_1, "Add spill of %+F after %+F\n", to_spill, after));

	/* Just for safety make sure that we do not insert the spill in front of a phi */
	assert(!is_Phi(sched_next(after)));

	/* Keep the spill list dominance-minimal: a new spill dominated by an
	 * existing one is redundant, and existing spills dominated by the new
	 * one get dropped. */
	spill_info_t *const info = get_spillinfo(env, to_spill);
	spill_t **anchor = &info->spills;
	while (*anchor != NULL) {
		spill_t *const cur = *anchor;
		if (value_strictly_dominates(cur->after, after)) {
			DB((dbg, LEVEL_1, "...dominated by %+F, not added\n", cur->after));
			return;
		}
		if (value_strictly_dominates(after, cur->after)) {
			DB((dbg, LEVEL_1, "...remove old spill at %+F\n", cur->after));
			*anchor = cur->next;
		} else {
			anchor = &cur->next;
		}
	}

	/* record the new spill position; the spill node itself is created later */
	spill_t *const entry = OALLOC(&env->obst, spill_t);
	entry->after = after;
	entry->next  = info->spills;
	entry->spill = NULL;
	info->spills = entry;
}

Matthias Braun's avatar
Matthias Braun committed
161
void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
	assert(!arch_irn_is(skip_Proj_const(to_spill), dont_spill));
	assert(!be_is_Keep(before));

	spill_info_t *const info = get_spillinfo(env, to_spill);

	/* prepend the reload request to the value's reloader list */
	reloader_t *const reload = OALLOC(&env->obst, reloader_t);
	reload->next             = info->reloaders;
	reload->reloader         = before;
	reload->remat_cost_delta = 0;
	info->reloaders          = reload;

	DBG((dbg, LEVEL_1,
	     "creating spillinfo for %+F, will be reloaded before %+F\n",
	     to_spill, before));
}

ir_node *be_get_end_of_block_insertion_point(const ir_node *block)
{
	/* skip over any Keep nodes scheduled behind the jump */
	ir_node *point = sched_last(block);
	while (be_is_Keep(point)) {
		point = sched_prev(point);
		assert(!sched_is_end(point));
	}
	assert(is_cfop(point));

	/* new code goes before the (cond-)jump */
	return point;
}

/**
 * Returns the point at which you can insert a node that should be executed
 * before block @p block when coming from pred @p pos.
 */
static ir_node *get_block_insertion_point(ir_node *block, int pos)
{
	/* With a single predecessor we can simply insert at the block start;
	 * a block with only one pred cannot contain Phis. */
	if (get_Block_n_cfgpreds(block) == 1) {
		ir_node *const first = sched_first(block);
		assert(!is_Phi(first));
		return first;
	}

	/* otherwise the code must go at the end of the predecessor block */
	ir_node *const pred = get_Block_cfgpred_block(block, pos);
	return be_get_end_of_block_insertion_point(pred);
}

void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block,
                           int pos)
{
	/* request the reload on the control-flow edge pred[pos] -> block */
	be_add_reload(env, to_spill, get_block_insertion_point(block, pos));
}

223
void be_spill_phi(spill_env_t *const env, ir_node *const phi)
{
	assert(is_Phi(phi));

	/* mark the Phi as fully spilled and remember it in the mem-phi list */
	spill_info_t *const info = get_spillinfo(env, phi);
	info->spilled_phi  = true;
	info->next_mem_phi = env->mem_phis;
	env->mem_phis      = info;

	/* request a spill of every Phi argument right after its definition */
	foreach_irn_in(phi, i, arg) {
		be_add_spill(env, arg, be_move_after_schedule_first(arg));
	}
}

static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo);

/**
 * Creates the spill node(s) for a non-Phi value.
 *
 * Materializes a spill (via the register allocator interface) after each
 * spill position recorded in @p spillinfo.
 *
 * @param env        the spill environment
 * @param spillinfo  info of the value that should be spilled
 */
static void spill_irn(spill_env_t *env, spill_info_t *spillinfo)
{
	/* determine_spill_costs must have been run before */
	assert(spillinfo->spill_costs >= 0);

	ir_node *const to_spill = spillinfo->to_spill;
	DBG((dbg, LEVEL_1, "spilling %+F ... \n", to_spill));
	for (spill_t *spill = spillinfo->spills; spill != NULL;
	     spill = spill->next) {
		ir_node *const after = be_move_after_schedule_first(spill->after);
		spill->spill = env->regif.new_spill(to_spill, after);
		DB((dbg, LEVEL_1, "\t%+F after %+F\n", spill->spill, after));
		env->spill_count++;
	}
	DBG((dbg, LEVEL_1, "\n"));
}

static void spill_node(spill_env_t *env, spill_info_t *spillinfo);

/**
 * If the first usage of a Phi result would be out of memory
 * there is no sense in allocating a register for it.
 * Thus we spill it and all its operands to the same spill slot.
 * Therefore the phi/dataB becomes a phi/Memory
 *
 * @param env        the spill environment
 * @param spillinfo  info of the Phi node that should be spilled
 */
static void spill_phi(spill_env_t *env, spill_info_t *spillinfo)
{
	ir_node *phi = spillinfo->to_spill;
	DBG((dbg, LEVEL_1, "spilling Phi %+F:\n", phi));

	/* build a new PhiM */
	ir_node *const block = get_nodes_block(phi);
	ir_node *const phim  = be_new_Phi0(block, mode_M, arch_no_register_req);
	sched_add_after(block, phim);

	/* override or replace spills list... */
	spill_t *spill = OALLOC(&env->obst, spill_t);
	spill->after = be_move_after_schedule_first(phi);
	spill->spill = phim;
	spill->next  = NULL;
	spillinfo->spills = spill;
	env->spilled_phi_count++;

	/* spill all arguments and wire their spill nodes into the PhiM */
	unsigned  const arity = get_Phi_n_preds(phi);
	ir_node **const ins   = ALLOCAN(ir_node*, arity);
	foreach_irn_in(phi, i, arg) {
		spill_info_t *arg_info = get_spillinfo(env, arg);

		determine_spill_costs(env, arg_info);
		spill_node(env, arg_info);

		ins[i] = arg_info->spills->spill;
	}
	be_complete_Phi(phim, arity, ins);
	DBG((dbg, LEVEL_1, "... done spilling Phi %+F, created PhiM %+F\n", phi, phim));
}

/**
 * Spill a node, dispatching between the Phi and the non-Phi case.
 *
 * @param env        the spill environment
 * @param spillinfo  info of the node that should be spilled
 */
static void spill_node(spill_env_t *env, spill_info_t *spillinfo)
{
	/* nothing to do if a spill node was already materialized */
	if (spillinfo->spills != NULL && spillinfo->spills->spill != NULL)
		return;

	if (spillinfo->spilled_phi)
		spill_phi(env, spillinfo);
	else
		spill_irn(env, spillinfo);
}

/**
 * Tests whether value @p arg is available before node @p reloader
Matthias Braun's avatar
Matthias Braun committed
332
 * @returns true if value is available
333
 */
Matthias Braun's avatar
Matthias Braun committed
334
static bool is_value_available(spill_env_t *env, const ir_node *arg)
335
{
336
	if (is_Unknown(arg) || is_NoMem(arg))
Matthias Braun's avatar
Matthias Braun committed
337
		return true;
338
	if (arch_irn_is(skip_Proj_const(arg), spill))
Matthias Braun's avatar
Matthias Braun committed
339
		return true;
340
	if (arg == get_irg_frame(env->irg))
Matthias Braun's avatar
Matthias Braun committed
341
		return true;
342
	if (get_irn_mode(arg) == mode_T)
Matthias Braun's avatar
Matthias Braun committed
343
344
		return false;
	/* "Ignore registers" are always available */
345
	if (arch_irn_is_ignore(arg))
Matthias Braun's avatar
Matthias Braun committed
346
		return true;
347

Matthias Braun's avatar
Matthias Braun committed
348
	return false;
349
350
351
352
353
354
355
356
357
358
359
360
361
}

/**
 * Check if a node is rematerializable. This tests for the following conditions:
 *
 * - The node itself is rematerializable
 * - All arguments of the node are available or also rematerialisable
 * - The costs for the rematerialisation operation is less or equal a limit
 *
 * Returns the costs needed for rematerialisation or something
 * >= REMAT_COST_INFINITE if remat is not possible.
 */
static int check_remat_conditions_costs(spill_env_t *env,
                                        const ir_node *spilled,
                                        const ir_node *reloader,
                                        int parentcosts)
{
	const ir_node *const insn = skip_Proj_const(spilled);
	assert(!arch_irn_is(insn, spill));

	if (!arch_irn_is(insn, rematerializable))
		return REMAT_COST_INFINITE;

	/* a remat only pays off while it stays cheaper than a spill + reload */
	int       costs      = isa_if->get_op_estimated_cost(insn);
	int const spillcosts = env->regif.reload_cost + env->regif.spill_cost;
	if (parentcosts + costs >= spillcosts)
		return REMAT_COST_INFINITE;

	/* never rematerialize a node which modifies the flags.
	 * (would be better to test whether the flags are actually live at point
	 * reloader...)
	 */
	if (arch_irn_is(insn, modify_flags))
		return REMAT_COST_INFINITE;

	int remat_args = 0;
	foreach_irn_in(insn, i, arg) {
		if (is_value_available(env, arg))
			continue;

		/* we have to rematerialize the argument as well; we only support
		 * rematerializing 1 argument at the moment, as multiple arguments
		 * could increase register pressure */
		if (++remat_args > 1)
			return REMAT_COST_INFINITE;

		costs += check_remat_conditions_costs(env, arg, reloader,
		                                      parentcosts + costs);
		if (parentcosts + costs >= spillcosts)
			return REMAT_COST_INFINITE;
	}

	return costs;
}

/**
 * Re-materialize a node.
 *
 * @param env       the spill environment
 * @param spilled   the node that was spilled
 * @param reloader  a irn that requires a reload
 */
static ir_node *do_remat(spill_env_t *env, ir_node *spilled, ir_node *reloader)
{
	/* gather operands: available values are used directly, everything else
	 * is rematerialized recursively */
	ir_node **const in = ALLOCAN(ir_node*, get_irn_arity(spilled));
	foreach_irn_in(spilled, i, arg) {
		in[i] = is_value_available(env, arg)
		      ? arg
		      : do_remat(env, arg, reloader);
	}

	/* create a copy of the node */
	ir_node *const block = get_nodes_block(reloader);
	ir_node *const res   = new_similar_node(spilled, block, in);
	if (env->regif.mark_remat)
		env->regif.mark_remat(res);

	DBG((dbg, LEVEL_1, "Insert remat %+F of %+F before reloader %+F\n", res,
	     spilled, reloader));

	/* Projs are not scheduled themselves */
	if (!is_Proj(res))
		sched_add_before(reloader, res);

	return res;
}

double be_get_spill_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
	(void)to_spill;
	/* weight the static spill cost by the execution frequency of the block */
	double const freq = get_block_execfreq(get_nodes_block(before));
	return env->regif.spill_cost * freq;
}

Matthias Braun's avatar
Matthias Braun committed
447
448
unsigned be_get_reload_costs_no_weight(spill_env_t *env,
                                       const ir_node *to_spill,
                                       const ir_node *before)
{
	if (be_do_remats) {
		/* is the node rematerializable? */
		unsigned const remat
			= check_remat_conditions_costs(env, to_spill, before, 0);
		if (remat < (unsigned)env->regif.reload_cost)
			return remat;
	}

	return env->regif.reload_cost;
}

double be_get_reload_costs(spill_env_t *env, ir_node *to_spill, ir_node *before)
{
	double const freq = get_block_execfreq(get_nodes_block(before));

	if (be_do_remats) {
		/* is the node rematerializable? */
		int const remat = check_remat_conditions_costs(env, to_spill, before, 0);
		if (remat < (int)env->regif.reload_cost)
			return remat * freq;
	}

	return env->regif.reload_cost * freq;
}

double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
                                   ir_node *block, int pos)
{
	/* cost of a reload on the control-flow edge pred[pos] -> block */
	ir_node *const location = get_block_insertion_point(block, pos);
	return be_get_reload_costs(env, to_spill, location);
}

/**
 * analyzes how to best spill a node and determine costs for that
 */
static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
{
	/* already calculated? */
	if (spillinfo->spill_costs >= 0)
		return;

	ir_node       *to_spill = spillinfo->to_spill;
	const ir_node *insn     = skip_Proj_const(to_spill);
	assert(!arch_irn_is(insn, dont_spill));
	assert(!arch_irn_is(insn, reload));

	/* frequency of a spill right after the definition */
	double const def_freq = get_block_execfreq(get_nodes_block(insn));

	if (spillinfo->spilled_phi) {
		/* TODO calculate correct costs...
		 * (though we can't remat this node anyway so no big problem) */
		spillinfo->spill_costs = env->regif.spill_cost * def_freq;
		return;
	}

	/* sum up the execution frequencies of the individual (late) spills */
	assert(spillinfo->spills);
	double late_freq = 0;
	for (spill_t *s = spillinfo->spills; s != NULL; s = s->next) {
		late_freq += get_block_execfreq(get_block(s->after));
	}

	DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
			late_freq * env->regif.spill_cost,
			def_freq * env->regif.spill_cost));

	/* multi-/latespill is advantageous -> return*/
	if (late_freq < def_freq) {
		DB((dbg, LEVEL_1, "use latespills for %+F\n", to_spill));
		spillinfo->spill_costs = late_freq * env->regif.spill_cost;
		return;
	}

	/* otherwise replace the spill list by a single spill after the definition */
	spill_t *spill = OALLOC(&env->obst, spill_t);
	spill->after   = be_move_after_schedule_first(skip_Proj(to_spill));
	spill->next    = NULL;
	spill->spill   = NULL;

	spillinfo->spills      = spill;
	spillinfo->spill_costs = def_freq * env->regif.spill_cost;
	DB((dbg, LEVEL_1, "spill %+F after definition\n", to_spill));
}

void be_insert_spills_reloads(spill_env_t *env)
{
	be_timer_push(T_RA_SPILL_APPLY);

	/* create all phi-ms first, this is needed so, that phis, hanging on
	   spilled phis work correctly */
	for (spill_info_t *phi_info = env->mem_phis; phi_info != NULL;
	     phi_info = phi_info->next_mem_phi) {
		spill_node(env, phi_info);
	}

	/* process each spilled node */
	for (spill_info_t *info = env->spills; info != NULL; info = info->next) {
		ir_node  *to_spill        = info->to_spill;
		ir_node **copies          = NEW_ARR_F(ir_node*, 0);
		double    all_remat_costs = 0; /* costs when we would remat all nodes */
		bool      force_remat     = false;

		DBG((dbg, LEVEL_1, "\nhandling all reloaders of %+F:\n", to_spill));

		determine_spill_costs(env, info);

		/* determine possibility of rematerialisations */
		if (be_do_remats) {
			/* calculate cost savings for each individual value when it would
			   be rematerialised instead of reloaded */
			for (reloader_t *rel = info->reloaders; rel != NULL;
			     rel = rel->next) {
				ir_node *reloader = rel->reloader;
				if (rel->remat_cost_delta >= REMAT_COST_INFINITE) {
					DBG((dbg, LEVEL_2, "\treload before %+F is forbidden\n",
					     reloader));
					all_remat_costs = REMAT_COST_INFINITE;
					continue;
				}

				int remat_cost = check_remat_conditions_costs(env, to_spill,
				                                              reloader, 0);
				if (remat_cost >= REMAT_COST_INFINITE) {
					DBG((dbg, LEVEL_2, "\tremat before %+F not possible\n",
					     reloader));
					rel->remat_cost_delta = REMAT_COST_INFINITE;
					all_remat_costs       = REMAT_COST_INFINITE;
					continue;
				}

				int remat_cost_delta  = remat_cost - env->regif.reload_cost;
				rel->remat_cost_delta = remat_cost_delta;
				ir_node *block        = get_block(reloader);
				double   freq         = get_block_execfreq(block);
				all_remat_costs      += remat_cost_delta * freq;
				DBG((dbg, LEVEL_2, "\tremat costs delta before %+F: "
				     "%d (rel %f)\n", reloader, remat_cost_delta,
				     remat_cost_delta * freq));
			}
			if (all_remat_costs < REMAT_COST_INFINITE) {
				/* we don't need the costs for the spill if we can remat
				   all reloaders */
				all_remat_costs -= info->spill_costs;
				DBG((dbg, LEVEL_2, "\tspill costs %d (rel %f)\n",
				     env->regif.spill_cost, info->spill_costs));
			}

			if (all_remat_costs < 0) {
				force_remat = true;
				DBG((dbg, LEVEL_1, "\nforcing remats of all reloaders (%f)\n",
				     all_remat_costs));
			}
		}

		/* go through all reloads for this spill */
		for (reloader_t *rel = info->reloaders; rel != NULL; rel = rel->next) {
			ir_node *copy; /* a reload is a "copy" of the original value */
			if (be_do_remats && (force_remat || rel->remat_cost_delta < 0)) {
				copy = do_remat(env, to_spill, rel->reloader);
				++env->remat_count;
			} else {
				/* make sure we have a spill */
				spill_node(env, info);

				/* create a reload, use the first spill for now SSA
				 * reconstruction for memory comes below */
				assert(info->spills != NULL);
				copy = env->regif.new_reload(info->to_spill,
				                             info->spills->spill,
				                             rel->reloader);
				env->reload_count++;
			}

			DBG((dbg, LEVEL_1, " %+F of %+F before %+F\n",
			     copy, to_spill, rel->reloader));
			ARR_APP1(ir_node*, copies, copy);
		}

		/* if we had any reloads or remats, then we need to reconstruct the
		 * SSA form for the spilled value */
		if (ARR_LEN(copies) > 0) {
			be_ssa_construction_env_t senv;
			be_ssa_construction_init(&senv, env->irg);
			be_ssa_construction_add_copy(&senv, to_spill);
			be_ssa_construction_add_copies(&senv, copies, ARR_LEN(copies));
			be_ssa_construction_fix_users(&senv, to_spill);
			be_ssa_construction_destroy(&senv);
		}
		/* need to reconstruct SSA form if we had multiple spills */
		if (info->spills != NULL && info->spills->next != NULL) {
			be_ssa_construction_env_t senv;
			be_ssa_construction_init(&senv, env->irg);
			unsigned spill_count = 0;
			for (spill_t *spill = info->spills; spill != NULL;
			     spill = spill->next) {
				/* maybe we rematerialized the value and need no spill */
				if (spill->spill == NULL)
					continue;
				be_ssa_construction_add_copy(&senv, spill->spill);
				++spill_count;
			}
			if (spill_count > 1) {
				/* all reloads are attached to the first spill, fix them now */
				be_ssa_construction_fix_users(&senv, info->spills->spill);
			}

			be_ssa_construction_destroy(&senv);
		}

		DEL_ARR_F(copies);
		info->reloaders = NULL;
	}

	stat_ev_dbl("spill_spills", env->spill_count);
	stat_ev_dbl("spill_reloads", env->reload_count);
	stat_ev_dbl("spill_remats", env->remat_count);
	stat_ev_dbl("spill_spilled_phis", env->spilled_phi_count);

	/* Matze: In theory be_ssa_construction should take care of the liveness...
	 * try to disable this again in the future */
	be_invalidate_live_sets(env->irg);

	be_remove_dead_nodes_from_schedule(env->irg);

	be_timer_pop(T_RA_SPILL_APPLY);
}

681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
/* Shared state of the constraint-copy insertion pass below. */
static be_irg_t      *birg;
static be_lv_t       *lv;
/* statistics counters for the different kinds of inserted copies */
static unsigned long  precol_copies;
static unsigned long  multi_precol_copies;
static unsigned long  constrained_livethrough_copies;

static void prepare_constr_insn(ir_node *const node)
{
	/* Step 1: Insert a copy for constraint inputs attached to a value which
	 * can't fulfill the constraint
	 * (typical example: stack pointer as input to copyb)
	 * TODO: This really just checks precolored registers at the moment and
	 *       ignores the general case of not matching in/out constraints */
	foreach_irn_in(node, i, op) {
		const arch_register_req_t *const req
			= arch_get_irn_register_req_in(node, i);
		if (req->cls == NULL)
			continue;

		const arch_register_t *const reg = arch_get_irn_register(op);
		if (reg == NULL)
			continue;

		/* Precolored with an ignore register (which is not virtual). */
		if ((reg->type & arch_register_type_virtual) ||
		    rbitset_is_set(birg->allocatable_regs, reg->global_index))
			continue;

		if (!arch_register_req_is(req, limited))
			continue;
		if (rbitset_is_set(req->limited, reg->index))
			continue;

		ir_node *const block = get_nodes_block(node);
		ir_node *const copy  = be_new_Copy(block, op);
		sched_add_before(node, copy);
		set_irn_n(node, i, copy);
		++precol_copies;
		DBG((dbg_constr, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n",
		     copy, node, i));
	}

	/* Step 2: insert copies for values occurring constrained more than once
	 * on this node with differing constraints. */
	for (int i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		const arch_register_req_t *const req
			= arch_get_irn_register_req_in(node, i);
		const arch_register_class_t *const cls = req->cls;
		if (cls == NULL)
			continue;
		if (!arch_register_req_is(req, limited))
			continue;

		ir_node *const in = get_irn_n(node, i);
		const arch_register_req_t *const in_req
			= arch_get_irn_register_req(in);
		if (arch_register_req_is(in_req, ignore))
			continue;

		for (int j = i + 1; j < arity; ++j) {
			const arch_register_req_t *const req2
				= arch_get_irn_register_req_in(node, j);
			if (req2->cls != cls)
				continue;
			if (!arch_register_req_is(req2, limited))
				continue;
			if (get_irn_n(node, j) != in)
				continue;

			/* if the constraint is the same, no copy is necessary
			 * TODO generalise to unequal but overlapping constraints */
			if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
				continue;

			ir_node *const block = get_nodes_block(node);
			ir_node *const copy  = be_new_Copy(block, in);
			sched_add_before(node, copy);
			set_irn_n(node, j, copy);
			++multi_precol_copies;
			DBG((dbg_constr, LEVEL_3,
			     "inserting multiple constr copy %+F for %+F pos %d\n",
			     copy, node, j));
		}
	}

	/* Step 3: collect all registers occurring in out constraints. */
	unsigned *out_constr_regs = NULL;
	be_foreach_value(node, value,
		const arch_register_req_t *const req = arch_get_irn_register_req(value);
		const arch_register_class_t *const cls = req->cls;
		if (cls == NULL)
			continue;
		if (!arch_register_req_is(req, limited))
			continue;
		if (out_constr_regs == NULL)
			out_constr_regs = rbitset_alloca(isa_if->n_registers);
		rbitset_foreach(req->limited, cls->n_regs, e) {
			const arch_register_t *reg = arch_register_for_index(cls, e);
			rbitset_set(out_constr_regs, reg->global_index);
		}
	);
	/* no output constraints => we're good */
	if (out_constr_regs == NULL)
		return;

	/* Step 4: Insert copies for all constrained arguments living through the
	 * node and being constrained to a register which also occurs in out
	 * constraints. */
	for (int i = 0, arity = get_irn_arity(node); i < arity; ++i) {
		/* Check, if
		 * 1) the operand is constrained.
		 * 2) lives through the node.
		 * 3) is constrained to a register occurring in out constraints. */
		const arch_register_req_t *const req
			= arch_get_irn_register_req_in(node, i);
		const arch_register_class_t *const cls = req->cls;
		if (cls == NULL)
			continue;
		if (!arch_register_req_is(req, limited))
			continue;

		ir_node *const in = get_irn_n(node, i);
		const arch_register_req_t *const in_req
			= arch_get_irn_register_req(in);
		if (arch_register_req_is(in_req, ignore))
			continue;
		/* Only create the copy if the operand is no copy.
		 * this is necessary since the assure constraints phase inserts
		 * Copies and Keeps for operands which must be different from the
		 * results. Additional copies here would destroy this. */
		if (be_is_Copy(in))
			continue;
		if (!be_value_live_after(in, node))
			continue;

		bool overlap = false;
		rbitset_foreach(req->limited, cls->n_regs, e) {
			const arch_register_t *reg = arch_register_for_index(cls, e);
			if (rbitset_is_set(out_constr_regs, reg->global_index)) {
				overlap = true;
				break;
			}
		}
		if (!overlap)
			continue;

		ir_node *const block = get_nodes_block(node);
		ir_node *const copy  = be_new_Copy(block, in);
		sched_add_before(node, copy);
		set_irn_n(node, i, copy);
		++constrained_livethrough_copies;
		DBG((dbg_constr, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
		     copy, node, i));
		be_liveness_update(lv, in);
	}
}

/* Block-walker callback: fix input constraints of every scheduled node. */
static void add_missing_copies_in_block(ir_node *block, void *data)
{
	(void)data;
	sched_foreach(block, node) {
		prepare_constr_insn(node);
	}
}

static bool has_irn_users(const ir_node *irn)
{
	return get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL) != 0;
}

/**
 * Walks the schedule backwards from @p irn over the run of Copy nodes
 * immediately preceding it and returns a non-spillable Copy of @p op if
 * one exists there, otherwise NULL.
 */
static ir_node *find_copy(ir_node *irn, ir_node *op)
{
	ir_node *node = irn;
	for (;;) {
		node = sched_prev(node);
		/* stop as soon as the run of preceding Copies ends */
		if (!be_is_Copy(node))
			return NULL;
		if (be_get_Copy_op(node) == op && arch_irn_is(node, dont_spill))
			return node;
	}
}

/** Environment for constraints. */
typedef struct {
	ir_nodehashmap_t op_set; /**< maps each operand node to its op_copy_assoc_t */
	struct obstack   obst;   /**< obstack the op_copy_assoc_t entries are allocated on */
} constraint_env_t;

/** Associates an ir_node with its copy and CopyKeep. */
typedef struct {
	ir_nodeset_t copies; /**< all non-spillable copies of this irn */
} op_copy_assoc_t;

/**
 * Inserts a non-spillable Copy of @p other_different plus a Keep/CopyKeep
 * so that @p irn and @p other_different can be assigned different registers.
 * The copy and its keep are recorded in the environment's op map for later
 * SSA reconstruction.
 */
static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env)
{
	arch_register_req_t const *const req = arch_get_irn_register_req(other_different);
	if (arch_register_req_is(req, ignore)) {
		DB((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore\n", irn));
		return;
	}

	ir_nodehashmap_t *op_set = &env->op_set;
	ir_node          *block  = get_nodes_block(irn);

	/* A non-spillable copy of the different node is needed because that
	 * node may live in a block far far away.  The copy is optimized away
	 * later if it turns out to be unnecessary.  Reuse a suitable copy
	 * already scheduled immediately before irn, if there is one. */
	ir_node *copy = find_copy(skip_Proj(irn), other_different);
	if (copy == NULL) {
		copy = be_new_Copy(block, other_different);
		arch_add_irn_flags(copy, arch_irn_flag_dont_spill);
		DB((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", copy, other_different));
	} else {
		DB((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", copy, other_different));
	}

	/* Add the Keep resp. CopyKeep; in the CopyKeep case the users of the
	 * other_different irn get rerouted later. */
	ir_node *keep;
	if (has_irn_users(other_different)) {
		ir_node *const in[] = { irn };
		keep = be_new_CopyKeep(block, copy, ARRAY_SIZE(in), in);
	} else {
		ir_node *in[] = { irn, copy };
		keep = be_new_Keep(block, ARRAY_SIZE(in), in);
	}

	DB((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, copy));

	/* insert copy and keep into the schedule */
	assert(sched_is_scheduled(irn) && "need schedule to assure constraints");
	if (!sched_is_scheduled(copy))
		sched_add_before(skip_Proj(irn), copy);
	sched_add_after(skip_Proj(irn), keep);

	/* record other_different and its copies in the map */
	op_copy_assoc_t *entry
		= ir_nodehashmap_get(op_copy_assoc_t, op_set, other_different);
	if (entry == NULL) {
		entry = OALLOC(&env->obst, op_copy_assoc_t);
		ir_nodeset_init(&entry->copies);
		ir_nodehashmap_insert(op_set, other_different, entry);
	}

	/* remember the copy, and the keep too when it is a CopyKeep */
	ir_nodeset_insert(&entry->copies, copy);
	if (be_is_CopyKeep(keep))
		ir_nodeset_insert(&entry->copies, keep);
}

/**
 * Checks if node has a must_be_different constraint in output and adds a Keep
 * then to assure the constraint.
 *
 * @param irn          the node to check
 * @param skipped_irn  if irn is a Proj node, its predecessor, else irn
 * @param env          the constraint environment
 */
static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env)
{
	const arch_register_req_t *req = arch_get_irn_register_req(irn);
	if (!arch_register_req_is(req, must_be_different))
		return;

	const unsigned other = req->other_different;
	if (arch_register_req_is(req, should_be_same)) {
		const unsigned same = req->other_same;
		if (is_po2(other) && is_po2(same)) {
			int pos_other = ntz(other);
			int pos_same  = ntz(same);
			/* A should_be_same x / must_be_different y pair can safely
			 * be ignored iff both point at the same input value. */
			if (get_irn_n(skipped_irn, pos_other) == get_irn_n(skipped_irn, pos_same))
				return;
		}
	}

	/* generate the keep pattern for every input in the 'other' bitset */
	for (unsigned i = 0; 1U << i <= other; ++i) {
		if (!(other & (1U << i)))
			continue;
		ir_node *different_from = get_irn_n(skipped_irn, i);
		gen_assure_different_pattern(irn, different_from, env);
	}
}

/**
 * Calls the functions to assure register constraints.
 *
 * @param block    The block to be checked
 * @param walk_env The walker environment
 */
static void assure_constraints_walker(ir_node *block, void *walk_env)
{
	constraint_env_t *cenv = (constraint_env_t*)walk_env;

	/* walk the schedule bottom-up and handle every produced value */
	sched_foreach_reverse(block, irn) {
		be_foreach_value(irn, value,
			assure_different_constraints(value, irn, cenv);
		);
	}
}

/**
 * Melt all copykeeps pointing to the same node
 * (or Projs of the same node), copying the same operand.
 */
static void melt_copykeeps(constraint_env_t *cenv)
{
	struct obstack obst;
	obstack_init(&obst);

	/* for every operand recorded in the op map */
	ir_nodehashmap_entry_t    map_entry;
	ir_nodehashmap_iterator_t map_iter;
	foreach_ir_nodehashmap(&cenv->op_set, map_entry, map_iter) {
		op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;

		/* gather all copykeeps of this operand into an array */
		unsigned num_ck = 0;
		foreach_ir_nodeset(&entry->copies, cp, iter) {
			if (be_is_CopyKeep(cp)) {
				obstack_grow(&obst, &cp, sizeof(cp));
				++num_ck;
			}
		}

		ir_node **ck_arr = (ir_node **)obstack_finish(&obst);

		/* match each copykeep against all remaining ones */
		for (unsigned i = 0; i < num_ck; ++i) {
			if (ck_arr[i] == NULL)
				continue;
			ir_node *ref        = ck_arr[i];
			ir_node *ref_mode_T = skip_Proj(get_irn_n(ref, 1));
			unsigned n_melt     = 1;
			obstack_grow(&obst, &ref, sizeof(ref));

			DB((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref));

			/* collect copykeeps pointing to the same mode_T node as ref */
			for (unsigned j = 0; j < num_ck; ++j) {
				if (j == i)
					continue;
				ir_node *ck = ck_arr[j];
				if (ck == NULL || skip_Proj(get_irn_n(ck, 1)) != ref_mode_T)
					continue;

				obstack_grow(&obst, &ck, sizeof(ck));
				ir_nodeset_remove(&entry->copies, ck);
				DB((dbg_constr, LEVEL_1, "\t%+F\n", ck));
				ck_arr[j] = NULL;
				++n_melt;
				sched_remove(ck);
			}
			ck_arr[i] = NULL;

			/* no melting candidates found? */
			if (n_melt == 1) {
				DB((dbg_constr, LEVEL_1, "\tno candidate found\n"));
				continue;
			}

			ir_nodeset_remove(&entry->copies, ref);
			sched_remove(ref);

			/* melt all found copykeeps into one */
			ir_node **melted  = (ir_node **)obstack_finish(&obst);
			ir_node **melt_in = ALLOCAN(ir_node*,n_melt);
			for (unsigned j = 0; j < n_melt; ++j) {
				melt_in[j] = get_irn_n(melted[j], 1);

				/* the melted keeps can be killed now, except ref whose
				 * information is still needed below */
				if (melted[j] != ref)
					kill_node(melted[j]);
			}

			ir_node *const new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, melt_in);

			ir_nodeset_insert(&entry->copies, new_ck);

			/* find a scheduling point for the merged copykeep */
			ir_node *const sched_pt = be_move_after_schedule_first(ref_mode_T);
			sched_add_after(sched_pt, new_ck);
			DB((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));

			/* finally: kill the reference copykeep */
			kill_node(ref);
		}
		obstack_free(&obst, ck_arr);
	}
	obstack_free(&obst, NULL);
}

1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
/**
 * Tests whether a node has a real user and is not just kept by the End or
 * Anchor node
 */
static bool has_real_user(const ir_node *node)
{
	foreach_out_edge(node, edge) {
		ir_node *user = get_edge_src_irn(edge);
		if (!is_End(user) && !is_Anchor(user))
			return true;
	}
	return false;
}

/**
 * Graph walker: adds Keep nodes for values which would otherwise die
 * immediately, so they stay alive for the register allocator.
 */
static void add_missing_keep_walker(ir_node *node, void *data)
{
	(void)data;
	if (get_irn_mode(node) == mode_T) {
		unsigned const n_outs = arch_get_irn_n_outs(node);
		assert(n_outs != 0);

		/* map each output number to its existing Proj, if any */
		ir_node **const projs = ALLOCANZ(ir_node*, n_outs);
		foreach_out_edge(node, edge) {
			ir_node *const succ = get_edge_src_irn(edge);
			/* The node could be kept */
			if (is_Proj(succ)) {
				unsigned const pn = get_Proj_num(succ);
				assert(pn < n_outs);
				projs[pn] = succ;
			}
		}

		/* collect all outputs which still need keeping; projs[] is
		 * reused as the input array for the Keep node */
		unsigned n_to_keep = 0;
		for (unsigned o = 0; o < n_outs; ++o) {
			arch_register_req_t   const *const req = arch_get_irn_register_req_out(node, o);
			arch_register_class_t const *const cls = req->cls;
			if (cls == NULL || (cls->flags & arch_register_class_flag_manual_ra))
				continue;
			ir_node *value = projs[o];
			if (value == NULL) {
				/* no Proj exists yet, materialize one to keep */
				value = new_r_Proj(node, cls->mode, o);
			} else if (has_real_user(value)) {
				continue;
			}
			projs[n_to_keep++] = value;
		}
		if (n_to_keep != 0) {
			ir_node *const block = get_nodes_block(node);
			ir_node *const keep  = be_new_Keep(block, n_to_keep, projs);
			sched_add_after(node, keep);
		}
	} else if (!is_Proj(node)) {
		arch_register_req_t   const *const req = arch_get_irn_register_req(node);
		arch_register_class_t const *const cls = req->cls;
		if (cls != NULL && !(cls->flags & arch_register_class_flag_manual_ra)
		    && !has_real_user(node)) {
			ir_node *const keep = be_new_Keep_one(node);
			sched_add_after(node, keep);
		}
	}
}

1146
1147
1148
void be_spill_prepare_for_constraints(ir_graph *irg)
{
	FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");
1149
	be_timer_push(T_RA_CONSTR);
1150

1151
1152
	irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);

1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
	constraint_env_t cenv;
	ir_nodehashmap_init(&cenv.op_set);
	obstack_init(&cenv.obst);

	irg_block_walk_graph(irg, NULL, assure_constraints_walker, &cenv);

	/* melt copykeeps, pointing to projs of */
	/* the same mode_T node and keeping the */
	/* same operand                         */
	melt_copykeeps(&cenv);

	/* for all */
	ir_nodehashmap_iterator_t map_iter;
	ir_nodehashmap_entry_t    map_entry;
	foreach_ir_nodehashmap(&cenv.op_set, map_entry, map_iter) {
Matthias Braun's avatar
Matthias Braun committed
1168
1169
1170
		op_copy_assoc_t *entry    = (op_copy_assoc_t*)map_entry.data;
		size_t           n_copies = ir_nodeset_size(&entry->copies);
		ir_node        **nodes    = ALLOCAN(ir_node*, n_copies);
1171
1172
1173
1174
1175

		/* put the node in an array */
		DBG((dbg_constr, LEVEL_1, "introduce copies for %+F ", map_entry.node));

		/* collect all copies */
Matthias Braun's avatar
Matthias Braun committed
1176
		size_t n = 0;
1177
1178
1179
1180
1181
1182
1183
1184
		foreach_ir_nodeset(&entry->copies, cp, iter) {
			nodes[n++] = cp;
			DB((dbg_constr, LEVEL_1, ", %+F ", cp));
		}

		DB((dbg_constr, LEVEL_1, "\n"));

		/* introduce the copies for the operand and its copies */
Matthias Braun's avatar
Matthias Braun committed
1185
		be_ssa_construction_env_t senv;
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
		be_ssa_construction_init(&senv, irg);
		be_ssa_construction_add_copy(&senv, map_entry.node);
		be_ssa_construction_add_copies(&senv, nodes, n);
		be_ssa_construction_fix_users(&senv, map_entry.node);
		be_ssa_construction_destroy(&senv);

		/* Could be that not all CopyKeeps are really needed, */
		/* so we transform unnecessary ones into Keeps.       */
		foreach_ir_nodeset(&entry->copies, cp, iter) {
			if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
Matthias Braun's avatar
Matthias Braun committed
1196
1197
1198
				int      arity = get_irn_arity(cp);
				ir_node *block = get_nodes_block(cp);
				ir_node *keep  = be_new_Keep(block, arity, get_irn_in(cp) + 1);
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
				sched_replace(cp, keep);

				/* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */
				kill_node(cp);
			}
		}

		ir_nodeset_destroy(&entry->copies);
	}

	ir_nodehashmap_destroy(&cenv.op_set);
	obstack_free(&cenv.obst, NULL);
	be_invalidate_live_sets(irg);

1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
	/* part2: add missing copies */
	precol_copies                  = 0;
	multi_precol_copies            = 0;
	constrained_livethrough_copies = 0;
	be_assure_live_sets(irg);
	birg = be_birg_from_irg(irg);
	lv   = be_get_irg_liveness(irg);
	irg_block_walk_graph(irg, add_missing_copies_in_block, NULL, NULL);

	stat_ev_ull("ra_precol_copies", precol_copies);
	stat_ev_ull("ra_multi_precol_copies", multi_precol_copies);
	stat_ev_ull("ra_constrained_livethrough_copies",
	            constrained_livethrough_copies);
1226
1227
	be_timer_pop(T_RA_CONSTR);
	be_dump(DUMP_RA, irg, "spillprepare");
1228
1229
}

Matthias Braun's avatar
Matthias Braun committed
1230
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spill)
1231
1232
1233
1234
void be_init_spill(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.spill");
}