/**
 * Scheduling algorithms.
 * Currently, only a simple list scheduling algorithm is implemented here.
 * @date 20.10.2004
 * @author Sebastian Hack
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <limits.h>

#include "benode_t.h"

#include "obst.h"
#include "list.h"
#include "iterator.h"

#include "iredges_t.h"
#include "irgwalk.h"
#include "irnode_t.h"
#include "irmode_t.h"
#include "irdump.h"
#include "irprintf_t.h"
#include "array.h"
#include "debug.h"
#include "irtools.h"

#include "besched_t.h"
#include "beutil.h"
#include "belive_t.h"
#include "belistsched.h"
#include "beschedmris.h"
#include "bearch.h"
#include "bestat.h"

/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	sched_timestep_t delay;     /**< The delay for this node if already calculated, else 0. */
	sched_timestep_t etime;     /**< The earliest time of this node. */
	unsigned already_sched : 1; /**< Set if this node is already scheduled */
	unsigned is_root       : 1; /**< is a root node of a block */
} sched_irn_t;

/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
	sched_irn_t *sched_info;                    /**< scheduling info per node */
	const list_sched_selector_t *selector;      /**< The node selector. */
	const arch_env_t *arch_env;                 /**< The architecture environment. */
	const ir_graph *irg;                        /**< The graph to schedule. */
	void *selector_env;                         /**< A pointer to give to the selector. */
} sched_env_t;

#if 0
/*
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 */
static ir_node *curr_bl = NULL;

static int cmp_usage(const void *a, const void *b)
{
	const ir_node *p = a;
	const ir_node *q = b;
	int res = 0;

	res = is_live_end(curr_bl, p) - is_live_end(curr_bl, q);

	/*
	 * One of them is live at the end of the block.
	 * Then, that one shall be scheduled after the other.
	 */
	return res;
}
#endif

/**
 * The trivial selector:
 * Just assure that branches and constants are executed last,
 * otherwise select the first ready node.
 */
static ir_node *trivial_select(void *block_env, nodeset *ready_set)
{
	const arch_env_t *arch_env = block_env;
	ir_node *irn = NULL;

	/* assure that branches and constants are executed last */
	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);

		if (irn_class != arch_irn_class_branch && irn_class != arch_irn_class_const) {
			nodeset_break(ready_set);
			return irn;
		}
	}

	/* assure that constants are executed before branches */
	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
			nodeset_break(ready_set);
			return irn;
		}
	}

	/* at last: schedule branches */
	irn = nodeset_first(ready_set);
	nodeset_break(ready_set);

	return irn;
}

static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	return (void *) arch_env;
}

static void *trivial_init_block(void *graph_env, ir_node *bl)
{
	return graph_env;
}

static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
	int res = 0;

	if(sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
}

static const list_sched_selector_t trivial_selector_struct = {
	trivial_init_graph,
	trivial_init_block,
	trivial_select,
	NULL,                /* to_appear_in_schedule */
	NULL,                /* exectime */
	NULL,                /* latency */
	NULL,                /* finish_block */
	NULL                 /* finish_graph */
};

const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
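
/*
 * Illustrative sketch (not part of the original file): a new selector is
 * plugged in by filling the same vtable. The first_ready_select name is
 * hypothetical; it reuses the trivial graph/block setup above and the
 * nodeset API used throughout this file.
 */
#if 0
static ir_node *first_ready_select(void *block_env, nodeset *ready_set)
{
	/* simply pick the first node of the ready set */
	ir_node *irn = nodeset_first(ready_set);
	nodeset_break(ready_set);  /* stop the iteration before returning */
	return irn;
}

static const list_sched_selector_t first_ready_selector_struct = {
	trivial_init_graph,
	trivial_init_block,
	first_ready_select,
	NULL,                /* to_appear_in_schedule */
	NULL,                /* exectime */
	NULL,                /* latency */
	NULL,                /* finish_block */
	NULL                 /* finish_graph */
};
#endif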

typedef struct _usage_stats_t {
	ir_node *irn;
	struct _usage_stats_t *next;
	int max_hops;
	int uses_in_block;      /**< Number of uses inside the current block. */
	int already_consumed;   /**< Number of insns using this value already
							  scheduled. */
} usage_stats_t;

typedef struct {
	const list_sched_selector_t *vtab;
	const arch_env_t *arch_env;
} reg_pressure_main_env_t;

typedef struct {
	struct obstack obst;
	const reg_pressure_main_env_t *main_env;
	usage_stats_t *root;
	nodeset *already_scheduled;
} reg_pressure_selector_env_t;

static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);

	if(!us) {
		us                   = obstack_alloc(&env->obst, sizeof(us[0]));
		us->irn              = irn;
		us->already_consumed = 0;
		us->max_hops         = INT_MAX;
		us->next             = env->root;
		env->root            = us;
		set_irn_link(irn, us);
	}

	return us;
}

static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);
	assert(us && "This node must have usage stats");
	return us;
}

static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
	ir_node *bl = get_nodes_block(irn);

	/*
	 * If the reached node is not in the block desired,
	 * return the value passed for this situation.
	 */
	if(bl != curr_bl)
		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;

	/*
	 * If the node is in the current block but not
	 * yet scheduled, we keep on searching from that node.
	 */
	if(!nodeset_find(env->already_scheduled, irn)) {
		int i, n;
		int res = 0;

		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *operand = get_irn_n(irn, i);

			if(get_irn_visited(operand) < visited_nr) {
				int tmp;

				set_irn_visited(operand, visited_nr);
				tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
				res = MAX(tmp, res);
			}
		}

		return res;
	}

	/*
	 * If the node is in the current block and scheduled, return
	 * the depth which indicates the number of steps to the
	 * region of scheduled nodes.
	 */
	return depth;
}
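/*
 * Illustrative note (not in the original source): the visited_nr
 * mechanism gives each walk a fresh generation number, so a node is
 * revisited only if its stored visited counter is older than the
 * current walk's number. compute_max_hops() below bumps the graph's
 * visited number once per out-edge before seeding the walker.
 */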

static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
	ir_node *bl   = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);
	int res       = 0;

	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user       = get_edge_src_irn(edge);
		unsigned visited_nr = get_irg_visited(irg) + 1;
		int max_hops;

		set_irg_visited(irg, visited_nr);
		max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
		res      = MAX(res, max_hops);
	}

	return res;
}

static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));

	main_env->arch_env = arch_env;
	main_env->vtab     = vtab;
	irg_walk_graph(irg, firm_clear_link, NULL, NULL);

	return main_env;
}

static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
	ir_node *irn;
	reg_pressure_selector_env_t *env  = xmalloc(sizeof(env[0]));

	obstack_init(&env->obst);
	env->already_scheduled = new_nodeset(32);
	env->root              = NULL;
	env->main_env          = graph_env;

	/*
	 * Collect usage statistics.
	 */
	sched_foreach(bl, irn) {
		if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
			int i, n;

			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);
				if(must_appear_in_schedule(env->main_env->vtab, env, op)) {
					usage_stats_t *us = get_or_set_usage_stats(env, op);
					if(is_live_end(bl, op))
						us->uses_in_block = 99999;
					else
						us->uses_in_block++;
				}
			}
		}
	}

	return env;
}

static void reg_pressure_block_free(void *block_env)
{
	reg_pressure_selector_env_t *env = block_env;
	usage_stats_t *us;

	for(us = env->root; us; us = us->next)
		set_irn_link(us->irn, NULL);

	obstack_free(&env->obst, NULL);
	del_nodeset(env->already_scheduled);
	free(env);
}

static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int res = 0;
	if(get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;

		foreach_out_edge(irn, edge)
			res += get_result_hops_sum(env, get_edge_src_irn(edge));
	}
	else if(mode_is_data(get_irn_mode(irn)))
		res = compute_max_hops(env, irn);

	return res;
}

static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int i, n;
	int sum = 0;

	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if(must_appear_in_schedule(env->main_env->vtab, env, op))
			sum += compute_max_hops(env, op);
	}

	sum += get_result_hops_sum(env, irn);

	return sum;
}

static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
{
	reg_pressure_selector_env_t *env = block_env;
	ir_node *irn, *res     = NULL;
	int curr_cost          = INT_MAX;

	assert(nodeset_count(ready_set) > 0);

	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		/*
			Ignore branch instructions for the time being.
			They should only be scheduled if there is nothing else.
		*/
		if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
			int costs = reg_pr_costs(env, irn);
			if (costs <= curr_cost) {
				res       = irn;
				curr_cost = costs;
			}
		}
	}

	/*
		There was no result, so we only saw a branch.
		Take it and finish.
	*/
	if(!res) {
		res = nodeset_first(ready_set);
		nodeset_break(ready_set);

		assert(res && "There must be a node scheduled.");
	}

	nodeset_insert(env->already_scheduled, res);

	return res;
}

/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
	sched_irn_t *sched_info;                    /**< scheduling info per node, copied from the global scheduler object */
	sched_timestep_t curr_time;                 /**< current time of the scheduler */
	nodeset *cands;                             /**< the set of candidates */
	ir_node *block;                             /**< the current block */
	sched_env_t *sched_env;                     /**< the scheduler environment */
	const list_sched_selector_t *selector;
	void *selector_block_env;
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
} block_sched_env_t;

/**
 * Returns non-zero if the node is already scheduled
 */
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].already_sched;
}

/**
 * Mark a node as already scheduled
 */
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].already_sched = 1;
}

/**
 * Returns non-zero if the node is a root node
 */
static INLINE unsigned is_root_node(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].is_root;
}

/**
 * Mark a node as root node
 */
static INLINE void mark_root_node(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].is_root = 1;
}

/**
 * Get the current delay.
 */
static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].delay;
}

/**
 * Set the current delay.
 */
static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].delay = delay;
}

/**
 * Get the current etime.
 */
static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].etime;
}

/**
 * Set the current etime.
 */
static void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].etime = etime;
}

/**
 * Returns the exec-time for node n.
 */
static sched_timestep_t exectime(sched_env_t *env, ir_node *n) {
	if (be_is_Keep(n) || is_Proj(n))
		return 0;
	if (env->selector->exectime)
		return env->selector->exectime(env->selector_env, n);
	return 1;
}

/**
 * Calculates the latency between two ops.
 */
static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
	/* a Keep hides a root */
	if (be_is_Keep(curr))
		return exectime(env, pred);

	/* Projs are executed immediately */
	if (is_Proj(curr))
		return 0;

	/* predecessor Projs must be skipped */
	if (is_Proj(pred))
		pred = get_Proj_pred(pred);

	if (env->selector->latency)
		return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
	return 1;
}
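/*
 * Illustrative note (not in the original source): with the fallbacks
 * above, every ordinary node costs one time step and every dependence
 * has a latency of one, while Keeps and Projs are free. A selector can
 * model a real pipeline simply by supplying its own exectime/latency
 * hooks; the etime bookkeeping in make_ready() below picks them up
 * unchanged.
 */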

/**
 * Try to put a node in the ready set.
 *
 * @param env   The block scheduler environment.
 * @param pred  The previous scheduled node.
 * @param irn   The node to make ready.
 *
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
	int i, n;
	sched_timestep_t etime_p, etime;

	/* Blocks cannot be scheduled. */
	if (is_Block(irn))
		return 0;

	/*
	 * Check, if the given ir node is in a different block as the
	 * currently scheduled one. If that is so, don't make the node ready.
	 */
	if (env->block != get_nodes_block(irn))
		return 0;

	for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		/* if irn is an End we have keep-alives and op might be a block, skip that */
		if (is_Block(op)) {
			assert(get_irn_op(irn) == op_End);
			continue;
		}

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if (!is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
			return 0;
	}

	nodeset_insert(env->cands, irn);

	/* calculate the etime of this node */
	etime = env->curr_time;
	if (pred) {
		etime_p  = get_irn_etime(env, pred);
		etime   += latency(env->sched_env, pred, 1, irn, 0);

		etime = etime_p > etime ? etime_p : etime;
	}

	set_irn_etime(env, irn, etime);

	DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));

	return 1;
}

/**
 * Try to make all users of a node ready.
 * In fact, a usage node can only be made ready if all its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node, whose usages (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = edge->src;
		if(!is_Phi(user))
			make_ready(env, irn, user);
	}
}

/**
 * Compare two nodes using pointer equality.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 */
static int node_cmp_func(const void *p1, const void *p2)
{
	return p1 != p2;
}

/**
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return    The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise, it is not put into the list */
	if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		sched_info_t *info = get_irn_sched_info(irn);
		INIT_LIST_HEAD(&info->list);
		info->scheduled = 1;
		sched_add_before(env->block, irn);

		DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
	}

	/* Insert the node in the set of all already scheduled nodes. */
	mark_already_scheduled(env, irn);

	/* Remove the node from the ready set */
	if(nodeset_find(env->cands, irn))
		nodeset_remove(env->cands, irn);

	return irn;
}

/**
 * Add the proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-mode irn and
 * one of its projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node which defines multiple
 * values.
 *
 * @param irn The tuple-mode irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	foreach_out_edge(irn, edge) {
		ir_node *out = edge->src;

		assert(is_Proj(out) && "successor of a mode_T node must be a proj");

		if (get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);
		else {
			add_to_sched(env, out);
			make_users_ready(env, out);
		}
	}
}

/**
 * Execute the heuristic function.
 */
static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
{
	ir_node *irn;

	for (irn = nodeset_first(ns); irn; irn = nodeset_next(ns)) {
		if (be_is_Keep(irn)) {
			nodeset_break(ns);
			return irn;
		}
	}

	return be->selector->select(be->selector_block_env, ns);
}

/**
 * Returns non-zero if root is a root node in the given block.
 */
static int is_root(ir_node *root, ir_node *block) {
	const ir_edge_t *edge;

	foreach_out_edge(root, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Block(succ))
			continue;
		if (get_nodes_block(succ) == block)
			return 0;
	}
	return 1;
}

/* we need a special mark */
static char _mark;
#define MARK	&_mark

/**
 * Descend into a DAG and create a pre-order list.
 */
static void descent(ir_node *root, ir_node *block, ir_node **list) {
	int i;

	if (! is_Phi(root)) {
		/* Phi nodes always leave the block */
		for (i = get_irn_arity(root) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_n(root, i);

			/* Blocks may happen as predecessors of End nodes */
			if (is_Block(pred))
				continue;

			/* already seen nodes are not marked */
			if (get_irn_link(pred) != MARK)
				continue;

			/* don't leave our block */
			if (get_nodes_block(pred) != block)
				continue;

			descent(pred, block, list);
		}
	}
	set_irn_link(root, *list);
	*list = root;
}
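/*
 * Illustrative note (not in the original source): the list is threaded
 * through the nodes' link fields rather than a separate container; each
 * set_irn_link(root, *list) / *list = root pair prepends a node, and
 * the consumer below walks the result with get_irn_link(). The MARK
 * sentinel distinguishes "not yet visited" from "already listed".
 */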

/**
 * Perform list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
	sched_env_t *env                      = env_ptr;
	const list_sched_selector_t *selector = env->selector;
	ir_node *start_node                   = get_irg_start(get_irn_irg(block));
	sched_info_t *info                    = get_irn_sched_info(block);

	block_sched_env_t be;
	const ir_edge_t *edge;
	ir_node *irn;
	int j, m;

	ir_node *root = NULL, *preord = NULL;
	ir_node *curr;

	/* Initialize the block's list head that will hold the schedule. */
	INIT_LIST_HEAD(&info->list);

	/* Initialize the block scheduling environment */
	be.sched_info        = env->sched_info;
	be.block             = block;
	be.curr_time         = 0;
	be.cands             = new_nodeset(get_irn_n_edges(block));
	be.selector          = selector;
	be.sched_env         = env;
	FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");

//	firm_dbg_set_mask(be.dbg, SET_LEVEL_3);

	if (selector->init_block)
		be.selector_block_env = selector->init_block(env->selector_env, block);

	DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));

	/* First step: Find the root set. */
	foreach_out_edge(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_root(succ, block)) {
			mark_root_node(&be, succ);
			set_irn_link(succ, root);
			root = succ;
		}
		else
			set_irn_link(succ, MARK);
	}

	/* Second step: calculate the pre-order list. */
	preord = NULL;
	for (curr = root; curr; curr = irn) {
		irn = get_irn_link(curr);
		descent(curr, block, &preord);
	}
	root = preord;

	/* Third step: calculate the Delay. Note that our
	 * list is now in pre-order, starting at root
	 */
	for (curr = root; curr; curr = get_irn_link(curr)) {
		sched_timestep_t d;

		if (arch_irn_classify(env->arch_env, curr) == arch_irn_class_branch) {
			/* assure, that branches can be executed last */
			d = 0;
		}
		else {
			if (is_root_node(&be, curr))
				d = exectime(env, curr);
			else {
				d = 0;
				foreach_out_edge(curr, edge) {
					ir_node *n = get_edge_src_irn(edge);

					if (get_nodes_block(n) == block) {
						sched_timestep_t ld;

						ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
						d = ld > d ? ld : d;
					}
				}
			}
		}
		set_irn_delay(&be, curr, d);
		DB((be.dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

		/* set the etime of all nodes to 0 */
		set_irn_etime(&be, curr, 0);
	}
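	/*
	 * Illustrative note (not in the original source): the loop above
	 * computes the classic list-scheduling priority
	 *     delay(n) = max over users u in the block of
	 *                (latency(n, u) + delay(u)),
	 * with delay = exectime at root nodes and delay = 0 for branches,
	 * so nodes on the longest latency path to a root are picked first.
	 */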


	/* Then one can add all nodes that are ready to the set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the end node because of keepalive edges. */
		if (get_irn_opcode(irn) == iro_End)
			continue;

		if (is_Phi(irn)) {
			/* Phi functions are scheduled immediately, since they only transfer
			 * data flow from the predecessors to this block. */

			/* Increase the time step. */
			be.curr_time += get_irn_etime(&be, irn);
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);
		}
		else if (irn == start_node) {
			/* The start block will be scheduled as the first node */
			be.curr_time += get_irn_etime(&be, irn);

			add_to_sched(&be, irn);
			add_tuple_projs(&be, irn);
		}
		else {
			/* Other nodes must have all operands in other blocks to be made
			 * ready */
			int ready = 1;

			/* Check, if the operands of a node are not local to this block */
			for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
				ir_node *operand = get_irn_n(irn, j);

				if (get_nodes_block(operand) == block) {
					ready = 0;
					break;
				}
			}

			/* Make the node ready, if all operands live in a foreign block */
			if (ready) {
				DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, NULL, irn);
			}
		}
	}

	while (nodeset_count(be.cands) > 0) {
		nodeset *mcands;                            /**< the set of candidates with maximum delay time */
		nodeset *ecands;                            /**< the set of nodes in mcands whose etime <= curr_time  */
		sched_timestep_t max_delay = 0;

		/* collect statistics about amount of ready nodes */
		be_do_stat_sched_ready(block, be.cands);

		/* calculate the max delay of all candidates */
		foreach_nodeset(be.cands, irn) {
			sched_timestep_t d = get_irn_delay(&be, irn);

			max_delay = d > max_delay ? d : max_delay;
		}
		mcands = new_nodeset(8);
		ecands = new_nodeset(8);

		/* calculate mcands and ecands */
		foreach_nodeset(be.cands, irn) {
			if (be_is_Keep(irn)) {
				nodeset_break(be.cands);
				break;
			}
			if (get_irn_delay(&be, irn) == max_delay) {
				nodeset_insert(mcands, irn);
				if (get_irn_etime(&be, irn) <= be.curr_time)
					nodeset_insert(ecands, irn);
			}
		}

		if (irn) {
			/* Keeps must be immediately scheduled */
		}
		else {
			DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));

			/* select a node to be scheduled and check if it was ready */
			if (nodeset_count(mcands) == 1) {
				DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
				irn = nodeset_first(mcands);
			}
			else {
				int cnt = nodeset_count(ecands);
				if (cnt == 1) {
					arch_irn_class_t irn_class;

					irn = nodeset_first(ecands);
					irn_class = arch_irn_classify(env->arch_env, irn);

					if (irn_class == arch_irn_class_branch) {
						/* BEWARE: don't select a JUMP if others are still possible */
						goto force_mcands;
					}
					DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));
				}
				else if (cnt > 1) {
					DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
					irn = select_node_heuristic(&be, ecands);
				}
				else {
force_mcands:
					DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
					irn = select_node_heuristic(&be, mcands);
				}
			}
		}
		del_nodeset(mcands);
		del_nodeset(ecands);

		DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));

		/* Increase the time step. */
		be.curr_time += exectime(env, irn);

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		if (get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);
		else
			make_users_ready(&be, irn);

		/* remove the scheduled node from the ready list. */
		if (nodeset_find(be.cands, irn))
			nodeset_remove(be.cands, irn);
	}

	if (selector->finish_block)
		selector->finish_block(be.selector_block_env);

	del_nodeset(be.cands);
}

static const list_sched_selector_t reg_pressure_selector_struct = {
	reg_pressure_graph_init,
	reg_pressure_block_init,
	reg_pressure_select,
	NULL,                    /* to_appear_in_schedule */
	NULL,                    /* exectime */
	NULL,                    /* latency */
	reg_pressure_block_free,
	free
};

const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;

/* List schedule a graph. */
void list_sched(const be_irg_t *birg, int disable_mris)
{
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;

	int num_nodes;
	sched_env_t env;
	mris_env_t *mris;

	/* Assure that the out edges are computed */
	edges_assure(irg);

	if(!disable_mris)
		mris = be_sched_mris_preprocess(birg);

	num_nodes = get_irg_last_idx(irg);

	memset(&env, 0, sizeof(env));
	env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
	env.arch_env   = arch_env;
	env.irg        = irg;
	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	if(!disable_mris)
		be_sched_mris_free(mris);

	DEL_ARR_F(env.sched_info);
}
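
/*
 * Illustrative sketch (not part of the original file): a backend driver
 * would invoke the list scheduler roughly as below once the be_irg_t is
 * set up; the schedule_graph name and the no_mris flag are hypothetical.
 * Passing a non-zero second argument skips the MRIS preprocessing seen
 * above.
 */
#if 0
static void schedule_graph(be_irg_t *birg, int no_mris)
{
	/* pick the selector via the ISA and schedule every block of the graph */
	list_sched(birg, no_mris);
}
#endif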