/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Mips implementation of list scheduler selector
 * @author  Matthias Braun, Mehdi
 * @version $Id$
 */
#include "config.h"

#include "mips_scheduler.h"

#include "../besched.h"
#include "be.h"
#include "../beabi.h"
#include "../belistsched.h"
#include "iredges.h"
#include "ircons.h"
#include "gen_mips_regalloc_if.h"

#include "mips_new_nodes.h"

list_sched_selector_t mips_sched_selector;

typedef struct {
	pset *div_set;
	/**
	 * This array holds an entry for each register that specifies how many cycles
	 * have to pass before we can access that register again
	 * (because mips will write the register value back in the WB phase of the pipeline)
	 */
	int busy_registers[N_mips_gp_REGS];
	/// current block
	ir_node* block;
	ir_node* last_nop;
} mips_sched_env_t;

/* Matze: deprecated and totally broken */
#if 0

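/**
 * Allocates and zero-initialises the per-graph scheduler environment and
 * creates the set that tracks pending mfhi/mflo nodes.
 */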
static void *mips_scheduler_init_graph(const list_sched_selector_t *vtab, ir_graph *irg)
{
	mips_sched_env_t *sched_env = XMALLOCZ(mips_sched_env_t);

	sched_env->div_set = new_pset(pset_default_ptr_cmp, 4);

	return sched_env;
}

static void mips_scheduler_finish_graph(void* graph_env)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	del_pset(sched_env->div_set);
}

static void *mips_scheduler_init_block(void *graph_env, ir_node *block)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	assert(pset_count(sched_env->div_set) == 0);
	srand(12234);
	// TODO later we might have blocks that don't end in a jump
	memset(&sched_env->busy_registers, 0, sizeof(sched_env->busy_registers));
	sched_env->block = block;
	sched_env->last_nop = NULL;
	return sched_env;
}

static void mips_scheduler_finish_block(void* graph_env)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) graph_env;
	// attach last nop to end node (so that firm doesn't discard it)
	if(sched_env->last_nop != NULL) {
		ir_node* end = get_irg_end(get_irn_irg(sched_env->block));
		(void) end;
		// TODO
	}
	sched_env->block = NULL;
}

/**
 * Only real mips instructions occupy a schedule slot; the zero,
 * reinterpret_conv and fallthrough pseudo nodes do not.
 */
static int mips_scheduler_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
	return is_mips_irn(irn) && !is_mips_zero(irn) && !is_mips_reinterpret_conv(irn) && !is_mips_fallthrough(irn);
}

static void mips_collect_mflohis(pset* set, ir_node* node)
{
	// construct a list of nodes that need to be scheduled before
	// we are allowed to schedule another div or mul instruction
	const ir_edge_t *edge, *edge2;

	if(is_mips_div(node)) {
		foreach_out_edge(node, edge) {
			const ir_node* node2 = get_edge_src_irn(edge);

			assert(is_Proj(node2));
			foreach_out_edge(node2, edge2) {
				const ir_node* node3 = get_edge_src_irn(edge2);
				if(is_mips_mfhi(node3) || is_mips_mflo(node3))
					pset_insert_ptr(set, node3);
			}
		}
	} else if(is_mips_mult(node)) {
		foreach_out_edge(node, edge) {
			const ir_node* node2 = get_edge_src_irn(edge);

			if(is_mips_mfhi(node2) || is_mips_mflo(node2))
				pset_insert_ptr(set, node2);
		}
	}
}

/**
 * A new div or mult must not be scheduled while the mfhi/mflo readers of a
 * previous div/mult are still pending; all other nodes are always allowed.
 */
static int mips_scheduler_node_allowed(mips_sched_env_t *sched_env, ir_node* node)
{
	if(pset_count(sched_env->div_set) != 0 && (is_mips_div(node) || is_mips_mult(node))) {
		return 0;
	}

	return 1;
}

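/**
 * Selects the next node to schedule: prefer any ready non-branch node that is
 * currently allowed, keep branches for last (conditional ones before plain
 * jumps) and emit a nop when non-branch nodes are ready but none of them may
 * be scheduled yet.
 */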
static ir_node *mips_scheduler_select(void *block_env, nodeset *ready_set, nodeset *live_set)
{
	mips_sched_env_t *sched_env = (mips_sched_env_t*) block_env;
	ir_node *node = NULL;
	ir_node *block = sched_env->block;
	ir_node *condjmp = NULL;
	ir_graph *irg = get_irn_irg(block);
	int have_non_branch_nodes = 0;

	// test all nodes in the ready set and take the first non-branch that
	// is allowed
	for (node = nodeset_first(ready_set); node != NULL; node = nodeset_next(ready_set)) {
		if (arch_irn_class_is(node, branch)) {
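			// remember a conditional branch so it can be preferred over a plain jump later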
			if (is_irn_forking(node))
				condjmp = node;
			continue;
		}

		have_non_branch_nodes = 1;

		if (mips_scheduler_node_allowed(sched_env, node))
		{
			nodeset_break(ready_set);

			// TODO update busy_registers

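			// record the mfhi/mflo readers of this div/mult: no further div/mult
			// may be scheduled until all of them have been emitted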
			if (is_mips_div(node) || is_mips_mult(node)) {
				mips_collect_mflohis(sched_env->div_set, node);
			} else if(is_mips_mflo(node) || is_mips_mfhi(node)) {
				pset_remove_ptr(sched_env->div_set, node);
			}

			return node;
		}
	}

	// if we arrive here no non-branch node was found that we can emit

	// return a branch if there are just branches left
	if(!have_non_branch_nodes) {
		// schedule conditional branches before non-conditional ones
		if(condjmp != NULL) {
			return condjmp;
		}
		node = nodeset_first(ready_set);
		assert(arch_irn_class_is(node, branch));
		nodeset_break(ready_set);
		return node;
	}

	// non-branch nodes are ready, but none may be scheduled yet: emit a nop
	node = new_rd_mips_nop(NULL, irg, block, mode_M);
	keep_alive(node);
	return node;
}

#endif

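/**
 * Classifies nodes for the list scheduler: -1 for non-mips nodes, 0 for nodes
 * that need no schedule slot (the zero register and immediates), 1 for every
 * other mips node.
 */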
static
int mips_to_appear_in_schedule(void *block_env, const ir_node *node)
{
	(void) block_env;

	if(!is_mips_irn(node))
		return -1;
	if(is_mips_zero(node) || is_mips_Immediate(node))
		return 0;

	return 1;
}

/** Copy of the selector passed to mips_get_list_sched_selector(), with to_appear_in_schedule() overridden. */
list_sched_selector_t mips_selector;

/**
 * Returns a copy of the given list scheduler selector with to_appear_in_schedule() overridden
 */
const list_sched_selector_t *mips_get_list_sched_selector(const void *self,
		list_sched_selector_t *selector)
{
	(void) self;
#if 0
	memset(&mips_sched_selector, 0, sizeof(mips_sched_selector));
	mips_sched_selector.init_graph = mips_scheduler_init_graph;
	mips_sched_selector.init_block = mips_scheduler_init_block;
	mips_sched_selector.select = mips_scheduler_select;
	mips_sched_selector.to_appear_in_schedule = mips_scheduler_to_appear_in_schedule;
	mips_sched_selector.finish_block = mips_scheduler_finish_block;
	mips_sched_selector.finish_graph = mips_scheduler_finish_graph;
	//return &mips_sched_selector;
#endif
	memcpy(&mips_selector, selector, sizeof(mips_selector));
	mips_selector.to_appear_in_schedule = mips_to_appear_in_schedule;

	return &mips_selector;
}

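/**
 * No ILP scheduler is implemented for mips, so NULL is returned.
 */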
const ilp_sched_selector_t *mips_get_ilp_sched_selector(const void *self)
{
	(void) self;
	return NULL;
}