/*
 * This file is part of libFirm.
 * Copyright (C) 2014 University of Karlsruhe.
 */

/**
 * @file
 * @brief   This file implements functions to finalize the irg for the emit phase.
 */
#include "amd64_finish.h"
#include "amd64_new_nodes.h"
#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
#include "bearch.h"
#include "bearch_amd64_t.h"
#include "benode.h"
#include "besched.h"
#include "debug.h"
#include "panic.h"
#include "gen_amd64_new_nodes.h"
#include "gen_amd64_regalloc_if.h"
#include "irgwalk.h"
#include "util.h"
#include "irgmod.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/**
 * Returns the index of the first "same" register.
 */
static unsigned get_first_same(arch_register_req_t const *const req)
{
	unsigned const other = req->other_same;
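	/* other_same is a bitmask over the node's input positions */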
	for (unsigned i = 0; i != 32; ++i) {
		if (other & (1U << i))
			return i;
	}
	panic("same position not found");
}

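/**
 * Returns true if the node is flagged as a commutative binary operation.
 */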
static bool is_commutative(const ir_node *node)
{
	return arch_get_irn_flags(node) & amd64_arch_irn_flag_commutative_binop;
}

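/**
 * Tries to swap the two inputs of a commutative binary operation.
 * Returns true if the inputs were swapped.
 */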
static bool try_swap_inputs(ir_node *node)
{
	/* commutative operation, just switch the inputs */
	if (is_commutative(node)) {
		assert(get_amd64_attr_const(node)->op_mode == AMD64_OP_REG_REG);
		/* TODO: support Cmp input swapping */
		ir_node *in0 = get_irn_n(node, 0);
		ir_node *in1 = get_irn_n(node, 1);
		set_irn_n(node, 0, in1);
		set_irn_n(node, 1, in0);
		return true;
	}
	return false;
}

/**
 * Transforms a Sub to a Neg + Add, which subsequently allows swapping
 * of the inputs. The swapping is also (implicitly) done here.
 */
static void transform_sub_to_neg_add(ir_node *node,
                                     const arch_register_t *out_reg)
{
	ir_node  *block = get_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	ir_graph *irg = get_irn_irg(node);
	ir_node  *in1 = get_irn_n(node, 0);
	ir_node  *in2 = get_irn_n(node, 1);

	const arch_register_t *in2_reg = arch_get_irn_register(in2);

	const amd64_binop_addr_attr_t *attr = get_amd64_binop_addr_attr(node);
	ir_node *add, *add_res;

	if (is_amd64_subs(node)) {
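		/* SSE subtraction: negate in2 by toggling its sign bit with an xorp
		 * against a sign-bit constant, then add the result to in1. */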
		ir_tarval *tv = create_sign_tv(amd64_mode_xmm);
		ir_entity *sign_bit_const = create_float_const_entity(irg, tv);

		amd64_binop_addr_attr_t xor_attr;
		memset(&xor_attr, 0, sizeof(xor_attr));
		xor_attr.base.insn_mode             = INSN_MODE_64;
		xor_attr.base.base.op_mode          = AMD64_OP_ADDR_REG;
		xor_attr.base.addr.base_input       = NO_INPUT;
		xor_attr.base.addr.index_input      = NO_INPUT;
		xor_attr.base.addr.immediate.entity = sign_bit_const;

		ir_node *xor_in[] = { in2 };
		ir_node *xor = new_bd_amd64_xorp(dbgi, block, ARRAY_SIZE(xor_in),
		                                 xor_in, &xor_attr);
		arch_set_irn_register_reqs_in(xor, amd64_xmm_reqs);
		ir_node *neg = new_r_Proj(xor, amd64_mode_xmm, pn_amd64_xorp_res);

		sched_add_before(node, xor);
		arch_set_irn_register(neg, in2_reg);

		ir_node *in[] = { neg, in1 };
		add     = new_bd_amd64_adds(dbgi, block, ARRAY_SIZE(in), in, attr);
		add_res = new_r_Proj(add, amd64_mode_xmm, pn_amd64_adds_res);
	} else {
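		/* Integer subtraction: negate in2 with a neg instruction, then add. */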
		assert(is_amd64_sub(node));
		ir_node *neg = new_bd_amd64_neg(dbgi, block, in2, attr->base.insn_mode);
		arch_set_irn_register_out(neg, pn_amd64_neg_res, out_reg);
		sched_add_before(node, neg);
		ir_node *neg_res
			= new_r_Proj(neg, amd64_reg_classes[CLASS_amd64_gp].mode,
			             pn_amd64_neg_res);

		ir_node *in[] = { neg_res, in1 };
		add     = new_bd_amd64_add(dbgi, block, ARRAY_SIZE(in), in, attr);
		add_res = new_r_Proj(add, mode_Lu, pn_amd64_add_res);
	}
	arch_set_irn_register_reqs_in(add, arch_get_irn_register_reqs_in(node));
	arch_set_irn_register(add_res, out_reg);

	/* exchange the add and the sub */
	edges_reroute(node, add);
	sched_replace(node, add);

	kill_node(node);
}

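/**
 * Turns the address mode of a node back into an explicit load: a separate
 * mov loads the value and the node becomes a plain reg,reg operation.
 * Returns the Proj of the load result.
 */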
static ir_node *amd64_turn_back_am(ir_node *node)
{
	dbg_info          *dbgi  = get_irn_dbg_info(node);
	ir_node           *block = get_nodes_block(node);
	amd64_addr_attr_t *attr  = get_amd64_addr_attr(node);

	amd64_addr_t new_addr = attr->addr;
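	/* collect base, index and memory inputs for the explicit load */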
	ir_node *load_in[3];
	int      load_arity = 0;
	if (attr->addr.base_input != NO_INPUT
	 && attr->addr.base_input != RIP_INPUT) {
		new_addr.base_input = load_arity;
		load_in[load_arity++] = get_irn_n(node, attr->addr.base_input);
	}
	if (attr->addr.index_input != NO_INPUT) {
		new_addr.index_input = load_arity;
		load_in[load_arity++] = get_irn_n(node, attr->addr.index_input);
	}
	assert(attr->addr.mem_input != NO_INPUT);
	new_addr.mem_input = load_arity;
	load_in[load_arity++] = get_irn_n(node, attr->addr.mem_input);

	ir_node *load = new_bd_amd64_mov_gp(dbgi, block, load_arity, load_in,
	                                    attr->insn_mode, AMD64_OP_ADDR,
	                                    new_addr);
	ir_node *load_res = new_r_Proj(load, mode_Lu, pn_amd64_mov_gp_res);

	/* change operation */
	const amd64_binop_addr_attr_t *binop_attr
		= (const amd64_binop_addr_attr_t*)attr;
	ir_node *new_in[2];
	new_in[0] = get_irn_n(node, binop_attr->u.reg_input);
	new_in[1] = load_res;
	set_irn_in(node, ARRAY_SIZE(new_in), new_in);
	attr->base.op_mode     = AMD64_OP_REG_REG;
	attr->addr.base_input  = NO_INPUT;
	attr->addr.index_input = NO_INPUT;

	/* rewire mem-proj */
	foreach_out_edge(node, edge) {
		ir_node *out = get_edge_src_irn(edge);
		if (get_irn_mode(out) == mode_M) {
			set_Proj_pred(out, load);
			set_Proj_num(out, pn_amd64_mov_gp_M);
			break;
		}
	}

	if (sched_is_scheduled(node))
		sched_add_before(node, load);
	return load_res;
}

/**
 * Insert copies for all amd64 nodes where the should_be_same requirement is
 * not fulfilled.
 */
static void assure_should_be_same_requirements(ir_node *const node)
{
	/* Check all OUT requirements, if there is a should_be_same. */
	be_foreach_out(node, i) {
		arch_register_req_t const *const req
			= arch_get_irn_register_req_out(node, i);
		if (!arch_register_req_is(req, should_be_same))
			continue;
		unsigned               const same_pos = get_first_same(req);
		ir_node               *const in_node  = get_irn_n(node, same_pos);
		arch_register_t const *const in_reg   = arch_get_irn_register(in_node);
		arch_register_t const *const out_reg
			= arch_get_irn_register_out(node, i);
		if (in_reg == out_reg)
			continue;

		/* test if any other input is using the out register */
		foreach_irn_in(node, i2, in) {
			arch_register_t const *const reg = arch_get_irn_register(in);
			if (reg == out_reg && (unsigned)i2 != same_pos) {
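				/* A simple Copy inserted before the node would clobber this
				 * input, so rewrite the node instead. */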
				if (!is_amd64_irn(node))
					panic("cannot fulfill should_be_same on non-amd64 node");
				/* see what role this register has */
				const amd64_attr_t *attr = get_amd64_attr_const(node);
				if (attr->op_mode == AMD64_OP_ADDR
				 || attr->op_mode == AMD64_OP_REG
				 || attr->op_mode == AMD64_OP_REG_IMM) {
					panic("unexpected op_mode");
				} else if (attr->op_mode == AMD64_OP_REG_REG) {
swap:;
					bool res = try_swap_inputs(node);
					if (res)
						return;

					if (is_amd64_sub(node) || is_amd64_subs(node)) {
						transform_sub_to_neg_add(node, out_reg);
						return;
					}
					panic("couldn't swap inputs of %+F", node);
				} else {
					assert(attr->op_mode == AMD64_OP_ADDR_REG);
					/* extract load into an own instruction */
					ir_node *res = amd64_turn_back_am(node);
					arch_set_irn_register(res, out_reg);
					goto swap;
				}
			}
		}

		ir_node *const block = get_nodes_block(node);
		ir_node *const copy  = be_new_Copy(block, in_node);

		/* Destination is the out register. */
		arch_set_irn_register(copy, out_reg);
		/* Insert copy before the node into the schedule. */
		sched_add_before(node, copy);
		/* Set copy as in. */
		set_irn_n(node, same_pos, copy);

		DBG((dbg, LEVEL_1, "created copy %+F for should be same argument at input %d of %+F\n", copy, same_pos, node));
	}
}

/**
 * Block walker: finishes a block.
 */
static void amd64_finish_irg_walker(ir_node *const block, void *const env)
{
	(void) env;

	/* Insert should_be_same copies. */
	sched_foreach_safe(block, irn) {
		if (is_amd64_irn(irn))
			assure_should_be_same_requirements(irn);
	}
}

/**
 * Add Copy nodes for not fulfilled should_be_same constraints.
 */
void amd64_finish_irg(ir_graph *const irg)
{
	irg_block_walk_graph(irg, NULL, amd64_finish_irg_walker, NULL);
}

void amd64_init_finish(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.finish");
}