/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into amd64 FIRM)
 */
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "irgmod.h"
#include "iredges.h"
#include "ircons.h"
#include "iropt_t.h"
#include "error.h"
#include "debug.h"
#include "tv_t.h"

#include "benode.h"
#include "betranshlp.h"
#include "beutil.h"
#include "bearch_amd64_t.h"

#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
#include "amd64_new_nodes.h"

#include "gen_amd64_regalloc_if.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Some support functions: */

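/**
 * Returns non-zero if values of the given mode are kept in a general
 * purpose register, i.e. integer and reference modes.
 */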
static inline int mode_needs_gp_reg(ir_mode *mode)
{
	return mode_is_int(mode) || mode_is_reference(mode);
}

/* Op transformers: */

/**
 * Transforms a Const node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_Const(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_mode  *mode  = get_irn_mode(node);

	if (!mode_needs_gp_reg(mode))
		panic("amd64: float constant not supported yet");
	ir_tarval *tv = get_Const_tarval(node);
	assert(tarval_is_uint64(tv));
	uint64_t val = get_tarval_uint64(tv);
	amd64_insn_mode_t imode = val > UINT32_MAX ? INSN_MODE_64 : INSN_MODE_32;
	return new_bd_amd64_Const(dbgi, block, imode, val, false, NULL);
}

/**
 * Transforms a SymConst node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_SymConst(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_entity *entity = get_SymConst_entity(node);

	return new_bd_amd64_Const(dbgi, block, INSN_MODE_32, 0, false, entity);
}

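/**
 * Helper that transforms the block and both operands of a binary node and
 * builds the AMD64 node via the given constructor.
 *
 * @return The transformed AMD64 node.
 */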
static ir_node *gen_binop(ir_node *const node, ir_node *(*const new_node)(dbg_info*, ir_node*, ir_node*, ir_node*))
{
	dbg_info *const dbgi    = get_irn_dbg_info(node);
	ir_node  *const block   = be_transform_node(get_nodes_block(node));
	ir_node  *const op1     = get_binop_left(node);
	ir_node  *const new_op1 = be_transform_node(op1);
	ir_node  *const op2     = get_binop_right(node);
	ir_node  *const new_op2 = be_transform_node(op2);

	return new_node(dbgi, block, new_op1, new_op2);
}

static ir_node *gen_Add (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Add);  }
static ir_node *gen_And (ir_node *const node) { return gen_binop(node, &new_bd_amd64_And);  }
static ir_node *gen_Eor (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Xor);  }
static ir_node *gen_Or  (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Or);   }
static ir_node *gen_Mul (ir_node *const node) { return gen_binop(node, &new_bd_amd64_IMul); }
static ir_node *gen_Shl (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Shl);  }
static ir_node *gen_Shr (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Shr);  }
static ir_node *gen_Shrs(ir_node *const node) { return gen_binop(node, &new_bd_amd64_Sar);  }
static ir_node *gen_Sub (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Sub);  }

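/**
 * Helper that transforms the operand at op_pos of a unary node and builds
 * the AMD64 node via the given constructor.
 *
 * @return The transformed AMD64 node.
 */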
static ir_node *gen_unop(ir_node *const node, int op_pos, ir_node *(*const new_node)(dbg_info*, ir_node*, ir_node*))
{
	dbg_info *const dbgi   = get_irn_dbg_info(node);
	ir_node  *const block  = be_transform_node(get_nodes_block(node));
	ir_node  *const op     = get_irn_n(node, op_pos);
	ir_node  *const new_op = be_transform_node(op);

	return new_node(dbgi, block, new_op);
}

static ir_node *gen_Minus(ir_node *const node)
{
	return gen_unop(node, n_Minus_op, &new_bd_amd64_Neg);
}
static ir_node *gen_Not  (ir_node *const node)
{
	return gen_unop(node, n_Not_op, &new_bd_amd64_Not);
}

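/**
 * Transforms a Jmp node.
 *
 * @return The transformed AMD64 Jmp node.
 */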
static ir_node *gen_Jmp(ir_node *node)
{
	ir_node  *block     = get_nodes_block(node);
	ir_node  *new_block = be_transform_node(block);
	dbg_info *dbgi      = get_irn_dbg_info(node);

	return new_bd_amd64_Jmp(dbgi, new_block);
}

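/**
 * Transforms a Switch node into an AMD64 SwitchJmp using a private jump
 * table entity.
 *
 * @return The created AMD64 SwitchJmp node.
 */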
static ir_node *gen_Switch(ir_node *node)
{
	ir_graph *irg       = get_irn_irg(node);
	ir_node  *new_block = be_transform_node(get_nodes_block(node));
	ir_node  *sel       = get_Switch_selector(node);
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_node  *new_sel   = be_transform_node(sel);
	const ir_switch_table *table  = get_Switch_table(node);
	unsigned               n_outs = get_Switch_n_outs(node);

	ir_entity *entity;

	entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type());
	set_entity_visibility(entity, ir_visibility_private);
	add_entity_linkage(entity, IR_LINKAGE_CONSTANT);

	table = ir_switch_table_duplicate(irg, table);

	ir_node *out = new_bd_amd64_SwitchJmp(dbgi, new_block, new_sel, n_outs, table, entity);
	return out;
}

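/**
 * Duplicates a be_Call node and marks it as modifying the flags register.
 */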
static ir_node *gen_be_Call(ir_node *node)
{
	ir_node *res = be_duplicate_node(node);
	arch_add_irn_flags(res, arch_irn_flags_modify_flags);

	return res;
}

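/**
 * Transforms a Cmp node.
 *
 * @return The created AMD64 Cmp node.
 */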
static ir_node *gen_Cmp(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op1      = get_Cmp_left(node);
	ir_node  *op2      = get_Cmp_right(node);
	ir_mode  *cmp_mode = get_irn_mode(op1);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *new_op1;
	ir_node  *new_op2;
	bool      is_unsigned;

	if (mode_is_float(cmp_mode)) {
		panic("Floating point not implemented yet!");
	}

	assert(get_irn_mode(op2) == cmp_mode);
	is_unsigned = !mode_is_signed(cmp_mode);

	new_op1 = be_transform_node(op1);
	/* new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode); */
	new_op2 = be_transform_node(op2);
	/* new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode); */

	return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
	                        is_unsigned);
}

/**
 * Transforms a Cond.
 *
 * @return the created AMD64 Jcc node
 */
static ir_node *gen_Cond(ir_node *node)
{
	ir_node    *const block     = be_transform_node(get_nodes_block(node));
	dbg_info   *const dbgi      = get_irn_dbg_info(node);
	ir_node    *const selector  = get_Cond_selector(node);
	ir_node    *const flag_node = be_transform_node(selector);
	ir_relation const relation  = get_Cmp_relation(selector);
	return new_bd_amd64_Jcc(dbgi, block, flag_node, relation);
}

static ir_node *gen_Phi(ir_node *node)
{
	ir_mode                   *mode = get_irn_mode(node);
	const arch_register_req_t *req;
	if (mode_needs_gp_reg(mode)) {
		/* all integer operations are on 64bit registers now */
		req  = amd64_reg_classes[CLASS_amd64_gp].class_req;
	} else {
		req = arch_no_register_req;
	}

	return be_transform_phi(node, req);
}

/**
 * Transforms a Conv node.
 *
 * @return The created AMD64 Conv node
 */
static ir_node *gen_Conv(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op       = get_Conv_op(node);
	ir_node  *new_op   = be_transform_node(op);
	ir_mode  *src_mode = get_irn_mode(op);
	ir_mode  *dst_mode = get_irn_mode(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);

	if (src_mode == dst_mode)
		return new_op;

	if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
		panic("float not supported yet");
	} else { /* complete in gp registers */
		int src_bits = get_mode_size_bits(src_mode);
		int dst_bits = get_mode_size_bits(dst_mode);
		ir_mode *min_mode;

		if (src_bits == dst_bits) {
			/* kill unnecessary conv */
			return new_op;
		}

		if (src_bits < dst_bits) {
			min_mode = src_mode;
		} else {
			min_mode = dst_mode;
		}

		ir_node *res = new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
		if (!mode_is_signed(min_mode) && get_mode_size_bits(min_mode) == 32) {
			amd64_attr_t *const attr = get_amd64_attr(res);
			attr->data.insn_mode = INSN_MODE_32;
		}

		return res;
	}
}

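/**
 * Maps the bit size of a mode to the corresponding AMD64 instruction mode.
 */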
static amd64_insn_mode_t get_insn_mode_from_mode(const ir_mode *mode)
{
	switch (get_mode_size_bits(mode)) {
	case  8: return INSN_MODE_8;
	case 16: return INSN_MODE_16;
	case 32: return INSN_MODE_32;
	case 64: return INSN_MODE_64;
	}
	panic("unexpected mode");
}

/**
 * Transforms a Store.
 *
 * @return the created AMD64 Store node
 */
static ir_node *gen_Store(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *ptr      = get_Store_ptr(node);
	ir_node  *new_ptr  = be_transform_node(ptr);
	ir_node  *mem      = get_Store_mem(node);
	ir_node  *new_mem  = be_transform_node(mem);
	ir_node  *val      = get_Store_value(node);
	ir_node  *new_val  = be_transform_node(val);
	ir_mode  *mode     = get_irn_mode(val);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node *new_store;

	if (mode_is_float(mode)) {
		panic("Float not supported yet");
	} else {
		assert(mode_needs_gp_reg(mode) && "unsupported mode for Store");
		amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
		new_store = new_bd_amd64_Store(dbgi, block, new_ptr, new_val, new_mem, insn_mode, NULL);
	}
	set_irn_pinned(new_store, get_irn_pinned(node));
	return new_store;
}

/**
 * Transforms a Load.
 *
 * @return the created AMD64 Load node
 */
static ir_node *gen_Load(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *ptr      = get_Load_ptr(node);
	ir_node  *new_ptr  = be_transform_node(ptr);
	ir_node  *mem      = get_Load_mem(node);
	ir_node  *new_mem  = be_transform_node(mem);
	ir_mode  *mode     = get_Load_mode(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *new_load;

	if (mode_is_float(mode)) {
		panic("Float not supported yet");
	} else {
		assert(mode_needs_gp_reg(mode) && "unsupported mode for Load");
		amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
		if (get_mode_size_bits(mode) < 64 && mode_is_signed(mode)) {
			new_load = new_bd_amd64_LoadS(dbgi, block, new_ptr, new_mem, insn_mode, NULL);
		} else {
			new_load = new_bd_amd64_LoadZ(dbgi, block, new_ptr, new_mem, insn_mode, NULL);
		}
	}
	set_irn_pinned(new_load, get_irn_pinned(node));

	return new_load;
}

/**
 * Transform a Proj from a Load.
 */
static ir_node *gen_Proj_Load(ir_node *node)
{
	ir_node  *load     = get_Proj_pred(node);
	ir_node  *new_load = be_transform_node(load);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	long     proj      = get_Proj_proj(node);

	/* renumber the proj */
	switch (get_amd64_irn_opcode(new_load)) {
		case iro_amd64_LoadS:
			/* handle all gp loads equal: they have the same proj numbers. */
			if (proj == pn_Load_res) {
				return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_LoadS_res);
			} else if (proj == pn_Load_M) {
				return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_LoadS_M);
			}
		break;
		case iro_amd64_LoadZ:
			/* handle all gp loads equal: they have the same proj numbers. */
			if (proj == pn_Load_res) {
				return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_LoadZ_res);
			} else if (proj == pn_Load_M) {
				return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_LoadZ_M);
			}
		break;
		default:
			panic("Unsupported Proj from Load");
	}

	return be_duplicate_node(node);
}

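/**
 * Transform a Proj from a Store.
 */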
static ir_node *gen_Proj_Store(ir_node *node)
{
	ir_node *pred = get_Proj_pred(node);
	long     pn   = get_Proj_proj(node);
	if (pn == pn_Store_M) {
		return be_transform_node(pred);
	} else {
		panic("Unsupported Proj from Store");
	}
}

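/**
 * Transform a Proj from a be_Call.
 */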
static ir_node *gen_Proj_be_Call(ir_node *node)
{
	ir_mode *mode = get_irn_mode(node);
	if (mode_needs_gp_reg(mode)) {
		ir_node *pred     = get_Proj_pred(node);
		ir_node *new_pred = be_transform_node(pred);
		long     pn       = get_Proj_proj(node);
		ir_node *new_proj = new_r_Proj(new_pred, mode_Lu, pn);
		return new_proj;
	} else {
		return be_duplicate_node(node);
	}
}

/**
 * Transforms a FrameAddr into an AMD64 Add.
 */
static ir_node *gen_be_FrameAddr(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	ir_entity *ent    = be_get_frame_entity(node);
	ir_node   *fp     = be_get_FrameAddr_frame(node);
	ir_node   *new_fp = be_transform_node(fp);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *new_node;

	new_node = new_bd_amd64_FrameAddr(dbgi, block, new_fp, ent);
	return new_node;
}

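/**
 * Duplicates the be_Start node and marks it to set up the stack frame.
 */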
static ir_node *gen_be_Start(ir_node *node)
{
	ir_node *new_node = be_duplicate_node(node);
	be_start_set_setup_stackframe(new_node, true);
	return new_node;
}

static ir_node *gen_be_Return(ir_node *node)
{
	ir_node *new_node = be_duplicate_node(node);
	be_return_set_destroy_stackframe(new_node, true);
	return new_node;
}

/* Boilerplate code for transformation: */

static void amd64_register_transformers(void)
{
	be_start_transform_setup();

	be_set_transform_function(op_Const,        gen_Const);
	be_set_transform_function(op_SymConst,     gen_SymConst);
	be_set_transform_function(op_Add,          gen_Add);
	be_set_transform_function(op_And,          gen_And);
	be_set_transform_function(op_Eor,          gen_Eor);
	be_set_transform_function(op_Sub,          gen_Sub);
	be_set_transform_function(op_Mul,          gen_Mul);
	be_set_transform_function(op_Not,          gen_Not);
	be_set_transform_function(op_Or,           gen_Or);
	be_set_transform_function(op_Shl,          gen_Shl);
	be_set_transform_function(op_Shr,          gen_Shr);
	be_set_transform_function(op_Shrs,         gen_Shrs);
	be_set_transform_function(op_be_Call,      gen_be_Call);
	be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
	be_set_transform_function(op_be_Return,    gen_be_Return);
	be_set_transform_function(op_be_Start,     gen_be_Start);
	be_set_transform_function(op_Conv,         gen_Conv);
	be_set_transform_function(op_Jmp,          gen_Jmp);
	be_set_transform_function(op_Switch,       gen_Switch);
	be_set_transform_function(op_Cmp,          gen_Cmp);
	be_set_transform_function(op_Cond,         gen_Cond);
	be_set_transform_function(op_Phi,          gen_Phi);
	be_set_transform_function(op_Load,         gen_Load);
	be_set_transform_function(op_Store,        gen_Store);
	be_set_transform_function(op_Minus,        gen_Minus);

	be_set_transform_proj_function(op_be_Call,  gen_Proj_be_Call);
	be_set_transform_proj_function(op_be_Start, be_duplicate_node);
	be_set_transform_proj_function(op_Load,     gen_Proj_Load);
	be_set_transform_proj_function(op_Store,    gen_Proj_Store);
}

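/**
 * Transforms the given graph into AMD64 FIRM.
 */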
void amd64_transform_graph(ir_graph *irg)
{
	amd64_register_transformers();
	be_transform_graph(irg, NULL);
}

void amd64_init_transform(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.transform");
}