/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into amd64 FIRM)
 */
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "irgmod.h"
#include "iredges.h"
#include "ircons.h"
#include "iropt_t.h"
#include "error.h"
#include "debug.h"

#include "benode.h"
#include "betranshlp.h"
#include "beutil.h"
#include "bearch_amd64_t.h"

#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
#include "amd64_new_nodes.h"

#include "gen_amd64_regalloc_if.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Some support functions: */

static inline int mode_needs_gp_reg(ir_mode *mode)
{
	return mode_is_int(mode) || mode_is_reference(mode);
}

/**
 * Create a DAG constructing a given Const.
 *
 * @param irn  a Firm const
 */
static ir_node *create_const_graph(ir_node *irn, ir_node *block)
{
	ir_tarval *tv   = get_Const_tarval(irn);
	ir_mode   *mode = get_tarval_mode(tv);
	dbg_info  *dbgi = get_irn_dbg_info(irn);
	unsigned   value;

	if (mode_is_reference(mode)) {
		/* AMD64 is 64bit, so we can safely convert a reference tarval into Lu */
		assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Lu));
		tv = tarval_convert_to(tv, mode_Lu);
	}

	value = get_tarval_long(tv);

	return new_bd_amd64_Const(dbgi, block, value);
}

/* Op transformers: */

/**
 * Transforms a Const node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_Const(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_mode  *mode  = get_irn_mode(node);
	ir_node *res = create_const_graph(node, block);
	(void) mode;

	return res;
}

/**
 * Transforms a SymConst node.
 *
 * @return The transformed AMD64 node.
 */
static ir_node *gen_SymConst(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	ir_entity *entity = get_SymConst_entity(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *new_node;

	new_node = new_bd_amd64_SymConst(dbgi, block, entity);
	return new_node;
}

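/**
 * Transforms a binary operation by transforming both operands and calling the
 * given amd64 node constructor.
 */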
static ir_node *gen_binop(ir_node *const node, ir_node *(*const new_node)(dbg_info*, ir_node*, ir_node*, ir_node*))
{
	dbg_info *const dbgi    = get_irn_dbg_info(node);
	ir_node  *const block   = be_transform_node(get_nodes_block(node));
	ir_node  *const op1     = get_binop_left(node);
	ir_node  *const new_op1 = be_transform_node(op1);
	ir_node  *const op2     = get_binop_right(node);
	ir_node  *const new_op2 = be_transform_node(op2);

	return new_node(dbgi, block, new_op1, new_op2);
}

static ir_node *gen_Add (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Add);  }
static ir_node *gen_And (ir_node *const node) { return gen_binop(node, &new_bd_amd64_And);  }
static ir_node *gen_Eor (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Xor);  }
static ir_node *gen_Or  (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Or);   }
static ir_node *gen_Mul (ir_node *const node) { return gen_binop(node, &new_bd_amd64_IMul); }
static ir_node *gen_Shl (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Shl);  }
static ir_node *gen_Shr (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Shr);  }
static ir_node *gen_Shrs(ir_node *const node) { return gen_binop(node, &new_bd_amd64_Sar);  }
static ir_node *gen_Sub (ir_node *const node) { return gen_binop(node, &new_bd_amd64_Sub);  }

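/**
 * Transforms a unary operation by transforming the operand at op_pos and
 * calling the given amd64 node constructor.
 */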
static ir_node *gen_unop(ir_node *const node, int op_pos, ir_node *(*const new_node)(dbg_info*, ir_node*, ir_node*))
{
	dbg_info *const dbgi   = get_irn_dbg_info(node);
	ir_node  *const block  = be_transform_node(get_nodes_block(node));
	ir_node  *const op     = get_irn_n(node, op_pos);
	ir_node  *const new_op = be_transform_node(op);

	return new_node(dbgi, block, new_op);
}

static ir_node *gen_Minus(ir_node *const node)
{
	return gen_unop(node, n_Minus_op, &new_bd_amd64_Neg);
}
static ir_node *gen_Not  (ir_node *const node)
{
	return gen_unop(node, n_Not_op, &new_bd_amd64_Not);
}

static ir_node *gen_Jmp(ir_node *node)
{
	ir_node  *block     = get_nodes_block(node);
	ir_node  *new_block = be_transform_node(block);
	dbg_info *dbgi      = get_irn_dbg_info(node);

	return new_bd_amd64_Jmp(dbgi, new_block);
}

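/**
 * Transforms a Switch into an amd64 SwitchJmp with a separate jump table
 * entity.
 */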
static ir_node *gen_Switch(ir_node *node)
{
	ir_graph *irg       = get_irn_irg(node);
	ir_node  *new_block = be_transform_node(get_nodes_block(node));
	ir_node  *sel       = get_Switch_selector(node);
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_node  *new_sel   = be_transform_node(sel);
	const ir_switch_table *table  = get_Switch_table(node);
	unsigned               n_outs = get_Switch_n_outs(node);

	ir_entity *entity;

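	/* Create a private, constant entity to hold the jump table referenced by
	 * the SwitchJmp. */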
	entity = new_entity(NULL, id_unique("TBL%u"), get_unknown_type());
	set_entity_visibility(entity, ir_visibility_private);
	add_entity_linkage(entity, IR_LINKAGE_CONSTANT);

	table = ir_switch_table_duplicate(irg, table);

	ir_node *out = new_bd_amd64_SwitchJmp(dbgi, new_block, new_sel, n_outs, table, entity);
	return out;
}

static ir_node *gen_be_Call(ir_node *node)
{
	ir_node *res = be_duplicate_node(node);
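	/* A call may clobber the condition-code flags, so mark it as
	 * flags-modifying. */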
	arch_add_irn_flags(res, arch_irn_flags_modify_flags);

	return res;
}

static ir_node *gen_Cmp(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op1      = get_Cmp_left(node);
	ir_node  *op2      = get_Cmp_right(node);
	ir_mode  *cmp_mode = get_irn_mode(op1);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *new_op1;
	ir_node  *new_op2;
	bool      is_unsigned;

	if (mode_is_float(cmp_mode)) {
		panic("Floating point not implemented yet!");
	}

	assert(get_irn_mode(op2) == cmp_mode);
	is_unsigned = !mode_is_signed(cmp_mode);

	new_op1 = be_transform_node(op1);
	/* new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode); */
	new_op2 = be_transform_node(op2);
	/* new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode); */
	return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
	                        is_unsigned);
}

/**
 * Transforms a Cond.
 *
 * @return the created amd64 Jcc node
 */
static ir_node *gen_Cond(ir_node *node)
{
	ir_node    *const block     = be_transform_node(get_nodes_block(node));
	dbg_info   *const dbgi      = get_irn_dbg_info(node);
	ir_node    *const selector  = get_Cond_selector(node);
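	/* Only Cmp selectors are handled here: the transformed Cmp produces the
	 * flags value consumed by the Jcc. */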
	ir_node    *const flag_node = be_transform_node(selector);
	ir_relation const relation  = get_Cmp_relation(selector);
	return new_bd_amd64_Jcc(dbgi, block, flag_node, relation);
}

static ir_node *gen_Phi(ir_node *node)
{
	ir_mode                   *mode = get_irn_mode(node);
	const arch_register_req_t *req;
	if (mode_needs_gp_reg(mode)) {
		/* all integer operations are on 64bit registers now */
		req  = amd64_reg_classes[CLASS_amd64_gp].class_req;
	} else {
		req = arch_no_register_req;
	}

	return be_transform_phi(node, req);
}

/**
 * Transforms a Conv node.
 *
 * @return The created amd64 Conv node
 */
static ir_node *gen_Conv(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op       = get_Conv_op(node);
	ir_node  *new_op   = be_transform_node(op);
	ir_mode  *src_mode = get_irn_mode(op);
	ir_mode  *dst_mode = get_irn_mode(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);

	if (src_mode == dst_mode)
		return new_op;

	if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
		panic("float not supported yet");
	} else { /* complete in gp registers */
		int src_bits = get_mode_size_bits(src_mode);
		int dst_bits = get_mode_size_bits(dst_mode);
		ir_mode *min_mode;

		if (src_bits == dst_bits) {
			/* kill unnecessary conv */
			return new_op;
		}

		if (src_bits < dst_bits) {
			min_mode = src_mode;
		} else {
			min_mode = dst_mode;
		}

		ir_node *res = new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
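		/* Writing a 32-bit register on amd64 implicitly zero-extends to
		 * 64 bit, so unsigned 32-bit conversions can use the 32-bit
		 * instruction mode. */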
		if (!mode_is_signed(min_mode) && get_mode_size_bits(min_mode) == 32) {
			amd64_attr_t *const attr = get_amd64_attr(res);
			attr->data.insn_mode = INSN_MODE_32;
		}

		return res;
	}
}

static amd64_insn_mode_t get_insn_mode_from_mode(const ir_mode *mode)
{
	switch (get_mode_size_bits(mode)) {
	case  8: return INSN_MODE_8;
	case 16: return INSN_MODE_16;
	case 32: return INSN_MODE_32;
	case 64: return INSN_MODE_64;
	}
	panic("unexpected mode");
}

/**
 * Transforms a Store.
 *
 * @return the created AMD64 Store node
 */
static ir_node *gen_Store(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *ptr      = get_Store_ptr(node);
	ir_node  *new_ptr  = be_transform_node(ptr);
	ir_node  *mem      = get_Store_mem(node);
	ir_node  *new_mem  = be_transform_node(mem);
	ir_node  *val      = get_Store_value(node);
	ir_node  *new_val  = be_transform_node(val);
	ir_mode  *mode     = get_irn_mode(val);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node *new_store;

	if (mode_is_float(mode)) {
		panic("Float not supported yet");
	} else {
		assert(mode_needs_gp_reg(mode) && "unsupported mode for Store");
		amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
		new_store = new_bd_amd64_Store(dbgi, block, new_ptr, new_val, new_mem, insn_mode, NULL);
	}
	set_irn_pinned(new_store, get_irn_pinned(node));
	return new_store;
}

/**
 * Transforms a Load.
 *
 * @return the created AMD64 Load node
 */
static ir_node *gen_Load(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *ptr      = get_Load_ptr(node);
	ir_node  *new_ptr  = be_transform_node(ptr);
	ir_node  *mem      = get_Load_mem(node);
	ir_node  *new_mem  = be_transform_node(mem);
	ir_mode  *mode     = get_Load_mode(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *new_load;

	if (mode_is_float(mode)) {
		panic("Float not supported yet");
	} else {
		assert(mode_needs_gp_reg(mode) && "unsupported mode for Load");
		amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
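		/* Sub-64-bit signed loads need sign extension, everything else is
		 * zero extended. */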
		if (get_mode_size_bits(mode) < 64 && mode_is_signed(mode)) {
			new_load = new_bd_amd64_LoadS(dbgi, block, new_ptr, new_mem, insn_mode, NULL);
		} else {
			new_load = new_bd_amd64_LoadZ(dbgi, block, new_ptr, new_mem, insn_mode, NULL);
		}
	}
	set_irn_pinned(new_load, get_irn_pinned(node));

	return new_load;
}

/**
 * Transform a Proj from a Load.
 */
static ir_node *gen_Proj_Load(ir_node *node)
{
	ir_node  *load     = get_Proj_pred(node);
	ir_node  *new_load = be_transform_node(load);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	long     proj      = get_Proj_proj(node);

	/* renumber the proj */
	switch (get_amd64_irn_opcode(new_load)) {
		case iro_amd64_LoadS:
			/* handle all gp loads equal: they have the same proj numbers. */
			if (proj == pn_Load_res) {
				return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_LoadS_res);
			} else if (proj == pn_Load_M) {
				return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_LoadS_M);
			}
		break;
		case iro_amd64_LoadZ:
			/* handle all gp loads equal: they have the same proj numbers. */
			if (proj == pn_Load_res) {
				return new_rd_Proj(dbgi, new_load, mode_Lu, pn_amd64_LoadZ_res);
			} else if (proj == pn_Load_M) {
				return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_LoadZ_M);
			}
		break;
		default:
			panic("Unsupported Proj from Load");
	}

	return be_duplicate_node(node);
}

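/**
 * Transform a Proj from a Store.
 */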
static ir_node *gen_Proj_Store(ir_node *node)
{
	ir_node *pred = get_Proj_pred(node);
	long     pn   = get_Proj_proj(node);
	if (pn == pn_Store_M) {
		return be_transform_node(pred);
	} else {
		panic("Unsupported Proj from Store");
	}
}

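/**
 * Transform a Proj from a be_Call.
 */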
static ir_node *gen_Proj_be_Call(ir_node *node)
{
	ir_mode *mode = get_irn_mode(node);
	if (mode_needs_gp_reg(mode)) {
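		/* Call results that live in gp registers are rewired to the 64-bit
		 * general purpose mode. */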
		ir_node *pred     = get_Proj_pred(node);
		ir_node *new_pred = be_transform_node(pred);
		long     pn       = get_Proj_proj(node);
		ir_node *new_proj = new_r_Proj(new_pred, mode_Lu, pn);
		return new_proj;
	} else {
		return be_duplicate_node(node);
	}
}

/**
 * Transforms a be_FrameAddr into an amd64 FrameAddr node.
 */
static ir_node *gen_be_FrameAddr(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	ir_entity *ent    = be_get_frame_entity(node);
	ir_node   *fp     = be_get_FrameAddr_frame(node);
	ir_node   *new_fp = be_transform_node(fp);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *new_node;

	new_node = new_bd_amd64_FrameAddr(dbgi, block, new_fp, ent);
	return new_node;
}

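/**
 * Transforms a be_Start: mark it so that the stack frame gets set up.
 */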
static ir_node *gen_be_Start(ir_node *node)
{
	ir_node *new_node = be_duplicate_node(node);
	be_start_set_setup_stackframe(new_node, true);
	return new_node;
}

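/**
 * Transforms a be_Return: mark it so that the stack frame gets destroyed.
 */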
static ir_node *gen_be_Return(ir_node *node)
{
	ir_node *new_node = be_duplicate_node(node);
	be_return_set_destroy_stackframe(new_node, true);
	return new_node;
}

/* Boilerplate code for transformation: */

static void amd64_register_transformers(void)
{
	be_start_transform_setup();

	be_set_transform_function(op_Const,        gen_Const);
	be_set_transform_function(op_SymConst,     gen_SymConst);
	be_set_transform_function(op_Add,          gen_Add);
	be_set_transform_function(op_And,          gen_And);
	be_set_transform_function(op_Eor,          gen_Eor);
	be_set_transform_function(op_Sub,          gen_Sub);
	be_set_transform_function(op_Mul,          gen_Mul);
	be_set_transform_function(op_Not,          gen_Not);
	be_set_transform_function(op_Or,           gen_Or);
	be_set_transform_function(op_Shl,          gen_Shl);
	be_set_transform_function(op_Shr,          gen_Shr);
	be_set_transform_function(op_Shrs,         gen_Shrs);
	be_set_transform_function(op_be_Call,      gen_be_Call);
	be_set_transform_function(op_be_FrameAddr, gen_be_FrameAddr);
	be_set_transform_function(op_be_Return,    gen_be_Return);
	be_set_transform_function(op_be_Start,     gen_be_Start);
	be_set_transform_function(op_Conv,         gen_Conv);
	be_set_transform_function(op_Jmp,          gen_Jmp);
	be_set_transform_function(op_Switch,       gen_Switch);
	be_set_transform_function(op_Cmp,          gen_Cmp);
	be_set_transform_function(op_Cond,         gen_Cond);
	be_set_transform_function(op_Phi,          gen_Phi);
	be_set_transform_function(op_Load,         gen_Load);
	be_set_transform_function(op_Store,        gen_Store);
	be_set_transform_function(op_Minus,        gen_Minus);

	be_set_transform_proj_function(op_be_Call,  gen_Proj_be_Call);
	be_set_transform_proj_function(op_be_Start, be_duplicate_node);
	be_set_transform_proj_function(op_Load,     gen_Proj_Load);
	be_set_transform_proj_function(op_Store,    gen_Proj_Store);
}

void amd64_transform_graph(ir_graph *irg)
{
	amd64_register_transformers();
	be_transform_graph(irg, NULL);
}

void amd64_init_transform(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.transform");
}