/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief    The main amd64 backend driver file.
 */
#include "amd64_bearch_t.h"

#include "amd64_emitter.h"
#include "amd64_finish.h"
#include "amd64_new_nodes.h"
#include "amd64_optimize.h"
#include "amd64_transform.h"
#include "amd64_varargs.h"
#include "beflags.h"
#include "beirg.h"
#include "bemodule.h"
#include "bera.h"
#include "besched.h"
#include "bespillslots.h"
#include "bestack.h"
#include "beutil.h"
#include "debug.h"
#include "gen_amd64_regalloc_if.h"
#include "irarch_t.h"
#include "ircons.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgwalk.h"
#include "iropt_t.h"
#include "irtools.h"
#include "lower_alloc.h"
#include "lower_builtins.h"
#include "lower_calls.h"
#include "lower_mode_b.h"
#include "lowering.h"
#include "panic.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

pmap *amd64_constants;

ir_mode *amd64_mode_xmm;
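/** Return the frame entity referenced by @p node's address immediate, or NULL
 * if the node does not reference the frame. */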
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
	if (!is_amd64_irn(node))
		return NULL;
	if (!amd64_has_addr_attr(get_amd64_attr_const(node)->op_mode))
		return NULL;
	const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
	if (attr->addr.immediate.kind != X86_IMM_FRAMEENT)
		return NULL;
	return attr->addr.immediate.entity;
}

static int get_insn_size_bytes(amd64_insn_size_t size)
{
	switch (size) {
	case INSN_SIZE_8:       return 1;
	case INSN_SIZE_16:      return 2;
	case INSN_SIZE_32:      return 4;
	case INSN_SIZE_64:      return 8;
	case INSN_SIZE_128:     return 16;
	case INSN_SIZE_80:      break;
	}
	panic("bad insn mode");
}

/**
 * This function is called by the generic backend to correct offsets for
 * nodes accessing the stack.
 */
static void amd64_set_frame_offset(ir_node *node, int offset)
{
	if (!is_amd64_irn(node))
		return;
	amd64_addr_attr_t *attr = get_amd64_addr_attr(node);
	attr->addr.immediate.offset += offset;
	if (is_amd64_pop_am(node)) {
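		/* A pop with a memory destination computes its effective address after
		 * rsp has already been incremented, so compensate for that when frame
		 * entities are addressed relative to rsp (frame pointer omitted). */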
		ir_graph *irg = get_irn_irg(node);
		if (amd64_get_irg_data(irg)->omit_fp)
			attr->addr.immediate.offset -= get_insn_size_bytes(attr->size);
	}
	assert(attr->addr.immediate.kind == X86_IMM_FRAMEENT);
	attr->addr.immediate.kind = X86_IMM_VALUE;
	attr->addr.immediate.entity = NULL;
}

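/** Return the number of bytes by which @p node moves the stack pointer
 * (SP_BIAS_RESET for nodes like leave that reset it). */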
static int amd64_get_sp_bias(const ir_node *node)
{
	if (is_amd64_push_am(node)) {
		const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
		return get_insn_size_bytes(attr->size);
	} else if (is_amd64_push_reg(node)) {
		/* 64-bit register size */
		return AMD64_REGISTER_SIZE;
	} else if (is_amd64_pop_am(node)) {
		const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
		return -get_insn_size_bytes(attr->size);
	} else if (is_amd64_leave(node)) {
		return SP_BIAS_RESET;
	}

	return 0;
}

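/** Create a push_am that pushes the value stored at frame entity @p ent onto
 * the stack and schedule it before @p schedpoint. */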
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp,
                            ir_node *mem, ir_entity *ent,
                            amd64_insn_size_t size)
{
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *frame = get_irg_frame(irg);

	amd64_addr_t addr = {
		.immediate = {
			.kind   = X86_IMM_FRAMEENT,
			.entity = ent,
		},
		.variant    = X86_ADDR_BASE,
		.base_input = 1,
	};
	ir_node *in[] = { sp, frame, mem };
	ir_node *const push = new_bd_amd64_push_am(dbgi, block, ARRAY_SIZE(in), in, rsp_reg_mem_reqs, size, addr);
	sched_add_before(schedpoint, push);
	return push;
}

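/** Create a pop_am that pops the top of the stack into frame entity @p ent
 * and schedule it before @p schedpoint. */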
static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp,
                           ir_entity *ent, amd64_insn_size_t size)
{
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *frame = get_irg_frame(irg);

	amd64_addr_t addr = {
		.immediate = {
			.kind   = X86_IMM_FRAMEENT,
			.entity = ent,
		},
		.variant     = X86_ADDR_BASE,
		.base_input  = 1,
	};
	ir_node *in[] = { sp, frame, get_irg_no_mem(irg) };

	ir_node *const pop = new_bd_amd64_pop_am(dbgi, block, ARRAY_SIZE(in), in, rsp_reg_mem_reqs, size, addr);
	sched_add_before(schedpoint, pop);

	return pop;
}

static ir_node* create_spproj(ir_node *pred, int pos)
{
	return be_new_Proj_reg(pred, pos, &amd64_registers[REG_RSP]);
}

/**
 * Transform a MemPerm. Currently we do this the ugly way and produce
 * cascades of pushes/pops into/from memory. This is possible without
 * using any registers.
 */
static void transform_MemPerm(ir_node *node)
{
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *sp    = be_get_Start_proj(irg, &amd64_registers[REG_RSP]);
	int       arity = be_get_MemPerm_entity_arity(node);
	ir_node **pops  = ALLOCAN(ir_node*, arity);
	int       i;

	/* create Pushs */
	for (i = 0; i < arity; ++i) {
		ir_entity *inent = be_get_MemPerm_in_entity(node, i);
		ir_entity *outent = be_get_MemPerm_out_entity(node, i);
		ir_type *enttype = get_entity_type(inent);
		unsigned entsize = get_type_size(enttype);
		unsigned entsize2 = get_type_size(get_entity_type(outent));
		ir_node *mem = get_irn_n(node, i);

		/* work around cases where entities have different sizes */
		if (entsize2 < entsize)
			entsize = entsize2;

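		/* Copy the entity in the largest chunks its (possibly odd) size
		 * allows. */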
		int offset = 0;
		do {
			amd64_insn_size_t size;
			if (entsize % 2 == 1) {
				size = INSN_SIZE_8;
			} else if (entsize % 4 == 2) {
				size = INSN_SIZE_16;
			} else if (entsize % 8 == 4) {
				size = INSN_SIZE_32;
			} else {
				assert(entsize % 8 == 0);
				size = INSN_SIZE_64;
			}
			}

			ir_node *push = create_push(node, node, sp, mem, inent, size);
			sp = create_spproj(push, pn_amd64_push_am_stack);
			get_amd64_addr_attr(push)->addr.immediate.offset = offset;

			unsigned bytes = get_insn_size_bytes(size);
			offset  += bytes;
			entsize -= bytes;
		} while (entsize > 0);
		set_irn_n(node, i, new_r_Bad(irg, mode_X));
	}

	/* create pops */
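	/* Pop in reverse order so the values pushed above are restored LIFO. */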
	for (i = arity; i-- > 0; ) {
		ir_entity *inent = be_get_MemPerm_in_entity(node, i);
		ir_entity *outent = be_get_MemPerm_out_entity(node, i);
		ir_type *enttype = get_entity_type(outent);
		unsigned entsize = get_type_size(enttype);
		unsigned entsize2 = get_type_size(get_entity_type(inent));

		/* work around cases where entities have different sizes */
		if (entsize2 < entsize)
			entsize = entsize2;

		int      offset = entsize;
		ir_node *pop;
		do {
			amd64_insn_size_t size;
			if (entsize % 2 == 1) {
				size = INSN_SIZE_8;
			} else if (entsize % 4 == 2) {
				size = INSN_SIZE_16;
			} else if (entsize % 8 == 4) {
				size = INSN_SIZE_32;
			} else {
				assert(entsize % 8 == 0);
				size = INSN_SIZE_64;
			}

			pop = create_pop(node, node, sp, outent, size);
			sp  = create_spproj(pop, pn_amd64_pop_am_stack);

			unsigned bytes = get_insn_size_bytes(size);
			offset  -= bytes;
			entsize -= bytes;
			get_amd64_addr_attr(pop)->addr.immediate.offset = offset;
		} while (entsize > 0);
		pops[i] = pop;
	}

	ir_node *const keep = be_new_Keep_one(sp);
	sched_replace(node, keep);

	/* exchange memprojs */
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		int p = get_Proj_num(proj);

		assert(p < arity);

		set_Proj_pred(proj, pops[p]);
		set_Proj_num(proj, pn_amd64_pop_am_M);
	}

	/* remove memperm */
	kill_node(node);
}

static void amd64_after_ra_walker(ir_node *block, void *data)
{
	(void) data;

	sched_foreach_reverse_safe(block, node) {
		if (be_is_MemPerm(node)) {
			transform_MemPerm(node);
		}
	}
}

/**
 * rewrite unsigned long -> float/double conversion
 * x86_64 only has a signed conversion so we use an SSE construction instead
 * (a pattern first seen in LLVM): We split the 64-bit value into two 32-bit
 * values and place them into the mantissa parts of appropriately chosen float
 * values and later add the two floats together. In pseudo code:
 *
 * a = (vector unsigned long, unsigned long) x;
 * b = (vector unsigned, unsigned, unsigned, unsigned)
 *       upper half of 0x1p+52, upper half of 0x1p+84, 0, 0
 * c = repack (a[0], b[0], a[1], b[1])
 * d = (vector double) 0x1p+52, 0x1p+84
 * e = c - d
 * f = e[0] + e[1]
 */
static void rewrite_unsigned_float_Conv(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *block  = get_nodes_block(node);
	ir_node   *in     = get_Conv_op(node);
	ir_node   *in_xmm = new_r_Conv(block, in, amd64_mode_xmm);
	ir_tarval *magic0
		= new_integer_tarval_from_str("4530000043300000", 16, 0, 16,
		                              amd64_mode_xmm);
	ir_node   *const0 = new_r_Const(irg, magic0);
	collect_new_start_block_node(const0);
	ir_node   *punpck = new_bd_amd64_l_punpckldq(dbgi, block, in_xmm, const0);
	ir_tarval *magic1
		= new_integer_tarval_from_str("45300000000000004330000000000000", 32,
		                              0, 16, amd64_mode_xmm);
	ir_node   *const1 = new_r_Const(irg, magic1);
	collect_new_start_block_node(const1);
	ir_node   *subpd  = new_bd_amd64_l_subpd(dbgi, block, punpck, const1);
	ir_node   *haddpd = new_bd_amd64_l_haddpd(dbgi, block, subpd, subpd);
	ir_mode   *mode   = get_irn_mode(node);
	ir_node   *conv   = new_r_Conv(block, haddpd, mode);
	exchange(node, conv);
}

/* Creates a 64-bit constant with only the sign bit set,
 * i.e. returns 0x8000000000000000
 */
static ir_node *create_sign_bit_const(ir_graph *irg)
{
	ir_tarval *sign_tv = create_sign_tv(mode_Ls);
	return new_r_Const(irg, sign_tv);
}

/* rewrite float/double -> unsigned long conversion
 * x86_64 only has a signed conversion so we rewrite to the following:
 *
 * if (x >= 9223372036854775808.) {
 *   converted = (int)(x-9223372036854775808.) ^ 0x8000000000000000;
 * } else {
 *   converted = (int)x;
 * }
 * return (unsigned)converted;
 */
static void rewrite_float_unsigned_Conv(ir_node *node)
{
	ir_graph *irg        = get_irn_irg(node);
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node *lower_block = get_nodes_block(node);
	ir_mode *dest_mode   = get_irn_mode(node);

	part_block(node);

	ir_node   *block    = get_nodes_block(node);
	ir_node   *fp_x     = get_Conv_op(node);
	ir_mode   *src_mode = get_irn_mode(fp_x);
	double     d_const  = 9223372036854775808.;
	ir_tarval *tv       = new_tarval_from_double(d_const, src_mode);
	ir_node   *fp_const = new_r_Const(irg, tv);
	collect_new_start_block_node(fp_const);

	/* Test if the sign bit is needed */
	ir_node *cmp         = new_rd_Cmp(dbgi, block, fp_x, fp_const,
	                                 ir_relation_greater_equal);
	ir_node *cond        = new_rd_Cond(dbgi, block, cmp);
	ir_node *proj_true   = new_r_Proj(cond, mode_X, pn_Cond_true);
	ir_node *proj_false  = new_r_Proj(cond, mode_X, pn_Cond_false);
	ir_node *in_true[1]  = { proj_true };
	ir_node *in_false[1] = { proj_false };

	/* true block: Do some arithmetic to use the signed conversion */
	ir_node *true_block  = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
	ir_node *true_jmp    = new_r_Jmp(true_block);
	ir_node *sub         = new_r_Sub(true_block, fp_const, fp_x, src_mode);
	ir_node *sub_conv    = new_rd_Conv(dbgi, true_block, sub, mode_Ls);
	ir_node *sign_bit    = create_sign_bit_const(irg);
	collect_new_start_block_node(sign_bit);
	ir_node *xor         = new_r_Eor(true_block, sub_conv, sign_bit, mode_Ls);
	ir_node *true_res    = new_rd_Conv(dbgi, true_block, xor, dest_mode);

	/* false block: Simply convert */
	ir_node *false_block  = new_r_Block(irg, ARRAY_SIZE(in_false), in_false);
	ir_node *false_jmp    = new_r_Jmp(false_block);
	ir_node *false_signed = new_rd_Conv(dbgi, false_block, fp_x, mode_Ls);
	ir_node *false_res    = new_rd_Conv(dbgi, false_block, false_signed,
	                                    dest_mode);

	/* lower block */
	ir_node *lower_in[2] = { true_jmp, false_jmp };
	ir_node *phi_in[2]   = { true_res, false_res };

	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	ir_node *phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in,
	                         dest_mode);
	collect_new_phi_node(phi);
	exchange(node, phi);
}

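/** Rewrite 64-bit unsigned <-> floating point Convs, which have no direct
 * amd64 instruction. Returns true if the node was replaced. */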
static bool amd64_rewrite_Conv(ir_node *node)
{
	ir_mode *to_mode    = get_irn_mode(node);
	ir_node *op         = get_Conv_op(node);
	ir_mode *from_mode  = get_irn_mode(op);
	bool     to_float   = mode_is_float(to_mode);
	bool     from_float = mode_is_float(from_mode);

	if (to_float && !from_float && !mode_is_signed(from_mode)
	    && get_mode_size_bits(from_mode) == 64
	    && to_mode != x86_mode_E) {
		rewrite_unsigned_float_Conv(node);
		return true;
	} else if (from_float && !to_float && !mode_is_signed(to_mode)
	           && get_mode_size_bits(to_mode) == 64
	           && from_mode != x86_mode_E) {
		rewrite_float_unsigned_Conv(node);
		return true;
	}

	return false;
}

static void amd64_intrinsics_walker(ir_node *node, void *data)
{
	bool *changed = (bool*)data;
	if (is_Conv(node)) {
		if (amd64_rewrite_Conv(node))
			*changed = true;
	}
}

static void amd64_handle_intrinsics(ir_graph *irg)
{
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
	collect_phiprojs_and_start_block_nodes(irg);
	bool changed = false;
	irg_walk_graph(irg, amd64_intrinsics_walker, NULL, &changed);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);

	if (changed) {
		confirm_irg_properties(irg,
		        IR_GRAPH_PROPERTY_NO_BADS
		        | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
		        | IR_GRAPH_PROPERTY_MANY_RETURNS
		        | IR_GRAPH_PROPERTY_ONE_RETURN);
	}
}

static void amd64_set_frame_entity(ir_node *node, ir_entity *entity,
                                   const ir_type *type)
{
	(void)type;
	amd64_addr_attr_t *attr = get_amd64_addr_attr(node);
	attr->addr.immediate.entity = entity;
}

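/** Return a type whose size matches the given instruction size; used below to
 * request appropriately sized frame entities. */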
static ir_type *get_type_for_insn_size(amd64_insn_size_t const size)
{
	/* TODO: do not hardcode node names here */
	switch (size) {
	case INSN_SIZE_128: return get_type_for_mode(amd64_mode_xmm);
	case INSN_SIZE_80:  return x86_type_E;
	default:            return get_type_for_mode(mode_Lu);
	}
}

/**
 * Collects nodes that need frame entities assigned.
 */
static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
	if (!is_amd64_irn(node))
		return;

	/* Disable coalescing for "returns twice" calls: in case of setjmp/longjmp
	 * our control flow graph isn't completely correct: there are no back edges
	 * from the longjmp to the setjmp, so coalescing would produce wrong
	 * results. */
	be_fec_env_t *const env = (be_fec_env_t*)data;
	if (is_amd64_call(node)) {
		const amd64_call_addr_attr_t    *attrs = get_amd64_call_addr_attr_const(node);
		const ir_type                   *type  = attrs->call_tp;
		const mtp_additional_properties  mtp
			= get_method_additional_properties(type);
		if (mtp & mtp_property_returns_twice)
			be_forbid_coalescing(env);
	}

	/* we are only interested in reporting Load nodes */
	if (!amd64_loads(node))
		return;

	const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
	x86_imm32_t       const *imm  = &attr->addr.immediate;
	if (imm->kind == X86_IMM_FRAMEENT && imm->entity == NULL) {
		const ir_type *type = get_type_for_insn_size(attr->size);
		be_load_needs_frame_entity(env, node, type);
	}
}

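/** Return the index of the Return input that carries the rbp register. */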
static int determine_rbp_input(ir_node *ret)
{
	arch_register_t const *const bp = &amd64_registers[REG_RBP];
	foreach_irn_in(ret, i, input) {
		if (arch_get_irn_register(input) == bp)
			return i;
	}
	panic("no rbp input found at %+F", ret);
}

/**
 * Prepare the graph and perform code selection.
 */
static void amd64_select_instructions(ir_graph *irg)
{
	amd64_adjust_pic(irg);

	be_timer_push(T_CODEGEN);
	amd64_transform_graph(irg);
	be_timer_pop(T_CODEGEN);

	be_dump(DUMP_BE, irg, "code-selection");

	optimize_graph_df(irg);

	be_dump(DUMP_BE, irg, "opt");
}

static void introduce_epilogue(ir_node *ret, bool omit_fp)
{
	ir_graph          *irg        = get_irn_irg(ret);
	ir_node           *block      = get_nodes_block(ret);
	ir_type           *frame_type = get_irg_frame_type(irg);
	unsigned           frame_size = get_type_size(frame_type);
	ir_node           *first_sp   = get_irn_n(ret, n_amd64_ret_stack);
	ir_node           *curr_sp    = first_sp;

	if (!omit_fp) {
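		/* With a frame pointer, a single leave restores both rsp and rbp. */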
		int      const n_rbp    = determine_rbp_input(ret);
		ir_node       *curr_bp  = get_irn_n(ret, n_rbp);
		ir_node       *curr_mem = get_irn_n(ret, n_amd64_ret_mem);
		ir_node *const leave    = new_bd_amd64_leave(NULL, block, curr_bp, curr_mem);
		curr_mem = be_new_Proj(leave, pn_amd64_leave_M);
		curr_bp = be_new_Proj_reg(leave, pn_amd64_leave_frame, &amd64_registers[REG_RBP]);
		curr_sp = be_new_Proj_reg(leave, pn_amd64_leave_stack, &amd64_registers[REG_RSP]);
		sched_add_before(ret, leave);

		set_irn_n(ret, n_amd64_ret_mem, curr_mem);
		set_irn_n(ret, n_rbp,           curr_bp);
	} else {
		if (frame_size > 0) {
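			/* No frame pointer: release the frame by adjusting rsp directly. */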
			ir_node *incsp = amd64_new_IncSP(block, curr_sp,
			                                 -(int)frame_size, 0);
			sched_add_before(ret, incsp);
			curr_sp = incsp;
		}
	}
	set_irn_n(ret, n_amd64_ret_stack, curr_sp);

	/* keep verifier happy... */
	if (get_irn_n_edges(first_sp) == 0 && is_Proj(first_sp)) {
		kill_node(first_sp);
	}
}

static void introduce_prologue(ir_graph *const irg, bool omit_fp)
{
	const arch_register_t *sp         = &amd64_registers[REG_RSP];
	const arch_register_t *bp         = &amd64_registers[REG_RBP];
	ir_node               *start      = get_irg_start(irg);
	ir_node               *block      = get_nodes_block(start);
	ir_type               *frame_type = get_irg_frame_type(irg);
	unsigned               frame_size = get_type_size(frame_type);
	ir_node               *initial_sp = be_get_Start_proj(irg, sp);

	if (!omit_fp) {
		/* push rbp */
		ir_node *const mem        = get_irg_initial_mem(irg);
		ir_node *const initial_bp = be_get_Start_proj(irg, bp);
		ir_node *const push       = new_bd_amd64_push_reg(NULL, block, initial_sp, mem, initial_bp);
		sched_add_after(start, push);
		ir_node *const curr_mem   = be_new_Proj(push, pn_amd64_push_reg_M);
		edges_reroute_except(mem, curr_mem, push);
		ir_node *const curr_sp    = be_new_Proj_reg(push, pn_amd64_push_reg_stack, sp);

		/* move rsp to rbp */
		ir_node *const curr_bp = be_new_Copy(block, curr_sp);
		sched_add_after(push, curr_bp);
		arch_copy_irn_out_info(curr_bp, 0, initial_bp);
		edges_reroute_except(initial_bp, curr_bp, push);

		ir_node *incsp = amd64_new_IncSP(block, curr_sp, frame_size, 0);
		sched_add_after(curr_bp, incsp);
		edges_reroute_except(initial_sp, incsp, push);

		/* make sure the initial IncSP is really used by someone */
		be_keep_if_unused(incsp);

		be_stack_layout_t *const layout = be_get_irg_stack_layout(irg);
		layout->initial_bias = -8;
	} else {
		if (frame_size > 0) {
			ir_node *const incsp = amd64_new_IncSP(block, initial_sp,
			                                       frame_size, 0);
			sched_add_after(start, incsp);
			edges_reroute_except(initial_sp, incsp, incsp);
		}
	}
}

static void introduce_prologue_epilogue(ir_graph *irg, bool omit_fp)
{
	/* introduce epilogue for every return node */
	foreach_irn_in(get_irg_end_block(irg), i, ret) {
		assert(is_amd64_ret(ret));
		introduce_epilogue(ret, omit_fp);
	}

	introduce_prologue(irg, omit_fp);
}

/**
 * Called immediately before the emit phase.
 */
static void amd64_finish_and_emit(ir_graph *irg)
{
	bool          omit_fp = amd64_get_irg_data(irg)->omit_fp;
	be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);

	/* create and coalesce frame entities */
	irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, amd64_set_frame_entity, omit_fp);
	be_free_frame_entity_coalescer(fec_env);

	irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);

	introduce_prologue_epilogue(irg, omit_fp);

	/* fix stack entity offsets */
	be_fix_stack_nodes(irg, &amd64_registers[REG_RSP]);
	be_birg_from_irg(irg)->non_ssa_regs = NULL;
	be_abi_fix_stack_bias(irg, amd64_get_sp_bias, amd64_set_frame_offset,
	                      amd64_get_frame_entity);

	/* Fix 2-address code constraints. */
	amd64_finish_irg(irg);

	amd64_simulate_graph_x87(irg);

	amd64_peephole_optimization(irg);

	/* emit code */
	be_timer_push(T_EMIT);
	amd64_emit_function(irg);
	be_timer_pop(T_EMIT);
}

static void amd64_finish(void)
{
	amd64_free_opcodes();
}

static const regalloc_if_t amd64_regalloc_if = {
	.spill_cost  = 7,
	.reload_cost = 5,
	.new_spill   = amd64_new_spill,
	.new_reload  = amd64_new_reload,
};

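/** Generate code for all graphs: instruction selection, scheduling, register
 * allocation and emission. */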
static void amd64_generate_code(FILE *output, const char *cup_name)
{
	amd64_constants = pmap_create();
	be_begin(output, cup_name);
	unsigned *const sp_is_non_ssa = rbitset_alloca(N_AMD64_REGISTERS);
	rbitset_set(sp_is_non_ssa, REG_RSP);

	foreach_irp_irg(i, irg) {
		if (!be_step_first(irg))
			continue;

		struct obstack *obst = be_get_be_obst(irg);
		be_birg_from_irg(irg)->isa_link = OALLOCZ(obst, amd64_irg_data_t);

		be_birg_from_irg(irg)->non_ssa_regs = sp_is_non_ssa;
		amd64_select_instructions(irg);

		be_step_schedule(irg);

		be_timer_push(T_RA_PREPARATION);
		be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL,
		                   NULL, NULL);
		be_timer_pop(T_RA_PREPARATION);

		be_step_regalloc(irg, &amd64_regalloc_if);

		amd64_finish_and_emit(irg);

		be_step_last(irg);
	}

	be_finish();
	pmap_destroy(amd64_constants);
}

static void amd64_lower_for_target(void)
{
	/* lower compound param handling */
	lower_calls_with_compounds(LF_RETURN_HIDDEN, NULL);
	be_after_irp_transform("lower-calls");

	foreach_irp_irg(i, irg) {
		lower_switch(irg, 4, 256, mode_Iu);
		be_after_transform(irg, "lower-switch");
	}

	foreach_irp_irg(i, irg) {
		/* lower for mode_b stuff */
		ir_lower_mode_b(irg, mode_Lu);
		be_after_transform(irg, "lower-modeb");
		lower_alloc(irg, AMD64_PO2_STACK_ALIGNMENT);
		be_after_transform(irg, "lower-alloc");
	}

	foreach_irp_irg(i, irg) {
		/* Turn all small CopyBs into loads/stores, and turn all bigger
		 * CopyBs into memcpy calls, because we cannot handle CopyB nodes
		 * during code generation yet.
		 * TODO:  Adapt this once custom CopyB handling is implemented. */
		lower_CopyB(irg, 64, 65, true);
		be_after_transform(irg, "lower-copyb");
	}

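	/* Builtins the backend handles itself; everything else gets lowered. */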
	ir_builtin_kind supported[6];
	size_t s = 0;
	supported[s++] = ir_bk_ffs;
	supported[s++] = ir_bk_clz;
	supported[s++] = ir_bk_ctz;
	supported[s++] = ir_bk_compare_swap;
	supported[s++] = ir_bk_saturating_increment;
	supported[s++] = ir_bk_va_start;

	assert(s <= ARRAY_SIZE(supported));
	lower_builtins(s, supported);
	be_after_irp_transform("lower-builtins");
}

static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                                ir_node *mux_true)
{
	/* optimizable by middleend */
	if (ir_is_optimizable_mux(sel, mux_false, mux_true))
		return true;
	return false;
}

static const ir_settings_arch_dep_t amd64_arch_dep = {
	.also_use_subs        = true,
	.maximum_shifts       = 4,
	.highest_shift_amount = 63,
	.evaluate             = NULL,
	.allow_mulhs          = true,
	.allow_mulhu          = true,
	.max_bits_for_mulh    = 32,
};

static backend_params amd64_backend_params = {
	.experimental                  = "the amd64 backend is highly experimental and unfinished (consider the ia32 backend)",
	.byte_order_big_endian         = false,
	.pic_supported                 = true,
	.unaligned_memaccess_supported = true,
	.modulo_shift                  = 32,
	.dep_param                     = &amd64_arch_dep,
	.allow_ifconv                  = amd64_is_mux_allowed,
	.machine_size                  = 64,
	.mode_float_arithmetic         = NULL,  /* will be set later */
	.type_long_long                = NULL,  /* will be set later */
	.type_unsigned_long_long       = NULL,  /* will be set later */
	.type_long_double              = NULL,  /* will be set later */
	.stack_param_align             = 8,
	.float_int_overflow            = ir_overflow_indefinite,
	.vararg                        = {
		.va_list_type = NULL,  /* Will be set later */
		.lower_va_arg = amd64_lower_va_arg,
	},
};

static const backend_params *amd64_get_backend_params(void) {
	return &amd64_backend_params;
}

static int amd64_is_valid_clobber(const char *clobber)
{
	return x86_parse_clobber(amd64_additional_clobber_names, clobber) != NULL;
}

static void amd64_init_types(void)
{
	ir_mode *const ptr_mode = new_reference_mode("p64", irma_twos_complement, 64, 64);
	set_modeP(ptr_mode);

	/* use an int128 mode for xmm registers for now, so that firm allows us to
	 * create constants with the xmm mode... */
	amd64_mode_xmm = new_int_mode("x86_xmm", irma_twos_complement, 128, 0, 0);

	x86_init_x87_type();
	amd64_backend_params.type_long_double = x86_type_E;

	amd64_backend_params.vararg.va_list_type = amd64_build_va_list_type();
}

static void amd64_init(void)
{
	amd64_init_types();
	amd64_register_init();
	amd64_create_opcodes();
	amd64_cconv_init();
	x86_set_be_asm_constraint_support(&amd64_asm_constraints);
}

static unsigned amd64_get_op_estimated_cost(const ir_node *node)
{
	(void)node;/* TODO */
	return 1;
}

static arch_isa_if_t const amd64_isa_if = {
	.n_registers           = N_AMD64_REGISTERS,
	.registers             = amd64_registers,
	.n_register_classes    = N_AMD64_CLASSES,
	.register_classes      = amd64_reg_classes,
	.init                  = amd64_init,
	.finish                = amd64_finish,
	.get_params            = amd64_get_backend_params,
	.generate_code         = amd64_generate_code,
	.lower_for_target      = amd64_lower_for_target,
	.is_valid_clobber      = amd64_is_valid_clobber,
	.handle_intrinsics     = amd64_handle_intrinsics,
	.get_op_estimated_cost = amd64_get_op_estimated_cost,
};

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
	be_register_isa_if("amd64", &amd64_isa_if);
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.cg");

	static const lc_opt_table_entry_t options[] = {
		LC_OPT_ENT_BOOL("x64abi",      "Use x64 ABI (otherwise system V)", &amd64_use_x64_abi),
		LC_OPT_ENT_BOOL("no-red-zone", "gcc compatibility",                &amd64_use_red_zone),
		LC_OPT_LAST
	};
	lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *amd64_grp = lc_opt_get_grp(be_grp, "amd64");
	lc_opt_add_table(amd64_grp, options);

	amd64_init_transform();
}