/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief    The main amd64 backend driver file.
 */
#include "amd64_bearch_t.h"
#include "amd64_emitter.h"
#include "amd64_finish.h"
#include "amd64_new_nodes.h"
#include "amd64_optimize.h"
#include "amd64_transform.h"
#include "amd64_varargs.h"
#include "beflags.h"
#include "beirg.h"
#include "bemodule.h"
#include "bera.h"
#include "besched.h"
#include "bespillslots.h"
#include "bestack.h"
#include "beutil.h"
#include "debug.h"
#include "gen_amd64_regalloc_if.h"
#include "irarch_t.h"
#include "ircons.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgwalk.h"
#include "iropt_t.h"
#include "irtools.h"
#include "lower_alloc.h"
#include "lower_builtins.h"
#include "lower_calls.h"
#include "lower_mode_b.h"
#include "lowering.h"
#include "panic.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

pmap *amd64_constants;

ir_mode *amd64_mode_xmm;

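/**
 * Create a push_am node that pushes the value stored at frame entity
 * @p ent onto the stack and schedule it before @p schedpoint.
 */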
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp,
                            ir_node *mem, ir_entity *ent, x86_insn_size_t size)
{
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *frame = get_irg_frame(irg);

	x86_addr_t addr = {
		.immediate = {
			.kind   = X86_IMM_FRAMEENT,
			.entity = ent,
		},
		.variant    = X86_ADDR_BASE,
		.base_input = 1,
	};
	ir_node *in[] = { sp, frame, mem };
	ir_node *const push = new_bd_amd64_push_am(dbgi, block, ARRAY_SIZE(in), in, rsp_reg_mem_reqs, size, addr);
	sched_add_before(schedpoint, push);
	return push;
}

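/**
 * Create a pop_am node that pops the stack top into frame entity @p ent
 * and schedule it before @p schedpoint.
 */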
static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp,
                           ir_entity *ent, x86_insn_size_t size)
{
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_node  *block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *frame = get_irg_frame(irg);

	x86_addr_t addr = {
		.immediate = {
			.kind   = X86_IMM_FRAMEENT,
			.entity = ent,
		},
		.variant     = X86_ADDR_BASE,
		.base_input  = 1,
	};
	ir_node *in[] = { sp, frame, get_irg_no_mem(irg) };

	ir_node *const pop = new_bd_amd64_pop_am(dbgi, block, ARRAY_SIZE(in), in, rsp_reg_mem_reqs, size, addr);
	sched_add_before(schedpoint, pop);

	return pop;
}

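/** Create a Proj for the stack pointer result of @p pred. */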
static ir_node* create_spproj(ir_node *pred, int pos)
{
	return be_new_Proj_reg(pred, pos, &amd64_registers[REG_RSP]);
}

/**
 * Transform a MemPerm. Currently we do this the ugly way and produce
 * cascades of pushes/pops into/from memory. This works without using
 * any registers.
 */
static void transform_MemPerm(ir_node *node)
{
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *sp    = be_get_Start_proj(irg, &amd64_registers[REG_RSP]);
	int       arity = be_get_MemPerm_entity_arity(node);
	ir_node **pops  = ALLOCAN(ir_node*, arity);

	/* create Pushs */
	for (int i = 0; i < arity; ++i) {
		ir_entity *inent = be_get_MemPerm_in_entity(node, i);
		ir_entity *outent = be_get_MemPerm_out_entity(node, i);
		assert(inent->kind == IR_ENTITY_SPILLSLOT);
		assert(outent->kind == IR_ENTITY_SPILLSLOT);
		unsigned entsize = inent->attr.spillslot.size;
		unsigned entsize2 = outent->attr.spillslot.size;
		ir_node *mem = get_irn_n(node, i);

		/* work around cases where entities have different sizes */
		if (entsize2 < entsize)
			entsize = entsize2;

		int offset = 0;
		do {
			x86_insn_size_t size;
			if (entsize%2 == 1) {
				size = X86_SIZE_8;
			} else if (entsize % 4 == 2) {
				size = X86_SIZE_16;
			} else if (entsize % 8 == 4) {
				size = X86_SIZE_32;
			} else {
				assert(entsize%8 == 0);
				size = X86_SIZE_64;
			}

			ir_node *push = create_push(node, node, sp, mem, inent, size);
			sp = create_spproj(push, pn_amd64_push_am_stack);
			get_amd64_addr_attr(push)->addr.immediate.offset = offset;

			unsigned bytes = x86_bytes_from_size(size);
			offset  += bytes;
			entsize -= bytes;
		} while(entsize > 0);
		set_irn_n(node, i, new_r_Bad(irg, mode_X));
	}

	/* create pops */
	for (int i = arity; i-- > 0; ) {
		ir_entity *inent = be_get_MemPerm_in_entity(node, i);
		ir_entity *outent = be_get_MemPerm_out_entity(node, i);
		assert(inent->kind == IR_ENTITY_SPILLSLOT);
		assert(outent->kind == IR_ENTITY_SPILLSLOT);
		unsigned entsize = outent->attr.spillslot.size;
		unsigned entsize2 = inent->attr.spillslot.size;

		/* work around cases where entities have different sizes */
		if (entsize2 < entsize)
			entsize = entsize2;

		int      offset = entsize;
		ir_node *pop;
		do {
			x86_insn_size_t size;
			if (entsize%2 == 1) {
				size = X86_SIZE_8;
			} else if (entsize % 4 == 2) {
				size = X86_SIZE_16;
			} else if (entsize % 8 == 4) {
				size = X86_SIZE_32;
			} else {
				assert(entsize%8 == 0);
				size = X86_SIZE_64;
			}

			pop = create_pop(node, node, sp, outent, size);
			sp  = create_spproj(pop, pn_amd64_pop_am_stack);

			unsigned bytes = x86_bytes_from_size(size);
			offset  -= bytes;
			entsize -= bytes;
			get_amd64_addr_attr(pop)->addr.immediate.offset = offset;
		} while(entsize > 0);
		pops[i] = pop;
	}

	ir_node *const keep = be_new_Keep_one(sp);
	sched_replace(node, keep);

	/* exchange memprojs */
	foreach_out_edge_safe(node, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		int p = get_Proj_num(proj);

		assert(p < arity);

		set_Proj_pred(proj, pops[p]);
		set_Proj_num(proj, pn_amd64_pop_am_M);
	}

	/* remove memperm */
	kill_node(node);
}

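/**
 * Block walker: lower all MemPerm nodes left in the schedule after
 * register allocation.
 */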
static void amd64_after_ra_walker(ir_node *block, void *data)
{
	(void) data;

	sched_foreach_reverse_safe(block, node) {
		if (be_is_MemPerm(node)) {
			transform_MemPerm(node);
		}
	}
}

/**
 * rewrite unsigned long -> float/double conversion
 * x86_64 only has a signed conversion, so we do some crazy SSE construction
 * instead (we first saw this pattern in LLVM): We split the 64-bit value into
 * two 32-bit values and place them into the mantissa parts of appropriately
 * chosen float values, then add the two floats together. In pseudo code:
 *
 * a = (vector unsigned long, unsigned long) x;
 * b = (vector unsigned, unsigned, unsigned, unsigned)
 *       upper half of 0x1p+52, upper half of 0x1p+84, 0, 0
 * c = repack (a[0], b[0], a[1], b[1])
 * d = (vector double) 0x1p+52, 0x1p+84
 * e = c - d
 * f = e[0] + e[1]
 */
static void rewrite_unsigned_float_Conv(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *block  = get_nodes_block(node);
	ir_node   *in     = get_Conv_op(node);
	ir_node   *in_xmm = new_r_Conv(block, in, amd64_mode_xmm);
	ir_tarval *magic0
		= new_integer_tarval_from_str("4530000043300000", 16, 0, 16,
		                              amd64_mode_xmm);
	ir_node   *const0 = new_r_Const(irg, magic0);
	collect_new_start_block_node(const0);
	ir_node   *punpck = new_bd_amd64_l_punpckldq(dbgi, block, in_xmm, const0);
	ir_tarval *magic1
		= new_integer_tarval_from_str("45300000000000004330000000000000", 32,
		                              0, 16, amd64_mode_xmm);
	ir_node   *const1 = new_r_Const(irg, magic1);
	collect_new_start_block_node(const1);
	ir_node   *subpd  = new_bd_amd64_l_subpd(dbgi, block, punpck, const1);
	ir_node   *haddpd = new_bd_amd64_l_haddpd(dbgi, block, subpd, subpd);
	ir_mode   *mode   = get_irn_mode(node);
	ir_node   *conv   = new_r_Conv(block, haddpd, mode);
	exchange(node, conv);
}

/* Creates a 64-bit constant with only the sign bit set,
 * i.e. returns 0x8000000000000000
 */
static ir_node *create_sign_bit_const(ir_graph *irg)
{
	ir_tarval *sign_tv = create_sign_tv(mode_Ls);
	return new_r_Const(irg, sign_tv);
}

/* rewrite float/double -> unsigned long conversion
 * x86_64 only has a signed conversion, so we rewrite to the following:
 *
 * if (x >= 9223372036854775808.) {
 *   converted = (long)(x - 9223372036854775808.) ^ 0x8000000000000000;
 * } else {
 *   converted = (long)x;
 * }
 * return (unsigned long)converted;
 */
static void rewrite_float_unsigned_Conv(ir_node *node)
{
	ir_graph *irg        = get_irn_irg(node);
	dbg_info *dbgi       = get_irn_dbg_info(node);
	ir_node *lower_block = get_nodes_block(node);
	ir_mode *dest_mode   = get_irn_mode(node);

	part_block(node);

	ir_node   *block    = get_nodes_block(node);
	ir_node   *fp_x     = get_Conv_op(node);
	ir_mode   *src_mode = get_irn_mode(fp_x);
	double     d_const  = 9223372036854775808.;
	ir_tarval *tv       = new_tarval_from_double(d_const, src_mode);
	ir_node   *fp_const = new_r_Const(irg, tv);
	collect_new_start_block_node(fp_const);

	/* Test if the sign bit is needed */
	ir_node *cmp         = new_rd_Cmp(dbgi, block, fp_x, fp_const,
	                                 ir_relation_greater_equal);
	ir_node *cond        = new_rd_Cond(dbgi, block, cmp);
	ir_node *proj_true   = new_r_Proj(cond, mode_X, pn_Cond_true);
	ir_node *proj_false  = new_r_Proj(cond, mode_X, pn_Cond_false);
	ir_node *in_true[1]  = { proj_true };
	ir_node *in_false[1] = { proj_false };

	/* true block: Do some arithmetic to use the signed conversion */
	ir_node *true_block  = new_r_Block(irg, ARRAY_SIZE(in_true), in_true);
	ir_node *true_jmp    = new_r_Jmp(true_block);
	ir_node *sub         = new_r_Sub(true_block, fp_const, fp_x, src_mode);
	ir_node *sub_conv    = new_rd_Conv(dbgi, true_block, sub, mode_Ls);
	ir_node *sign_bit    = create_sign_bit_const(irg);
	collect_new_start_block_node(sign_bit);
	ir_node *xor         = new_r_Eor(true_block, sub_conv, sign_bit, mode_Ls);
	ir_node *true_res    = new_rd_Conv(dbgi, true_block, xor, dest_mode);

	/* false block: Simply convert */
	ir_node *false_block  = new_r_Block(irg, ARRAY_SIZE(in_false), in_false);
	ir_node *false_jmp    = new_r_Jmp(false_block);
	ir_node *false_signed = new_rd_Conv(dbgi, false_block, fp_x, mode_Ls);
	ir_node *false_res    = new_rd_Conv(dbgi, false_block, false_signed,
	                                    dest_mode);

	/* lower block */
	ir_node *lower_in[2] = { true_jmp, false_jmp };
	ir_node *phi_in[2]   = { true_res, false_res };

	set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
	ir_node *phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in,
	                         dest_mode);
	collect_new_phi_node(phi);
	exchange(node, phi);
}

static bool amd64_rewrite_Conv(ir_node *node)
{
	ir_mode *to_mode    = get_irn_mode(node);
	ir_node *op         = get_Conv_op(node);
	ir_mode *from_mode  = get_irn_mode(op);
	bool     to_float   = mode_is_float(to_mode);
	bool     from_float = mode_is_float(from_mode);

	if (to_float && !from_float && !mode_is_signed(from_mode)
	    && get_mode_size_bits(from_mode) == 64
	    && to_mode != x86_mode_E) {
		rewrite_unsigned_float_Conv(node);
		return true;
	} else if (from_float && !to_float && !mode_is_signed(to_mode)
	           && get_mode_size_bits(to_mode) == 64
	           && from_mode != x86_mode_E) {
		rewrite_float_unsigned_Conv(node);
		return true;
	}

	return false;
}

static void amd64_intrinsics_walker(ir_node *node, void *data)
{
	bool *changed = (bool*)data;
	if (is_Conv(node)) {
		if (amd64_rewrite_Conv(node))
			*changed = true;
	}
}

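/**
 * Rewrite Conv nodes between unsigned 64-bit integers and floating-point
 * values, which have no direct equivalent on amd64.
 */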
static void amd64_handle_intrinsics(ir_graph *irg)
{
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
	collect_phiprojs_and_start_block_nodes(irg);
	bool changed = false;
	irg_walk_graph(irg, amd64_intrinsics_walker, NULL, &changed);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);

	if (changed) {
		confirm_irg_properties(irg,
		        IR_GRAPH_PROPERTY_NO_BADS
		        | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
		        | IR_GRAPH_PROPERTY_MANY_RETURNS
		        | IR_GRAPH_PROPERTY_ONE_RETURN);
	}
}

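/**
 * Callback for the frame entity coalescer: assign the chosen frame entity
 * to the address immediate of @p node.
 */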
static void amd64_set_frame_entity(ir_node *node, ir_entity *entity,
                                   const ir_type *type)
{
	(void)type;
	amd64_addr_attr_t *attr = get_amd64_addr_attr(node);
	attr->addr.immediate.entity = entity;
}

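/** Return a frame entity type matching the given instruction access size. */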
static ir_type *get_type_for_insn_size(x86_insn_size_t const size)
{
	/* TODO: do not hardcode node names here */
	switch (size) {
	case X86_SIZE_128: return get_type_for_mode(amd64_mode_xmm);
	case X86_SIZE_80:  return x86_type_E;
	default:           return get_type_for_mode(mode_Lu);
	}
}

/**
 * Collects nodes that need frame entities assigned.
 */
static void amd64_collect_frame_entity_nodes(ir_node *node, void *data)
{
	if (!is_amd64_irn(node))
		return;

	/* Disable coalescing for "returns twice" calls: In case of setjmp/longjmp
	 * our control flow graph isn't completely correct: There are no backedges
	 * from longjmp to the setjmp => coalescing would produce wrong results. */
	be_fec_env_t *const env = (be_fec_env_t*)data;
	if (is_amd64_call(node)) {
		const amd64_call_addr_attr_t    *attrs = get_amd64_call_addr_attr_const(node);
		const ir_type                   *type  = attrs->call_tp;
		const mtp_additional_properties  mtp
			= get_method_additional_properties(type);
		if (mtp & mtp_property_returns_twice)
			be_forbid_coalescing(env);
	}

	/* we only need to report Load nodes */
	if (!amd64_loads(node))
		return;

	const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
	x86_imm32_t       const *imm  = &attr->addr.immediate;
	if (imm->kind == X86_IMM_FRAMEENT && imm->entity == NULL) {
		const ir_type *type = get_type_for_insn_size(attr->base.size);
		be_load_needs_frame_entity(env, node, type);
	}
}

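/** Return the input index of @p ret whose value is assigned to rbp. */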
static int determine_rbp_input(ir_node *ret)
{
	arch_register_t const *const bp = &amd64_registers[REG_RBP];
	foreach_irn_in(ret, i, input) {
		if (arch_get_irn_register(input) == bp)
			return i;
	}
	panic("no rbp input found at %+F", ret);
}

/**
 * Prepare the graph and perform code selection.
 */
static void amd64_select_instructions(ir_graph *irg)
{
	amd64_adjust_pic(irg);

	be_timer_push(T_CODEGEN);
	amd64_transform_graph(irg);
	be_timer_pop(T_CODEGEN);

	be_dump(DUMP_BE, irg, "code-selection");

	optimize_graph_df(irg);

	be_dump(DUMP_BE, irg, "opt");
}

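/**
 * Create the function epilogue before the return node @p ret: either a leave
 * when a frame pointer is used, or an IncSP releasing the frame otherwise.
 */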
static void introduce_epilogue(ir_node *ret, bool omit_fp)
{
	ir_graph *irg      = get_irn_irg(ret);
	ir_node  *block    = get_nodes_block(ret);
	ir_node  *first_sp = get_irn_n(ret, n_amd64_ret_stack);
	ir_node  *curr_sp  = first_sp;

	if (!omit_fp) {
		int      const n_rbp    = determine_rbp_input(ret);
		ir_node       *curr_bp  = get_irn_n(ret, n_rbp);
		ir_node       *curr_mem = get_irn_n(ret, n_amd64_ret_mem);
		ir_node *const leave    = new_bd_amd64_leave(NULL, block, curr_bp, curr_mem);
		curr_mem = be_new_Proj(leave, pn_amd64_leave_M);
		curr_bp = be_new_Proj_reg(leave, pn_amd64_leave_frame, &amd64_registers[REG_RBP]);
		curr_sp = be_new_Proj_reg(leave, pn_amd64_leave_stack, &amd64_registers[REG_RSP]);
		sched_add_before(ret, leave);

		set_irn_n(ret, n_amd64_ret_mem, curr_mem);
		set_irn_n(ret, n_rbp,           curr_bp);
	} else {
		ir_type *frame_type = get_irg_frame_type(irg);
		unsigned frame_size = get_type_size(frame_type);
		ir_node *incsp = amd64_new_IncSP(block, curr_sp, -(int)frame_size,
										 true);
		sched_add_before(ret, incsp);
		curr_sp = incsp;
	}
	set_irn_n(ret, n_amd64_ret_stack, curr_sp);

	/* keep verifier happy... */
	if (get_irn_n_edges(first_sp) == 0 && is_Proj(first_sp)) {
		kill_node(first_sp);
	}
}

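/**
 * Create the function prologue after the Start node: push rbp and set up the
 * frame pointer if one is used, and allocate the stack frame with an IncSP.
 */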
static void introduce_prologue(ir_graph *const irg, bool omit_fp)
{
	const arch_register_t *sp         = &amd64_registers[REG_RSP];
	const arch_register_t *bp         = &amd64_registers[REG_RBP];
	ir_node               *start      = get_irg_start(irg);
	ir_node               *block      = get_nodes_block(start);
	ir_type               *frame_type = get_irg_frame_type(irg);
	unsigned               frame_size = get_type_size(frame_type);
	ir_node               *initial_sp = be_get_Start_proj(irg, sp);

	if (!omit_fp) {
		/* push rbp */
		ir_node *const mem        = get_irg_initial_mem(irg);
		ir_node *const initial_bp = be_get_Start_proj(irg, bp);
		ir_node *const push       = new_bd_amd64_push_reg(NULL, block, initial_sp, mem, initial_bp, X86_SIZE_64);
		sched_add_after(start, push);
		ir_node *const curr_mem   = be_new_Proj(push, pn_amd64_push_reg_M);
		edges_reroute_except(mem, curr_mem, push);
		ir_node *const curr_sp    = be_new_Proj_reg(push, pn_amd64_push_reg_stack, sp);

		/* move rsp to rbp */
		ir_node *const curr_bp = be_new_Copy(block, curr_sp);
		sched_add_after(push, curr_bp);
		arch_copy_irn_out_info(curr_bp, 0, initial_bp);
		edges_reroute_except(initial_bp, curr_bp, push);

		ir_node *incsp = amd64_new_IncSP(block, curr_sp, frame_size, false);
		sched_add_after(curr_bp, incsp);
		edges_reroute_except(initial_sp, incsp, push);

		/* make sure the initial IncSP is really used by someone */
		be_keep_if_unused(incsp);
	} else {
		ir_node *const incsp = amd64_new_IncSP(block, initial_sp,
											   frame_size, false);
		sched_add_after(start, incsp);
		edges_reroute_except(initial_sp, incsp, incsp);
	}
}

static void introduce_prologue_epilogue(ir_graph *irg, bool omit_fp)
{
	/* introduce epilogue for every return node */
	foreach_irn_in(get_irg_end_block(irg), i, ret) {
		assert(is_amd64_ret(ret));
		introduce_epilogue(ret, omit_fp);
	}

	introduce_prologue(irg, omit_fp);
}

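/** Check whether the base register of address @p addr of @p node is rsp. */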
static bool node_has_sp_base(ir_node const *const node,
                             x86_addr_t const *const addr)
{
	if (!x86_addr_variant_has_base(addr->variant))
		return false;
	arch_register_t const *const base_reg
		= arch_get_irn_register_in(node, addr->base_input);
	return base_reg == &amd64_registers[REG_RSP];
}

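/**
 * Turn frame entity references in the address of @p node into concrete
 * offsets relative to the stack pointer value at the node or to rbp.
 */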
static void amd64_determine_frameoffset(ir_node *node, int sp_offset)
{
	if (!is_amd64_irn(node)
	 || !amd64_has_addr_attr(get_amd64_attr_const(node)->op_mode))
		return;

	x86_addr_t *const addr = &get_amd64_addr_attr(node)->addr;
	if (addr->immediate.kind == X86_IMM_FRAMEENT) {
		addr->immediate.offset += get_entity_offset(addr->immediate.entity);
		addr->immediate.entity  = NULL;
		addr->immediate.kind    = X86_IMM_FRAMEOFFSET;
	}

	if (addr->immediate.kind == X86_IMM_FRAMEOFFSET) {
		if (node_has_sp_base(node, addr))
			addr->immediate.offset += sp_offset;
		else {
			/* we calculate offsets relative to the SP value at function begin,
			 * but RBP points after the saved old frame pointer */
			addr->immediate.offset += AMD64_REGISTER_SIZE;
		}
		addr->immediate.kind = X86_IMM_VALUE;
	}
}

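/**
 * Stack pointer simulation callback: track the stack pointer offset across
 * pushes, pops and leave, and resolve frame offsets along the way.
 */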
static void amd64_sp_sim(ir_node *const node, stack_pointer_state_t *state)
{
	/* Pop nodes modify the stack pointer before calculating destination
	 * address, so do this first */
	if (is_amd64_pop_am(node)) {
		const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
		state->offset -= x86_bytes_from_size(attr->base.size);
	}

	amd64_determine_frameoffset(node, state->offset);

	if (is_amd64_push_am(node)) {
		const amd64_addr_attr_t *attr = get_amd64_addr_attr_const(node);
		state->offset       += x86_bytes_from_size(attr->base.size);
	} else if (is_amd64_push_reg(node)) {
		/* 64-bit register size */
		state->offset       += AMD64_REGISTER_SIZE;
	} else if (is_amd64_leave(node)) {
		state->offset        = 0;
		state->align_padding = 0;
	} else if (is_amd64_sub_sp(node)) {
		state->align_padding = 0;
	}
}

/**
 * Called immediately before the emit phase.
 */
static void amd64_finish_and_emit(ir_graph *irg)
{
	bool omit_fp = amd64_get_irg_data(irg)->omit_fp;

	/* create and coalesce frame entities */
	be_fec_env_t *fec_env = be_new_frame_entity_coalescer(irg);
	irg_walk_graph(irg, NULL, amd64_collect_frame_entity_nodes, fec_env);
	be_assign_entities(fec_env, amd64_set_frame_entity, omit_fp);
	be_free_frame_entity_coalescer(fec_env);

	ir_type *const frame = get_irg_frame_type(irg);
	be_sort_frame_entities(frame, omit_fp);
	unsigned const misalign = AMD64_REGISTER_SIZE; /* return address on stack */
	int      const begin    = omit_fp ? 0 : -AMD64_REGISTER_SIZE;
	be_layout_frame_type(frame, begin, misalign);

	irg_block_walk_graph(irg, NULL, amd64_after_ra_walker, NULL);

	introduce_prologue_epilogue(irg, omit_fp);

	/* fix stack entity offsets */
	be_fix_stack_nodes(irg, &amd64_registers[REG_RSP]);
	be_birg_from_irg(irg)->non_ssa_regs = NULL;
	unsigned const p2align = AMD64_PO2_STACK_ALIGNMENT;
	be_sim_stack_pointer(irg, misalign, p2align, amd64_sp_sim);

	/* Fix 2-address code constraints. */
	amd64_finish_irg(irg);

	amd64_simulate_graph_x87(irg);

	amd64_peephole_optimization(irg);

	/* emit code */
	be_timer_push(T_EMIT);
	amd64_emit_function(irg);
	be_timer_pop(T_EMIT);
}

static void amd64_finish(void)
{
	amd64_free_opcodes();
}

static const regalloc_if_t amd64_regalloc_if = {
	.spill_cost  = 7,
	.reload_cost = 5,
	.new_spill   = amd64_new_spill,
	.new_reload  = amd64_new_reload,
};

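/**
 * Run the backend pipeline for all graphs: instruction selection, scheduling,
 * register allocation, finishing and emitting.
 */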
static void amd64_generate_code(FILE *output, const char *cup_name)
{
	amd64_constants = pmap_create();
	be_begin(output, cup_name);
	unsigned *const sp_is_non_ssa = rbitset_alloca(N_AMD64_REGISTERS);
	rbitset_set(sp_is_non_ssa, REG_RSP);

	foreach_irp_irg(i, irg) {
		if (!be_step_first(irg))
			continue;

		struct obstack *obst = be_get_be_obst(irg);
		be_birg_from_irg(irg)->isa_link = OALLOCZ(obst, amd64_irg_data_t);

		be_birg_from_irg(irg)->non_ssa_regs = sp_is_non_ssa;
		amd64_select_instructions(irg);

		be_step_schedule(irg);

		be_timer_push(T_RA_PREPARATION);
		be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL,
						   NULL, NULL);
		be_timer_pop(T_RA_PREPARATION);

		be_step_regalloc(irg, &amd64_regalloc_if);

		amd64_finish_and_emit(irg);

		be_step_last(irg);
	}

	be_finish();
	pmap_destroy(amd64_constants);
}

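/**
 * Lower constructs the amd64 backend cannot handle directly: compound call
 * parameters, Switch, mode_b values, Alloc, CopyB and builtins.
 */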
static void amd64_lower_for_target(void)
{
	/* lower compound param handling */
	lower_calls_with_compounds(LF_RETURN_HIDDEN, NULL);
	be_after_irp_transform("lower-calls");

	foreach_irp_irg(i, irg) {
		lower_switch(irg, 4, 256, mode_Iu);
		be_after_transform(irg, "lower-switch");
	}

	foreach_irp_irg(i, irg) {
		/* lower for mode_b stuff */
		ir_lower_mode_b(irg, mode_Lu);
		be_after_transform(irg, "lower-modeb");
		lower_alloc(irg, AMD64_PO2_STACK_ALIGNMENT);
		be_after_transform(irg, "lower-alloc");
	}

	foreach_irp_irg(i, irg) {
		/* Turn all small CopyBs into loads/stores, and turn all bigger
		 * CopyBs into memcpy calls, because we cannot handle CopyB nodes
		 * during code generation yet.
		 * TODO:  Adapt this once custom CopyB handling is implemented. */
		lower_CopyB(irg, 64, 65, true);
		be_after_transform(irg, "lower-copyb");
	}

	ir_builtin_kind supported[6];
	size_t  s = 0;
	supported[s++] = ir_bk_ffs;
	supported[s++] = ir_bk_clz;
	supported[s++] = ir_bk_ctz;
	supported[s++] = ir_bk_compare_swap;
	supported[s++] = ir_bk_saturating_increment;
	supported[s++] = ir_bk_va_start;

	assert(s <= ARRAY_SIZE(supported));
	lower_builtins(s, supported);
	be_after_irp_transform("lower-builtins");
}

static int amd64_is_mux_allowed(ir_node *sel, ir_node *mux_false,
                                ir_node *mux_true)
{
	/* optimizable by middleend */
	if (ir_is_optimizable_mux(sel, mux_false, mux_true))
		return true;
	return false;
}

static const ir_settings_arch_dep_t amd64_arch_dep = {
	.also_use_subs        = true,
	.maximum_shifts       = 4,
	.highest_shift_amount = 63,
	.evaluate             = NULL,
	.allow_mulhs          = true,
	.allow_mulhu          = true,
	.max_bits_for_mulh    = 32,
};

static backend_params amd64_backend_params = {
	.experimental                  = "the amd64 backend is highly experimental and unfinished (consider the ia32 backend)",
	.byte_order_big_endian         = false,
	.pic_supported                 = true,
	.unaligned_memaccess_supported = true,
	.modulo_shift                  = 32,
	.dep_param                     = &amd64_arch_dep,
	.allow_ifconv                  = amd64_is_mux_allowed,
	.machine_size                  = 64,
	.mode_float_arithmetic         = NULL,  /* will be set later */
	.type_long_long                = NULL,  /* will be set later */
	.type_unsigned_long_long       = NULL,  /* will be set later */
	.type_long_double              = NULL,  /* will be set later */
	.stack_param_align             = 8,
	.float_int_overflow            = ir_overflow_indefinite,
	.vararg                        = {
		.va_list_type = NULL,  /* Will be set later */
		.lower_va_arg = amd64_lower_va_arg,
	},
};

static const backend_params *amd64_get_backend_params(void) {
	return &amd64_backend_params;
}

static int amd64_is_valid_clobber(const char *clobber)
{
	return x86_parse_clobber(amd64_additional_clobber_names, clobber) != NULL;
}

static void amd64_init_types(void)
{
	ir_mode *const ptr_mode = new_reference_mode("p64", irma_twos_complement, 64, 64);
	set_modeP(ptr_mode);

	/* use an int128 mode for xmm registers for now, so that firm allows us to
	 * create constants with the xmm mode... */
	amd64_mode_xmm = new_int_mode("x86_xmm", irma_twos_complement, 128, 0, 0);

	x86_init_x87_type();
	amd64_backend_params.type_long_double = x86_type_E;

	amd64_backend_params.vararg.va_list_type = amd64_build_va_list_type();
}

static void amd64_init(void)
{
	amd64_init_types();
	amd64_register_init();
	amd64_create_opcodes();
	amd64_cconv_init();
	x86_set_be_asm_constraint_support(&amd64_asm_constraints);
}

static unsigned amd64_get_op_estimated_cost(const ir_node *node)
{
	(void)node;/* TODO */
	return 1;
}

static arch_isa_if_t const amd64_isa_if = {
	.n_registers           = N_AMD64_REGISTERS,
	.registers             = amd64_registers,
	.n_register_classes    = N_AMD64_CLASSES,
	.register_classes      = amd64_reg_classes,
	.init                  = amd64_init,
	.finish                = amd64_finish,
	.get_params            = amd64_get_backend_params,
	.generate_code         = amd64_generate_code,
	.lower_for_target      = amd64_lower_for_target,
	.is_valid_clobber      = amd64_is_valid_clobber,
	.handle_intrinsics     = amd64_handle_intrinsics,
	.get_op_estimated_cost = amd64_get_op_estimated_cost,
};

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_amd64)
void be_init_arch_amd64(void)
{
	be_register_isa_if("amd64", &amd64_isa_if);
	FIRM_DBG_REGISTER(dbg, "firm.be.amd64.cg");

	static const lc_opt_table_entry_t options[] = {
		LC_OPT_ENT_BOOL("x64abi",      "Use x64 ABI (otherwise system V)", &amd64_use_x64_abi),
		LC_OPT_ENT_BOOL("no-red-zone", "gcc compatibility",                &amd64_use_red_zone),
		LC_OPT_LAST
	};
	lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *amd64_grp = lc_opt_get_grp(be_grp, "amd64");
	lc_opt_add_table(amd64_grp, options);

	amd64_init_transform();
}