/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   emit assembler for a backend graph
 */
#include <limits.h>

#include "be_t.h"
#include "error.h"
#include "xmalloc.h"
#include "tv.h"
#include "iredges.h"
#include "debug.h"
#include "irgwalk.h"
#include "irop_t.h"
#include "irargs_t.h"
#include "irprog.h"

#include "besched.h"
#include "begnuas.h"
#include "beblocksched.h"

#include "amd64_emitter.h"
#include "gen_amd64_emitter.h"
#include "gen_amd64_regalloc_if.h"
#include "amd64_nodes_attr.h"
#include "amd64_new_nodes.h"

#include "benode.h"

/*************************************************************
 *             _       _    __   _          _
 *            (_)     | |  / _| | |        | |
 *  _ __  _ __ _ _ __ | |_| |_  | |__   ___| |_ __   ___ _ __
 * | '_ \| '__| | '_ \| __|  _| | '_ \ / _ \ | '_ \ / _ \ '__|
 * | |_) | |  | | | | | |_| |   | | | |  __/ | |_) |  __/ |
 * | .__/|_|  |_|_| |_|\__|_|   |_| |_|\___|_| .__/ \___|_|
 * | |                                       | |
 * |_|                                       |_|
 *************************************************************/
/**
 * Returns the target block for a control flow node.
 */
static ir_node *get_cfop_target_block(const ir_node *irn)
{
	/* the link of every control flow node is set to its target block by
	 * amd64_gen_labels() before emission starts */
	return (ir_node*)get_irn_link(irn);
}
/**
 * Emits the AT&T operand size suffix ('b', 'w', 'l' or 'q') matching
 * the given instruction mode.
 */
static void amd64_emit_insn_mode_suffix(amd64_insn_mode_t mode)
{
	char suffix;
	switch (mode) {
	case INSN_MODE_8:
		suffix = 'b';
		break;
	case INSN_MODE_16:
		suffix = 'w';
		break;
	case INSN_MODE_32:
		suffix = 'l';
		break;
	case INSN_MODE_64:
		suffix = 'q';
		break;
	default:
		panic("invalid insn mode");
	}
	be_emit_char(suffix);
}

/**
 * Emits the AT&T size suffix matching the bit size of an ir_mode.
 * Only integer and reference modes are supported.
 */
static void amd64_emit_mode_suffix(const ir_mode *mode)
{
	assert(mode_is_int(mode) || mode_is_reference(mode));
	char suffix;
	switch (get_mode_size_bits(mode)) {
	case 8:
		suffix = 'b';
		break;
	case 16:
		suffix = 'w';
		break;
	case 32:
		suffix = 'l';
		break;
	case 64:
		suffix = 'q';
		break;
	default:
		panic("Can't output mode_suffix for %+F", mode);
	}
	be_emit_char(suffix);
}
static const char *get_8bit_name(const arch_register_t *reg)
84
{
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
	switch (reg->index) {
	case REG_GP_RAX: return "al";
	case REG_GP_RBX: return "bl";
	case REG_GP_RCX: return "cl";
	case REG_GP_RDX: return "dl";
	case REG_GP_RSP: return "spl";
	case REG_GP_RBP: return "bpl";
	case REG_GP_RSI: return "sil";
	case REG_GP_RDI: return "dil";
	case REG_GP_R8:  return "r8b";
	case REG_GP_R9:  return "r9b";
	case REG_GP_R10: return "r10b";
	case REG_GP_R11: return "r11b";
	case REG_GP_R12: return "r12b";
	case REG_GP_R13: return "r13b";
	case REG_GP_R14: return "r14b";
	case REG_GP_R15: return "r15b";
	}
	panic("unexpected register number");
104
105
}

106
static const char *get_16bit_name(const arch_register_t *reg)
107
{
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
	switch (reg->index) {
	case REG_GP_RAX: return "ax";
	case REG_GP_RBX: return "bx";
	case REG_GP_RCX: return "cx";
	case REG_GP_RDX: return "dx";
	case REG_GP_RSP: return "sp";
	case REG_GP_RBP: return "bp";
	case REG_GP_RSI: return "si";
	case REG_GP_RDI: return "di";
	case REG_GP_R8:  return "r8w";
	case REG_GP_R9:  return "r9w";
	case REG_GP_R10: return "r10w";
	case REG_GP_R11: return "r11w";
	case REG_GP_R12: return "r12w";
	case REG_GP_R13: return "r13w";
	case REG_GP_R14: return "r14w";
	case REG_GP_R15: return "r15w";
	}
	panic("unexpected register number");
127
128
}

129
static const char *get_32bit_name(const arch_register_t *reg)
130
{
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
	switch (reg->index) {
	case REG_GP_RAX: return "eax";
	case REG_GP_RBX: return "ebx";
	case REG_GP_RCX: return "ecx";
	case REG_GP_RDX: return "edx";
	case REG_GP_RSP: return "esp";
	case REG_GP_RBP: return "ebp";
	case REG_GP_RSI: return "esi";
	case REG_GP_RDI: return "edi";
	case REG_GP_R8:  return "r8d";
	case REG_GP_R9:  return "r9d";
	case REG_GP_R10: return "r10d";
	case REG_GP_R11: return "r11d";
	case REG_GP_R12: return "r12d";
	case REG_GP_R13: return "r13d";
	case REG_GP_R14: return "r14d";
	case REG_GP_R15: return "r15d";
	}
	panic("unexpected register number");
150
151
152
153
154
155
156
157
158
159
}

/* Emits a register operand in AT&T syntax using its full 64 bit name. */
static void emit_register(const arch_register_t *reg)
{
	be_emit_char('%');
	be_emit_string(reg->name);
}

static void emit_register_mode(const arch_register_t *reg, const ir_mode *mode)
{
160
	const char *name;
161
	switch (get_mode_size_bits(mode)) {
162
163
164
165
166
167
	case 8:  name = get_8bit_name(reg);  break;
	case 16: name = get_16bit_name(reg); break;
	case 32: name = get_32bit_name(reg); break;
	case 64: name = reg->name;           break;
	default:
		panic("invalid mode");
168
	}
169
170
	be_emit_char('%');
	be_emit_string(name);
171
172
173
174
175
}

/**
 * Emits the name of @p reg sized according to the given instruction
 * mode.
 */
static void emit_register_insn_mode(const arch_register_t *reg,
                                    amd64_insn_mode_t mode)
{
	const char *reg_name;
	switch (mode) {
	case INSN_MODE_8:
		reg_name = get_8bit_name(reg);
		break;
	case INSN_MODE_16:
		reg_name = get_16bit_name(reg);
		break;
	case INSN_MODE_32:
		reg_name = get_32bit_name(reg);
		break;
	case INSN_MODE_64:
		reg_name = reg->name;
		break;
	default:
		panic("invalid mode");
	}
	be_emit_char('%');
	be_emit_string(reg_name);
}

/* Modifier flags parsed from '%' conversions in amd64_emitf() format
 * strings. */
typedef enum amd64_emit_mod_t {
	EMIT_NONE        = 0,
	EMIT_RESPECT_LS  = 1U << 0, /* '#': size register by the node's ls_mode */
	EMIT_IGNORE_MODE = 1U << 1, /* '^': always emit the full 64 bit register */
} amd64_emit_mod_t;
ENUM_BITSET(amd64_emit_mod_t)
/**
 * Emits an immediate operand: an optional (possibly negated) symbolic
 * constant, followed by a numeric offset when present.
 */
static void amd64_emit_immediate(const amd64_imm_t *const imm)
{
	if (imm->symconst != NULL) {
		if (imm->sc_sign)
			be_emit_char('-');
		be_gas_emit_entity(imm->symconst);
	}
	if (imm->symconst == NULL || imm->offset != 0) {
		if (imm->symconst != NULL) {
			/* offset relative to a symbol: decimal with explicit sign */
			be_emit_irprintf("%+ld", imm->offset);
		} else {
			/* plain constant: hexadecimal */
			be_emit_irprintf("0x%lX", imm->offset);
		}
	}
}

/**
 * Emits one assembler line for @p node, expanding a printf-like format
 * language:
 *
 *   %%    a literal '%'
 *   %C    the immediate stored in the node's amd64 attribute
 *   %Dx   output register x of the node
 *   %E    an ir_entity* (va_arg)
 *   %L    the control flow target block of the node
 *   %O    the fp_offset of the node's SymConst attribute (omitted if 0)
 *   %R    an arch_register_t* (va_arg)
 *   %Sx   input register x of the node
 *   %M    the size suffix for the node's mode
 *   %c    a sign-/zero-extension mnemonic part for the node's ls_mode
 *   %d    an int (va_arg)
 *   %s    a C string (va_arg)
 *   %u    an unsigned (va_arg)
 *
 * Register and suffix conversions honor the modifiers '#'
 * (EMIT_RESPECT_LS: use the node's ls_mode) and '^' (EMIT_IGNORE_MODE:
 * always use the full 64 bit register).  A '\n' in the format starts a
 * new assembler line.
 */
void amd64_emitf(ir_node const *const node, char const *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);

	be_emit_char('\t');
	for (;;) {
		char const *start = fmt;

		/* copy literal text up to the next conversion, newline or end */
		while (*fmt != '%' && *fmt != '\n' && *fmt != '\0')
			++fmt;
		if (fmt != start) {
			be_emit_string_len(start, fmt - start);
		}

		/* '\n' finishes the current assembler line and starts a new one */
		if (*fmt == '\n') {
			be_emit_char('\n');
			be_emit_write_line();
			be_emit_char('\t');
			++fmt;
			continue;
		}

		if (*fmt == '\0')
			break;

		++fmt;

		/* collect optional modifier characters */
		amd64_emit_mod_t mod = EMIT_NONE;
		for (;;) {
			switch (*fmt) {
			case '#': mod |= EMIT_RESPECT_LS;  break;
			case '^': mod |= EMIT_IGNORE_MODE; break;
			default:
				goto end_of_mods;
			}
			++fmt;
		}
end_of_mods:

		switch (*fmt++) {
			arch_register_t const *reg;

			case '%':
				be_emit_char('%');
				break;

			case 'C': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				amd64_emit_immediate(&attr->imm);
				break;
			}

			case 'D':
				/* 'D' must be followed by a single digit output number */
				if (*fmt < '0' || '9' <= *fmt)
					goto unknown;
				reg = arch_get_irn_register_out(node, *fmt++ - '0');
				goto emit_R;

			case 'E': {
				ir_entity const *const ent = va_arg(ap, ir_entity const*);
				be_gas_emit_entity(ent);
				break;
			}

			case 'L': {
				ir_node *const block = get_cfop_target_block(node);
				be_gas_emit_block_name(block);
				break;
			}

			case 'O': {
				amd64_SymConst_attr_t const *const attr = get_amd64_SymConst_attr_const(node);
				if (attr->fp_offset)
					be_emit_irprintf("%d", attr->fp_offset);
				break;
			}

			case 'R':
				reg = va_arg(ap, arch_register_t const*);
emit_R:
				if (mod & EMIT_IGNORE_MODE) {
					emit_register(reg);
				} else {
					amd64_attr_t const *const attr = get_amd64_attr_const(node);
					if (mod & EMIT_RESPECT_LS) {
						emit_register_mode(reg, attr->ls_mode);
					} else {
						emit_register_insn_mode(reg, attr->data.insn_mode);
					}
				}
				break;

			case 'S': {
				/* 'S' must be followed by a single digit input number */
				int pos;
				if ('0' <= *fmt && *fmt <= '9') {
					pos = *fmt++ - '0';
				} else {
					goto unknown;
				}
				reg = arch_get_irn_register_in(node, pos);
				goto emit_R;
			}

			case 'M': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				if (mod & EMIT_RESPECT_LS) {
					amd64_emit_mode_suffix(attr->ls_mode);
				} else {
					amd64_emit_insn_mode_suffix(attr->data.insn_mode);
				}
				break;
			}

			case 'd': {
				int const num = va_arg(ap, int);
				be_emit_irprintf("%d", num);
				break;
			}

			case 's': {
				char const *const str = va_arg(ap, char const*);
				be_emit_string(str);
				break;
			}

			case 'u': {
				unsigned const num = va_arg(ap, unsigned);
				be_emit_irprintf("%u", num);
				break;
			}

			case 'c': {
				amd64_attr_t const *const attr = get_amd64_attr_const(node);
				ir_mode                  *mode = attr->ls_mode;
				/* full width needs no extension */
				if (get_mode_size_bits(mode) == 64)
					break;
				/* 32 bit operations implicitly zero the upper half */
				if (get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)
				 && attr->data.insn_mode == INSN_MODE_32)
					break;
				be_emit_char(mode_is_signed(mode) ? 's' : 'z');
				amd64_emit_mode_suffix(mode);
				break;
			}

			default:
unknown:
				panic("unknown format conversion");
		}
	}

	be_emit_finish_line_gas(node);
	va_end(ap);
}

/***********************************************************************************
 *                  _          __                                             _
 *                 (_)        / _|                                           | |
 *  _ __ ___   __ _ _ _ __   | |_ _ __ __ _ _ __ ___   _____      _____  _ __| | __
 * | '_ ` _ \ / _` | | '_ \  |  _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
 * | | | | | | (_| | | | | | | | | | | (_| | | | | | |  __/\ V  V / (_) | |  |   <
 * |_| |_| |_|\__,_|_|_| |_| |_| |_|  \__,_|_| |_| |_|\___| \_/\_/ \___/|_|  |_|\_\
 *
 ***********************************************************************************/

376
377
378
379
380
/**
 * Returns the next block in a block schedule.
 */
static ir_node *sched_next_block(const ir_node *block)
{
381
    return (ir_node*)get_irn_link(block);
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
}

/**
 * Emits an unconditional jump, omitted when the target is the
 * fallthrough block.
 */
static void emit_amd64_Jmp(const ir_node *node)
{
	/* for now, the code works for scheduled and non-scheduled blocks */
	ir_node *block      = get_nodes_block(node);
	/* we have a block schedule */
	ir_node *next_block = sched_next_block(block);

	if (get_cfop_target_block(node) != next_block) {
		amd64_emitf(node, "jmp %L");
	} else if (be_options.verbose_asm) {
		amd64_emitf(node, "/* fallthrough to %L */");
	}
}

/**
 * Emits an indirect jump through a jump table, followed by the table
 * itself.
 */
static void emit_amd64_SwitchJmp(const ir_node *node)
{
	const amd64_switch_jmp_attr_t *attr = get_amd64_switch_jmp_attr_const(node);

	/* scale by 8: one 64 bit table entry per case */
	amd64_emitf(node, "jmp *%E(,%S0,8)", attr->table_entity);
	be_emit_jump_table(node, attr->table, attr->table_entity, get_cfop_target_block);
}

/**
 * Emit a Compare with conditional branch.
 */
static void emit_amd64_Jcc(const ir_node *irn)
{
	const ir_node      *proj_true  = NULL;
	const ir_node      *proj_false = NULL;
	const ir_node      *block;
	const ir_node      *next_block;
	const char         *suffix;
	const amd64_attr_t *attr      = get_amd64_attr_const(irn);
	ir_relation         relation  = attr->ext.relation;
	ir_node            *op1       = get_irn_n(irn, 0);
	const amd64_attr_t *cmp_attr  = get_amd64_attr_const(op1);
	bool                is_signed = !cmp_attr->data.cmp_unsigned;

	assert(is_amd64_Cmp(op1));

	/* find the true and the false Proj */
	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		long nr = get_Proj_proj(proj);
		if (nr == pn_Cond_true) {
			proj_true = proj;
		} else {
			proj_false = proj;
		}
	}

	/* the Cmp inputs were swapped: mirror the relation */
	if (cmp_attr->data.ins_permuted) {
		relation = get_inversed_relation(relation);
	}

	/* for now, the code works for scheduled and non-schedules blocks */
	block = get_nodes_block(irn);

	/* we have a block schedule */
	next_block = sched_next_block(block);

	assert(relation != ir_relation_false);
	assert(relation != ir_relation_true);

	if (get_cfop_target_block(proj_true) == next_block) {
		/* exchange both proj's so the second one can be omitted */
		const ir_node *t = proj_true;

		proj_true  = proj_false;
		proj_false = t;
		relation   = get_negated_relation(relation);
	}

	/* map the relation to an x86 condition code suffix */
	switch (relation & ir_relation_less_equal_greater) {
		case ir_relation_equal:              suffix = "e"; break;
		case ir_relation_less:               suffix = is_signed ? "l"  : "b"; break;
		case ir_relation_less_equal:         suffix = is_signed ? "le" : "be"; break;
		case ir_relation_greater:            suffix = is_signed ? "g"  : "a"; break;
		case ir_relation_greater_equal:      suffix = is_signed ? "ge" : "ae"; break;
		case ir_relation_less_greater:       suffix = "ne"; break;
		case ir_relation_less_equal_greater: suffix = "mp"; break;
		default: panic("Cmp has unsupported pnc");
	}

	/* emit the true proj */
	amd64_emitf(proj_true, "j%s %L", suffix);

	/* emit the false proj, omitted when it is the fallthrough block */
	if (get_cfop_target_block(proj_false) != next_block) {
		amd64_emitf(proj_false, "jmp %L");
	} else if (be_options.verbose_asm) {
		amd64_emitf(proj_false, "/* fallthrough to %L */");
	}
}

/**
 * Emits a zero-extending load.  8 and 16 bit loads need an explicit
 * movz; a 32 bit mov implicitly zeroes the upper half and a 64 bit
 * load needs no extension at all.
 */
static void emit_amd64_LoadZ(const ir_node *node)
{
	const amd64_attr_t *attr = get_amd64_attr_const(node);
	switch (attr->data.insn_mode) {
	case INSN_MODE_8:  amd64_emitf(node, "movzbq %O(%^S0), %^D0"); break;
	case INSN_MODE_16: amd64_emitf(node, "movzwq %O(%^S0), %^D0"); break;
	case INSN_MODE_32:
	case INSN_MODE_64: amd64_emitf(node, "mov%M %O(%^S0), %D0");   break;
	default:
		panic("invalid insn mode");
	}
}

/**
 * Emits code for a call.
 */
static void emit_be_Call(const ir_node *node)
{
	ir_entity *entity = be_Call_get_entity(node);

	/* %eax/%rax is used in AMD64 to pass the number of vector parameters for
	 * variable argument counts */
	if (get_method_variadicity (be_Call_get_type((ir_node *) node))) {
		/* But this still is a hack... */
		amd64_emitf(node, "xor %%rax, %%rax");
	}

	if (entity) {
		amd64_emitf(node, "call %E", entity);
	} else {
		/* NOTE(review): calls without an entity (presumably indirect
		 * calls) are not handled yet, only flagged in the output */
		be_emit_pad_comment();
		be_emit_cstring("/* FIXME: call NULL entity?! */\n");
	}
}

/**
 * Emits a register-to-register copy, omitted when source and
 * destination are the same register.
 */
static void emit_be_Copy(const ir_node *irn)
{
	/* omitted Copy */
	if (arch_get_irn_register_in(irn, 0) == arch_get_irn_register_out(irn, 0))
		return;

	ir_mode *mode = get_irn_mode(irn);
	if (mode_is_float(mode)) {
		panic("move not supported for FP");
	} else if (mode_is_data(mode)) {
		amd64_emitf(irn, "mov %^S0, %^D0");
	} else {
		panic("move not supported for this mode");
	}
}
static void emit_be_Perm(const ir_node *node)
{
540
541
	arch_register_t const *const reg0 = arch_get_irn_register_out(node, 0);
	arch_register_t const *const reg1 = arch_get_irn_register_out(node, 1);
542

543
544
	arch_register_class_t const* const cls0 = reg0->reg_class;
	assert(cls0 == reg1->reg_class && "Register class mismatch at Perm");
545

546
	amd64_emitf(node, "xchg %^R, %^R", reg0, reg1);
547
548
549
550
551
552

	if (cls0 != &amd64_reg_classes[CLASS_amd64_gp]) {
		panic("unexpected register class in be_Perm (%+F)", node);
	}
}

553
554
static void emit_amd64_FrameAddr(const ir_node *irn)
{
555
556
	const amd64_SymConst_attr_t *attr =
		(const amd64_SymConst_attr_t*) get_amd64_attr_const(irn);
557

Christoph Mallon's avatar
Christoph Mallon committed
558
559
	amd64_emitf(irn, "mov %S0, %D0");
	amd64_emitf(irn, "add $%u, %D0", attr->fp_offset);
560
561
}

562
563
564
565
566
567
568
569
570
571
572
/**
 * Emits code to increase stack pointer.
 */
static void emit_be_IncSP(const ir_node *node)
{
	int offs = be_get_IncSP_offset(node);

	if (offs == 0)
		return;

	if (offs > 0) {
Matthias Braun's avatar
Matthias Braun committed
573
		amd64_emitf(node, "subq $%d, %^D0", offs);
574
	} else {
Matthias Braun's avatar
Matthias Braun committed
575
		amd64_emitf(node, "addq $%d, %^D0", -offs);
576
577
578
579
580
581
582
583
584
585
586
	}
}

/* Emits the function prologue: reserve the stack frame. */
static void emit_be_Start(const ir_node *node)
{
	ir_graph *irg        = get_irn_irg(node);
	ir_type  *frame_type = get_irg_frame_type(irg);
	unsigned  size       = get_type_size_bytes(frame_type);

	/* nothing to do for an empty frame */
	if (size > 0) {
		amd64_emitf(node, "subq $%u, %%rsp", size);
	}
}

590
591
592
593
594
/**
 * Emits code for a return.
 */
static void emit_be_Return(const ir_node *node)
{
595
596
597
598
599
600
601
602
	ir_graph *irg        = get_irn_irg(node);
	ir_type  *frame_type = get_irg_frame_type(irg);
	unsigned  size       = get_type_size_bytes(frame_type);

	if (size > 0) {
		amd64_emitf(node, "addq $%u, %%rsp", size);
	}

603
	be_emit_cstring("\tret");
604
605
606
607
608
609
610
611
612
613
	be_emit_finish_line_gas(node);
}

/**
 * Enters the emitter functions for handled nodes into the generic
 * pointer of an opcode.
 */
static void amd64_register_emitters(void)
{
	/* first clear the generic function pointer for all ops */
	ir_clear_opcodes_generic_func();

	/* register all emitter functions defined in spec */
	amd64_register_spec_emitters();

	/* custom emitters for amd64 and generic backend nodes */
	be_set_emitter(op_amd64_FrameAddr,  emit_amd64_FrameAddr);
	be_set_emitter(op_amd64_Jcc,        emit_amd64_Jcc);
	be_set_emitter(op_amd64_Jmp,        emit_amd64_Jmp);
	be_set_emitter(op_amd64_LoadZ,      emit_amd64_LoadZ);
	be_set_emitter(op_amd64_SwitchJmp,  emit_amd64_SwitchJmp);
	be_set_emitter(op_be_Call,          emit_be_Call);
	be_set_emitter(op_be_Copy,          emit_be_Copy);
	be_set_emitter(op_be_CopyKeep,      emit_be_Copy);
	be_set_emitter(op_be_IncSP,         emit_be_IncSP);
	be_set_emitter(op_be_Perm,          emit_be_Perm);
	be_set_emitter(op_be_Return,        emit_be_Return);
	be_set_emitter(op_be_Start,         emit_be_Start);

	/* nodes that produce no code */
	be_set_emitter(op_Phi,      be_emit_nothing);
	be_set_emitter(op_be_Keep,  be_emit_nothing);
}

/**
 * Walks over the nodes in a block connected by scheduling edges
 * and emits code for each node.
 */
static void amd64_gen_block(ir_node *block, void *data)
{
	(void) data;

	if (! is_Block(block))
		return;

	/* emit the block label */
	be_gas_begin_block(block, true);

	sched_foreach(block, node) {
		be_emit_node(node);
	}
}


/**
 * Sets labels for control flow nodes (jump target)
 * TODO: Jump optimization
 */
static void amd64_gen_labels(ir_node *block, void *env)
{
	(void) env;

	/* let every control flow predecessor point at its target block */
	for (int i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(block, i);
		set_irn_link(pred, block);
	}
}

671
void amd64_emit_function(ir_graph *irg)
672
{
673
674
	ir_entity *entity = get_irg_entity(irg);
	ir_node  **blk_sched;
675
	size_t i, n;
676
677
678
679

	/* register all emitter functions */
	amd64_register_emitters();

680
	blk_sched = be_create_block_schedule(irg);
681

682
	be_gas_emit_function_prolog(entity, 4, NULL);
683

684
	irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
685
686

	n = ARR_LEN(blk_sched);
687
	for (i = 0; i < n; i++) {
688
		ir_node *block = blk_sched[i];
689
		ir_node *next  = (i + 1) < n ? blk_sched[i+1] : NULL;
690

691
		set_irn_link(block, next);
692
693
694
695
696
697
698
699
700
	}

	for (i = 0; i < n; ++i) {
		ir_node *block = blk_sched[i];

		amd64_gen_block(block, 0);
	}

	be_gas_emit_function_epilog(entity);
701
}