/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   code selection (transform FIRM into amd64 FIRM)
 */
#include "debug.h"
#include "panic.h"
#include "heights.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgraph_t.h"
#include "irmode_t.h"
#include "irnode_t.h"
#include "iropt_t.h"
#include "tv_t.h"
#include "util.h"

#include "benode.h"
#include "betranshlp.h"
#include "bearch_amd64_t.h"
#include "beirg.h"
#include "besched.h"

#include "amd64_new_nodes.h"
#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
#include "../ia32/x86_address_mode.h"
#include "../ia32/x86_cconv.h"

#include "gen_amd64_regalloc_if.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static ir_mode         *mode_gp;
static ir_mode         *mode_flags;
static x86_cconv_t     *current_cconv = NULL;
static be_start_info_t  start_mem;
static be_start_info_t  start_val[N_AMD64_REGISTERS];
static size_t           start_params_offset;
static pmap            *node_to_stack;
static be_stackorder_t *stackorder;

/** we don't have a concept of aliasing registers, so enumerate them
 * manually for the asm nodes. */
const x86_clobber_name_t amd64_additional_clobber_names[] = {
	{ "al", REG_RAX }, { "ah", REG_RAX }, { "ax", REG_RAX }, { "eax", REG_RAX },
	{ "bl", REG_RBX }, { "bh", REG_RBX }, { "bx", REG_RBX }, { "ebx", REG_RBX },
	{ "cl", REG_RCX }, { "ch", REG_RCX }, { "cx", REG_RCX }, { "ecx", REG_RCX },
	{ "dl", REG_RDX }, { "dh", REG_RDX }, { "dx", REG_RDX }, { "edx", REG_RDX },
	{ "sil",  REG_RSI }, { "si",   REG_RSI }, { "esi",  REG_RSI },
	{ "dil",  REG_RDI }, { "di",   REG_RDI }, { "edi",  REG_RDI },
	{ "bpl",  REG_RBP }, { "bp",   REG_RBP }, { "ebp",  REG_RBP },
	{ "spl",  REG_RSP }, { "sp",   REG_RSP }, { "esp",  REG_RSP },
	{ "r8b",  REG_R8  }, { "r8w",  REG_R8  }, { "r8d",  REG_R8  },
	{ "r9b",  REG_R9  }, { "r9w",  REG_R9  }, { "r9d",  REG_R9  },
	{ "r10b", REG_R10 }, { "r10w", REG_R10 }, { "r10d", REG_R10 },
	{ "r11b", REG_R11 }, { "r11w", REG_R11 }, { "r11d", REG_R11 },
	{ "r12b", REG_R12 }, { "r12w", REG_R12 }, { "r12d", REG_R12 },
	{ "r13b", REG_R13 }, { "r13w", REG_R13 }, { "r13d", REG_R13 },
	{ "r14b", REG_R14 }, { "r14w", REG_R14 }, { "r14d", REG_R14 },
	{ "r15b", REG_R15 }, { "r15w", REG_R15 }, { "r15d", REG_R15 },
	{ NULL, ~0u }
};

#define GP &amd64_reg_classes[CLASS_amd64_gp]
const x86_asm_constraint_list_t amd64_asm_constraints = {
	['A'] = { MATCH_REG, GP, 1 << REG_GP_RAX | 1 << REG_GP_RDX },
	['D'] = { MATCH_REG, GP, 1 << REG_GP_RDI },
	['I'] = { MATCH_IMM, GP, 0 },
	['J'] = { MATCH_IMM, GP, 0 },
	['K'] = { MATCH_IMM, GP, 0 },
	['L'] = { MATCH_IMM, GP, 0 },
	['M'] = { MATCH_IMM, GP, 0 },
	['N'] = { MATCH_IMM, GP, 0 },
	['O'] = { MATCH_IMM, GP, 0 },
	['R'] = { MATCH_REG, GP, 1 << REG_GP_RAX | 1 << REG_GP_RBX
		| 1 << REG_GP_RCX | 1 << REG_GP_RDX | 1 << REG_GP_RSI
		| 1 << REG_GP_RDI | 1 << REG_GP_RBP | 1 << REG_GP_RSP },
	['S'] = { MATCH_REG, GP, 1 << REG_GP_RSI },
	['Q'] = { MATCH_REG, GP, 1 << REG_GP_RAX | 1 << REG_GP_RBX
		| 1 << REG_GP_RCX | 1 << REG_GP_RDX },
	['V'] = { MATCH_MEM, GP, 0 },
	['X'] = { MATCH_ANY, GP, 0 },
	['a'] = { MATCH_REG, GP, 1 << REG_GP_RAX },
	['b'] = { MATCH_REG, GP, 1 << REG_GP_RBX },
	['c'] = { MATCH_REG, GP, 1 << REG_GP_RCX },
	['d'] = { MATCH_REG, GP, 1 << REG_GP_RDX },
	['g'] = { MATCH_ANY, GP, 0 },
	['i'] = { MATCH_IMM, GP, 0 },
	['l'] = { MATCH_REG, GP, 1 << REG_GP_RAX | 1 << REG_GP_RBX
		| 1 << REG_GP_RCX | 1 << REG_GP_RDX | 1 << REG_GP_RSI
		| 1 << REG_GP_RDI | 1 << REG_GP_RBP },
	['m'] = { MATCH_MEM, GP, 0 },
	['n'] = { MATCH_IMM, GP, 0 },
	['o'] = { MATCH_MEM, GP, 0 },
	['p'] = { MATCH_REG, GP, 0 },
	['q'] = { MATCH_REG, GP, 0 },
	['r'] = { MATCH_REG, GP, 0 },
	['x'] = { MATCH_REG, &amd64_reg_classes[CLASS_amd64_xmm], 0 },

	// see comments in ia32_transform.c about unimplemented stuff.
};
#undef GP

#define BIT(x)    (1u << x)

static const arch_register_req_t amd64_requirement_gp_same_0 = {
	.cls             = &amd64_reg_classes[CLASS_amd64_gp],
	.limited         = NULL,
	.type            = arch_register_req_type_should_be_same,
	.other_same      = BIT(0),
	.other_different = 0,
	.width           = 1,
};

static const arch_register_req_t amd64_requirement_xmm_same_0 = {
	.cls             = &amd64_reg_classes[CLASS_amd64_xmm],
	.limited         = NULL,
	.type            = arch_register_req_type_should_be_same,
	.other_same      = BIT(0),
	.other_different = 0,
	.width           = 1,
};

static const arch_register_req_t amd64_requirement_gp_same_0_not_1 = {
	.cls             = &amd64_reg_classes[CLASS_amd64_gp],
	.limited         = NULL,
	.type            = arch_register_req_type_should_be_same
	                   | arch_register_req_type_must_be_different,
	.other_same      = BIT(0),
	.other_different = BIT(1),
	.width           = 1,
};

static const arch_register_req_t *mem_reqs[] = {
	&arch_no_requirement,
};

static const arch_register_req_t *reg_mem_reqs[] = {
	&amd64_class_reg_req_gp,
	&arch_no_requirement,
};

static const arch_register_req_t *rsp_mem_reqs[] = {
	&amd64_single_reg_req_gp_rsp,
	&arch_no_requirement,
};

static const arch_register_req_t *rsp_reg_mem_reqs[] = {
	&amd64_single_reg_req_gp_rsp,
	&amd64_class_reg_req_gp,
	&arch_no_requirement,
};

static const arch_register_req_t *xmm_mem_reqs[] = {
	&amd64_class_reg_req_xmm,
	&arch_no_requirement,
};

static const arch_register_req_t *reg_reg_mem_reqs[] = {
	&amd64_class_reg_req_gp,
	&amd64_class_reg_req_gp,
	&arch_no_requirement,
};

static const arch_register_req_t *xmm_reg_mem_reqs[] = {
	&amd64_class_reg_req_xmm,
	&amd64_class_reg_req_gp,
	&arch_no_requirement,
};

static const arch_register_req_t *reg_reg_reg_mem_reqs[] = {
	&amd64_class_reg_req_gp,
	&amd64_class_reg_req_gp,
	&amd64_class_reg_req_gp,
	&arch_no_requirement,
};

static const arch_register_req_t *xmm_reg_reg_mem_reqs[] = {
	&amd64_class_reg_req_xmm,
	&amd64_class_reg_req_gp,
	&amd64_class_reg_req_gp,
	&arch_no_requirement,
};

static const arch_register_req_t *reg_flags_reqs[] = {
	&amd64_class_reg_req_gp,
	&amd64_class_reg_req_flags,
};

static const arch_register_req_t *reg_reg_reqs[] = {
	&amd64_class_reg_req_gp,
	&amd64_class_reg_req_gp,
};

static const arch_register_req_t *rax_reg_reqs[] = {
	&amd64_single_reg_req_gp_rax,
	&amd64_class_reg_req_gp,
};

static const arch_register_req_t *rax_reg_rdx_mem_reqs[] = {
	&amd64_single_reg_req_gp_rax,
	&amd64_class_reg_req_gp,
	&amd64_single_reg_req_gp_rdx,
	&arch_no_requirement,
};

static const arch_register_req_t *reg_reqs[] = {
	&amd64_class_reg_req_gp,
};

arch_register_req_t const *amd64_xmm_reqs[] = {
	&amd64_class_reg_req_xmm,
};

static const arch_register_req_t *reg_rcx_reqs[] = {
	&amd64_class_reg_req_gp,
	&amd64_single_reg_req_gp_rcx,
};

static const arch_register_req_t *no_reqs[] = {
};

static const arch_register_req_t *xmm_xmm_reqs[] = {
	&amd64_class_reg_req_xmm,
	&amd64_class_reg_req_xmm,
};

arch_register_req_t const **const gp_am_reqs[] = {
	mem_reqs,
	reg_mem_reqs,
	reg_reg_mem_reqs,
	reg_reg_reg_mem_reqs,
};

static arch_register_req_t const **const xmm_am_reqs[] = {
	mem_reqs,
	xmm_mem_reqs,
	xmm_reg_mem_reqs,
	xmm_reg_reg_mem_reqs,
};

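/** Returns true if values of the given mode are kept in general purpose
 * registers (two's complement arithmetic, excluding the 128bit xmm mode). */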
static inline bool mode_needs_gp_reg(ir_mode *mode)
{
	return get_mode_arithmetic(mode) == irma_twos_complement
	    && mode != amd64_mode_xmm; /* mode_xmm is 128bit int at the moment */
}

static ir_node *get_initial_sp(ir_graph *irg)
{
	return be_get_start_proj(irg, &start_val[REG_RSP]);
}

static ir_node *get_initial_fp(ir_graph *irg)
{
	return be_get_start_proj(irg, &start_val[REG_RBP]);
}

static ir_node *get_initial_mem(ir_graph *irg)
{
	return be_get_start_proj(irg, &start_mem);
}

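/** Returns the node to address the stack frame with: the initial stack
 * pointer if the frame pointer is omitted, the frame pointer otherwise. */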
static ir_node *get_frame_base(ir_graph *irg)
{
	if (current_cconv->omit_fp) {
		return get_initial_sp(irg);
	} else {
		return get_initial_fp(irg);
	}
}

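/** Maps a firm mode to the amd64 instruction mode with the same bit width. */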
static amd64_insn_mode_t get_insn_mode_from_mode(const ir_mode *mode)
{
	switch (get_mode_size_bits(mode)) {
	case   8: return INSN_MODE_8;
	case  16: return INSN_MODE_16;
	case  32: return INSN_MODE_32;
	case  64: return INSN_MODE_64;
	case 128: return INSN_MODE_128;
	}
	panic("unexpected mode");
}

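/** Returns a read-only global entity holding the float tarval @p tv,
 * creating and caching it on first use. */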
ir_entity *create_float_const_entity(ir_tarval *const tv)
{
	ir_entity *entity = pmap_get(ir_entity, amd64_constants, tv);
	if (entity != NULL)
		return entity;

	ir_mode *mode = get_tarval_mode(tv);
	ir_type *type = get_type_for_mode(mode);
	ir_type *glob = get_glob_type();

	entity = new_entity(glob, id_unique("C%u"), type);
	set_entity_visibility(entity, ir_visibility_private);
	add_entity_linkage(entity, IR_LINKAGE_CONSTANT);

	ir_initializer_t *initializer = create_initializer_tarval(tv);
	set_entity_initializer(entity, initializer);

	pmap_insert(amd64_constants, tv, entity);
	return entity;
}

typedef enum reference_mode_t {
	REFERENCE_DIRECT,
	REFERENCE_IP_RELATIVE,
	REFERENCE_GOT,
} reference_mode_t;

static reference_mode_t need_relative_addressing(const ir_entity *entity)
{
	if (!be_options.pic)
		return REFERENCE_DIRECT;

	/* simply everything is instruction pointer relative, external functions
	 * use a global offset table */
	return entity_has_definition(entity)
	   && (get_entity_linkage(entity) & IR_LINKAGE_MERGE) == 0
	    ? REFERENCE_IP_RELATIVE : REFERENCE_GOT;
}

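/** Creates code that loads the float constant @p tv from its constant-pool
 * entity and returns the xmm result value. */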
static ir_node *create_float_const(dbg_info *dbgi, ir_node *block,
                                   ir_tarval *tv)
{
	ir_graph  *irg     = get_irn_irg(block);
	ir_mode   *tv_mode = get_tarval_mode(tv);
	ir_entity *entity  = create_float_const_entity(tv);
	ir_node   *nomem   = get_irg_no_mem(irg);

	ir_node *in[] = { nomem };
	amd64_addr_t addr;
	memset(&addr, 0, sizeof(addr));

	addr.immediate.entity       = entity;
	amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(tv_mode);

	addr.index_input = NO_INPUT;
	if (need_relative_addressing(entity) == REFERENCE_DIRECT) {
		addr.base_input = NO_INPUT;
	} else {
		assert(need_relative_addressing(entity) == REFERENCE_IP_RELATIVE);
		addr.base_input = RIP_INPUT;
	}

	ir_node *load;
	unsigned pn_res;
	if (insn_mode == INSN_MODE_128) {
		load = new_bd_amd64_movdqa(dbgi, block, ARRAY_SIZE(in), in,
		                           AMD64_OP_ADDR, addr);
		pn_res = pn_amd64_movdqa_res;
	} else {
		load = new_bd_amd64_movs_xmm(dbgi, block, ARRAY_SIZE(in), in,
		                             insn_mode, AMD64_OP_ADDR, addr);
		pn_res = pn_amd64_movs_xmm_res;
	}
	arch_set_irn_register_reqs_in(load, mem_reqs);
	set_irn_pinned(load, op_pin_state_floats);

	return new_r_Proj(load, amd64_mode_xmm, pn_res);
}

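/** Creates a tarval of the given float mode whose bit pattern has only the
 * sign bit set. */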
ir_tarval *create_sign_tv(ir_mode *mode)
{
	unsigned size = get_mode_size_bits(mode);
	assert(size == 32 || size == 64 || size == 128);
	ir_mode *intmode = size == 128 ? amd64_mode_xmm
	                 : size == 64  ? mode_Lu
	                               : mode_Iu;
	ir_tarval *one  = get_mode_one(intmode);
	ir_tarval *sign = tarval_shl_unsigned(one, size-1);
	return tarval_bitcast(sign, mode);
}

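/** Transforms a Const node: float zero becomes xorpd, other float constants
 * are loaded from a constant entity, integer constants become mov_imm. */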
static ir_node *gen_Const(ir_node *node)
{
	ir_node  *block = be_transform_nodes_block(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);
	ir_mode  *mode  = get_irn_mode(node);
	ir_tarval *tv = get_Const_tarval(node);

	if (!mode_needs_gp_reg(mode)) {
		if (tarval_is_null(tv)) {
			return new_bd_amd64_xorpd_0(dbgi, block);
		}

		return create_float_const(dbgi, block, tv);
	}

	uint64_t val = get_tarval_uint64(tv);
	amd64_insn_mode_t imode = val > UINT32_MAX ? INSN_MODE_64 : INSN_MODE_32;
	return new_bd_amd64_mov_imm(dbgi, block, imode, val, NULL);
}

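/** Transforms an Address node: a plain 64bit immediate without PIC,
 * a RIP-relative lea for locally defined entities, or a GOT load otherwise. */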
static ir_node *gen_Address(ir_node *node)
{
	ir_node   *block  = be_transform_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_entity *entity = get_Address_entity(node);

	/* do we need RIP-relative addressing because of PIC? */
	reference_mode_t mode = need_relative_addressing(entity);
	if (mode == REFERENCE_DIRECT)
		return new_bd_amd64_mov_imm(dbgi, block, INSN_MODE_64, 0, entity);

	amd64_addr_t addr;
	memset(&addr, 0, sizeof(addr));
	addr.base_input  = RIP_INPUT;
	addr.index_input = NO_INPUT;
	addr.mem_input   = NO_INPUT;

	if (mode == REFERENCE_IP_RELATIVE) {
		addr.immediate.entity = entity;
		return new_bd_amd64_lea(dbgi, block, 0, NULL, INSN_MODE_64, addr);
	} else {
		assert(mode == REFERENCE_GOT);
		addr.immediate.entity = new_got_entry_entity(entity);
		ir_node *load = new_bd_amd64_mov_gp(dbgi, block, 0, NULL, INSN_MODE_64,
		                                    AMD64_OP_ADDR, addr);
		return new_r_Proj(load, mode_gp, pn_amd64_mov_gp_res);
	}
}

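/** Creates a stack pointer adjustment node; it is marked as modifying the
 * flags register. */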
ir_node *amd64_new_IncSP(ir_node *block, ir_node *old_sp, int offset,
                         unsigned align)
{
	ir_node *incsp = be_new_IncSP(&amd64_registers[REG_RSP], block, old_sp,
	                              offset, align);
	arch_add_irn_flags(incsp, arch_irn_flag_modify_flags);
	return incsp;
}

typedef ir_node *(*construct_binop_func)(dbg_info *dbgi, ir_node *block,
	int arity, ir_node *in[], const amd64_binop_addr_attr_t *attr_init);

typedef ir_node *(*construct_rax_binop_func)(dbg_info *dbgi, ir_node *block,
	int arity, ir_node *in[], amd64_insn_mode_t insn_mode,
	amd64_op_mode_t op_mode, amd64_addr_t addr);

typedef enum match_flags_t {
	match_am           = 1 << 0,
	match_mode_neutral = 1 << 1,
	match_immediate    = 1 << 2,
	match_commutative  = 1 << 3,
} match_flags_t;

typedef struct amd64_args_t {
	amd64_binop_addr_attr_t     attr;
	ir_node                    *mem_proj;
	ir_node                    *in[4];
	int                         arity;
	const arch_register_req_t **reqs;
} amd64_args_t;

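/** Tries to match @p op as a 32bit immediate; on success the immediate
 * struct is filled in and true is returned. */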
static bool match_immediate_32(amd64_imm32_t *imm, const ir_node *op,
                               bool can_match_ip_relative,
                               bool upper32_dont_care)
{
	assert(mode_needs_gp_reg(get_irn_mode(op)));
	assert(imm->offset == 0 && imm->entity == NULL);

	ir_tarval *tv;
	ir_entity *entity;
	if (!be_match_immediate(op, &tv, &entity))
		return false;

	int32_t val;
	if (tv) {
		if (!tarval_is_long(tv))
			return false;
		long lval = get_tarval_long(tv);
		val = (int32_t)lval;
		if ((long)val != lval)
			return false;
		/* the immediate value is sign-extended to 64bit, sometimes
		 * this is not what we want. */
		if (!upper32_dont_care && val < 0
		    && !mode_is_signed(get_tarval_mode(tv)))
		    return false;
	} else {
		val = 0;
	}

	if (entity && !can_match_ip_relative) {
		/* TODO: check if entity is in lower 4GB address space/relative */
		return false;
	}

	imm->offset = val;
	imm->entity = entity;
	return true;
}

static ir_heights_t *heights;

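/** Returns true if @p input depends on @p load within the load's block;
 * such inputs prevent folding the load into the using node. */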
static bool input_depends_on_load(ir_node *load, ir_node *input)
{
	ir_node *block = get_nodes_block(load);
	/* if the dependency is in another block, then we ignore it for now
	   as we only match address mode loads in the same block. */
	return get_nodes_block(input) == block
	    && heights_reachable_in_block(heights, input, load);
}

static void fix_node_mem_proj(ir_node *node, ir_node *mem_proj)
{
	if (mem_proj == NULL)
		return;

	ir_node *load = get_Proj_pred(mem_proj);
	be_set_transformed_node(load, node);
}

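/** Returns the Load behind @p node if its result may be folded as an
 * address mode operand in @p block (single user, same block, not yet
 * transformed), NULL otherwise. */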
static ir_node *source_am_possible(ir_node *block, ir_node *node)
{
	if (!is_Proj(node))
		return NULL;
	ir_node *load = get_Proj_pred(node);
	if (!is_Load(load))
		return NULL;
	assert(get_Proj_num(node) == pn_Load_res);
	if (get_nodes_block(load) != block)
		return NULL;
	/* make sure we are the only user */
	if (get_irn_n_edges(node) != 1)
		return NULL;
	/* From ia32_transform.c:751:
	 * in some edge cases with address mode we might reach the load normally
	 * and through some AM sequence, if it is already materialized then we
	 * can't create an AM node from it */
	if (be_is_transformed(node))
		return NULL;
	return load;
}

static bool needs_extension(ir_node *op)
{
	ir_mode *mode = get_irn_mode(op);
	if (get_mode_size_bits(mode) >= 32)
		return false;
	return !be_upper_bits_clean(op, mode);
}

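/** Creates a value containing only copies of the sign bit of @p node, using
 * an arithmetic shift right by the mode's bit size minus one. */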
static ir_node *create_sext(ir_node *new_block, ir_node *const node, ir_mode *mode)
{
	amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
	dbg_info *const   dbgi      = get_irn_dbg_info(node);
	ir_node  *const   new_node  = be_transform_node(node);

	amd64_shift_attr_t attr;
	memset(&attr, 0, sizeof(attr));
	attr.base.op_mode = AMD64_OP_SHIFT_IMM;
	attr.insn_mode    = insn_mode;
	attr.immediate    = get_mode_size_bits(mode) - 1;
	ir_node *in[1]    = { new_node };
	ir_node *sar      = new_bd_amd64_sar(dbgi, new_block, ARRAY_SIZE(in),
	                                     in, &attr);

	arch_set_irn_register_reqs_in(sar, reg_reqs);
	arch_set_irn_register_req_out(sar, 0, &amd64_requirement_gp_same_0);
	return new_r_Proj(sar, mode_gp, pn_amd64_sar_res);
}

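/** Creates a zero value via xor, used where a zero-extended upper part is
 * required. */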
static ir_node *create_zext(ir_node *new_block, ir_node *const node)
{
	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const xor0      = new_bd_amd64_xor_0(dbgi, new_block);
	arch_set_irn_register_reqs_in(xor0, reg_reqs);
	return new_r_Proj(xor0, mode_gp, pn_amd64_xor_0_res);
}

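/** Checks whether one of the two operands is a Load in @p block that may be
 * folded as an address mode operand; returns the load and the remaining
 * register operand through the out parameters. */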
static bool use_address_matching(ir_mode *mode, match_flags_t flags,
                                 ir_node *block,
                                 ir_node *op1, ir_node *op2,
                                 ir_node **out_load, ir_node **out_op)
{
	if (! (flags & match_am))
		return false;

	unsigned mode_bits = get_mode_size_bits(mode);
	if (mode_bits == 8 || mode_bits == 16)
		return false;

	ir_node *load2 = source_am_possible(block, op2);
	if (load2 != NULL && !input_depends_on_load(load2, op1)) {
		(*out_load) = load2;
		(*out_op)   = op1;
		return true;
	}

	if (flags & match_commutative) {
		ir_node *load1 = source_am_possible(block, op1);
		if (load1 != NULL && !input_depends_on_load(load1, op2)) {
			(*out_load) = load1;
			(*out_op)   = op2;
			return true;
		}
	}
	return false;
}

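/** Runs the x86 address mode builder on @p ptr and translates the result
 * into amd64 address inputs, appending base/index values to @p in. */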
static void perform_address_matching(ir_node *ptr, int *arity,
                                     ir_node **in, amd64_addr_t *addr)
{
	x86_address_t maddr;
	memset(&maddr, 0, sizeof(maddr));
	x86_create_address_mode(&maddr, ptr, x86_create_am_normal);

	if (maddr.base != NULL) {
		int base_input   = (*arity)++;
		addr->base_input = base_input;
		in[base_input]   = be_transform_node(maddr.base);
	} else {
		ir_entity *entity = maddr.entity;
		if (entity != NULL
		    && need_relative_addressing(entity) != REFERENCE_DIRECT) {
		    addr->base_input = RIP_INPUT;
		} else {
			addr->base_input = NO_INPUT;
		}
	}
	if (maddr.index != NULL) {
		int index_input = (*arity)++;
		addr->index_input = index_input;
		in[index_input]  = be_transform_node(maddr.index);
	} else {
		addr->index_input = NO_INPUT;
	}
	if (maddr.frame_entity != NULL) {
		assert(maddr.entity == NULL);
		addr->immediate.entity = maddr.frame_entity;
		/* not supported yet */
		assert(!is_parameter_entity(maddr.frame_entity)
		       || get_entity_parameter_number(maddr.frame_entity)
		          != IR_VA_START_PARAMETER_NUMBER);
	} else {
		addr->immediate.entity = maddr.entity;
	}
	addr->immediate.offset = maddr.offset;
	addr->log_scale        = maddr.scale;
}

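/** Fills an amd64_args_t for a binary operation, choosing between
 * register/immediate/address mode operand forms according to @p flags. */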
static void match_binop(amd64_args_t *args, ir_node *block,
                        ir_mode *mode, ir_node *op1, ir_node *op2,
                        match_flags_t flags)
{
	memset(args, 0, sizeof(*args));

	bool use_am;
	bool use_xmm       = mode_is_float(mode);
	bool use_immediate = flags & match_immediate;
	bool mode_neutral  = flags & match_mode_neutral;

	args->attr.base.insn_mode = get_insn_mode_from_mode(mode);

	/* TODO: legalize phase */
	if (mode_neutral) {
		op1 = be_skip_downconv(op1, true);
		op2 = be_skip_downconv(op2, true);
	} else {
		/* TODO: extend inputs? */
		(void)needs_extension;
	}

	ir_node *load;
	ir_node *op;

	use_am = use_address_matching(mode, flags, block, op1, op2, &load, &op);

	if (use_immediate
	    && match_immediate_32(&args->attr.u.immediate, op2, false, mode_neutral)) {
		assert(!use_xmm && "Can't (yet) match binop with xmm immediate");
		/* fine, we found an immediate */
		args->attr.base.base.op_mode = AMD64_OP_REG_IMM;
		args->in[args->arity++]      = be_transform_node(op1);
		args->reqs                   = reg_reqs;
	} else if (use_am) {
		ir_node *new_op        = be_transform_node(op);
		int      reg_input     = args->arity++;
		args->attr.u.reg_input = reg_input;
		args->in[reg_input]    = new_op;
		amd64_addr_t *addr     = &args->attr.base.addr;

		ir_node *ptr = get_Load_ptr(load);
		perform_address_matching(ptr, &(args->arity), args->in, addr);

		args->reqs = (use_xmm ? xmm_am_reqs : gp_am_reqs)[args->arity];

		ir_node *new_mem    = be_transform_node(get_Load_mem(load));
		int mem_input       = args->arity++;
		args->in[mem_input] = new_mem;
		addr->mem_input     = mem_input;

		args->mem_proj      = get_Proj_for_pn(load, pn_Load_M);
		args->attr.base.base.op_mode = AMD64_OP_ADDR_REG;
	} else {
		/* simply transform the arguments */
		args->in[args->arity++] = be_transform_node(op1);
		args->in[args->arity++] = be_transform_node(op2);
		args->attr.base.base.op_mode = AMD64_OP_REG_REG;

		args->reqs = use_xmm ? xmm_xmm_reqs : reg_reg_reqs;
	}
}

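/** Transforms a generic binary operation: matches the operands with
 * match_binop, builds the backend node via @p func and returns the result
 * Proj with the proper register requirements. */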
static ir_node *gen_binop_am(ir_node *node, ir_node *op1, ir_node *op2,
                             construct_binop_func func, unsigned pn_res,
                             match_flags_t flags)
{
	ir_node *block = get_nodes_block(node);
	ir_mode *mode  = get_irn_mode(node);
	amd64_args_t args;
	match_binop(&args, block, mode, op1, op2, flags);

	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const new_block = be_transform_node(block);

	ir_node *new_node = func(dbgi, new_block, args.arity, args.in, &args.attr);
	arch_set_irn_register_reqs_in(new_node, args.reqs);

	fix_node_mem_proj(new_node, args.mem_proj);

	if (mode_is_float(mode)) {
		arch_set_irn_register_req_out(new_node, 0,
		                              &amd64_requirement_xmm_same_0);
		return new_r_Proj(new_node, amd64_mode_xmm, pn_res);
	} else {
		arch_set_irn_register_req_out(new_node, 0,
		                              &amd64_requirement_gp_same_0);
		return new_r_Proj(new_node, mode_gp, pn_res);
	}
}

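/** Transforms a binary operation whose first input and result are fixed to
 * rax, optionally folding a load as address mode operand. */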
static ir_node *gen_binop_rax(ir_node *node, ir_node *op1, ir_node *op2,
                              construct_rax_binop_func make_node,
                              match_flags_t flags)
{
	bool use_am;
	bool mode_neutral  = flags & match_mode_neutral;
	assert(! (flags & match_immediate));

	ir_mode *mode = get_irn_mode(op1);
	amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);

	/* TODO: legalize phase */
	if (mode_neutral) {
		op1 = be_skip_downconv(op1, true);
		op2 = be_skip_downconv(op2, true);
	} else {
		/* TODO: extend inputs? */
		(void)needs_extension;
	}

	ir_node *load;
	ir_node *op;
	ir_node *block = get_nodes_block(node);
	ir_node *in[4];
	int      arity = 0;
	const arch_register_req_t **reqs;
	amd64_op_mode_t op_mode;
	amd64_addr_t    addr;
	memset(&addr, 0, sizeof(addr));

	use_am = use_address_matching(mode, flags, block, op1, op2, &load, &op);

	ir_node *mem_proj = NULL;
	if (use_am) {
		ir_node *new_op    = be_transform_node(op);
		int      reg_input = arity++;
		in[reg_input]      = new_op;

		ir_node *ptr = get_Load_ptr(load);
		perform_address_matching(ptr, &arity, in, &addr);

		reqs = gp_am_reqs[arity];

		ir_node *new_mem = be_transform_node(get_Load_mem(load));
		int mem_input    = arity++;
		in[mem_input]    = new_mem;
		addr.mem_input   = mem_input;

		mem_proj                = get_Proj_for_pn(load, pn_Load_M);
		op_mode                 = AMD64_OP_RAX_ADDR;
	} else {
		/* simply transform the arguments */
		in[arity++] = be_transform_node(op1);
		in[arity++] = be_transform_node(op2);
		reqs        = rax_reg_reqs;
		op_mode     = AMD64_OP_RAX_REG;
	}

	assert((size_t)arity <= ARRAY_SIZE(in));
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_node  *new_block = be_transform_node(block);
	ir_node  *new_node  = make_node(dbgi, new_block, arity, in, insn_mode,
	                                op_mode, addr);
	arch_set_irn_register_reqs_in(new_node, reqs);
	if (mem_proj != NULL) {
		be_set_transformed_node(load, new_node);
	}
	return new_node;
}

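/** Transforms an xmm binary operation, optionally folding a load as address
 * mode operand. */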
static ir_node *gen_binop_xmm(ir_node *node, ir_node *op0, ir_node *op1,
                              construct_binop_func make_node,
                              match_flags_t flags)
{
	ir_node *block = get_nodes_block(node);
	ir_mode *mode  = get_irn_mode(op0);
	amd64_args_t args;
	memset(&args, 0, sizeof(args));

	ir_node *load;
	ir_node *op;
	bool use_am = use_address_matching(mode, flags, block, op0, op1, &load,
	                                   &op);

	if (use_am) {
		int reg_input = args.arity++;
		args.attr.u.reg_input = reg_input;
		args.in[reg_input]    = be_transform_node(op);

		amd64_addr_t *addr = &args.attr.base.addr;
		ir_node      *ptr  = get_Load_ptr(load);
		perform_address_matching(ptr, &args.arity, args.in, addr);

		args.reqs = xmm_am_reqs[args.arity];

		ir_node *new_mem   = be_transform_node(get_Load_mem(load));
		int mem_input      = args.arity++;
		args.in[mem_input] = new_mem;
		addr->mem_input    = mem_input;

		args.mem_proj      = get_Proj_for_pn(load, pn_Load_M);
		args.attr.base.base.op_mode = AMD64_OP_ADDR_REG;
	} else {
		args.in[args.arity++] = be_transform_node(op0);
		args.in[args.arity++] = be_transform_node(op1);
		args.attr.base.base.op_mode = AMD64_OP_REG_REG;
		args.reqs = xmm_xmm_reqs;
	}

	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const new_block = be_transform_node(block);
	ir_node *new_node = make_node(dbgi, new_block, args.arity, args.in,
	                              &args.attr);
	arch_set_irn_register_reqs_in(new_node, args.reqs);

	fix_node_mem_proj(new_node, args.mem_proj);

	arch_set_irn_register_req_out(new_node, 0,
								  &amd64_requirement_xmm_same_0);
	return new_r_Proj(new_node, amd64_mode_xmm, pn_amd64_subs_res);
}

typedef ir_node *(*construct_shift_func)(dbg_info *dbgi, ir_node *block,
	int arity, ir_node *in[], const amd64_shift_attr_t *attr_init);

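/** Transforms a shift operation; the shift amount is either encoded as an
 * immediate or passed in the cl register. */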
static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2,
                                construct_shift_func func, unsigned pn_res,
                                match_flags_t flags)
{
	ir_mode *mode = get_irn_mode(node);
	assert(!mode_is_float(mode));

	if (get_mode_modulo_shift(mode) != 32 && get_mode_size_bits(mode) != 64)
		panic("insupported modulo shift used");

	ir_node *in[3];
	int      arity = 0;
	if (flags & match_mode_neutral) {
		op1 = be_skip_downconv(op1, true);
		in[arity++] = be_transform_node(op1);
		mode = get_mode_size_bits(mode) > 32 ? mode_gp : mode_Iu;
	} else {
		op1 = be_skip_sameconv(op1);

		/* Use 8/16bit operations instead of doing zext/upconv */
		in[arity++] = be_transform_node(op1);
	}

	/* we can skip any convs for the shift count, as it only uses the lowest
	 * 5/6 bits anyway */
	while (is_Conv(op2) && get_irn_n_edges(op2) == 1) {
		ir_node *const op = get_Conv_op(op2);
		if (get_mode_arithmetic(get_irn_mode(op)) != irma_twos_complement)
			break;
		op2 = op;
	}

	amd64_shift_attr_t attr;
	memset(&attr, 0, sizeof(attr));
	const arch_register_req_t **reqs;
	const arch_register_req_t  *out_req0;
	if (is_Const(op2)) {
		attr.base.op_mode = AMD64_OP_SHIFT_IMM;
		reqs              = reg_reqs;
		out_req0          = &amd64_requirement_gp_same_0;
		attr.immediate    = get_Const_long(op2);
	} else {
		attr.base.op_mode = AMD64_OP_SHIFT_REG;
		in[arity++]       = be_transform_node(op2);
		reqs              = reg_rcx_reqs;
		out_req0          = &amd64_requirement_gp_same_0_not_1;
	}
	attr.insn_mode = get_insn_mode_from_mode(mode);

	dbg_info *const dbgi      = get_irn_dbg_info(node);
	ir_node  *const new_block = be_transform_nodes_block(node);
	ir_node  *const new_node  = func(dbgi, new_block, arity, in, &attr);
	arch_set_irn_register_reqs_in(new_node, reqs);
	arch_set_irn_register_req_out(new_node, 0, out_req0);
	return new_r_Proj(new_node, mode_gp, pn_res);
}

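/** Implements an integer Add as a lea instruction, which leaves the flags
 * untouched and allows a 32bit immediate or a second register operand. */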
static ir_node *create_lea_as_add(ir_node *node, ir_node *op1, ir_node *op2)
{
	dbg_info *const dbgi = get_irn_dbg_info(node);
	ir_node  *new_block  = be_transform_nodes_block(node);
	ir_mode *mode        = get_irn_mode(node);

	amd64_insn_mode_t insn_mode;
	if (get_mode_size_bits(mode) <= 32)
		insn_mode = INSN_MODE_32;
	else
		insn_mode = INSN_MODE_64;

	const arch_register_req_t **reqs;
	amd64_addr_t addr;
	memset(&addr, 0, sizeof(addr));

	ir_node *in[2];
	int arity = 0;

	if (match_immediate_32(&addr.immediate, op2, false, true)) {
		in[arity++]      = be_transform_node(op1);
		reqs             = reg_reqs;
		addr.index_input = NO_INPUT;
	} else {
		in[arity++]      = be_transform_node(op1);
		in[arity++]      = be_transform_node(op2);
		addr.base_input  = 0;
		addr.index_input = 1;
		reqs             = reg_reg_reqs;
	}

	ir_node *res = new_bd_amd64_lea(dbgi, new_block, arity, in, insn_mode, addr);
	arch_set_irn_register_reqs_in(res, reqs);
	return res;
}

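/** Transforms an Add node: float adds use adds, integer adds become an add
 * with a folded load or an lea otherwise. */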
static ir_node *gen_Add(ir_node *const node)
{
	match_flags_t flags = match_immediate | match_am | match_mode_neutral
	                      | match_commutative;

	ir_node *op1 = get_Add_left(node);
	ir_node *op2 = get_Add_right(node);

	ir_mode *mode  = get_irn_mode(node);
	ir_node *block = get_nodes_block(node);
	ir_node *load, *op;

	if (mode_is_float(mode)) {
		return gen_binop_am(node, op1, op2, new_bd_amd64_adds,
		                    pn_amd64_adds_res, match_commutative | match_am);
	}

	bool use_am = use_address_matching(mode, flags, block, op1, op2, &load, &op);

	ir_node *res;
	if (use_am)
		res = gen_binop_am(node, op1, op2, new_bd_amd64_add, pn_amd64_add_res,
		                   flags);
	else
		res = create_lea_as_add(node, op1, op2);

	x86_mark_non_am(node);
	return res;
}

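/** Transforms a Sub node: float subs use subs, integer subs use sub with
 * immediate matching only (no load folding yet). */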
static ir_node *gen_Sub(ir_node *const node)
{
	ir_node  *const op1     = get_Sub_left(node);
	ir_node  *const op2     = get_Sub_right(node);
	ir_mode  *const mode    = get_irn_mode(node);

	if (mode_is_float(mode)) {
		return gen_binop_am(node, op1, op2, new_bd_amd64_subs,
		                    pn_amd64_subs_res, match_am);
	} else {
		/* TODO: do not match AM yet until we have a sub->neg+add rule
		 * in amd64_finish */
		return gen_binop_am(node, op1, op2, new_bd_amd64_sub, pn_amd64_sub_res,
		                    match_immediate);
	}
}

static ir_node *gen_And(ir_node *const node)
{
	ir_node *op1 = get_And_left(node);
	ir_node *op2 = get_And_right(node);
	return gen_binop_am(node, op1, op2, new_bd_amd64_and, pn_amd64_and_res,
	                    match_immediate | match_am | match_mode_neutral
	                    | match_commutative);
}

static ir_node *gen_Eor(ir_node *const node)
{
	ir_node *op1 = get_Eor_left(node);
	ir_node *op2 = get_Eor_right(node);
	return gen_binop_am(node, op1, op2, new_bd_amd64_xor, pn_amd64_xor_res,
	                    match_immediate | match_am | match_mode_neutral
	                    | match_commutative);
}

static ir_node *gen_Or(ir_node *const node)
{
	ir_node *op1 = get_Or_left(node);
	ir_node *op2 = get_Or_right(node);
	return gen_binop_am(node, op1, op2, new_bd_amd64_or, pn_amd64_or_res,
	                    match_immediate | match_am | match_mode_neutral
	                    | match_commutative);
}

static ir_node *gen_Mul(ir_node *const node)
{
	ir_node *op1  = get_Mul_left(node);
	ir_node *op2  = get_Mul_right(node);
	ir_mode *mode = get_irn_mode(node);

	if (get_mode_size_bits(mode) < 16) {
		/* imulb only supports rax - reg form */
		ir_node *new_node =
		            gen_binop_rax(node, op1, op2, new_bd_amd64_imul_1op,
		                          match_mode_neutral
		                          | match_commutative);
		return new_r_Proj(new_node, mode_gp, pn_amd64_imul_1op_res_low);
	} else if (mode_is_float(mode)) {
		return gen_binop_am(node, op1, op2, new_bd_amd64_muls,
		                    pn_amd64_muls_res, match_commutative | match_am);
	} else {
		return ge