/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief   Lowering of Calls with compound parameters and return types.
 * @author  Michael Beck, Matthias Braun
 */
#include <stdbool.h>

#include "array.h"
#include "be.h"
#include "panic.h"
#include "firm_types.h"
#include "heights.h"
#include "ircons.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irmemory.h"
#include "irmemory_t.h"
#include "irmode_t.h"
#include "irnode_t.h"
#include "iroptimize.h"
#include "irprog_t.h"
#include "irtools.h"
#include "lower_calls.h"
#include "lowering.h"
#include "pmap.h"
#include "type_t.h"
#include "util.h"

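/* Cache of pointer types created for element types and of method types that
 * have already been lowered. */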
static pmap    *pointer_types;
static pmap    *lowered_mtps;

typedef struct lowering_env_t {
	compound_call_lowering_flags flags;
	decide_aggregate_ret_func    aggregate_ret;
} lowering_env_t;

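/** An aggregate_spec_t stating that no part of an aggregate is returned in
 *  registers. */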
aggregate_spec_t const no_values_aggregate_spec = {
	.n_values = 0,
	.modes    = NULL,
};

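/**
 * Trivial decide_aggregate_ret_func: never return an aggregate in registers,
 * so every compound result is passed through a hidden pointer parameter.
 */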
static aggregate_spec_t const *get_no_values(ir_type const *const type)
{
	(void)type;
	return &no_values_aggregate_spec;
}

/**
 * Default implementation for finding a pointer type for a given element type.
 * Simply create a new one.
 */
static ir_type *get_pointer_type(ir_type *dest_type)
{
	ir_type *res = pmap_get(ir_type, pointer_types, dest_type);
	if (res == NULL) {
		res = new_type_pointer(dest_type);
		pmap_insert(pointer_types, dest_type, res);
	}
	return res;
}

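/**
 * Adjust the parameter numbers of all parameter entities on the frame type
 * after @p arg_shift hidden parameters have been added in front.
 */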
static void fix_parameter_entities(ir_graph *irg, unsigned arg_shift)
{
	ir_type *frame_type = get_irg_frame_type(irg);
	size_t   n_members  = get_compound_n_members(frame_type);

	for (size_t i = 0; i < n_members; ++i) {
		ir_entity *member = get_compound_member(frame_type, i);
		if (!is_parameter_entity(member))
			continue;

		/* increase parameter number since we added a new parameter in front */
		size_t num = get_entity_parameter_number(member);
		if (num == IR_VA_START_PARAMETER_NUMBER)
			continue;
		set_entity_parameter_number(member, num + arg_shift);
	}
}

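/**
 * Remove all parameter entities of aggregate type from the graph's frame type;
 * after lowering, these parameters are accessed through pointers instead.
 */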
static void remove_compound_param_entities(ir_graph *irg)
{
	ir_type *frame_type = get_irg_frame_type(irg);
	size_t   n_members  = get_compound_n_members(frame_type);

	for (size_t i = n_members; i-- > 0; ) {
		ir_entity *member = get_compound_member(frame_type, i);
		if (!is_parameter_entity(member))
			continue;

		ir_type *type = get_entity_type(member);
		if (is_aggregate_type(type)) {
			free_entity(member);
		}
	}
}

/**
 * Creates a new lowered type for a method type with compound
 * arguments. The new type is associated to the old one and returned.
 */
static ir_type *lower_mtp(lowering_env_t const *const env, ir_type *mtp)
{
	if (!is_Method_type(mtp))
		return mtp;

	ir_type *lowered = pmap_get(ir_type, lowered_mtps, mtp);
	if (lowered != NULL)
		return lowered;

	/* check if the type has to be lowered at all */
	bool   must_be_lowered = false;
	size_t n_params        = get_method_n_params(mtp);
	size_t n_ress          = get_method_n_ress(mtp);
	for (size_t i = 0; i < n_ress; ++i) {
		ir_type *res_tp = get_method_res_type(mtp, i);
		if (is_aggregate_type(res_tp)) {
			must_be_lowered = true;
			break;
		}
	}
	if (!must_be_lowered && !(env->flags & LF_DONT_LOWER_ARGUMENTS)) {
		for (size_t i = 0; i < n_params; ++i) {
			ir_type *param_type = get_method_param_type(mtp, i);
			if (is_aggregate_type(param_type)) {
				must_be_lowered = true;
				break;
			}
		}
	}
	if (!must_be_lowered)
		return mtp;

	ir_type **params    = ALLOCANZ(ir_type*, n_params + n_ress);
	ir_type **results   = ALLOCANZ(ir_type*, n_ress * 2);
	size_t    nn_params = 0;
	size_t    nn_ress   = 0;

	/* add a hidden parameter in front for every compound result */
	for (size_t i = 0; i < n_ress; ++i) {
		ir_type *const res_tp = get_method_res_type(mtp, i);

		if (is_aggregate_type(res_tp)) {
			aggregate_spec_t const *const ret_spec = env->aggregate_ret(res_tp);
			unsigned                const n_values = ret_spec->n_values;
			if (n_values > 0) {
				for (unsigned i = 0; i < n_values; ++i) {
					ir_mode *const mode = ret_spec->modes[i];
					results[nn_ress++] = get_type_for_mode(mode);
				}
			} else {
				/* this compound will be allocated on the caller's stack and its
				   address will be transmitted as a hidden parameter. */
				ir_type *ptr_tp = get_pointer_type(res_tp);
				params[nn_params++] = ptr_tp;
				if (env->flags & LF_RETURN_HIDDEN)
					results[nn_ress++] = ptr_tp;
			}
		} else {
			/* scalar result */
			results[nn_ress++] = res_tp;
		}
	}
	/* copy over parameter types */
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *param_type = get_method_param_type(mtp, i);
		if (!(env->flags & LF_DONT_LOWER_ARGUMENTS)
		    && is_aggregate_type(param_type)) {
			/* turn parameter into a pointer type */
			param_type = new_type_pointer(param_type);
		}
		params[nn_params++] = param_type;
	}
	assert(nn_ress <= n_ress*2);
	assert(nn_params <= n_params + n_ress);

	unsigned cconv = get_method_calling_convention(mtp);
	if (nn_params > n_params)
		cconv |= cc_compound_ret;

	mtp_additional_properties mtp_properties = get_method_additional_properties(mtp);
	/* after lowering the call is not pure anymore, since it writes to the
	 * memory for the return value passed to it */
	mtp_properties &= ~(mtp_property_no_write | mtp_property_pure);

	/* create the new type */
	bool const is_variadic = is_method_variadic(mtp);
	lowered = new_type_method(nn_params, nn_ress, is_variadic, cconv, mtp_properties);
	set_type_dbg_info(lowered, get_type_dbg_info(mtp));

	/* fill it */
	for (size_t i = 0; i < nn_params; ++i)
		set_method_param_type(lowered, i, params[i]);
	for (size_t i = 0; i < nn_ress; ++i)
		set_method_res_type(lowered, i, results[i]);

	/* associate the lowered type with the original one for easier access */
	set_higher_type(lowered, mtp);
	pmap_insert(lowered_mtps, mtp, lowered);

	return lowered;
}

/**
 * A call list entry.
 */
typedef struct cl_entry cl_entry;
struct cl_entry {
	cl_entry *next;   /**< Pointer to the next entry. */
	ir_node  *call;   /**< Pointer to the Call node. */
	ir_node  *copyb;  /**< List of all CopyB nodes. */
	ir_node  *proj_M;
	ir_node  *proj_res;
	unsigned  n_compound_ret;
	bool      has_compound_param;
};

/**
 * Walker environment for fix_args_and_collect_calls().
 */
typedef struct wlk_env {
	unsigned             arg_shift;        /**< The Argument index shift for parameters. */
	struct obstack       obst;             /**< An obstack to allocate the data on. */
	cl_entry             *cl_list;         /**< The call list. */
	compound_call_lowering_flags flags;
	lowering_env_t const *env;
	ir_type              *mtp;             /**< original mtp before lowering */
	ir_type              *lowered_mtp;     /**< The lowered method type of the current irg if any. */
	ir_heights_t         *heights;         /**< Heights for reachability check. */
	bool                  only_local_mem:1;/**< Set if only local memory access was found. */
	bool                  changed:1;       /**< Set if the current graph was changed. */
	ir_node             **param_members;
} wlk_env;

/**
 * Return the call list entry of a call node.
 * If no entry exists yet, allocate one and enter the node into
 * the call list of the environment.
 *
 * @param call   A Call node.
 * @param env    The environment.
 */
static cl_entry *get_call_entry(ir_node *call, wlk_env *env)
{
	cl_entry *res = (cl_entry*)get_irn_link(call);
	if (res == NULL) {
		res = OALLOCZ(&env->obst, cl_entry);
		res->next = env->cl_list;
		res->call = call;
		set_irn_link(call, res);
		env->cl_list = res;
	}
	return res;
}

/**
 * Finds the base address of an address by skipping Members and address
 * calculation.
 *
 * @param ptr   the address
 * @param pEnt  points to the base entity if any
 */
static ir_node *find_base_adr(ir_node *ptr, ir_entity **pEnt)
{
	ir_entity *ent = NULL;
	assert(mode_is_reference(get_irn_mode(ptr)));

	for (;;) {
		if (is_Member(ptr)) {
			ent = get_Member_entity(ptr);
			ptr = get_Member_ptr(ptr);
		} else if (is_Sel(ptr)) {
			ptr = get_Sel_ptr(ptr);
		} else if (is_Add(ptr)) {
			ir_node *left = get_Add_left(ptr);
			if (mode_is_reference(get_irn_mode(left)))
				ptr = left;
			else
				ptr = get_Add_right(ptr);
			ent = NULL;
		} else if (is_Sub(ptr)) {
			ptr = get_Sub_left(ptr);
			ent = NULL;
		} else {
			*pEnt = ent;
			return ptr;
		}
	}
}

/**
 * Check if a given pointer represents non-local memory.
 */
static void check_ptr(ir_node *ptr, wlk_env *env)
{
	/* still alias free */
	ir_entity *ent;
	ir_node *base_ptr = find_base_adr(ptr, &ent);
	ir_storage_class_class_t sc
		= get_base_sc(classify_pointer(ptr, base_ptr));
	if (sc != ir_sc_localvar && sc != ir_sc_malloced) {
		/* non-local memory access */
		env->only_local_mem = false;
	}
}

/*
 * Returns true if a Call is surely a self-recursive Call.
 * Beware: if this function returns false, the call might still be
 * self-recursive!
 */
static bool is_self_recursive_Call(const ir_node *call)
{
	const ir_entity *callee = get_Call_callee(call);
	if (callee != NULL) {
		const ir_graph *irg = get_entity_linktime_irg(callee);
		if (irg == get_irn_irg(call))
			return true;
	}
	return false;
}

/**
 * Post walker: shift all parameter indexes
 * and collect Calls with compound returns in the call list.
 * If a non-alias free memory access is found, reset the alias free
 * flag.
 */
static void fix_args_and_collect_calls(ir_node *n, void *ctx)
{
	wlk_env *env = (wlk_env*)ctx;

	switch (get_irn_opcode(n)) {
	case iro_Load:
	case iro_Store:
		if (env->only_local_mem) {
			ir_node *ptr = get_irn_n(n, 1);
			check_ptr(ptr, env);
		}
		break;
	case iro_Proj: {
		ir_node  *pred = get_Proj_pred(n);
		ir_graph *irg  = get_irn_irg(n);
		if (pred == get_irg_args(irg)) {
			unsigned arg_shift = env->arg_shift;
			if (arg_shift > 0) {
				unsigned pn = get_Proj_num(n);
				set_Proj_num(n, pn + arg_shift);
				env->changed = true;
			}
		} else if (is_Call(pred)) {
			unsigned pn = get_Proj_num(n);
			if (pn == pn_Call_M) {
				cl_entry *entry = get_call_entry(pred, env);
				entry->proj_M = n;
			} else if (pn == pn_Call_T_result) {
				cl_entry *entry = get_call_entry(pred, env);
				entry->proj_res = n;
			}
		}
		break;
	}
	case iro_Call: {
		ir_type *ctp      = get_Call_type(n);
		size_t   n_ress   = get_method_n_ress(ctp);
		size_t   n_params = get_method_n_params(ctp);
		if (! is_self_recursive_Call(n)) {
			/* any non self recursive call might access global memory */
			env->only_local_mem = false;
		}

		/* check for compound returns */
		for (size_t i = 0; i < n_ress; ++i) {
			ir_type *type = get_method_res_type(ctp, i);
			if (is_aggregate_type(type)) {
				/*
				 * This is a call with a compound return. As the result
				 * might be ignored, we must put it in the list.
				 */
				cl_entry *entry = get_call_entry(n, env);
				++entry->n_compound_ret;
			}
		}
		for (size_t i = 0; i < n_params; ++i) {
			ir_type *type = get_method_param_type(ctp, i);
			if (is_aggregate_type(type)) {
				cl_entry *entry = get_call_entry(n, env);
				entry->has_compound_param = true;
				break;
			}
		}
		break;
	}
	case iro_CopyB: {
		ir_node *src = get_CopyB_src(n);
		if (env->only_local_mem) {
			check_ptr(get_CopyB_src(n), env);
			if (env->only_local_mem)
				check_ptr(get_CopyB_dst(n), env);
		}
		/* check for compound returns */
		if (is_Proj(src)) {
			ir_node *proj = get_Proj_pred(src);
			if (is_Proj(proj) && get_Proj_num(proj) == pn_Call_T_result) {
				ir_node *call = get_Proj_pred(proj);
				if (is_Call(call)) {
					ir_type *ctp = get_Call_type(call);
					if (is_aggregate_type(get_method_res_type(ctp, get_Proj_num(src)))) {
						/* found a CopyB from compound Call result */
						cl_entry *e = get_call_entry(call, env);
						set_irn_link(n, e->copyb);
						e->copyb = n;
					}
				}
			}
		}
		break;
	}
	case iro_Member: {
		ir_entity *entity = get_Member_entity(n);
		if (!is_parameter_entity(entity))
			break;
		ir_type *type = get_entity_type(entity);
		if (is_aggregate_type(type)) {
			if (! (env->flags & LF_DONT_LOWER_ARGUMENTS))
				ARR_APP1(ir_node*, env->param_members, n);
			/* we need to copy compound parameters */
			env->only_local_mem = false;
		}
		break;
	}
	default:
		/* do nothing */
		break;
	}
}

/**
 * Returns true if a node is a compound address of a frame-type entity.
 *
 * @param ft   the frame type
 * @param adr  the node
 */
static bool is_compound_address(ir_type *ft, ir_node *adr)
{
	if (!is_Member(adr))
		return false;
	ir_entity *ent = get_Member_entity(adr);
	return get_entity_owner(ent) == ft;
}

/** A pair for the copy-return-optimization. */
typedef struct cr_pair {
	ir_entity *ent; /**< the entity that can be removed from the frame */
	ir_node *arg;   /**< the argument that replaces the entity's address */
} cr_pair;

typedef struct copy_return_opt_env {
	cr_pair *arr;
	size_t   n_pairs;
} copy_return_opt_env;

/**
 * Post walker: fixes all entity addresses for the copy-return
 * optimization.
 *
 * Note: We expect the length of the cr_pair array (i.e. the number of compound
 * return values) to be 1 (C, C++) in almost all cases, so ignore the
 * linear search complexity here.
 */
static void do_copy_return_opt(ir_node *n, void *ctx)
{
	if (is_Member(n)) {
		copy_return_opt_env *env = (copy_return_opt_env*)ctx;
		ir_entity *ent = get_Member_entity(n);

		for (size_t i = 0, n_pairs = env->n_pairs; i < n_pairs; ++i) {
			if (ent == env->arr[i].ent) {
				exchange(n, env->arr[i].arg);
				break;
			}
		}
	}
}

/**
 * Return a Member node that selects a dummy argument of type tp.
 *
 * @param block  the block where a newly created Member should be placed
 * @param tp     the type of the dummy entity that should be created
 */
static ir_node *get_dummy_member(ir_node *block, ir_type *tp)
{
	ir_graph *irg = get_irn_irg(block);
	ir_type  *ft  = get_irg_frame_type(irg);
	if (get_type_state(ft) == layout_fixed) {
		/* Fix the layout again */
		panic("fixed layout not implemented");
	}

	ident     *dummy_id = id_unique("call_result");
	ir_entity *ent      = new_entity(ft, dummy_id, tp);
	return new_r_Member(block, get_irg_frame(irg), ent);
}

/**
 * Find the destination address for every compound result of a Call, reusing
 * the destination of a following CopyB where possible (and removing that
 * CopyB), or creating a dummy frame entity if the result is ignored.
 */
static void get_dest_addrs(const cl_entry *entry, ir_node **ins,
                           const ir_type *orig_ctp, wlk_env *env)
{
	unsigned n_args = 0;
	for (ir_node *next, *copyb = entry->copyb; copyb != NULL; copyb = next) {
		ir_node *src = get_CopyB_src(copyb);
		size_t   idx = get_Proj_num(src);
		next = (ir_node*)get_irn_link(copyb);

		/* consider only the first CopyB */
		if (ins[idx] != NULL)
			continue;

		ir_node *call       = entry->call;
		ir_node *call_block = get_nodes_block(call);
		ir_node *dst        = get_CopyB_dst(copyb);
		ir_node *dst_block  = get_nodes_block(dst);

		/* Check whether we can use the destination of the CopyB for the call. */
		if (!block_dominates(dst_block, call_block))
			continue;

		if (dst_block == call_block) {
			ir_heights_t *heights = env->heights;
			if (heights == NULL) {
				ir_graph *irg = get_irn_irg(call_block);
				heights = heights_new(irg);
				env->heights = heights;
			}

			/* Do not optimize the CopyB if the destination depends on the
			 * call. */
			if (heights_reachable_in_block(heights, dst, call))
				continue;
		}

		ir_graph *irg   = get_irn_irg(dst);
		ir_node  *frame = get_irg_frame(irg);
		if (!is_Member(dst) || get_Member_ptr(dst) != frame)
			continue;

		ir_entity *dst_ent = get_Member_entity(dst);
		if (get_entity_usage(dst_ent) & ir_usage_address_taken)
			continue;

		/* Special case for calls with NoMem memory input. This can happen
		 * for mtp_property_const & mtp_property_terminates functions.
		 * The call needs a memory input after lowering, so patch it here
		 * to be the input of the CopyB. Note that in case of multiple CopyB
		 * return values this code may break the order: fix it if you find a
		 * language that actually uses this. */
		ir_node *copyb_mem = get_CopyB_mem(copyb);
		ir_node *call_mem  = get_Call_mem(call);
		if (is_NoMem(call_mem)) {
			set_Call_mem(call, copyb_mem);
			copyb_mem = new_r_Proj(call, mode_M, pn_Call_M);
		}

		ins[idx] = dst;
		/* get rid of the CopyB */
		exchange(copyb, copyb_mem);
		++n_args;
	}

	/* now create dummy entities for functions with ignored return values */
	unsigned n_compound_ret = entry->n_compound_ret;
	if (n_args < n_compound_ret) {
		for (size_t i = 0, j = 0, n_ress = get_method_n_ress(orig_ctp);
		     i < n_ress; ++i) {
			ir_type *rtp = get_method_res_type(orig_ctp, i);
			if (is_aggregate_type(rtp)) {
				if (ins[j] == NULL)
					ins[j] = get_dummy_member(get_nodes_block(entry->call), rtp);
				++j;
			}
		}
	}
}

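/**
 * Rewrite the users of a Call's compound result that is returned in scalar
 * values (according to @p ret_spec): the returned values are stored to
 * @p base_addr and the memory users of the Call are rerouted to the
 * resulting Sync node.
 */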
static void fix_int_return(cl_entry const *const entry,
                           ir_node *const base_addr,
                           aggregate_spec_t const *const ret_spec,
                           long const orig_pn, long const pn)
{
	ir_node  *const call  = entry->call;
	ir_node  *const block = get_nodes_block(call);
	ir_graph *const irg   = get_irn_irg(base_addr);

	/* we need edges activated here */
	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);

	/* if the Call throws an exception, then we cannot add instructions
	 * immediately behind it as the call ends the basic block */
	assert(!ir_throws_exception(call));
	ir_mode *const mode_ref = get_irn_mode(base_addr);

	ir_node *proj_mem = entry->proj_M;
	if (proj_mem == NULL)
		proj_mem = new_r_Proj(call, mode_M, pn_Call_M);
	ir_node *proj_res = entry->proj_res;
	if (proj_res == NULL)
		proj_res = new_r_Proj(call, mode_T, pn_Call_T_result);
	/* reroute old users */
	ir_node *const res_user = get_Proj_for_pn(proj_res, orig_pn);
	if (res_user != NULL)
		edges_reroute(res_user, base_addr);

	/* very hacky: reroute all memory users to a dummy node, which we will
	 * later reroute to the new memory */
	ir_node *dummy = new_r_Dummy(irg, mode_M);
	edges_reroute(proj_mem, dummy);

	unsigned  const n_values = ret_spec->n_values;
	ir_node **const sync_in  = ALLOCAN(ir_node*, n_values);
	int             offset   = 0;
	for (unsigned i = 0; i < n_values; ++i) {
		ir_mode *const mode = ret_spec->modes[i];
		ir_node *      addr = base_addr;
		if (offset > 0) {
			ir_mode *mode_offset = get_reference_offset_mode(mode_ref);
			ir_node *offset_cnst = new_r_Const_long(irg, mode_offset, offset);
			addr = new_r_Add(block, addr, offset_cnst);
		}
		ir_node *const value     = new_r_Proj(proj_res, mode, pn+i);
		ir_type *const type      = get_type_for_mode(mode);
		ir_node *const store     = new_r_Store(block, proj_mem, addr, value,
		                                       type, cons_none);
		ir_node *const store_mem = new_r_Proj(store, mode_M, pn_Store_M);
		sync_in[i] = store_mem;
		offset += get_mode_size_bytes(mode);
	}

	ir_node *const sync = new_r_Sync(block, n_values, sync_in);
	edges_reroute(dummy, sync);
}

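/**
 * Fix a Call with compound return values: find a destination address for each
 * compound result, then either store the returned values there or pass the
 * addresses as additional (hidden) parameters.
 */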
static void fix_call_compound_ret(const cl_entry *entry,
                                  const ir_type *orig_ctp, wlk_env *env)
{
	/* produce destination addresses */
	unsigned  n_compound_ret = entry->n_compound_ret;
	ir_node **dest_addrs     = ALLOCANZ(ir_node*, n_compound_ret);
	get_dest_addrs(entry, dest_addrs, orig_ctp, env);

	/* now add parameters for destinations or produce stores if compound is
	 * returned as values */
	ir_node  *call     = entry->call;
	size_t    n_params = get_Call_n_params(call);
	size_t    max_ins  = n_params + (n_Call_max+1) + n_compound_ret;
	ir_node **new_in   = NULL;
	size_t    pos      = (size_t)-1;
	long      pn       = 0;
	for (size_t i = 0, c = 0, n_ress = get_method_n_ress(orig_ctp);
	     i < n_ress; ++i) {
		ir_type *type = get_method_res_type(orig_ctp, i);
		if (!is_aggregate_type(type)) {
			++pn;
			continue;
		}

		ir_node *dest_addr = dest_addrs[c++];
		aggregate_spec_t const *const ret_spec = env->env->aggregate_ret(type);
		unsigned                const n_values = ret_spec->n_values;
		if (n_values > 0) {
			fix_int_return(entry, dest_addr, ret_spec, i, pn);
			pn += n_values;
		} else {
			/* add parameter with destination */
			/* lazily construct new_input list */
			if (new_in == NULL) {
				new_in = ALLOCAN(ir_node*, max_ins);
				new_in[n_Call_mem] = get_Call_mem(call);
				new_in[n_Call_ptr] = get_Call_ptr(call);
				pos = 2;
				assert(pos == n_Call_max+1);
			}
			new_in[pos++] = dest_addr;

			if (env->flags & LF_RETURN_HIDDEN)
				++pn;
		}
	}

	/* do we have new inputs? */
	if (new_in != NULL) {
		/* copy all other parameters */
		for (size_t i = 0; i < n_params; ++i) {
			ir_node *param = get_Call_param(call, i);
			new_in[pos++] = param;
		}
		assert(pos <= max_ins);
		set_irn_in(call, pos, new_in);
	}
}

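/** Create a frame entity that holds the copy of a compound call argument. */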
static ir_entity *create_compound_arg_entity(ir_graph *irg, ir_type *type)
{
	ir_type   *frame  = get_irg_frame_type(irg);
	ident     *id     = id_unique("$compound_param");
	ir_entity *entity = new_entity(frame, id, type);
	/* TODO: we could do some optimizations here and create a big union type
	 * for all different call types in a function */
	return entity;
}

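/**
 * Copy each compound argument of a Call into a new frame entity and pass the
 * address of that copy instead of the aggregate value.
 */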
static void fix_call_compound_params(const cl_entry *entry, const ir_type *ctp, wlk_env *env)
{
	ir_node  *call     = entry->call;
	dbg_info *dbgi     = get_irn_dbg_info(call);
	ir_node  *mem      = get_Call_mem(call);
	ir_graph *irg      = get_irn_irg(call);
	ir_node  *frame    = get_irg_frame(irg);
	size_t    n_params = get_method_n_params(ctp);

	for (size_t i = 0; i < n_params; ++i) {
		ir_type *type = get_method_param_type(ctp, i);
		if (!is_aggregate_type(type) || (env->flags & LF_DONT_LOWER_ARGUMENTS))
			continue;

		ir_node   *arg         = get_Call_param(call, i);
		ir_entity *arg_entity  = create_compound_arg_entity(irg, type);
		ir_node   *block       = get_nodes_block(call);
		ir_node   *sel         = new_rd_Member(dbgi, block, frame, arg_entity);
		bool       is_volatile = is_partly_volatile(arg);
		mem = new_rd_CopyB(dbgi, block, mem, sel, arg, type, is_volatile ? cons_volatile : cons_none);
		set_Call_param(call, i, sel);
	}
	set_Call_mem(call, mem);
}

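/**
 * Rewrite all collected Calls: set their lowered method type and fix compound
 * parameters and compound return values.
 */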
static void fix_calls(wlk_env *env)
{
	for (const cl_entry *entry = env->cl_list; entry; entry = entry->next) {
		if (!entry->has_compound_param && entry->n_compound_ret == 0)
			continue;
		ir_node *call        = entry->call;
		ir_type *ctp         = get_Call_type(call);
		ir_type *lowered_mtp = lower_mtp(env->env, ctp);
		set_Call_type(call, lowered_mtp);

		if (entry->has_compound_param) {
			fix_call_compound_params(entry, ctp, env);
		}
		if (entry->n_compound_ret > 0) {
			fix_call_compound_ret(entry, ctp, env);
		}
		env->changed = true;
	}
}

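/**
 * Transform a Return node: aggregate results are either loaded and returned
 * as scalar values or copied to the hidden result pointer, applying the
 * copy-return optimization where possible.
 */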
static void transform_return(ir_node *ret, size_t n_ret_com, wlk_env *env)
{
	ir_node   *block      = get_nodes_block(ret);
	ir_graph  *irg        = get_irn_irg(ret);
	ir_type   *mtp        = env->mtp;
	size_t     n_ress     = get_method_n_ress(mtp);
	ir_node   *mem        = get_Return_mem(ret);
	ir_node   *args       = get_irg_args(irg);
	ir_type   *frame_type = get_irg_frame_type(irg);
	size_t     n_cr_opt   = 0;
	size_t     n_in       = 1;
	ir_node  **new_in     = ALLOCAN(ir_node*, n_ress*2 + 1);
	cr_pair   *cr_opt     = ALLOCAN(cr_pair, n_ret_com);

	for (size_t i = 0, k = 0; i < n_ress; ++i) {
		ir_node *pred = get_Return_res(ret, i);
		ir_type *type = get_method_res_type(mtp, i);
		if (!is_aggregate_type(type)) {
			new_in[n_in++] = pred;
			continue;
		}

		aggregate_spec_t const *const ret_spec = env->env->aggregate_ret(type);
		unsigned                const n_values = ret_spec->n_values;
		if (n_values > 0) {
			if (is_Unknown(pred)) {
				for (unsigned i = 0; i < n_values; ++i) {
					new_in[n_in++] = new_r_Unknown(irg, ret_spec->modes[i]);
				}
			} else {
				ir_node **const sync_in = ALLOCAN(ir_node*, n_values);
				int             offset  = 0;
				for (unsigned i = 0; i < n_values; ++i) {
					ir_node *      addr     = pred;
					ir_mode *const mode_ref = get_irn_mode(addr);
					if (offset > 0) {
						ir_mode *const mode_offset
							= get_reference_offset_mode(mode_ref);
						ir_node *const offset_cnst
							= new_r_Const_long(irg, mode_offset, offset);
						addr = new_r_Add(block, addr, offset_cnst);
					}
					ir_mode *const mode = ret_spec->modes[i];
					ir_node *const load = new_r_Load(block, mem, addr, mode,
					                                 type, cons_none);
					sync_in[i]     = new_r_Proj(load, mode_M, pn_Load_M);
					new_in[n_in++] = new_r_Proj(load, mode, pn_Load_res);
					offset += get_mode_size_bytes(mode);
				}
				mem = new_r_Sync(block, n_values, sync_in);
			}
			continue;
		}

		ir_node *arg = new_r_Proj(args, mode_P, k++);
		if (env->flags & LF_RETURN_HIDDEN)
			new_in[n_in++] = arg;

		/* nothing to do when returning an unknown value */
		if (is_Unknown(pred))
			continue;

		/* Unfortunately, detecting whether the copy-return optimization is
		 * possible isn't that simple: we must make sure that the hidden
		 * address stays alias-free during the whole function.
		 * A simple heuristic: all Loads/Stores inside
		 * the function access only the local frame. */
		if (env->only_local_mem && is_compound_address(frame_type, pred)) {
			/* we can do the copy-return optimization here */
			cr_opt[n_cr_opt].ent = get_Member_entity(pred);
			cr_opt[n_cr_opt].arg = arg;
			++n_cr_opt;
		} else {
			/* copy-return optimization is impossible, do the copy. */
			bool is_volatile = is_partly_volatile(pred);
			mem = new_r_CopyB(block, mem, arg, pred, type,
			                  is_volatile ? cons_volatile : cons_none);
		}
	}
	/* replace the in of the Return */
	assert(n_in <= n_ress*2 + 1);
	new_in[0] = mem;
	set_irn_in(ret, n_in, new_in);

	if (n_cr_opt > 0) {
		copy_return_opt_env env;
		env.arr     = cr_opt;
		env.n_pairs = n_cr_opt;
		irg_walk_graph(irg, NULL, do_copy_return_opt, &env);

		for (size_t c = 0; c < n_cr_opt; ++c)
			free_entity(cr_opt[c].ent);
	}

	env->changed = true;
}

/**
 * Transform a graph. If it has compound parameter returns,
 * remove them and use the hidden parameter instead.
 * If it calls methods with compound parameter returns, add hidden
 * parameters.
 */
static void transform_irg(lowering_env_t const *const env, ir_graph *const irg)
{
	ir_entity *ent         = get_irg_entity(irg);
	ir_type   *mtp         = get_entity_type(ent);
	size_t     n_ress      = get_method_n_ress(mtp);
	size_t     n_params    = get_method_n_params(mtp);
	size_t     n_param_com = 0;

	/* calculate the number of compound returns */
	size_t   n_ret_com = 0;
	unsigned arg_shift = 0;
	for (size_t i = 0; i < n_ress; ++i) {
		ir_type *type = get_method_res_type(mtp, i);
		if (is_aggregate_type(type)) {
			++n_ret_com;

			aggregate_spec_t const *const ret_spec = env->aggregate_ret(type);
			/* if we don't return it as values, then we will add a new parameter
			 * with the address of the destination memory */
			if (ret_spec->n_values == 0)
				++arg_shift;
		}
	}
	for (size_t i = 0; i < n_params; ++i) {
		ir_type *type = get_method_param_type(mtp, i);
		if (is_aggregate_type(type))
			++n_param_com;
	}

	if (arg_shift > 0)
		fix_parameter_entities(irg, arg_shift);

	/* much easier if we have only one return */
	if (n_ret_com != 0)
		assure_irg_properties(irg, IR_GRAPH_PROPERTY_ONE_RETURN);

	ir_type *lowered_mtp = lower_mtp(env, mtp);
	set_entity_type(ent, lowered_mtp);

	wlk_env walk_env = {
		.arg_shift      = arg_shift,
		.flags          = env->flags,
		.env            = env,
		.mtp            = mtp,
		.lowered_mtp    = lowered_mtp,
		.param_members  = NEW_ARR_F(ir_node*, 0),
		.only_local_mem = true,
	};
	obstack_init(&walk_env.obst);

	/* scan the code, fix argument numbers and collect calls. */
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
	irg_walk_graph(irg, firm_clear_link, NULL, &walk_env);
	irg_walk_graph(irg, fix_args_and_collect_calls, NULL, &walk_env);

	/* fix parameter sels */
	ir_node *args = get_irg_args(irg);
	for (size_t i = 0, n = ARR_LEN(walk_env.param_members); i < n; ++i) {
		ir_node   *member = walk_env.param_members[i];
		ir_entity *entity = get_Member_entity(member);
		size_t     num    = get_entity_parameter_number(entity);
		ir_node   *ptr    = new_r_Proj(args, mode_P, num);
		exchange(member, ptr);
	}
	DEL_ARR_F(walk_env.param_members);

	if (n_param_com > 0 && !(env->flags & LF_DONT_LOWER_ARGUMENTS))
		remove_compound_param_entities(irg);

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
	fix_calls(&walk_env);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

	/* transform return nodes */
	if (n_ret_com > 0) {
		ir_node *endbl = get_irg_end_block(irg);
		foreach_irn_in(endbl, i, pred) {
			if (is_Return(pred)) {
				transform_return(pred, n_ret_com, &walk_env);
				break;
			}
		}
	}

	if (walk_env.heights != NULL)
		heights_free(walk_env.heights);
	obstack_free(&walk_env.obst, NULL);
	confirm_irg_properties(irg, walk_env.changed
		? IR_GRAPH_PROPERTIES_CONTROL_FLOW : IR_GRAPH_PROPERTIES_ALL);
}

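/** Type/entity walker callback: replace method types by their lowered
 *  counterparts. */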
static void lower_method_types(ir_type *const type, ir_entity *const entity,
                               void *const data)
{
	lowering_env_t const *const env = (lowering_env_t const*)data;

	/* fix method entities */