Commit f6d766be authored by Matthias Braun

move beabihelper to betranshlp/bera

parent 6d813381
@@ -19,7 +19,6 @@
#include "debug.h"
#include "panic.h"
#include "be_t.h"
#include "beabihelper.h"
#include "bearch.h"
#include "benode.h"
#include "belower.h"
@@ -27,6 +26,7 @@
#include "bemodule.h"
#include "begnuas.h"
#include "belistsched.h"
#include "bera.h"
#include "bestack.h"
#include "bespillutil.h"
@@ -24,7 +24,6 @@
#include "beutil.h"
#include "bearch_amd64_t.h"
#include "beirg.h"
#include "beabihelper.h"
#include "besched.h"
#include "amd64_cconv.h"
@@ -7,7 +7,6 @@
* @file
* @brief The main amd64 backend driver file.
*/
#include "beabihelper.h"
#include "bearch.h"
#include "beflags.h"
#include "begnuas.h"
@@ -16,6 +15,7 @@
#include "belower.h"
#include "bemodule.h"
#include "benode.h"
#include "bera.h"
#include "besched.h"
#include "bespillslots.h"
#include "bespillutil.h"
@@ -25,7 +25,6 @@
#include "beirg.h"
#include "beutil.h"
#include "betranshlp.h"
#include "beabihelper.h"
#include "arm_nodes_attr.h"
#include "arm_transform.h"
@@ -28,7 +28,6 @@
#include "irtools.h"
#include "util.h"
#include "beabihelper.h"
#include "bearch.h"
#include "benode.h"
#include "belower.h"
@@ -41,6 +40,7 @@
#include "begnuas.h"
#include "belistsched.h"
#include "beflags.h"
#include "bera.h"
#include "bestack.h"
#include "betranshlp.h"
/*
* This file is part of libFirm.
* Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @file
* @brief Helper functions for handling ABI constraints in the code
* selection phase.
* @author Matthias Braun
*/
#include "util.h"
#include "beabihelper.h"
#include "bearch.h"
#include "beirg.h"
#include "benode.h"
#include "besched.h"
#include "heights.h"
#include "ircons.h"
#include "iredges.h"
#include "irgwalk.h"
#include "irnodemap.h"
#include "irtools.h"
/**
* Tests whether a node has a real user and is not just kept by the End or
* Anchor node
*/
static bool has_real_user(const ir_node *node)
{
foreach_out_edge(node, edge) {
ir_node *user = get_edge_src_irn(edge);
if (!is_End(user) && !is_Anchor(user))
return true;
}
return false;
}
static ir_node *add_to_keep(ir_node *last_keep,
const arch_register_class_t *cls, ir_node *node)
{
if (last_keep != NULL) {
be_Keep_add_node(last_keep, cls, node);
} else {
ir_node *in[1] = { node };
ir_node *block = get_nodes_block(node);
ir_node *schedpoint;
last_keep = be_new_Keep(block, 1, in);
schedpoint = skip_Proj(node);
if (sched_is_scheduled(schedpoint)) {
sched_add_after(schedpoint, last_keep);
}
}
return last_keep;
}
static void add_missing_keep_walker(ir_node *node, void *data)
{
(void)data;
ir_mode *mode = get_irn_mode(node);
ir_node *last_keep;
if (mode != mode_T) {
if (!has_real_user(node)) {
const arch_register_req_t *req = arch_get_irn_register_req(node);
const arch_register_class_t *cls = req->cls;
if (cls == NULL
|| (cls->flags & arch_register_class_flag_manual_ra)) {
return;
}
add_to_keep(NULL, cls, node);
}
return;
}
unsigned n_outs = arch_get_irn_n_outs(node);
if (n_outs == 0)
return;
unsigned *const found_projs = rbitset_alloca(n_outs);
ir_node **const existing_projs = ALLOCANZ(ir_node*, n_outs);
foreach_out_edge(node, edge) {
ir_node *succ = get_edge_src_irn(edge);
ir_mode *mode = get_irn_mode(succ);
/* The node could be kept */
if (is_End(succ) || is_Anchor(succ))
continue;
if (mode == mode_M || mode == mode_X)
continue;
unsigned pn = get_Proj_num(succ);
assert(pn < n_outs);
existing_projs[pn] = succ;
if (!has_real_user(succ))
continue;
rbitset_set(found_projs, pn);
}
/* are keeps missing? */
last_keep = NULL;
for (unsigned i = 0; i < n_outs; ++i) {
ir_node *value;
const arch_register_req_t *req;
const arch_register_class_t *cls;
if (rbitset_is_set(found_projs, i)) {
continue;
}
req = arch_get_irn_register_req_out(node, i);
cls = req->cls;
if (cls == NULL || (cls->flags & arch_register_class_flag_manual_ra)) {
continue;
}
value = existing_projs[i];
if (value == NULL)
value = new_r_Proj(node, arch_register_class_mode(cls), i);
last_keep = add_to_keep(last_keep, cls, value);
}
}
void be_add_missing_keeps(ir_graph *irg)
{
irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);
}
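/* Illustration (not part of this file): for a mode_T node with an unused
 * output, the walker above materializes the missing Proj and keeps it.
 * Assuming a hypothetical two-output division node whose remainder is
 * unused, the graph
 *
 *   div  = ...                      (mode_T, 2 outputs)
 *   quot = Proj(div, 0)             (has real users)
 *
 * gains, after be_add_missing_keeps():
 *
 *   rem  = Proj(div, 1)             (freshly created)
 *   keep = be_Keep(rem)             (scheduled right after div)
 */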
/**
* Link the node into its block list as a new head.
*/
static void collect_node(ir_node *node)
{
ir_node *block = get_nodes_block(node);
ir_node *old = (ir_node*)get_irn_link(block);
set_irn_link(node, old);
set_irn_link(block, node);
}
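/* Example (sketch): after collecting n1 and then n2 from the same block B,
 * the intrusive list is
 *   link(B) == n2, link(n2) == n1, link(n1) == NULL
 * i.e. the block walker below sees the nodes in reverse collection order. */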
/**
* Post-walker: link all nodes that potentially access the stack into per-block lists.
*/
static void link_ops_in_block_walker(ir_node *node, void *data)
{
(void) data;
switch (get_irn_opcode(node)) {
case iro_Return:
case iro_Call:
collect_node(node);
break;
case iro_Alloc:
/* all non-stack Alloc nodes should have been lowered before the backend */
collect_node(node);
break;
case iro_Free:
collect_node(node);
break;
case iro_Builtin:
if (get_Builtin_kind(node) == ir_bk_return_address) {
ir_node *const param = get_Builtin_param(node, 0);
long const value = get_Const_long(param); /* must be Const */
if (value > 0) {
/* not the return address of the current function:
* we need the stack pointer for the frame climbing */
collect_node(node);
}
}
break;
default:
break;
}
}
static ir_heights_t *heights;
/**
* Checks whether a node is (transitively) data dependent on another one.
* Both nodes must be in the same basic block.
* @param n1 The first node.
* @param n2 The second node.
* @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
*/
static int dependent_on(const ir_node *n1, const ir_node *n2)
{
assert(get_nodes_block(n1) == get_nodes_block(n2));
return heights_reachable_in_block(heights, n1, n2);
}
/**
* Classical qsort() comparison function behavior:
*
* 0 if both elements are equal, neither node depends on the other
* +1 if first depends on second (first is greater)
* -1 if second depends on first (second is greater)
*/
static int cmp_call_dependency(const void *c1, const void *c2)
{
const ir_node *n1 = *(const ir_node **) c1;
const ir_node *n2 = *(const ir_node **) c2;
unsigned h1, h2;
if (dependent_on(n1, n2))
return 1;
if (dependent_on(n2, n1))
return -1;
/* The nodes have no depth order, but we need a total order because qsort()
* is not stable.
*
* Additionally, we need to respect transitive dependencies. Consider a
* Call a depending on Call b and an independent Call c.
* We MUST NOT order c > a and b > c. */
h1 = get_irn_height(heights, n1);
h2 = get_irn_height(heights, n2);
if (h1 < h2) return 1;
if (h1 > h2) return -1;
/* Same height, so use an arbitrary (but stable) order */
return get_irn_idx(n2) - get_irn_idx(n1);
}
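/* Worked example (hypothetical nodes): let Call a depend on Call b, and let
 * Call c be independent of both.  b < a follows from dependent_on() directly.
 * Because the height order is consistent with the dependency order, the
 * pairs (a,c) and (b,c) are decided by height, with the stable node-index
 * order as tie-breaker -- so an inconsistent result like c > a together with
 * b > c cannot arise, and qsort() sees a proper total order. */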
/**
* Block-walker: sorts dependencies and remembers them in the node map
*/
static void process_ops_in_block(ir_node *block, void *data)
{
ir_nodemap *map = (ir_nodemap*)data;
unsigned n;
unsigned n_nodes;
ir_node *node;
ir_node **nodes;
n_nodes = 0;
for (node = (ir_node*)get_irn_link(block); node != NULL;
node = (ir_node*)get_irn_link(node)) {
++n_nodes;
}
if (n_nodes == 0)
return;
nodes = XMALLOCN(ir_node*, n_nodes);
n = 0;
for (node = (ir_node*)get_irn_link(block); node != NULL;
node = (ir_node*)get_irn_link(node)) {
nodes[n++] = node;
}
assert(n == n_nodes);
/* order nodes according to their data dependencies */
QSORT(nodes, n_nodes, cmp_call_dependency);
/* remember the calculated dependencies in the map */
for (n = n_nodes-1; n > 0; --n) {
ir_node *node = nodes[n];
ir_node *pred = nodes[n-1];
ir_nodemap_insert(map, node, pred);
}
free(nodes);
}
struct be_stackorder_t {
ir_nodemap stack_order; /**< maps a stack node to its block-local predecessor. */
};
be_stackorder_t *be_collect_stacknodes(ir_graph *irg)
{
be_stackorder_t *env = XMALLOCZ(be_stackorder_t);
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
/* collect all potentially stack-accessing nodes */
irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, NULL);
ir_nodemap_init(&env->stack_order, irg);
/* use heights to create a total order for those nodes: this order is stored
* in the node map */
heights = heights_new(irg);
irg_block_walk_graph(irg, NULL, process_ops_in_block, &env->stack_order);
heights_free(heights);
ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
return env;
}
ir_node *be_get_stack_pred(const be_stackorder_t *env, const ir_node *node)
{
return ir_nodemap_get(ir_node, &env->stack_order, node);
}
void be_free_stackorder(be_stackorder_t *env)
{
ir_nodemap_destroy(&env->stack_order);
free(env);
}
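/* Typical lifecycle (sketch; the transformation step is backend-specific):
 *
 *   be_stackorder_t *order = be_collect_stacknodes(irg);
 *   ...during code selection, for each stack-accessing node:
 *   ir_node *pred = be_get_stack_pred(order, node);  (NULL if none in block)
 *   ...serialize node after pred's stack output...
 *   be_free_stackorder(order);
 */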
static void create_stores_for_type(ir_graph *irg, ir_type *type)
{
size_t n = get_compound_n_members(type);
ir_node *frame = get_irg_frame(irg);
ir_node *initial_mem = get_irg_initial_mem(irg);
ir_node *mem = initial_mem;
ir_node *first_store = NULL;
ir_node *start_block = get_irg_start_block(irg);
ir_node *args = get_irg_args(irg);
size_t i;
/* all parameter entities left in the frame type require stores.
* (The ones passed on the stack have been moved to the arg type) */
for (i = 0; i < n; ++i) {
ir_entity *entity = get_compound_member(type, i);
ir_type *tp = get_entity_type(entity);
ir_node *addr;
size_t arg;
if (!is_parameter_entity(entity))
continue;
arg = get_entity_parameter_number(entity);
if (arg == IR_VA_START_PARAMETER_NUMBER)
continue;
addr = new_r_Member(start_block, frame, entity);
if (entity->attr.parameter.doubleword_low_mode != NULL) {
ir_mode *mode = entity->attr.parameter.doubleword_low_mode;
ir_node *val0 = new_r_Proj(args, mode, arg);
ir_node *val1 = new_r_Proj(args, mode, arg+1);
ir_node *store0 = new_r_Store(start_block, mem, addr, val0,
tp, cons_none);
ir_node *mem0 = new_r_Proj(store0, mode_M, pn_Store_M);
size_t offset = get_mode_size_bits(mode)/8;
ir_mode *mode_ref = get_irn_mode(addr);
ir_mode *mode_offs = get_reference_mode_unsigned_eq(mode_ref);
ir_node *cnst = new_r_Const_long(irg, mode_offs, offset);
ir_node *next_addr = new_r_Add(start_block, addr, cnst, mode_ref);
ir_node *store1 = new_r_Store(start_block, mem0, next_addr, val1,
tp, cons_none);
mem = new_r_Proj(store1, mode_M, pn_Store_M);
if (first_store == NULL)
first_store = store0;
} else {
ir_mode *mode = is_compound_type(tp) ? mode_P : get_type_mode(tp);
ir_node *val = new_r_Proj(args, mode, arg);
ir_node *store = new_r_Store(start_block, mem, addr, val, tp, cons_none);
mem = new_r_Proj(store, mode_M, pn_Store_M);
if (first_store == NULL)
first_store = store;
}
}
if (mem != initial_mem) {
edges_reroute_except(initial_mem, mem, first_store);
set_irg_initial_mem(irg, initial_mem);
}
}
void be_add_parameter_entity_stores(ir_graph *irg)
{
ir_type *frame_type = get_irg_frame_type(irg);
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ir_type *between_type = layout->between_type;
create_stores_for_type(irg, frame_type);
if (between_type != NULL) {
create_stores_for_type(irg, between_type);
}
}
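/* Motivating example (source level): in
 *
 *   int f(int x) { g(&x); return x; }
 *
 * x arrives in a register, but &x forces a frame slot; the code above
 * stores the incoming register value to that slot at function entry and
 * reroutes the initial memory through the new Store chain. */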
unsigned be_get_n_allocatable_regs(const ir_graph *irg,
const arch_register_class_t *cls)
{
unsigned *const bs = rbitset_alloca(cls->n_regs);
be_get_allocatable_regs(irg, cls, bs);
return rbitset_popcount(bs, cls->n_regs);
}
void be_get_allocatable_regs(ir_graph const *const irg,
arch_register_class_t const *const cls,
unsigned *const raw_bitset)
{
be_irg_t *birg = be_birg_from_irg(irg);
unsigned *allocatable_regs = birg->allocatable_regs;
rbitset_clear_all(raw_bitset, cls->n_regs);
for (unsigned i = 0; i < cls->n_regs; ++i) {
const arch_register_t *reg = &cls->regs[i];
if (rbitset_is_set(allocatable_regs, reg->global_index))
rbitset_set(raw_bitset, i);
}
}
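/* Usage sketch (cls is a hypothetical register class of the backend):
 *
 *   unsigned n_regs = be_get_n_allocatable_regs(irg, cls);
 *
 * e.g. a spiller can compare n_regs against the register pressure of a
 * block to decide whether spilling is needed at all. */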
/*
* This file is part of libFirm.
* Copyright (C) 2012 University of Karlsruhe.
*/
/**
* @file
* @brief Helper functions for handling ABI constraints in the code
* selection phase.
* @author Matthias Braun
*/
#ifndef FIRM_BE_BEABI_HELPER_H
#define FIRM_BE_BEABI_HELPER_H
#include "firm_types.h"
#include "be_types.h"
#include "bearch.h"
typedef struct be_stackorder_t be_stackorder_t;
/**
* Adds an X->Proj->Keep chain for each output value of X which has no Proj yet.
*/
void be_add_missing_keeps(ir_graph *irg);
/**
* In the normal firm representation some nodes, like pure calls and builtins,
* have no memory inputs or outputs. However, in the backend these sometimes
* have to access the stack and therefore need to be enqueued into the memory
* edge again.
* This API creates a possible order in which to enqueue them, so we can be
* sure to create a legal dependency graph when transforming them.
*/
be_stackorder_t *be_collect_stacknodes(ir_graph *irg);
/**
* Returns the node that should produce the predecessor stack node in a block;
* returns NULL if there is no predecessor in the current block.
*/
ir_node *be_get_stack_pred(const be_stackorder_t *env, const ir_node *node);
/**
* Frees the memory associated with a stackorder structure.
*/
void be_free_stackorder(be_stackorder_t *env);
/**
* When a parameter is passed in a register but someone takes its address, a
* store to the frame, which can then be referenced, is necessary.
* This function can be used as a preprocessing pass before the transformation
* to create these stores. It assumes that all parameter entities passed on
* the stack have already been moved to the arg_type, and that all parameter
* entities remaining on the frame type need stores.
*/
void be_add_parameter_entity_stores(ir_graph *irg);
#endif
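A minimal sketch of how a backend driver might wire these helpers together around code selection (the driver function and its ordering are illustrative, not mandated by this commit):

	static void prepare_graph(ir_graph *irg)
	{
		/* create frame stores for register parameters whose address is taken */
		be_add_parameter_entity_stores(irg);

		/* compute a legal total order for stack-accessing nodes */
		be_stackorder_t *stackorder = be_collect_stacknodes(irg);
		/* ... run the backend transformation, consulting
		 *     be_get_stack_pred() to serialize stack accesses ... */
		be_free_stackorder(stackorder);

		/* keep otherwise unused register outputs alive */
		be_add_missing_keeps(irg);
	}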
@@ -37,7 +37,6 @@
#include "beutil.h"
#include "benode.h"
#include "belive.h"
#include "beabihelper.h"
static const arch_register_class_t *flag_class;
static const arch_register_t *flags_reg;
@@ -212,6 +212,114 @@ void be_add_missing_copies(ir_graph *irg)
constrained_livethrough_copies);
}
static ir_node *add_to_keep(ir_node *last_keep,
const arch_register_class_t *cls, ir_node *node)
{
if (last_keep != NULL) {
be_Keep_add_node(last_keep, cls, node);
} else {
ir_node *in[1] = { node };
ir_node *block = get_nodes_block(node);
ir_node *schedpoint;
last_keep = be_new_Keep(block, 1, in);
schedpoint = skip_Proj(node);
if (sched_is_scheduled(schedpoint)) {
sched_add_after(schedpoint, last_keep);
}
}
return last_keep;
}
/**
* Tests whether a node has a real user and is not just kept by the End or
* Anchor node
*/
static bool has_real_user(const ir_node *node)
{
foreach_out_edge(node, edge) {
ir_node *user = get_edge_src_irn(edge);
if (!is_End(user) && !is_Anchor(user))
return true;
}
return false;
}
static void add_missing_keep_walker(ir_node *node, void *data)
{
(void)data;
ir_mode *mode = get_irn_mode(node);
ir_node *last_keep;
if (mode != mode_T) {
if (!has_real_user(node)) {
const arch_register_req_t *req = arch_get_irn_register_req(node);
const arch_register_class_t *cls = req->cls;
if (cls == NULL
|| (cls->flags & arch_register_class_flag_manual_ra)) {
return;
}
add_to_keep(NULL, cls, node);
}
return;
}
unsigned n_outs = arch_get_irn_n_outs(node);
if (n_outs == 0)
return;
unsigned *const found_projs = rbitset_alloca(n_outs);
ir_node **const existing_projs = ALLOCANZ(ir_node*, n_outs);
foreach_out_edge(node, edge) {
ir_node *succ = get_edge_src_irn(edge);
ir_mode *mode = get_irn_mode(succ);
/* The node could be kept */
if (is_End(succ) || is_Anchor(succ))
continue;
if (mode == mode_M || mode == mode_X)
continue;
unsigned pn = get_Proj_num(succ);
assert(pn < n_outs);
existing_projs[pn] = succ;
if (!has_real_user(succ))
continue;
rbitset_set(found_projs, pn);
}
/* are keeps missing? */
last_keep = NULL;
for (unsigned i = 0; i < n_outs; ++i) {
ir_node *value;
const arch_register_req_t *req;
const arch_register_class_t *cls;
if (rbitset_is_set(found_projs, i)) {
continue;
}
req = arch_get_irn_register_req_out(node, i);
cls = req->cls;