Commit 0860cafa authored by Matthias Braun

- Rewrite internal backend logic for querying register constraints.

  (For now we have separate get_input and get_output callbacks.)
  This should make the code faster and is a first step towards
  changing the interface to query register constraints on the mode_T node
  itself instead of on the Proj nodes.
- Handle middleend node constraints and related special cases in benode.c
  instead of in each backend
- Remove irn_class_branch; we already have is_cfop in the middleend
- Fix a bunch of bugs/problems in the process

[r26320]
parent e738216b
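
Before the hunks: a small self-contained sketch (an editor's illustration, not part of the commit) of the new query dispatch that the rewritten arch_get_register_req() in bearch.c uses below. All toy_* types and names are invented stand-ins; only the control flow (redirect a query on a Proj to an "out" query on its predecessor, with the Proj number encoded as a negative position) mirrors the real code.

/* Toy stand-ins for ir_node and the backend's get_irn_reg_req_in/_out
 * callbacks; requirements are represented by plain strings here. */
#include <stdio.h>

typedef struct toy_node {
	int              is_proj;   /* stand-in for is_Proj()       */
	int              proj_nr;   /* stand-in for get_Proj_proj() */
	struct toy_node *pred;      /* stand-in for get_Proj_pred() */
	const char     **in_reqs;   /* per-input requirement names  */
	const char     **out_reqs;  /* per-output requirement names */
} toy_node;

static const char *toy_req_in(const toy_node *n, int pos)  { return n->in_reqs[pos];  }
static const char *toy_req_out(const toy_node *n, int pos) { return n->out_reqs[pos]; }

/* mirrors the control flow of the rewritten arch_get_register_req() */
static const char *toy_get_register_req(const toy_node *n, int pos)
{
	if (n->is_proj) {
		/* encode the Proj number as a negative "out" position */
		pos = -1 - n->proj_nr;
		n   = n->pred;
	}
	if (pos < 0)
		return toy_req_out(n, -pos - 1);
	return toy_req_in(n, pos);
}

int main(void)
{
	const char *outs[] = { "gp0", "gp1" };
	const char *ins[]  = { "gp" };
	toy_node call = { 0, 0, NULL, ins, outs }; /* a mode_T-like node   */
	toy_node proj = { 1, 1, &call, NULL, NULL }; /* Proj 1 of that node */

	printf("input 0 of call: %s\n", toy_get_register_req(&call, 0));  /* gp  */
	printf("Proj 1 of call:  %s\n", toy_get_register_req(&proj, -1)); /* gp1 */
	return 0;
}
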
......@@ -80,7 +80,8 @@ typedef enum {
/** The opcodes of the libFirm predefined operations. */
typedef enum {
iro_Block,
iro_First,
iro_Block = iro_First,
iro_Start, iro_End, iro_Jmp, iro_IJmp, iro_Cond, iro_Return,
iro_Const, iro_SymConst,
iro_Sel,
......
......@@ -66,61 +66,9 @@ static set *cur_reg_set = NULL;
* |___/
**************************************************/
/**
* Return register requirements for a TEMPLATE node.
* If the node returns a tuple (mode_T) then the proj's
* will be asked for this information.
*/
static const arch_register_req_t *TEMPLATE_get_irn_reg_req(const ir_node *node,
int pos)
{
long node_pos = pos == -1 ? 0 : pos;
ir_mode *mode = get_irn_mode(node);
if (mode == mode_T || mode == mode_M) {
return arch_no_register_req;
}
if (is_Proj(node)) {
/* in case of a proj, we need to get the correct OUT slot */
/* of the node corresponding to the proj number */
if (pos == -1) {
node_pos = TEMPLATE_translate_proj_pos(node);
} else {
node_pos = pos;
}
node = skip_Proj_const(node);
}
/* get requirements for our own nodes */
if (is_TEMPLATE_irn(node)) {
const arch_register_req_t *req;
if (pos >= 0) {
req = get_TEMPLATE_in_req(node, pos);
} else {
req = get_TEMPLATE_out_req(node, node_pos);
}
assert(req != NULL);
return req;
}
/* unknowns should be transformed already */
assert(!is_Unknown(node));
return arch_no_register_req;
}
static arch_irn_class_t TEMPLATE_classify(const ir_node *irn)
{
irn = skip_Proj_const(irn);
if (is_cfop(irn)) {
return arch_irn_class_branch;
}
(void) irn;
return 0;
}
......@@ -158,7 +106,8 @@ static int TEMPLATE_get_sp_bias(const ir_node *irn)
/* fill register allocator interface */
static const arch_irn_ops_t TEMPLATE_irn_ops = {
TEMPLATE_get_irn_reg_req,
get_TEMPLATE_in_req,
get_TEMPLATE_out_req,
TEMPLATE_classify,
TEMPLATE_get_frame_entity,
TEMPLATE_set_frame_entity,
......
......@@ -83,62 +83,9 @@ static set *cur_reg_set = NULL;
* |___/
**************************************************/
/**
* Return register requirements for a arm node.
* If the node returns a tuple (mode_T) then the proj's
* will be asked for this information.
*/
static const arch_register_req_t *arm_get_irn_reg_req(const ir_node *node,
int pos)
{
long node_pos = pos == -1 ? 0 : pos;
ir_mode *mode = get_irn_mode(node);
if (is_Block(node) || mode == mode_X) {
return arch_no_register_req;
}
if (mode == mode_T && pos < 0) {
return arch_no_register_req;
}
if (is_Proj(node)) {
if(mode == mode_M)
return arch_no_register_req;
if(pos >= 0) {
return arch_no_register_req;
}
node_pos = (pos == -1) ? get_Proj_proj(node) : pos;
node = skip_Proj_const(node);
}
/* get requirements for our own nodes */
if (is_arm_irn(node)) {
const arch_register_req_t *req;
if (pos >= 0) {
req = get_arm_in_req(node, pos);
} else {
req = get_arm_out_req(node, node_pos);
}
return req;
}
/* unknown should be transformed by now */
assert(!is_Unknown(node));
return arch_no_register_req;
}
static arch_irn_class_t arm_classify(const ir_node *irn)
{
irn = skip_Proj_const(irn);
if (is_cfop(irn)) {
return arch_irn_class_branch;
}
(void) irn;
return 0;
}
......@@ -175,7 +122,8 @@ static int arm_get_sp_bias(const ir_node *irn)
/* fill register allocator interface */
static const arch_irn_ops_t arm_irn_ops = {
arm_get_irn_reg_req,
get_arm_in_req,
get_arm_out_req,
arm_classify,
arm_get_frame_entity,
arm_set_frame_entity,
......
......@@ -71,8 +71,34 @@ static inline const arch_irn_ops_t *get_irn_ops(const ir_node *irn)
const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos)
{
const arch_irn_ops_t *ops = get_irn_ops(irn);
return ops->get_irn_reg_req(irn, pos);
const arch_irn_ops_t *ops;
if (is_Proj(irn)) {
assert(pos == -1);
pos = -1-get_Proj_proj(irn);
irn = get_Proj_pred(irn);
}
ops = get_irn_ops(irn);
if (pos < 0) {
return ops->get_irn_reg_req_out(irn, -pos-1);
} else {
return ops->get_irn_reg_req_in(irn, pos);
}
}
const arch_register_req_t *arch_get_register_req_out(const ir_node *irn)
{
int pos = 0;
const arch_irn_ops_t *ops;
if (is_Proj(irn)) {
pos = get_Proj_proj(irn);
irn = get_Proj_pred(irn);
} else if (get_irn_mode(irn) == mode_T) {
return arch_no_register_req;
}
ops = get_irn_ops(irn);
return ops->get_irn_reg_req_out(irn, pos);
}
void arch_set_frame_offset(ir_node *irn, int offset)
......
......@@ -86,15 +86,14 @@ extern const arch_register_req_t *arch_no_register_req;
extern char *arch_register_req_format(char *buf, size_t len, const arch_register_req_t *req, const ir_node *node);
/**
* Certain node classes which are relevant for the register allocator.
* Node classification. Mainly used for statistics.
*/
typedef enum arch_irn_class_t {
arch_irn_class_spill = 1 << 0,
arch_irn_class_reload = 1 << 1,
arch_irn_class_remat = 1 << 2,
arch_irn_class_copy = 1 << 3,
arch_irn_class_perm = 1 << 4,
arch_irn_class_branch = 1 << 5
arch_irn_class_spill = 1 << 0,
arch_irn_class_reload = 1 << 1,
arch_irn_class_remat = 1 << 2,
arch_irn_class_copy = 1 << 3,
arch_irn_class_perm = 1 << 4
} arch_irn_class_t;
void arch_set_frame_offset(ir_node *irn, int bias);
......@@ -116,8 +115,7 @@ void arch_perform_memory_operand(ir_node *irn, ir_node *spill, unsign
* operand was no register operand.
*/
const arch_register_req_t *arch_get_register_req(const ir_node *irn, int pos);
#define arch_get_register_req_out(irn) arch_get_register_req(irn, -1)
const arch_register_req_t *arch_get_register_req_out(const ir_node *irn);
/**
* Put all registers which shall not be ignored by the register
......@@ -300,7 +298,7 @@ _arch_register_for_index(const arch_register_class_t *cls, unsigned idx)
* Expresses requirements to register allocation for an operand.
*/
struct arch_register_req_t {
arch_register_req_type_t type; /**< The type of the constraint. */
arch_register_req_type_t type; /**< The type of the constraint. */
const arch_register_class_t *cls; /**< The register class this constraint belongs to. */
const unsigned *limited; /**< allowed register bitset */
......@@ -355,13 +353,22 @@ struct arch_irn_ops_t {
/**
* Get the register requirements for a given operand.
* @param self The self pointer.
* @param irn The node.
* @param pos The operand's position (0..n for the input operands).
* @param pos The operand's position
* @return The register requirements for the selected operand.
* The pointer returned is never NULL.
*/
const arch_register_req_t *(*get_irn_reg_req_in)(const ir_node *irn, int pos);
/**
* Get the register requirements for values produced by a node
* @param irn The node.
* @param pos The operand's position (0 for most nodes,
* 0..n for mode_T nodes)
* @return The register requirements for the selected operand.
* The pointer returned is never NULL.
*/
const arch_register_req_t *(*get_irn_reg_req)(const ir_node *irn, int pos);
const arch_register_req_t *(*get_irn_reg_req_out)(const ir_node *irn, int pos);
/**
* Classify the node.
......@@ -372,7 +379,6 @@ struct arch_irn_ops_t {
/**
* Get the entity on the stack frame this node depends on.
* @param self The this pointer.
* @param irn The node in question.
* @return The entity on the stack frame or NULL, if the node does not have a
* stack frame entity.
......@@ -381,7 +387,6 @@ struct arch_irn_ops_t {
/**
* Set the entity on the stack frame this node depends on.
* @param self The this pointer.
* @param irn The node in question.
* @param ent The entity to set
*/
......@@ -389,7 +394,6 @@ struct arch_irn_ops_t {
/**
* Set the offset of a node carrying an entity on the stack frame.
* @param self The this pointer.
* @param irn The node.
* @param offset The offset of the node's stack frame entity.
*/
......@@ -402,7 +406,6 @@ struct arch_irn_ops_t {
* A positive value stands for an expanding stack area, a negative value for
* a shrinking one.
*
* @param self The this pointer
* @param irn The node
* @return 0 if the stackpointer is not modified with a constant
* value, otherwise the increment/decrement value
......@@ -413,7 +416,6 @@ struct arch_irn_ops_t {
* Returns an inverse operation which yields the i-th argument
* of the given node as result.
*
* @param self The this pointer.
* @param irn The original operation
* @param i Index of the argument we want the inverse operation to yield
* @param inverse struct to be filled with the resulting inverse op
......@@ -425,7 +427,6 @@ struct arch_irn_ops_t {
/**
* Get the estimated cycle count for @p irn.
*
* @param self The this pointer.
* @param irn The node.
*
* @return The estimated cycle count for this operation
......@@ -435,7 +436,6 @@ struct arch_irn_ops_t {
/**
* Asks the backend whether operand @p i of @p irn can be loaded form memory internally
*
* @param self The this pointer.
* @param irn The node.
* @param i Index of the argument we would like to know whether @p irn can load it form memory internally
*
......@@ -446,7 +446,6 @@ struct arch_irn_ops_t {
/**
* Ask the backend to assimilate @p reload of operand @p i into @p irn.
*
* @param self The this pointer.
* @param irn The node.
* @param spill The spill.
* @param i The position of the reload.
......
......@@ -674,17 +674,17 @@ static void pressure(ir_node *block, void *env_ptr)
border_def(proj, step, 1);
}
}
}
/*
* If the node defines some value, which can put into a
* register of the current class, make a border for it.
*/
if (has_reg_class(env, irn)) {
int nr = get_irn_idx(irn);
bitset_clear(live, nr);
border_def(irn, step, 1);
} else {
/*
* If the node defines some value, which can put into a
* register of the current class, make a border for it.
*/
if (has_reg_class(env, irn)) {
int nr = get_irn_idx(irn);
bitset_clear(live, nr);
border_def(irn, step, 1);
}
}
/*
......
......@@ -301,33 +301,6 @@ static void add_register_req(ir_node *node)
ARR_APP1(reg_out_info_t, info->out_infos, out_info);
}
/**
* Skip Proj nodes and return their Proj numbers.
*
* If *node is a Proj or Proj(Proj) node, skip it.
*
* @param node points to the node to be skipped
*
* @return 0 if *node was no Proj node, its Proj number else.
*/
static int redir_proj(const ir_node **node)
{
const ir_node *n = *node;
if(is_Proj(n)) {
ir_node *irn;
*node = irn = get_Proj_pred(n);
if(is_Proj(irn)) {
assert(get_irn_mode(irn) == mode_T);
*node = get_Proj_pred(irn);
}
return get_Proj_proj(n);
}
return 0;
}
ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_class_t *cls_frame,
ir_node *bl, ir_node *frame, ir_node *to_spill)
{
......@@ -345,6 +318,15 @@ ir_node *be_new_Spill(const arch_register_class_t *cls, const arch_register_clas
be_node_set_reg_class_in(res, be_pos_Spill_frame, cls_frame);
be_node_set_reg_class_in(res, be_pos_Spill_val, cls);
/*
* For spills and reloads, we return "none" as requirement for frame
* pointer, so every input is ok. Some backends need this (e.g. STA).
* Matze: we should investigate if this is really needed, this solution
* looks very hacky to me
*/
be_node_set_reg_class_in(res, be_pos_Spill_frame, NULL);
return res;
}
......@@ -364,6 +346,15 @@ ir_node *be_new_Reload(const arch_register_class_t *cls,
be_node_set_reg_class_out(res, 0, cls);
be_node_set_reg_class_in(res, be_pos_Reload_frame, cls_frame);
arch_irn_set_flags(res, arch_irn_flags_rematerializable);
/*
* For spills and reloads, we return "none" as requirement for frame
* pointer, so every input is ok. Some backends need this (e.g. STA).
* Matze: we should investigate if this is really needed, this solution
* looks very hacky to me
*/
be_node_set_reg_class_in(res, be_pos_Reload_frame, NULL);
return res;
}
......@@ -1069,78 +1060,39 @@ ir_node *be_reload(const arch_register_class_t *cls, ir_node *insert, ir_mode *m
*/
static const
arch_register_req_t *get_out_reg_req(const ir_node *irn, int out_pos)
static const arch_register_req_t *be_node_get_out_reg_req(
const ir_node *irn, int pos)
{
const be_node_attr_t *a = get_irn_attr_const(irn);
if (out_pos >= ARR_LEN(a->reg_data)) {
assert(pos >= 0);
if (pos >= ARR_LEN(a->reg_data)) {
return arch_no_register_req;
}
return &a->reg_data[out_pos].req;
return &a->reg_data[pos].req;
}
static const
arch_register_req_t *get_in_reg_req(const ir_node *irn, int pos)
static const arch_register_req_t *be_node_get_in_reg_req(
const ir_node *irn, int pos)
{
const be_node_attr_t *a = get_irn_attr_const(irn);
assert(pos >= 0);
if (pos >= get_irn_arity(irn) || pos >= ARR_LEN(a->reg_data))
return arch_no_register_req;
return &a->reg_data[pos].in_req;
}
static const arch_register_req_t *
be_node_get_irn_reg_req(const ir_node *irn, int pos)
{
int out_pos = pos;
if (pos < 0) {
if (get_irn_mode(irn) == mode_T)
return arch_no_register_req;
assert(pos == -1);
out_pos = redir_proj((const ir_node **)&irn);
assert(is_be_node(irn));
return get_out_reg_req(irn, out_pos);
} else if (is_be_node(irn)) {
/*
* For spills and reloads, we return "none" as requirement for frame
* pointer, so every input is ok. Some backends need this (e.g. STA).
*/
if ((pos == be_pos_Spill_frame && be_is_Spill(irn)) ||
(pos == be_pos_Reload_frame && be_is_Reload(irn)))
return arch_no_register_req;
return get_in_reg_req(irn, pos);
}
return arch_no_register_req;
}
static arch_irn_class_t be_node_classify(const ir_node *irn)
{
restart:
switch (get_irn_opcode(irn)) {
#define XXX(a,b) case a: return b
XXX(beo_Spill, arch_irn_class_spill);
XXX(beo_Reload, arch_irn_class_reload);
XXX(beo_Perm, arch_irn_class_perm);
XXX(beo_Copy, arch_irn_class_copy);
XXX(beo_Return, arch_irn_class_branch);
#undef XXX
case iro_Proj:
irn = get_Proj_pred(irn);
if (is_Proj(irn)) {
assert(get_irn_mode(irn) == mode_T);
irn = get_Proj_pred(irn);
}
goto restart;
default:
return 0;
case beo_Spill: return arch_irn_class_spill;
case beo_Reload: return arch_irn_class_reload;
case beo_Perm: return arch_irn_class_perm;
case beo_Copy: return arch_irn_class_copy;
default: return 0;
}
}
......@@ -1161,10 +1113,13 @@ static void be_node_set_frame_entity(ir_node *irn, ir_entity *ent)
static void be_node_set_frame_offset(ir_node *irn, int offset)
{
if(be_has_frame_entity(irn)) {
be_frame_attr_t *a = get_irn_attr(irn);
a->offset = offset;
}
be_frame_attr_t *a;
if(!be_has_frame_entity(irn))
return;
a = get_irn_attr(irn);
a->offset = offset;
}
static int be_node_get_sp_bias(const ir_node *irn)
......@@ -1186,8 +1141,10 @@ static int be_node_get_sp_bias(const ir_node *irn)
*/
/* for be nodes */
static const arch_irn_ops_t be_node_irn_ops = {
be_node_get_irn_reg_req,
be_node_get_in_reg_req,
be_node_get_out_reg_req,
be_node_classify,
be_node_get_frame_entity,
be_node_set_frame_entity,
......@@ -1199,6 +1156,61 @@ static const arch_irn_ops_t be_node_irn_ops = {
NULL, /* perform_memory_operand */
};
static const arch_register_req_t *dummy_reg_req(
const ir_node *node, int pos)
{
(void) node;
(void) pos;
return arch_no_register_req;
}
static arch_irn_class_t dummy_classify(const ir_node *node)
{
(void) node;
return 0;
}
static ir_entity* dummy_get_frame_entity(const ir_node *node)
{
(void) node;
return NULL;
}
static void dummy_set_frame_entity(ir_node *node, ir_entity *entity)
{
(void) node;
(void) entity;
panic("dummy_set_frame_entity() should not be called");
}
static void dummy_set_frame_offset(ir_node *node, int bias)
{
(void) node;
(void) bias;
panic("dummy_set_frame_offset() should not be called");
}
static int dummy_get_sp_bias(const ir_node *node)
{
(void) node;
return 0;
}
/* for "middleend" nodes */
static const arch_irn_ops_t dummy_be_irn_ops = {
dummy_reg_req,
dummy_reg_req,
dummy_classify,
dummy_get_frame_entity,
dummy_set_frame_entity,
dummy_set_frame_offset,
dummy_get_sp_bias,
NULL, /* get_inverse */
NULL, /* get_op_estimated_cost */
NULL, /* possible_memory_operand */
NULL, /* perform_memory_operand */
};
/*
____ _ _ ___ ____ _ _ _ _ _ _
| _ \| |__ (_) |_ _| _ \| \ | | | | | | __ _ _ __ __| | | ___ _ __
......@@ -1326,45 +1338,14 @@ void be_set_phi_flags(ir_node *node, arch_irn_flags_t flags)
attr->flags = flags;
}
static arch_irn_class_t phi_classify(const ir_node *irn)
{
(void) irn;
return 0;
}
static ir_entity *phi_get_frame_entity(const ir_node *irn)
{
(void) irn;
return NULL;
}
static void phi_set_frame_entity(ir_node *irn, ir_entity *ent)
{
(void) irn;
(void) ent;
panic("phi_set_frame_entity() should not be called");
}
static void phi_set_frame_offset(ir_node *irn, int bias)
{
(void) irn;
(void) bias;
panic("phi_set_frame_offset() should not be called");
}
static int phi_get_sp_bias(const ir_node *irn)
{
(void) irn;