Commit d6510178 authored by Matthias Braun's avatar Matthias Braun
Browse files

remove generic be_Spill/be_Reload node

parent 73886204
......@@ -10,7 +10,7 @@
* @date 17.05.2005
*
* Backend node support for generic backend nodes.
* This file provides Perm, Copy, Spill and Reload nodes.
 * This file provides Perm and Copy nodes.
*/
#include <stdlib.h>
......@@ -95,8 +95,6 @@ typedef struct {
} be_memperm_attr_t;
static unsigned be_opcode_start;
ir_op *op_be_Spill;
ir_op *op_be_Reload;
ir_op *op_be_Perm;
ir_op *op_be_MemPerm;
ir_op *op_be_Copy;
......@@ -238,83 +236,6 @@ static void add_register_req_in(ir_node *node, const arch_register_req_t *req)
ARR_APP1(const arch_register_req_t*, info->in_reqs, req);
}
/**
 * Creates a new be_Spill node that stores @p to_spill relative to the
 * frame pointer @p frame.
 *
 * @param cls        register class of the spilled value
 * @param cls_frame  register class of the frame pointer input
 * @param bl         block the node is placed in
 * @param frame      frame pointer input
 * @param to_spill   value to be spilled
 * @return the new Spill node (mode M, pinned; frame entity/offset
 *         initialized to NULL/0 and assigned later)
 */
ir_node *be_new_Spill(const arch_register_class_t *cls,
                      const arch_register_class_t *cls_frame, ir_node *bl,
                      ir_node *frame, ir_node *to_spill)
{
	ir_graph *irg   = get_Block_irg(bl);
	ir_node  *ins[] = { frame, to_spill };
	ir_node  *spill = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, ins);
	init_node_attr(spill, 2, 1);

	/* frame entity is not known yet; filled in by the spill-slot phase */
	be_frame_attr_t *attr = (be_frame_attr_t*) get_irn_generic_attr(spill);
	attr->ent                = NULL;
	attr->offset             = 0;
	attr->base.exc.pin_state = op_pin_state_pinned;

	be_node_set_reg_class_in(spill, n_be_Spill_frame, cls_frame);
	be_node_set_reg_class_in(spill, n_be_Spill_val, cls);
	arch_set_irn_register_req_out(spill, 0, arch_no_register_req);
	arch_add_irn_flags(spill, arch_irn_flag_spill);
	return spill;
}
/**
 * Creates a new be_Reload node that reloads a value of mode @p mode from
 * the stack frame, depending on memory input @p mem.
 *
 * @param cls        register class of the reloaded value
 * @param cls_frame  register class of the frame pointer input
 * @param block      block the node is placed in
 * @param frame      frame pointer input
 * @param mem        memory dependency (usually the corresponding Spill)
 * @param mode       mode of the reloaded value
 * @return the new Reload node (pinned, marked rematerializable)
 */
ir_node *be_new_Reload(const arch_register_class_t *cls,
                       const arch_register_class_t *cls_frame, ir_node *block,
                       ir_node *frame, ir_node *mem, ir_mode *mode)
{
	ir_graph *irg    = get_Block_irg(block);
	ir_node  *ins[]  = { frame, mem };
	ir_node  *reload = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, ins);
	init_node_attr(reload, 2, 1);

	be_node_set_reg_class_out(reload, 0, cls);
	be_node_set_reg_class_in(reload, n_be_Reload_frame, cls_frame);
	/* a reload can always be repeated instead of spilling its result again */
	arch_set_irn_flags(reload, arch_irn_flag_rematerializable);

	/* frame entity is not known yet; filled in by the spill-slot phase */
	be_frame_attr_t *attr = (be_frame_attr_t*) get_irn_generic_attr(reload);
	attr->ent                = NULL;
	attr->offset             = 0;
	attr->base.exc.pin_state = op_pin_state_pinned;
	return reload;
}
/** Returns the memory dependency input of a be_Reload node. */
ir_node *be_get_Reload_mem(const ir_node *irn)
{
	assert(be_is_Reload(irn));
	return get_irn_n(irn, n_be_Reload_mem);
}
/** Returns the frame pointer input of a be_Reload node. */
ir_node *be_get_Reload_frame(const ir_node *irn)
{
	assert(be_is_Reload(irn));
	return get_irn_n(irn, n_be_Reload_frame);
}
/** Returns the spilled-value input of a be_Spill node. */
ir_node *be_get_Spill_val(const ir_node *irn)
{
	assert(be_is_Spill(irn));
	return get_irn_n(irn, n_be_Spill_val);
}
/** Returns the frame pointer input of a be_Spill node. */
ir_node *be_get_Spill_frame(const ir_node *irn)
{
	assert(be_is_Spill(irn));
	return get_irn_n(irn, n_be_Spill_frame);
}
ir_node *be_new_Perm(arch_register_class_t const *const cls, ir_node *const block, int const n, ir_node *const *const in)
{
int i;
......@@ -750,7 +671,7 @@ void be_set_CopyKeep_op(ir_node *cpy, ir_node *op)
static bool be_has_frame_entity(const ir_node *irn)
{
return be_is_Spill(irn) || be_is_Reload(irn) || be_is_FrameAddr(irn);
return be_is_FrameAddr(irn);
}
ir_entity *be_get_frame_entity(const ir_node *irn)
......@@ -1276,14 +1197,12 @@ static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p,
void be_init_op(void)
{
assert(op_be_Spill == NULL);
assert(op_be_Perm == NULL);
be_opcode_start = get_next_ir_opcodes(beo_last+1);
/* Acquire all needed opcodes. */
unsigned o = be_opcode_start;
op_be_Spill = new_be_op(o+beo_Spill, "be_Spill", op_pin_state_exc_pinned, irop_flag_none, oparity_any, sizeof(be_frame_attr_t));
op_be_Reload = new_be_op(o+beo_Reload, "be_Reload", op_pin_state_exc_pinned, irop_flag_none, oparity_any, sizeof(be_frame_attr_t));
op_be_Perm = new_be_op(o+beo_Perm, "be_Perm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, sizeof(be_node_attr_t));
op_be_MemPerm = new_be_op(o+beo_MemPerm, "be_MemPerm", op_pin_state_exc_pinned, irop_flag_none, oparity_variable, sizeof(be_memperm_attr_t));
op_be_Copy = new_be_op(o+beo_Copy, "be_Copy", op_pin_state_exc_pinned, irop_flag_none, oparity_any, sizeof(be_node_attr_t));
......@@ -1300,8 +1219,6 @@ void be_init_op(void)
ir_op_set_memory_index(op_be_Call, n_be_Call_mem);
ir_op_set_fragile_indices(op_be_Call, pn_be_Call_X_regular, pn_be_Call_X_except);
op_be_Spill->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_Reload->ops.node_cmp_attr = FrameAddr_cmp_attr;
op_be_Perm->ops.node_cmp_attr = be_nodes_equal;
op_be_MemPerm->ops.node_cmp_attr = be_nodes_equal;
op_be_Copy->ops.node_cmp_attr = be_nodes_equal;
......@@ -1327,8 +1244,6 @@ void be_init_op(void)
void be_finish_op(void)
{
free_ir_op(op_be_Spill); op_be_Spill = NULL;
free_ir_op(op_be_Reload); op_be_Reload = NULL;
free_ir_op(op_be_Perm); op_be_Perm = NULL;
free_ir_op(op_be_MemPerm); op_be_MemPerm = NULL;
free_ir_op(op_be_Copy); op_be_Copy = NULL;
......
......@@ -10,7 +10,7 @@
* @date 17.05.2005
*
* Backend node support for generic backend nodes.
* This file provides Perm, Copy, Spill and Reload nodes.
 * This file provides Perm and Copy nodes.
*/
#ifndef FIRM_BE_BENODE_T_H
#define FIRM_BE_BENODE_T_H
......@@ -22,9 +22,8 @@
#include "bearch.h"
typedef enum be_opcode {
beo_Spill,
beo_Reload,
beo_Perm,
beo_first = beo_Perm,
beo_MemPerm,
beo_Copy,
beo_Keep,
......@@ -36,16 +35,12 @@ typedef enum be_opcode {
beo_SubSP,
beo_Start,
beo_FrameAddr,
beo_first = beo_Spill,
beo_last = beo_FrameAddr
} be_opcode;
/**
* The benode op's. Must be available to register emitter function.
*/
extern ir_op *op_be_Spill;
extern ir_op *op_be_Reload;
extern ir_op *op_be_Perm;
extern ir_op *op_be_MemPerm;
extern ir_op *op_be_Copy;
......@@ -73,36 +68,6 @@ void be_init_op(void);
void be_finish_op(void);
/**
* Position numbers for the be_Spill inputs.
*/
enum {
n_be_Spill_frame = 0,
n_be_Spill_val = 1
};
/**
* Make a new Spill node.
*/
ir_node *be_new_Spill(const arch_register_class_t *cls,
const arch_register_class_t *cls_frame, ir_node *block,
ir_node *frame, ir_node *to_spill);
/**
* Position numbers for the be_Reload inputs.
*/
enum {
n_be_Reload_frame = 0,
n_be_Reload_mem = 1
};
/**
* Make a new Reload node.
*/
ir_node *be_new_Reload(const arch_register_class_t *cls,
const arch_register_class_t *cls_frame, ir_node *block,
ir_node *frame, ir_node *mem, ir_mode *mode);
/**
* Position numbers for the be_Copy inputs.
*/
......@@ -393,11 +358,6 @@ void be_node_set_frame_entity(ir_node *node, ir_entity *entity);
*/
int be_get_frame_offset(const ir_node *irn);
ir_node* be_get_Reload_mem(const ir_node *irn);
ir_node *be_get_Reload_frame(const ir_node *irn);
ir_node* be_get_Spill_val(const ir_node *irn);
ir_node *be_get_Spill_frame(const ir_node *irn);
void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity* ent);
ir_entity *be_get_MemPerm_in_entity(const ir_node *irn, int n);
......@@ -465,8 +425,6 @@ ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
*/
ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg);
static inline bool be_is_Spill (const ir_node *irn) { return get_irn_op(irn) == op_be_Spill ; }
static inline bool be_is_Reload (const ir_node *irn) { return get_irn_op(irn) == op_be_Reload ; }
static inline bool be_is_Copy (const ir_node *irn) { return get_irn_op(irn) == op_be_Copy ; }
static inline bool be_is_CopyKeep (const ir_node *irn) { return get_irn_op(irn) == op_be_CopyKeep ; }
static inline bool be_is_Perm (const ir_node *irn) { return get_irn_op(irn) == op_be_Perm ; }
......
......@@ -511,18 +511,14 @@ static int check_remat_conditions_costs(spill_env_t *env,
int costs = 0;
const ir_node *insn = skip_Proj_const(spilled);
assert(!be_is_Spill(insn));
assert(!arch_irn_is(insn, spill));
if (!arch_irn_is(insn, rematerializable))
return REMAT_COST_INFINITE;
if (be_is_Reload(insn)) {
costs += 2;
} else {
costs += arch_get_op_estimated_cost(insn);
}
if (parentcosts + costs >= env->reload_cost + env->spill_cost) {
costs += arch_get_op_estimated_cost(insn);
if (parentcosts + costs >= env->reload_cost + env->spill_cost)
return REMAT_COST_INFINITE;
}
/* never rematerialize a node which modifies the flags.
* (would be better to test whether the flags are actually live at point
* reloader...)
......@@ -642,39 +638,6 @@ double be_get_reload_costs_on_edge(spill_env_t *env, ir_node *to_spill,
return be_get_reload_costs(env, to_spill, before);
}
/**
 * Creates a Spill node for @p value and schedules it directly after
 * @p after. Register classes and the frame pointer are derived from
 * @p value's graph.
 *
 * @param value  value to spill
 * @param after  node the spill is scheduled after
 * @return the new, scheduled Spill node
 */
ir_node *be_new_spill(ir_node *value, ir_node *after)
{
	ir_node *frame = get_irg_frame(get_irn_irg(value));
	ir_node *spill = be_new_Spill(arch_get_irn_reg_class(value),
	                              arch_get_irn_reg_class(frame),
	                              get_block(after), frame, value);
	sched_add_after(after, spill);
	return spill;
}
/**
 * Creates a Reload node for @p value, depending on memory @p spill,
 * and schedules it directly before @p before.
 *
 * @param value   the originally spilled value (determines class and mode)
 * @param spill   memory producer: a be_Spill or a (memory) Phi
 * @param before  node the reload is scheduled before
 * @return the new, scheduled Reload node
 */
ir_node *be_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
	assert(be_is_Spill(spill) || is_Phi(spill));
	assert(get_irn_mode(spill) == mode_M);

	ir_node *frame  = get_irg_frame(get_irn_irg(value));
	ir_node *reload = be_new_Reload(arch_get_irn_reg_class(value),
	                                arch_get_irn_reg_class(frame),
	                                get_block(before), frame, spill,
	                                get_irn_mode(value));
	sched_add_before(before, reload);
	return reload;
}
/*
* ___ _ ____ _ _
* |_ _|_ __ ___ ___ _ __| |_ | _ \ ___| | ___ __ _ __| |___
......@@ -700,7 +663,7 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
return;
assert(!arch_irn_is(insn, dont_spill));
assert(!be_is_Reload(insn));
assert(!arch_irn_is(insn, reload));
/* some backends have virtual noreg/unknown nodes that are not scheduled
* and simply always available.
......
......@@ -410,7 +410,7 @@ static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_no
static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
{
if (be_is_Spill(node)) {
if (arch_irn_is(node, spill)) {
collect_spill(env, node, reload, ent);
} else if (is_Proj(node)) {
collect_memperm(env, node, reload, ent);
......@@ -427,7 +427,7 @@ static void collect_spills_walker(ir_node *node, void *data)
{
be_verify_spillslots_env_t *env = (be_verify_spillslots_env_t*)data;
if (be_is_Reload(node)) {
if (arch_irn_is(node, reload)) {
ir_node *spill = get_memory_edge(node);
if (spill == NULL) {
ir_fprintf(stderr, "Verify warning: No spill attached to reload %+F in block %+F(%s)\n",
......@@ -478,9 +478,10 @@ static void check_lonely_spills(ir_node *node, void *data)
{
be_verify_spillslots_env_t *env = (be_verify_spillslots_env_t*)data;
if (be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
if (arch_irn_is(node, spill)
|| (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
spill_t *spill = find_spill(env, node);
if (be_is_Spill(node)) {
if (arch_irn_is(node, spill)) {
ir_entity *ent = arch_get_frame_entity(node);
be_check_entity(env, node, ent);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment