Commit 38a2ba85 authored by Sebastian Hack's avatar Sebastian Hack
Browse files

Small bugfixes

parent 2711d82f
......@@ -18,6 +18,7 @@
#include "set.h"
#include "pmap.h"
#include "util.h"
#include "debug.h"
#include "irop_t.h"
#include "irmode_t.h"
......@@ -96,17 +97,15 @@ ir_node *new_Spill(const be_node_factory_t *factory,
}
ir_node *new_Reload(const be_node_factory_t *factory,
const arch_register_class_t *cls,
ir_graph *irg, ir_node *bl, ir_node *spill_node)
const arch_register_class_t *cls, ir_graph *irg,
ir_node *bl, ir_mode *mode, ir_node *spill_node)
{
ir_mode *mode;
ir_node *in[1];
ir_op *op = get_op(factory, cls, node_kind_reload)->op;
assert(op && "Reload opcode must be present for this register class");
assert(is_Spill(factory, spill_node) && "Operand of Reload must be a Spill");
// assert(is_Spill(factory, spill_node) && "Operand of Reload must be a Spill");
in[0] = spill_node;
mode = get_irn_mode(get_irn_n(spill_node, 0));
return new_ir_node(NULL, irg, bl, op, mode, 1, in);
}
......@@ -327,24 +326,35 @@ ir_node *insert_Perm_after(const be_main_session_env_t *env,
ir_graph *irg = get_irn_irg(bl);
pset *live = put_live_end(bl, pset_new_ptr_default());
ir_node *curr, *irn, *perm, **nodes;
firm_dbg_module_t *dbg = firm_dbg_register("firm.be.node");
int i, n;
ir_printf("Insert Perm after: %+F\n", pos);
DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
sched_foreach_reverse(bl, irn) {
ir_node *x;
DBG((dbg, LEVEL_1, "%+F\n", irn));
for(x = pset_first(live); x; x = pset_next(live))
DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));
if(arch_irn_has_reg_class(arch_env, irn, arch_pos_make_out(0), cls))
pset_remove_ptr(live, irn);
/*
* Consider the definition of the node, but not the uses, since
* newly created liveranges by the node after which the perm is
* located are not of interest for the perm.
*/
if(irn == pos)
break;
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
if(arch_irn_has_reg_class(arch_env, op, arch_pos_make_out(0), cls))
pset_insert_ptr(live, op);
}
if(sched_prev(irn) == pos)
break;
}
n = pset_count(live);
......
......@@ -44,8 +44,8 @@ ir_node *new_Spill(const be_node_factory_t *factory,
ir_graph *irg, ir_node *bl, ir_node *node_to_spill);
ir_node *new_Reload(const be_node_factory_t *factory,
const arch_register_class_t *cls,
ir_graph *irg, ir_node *bl, ir_node *spill_node);
const arch_register_class_t *cls, ir_graph *irg,
ir_node *bl, ir_mode *mode, ir_node *spill_node);
ir_node *new_Perm(const be_node_factory_t *factory,
const arch_register_class_t *cls,
......
......@@ -88,6 +88,7 @@ int sched_verify(const ir_node *block)
const ir_node *irn;
int i, n;
int *save_time_step;
const ir_node **save_nodes;
const ir_edge_t *edge;
pset *scheduled_nodes = pset_new_ptr_default();
......@@ -99,11 +100,13 @@ int sched_verify(const ir_node *block)
n++;
save_time_step = malloc(n * sizeof(save_time_step[0]));
save_nodes = malloc(n * sizeof(save_nodes[0]));
i = 0;
sched_foreach(block, irn) {
sched_info_t *info = get_irn_sched_info(irn);
save_time_step[i] = info->time_step;
save_nodes[i] = irn;
info->time_step = i;
pset_insert_ptr(scheduled_nodes, irn);
......@@ -122,11 +125,12 @@ int sched_verify(const ir_node *block)
ir_node *op = get_irn_n(irn, i);
if(to_appear_in_schedule(op)
&& !is_Phi(irn)
&& get_nodes_block(op) == block
&& sched_get_time_step(op) > step) {
DBG((dbg_sched, LEVEL_DEFAULT,
"%n is operand of %n but scheduled after", op, irn));
"%+F: %+F is operand of %+F but scheduled after\n", block, op, irn));
res = 0;
}
}
......@@ -136,8 +140,9 @@ int sched_verify(const ir_node *block)
for(i = 1; i < n; ++i) {
if(save_time_step[i] - save_time_step[i - 1] <= 0) {
DBG((dbg_sched, LEVEL_DEFAULT,
"position %d: time step shrinks (from %d to %d)\n",
i, save_time_step[i - 1], save_time_step[i]));
"%+F from %+F(%d) -> %+F(%d) step shrinks from %d -> %d\n",
block, save_nodes[i - 1], i - 1, save_nodes[i], i,
save_time_step[i - 1], save_time_step[i]));
res = 0;
}
}
......@@ -153,11 +158,13 @@ int sched_verify(const ir_node *block)
foreach_out_edge(block, edge) {
ir_node *irn = get_edge_src_irn(edge);
if(to_appear_in_schedule(irn) && !pset_find_ptr(scheduled_nodes, irn))
DBG((dbg_sched, LEVEL_DEFAULT, "%+F is in block but not scheduled\n", irn));
DBG((dbg_sched, LEVEL_DEFAULT,
"%+F: %+F is in block but not scheduled\n", block, irn));
}
del_pset(scheduled_nodes);
free(save_time_step);
free(save_nodes);
return res;
}
......
......@@ -18,13 +18,22 @@ ir_node *(sched_last)(const ir_node *block);
ir_node *(sched_add_before)(ir_node *before, ir_node *irn);
ir_node *(sched_add_after)(ir_node *before, ir_node *irn);
#define sched_is_end(irn) is_Block(irn)
#define sched_is_begin(irn) is_Block(irn)
#define sched_foreach_from(from, irn) \
for(irn = from; !sched_is_end(irn); irn = sched_next(irn))
#define sched_foreach_reverse_from(from, irn) \
for(irn = from; !sched_is_begin(irn); irn = sched_prev(irn))
/**
* A shorthand macro for iterating over a schedule.
* @param block The block.
* @param irn A ir node pointer used as an iterator.
*/
#define sched_foreach(block,irn) \
for(irn = sched_first(block); !is_Block(irn); irn = sched_next(irn))
sched_foreach_from(sched_first(block), irn)
/**
* A shorthand macro for reversely iterating over a schedule.
......@@ -32,6 +41,6 @@ ir_node *(sched_add_after)(ir_node *before, ir_node *irn);
* @param irn A ir node pointer used as an iterator.
*/
#define sched_foreach_reverse(block,irn) \
for(irn = sched_last(block); !is_Block(irn); irn = sched_prev(irn))
sched_foreach_reverse_from(sched_last(block), irn)
#endif
......@@ -22,7 +22,7 @@ typedef struct _sched_info_t {
sched_timestep_t time_step; /**< If a is after b in a schedule, its time step is
larger than b's. */
int scheduled : 1; /**< 1, if the node is in the schedule of the block, 0 else. */
int scheduled : 1; /**< 1, if the node is in the schedule of the block, 0 else. */
} sched_info_t;
#define _sched_entry(list_head) (list_entry(list_head, sched_info_t, list))
......@@ -127,9 +127,9 @@ static INLINE ir_node *_sched_first(const ir_node *block)
/**
* Get the last node in a schedule.
* @param block The block to get the schedule for.
* @return The last ir node in a schedule, or the block itself, if there is node in the schedule.
* or it is empty.
* @param block The block to get the schedule for.
* @return The last ir node in a schedule, or the block itself
 * if there is no node in the schedule.
*/
static INLINE ir_node *_sched_last(const ir_node *block)
{
......@@ -154,7 +154,7 @@ static INLINE void _sched_set_time_stamp(ir_node *irn)
* else we have to compute our time step from our
* neighbours.
*/
if(after_ts == 0)
if(before_ts >= after_ts)
inf->time_step = before_ts + SCHED_INITIAL_GRANULARITY;
else {
sched_timestep_t ts = (before_ts + after_ts) / 2;
......@@ -165,6 +165,8 @@ static INLINE void _sched_set_time_stamp(ir_node *irn)
*/
if(ts == before_ts || ts == after_ts)
sched_renumber(get_nodes_block(irn));
else
inf->time_step = ts;
}
}
......@@ -203,11 +205,26 @@ static INLINE ir_node *_sched_add_after(ir_node *after, ir_node *irn)
* @param irn The node.
* @return 1, if the node is scheduled, 0 if not.
*/
static INLINE int _sched_is_scheduled(ir_node *irn)
static INLINE int _sched_is_scheduled(const ir_node *irn)
{
return get_irn_sched_info(irn)->scheduled;
}
/**
* Compare two nodes according to their position in the schedule.
* @param a The first node.
* @param b The second node.
* @return A number smaller, equals to or larger than 0, if a is
* before, the same, or after b in the schedule.
*/
static INLINE int _sched_cmp(const ir_node *a, const ir_node *b)
{
assert(_sched_is_scheduled(a) && _sched_is_scheduled(b));
assert(get_nodes_block(a) == get_nodes_block(b));
return get_irn_sched_info(a)->time_step - get_irn_sched_info(b)->time_step;
}
/**
* Verify a schedule.
* @param block The block whose schedule to verify.
......@@ -232,6 +249,7 @@ extern int sched_verify_irg(ir_graph *irg);
#define sched_last(irn) _sched_last(irn)
#define sched_add_before(before, irn) _sched_add_before(before, irn)
#define sched_add_after(after, irn) _sched_add_after(after, irn)
#define sched_is_scheduled(irn) _sched_is_scheduled(irn)
#define sched_is_scheduled(irn) _sched_is_scheduled(irn)
#define sched_cmp(a, b) _sched_cmp(a, b)
#endif
......@@ -12,6 +12,18 @@
/** Undefine this to disable debugging mode. */
#define BE_DEBUG 1
/**
* Convenient block getter.
* Works also, if the given node is a block.
* @param irn The node.
* @return The block of the node, or the node itself, if the node is a
* block.
*/
static INLINE const ir_node *get_block(const ir_node *irn)
{
	/* A block is its own block; anything else asks its containing block. */
	if (is_Block(irn))
		return irn;

	return get_nodes_block(irn);
}
/**
* Check, if a node produces or consumes a data value.
* If it does, it is significant for scheduling and register allocation.
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment