libfirm commit 071b8199
Authored Jan 24, 2006 by Sebastian Hack
Parent: 763f328a

    Some minor changes

11 changed files

ir/be/bearch.c

@@ -97,8 +97,10 @@ int arch_get_allocatable_regs(const arch_env_t *env, const ir_node *irn,
         return 0;
     }
 
-    if (arch_register_req_is(req, limited))
-        return req->limited(irn, pos, bs);
+    if (arch_register_req_is(req, limited)) {
+        req->limited(irn, pos, bs);
+        return bitset_popcnt(bs);
+    }
 
     arch_register_class_put(req->cls, bs);
     return req->cls->n_regs;

ir/be/bearch.h

@@ -111,13 +111,6 @@ _arch_register_for_index(const arch_register_class_t *cls, int idx)
 #define arch_register_for_index(cls, idx) \
     _arch_register_for_index(cls, idx)
 
-/**
- * Get the register set for a register class.
- * @param cls The register class.
- * @return The set containing all registers in the class.
- */
-#define arch_get_register_set_for_class(cls) ((cls)->set)
-
 typedef enum _arch_operand_type_t {
     arch_operand_type_invalid,
     arch_operand_type_memory,

@@ -163,7 +156,7 @@ typedef struct _arch_register_req_t {
     arch_register_req_type_t type;      /**< The type of the constraint. */
     const arch_register_class_t *cls;   /**< The register class this constraint belongs to. */
 
-    int (*limited)(const ir_node *irn, int pos, bitset_t *bs);
+    void (*limited)(const ir_node *irn, int pos, bitset_t *bs);
                                         /**< In case of the 'limited'
                                              constraint, this function
                                              must put all allowable
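
Taken together with the bearch.c hunk above, the 'limited' callback now has a void signature: it only marks the admissible registers for an operand in the bitset, and arch_get_allocatable_regs() derives the count itself via bitset_popcnt(). A minimal sketch of a backend-side callback under the new contract (the name my_limited and the register indices are invented for illustration):

    /* Hypothetical callback matching the new void (*limited)(...) signature:
     * it only fills the bitset, it no longer returns a count. */
    static void my_limited(const ir_node *irn, int pos, bitset_t *bs)
    {
        (void) irn;
        (void) pos;
        bitset_set(bs, 0);   /* allow register index 0 ... */
        bitset_set(bs, 3);   /* ... and register index 3 for this operand */
    }

The caller-side counting is exactly what the bearch.c hunk does: req->limited(irn, pos, bs) followed by bitset_popcnt(bs).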

ir/be/bechordal.c

@@ -53,7 +53,7 @@
 #define NO_COLOR (-1)
 
-#undef DUMP_INTERVALS
+#define DUMP_INTERVALS
 
 typedef struct _be_chordal_alloc_env_t {
     be_chordal_env_t *chordal_env;

@@ -596,10 +596,10 @@ void be_ra_chordal_color(be_chordal_env_t *chordal_env)
         char buf[128];
         plotter_t *plotter;
 
-        ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", cls->name, irg);
+        ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
         plotter = new_plotter_ps(buf);
 
-        draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter, env->arch_env, cls);
+        draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
         plotter_free(plotter);
     }
 #endif

ir/be/bechordal_draw.c

@@ -354,7 +354,7 @@ static void draw_block(ir_node *bl, void *data)
     struct block_dims *dom_dims = pmap_get(env->block_dims, dom);
 
     for (irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
-        if (arch_irn_has_reg_class(env->arch_env, irn, 0, env->cls)) {
+        if (arch_irn_has_reg_class(env->arch_env, irn, -1, env->cls)) {
             const arch_register_t *reg = arch_get_irn_register(env->arch_env, irn);
             int col = arch_register_get_index(reg);
             int x = (col + 1) * opts->h_inter_gap;

ir/be/bechordal_main.c

@@ -247,6 +247,7 @@ static void be_ra_chordal_main(const be_main_env_t *main_env, ir_graph *irg)
         chordal_env.cls = arch_isa_get_reg_class(isa, j);
         be_liveness(irg);
+        dump(BE_CH_DUMP_LIVE, irg, chordal_env.cls, "-live", dump_ir_block_graph_sched);
 
         /* spilling */
         switch (options.spill_method) {

ir/be/bechordal_t.h

@@ -87,12 +87,13 @@ enum {
     /* Dump flags */
     BE_CH_DUMP_NONE       = (1 << 0),
     BE_CH_DUMP_SPILL      = (1 << 1),
-    BE_CH_DUMP_COLOR      = (1 << 2),
-    BE_CH_DUMP_COPYMIN    = (1 << 3),
-    BE_CH_DUMP_SSADESTR   = (1 << 4),
-    BE_CH_DUMP_TREE_INTV  = (1 << 5),
-    BE_CH_DUMP_CONSTR     = (1 << 6),
-    BE_CH_DUMP_LOWER      = (1 << 7),
+    BE_CH_DUMP_LIVE       = (1 << 2),
+    BE_CH_DUMP_COLOR      = (1 << 3),
+    BE_CH_DUMP_COPYMIN    = (1 << 4),
+    BE_CH_DUMP_SSADESTR   = (1 << 5),
+    BE_CH_DUMP_TREE_INTV  = (1 << 6),
+    BE_CH_DUMP_CONSTR     = (1 << 7),
+    BE_CH_DUMP_LOWER      = (1 << 8),
     BE_CH_DUMP_ALL        = 2 * BE_CH_DUMP_LOWER - 1,
 
     /* copymin method */
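
Since each dump option occupies a single bit, BE_CH_DUMP_ALL = 2 * BE_CH_DUMP_LOWER - 1 is a mask covering every flag up to and including BE_CH_DUMP_LOWER: with BE_CH_DUMP_LOWER now at (1 << 8) = 0x100, the mask is 2 * 0x100 - 1 = 0x1FF, i.e. bits 0 through 8. A short sketch of how such a flag might be tested (the variable dump_flags is hypothetical; only the enum values come from this header):

    unsigned dump_flags = BE_CH_DUMP_ALL;   /* hypothetical option variable */

    if (dump_flags & BE_CH_DUMP_LIVE) {
        /* dump the graph with liveness information, cf. the new
         * dump(BE_CH_DUMP_LIVE, ...) call in bechordal_main.c above */
    }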

ir/be/belistsched.c

@@ -14,6 +14,8 @@
 #include <string.h>
 #include <limits.h>
 
+#include "benode_t.h"
+
 #include "obst.h"
 #include "list.h"
 #include "iterator.h"

@@ -81,16 +83,21 @@ static ir_node *trivial_select(void *block_env, pset *ready_set)
     return res;
 }
 
-static int default_to_appear_in_schedule(void *env, const ir_node *irn)
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
 {
-    return to_appear_in_schedule(irn);
+    int res = 0;
+
+    if (sel->to_appear_in_schedule)
+        res = sel->to_appear_in_schedule(block_env, irn);
+
+    return res || to_appear_in_schedule(irn) || be_is_Keep(irn);
 }
 
 static const list_sched_selector_t trivial_selector_struct = {
     NULL,
     NULL,
     trivial_select,
-    default_to_appear_in_schedule,
+    NULL,
     NULL,
     NULL
 };
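
With the default implementation removed, a selector may simply leave its to_appear_in_schedule hook NULL, as the selector structs in this file now do; must_appear_in_schedule() then falls back to to_appear_in_schedule(irn) || be_is_Keep(irn). A hedged sketch of a selector that supplies its own hook; the function my_to_appear is invented, and the six-entry layout (select third, to_appear_in_schedule fourth) is read off the initializers shown in this file:

    /* Hypothetical hook: accept whatever the generic test accepts. */
    static int my_to_appear(void *block_env, const ir_node *irn)
    {
        (void) block_env;
        return to_appear_in_schedule(irn);   /* extra conditions could go here */
    }

    static const list_sched_selector_t my_selector_struct = {
        NULL,              /* graph init */
        NULL,              /* block init */
        trivial_select,    /* select */
        my_to_appear,      /* to_appear_in_schedule, may also stay NULL */
        NULL,
        NULL
    };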

@@ -205,12 +212,12 @@ static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
      * Collect usage statistics.
      */
     sched_foreach(bl, irn) {
-        if (env->vtab->to_appear_in_schedule(env, irn)) {
+        if (must_appear_in_schedule(env->vtab, env, irn)) {
             int i, n;
 
             for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
                 ir_node *op = get_irn_n(irn, i);
 
-                if (env->vtab->to_appear_in_schedule(env, irn)) {
+                if (must_appear_in_schedule(env->vtab, env, irn)) {
                     usage_stats_t *us = get_or_set_usage_stats(env, irn);
                     if (is_live_end(bl, op))
                         us->uses_in_block = 99999;

@@ -245,7 +252,7 @@ static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
     for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
         ir_node *op = get_irn_n(irn, i);
 
-        if (env->vtab->to_appear_in_schedule(env, op))
+        if (must_appear_in_schedule(env->vtab, env, op))
             sum += compute_max_hops(env, op);
     }

@@ -276,7 +283,7 @@ static const list_sched_selector_t reg_pressure_selector_struct = {
     reg_pressure_graph_init,
     reg_pressure_block_init,
     reg_pressure_select,
-    default_to_appear_in_schedule,
+    NULL,
     reg_pressure_block_free,
     NULL
 };

@@ -404,15 +411,15 @@ static int node_cmp_func(const void *p1, const void *p2)
 /**
  * Append an instruction to a schedule.
- * @param env The block scheduleing environment.
+ * @param env The block scheduling environment.
  * @param irn The node to add to the schedule.
- * @return The given node.
+ * @return    The given node.
  */
 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
 {
     /* If the node consumes/produces data, it is appended to the schedule
      * list, otherwise, it is not put into the list */
-    if (to_appear_in_schedule(irn)) {
+    if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
         sched_info_t *info = get_irn_sched_info(irn);
 
         INIT_LIST_HEAD(&info->list);
         info->scheduled = 1;

@@ -431,7 +438,6 @@ static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
     return irn;
 }
 
-
 /**
  * Add the proj nodes of a tuple-mode irn to the schedule immediately
  * after the tuple-moded irn. By pinning the projs after the irn, no

@@ -470,6 +476,11 @@ static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
     }
 }
 
+static ir_node *select_node(block_sched_env_t *be)
+{
+    return be->selector->select(be->selector_block_env, be->ready_set);
+}
+
 /**
  * Perform list scheduling on a block.
  *

@@ -505,8 +516,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
     be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
     be.selector          = selector;
 
-    firm_dbg_set_mask(be.dbg, 0);
-
     if (selector->init_block)
         be.selector_block_env = selector->init_block(env->selector_env, block);

@@ -565,7 +574,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
     while (pset_count(be.ready_set) > 0) {
         /* select a node to be scheduled and check if it was ready */
-        irn = selector->select(be.selector_block_env, be.ready_set);
+        irn = select_node(&be);
 
         DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));

ir/be/belistsched.h

@@ -80,11 +80,6 @@ struct _list_sched_selector_t {
 };
 
-/**
- * A default implementation of to_appear_in_schedule,
- * as required in list_sched_selector_t.
- */
-extern int be_default_to_appear_in_schedule(void *env, const ir_node *irn);
-
 /**
  * A trivial selector, that just selects the first ready node.

ir/be/belower.c

@@ -19,7 +19,7 @@
 #include "belower.h"
 #include "benode_t.h"
 #include "bechordal_t.h"
-#include "besched.h"
+#include "besched_t.h"
 #include "irgmod.h"
 #include "iredges_t.h"

@@ -403,6 +403,7 @@ static void lower_call_node(ir_node *call, void *walk_env) {
     arch_isa_t *isa           = arch_env_get_isa(arch_env);
     const ir_node *proj_T     = NULL;
     ir_node **in_keep, *block = get_nodes_block(call);
+    ir_node *last_proj        = NULL;
     bitset_t *proj_set;
     const ir_edge_t *edge;
     const arch_register_t *reg;

@@ -426,8 +427,21 @@ static void lower_call_node(ir_node *call, void *walk_env) {
     /* set all used arguments */
     if (proj_T) {
         foreach_out_edge(proj_T, edge) {
+            ir_node *proj = get_edge_src_irn(edge);
+
+            assert(is_Proj(proj));
             bitset_set(proj_set, get_Proj_proj(get_edge_src_irn(edge)));
+
+            /*
+             * Filter out the last proj in the schedule.
+             * After that one, we have to insert the Keep node.
+             */
+            if (!last_proj || sched_comes_after(last_proj, proj))
+                last_proj = proj;
         }
     }
     else {

@@ -450,14 +464,21 @@ static void lower_call_node(ir_node *call, void *walk_env) {
         if (arch_register_type_is(reg, caller_saved)) {
             pn = isa->impl->get_projnum_for_register(isa, reg);
             if (!bitset_is_set(proj_set, pn)) {
-                in_keep[keep_arity++] = new_r_Proj(current_ir_graph, block, (ir_node *)proj_T, mode_Is, pn);
+                ir_node *proj = new_r_Proj(current_ir_graph, block, (ir_node *)proj_T, mode_Is, pn);
+
+                in_keep[keep_arity++] = proj;
+                sched_add_after(last_proj, proj);
+                last_proj = proj;
             }
         }
     }
 
     /* ok, we found some caller save register which are not in use but must be saved */
     if (keep_arity) {
-        be_new_Keep(reg_class, current_ir_graph, block, keep_arity, in_keep);
+        ir_node *keep;
+
+        keep = be_new_Keep(reg_class, current_ir_graph, block, keep_arity, in_keep);
+        sched_add_after(last_proj, keep);
     }
 }

@@ -520,17 +541,15 @@ static void lower_nodes_walker(ir_node *irn, void *walk_env) {
     lower_env_t *env           = walk_env;
     const arch_env_t *arch_env = env->chord_env->main_env->arch_env;
 
-    if (!is_Block(irn)) {
-        if (!is_Proj(irn)) {
-            if (is_Perm(arch_env, irn)) {
-                lower_perm_node(irn, walk_env);
-            }
-            else if (is_Call(arch_env, irn)) {
-                lower_call_node(irn, walk_env);
-            }
-            else if (be_is_Spill(irn) || be_is_Reload(irn)) {
-                lower_spill_reload(irn, walk_env);
-            }
+    if (!is_Block(irn) && !is_Proj(irn)) {
+        if (is_Perm(arch_env, irn)) {
+            lower_perm_node(irn, walk_env);
+        }
+        else if (is_Call(arch_env, irn)) {
+            lower_call_node(irn, walk_env);
+        }
+        else if (be_is_Spill(irn) || be_is_Reload(irn)) {
+            lower_spill_reload(irn, walk_env);
+        }
     }

ir/be/besched.c

@@ -170,6 +170,11 @@ int sched_verify_irg(ir_graph *irg)
     return res;
 }
 
+int (sched_comes_after)(const ir_node *n1, const ir_node *n2)
+{
+    return _sched_comes_after(n1, n2);
+}
+
 int sched_skip_cf_predicator(const ir_node *irn, void *data) {
     arch_env_t *ae = data;
     return arch_irn_classify(ae, irn) == arch_irn_class_branch;
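
The parentheses around the function name in int (sched_comes_after)(...) keep the function-like macro of the same name (added to besched_t.h below) from expanding at the definition, so an out-of-line, addressable version of the inline _sched_comes_after exists alongside the macro. A self-contained sketch of the pattern with invented names:

    #define square(x) _square(x)                 /* macro wrapper, like besched_t.h */

    static int _square(int x) { return x * x; }  /* implementation version */

    /* Parenthesising the name suppresses macro expansion here, so this
     * defines a real function named square whose address can be taken: */
    int (square)(int x)
    {
        return _square(x);
    }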

ir/be/besched_t.h

@@ -248,6 +248,21 @@ extern int sched_verify(const ir_node *block);
  */
 extern int sched_verify_irg(ir_graph *irg);
 
+/**
+ * Checks, if one node is scheduled before another.
+ * @param n1 A node.
+ * @param n2 Another node.
+ * @return 1, if n1 is in front of n2 in the schedule, 0 else.
+ * @note Both nodes must be in the same block.
+ */
+static INLINE int _sched_comes_after(const ir_node *n1, const ir_node *n2)
+{
+    assert(_sched_is_scheduled(n1));
+    assert(_sched_is_scheduled(n2));
+    assert(get_nodes_block(n1) == get_nodes_block(n2));
+    return _sched_get_time_step(n1) < _sched_get_time_step(n2);
+}
+
 /**
  * A predicate for a node.
  * @param irn The node.
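
belower.c above uses this helper to track the schedule-wise last Proj of a Call before inserting the Keep node. A small sketch of that idiom; the pset nodes is hypothetical, the iteration style follows bechordal_draw.c:

    /* Find the node scheduled last among some nodes of one block. */
    ir_node *irn, *last = NULL;

    for (irn = pset_first(nodes); irn; irn = pset_next(nodes)) {
        if (!last || sched_comes_after(last, irn))
            last = irn;
    }

    /* last is then a valid insertion point, e.g. sched_add_after(last, keep). */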

@@ -273,16 +288,17 @@ extern ir_node *sched_skip(ir_node *from, int forward,
                sched_predicator_t *predicator, void *data);
 
 #define sched_get_time_step(irn)       _sched_get_time_step(irn)
 #define sched_has_succ(irn)            _sched_has_succ(irn)
 #define sched_has_prev(irn)            _sched_has_prev(irn)
 #define sched_succ(irn)                _sched_succ(irn)
 #define sched_prev(irn)                _sched_prev(irn)
 #define sched_first(irn)               _sched_first(irn)
 #define sched_last(irn)                _sched_last(irn)
 #define sched_add_before(before, irn)  _sched_add_before(before, irn)
 #define sched_add_after(after, irn)    _sched_add_after(after, irn)
 #define sched_remove(irn)              _sched_remove(irn)
 #define sched_is_scheduled(irn)        _sched_is_scheduled(irn)
+#define sched_comes_after(n1, n2)      _sched_comes_after(n1, n2)
 #define sched_cmp(a, b)                _sched_cmp(a, b)
 
 #endif