Zwinkau / libfirm / Commits / b02303ed

Commit b02303ed authored Feb 22, 2002 by Götz Lindenmaier

    irgopt: inline_small_irgs implemented

    [r315]

parent 8e1b80d5

Changes 11

Changes
+22.2. Goetz
+  irgopt: inline_small_irgs implemented
 30.1. - 20.2. Goetz
   Bugfixes, some access functions ...
 29.1.2002 Goetz
   New directory: ana for analyses. Adapted configure/makefiles
   implemented irout: backedges. Added one field to ir_graph, one to ir_node.
ir/ir/irdump.c

@@ -102,7 +102,8 @@ dump_node_opcode (ir_node *n)
     /* SymConst */
   } else if (n->op->code == iro_SymConst) {
     if (get_SymConst_kind(n) == linkage_ptr_info) {
-      xfprintf (F, "SymC %I", get_SymConst_ptrinfo(n));
+      /* don't use get_SymConst_ptr_info as it mangles the name. */
+      xfprintf (F, "SymC %I", n->attr.i.tori.ptrinfo);
     } else {
       assert(get_kind(get_SymConst_type(n)) == k_type);
       assert(get_type_ident(get_SymConst_type(n)));

@@ -157,7 +158,7 @@ dump_node_nodeattr (ir_node *n)
     break;
   case iro_Sel: {
     assert(get_kind(get_Sel_entity(n)) == k_entity);
-    xfprintf (F, "%s ", id_to_str(get_entity_ident(get_Sel_entity(n))));
+    xfprintf (F, "%I ", get_entity_ident(get_Sel_entity(n)));
     } break;
   default:
   } /* end switch */

@@ -344,7 +345,7 @@ dump_ir_node (ir_node *n)
   case iro_Sel:
     assert(get_kind(get_Sel_entity(n)) == k_entity);
     xfprintf (F, "\"%I ", get_irn_opident(n));
-    xfprintf (F, "%s ", id_to_str(get_entity_ident(get_Sel_entity(n))));
+    xfprintf (F, "%I ", get_entity_ident(get_Sel_entity(n)));
     xfprintf (F, DEFAULT_NODE_ATTR);
     break;
   case iro_SymConst:

@@ -634,7 +635,7 @@ dump_type_info (type_or_ent *tore, void *env) {
     switch (get_entity_visibility(ent)) {
     case local:              fprintf (F, "local\n");              break;
     case external_visible:   fprintf (F, "external_visible\n");   break;
-    case external_allocated: fprintf (F, "external_allocate\nd"); break;
+    case external_allocated: fprintf (F, "external_allocated\n"); break;
     }
     switch (get_entity_variability(ent)) {
     case uninitialized: fprintf (F, "uninitialized"); break;

@@ -642,6 +643,8 @@ dump_type_info (type_or_ent *tore, void *env) {
     case part_constant: fprintf (F, "part_constant"); break;
     case constant:      fprintf (F, "constant");      break;
     }
+    if (is_method_type(get_entity_type(ent)))
+      xfprintf (F, "\nirg = %p ", get_entity_irg(ent));
     xfprintf (F, "\"}\n");
     /* The Edges */
     /* skip this to reduce graph.  Member edge of type is parallel to this edge. *

@@ -1005,8 +1008,9 @@ dump_block_to_cfg (ir_node *block, void *env) {
   if (get_irn_opcode(block) == iro_Block) {
     /* This is a block. Dump a node for the block. */
-    xfprintf (F, "node: {title: \"%p\" label: \"%I\"}", block, block->op->name);
+    xfprintf (F, "node: {title: \""); PRINT_NODEID(block);
+    xfprintf (F, "\" label: \"%I ", block->op->name); PRINT_NODEID(block); xfprintf (F, "\"}\n");
     /* Dump the edges */
     for (i = 0; i < get_Block_n_cfgpreds(block); i++) {
       pred = get_nodes_Block(skip_Proj(get_Block_cfgpred(block, i)));
ir/ir/irgopt.c

@@ -14,6 +14,7 @@
 # include <assert.h>
 # include "irprog.h"
 # include "irgopt.h"
+# include "irnode_t.h"
 # include "irgraph_t.h"

@@ -22,7 +23,7 @@
 # include "ircons.h"
 # include "misc.h"
 # include "irgmod.h"
-# include "array.h"
+# include "pset.h"

 /* Defined in iropt.c */

@@ -49,14 +50,20 @@ void init_link (ir_node *n, void *env) {
 void
 optimize_in_place_wrapper (ir_node *n, void *env) {
-  int i;
+  int start, i;
   ir_node *optimized;

+  if (get_irn_op(n) == op_Block)
+    start = 0;
+  else
+    start = -1;
+
   /* optimize all sons after recursion, i.e., the sons' sons are
      optimized already. */
-  for (i = -1; i < get_irn_arity(n); i++) {
+  for (i = start; i < get_irn_arity(n); i++) {
     optimized = optimize_in_place_2(get_irn_n(n, i));
     set_irn_n(n, i, optimized);
     assert(get_irn_op(optimized) != op_Id);
   }
 }

@@ -66,7 +73,7 @@ local_optimize_graph (ir_graph *irg) {
   current_ir_graph = irg;

   /* Handle graph state */
-  // assert(get_irg_phase_state(irg) != phase_building);
+  assert(get_irg_phase_state(irg) != phase_building);
   if (get_opt_global_cse())
     set_irg_pinned(current_ir_graph, floats);
   if (get_irg_outs_state(current_ir_graph) == outs_consistent)

@@ -101,7 +108,6 @@ get_new_node (ir_node * n)
   return n->link;
 }

 /* We use the block_visited flag to mark that we have computed the
    number of useful predecessors for this block.
    Further we encode the new arity in this flag in the old blocks.

@@ -223,11 +229,13 @@ copy_preds (ir_node *n, void *env) {
     for (i = -1; i < get_irn_arity(n); i++)
       set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
   }
-  /* Now the new node is complete. We can add it to the hash table for cse. */
-  add_identities (current_ir_graph->value_table, nn);
+  /* Now the new node is complete. We can add it to the hash table for cse.
+     @@@ inlinening aborts if we identify End. Why? */
+  if (get_irn_op(nn) != op_End)
+    add_identities (current_ir_graph->value_table, nn);
 }

-/* Copies the graph resucsively, compacts the keepalive of the end node. */
+/* Copies the graph recursively, compacts the keepalive of the end node. */
 void
 copy_graph () {
   ir_node *oe, *ne; /* old end, new end */

@@ -346,7 +354,7 @@ dead_node_elimination(ir_graph *irg) {
   current_ir_graph = irg;

   /* Handle graph state */
-  // assert(get_irg_phase_state(current_ir_graph) != phase_building);
+  assert(get_irg_phase_state(current_ir_graph) != phase_building);
   free_outs(current_ir_graph);

   if (get_optimize() && get_opt_dead_node_elimination()) {

@@ -413,7 +421,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
   if (!get_opt_inline()) return;

   /* Handle graph state */
-  // assert(get_irg_phase_state(current_ir_graph) != phase_building);
+  assert(get_irg_phase_state(current_ir_graph) != phase_building);
   if (get_irg_outs_state(current_ir_graph) == outs_consistent)
     set_irg_outs_inconsistent(current_ir_graph);
@@ -650,3 +658,82 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
free
(
cf_pred
);
}
}
/********************************************************************/
/* Apply inlineing to small methods. */
/********************************************************************/
static
int
pos
;
/* It makes no sense to inline too many calls in one procedure. Anyways,
I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024
static
void
collect_calls
(
ir_node
*
call
,
void
*
env
)
{
ir_node
**
calls
=
(
ir_node
**
)
env
;
ir_node
*
addr
;
tarval
*
tv
;
ir_graph
*
called_irg
;
if
(
get_irn_op
(
call
)
!=
op_Call
)
return
;
addr
=
get_Call_ptr
(
call
);
if
(
get_irn_op
(
addr
)
==
op_Const
)
{
/* Check whether the constant is the pointer to a compiled entity. */
tv
=
get_Const_tarval
(
addr
);
if
(
tv
->
u
.
p
.
ent
)
{
called_irg
=
get_entity_irg
(
tv
->
u
.
p
.
ent
);
if
(
called_irg
&&
pos
<
MAX_INLINE
)
{
/* The Call node calls a locally defined method. Remember to inline. */
calls
[
pos
]
=
call
;
pos
++
;
}
}
}
}
/* Inlines all small methods at call sites where the called address comes
from a Const node that references the entity representing the called
method.
The size argument is a rough measure for the code size of the method:
Methods where the obstack containing the firm graph is smaller than
size are inlined. */
void
inline_small_irgs
(
ir_graph
*
irg
,
int
size
)
{
int
i
;
ir_node
*
calls
[
MAX_INLINE
];
ir_graph
*
rem
=
current_ir_graph
;
if
(
!
(
get_optimize
()
&&
get_opt_inline
()))
return
;
/*DDME(get_irg_ent(current_ir_graph));*/
current_ir_graph
=
irg
;
/* Handle graph state */
assert
(
get_irg_phase_state
(
current_ir_graph
)
!=
phase_building
);
/* Find Call nodes to inline.
(We can not inline during a walk of the graph, as inlineing the same
method several times changes the visited flag of the walked graph:
after the first inlineing visited of the callee equals visited of
the caller. With the next inlineing both are increased.) */
pos
=
0
;
irg_walk
(
get_irg_end
(
irg
),
NULL
,
collect_calls
,
(
void
*
)
calls
);
if
((
pos
>
0
)
&&
(
pos
<
MAX_INLINE
))
{
/* There are calls to inline */
collect_phiprojs
(
irg
);
for
(
i
=
0
;
i
<
pos
;
i
++
)
{
tarval
*
tv
;
ir_graph
*
callee
;
tv
=
get_Const_tarval
(
get_Call_ptr
(
calls
[
i
]));
callee
=
get_entity_irg
(
tv
->
u
.
p
.
ent
);
if
((
_obstack_memory_used
(
callee
->
obst
)
-
obstack_room
(
callee
->
obst
))
<
size
)
{
/*printf(" inlineing "); DDME(tv->u.p.ent);*/
inline_method
(
calls
[
i
],
callee
);
}
}
}
current_ir_graph
=
rem
;
}
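
For orientation (not part of the commit): a minimal driver sketch showing how the new pass is meant to be combined with the existing local optimization, following the recommendation in irgopt.h. The function name and the threshold of 160 obstack bytes are made-up example values.

    # include "irgraph.h"
    # include "irgopt.h"

    /* Hypothetical driver: inline callees whose graph obstack is smaller
       than ~160 bytes, then clean up the Proj-Tuple-Jmp combinations
       that inline_method leaves behind. */
    void example_optimize_irg (ir_graph *irg) {
      inline_small_irgs (irg, 160);   /* example threshold, tune per front end */
      local_optimize_graph (irg);     /* recommended after inlining */
    }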
ir/ir/irgopt.h

@@ -19,6 +19,8 @@ void local_optimize_graph (ir_graph *irg);
 /* Performs dead node elimination by copying the ir graph to a new obstack.
    Further removes Bad predecesors from Blocks and the corresponding
    inputs to Phi nodes.
+   Optimization is only performed if options `optimize' and
+   `opt_dead_node_elimination' are set.
    The graph may not be in state phase_building.  The outs datasturcture
    is freed, the outs state set to no_outs.  (@@@ Change this? -> inconsistent.)
    Removes old attributes of nodes.  Sets link field to NULL.

@@ -26,6 +28,12 @@ void local_optimize_graph (ir_graph *irg);
    development/debugging are not conserved by copying. */
 void dead_node_elimination(ir_graph *irg);

+/* Removes Bad Bad predecesors from Blocks and the corresponding
+   inputs to Phi nodes as in dead_node_elimination but without
+   copying the graph.
+   @@@ not implemented! */
+void remove_bad_predecessors(ir_graph *irg);
+
 /* Inlines a method at the given call site.
    Assumes that call is a Call node in current_ir_graph and that
    the type in the Call nodes type attribute is the same as the

@@ -33,14 +41,14 @@ void dead_node_elimination(ir_graph *irg);
    Further it assumes that all Phi nodes in a block of current_ir_graph
    are assembled in a "link" list in the link field of the corresponding
    block nodes.  Further assumes that all Proj nodes are in a "link" list
-   in the nodes producing the tuple.  (This is only a optical feature
+   in the nodes producing the tuple.  (This is only an optical feature
    for the graph.)  Conserves this feature for the old
    nodes of the graph.  This precondition can be established by a call to
    collect_phisprojs(), see irgmod.h.
    Called_graph must be unequal to current_ir_graph. Will not inline
    if they are equal.
-   Sets visited masterflag in curren_ir_graph to max of flag in current
-   and called graphs.
+   Sets visited masterflag in current_ir_graph to the max of the flag in
+   current and called graph.
    Removes the call node and splits the basic block the call node
    belongs to.  Inserts a copy of the called graph between these nodes.
    It is recommended to call local_optimize_graph after inlining as this

@@ -48,4 +56,20 @@ void dead_node_elimination(ir_graph *irg);
    combination as control flow operation. */
 void inline_method(ir_node *call, ir_graph *called_graph);

+/* Inlines all small methods at call sites where the called address comes
+   from a Const node that references the entity representing the called
+   method.
+   The size argument is a rough measure for the code size of the method:
+   Methods where the obstack containing the firm graph is smaller than
+   size are inlined.  Further only a limited number of calls are inlined.
+   If the method contains more than 1024 inlineable calls none will be
+   inlined.
+   Inlining is only performed if flags `optimize' and `inlineing' are set.
+   The graph may not be in state phase_building.
+   It is recommended to call local_optimize_graph after inlining as this
+   function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+   combination as control flow operation. */
+void inline_small_irgs(ir_graph *irg, int size);
+
 # endif /* _IRGOPT_H_ */
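
As a usage note for the inline_method precondition described above, a hedged sketch (modeled on testprograms/oo_inline_example.c further down; `call' and `callee' are assumed to be supplied by the caller):

    # include "irnode.h"
    # include "irgraph.h"   /* current_ir_graph */
    # include "irgmod.h"    /* collect_phiprojs */
    # include "irgopt.h"

    /* Sketch: inline one known call site.  call must be a Call node in
       caller, callee the graph of the called method (unequal to caller). */
    void example_inline_one_call (ir_graph *caller, ir_node *call, ir_graph *callee) {
      current_ir_graph = caller;
      collect_phiprojs (caller);      /* establish the Phi/Proj link lists */
      inline_method (call, callee);
      local_optimize_graph (caller);  /* remove leftover Tuple nodes */
    }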
ir/ir/irnode.c

@@ -1830,12 +1830,17 @@ skip_Proj (ir_node *node) {
 inline ir_node *
 skip_Tuple (ir_node *node) {
-  if ((node->op == op_Proj) &&
-      (get_irn_op(get_Proj_pred(node)) == op_Tuple))
-    return get_Tuple_pred(get_Proj_pred(node), get_Proj_proj(node));
+  ir_node *pred;
+
+  if (get_irn_op(node) == op_Proj) {
+    pred = skip_nop(get_Proj_pred(node));
+    if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
+      pred = skip_nop(skip_Tuple(pred));
+    if (get_irn_op(pred) == op_Tuple)
+      return get_Tuple_pred(pred, get_Proj_proj(node));
+  }
   return node;
 }

 inline ir_node *
 skip_nop (ir_node *node) {
   /* don't assert node !!! */
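
To make the new behavior concrete, a small hedged sketch (the wrapper is hypothetical; only skip_Tuple itself is from this commit):

    # include "irnode.h"

    /* Sketch: resolve a Proj through (possibly nested) Tuples.  After this
       commit skip_Tuple also looks through Id nodes via skip_nop and
       recurses on Proj-of-Tuple chains; for any node that is not such a
       Proj it returns its argument unchanged. */
    ir_node *example_resolve_proj (ir_node *p) {
      return skip_Tuple (p);
    }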
ir/ir/iropt.c

@@ -419,7 +419,6 @@ equivalent_node (ir_node *n)
   */
   int i, n_preds;
   ir_node *block = NULL;     /* to shutup gcc */
   ir_node *first_val = NULL; /* to shutup gcc */
   ir_node *scnd_val = NULL;  /* to shutup gcc */
ir/tr/entity.c

@@ -84,6 +84,8 @@ new_entity (type *owner, ident *name, type *type)
   res->ld_name = NULL;
   res->overwrites = NEW_ARR_F(entity *, 1);
+  res->irg = NULL;
+  res->visit = 0;

   /* Remember entity in it's owner. */
ir/tr/entity.h

@@ -109,7 +109,11 @@ typedef struct entity entity;
 #endif

 /* Creates a new entity.
-   Automatically inserts the entity as a member of owner. */
+   Automatically inserts the entity as a member of owner.
+   Entity is automatic_allocated and uninitialize except if the type
+   is type_method, then it is static_allocated and constant.  The constant
+   value is a pointer to the method.
+   Visibility is local, offset -1, and it is not volatile. */
 entity     *new_entity (type *owner, ident *name, type *type);

 /* Copies the entity if the new_owner is different from the
    owner of the old entity.  Else returns the old entity.
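
A hedged sketch of the documented defaults; id_from_str and new_type_class are assumed from the usual libfirm headers, and mode_I is the mode already used elsewhere in this commit:

    # include "ident.h"
    # include "irmode.h"
    # include "type.h"
    # include "entity.h"

    /* Sketch: a non-method entity starts out automatic_allocated,
       uninitialized, local, offset -1 and not volatile, per the new
       comment above. */
    void example_entity_defaults (void) {
      type   *cls = new_type_class (id_from_str ("Example", 7));       /* assumed API */
      type   *elt = new_type_primitive (id_from_str ("elt", 3), mode_I);
      entity *fld = new_entity (cls, id_from_str ("fld", 3), elt);
      (void) fld;   /* defaults apply as documented above */
    }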
ir/tr/type.c

@@ -192,7 +192,9 @@ set_type_state(type *tp, type_state state) {
       if (tp != get_glob_type())
         for (i = 0; i < get_class_n_member(tp); i++) {
           assert(get_entity_offset(get_class_member(tp, i)) > -1);
-          /* assert(get_entity_allocation(get_class_member(tp, i)) == automatic_allocated); @@@ lowerfirm geht nicht durch */
+          assert(is_method_type(get_entity_type(get_class_member(tp, i))) ||
+                 (get_entity_allocation(get_class_member(tp, i)) == automatic_allocated));
+          /* @@@ lowerfirm geht nicht durch */
         }
     }
     break;
   case tpo_struct:

@@ -200,7 +202,7 @@ set_type_state(type *tp, type_state state) {
     /* assert(get_type_size(tp) > -1); @@@ lowerfirm geht nicht durch */
     for (i = 0; i < get_struct_n_member(tp); i++) {
       assert(get_entity_offset(get_struct_member(tp, i)) > -1);
-      /* assert(get_entity_allocation(get_struct_member(tp, i)) == automatic_allocated); @@@ lowerfirm geht nicht durch */
+      assert((get_entity_allocation(get_struct_member(tp, i)) == automatic_allocated));
     }
     break;
   case tpo_union:

@@ -406,7 +408,8 @@ inline void free_struct_attrs (type *strct) {
 /* manipulate private fields of struct */
 void add_struct_member (type *strct, entity *member) {
   assert(strct && (strct->type_op == type_struct));
-  /*assert(get_type_tpop(get_entity_type(member)) != type_method);   @@@ lowerfirm geht nicht durch */
+  assert(get_type_tpop(get_entity_type(member)) != type_method);
+  /* @@@ lowerfirm geht nicht durch */
   ARR_APP1 (entity *, strct->attr.sa.members, member);
 }

 int get_struct_n_member (type *strct) {

@@ -421,7 +424,7 @@ entity *get_struct_member (type *strct, int pos) {
 void set_struct_member (type *strct, int pos, entity *member) {
   assert(strct && (strct->type_op == type_struct));
   assert(pos >= 0 && pos < get_struct_n_member(strct));
-  /* assert(get_entity_type(member)->type_op != type_method); @@@ lowerfirm !!*/
+  assert(get_entity_type(member)->type_op != type_method);  /* @@@ lowerfirm !!*/
   strct->attr.sa.members[pos+1] = member;
 }

 void remove_struct_member (type *strct, entity *member) {

@@ -643,6 +646,15 @@ void set_array_bounds (type *array, int dimension, ir_node * lower_bound,
   array->attr.aa.lower_bound[dimension] = lower_bound;
   array->attr.aa.upper_bound[dimension] = upper_bound;
 }
+void set_array_lower_bound_int (type *array, int dimension, int lower_bound) {
+  ir_graph *rem;
+  assert(array && (array->type_op == type_array));
+  rem = current_ir_graph;
+  current_ir_graph = get_const_code_irg();
+  array->attr.aa.lower_bound[dimension] =
+    new_Const(mode_I, tarval_from_long (mode_I, lower_bound));
+  current_ir_graph = rem;
+}
 void set_array_lower_bound (type *array, int dimension, ir_node * lower_bound) {
   assert(array && (array->type_op == type_array));
   array->attr.aa.lower_bound[dimension] = lower_bound;

@@ -794,6 +806,7 @@ bool is_pointer_type (type *pointer) {
 /* create a new type primitive */
 type *new_type_primitive (ident *name, ir_mode *mode) {
   type *res;
+  /* @@@ assert( mode_is_data(mode) && (!mode == mode_p)); */
   res = new_type(type_primitive, mode, name);
   res->size = get_mode_size(mode);
   res->state = layout_fixed;
ir/tr/type.h

@@ -421,6 +421,7 @@ void set_array_bounds_int (type *array, int dimension, int lower_bound,
 void set_array_bounds          (type *array, int dimension, ir_node *lower_bound,
                                 ir_node *upper_bound);
 void set_array_lower_bound     (type *array, int dimension, ir_node *lower_bound);
+void set_array_lower_bound_int (type *array, int dimension, int lower_bound);
 void set_array_upper_bound     (type *array, int dimension, ir_node *upper_bound);
 ir_node *get_array_lower_bound (type *array, int dimension);
 ir_node *get_array_upper_bound (type *array, int dimension);
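
A short sketch of the new convenience setter declared above (`arr' is assumed to be an existing array type):

    # include "type.h"

    /* Sketch: fix the lower bound of dimension 0 to 0.  Per type.c, the
       _int variant builds the Const node in the const-code graph itself,
       so the caller needs no ir_node. */
    void example_set_first_lower_bound (type *arr) {
      set_array_lower_bound_int (arr, 0, 0);
    }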
testprograms/oo_inline_example.c

@@ -274,7 +274,6 @@ main(void)
   /****************************************************************************/

   collect_phiprojs(main_irg);
-  current_ir_graph = main_irg;
   printf("Inlining set_a ...\n");