Zwinkau / libfirm · Commits · 15ad7ccd

Commit 15ad7ccd, authored Mar 09, 2011 by Christoph Mallon

    Fix typos in comments: s/it's/its/ and related corrections.

Parent: 011ba04f
Changes: 53 files
ir/be/beloopana.c
@@ -105,7 +105,7 @@ static unsigned be_compute_block_pressure(const ir_graph *irg,
 }
 
 /**
- * Compute the highest register pressure in a loop and it's sub-loops.
+ * Compute the highest register pressure in a loop and its sub-loops.
  * @param loop_ana The loop ana object.
  * @param loop     The loop to compute pressure for.
  * @param cls      The register class to compute pressure for.
ir/be/belower.c
@@ -52,7 +52,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;)
 DEBUG_ONLY(static firm_dbg_module_t *dbg_permmove;)
 
-/** Associates an ir_node with it's copy and CopyKeep. */
+/** Associates an ir_node with its copy and CopyKeep. */
 typedef struct {
     ir_nodeset_t copies;              /**< all non-spillable copies of this irn */
     const arch_register_class_t *cls;
@@ -554,7 +554,7 @@ static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different,
     sched_add_before(skip_Proj(irn), cpy);
     sched_add_after(skip_Proj(irn), keep);
 
-    /* insert the other different and it's copies into the map */
+    /* insert the other different and its copies into the map */
     entry = (op_copy_assoc_t*)ir_nodemap_get(op_set, other_different);
     if (! entry) {
         entry = OALLOC(&env->obst, op_copy_assoc_t);
@@ -804,7 +804,7 @@ void assure_constraints(ir_graph *irg)
     DB((dbg_constr, LEVEL_1, "\n"));
 
-    /* introduce the copies for the operand and it's copies */
+    /* introduce the copies for the operand and its copies */
     be_ssa_construction_init(&senv, irg);
     be_ssa_construction_add_copy(&senv, map_entry.node);
     be_ssa_construction_add_copies(&senv, nodes, n);
ir/be/bepeephole.c
@@ -132,9 +132,9 @@ void be_peephole_new_node(ir_node * nw)
 /**
  * must be called from peephole optimisations before a node will be killed
  * and its users will be redirected to new_node.
- * so bepeephole can update it's internal state.
+ * so bepeephole can update its internal state.
  *
- * Note: killing a node and rewiring os only allowed if new_node produces
+ * Note: killing a node and rewiring is only allowed if new_node produces
  * the same registers as old_node.
  */
 static void be_peephole_before_exchange(const ir_node *old_node,
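An aside on the contract fixed up above: a peephole handler may only kill a node and rewire its users if the replacement yields the same registers. A minimal sketch of such a handler (not from this commit; only be_peephole_exchange() is taken from bepeephole.h, be_is_Copy() and arch_get_irn_register() are the usual backend helpers, and the handler name is made up):

static void peephole_remove_redundant_copy(ir_node *node)
{
    ir_node *op;

    if (!be_is_Copy(node))
        return;

    /* rewiring is only allowed if the operand produces the same register */
    op = get_irn_n(node, 0);
    if (arch_get_irn_register(node) != arch_get_irn_register(op))
        return;

    /* redirect all users of the Copy to its operand and kill the Copy */
    be_peephole_exchange(node, op);
}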
ir/be/bepeephole.h
@@ -62,7 +62,7 @@ void be_peephole_new_node(ir_node *nw);
 void be_peephole_exchange(ir_node *old, ir_node *nw);
 
 /**
- * Tries to optimize a beIncSp node with it's previous IncSP node.
+ * Tries to optimize a beIncSp node with its previous IncSP node.
  * Must be run from a be_peephole_opt() context.
  *
  * @param node a be_IncSP node
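The be_IncSP optimisation this header comment documents folds two consecutive stack-pointer adjustments into one. A rough sketch of the idea (illustrative only; be_is_IncSP(), be_get_IncSP_pred() and the IncSP offset accessors are assumed to be the usual benode.h helpers):

static void fold_IncSP_IncSP(ir_node *node)
{
    ir_node *pred = be_get_IncSP_pred(node);

    if (!be_is_IncSP(pred))
        return;

    /* accumulate both adjustments in the predecessor ... */
    be_set_IncSP_offset(pred, be_get_IncSP_offset(pred)
                              + be_get_IncSP_offset(node));
    /* ... and let the second IncSP disappear */
    be_peephole_exchange(node, pred);
}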
ir/be/bespillbelady.c
@@ -766,7 +766,7 @@ static void process_block(ir_node *block)
         /* no predecessor -> empty set */
         workset_clear(ws);
     } else if (arity == 1) {
-        /* one predecessor, copy it's end workset */
+        /* one predecessor, copy its end workset */
         ir_node *pred_block = get_Block_cfgpred_block(block, 0);
         block_info_t *pred_info = get_block_info(pred_block);
ir/be/bespillutil.h
@@ -100,7 +100,7 @@ void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl,
 void be_insert_spills_reloads(spill_env_t *senv);
 
 /**
- * There are 2 possibilities to spill a phi node: Only it's value, or replacing
+ * There are 2 possibilities to spill a phi node: Only its value, or replacing
  * the whole phi-node with a memory phi. Normally only the value of a phi will
  * be spilled unless you mark the phi with be_spill_phi.
  * (Remember that each phi needs a register, so you have to spill phis when
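The comment names be_spill_phi as the switch between the two spilling modes but shows no call site. A minimal sketch, assuming be_spill_phi() takes the spill environment and the Phi; phi_needs_register() is a made-up predicate for illustration:

static void decide_phi_spill(spill_env_t *senv, ir_node *phi)
{
    /* default: only the Phi's value is spilled, the Phi keeps its register */
    if (phi_needs_register(phi))
        return;

    /* otherwise replace the whole Phi by a memory Phi; the actual spill and
     * reload code is emitted later by be_insert_spills_reloads(senv) */
    be_spill_phi(senv, phi);
}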
ir/be/bessaconstr.c
@@ -29,7 +29,7 @@
  * to their closest copy while introducing phis as necessary.
  *
  * Algorithm: Mark all blocks in the iterated dominance frontiers of the value
- * and it's copies. Link the copies ordered by dominance to the blocks. Then
+ * and its copies. Link the copies ordered by dominance to the blocks. Then
  * we search for each use all definitions in the current block, if none is
  * found, then we search one in the immediate dominator. If we are in a block
  * of the dominance frontier, create a phi and do the same search for all
ir/be/bessaconstr.h
@@ -29,7 +29,7 @@
  * to their closest copy while introducing phis as necessary.
  *
  * Algorithm: Mark all blocks in the iterated dominance frontiers of the value
- * and it's copies. Link the copies ordered by dominance to the blocks. Then
+ * and its copies. Link the copies ordered by dominance to the blocks. Then
  * we search for each use all definitions in the current block, if none is
  * found, then we search one in the immediate dominator. If we are in a block
  * of the dominance frontier, create a phi and do the same search for all
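The assure_constraints() hunk in ir/be/belower.c above shows the first half of driving this algorithm. For orientation, a sketch of the full call sequence one would expect from bessaconstr.h; init/add_copy/add_copies appear in the diff above, while be_ssa_construction_fix_users() and be_ssa_construction_destroy() are assumed names for the remaining steps:

static void reconstruct_ssa(ir_graph *irg, ir_node *value,
                            ir_node **copies, size_t n_copies)
{
    be_ssa_construction_env_t senv;

    be_ssa_construction_init(&senv, irg);
    be_ssa_construction_add_copy(&senv, value);
    be_ssa_construction_add_copies(&senv, copies, n_copies);

    /* redirect every user of "value" to its dominating definition,
     * inserting Phis in the iterated dominance frontier as needed */
    be_ssa_construction_fix_users(&senv, value);

    be_ssa_construction_destroy(&senv);
}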
ir/be/ia32/ia32_optimize.c
@@ -1271,7 +1271,7 @@ void ia32_peephole_optimization(ir_graph *irg)
 /**
  * Removes node from schedule if it is not used anymore. If irn is a mode_T node
- * all it's Projs are removed as well.
+ * all its Projs are removed as well.
  * @param irn  The irn to be removed from schedule
  */
 static inline void try_kill(ir_node *node)
ir/be/scripts/generate_new_opcodes.pl
@@ -1028,7 +1028,7 @@ sub get_reg_class {
 }
 
 ###
-# Returns the index of a given register within it's register class.
+# Returns the index of a given register within its register class.
 # @return index or undef
 ###
 sub get_reg_index {
ir/common/debug.c
@@ -51,7 +51,7 @@ struct firm_dbg_module_t {
 };
 
 /**
- * Compares two modules by comparing it's names
+ * Compares two modules by comparing their names
  */
 static int module_cmp(const void *p1, const void *p2, size_t size)
 {
ir/ident/mangle.c
@@ -127,7 +127,7 @@ ident *id_mangle_dot(ident *first, ident *scnd)
     return id_mangle_3(first, '.', scnd);
 }
 
-/* returns a mangled name for a Win32 function using it's calling convention */
+/* returns a mangled name for a Win32 function using its calling convention */
 ident *id_decorate_win32_c_fkt(const ir_entity *ent, ident *id)
 {
     ir_type *tp = get_entity_type(ent);
ir/ir/irdump.c
@@ -540,7 +540,7 @@ static void ird_walk_graph(ir_graph *irg, irg_walk_func *pre, irg_walk_func *pos
 }
 
 /**
- * Walker, allocates an array for all blocks and puts it's nodes non-floating
+ * Walker, allocates an array for all blocks and puts their non-floating
  * nodes into this array.
  */
 static void collect_node(ir_node *node, void *env)
@@ -1370,7 +1370,7 @@ static void print_mem_edge_vcgattr(FILE *F, ir_node *from, int to)
     fprintf(F, INTER_MEM_EDGE_ATTR);
 }
 
-/** Print the vcg attributes for the edge from node from to it's to's input */
+/** Print the vcg attributes for the edge from node "from" to its "to"th input */
 static void print_edge_vcgattr(FILE *F, ir_node *from, int to)
 {
     assert(from);
@@ -1831,7 +1831,7 @@ static void dump_entity_initializer(FILE *F, const ir_entity *ent)
     (void) ent;
 }
 
-/** Dumps a type or entity and it's edges. */
+/** Dumps a type or entity and its edges. */
 static void dump_type_info(type_or_ent tore, void *env)
 {
     FILE *F = (FILE*)env;
ir/ir/iredges.c
@@ -264,7 +264,7 @@ static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
     ir_graph *irg = get_irn_irg(tgt);
     assert(info->out_count >= 0);
     if (info->out_count == 0 && kind == EDGE_KIND_NORMAL) {
-        /* tgt lost it's last user */
+        /* tgt lost its last user */
         int i;
 
         for (i = get_irn_arity(tgt) - 1; i >= -1; --i) {
ir/ir/irgopt.c
@@ -155,7 +155,7 @@ static void enqueue_users(ir_node *n, pdeq *waitq)
 /**
  * Data flow optimization walker.
- * Optimizes all nodes and enqueue it's users
+ * Optimizes all nodes and enqueue its users
  * if done.
  */
 static void opt_walker(ir_node *n, void *env)
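The walker doc comment above describes the usual worklist-to-fixpoint scheme: optimize a node and, if it changed, revisit its users. A sketch of the surrounding driver loop (illustrative; new_pdeq()/pdeq_empty()/pdeq_getl()/del_pdeq() are the libfirm pdeq routines, and the function name is made up):

static void optimize_to_fixpoint(ir_graph *irg)
{
    pdeq *waitq = new_pdeq();

    /* optimize every node once; opt_walker enqueues users of changed nodes */
    irg_walk_graph(irg, NULL, opt_walker, waitq);

    /* drain the queue until nothing changes anymore */
    while (!pdeq_empty(waitq)) {
        ir_node *n = (ir_node*)pdeq_getl(waitq);
        opt_walker(n, waitq);
    }

    del_pdeq(waitq);
}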
ir/ir/irgraph.c
@@ -142,7 +142,7 @@ void irg_set_nloc(ir_graph *res, int n_loc)
 }
 
 /* Allocates a list of nodes:
-    - The start block containing a start node and Proj nodes for it's four
+    - The start block containing a start node and Proj nodes for its four
       results (X, M, P, Tuple).
     - The end block containing an end node. This block is not matured after
       new_ir_graph as predecessors need to be added to it.
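As a reminder of how the pre-allocated skeleton described in that comment is consumed, a short usage sketch (the entity and local-variable count are placeholders; the accessors are the standard irgraph getters):

static void build_graph_skeleton(ir_entity *ent)
{
    ir_graph *irg = new_ir_graph(ent, 2 /* number of local variables */);

    /* the start block comes fully set up: Start node plus its Projs */
    ir_node *start_block = get_irg_start_block(irg);
    ir_node *initial_mem = get_irg_initial_mem(irg); /* Proj M of Start */
    ir_node *frame       = get_irg_frame(irg);       /* Proj P of Start */

    /* the end block is still immature: add its predecessors, then
     * call mature_immBlock() on it */
    ir_node *end_block   = get_irg_end_block(irg);

    (void)start_block; (void)initial_mem; (void)frame; (void)end_block;
}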
ir/ir/irgwalk.c
@@ -373,7 +373,7 @@ static void irg_block_walk_2(ir_node *node, irg_walk_func *pre,
 }
 
-/* walks only over Block nodes in the graph.  Has it's own visited
+/* walks only over Block nodes in the graph.  Has its own visited
    flag, so that it can be interleaved with the other walker. */
 void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void *env)
 {
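Because the block walker carries its own visited flag, it can be started even while a normal node walk is in progress. A small usage sketch (count_block() and count_blocks() are made-up names; irg_block_walk_graph() is assumed to be the graph-level wrapper of the function above):

static void count_block(ir_node *block, void *env)
{
    unsigned *n_blocks = (unsigned*)env;
    (void)block;
    ++*n_blocks;
}

static unsigned count_blocks(ir_graph *irg)
{
    unsigned n_blocks = 0;
    irg_block_walk_graph(irg, count_block, NULL, &n_blocks);
    return n_blocks;
}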
ir/ir/iropt.c
@@ -1075,11 +1075,11 @@ static ir_node *equivalent_node_Or(ir_node *n)
     ir_tarval *tv;
 
     if (a == b) {
-        n = a;    /* Or has it's own neutral element */
+        n = a;    /* idempotence */
         DBG_OPT_ALGSIM0(oldn, n, FS_OPT_OR);
         return n;
     }
-    /* constants are cormalized to right, check this site first */
+    /* constants are normalized to right, check this side first */
     tv = value_of(b);
     if (tarval_is_null(tv)) {
         n = a;
@@ -1108,11 +1108,11 @@ static ir_node *equivalent_node_And(ir_node *n)
     ir_tarval *tv;
 
     if (a == b) {
-        n = a;    /* And has it's own neutral element */
+        n = a;    /* idempotence */
         DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
         return n;
     }
-    /* constants are normalized to right, check this site first */
+    /* constants are normalized to right, check this side first */
     tv = value_of(b);
     if (tarval_is_all_one(tv)) {
         n = a;
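A note on the rewording: x | x == x and x & x == x are idempotence; the neutral elements (0 for Or, all-ones for And) are what the tarval checks right below handle. In plain C terms (illustrative, not from the commit):

#include <assert.h>

static void or_and_identities(int x)
{
    assert((x | x) == x);  /* idempotence: the a == b case above */
    assert((x | 0) == x);  /* 0 is Or's neutral element (tarval_is_null) */
    assert((x & x) == x);  /* idempotence for And */
    assert((x & ~0) == x); /* all-ones is And's neutral element (tarval_is_all_one) */
}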
@@ -1322,11 +1322,11 @@ static ir_node *equivalent_node_Phi(ir_node *n)
     first_val = get_Phi_pred(n, i);
     if (   (first_val != n)                       /* not self pointer */
 #if 0
-    /* BEWARE: when the if is changed to 1, Phi's will ignore it's Bad
-     * predecessors. Then, Phi nodes in dead code might be removed, causing
-     * nodes pointing to themself (Add's for instance).
-     * This is really bad and causes endless recursions in several
-     * code pathes, so we do NOT optimize such a code.
+    /* BEWARE: when the if is changed to 1, Phis will ignore their Bad
+     * predecessors. Then, Phi nodes in unreachable code might be removed,
+     * causing nodes pointing to themselves (Adds for instance).
+     * This is really bad and causes endless recursion on several
+     * code pathes, so we do NOT optimize such code.
      * This is not that bad as it sounds, optimize_cf() removes bad control flow
      * (and bad Phi predecessors), so live code is optimized later.
      */
@@ -1592,7 +1592,7 @@ static ir_node *equivalent_node_Proj_Store(ir_node *proj)
 }  /* equivalent_node_Proj_Store */
 
 /**
- * Does all optimizations on nodes that must be done on it's Proj's
+ * Does all optimizations on nodes that must be done on its Projs
  * because of creating new nodes.
  */
 static ir_node *equivalent_node_Proj(ir_node *proj)
@@ -4537,7 +4537,7 @@ static ir_node *transform_node_Proj_Bound(ir_node *proj)
 }  /* transform_node_Proj_Bound */
 
 /**
- * Does all optimizations on nodes that must be done on it's Proj's
+ * Does all optimizations on nodes that must be done on its Projs
  * because of creating new nodes.
 */
 static ir_node *transform_node_Proj(ir_node *proj)
@@ -6308,7 +6308,7 @@ static ir_node *gigo(ir_node *node)
 #if 0
     /* Propagating Unknowns here seems to be a bad idea, because
        sometimes we need a node as a input and did not want that
-       it kills it's user.
+       it kills its user.
        However, it might be useful to move this into a later phase
        (if you think that optimizing such code is useful). */
     if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
@@ -6371,7 +6371,7 @@ ir_node *optimize_node(ir_node *n)
     size_t node_size;
 
     /*
-     * we MUST copy the node here temporary, because it's still
+     * we MUST copy the node here temporarily, because it's still
      * needed for DBG_OPT_CSTEVAL
      */
     node_size = offsetof(ir_node, attr) + n->op->attr_size;
ir/opt/cfopt.c
@@ -179,7 +179,7 @@ static void merge_blocks(ir_node *node, void *ctx)
         }
 
         /* normally, we would create a Bad block here, but this must be
-         * prevented, so just set it's cf to Bad.
+         * prevented, so just set its cf to Bad.
          */
         if (is_Block_dead(new_block)) {
             ir_graph *irg = get_irn_irg(node);
ir/opt/code_placement.c
@@ -55,7 +55,7 @@ static int is_Block_unreachable(ir_node *block)
  * all "living" nodes into a living block. That's why we must
  * move nodes in dead block with "live" successors into a valid
  * block.
- * We move them just into the same block as it's successor (or
+ * We move them just into the same block as its successor (or
  * in case of a Phi into the effective use block). For Phi successors,
  * this may still be a dead block, but then there is no real use, as
  * the control flow will be dead later.
@@ -319,7 +319,7 @@ static inline int get_block_loop_depth(ir_node *block)
 }
 
 /**
- * Move n to a block with less loop depth than it's current block. The
+ * Move n to a block with less loop depth than its current block. The
  * new block must be dominated by early.
 *
  * @param n      the node that should be moved
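The second hunk's comment describes the late-placement step of code motion: walk the dominator chain from the node's current block up to "early" and pick the block with the smallest loop depth. A sketch of that selection (get_block_loop_depth() is the static helper from the hunk header; get_Block_idom() is assumed to be the usual dominance accessor; the function name is made up):

static ir_node *find_shallowest_block(ir_node *block, ir_node *early)
{
    ir_node *best       = block;
    int      best_depth = get_block_loop_depth(block);

    /* walk the dominator chain from the current block up to "early" */
    while (block != early) {
        block = get_Block_idom(block);
        if (block == NULL)
            break;
        if (get_block_loop_depth(block) < best_depth) {
            best       = block;
            best_depth = get_block_loop_depth(block);
        }
    }
    return best; /* dominated by early, with minimal loop depth */
}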