Zwinkau / libfirm / Commits

Commit a8583be0, authored May 12, 2016 by yb9976

    Fix indentation

parent 52e16f28
Changes: 54 files

Every hunk in this commit only re-indents or re-wraps continuation lines; the removed and added lines carry the same tokens, so each hunk below is shown only once.
ir/adt/pqueue.c
@@ -49,7 +49,7 @@ static void pqueue_heapify(pqueue_t *q, size_t pos)
		exchange = pos * 2;

	if ((pos * 2 + 1) < len
	    && q->elems[exchange].priority < q->elems[pos * 2 + 1].priority)
		exchange = pos * 2 + 1;

	if (exchange == pos)
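Side note on the code touched here (a sketch, not libfirm's actual pqueue_t): the hunk is the child-selection step of a max-heap sift-down, where with 1-based indexing the children of pos sit at pos * 2 and pos * 2 + 1 and the node sinks toward its larger-priority child. heap_elem_t and heap_sift_down below are made-up names.

#include <stddef.h>

typedef struct { int priority; void *data; } heap_elem_t;

/* Sift the element at `pos` down until the max-heap property holds again.
 * elems[1] is the root; len is one past the last valid index. */
static void heap_sift_down(heap_elem_t *elems, size_t len, size_t pos)
{
	while (pos * 2 < len) {
		size_t exchange = pos;

		/* prefer the left child if it beats the current node ... */
		if (elems[exchange].priority < elems[pos * 2].priority)
			exchange = pos * 2;

		/* ... and the right child if it beats both (the condition from
		 * the hunk above) */
		if ((pos * 2 + 1) < len
		    && elems[exchange].priority < elems[pos * 2 + 1].priority)
			exchange = pos * 2 + 1;

		/* neither child is larger: heap property holds again */
		if (exchange == pos)
			break;

		heap_elem_t tmp = elems[pos];
		elems[pos]      = elems[exchange];
		elems[exchange] = tmp;
		pos = exchange;
	}
}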
ir/ana/constbits.c
@@ -665,7 +665,7 @@ undefined:
	switch (relation) {
	case ir_relation_less_greater:
		if (!tarval_is_null(tarval_andnot(ro, lz)) ||
		    !tarval_is_null(tarval_andnot(lo, rz))) {
			// At least one bit differs.
			z = o = t;
		} else if (lz == lo && rz == ro && lz == rz) {

@@ -677,7 +677,7 @@ undefined:
	case ir_relation_equal:
		if (!tarval_is_null(tarval_andnot(ro, lz)) ||
		    !tarval_is_null(tarval_andnot(lo, rz))) {
			// At least one bit differs.
			z = o = f;
		} else if (lz == lo && rz == ro && lz == rz) {

@@ -691,7 +691,7 @@ undefined:
	case ir_relation_less:
		/* TODO handle negative values */
		if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
		    tarval_is_negative(rz) || tarval_is_negative(ro))
			goto result_unknown;

		if (tarval_cmp(lz, ro) & relation) {

@@ -709,7 +709,7 @@ undefined:
	case ir_relation_greater:
		/* TODO handle negative values */
		if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
		    tarval_is_negative(rz) || tarval_is_negative(ro))
			goto result_unknown;

		if (!(tarval_cmp(lz, ro) & relation)) {
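For context on the test these hunks re-indent (a hedged reading of the constbits lattice, not its exact definition): each value is tracked by a pair of masks where z has a 1 wherever the bit may be 1 and o has a 1 wherever the bit must be 1. Two values are provably unequal as soon as some bit must be 1 on one side but cannot be 1 on the other, which is what the tarval_andnot checks express. A plain-integer sketch with made-up names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Known-bits abstraction on 32-bit masks: z = possibly-one bits,
 * o = certainly-one bits (so o & ~z == 0 must always hold). */
typedef struct {
	uint32_t z;
	uint32_t o;
} bits_t;

/* left != right is proven if some bit must be 1 on one side but cannot
 * be 1 on the other: (ro & ~lz) != 0 || (lo & ~rz) != 0. */
static bool surely_not_equal(bits_t l, bits_t r)
{
	return (r.o & ~l.z) != 0 || (l.o & ~r.z) != 0;
}

int main(void)
{
	bits_t l = { .z = 0x0000000f, .o = 0x00000001 }; /* low nibble unknown, bit 0 is 1 */
	bits_t r = { .z = 0x000000fe, .o = 0x00000002 }; /* bit 0 known 0, bit 1 is 1 */
	/* bit 0 must be 1 on the left but cannot be 1 on the right */
	printf("%d\n", surely_not_equal(l, r)); /* prints 1 */
	return 0;
}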
ir/ana/dca.c
@@ -170,8 +170,8 @@ static void dca_transfer(ir_node *irn)
			 * don't fit into the smaller mode. */
			if (get_tarval_highest_bit(care) >= (int)pred_bits)
				care = tarval_or(care,
				                 tarval_shl_unsigned(get_mode_one(mode), pred_bits - 1));
		} else {
			/* Thwart sign extension as it doesn't make sense on
			 * our abstract tarvals. */

@@ -401,7 +401,7 @@ static void dca_init_node(ir_node *n, void *data)
	ir_mode *m = get_irn_mode(n);

	set_irn_link(n, (void *)(mode_is_int(m) ? get_mode_null(m)
	                                        : tarval_b_false));
}

void dca_analyze(ir_graph *irg)

@@ -413,7 +413,7 @@ void dca_analyze(ir_graph *irg)
	assert(tarval_get_wrap_on_overflow());

	assert(((ir_resources_reserved(irg) & IR_RESOURCE_IRN_LINK) != 0)
	       && "user of dc analysis must reserve links");

	irg_walk_graph(irg, dca_init_node, NULL, 0);
ir/ana/irconsconfirm.c
@@ -290,7 +290,7 @@ static void handle_if(ir_node *block, ir_node *cmp, ir_relation rel, env_t *env)
			env->num_eq += 1;
		} else if (block_dominates(blk, cond_block)
		           && is_Const(right) && !get_irn_pinned(user)) {
			/*
			 * left == Const and we found a movable user of left in a
			 * dominator of the Cond block
ir/ana/vrp.c
@@ -227,14 +227,12 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
		vrp_left  = vrp_get_or_set_info(info, get_Eor_left(node));
		vrp_right = vrp_get_or_set_info(info, get_Eor_right(node));
		new_bits_set = tarval_or(
			tarval_and(vrp_left->bits_set, tarval_not(vrp_right->bits_not_set)),
			tarval_and(tarval_not(vrp_left->bits_not_set), vrp_right->bits_set));
		new_bits_not_set = tarval_not(tarval_or(
			tarval_and(vrp_left->bits_set, vrp_right->bits_set),
			tarval_and(tarval_not(vrp_left->bits_not_set),
			           tarval_not(vrp_right->bits_not_set))));
		break;
	}

@@ -272,8 +270,8 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
		/* The second and is needed if target type is smaller*/
		new_bits_not_set = tarval_convert_to(get_mode_all_one(old_mode), new_mode);
		new_bits_not_set = tarval_and(new_bits_not_set,
		                              tarval_convert_to(vrp_pred->bits_not_set, new_mode));
		new_bits_set = tarval_and(new_bits_not_set,
		                          tarval_convert_to(vrp_pred->bits_set, new_mode));

		/* Matze: TODO, BUGGY, tarval_cmp never returns ir_relation_less_equal */
		if (tarval_cmp(vrp_pred->range_top, get_mode_max(new_mode)) == ir_relation_less_equal) {

@@ -322,8 +320,7 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
		for (int i = 1, num = get_Phi_n_preds(node); i < num; i++) {
			pred     = get_Phi_pred(node, i);
			vrp_pred = vrp_get_or_set_info(info, pred);

			if (new_range_type == VRP_RANGE && vrp_pred->range_type == VRP_RANGE) {
				ir_relation relation = tarval_cmp(new_range_top, vrp_pred->range_top);
				if (relation == ir_relation_less) {
					new_range_top = vrp_pred->range_top;

@@ -337,7 +334,7 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
			}

			new_bits_set     = tarval_and(new_bits_set, vrp_pred->bits_set);
			new_bits_not_set = tarval_or(new_bits_not_set,
			                             vrp_pred->bits_not_set);
		}
		break;

@@ -397,7 +394,7 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
	}

	if (vrp->range_type == VRP_UNDEFINED &&
	    new_range_type != VRP_UNDEFINED) {
		something_changed = true;
		vrp->range_type   = new_range_type;
		vrp->range_bottom = new_range_bottom;

@@ -419,12 +416,12 @@ static int vrp_update_node(ir_vrp_info *info, ir_node *node)
		/* if they are overlapping, cut the range.*/
		/* TODO: Maybe we can preserve more information here*/
		if (tarval_cmp(vrp->range_bottom, new_range_top) == ir_relation_greater &&
		    tarval_cmp(vrp->range_bottom, new_range_bottom) == ir_relation_greater) {
			something_changed = true;
			vrp->range_bottom = new_range_top;
		} else if (tarval_cmp(vrp->range_top, new_range_bottom) == ir_relation_greater &&
		           tarval_cmp(vrp->range_top, new_range_top) == ir_relation_less) {
			something_changed = true;
			vrp->range_top = new_range_bottom;
		}

@@ -567,7 +564,7 @@ ir_relation vrp_cmp(const ir_node *left, const ir_node *right)
	}

	if (!tarval_is_null(tarval_and(vrp_left->bits_set, tarval_not(vrp_right->bits_not_set))) ||
	    !tarval_is_null(tarval_and(tarval_not(vrp_left->bits_not_set), vrp_right->bits_set))) {
		return ir_relation_less_greater;
	}
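The Eor hunk at the top of this file is a known-bits transfer function for xor. Assuming bits_set marks bits known to be 1 and bits_not_set has a 0 only where a bit is known to be 0 (a hedged reading of the vrp fields), the same computation on plain 32-bit masks looks like the sketch below; vrp_bits_t and xor_transfer are made-up names.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t bits_set;     /* known-one bits */
	uint32_t bits_not_set; /* possibly-one bits (0 = known zero) */
} vrp_bits_t;

static vrp_bits_t xor_transfer(vrp_bits_t l, vrp_bits_t r)
{
	vrp_bits_t res;
	/* result bit is surely 1 when exactly one side is surely 1 and the
	 * other surely 0 */
	res.bits_set = (l.bits_set & ~r.bits_not_set)
	             | (~l.bits_not_set & r.bits_set);
	/* result bit is surely 0 when both are surely 1 or both surely 0;
	 * everything else may be 1 */
	res.bits_not_set = ~((l.bits_set & r.bits_set)
	                   | (~l.bits_not_set & ~r.bits_not_set));
	return res;
}

int main(void)
{
	vrp_bits_t l = { 0x0000ff00, 0x00ffff00 }; /* bits 8..15 known 1, 16..23 unknown */
	vrp_bits_t r = { 0x000000f0, 0x00fff0f0 };
	vrp_bits_t x = xor_transfer(l, r);
	printf("set=%08x maybe=%08x\n", x.bits_set, x.bits_not_set);
	return 0;
}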
ir/be/amd64/amd64_bearch.c
@@ -450,7 +450,7 @@ static void introduce_epilogue(ir_node *ret, bool omit_fp)
		ir_type *frame_type = get_irg_frame_type(irg);
		unsigned frame_size = get_type_size(frame_type);
		ir_node *incsp      = amd64_new_IncSP(block, curr_sp, -(int)frame_size,
		                                      true);
		sched_add_before(ret, incsp);
		curr_sp = incsp;
	}

@@ -496,7 +496,7 @@ static void introduce_prologue(ir_graph *const irg, bool omit_fp)
		be_keep_if_unused(incsp);
	} else {
		ir_node *const incsp = amd64_new_IncSP(block, initial_sp,
		                                       frame_size, false);
		sched_add_after(start, incsp);
		edges_reroute_except(initial_sp, incsp, incsp);
	}

@@ -654,7 +654,7 @@ static void amd64_generate_code(FILE *output, const char *cup_name)
	be_timer_push(T_RA_PREPARATION);
	be_sched_fix_flags(irg, &amd64_reg_classes[CLASS_amd64_flags], NULL,
	                   NULL, NULL);
	be_timer_pop(T_RA_PREPARATION);

	be_step_regalloc(irg, &amd64_regalloc_if);
ir/be/amd64/amd64_pic.c
@@ -43,7 +43,7 @@ static ir_node *create_gotpcrel_load(ir_graph *irg, ir_entity *const entity)
	ir_node *const nomem = get_irg_no_mem(irg);
	ir_node *const block = get_irg_start_block(irg);
	ir_node *const load  = new_rd_Load(NULL, block, nomem, addr, mode_P,
	                                   type, cons_floats);
	return new_r_Proj(load, mode_P, pn_Load_res);
}
ir/be/amd64/amd64_transform.c
@@ -921,8 +921,7 @@ static ir_node *gen_binop_xmm(ir_node *node, ir_node *op0, ir_node *op1,
	fix_node_mem_proj(new_node, args.mem_proj);

	arch_set_irn_register_req_out(new_node, 0, &amd64_requirement_xmm_same_0);
	return be_new_Proj(new_node, pn_amd64_subs_res);
}

@@ -1046,7 +1045,7 @@ static ir_node *gen_Add(ir_node *const node)
		if (mode == x86_mode_E)
			return gen_binop_x87(node, op1, op2, new_bd_amd64_fadd);
		return gen_binop_am(node, op1, op2, new_bd_amd64_adds,
		                    pn_amd64_adds_res, match_commutative | match_am);
	}

	match_flags_t flags = match_immediate | match_am | match_mode_neutral

@@ -1613,7 +1612,9 @@ static ir_node *gen_Switch(ir_node *const node)
		table = ir_switch_table_duplicate(irg, table);

	ir_node *const out = new_bd_amd64_jmp_switch(dbgi, new_block, arity, in,
	                                             in_reqs, n_outs, op_mode,
	                                             X86_SIZE_64, &addr, table,
	                                             entity);
	return out;
}

@@ -2133,7 +2134,7 @@ static ir_node *match_mov(dbg_info *dbgi, ir_node *block, ir_node *value,
	ir_node *load;
	ir_node *op;
	bool     use_am = use_address_matching(mode, match_am, block, NULL,
	                                       value, &load, &op);

	amd64_op_mode_t op_mode;
	x86_addr_t      addr;

@@ -2231,9 +2232,9 @@ static ir_node *create_cvtsd2ss(dbg_info *dbgi, ir_node *block, ir_node *value)
}

static void store_to_temp(construct_binop_func const new_store,
                          arch_register_req_t const **const in_reqs,
                          x86_addr_t *addr, dbg_info *dbgi, ir_node *block,
                          ir_node **in, int *n_in, ir_node *new_op,
                          x86_insn_size_t size)
{
	ir_graph *const irg   = get_irn_irg(block);
	ir_node  *const frame = get_irg_frame(irg);

@@ -2345,7 +2346,7 @@ static ir_node *conv_x87_to_int(dbg_info *const dbgi, ir_node *const block,
	int        n_in = 0;
	x86_addr_t addr;
	store_to_temp(new_bd_amd64_fisttp, x87K_reg_mem_reqs, &addr, dbgi, block,
	              in, &n_in, new_val, insn_size_src);
	assert(n_in < (int)ARRAY_SIZE(in));

	create_mov_func new_mov = insn_size_dest < X86_SIZE_64
ir/be/arm/arm_new_nodes.c
@@ -99,7 +99,7 @@ void arm_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
		break;
	case ARM_SHF_IMM:
		fprintf(F, "modifier = imm %d ror %d\n",
		        attr->immediate_value, attr->shift_immediate);
		break;
	case ARM_SHF_ASR_IMM:
		fprintf(F, "modifier = V >>s %d\n", attr->shift_immediate);
ir/be/arm/arm_transform.c
@@ -506,7 +506,7 @@ static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2,
		new_op2 = new_bd_arm_Rsb_imm(dbgi, block, new_op2, 32, 0);
	}
	return new_bd_arm_Mov_reg_shift_reg(dbgi, block, new_op1, new_op2,
	                                    ARM_SHF_ROR_REG);
}

static bool is_low_mask(ir_tarval *tv)
ir/be/bechordal.c
@@ -329,7 +329,7 @@ static void assign(ir_node *const block, void *const env_ptr)
	DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
	foreach_border_head(head, b) {
		DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
		     b->irn, get_irn_idx(b->irn)));
	}

	bitset_t *const available = bitset_alloca(env->allocatable_regs->size);
ir/be/becopyopt.c
@@ -322,8 +322,8 @@ static int ou_max_ind_set_costs(unit_t *const ou)
			/* check if curr is a stable set */
			for (int i = bitset_next_set(curr, 0); i != -1; i = bitset_next_set(curr, i + 1))
				for (int o = bitset_next_set(curr, i + 1); o != -1; o = bitset_next_set(curr, o + 1))
					/* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
					if (be_values_interfere(unsafe[i], unsafe[o]))
						goto no_stable_set;

			/* if we arrive here, we have a stable set */
			/* compute the weight of the stable set*/

@@ -336,7 +336,7 @@ static int ou_max_ind_set_costs(unit_t *const ou)
				best_weight = curr_weight;
			}

no_stable_set:
			bitset_minus1(curr);
		}
	}

@@ -442,7 +442,7 @@ static void co_collect_units(ir_node *irn, void *env)
		if (other & (1U << i)) {
			ir_node *o = get_irn_n(skip_Proj(irn), i);
			if (!arch_irn_is_ignore(o) &&
			    !be_values_interfere(irn, o)) {
				unit->nodes[k] = o;
				unit->costs[k] = co->get_costs(irn, -1);
				++k;
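The first becopyopt.c hunk walks every unordered pair of members of the candidate set and rejects the set on the first interference it finds. A stand-alone sketch of that independent-set ("stable set") check, with is_stable_set and interferes as hypothetical stand-ins for the bitset iteration and be_values_interfere():

#include <stdbool.h>
#include <stddef.h>

/* Return true if no two selected members interfere with each other. */
static bool is_stable_set(const bool *selected, size_t n,
                          bool (*interferes)(size_t a, size_t b))
{
	/* walk every unordered pair of selected members; one interference
	 * is enough to reject the candidate set */
	for (size_t i = 0; i < n; ++i) {
		if (!selected[i])
			continue;
		for (size_t o = i + 1; o < n; ++o) {
			if (selected[o] && interferes(i, o))
				return false;
		}
	}
	return true;
}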
ir/be/begnuas.c
@@ -593,8 +593,8 @@ static void emit_visibility(const ir_entity *entity, bool implicit_globl)
		emit_symbol_directive(directive, entity);

	if (is_macho() && (linkage & IR_LINKAGE_HIDDEN_USER)
	    && get_entity_ld_name(entity)[0] != '\0') {
		emit_symbol_directive(".no_dead_strip", entity);
	}
}

@@ -1060,7 +1060,7 @@ static void emit_ir_initializer(normal_or_bitfield *vals,
		if (bitfield_size > 0) {
			unsigned offset_bits = get_entity_bitfield_offset(member);
			emit_bitfield(&vals[offset], offset_bits, bitfield_size,
			              sub_initializer, subtype);
			continue;
		}
ir/be/bemain.c
@@ -611,12 +611,11 @@ void be_step_last(ir_graph *irg)
		for (be_timer_id_t t = T_FIRST; t < T_LAST+1; ++t) {
			char buf[128];
			snprintf(buf, sizeof(buf), "bemain_time_%s",
			         get_timer_name(t));
			stat_ev_dbl(buf, ir_timer_elapsed_usec(be_timers[t]));
		}
	} else {
		printf("==>> IRG %s <<==\n", get_entity_name(get_irg_entity(irg)));
		for (be_timer_id_t t = T_FIRST; t < T_LAST+1; ++t) {
			double val = ir_timer_elapsed_usec(be_timers[t]) / 1000.0;
			printf("%-20s: %10.3f msec\n", get_timer_name(t), val);
ir/be/beprefalloc.c
@@ -289,7 +289,7 @@ static void analyze_block(ir_node *block, void *data)
		allocation_info_t *info = get_allocation_info(node);
		if (get_irn_arity(node) >= (int)sizeof(info->last_uses) * 8) {
			panic("node with more than %d inputs not supported yet",
			      (int)sizeof(info->last_uses) * 8);
		}

		/* mark last uses */

@@ -571,7 +571,7 @@ static bool try_optimistic_split(ir_node *to_split, ir_node *before,
	delta = pref_delta + prefs[i].pref;
	if (delta < split_threshold) {
		DB((dbg, LEVEL_3, "Not doing optimistical split of %+F (depth %d), win %f too low\n",
		    to_split, recursion, delta));
		return false;
	}

@@ -1176,7 +1176,7 @@ static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
				continue;

			/* livethrough values may not use constrainted output registers */
			if (rbitset_is_set(live_through_regs, l)
			    && rbitset_is_set(forbidden_regs, r))
				continue;

			hungarian_add(bp, r, l, l == r ? 9 : 8);

@@ -1404,7 +1404,7 @@ static void assign_phi_registers(ir_node *block)
				costs += 10000;
			hungarian_add(bp, n, r, (int)costs);
			DB((dbg, LEVEL_3, " %s(%f)", arch_register_for_index(cls, r)->name,
			    info->prefs[r]));
		}
		DB((dbg, LEVEL_3, "\n"));

		++n;
ir/be/bespillbelady.c
@@ -780,7 +780,7 @@ static void fix_block_borders(ir_node *block, void *data)
			continue;

		if (move_spills && be_is_live_in(lv, block, node)
		    && !pred_end_workset->vals[iter].spilled) {
			ir_node *insert_point;
			if (n_cfgpreds > 1) {
				insert_point = be_get_end_of_block_insertion_point(pred);
ir/be/bespillutil.c
@@ -514,8 +514,8 @@ static void determine_spill_costs(spill_env_t *env, spill_info_t *spillinfo)
	}

	DB((dbg, LEVEL_1, "%+F: latespillcosts %f after def: %f\n", to_spill,
	    spills_execfreq * env->regif.spill_cost,
	    spill_execfreq * env->regif.spill_cost));

	/* multi-/latespill is advantageous -> return*/
	if (spills_execfreq < spill_execfreq) {

@@ -619,7 +619,7 @@ void be_insert_spills_reloads(spill_env_t *env)
			 * reconstruction for memory comes below */
			assert(si->spills != NULL);
			copy = env->regif.new_reload(si->to_spill, si->spills->spill,
			                             rld->reloader);
			env->reload_count++;
		}
ir/be/bessaconstr.c
@@ -369,7 +369,7 @@ void be_ssa_construction_init(be_ssa_construction_env_t *env, ir_graph *irg)
	                           IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE_FRONTIERS);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED
	                     | IR_RESOURCE_BLOCK_VISITED | IR_RESOURCE_IRN_LINK);

	/* we use the visited flag to indicate blocks in the dominance frontier
	 * and blocks that already have the relevant value at the end calculated */

@@ -388,7 +388,7 @@ void be_ssa_construction_destroy(be_ssa_construction_env_t *env)
	DEL_ARR_F(env->new_phis);
	ir_free_resources(env->irg, IR_RESOURCE_IRN_VISITED
	                  | IR_RESOURCE_BLOCK_VISITED | IR_RESOURCE_IRN_LINK);
	stat_ev_tim_pop("bessaconstr_total_time");
	stat_ev_ctx_pop("bessaconstr");
ir/be/ia32/ia32_bearch.c
@@ -1298,7 +1298,7 @@ static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
		return true;

	/* SSE has own min/max operations */
	if (ia32_cg_config.use_sse2
	    && mux_is_float_min_max(sel, mux_true, mux_false))
		return true;

	/* we can handle Mux(?, Const[f], Const[f]) */
	if (mux_is_float_const_const(sel, mux_true, mux_false))

@@ -1454,7 +1454,7 @@ static bool lower_for_emit(ir_graph *const irg, unsigned *const sp_is_non_ssa)
	be_timer_push(T_RA_PREPARATION);
	ia32_setup_fpu_mode(irg);
	be_sched_fix_flags(irg, &ia32_reg_classes[CLASS_ia32_flags],
	                   &flags_remat, NULL, &ia32_try_replace_flags);
	simplify_remat_nodes(irg);
	be_timer_pop(T_RA_PREPARATION);

@@ -1678,8 +1678,8 @@ static const backend_params *ia32_get_libfirm_params(void)
static const lc_opt_table_entry_t ia32_options[] = {
	LC_OPT_ENT_BOOL("gprof", "Create gprof profiling code", &gprof),
	LC_OPT_ENT_BOOL("struct_in_reg",
	                "Return small structs in integer registers",
	                &return_small_struct_in_regs),
	LC_OPT_LAST
};
ir/be/ia32/ia32_emitter.c
@@ -1228,7 +1228,7 @@ static void emit_ia32_GetEIP(const ir_node *node)
		ident   *const id = new_id_fmt("__x86.get_pc_thunk.%s", name);
		ir_type *const tp = get_thunk_type();
		thunk = new_global_entity(glob, id, tp, ir_visibility_external_private,
		                          IR_LINKAGE_MERGE | IR_LINKAGE_GARBAGE_COLLECT);
		/* Note that we do not create a proper method graph, but rather cheat
		 * later and emit the instructions manually. This is just necessary so
		 * firm knows we will actually output code for this entity. */

@@ -1663,7 +1663,7 @@ void ia32_emit_thunks(void)
		const arch_register_t *reg = &ia32_reg_classes[CLASS_ia32_gp].regs[i];

		be_gas_emit_function_prolog(entity, ia32_cg_config.function_alignment,
		                            NULL);
		ia32_emitf(NULL, "movl (%%esp), %#R", reg);
		ia32_emitf(NULL, "ret");
		be_gas_emit_function_epilog(entity);
(Diff page 1 of 3 — the remaining changed files are on the following pages.)