Zwinkau / libfirm · commit c82b415b
authored Feb 07, 2016 by Matthias Braun

be: Merge common emitter code to query jump targets and block schedule

parent a6aacda3
Changes: 7 files
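The commit replaces the per-backend helpers get_cfop_target_block() and sched_next_block() (which read irn link fields set up by backend-local *_gen_labels() walkers) with shared queries in beemithlp. The following is a condensed sketch of the pattern the hunks below introduce; it is assembled from the diff, "mybackend" is a placeholder name, and it is not a verbatim excerpt of any file:

	/* Sketch of the common emitter pattern after this commit (assembled from
	 * the hunks below; "mybackend" is a placeholder, not a real backend). */
	#include "beemithlp.h"

	void mybackend_emit_function(ir_graph *irg)
	{
		ir_node **blk_sched = be_create_block_schedule(irg);

		/* one shared walker now fills all irn link fields */
		ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
		be_emit_init_cf_links(blk_sched);

		for (size_t i = 0, n = ARR_LEN(blk_sched); i < n; ++i) {
			/* emit blk_sched[i] ... */
		}
		ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
	}

	/* Unconditional jump: fall through when the jump target directly follows
	 * the current block in the block schedule. */
	static void mybackend_emit_jmp(const ir_node *node)
	{
		ir_node const *const block  = get_nodes_block(node);
		ir_node const *const target = be_emit_get_cfop_target(node);
		if (be_emit_get_prev_block(target) != block) {
			/* emit "jmp <target>" */
		} else {
			/* fallthrough: no instruction needed */
		}
	}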
ir/be/TEMPLATE/TEMPLATE_emitter.c

@@ -57,7 +57,7 @@ static void TEMPLATE_emit_dest_register(const ir_node *node, int pos)
  */
 static void TEMPLATE_emit_cfop_target(const ir_node *node)
 {
-	ir_node *block = (ir_node*)get_irn_link(node);
+	ir_node *block = be_emit_get_cfop_target(node);
 	be_gas_emit_block_name(block);
 }

@@ -221,18 +221,6 @@ static void TEMPLATE_emit_block(ir_node *block)
 	}
 }

-/**
- * Sets labels for control flow nodes (jump target)
- */
-static void TEMPLATE_gen_labels(ir_node *block, void *env)
-{
-	(void)env;
-	for (int n = get_Block_n_cfgpreds(block); n-- > 0; ) {
-		ir_node *pred = get_Block_cfgpred(block, n);
-		set_irn_link(pred, block);
-	}
-}
-
 void TEMPLATE_emit_function(ir_graph *irg)
 {
 	/* register all emitter functions */

@@ -247,7 +235,8 @@ void TEMPLATE_emit_function(ir_graph *irg)
 	/* populate jump link fields with their destinations */
 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
-	irg_block_walk_graph(irg, TEMPLATE_gen_labels, NULL, NULL);
+	be_emit_init_cf_links(block_schedule);

 	for (size_t i = 0, n = ARR_LEN(block_schedule); i < n; ++i) {
 		ir_node *block = block_schedule[i];
ir/be/amd64/amd64_emitter.c

@@ -30,14 +30,6 @@
 static be_stack_layout_t *layout;

-/**
- * Returns the target block for a control flow node.
- */
-static ir_node *get_cfop_target_block(const ir_node *irn)
-{
-	return (ir_node*)get_irn_link(irn);
-}
-
 static char get_gp_size_suffix(amd64_insn_size_t const size)
 {
 	switch (size) {

@@ -545,7 +537,7 @@ end_of_mods:
 	}

 	case 'L': {
-		ir_node *const block = get_cfop_target_block(node);
+		ir_node *const block = be_emit_get_cfop_target(node);
 		be_gas_emit_block_name(block);
 		break;
 	}

@@ -651,14 +643,6 @@ unknown:
 	va_end(ap);
 }

-/**
- * Returns the next block in a block schedule.
- */
-static ir_node *sched_next_block(const ir_node *block)
-{
-	return (ir_node*)get_irn_link(block);
-}
-
 static const char *get_register_name_ir_mode(const arch_register_t *reg,
                                              ir_mode *mode)
 {

@@ -768,14 +752,9 @@ static void emit_amd64_asm(const ir_node *node)
  */
 static void emit_amd64_jmp(const ir_node *node)
 {
-	ir_node *block, *next_block;
-
-	/* for now, the code works for scheduled and non-schedules blocks */
-	block = get_nodes_block(node);
-
-	/* we have a block schedule */
-	next_block = sched_next_block(block);
-
-	if (get_cfop_target_block(node) != next_block) {
+	ir_node const *const block  = get_nodes_block(node);
+	ir_node const *const target = be_emit_get_cfop_target(node);
+	if (be_emit_get_prev_block(target) != block) {
 		amd64_emitf(node, "jmp %L");
 	} else if (be_options.verbose_asm) {
 		amd64_emitf(node, "/* fallthrough to %L */");

@@ -785,7 +764,7 @@ static void emit_amd64_jmp(const ir_node *node)
 static void emit_jumptable_target(ir_entity const *const table,
                                   ir_node const *const proj_x)
 {
-	ir_node const *const block = get_cfop_target_block(proj_x);
+	ir_node const *const block = be_emit_get_cfop_target(proj_x);
 	be_gas_emit_block_name(block);
 	if (be_options.pic_style != BE_PIC_NONE) {
 		be_emit_char('-');

@@ -822,8 +801,6 @@ static void emit_amd64_jcc(const ir_node *irn)
 {
 	const ir_node         *proj_true  = NULL;
 	const ir_node         *proj_false = NULL;
-	const ir_node         *block;
-	const ir_node         *next_block;
 	const ir_node         *flags      = get_irn_n(irn, n_amd64_jcc_eflags);
 	const amd64_cc_attr_t *attr       = get_amd64_cc_attr_const(irn);
 	x86_condition_code_t   cc         = determine_final_cc(flags, attr->cc);

@@ -838,13 +815,9 @@ static void emit_amd64_jcc(const ir_node *irn)
 		}
 	}

-	/* for now, the code works for scheduled and non-schedules blocks */
-	block = get_nodes_block(irn);
-
-	/* we have a block schedule */
-	next_block = sched_next_block(block);
-
-	if (get_cfop_target_block(proj_true) == next_block) {
+	ir_node const *const block       = get_nodes_block(irn);
+	ir_node const *const true_target = be_emit_get_cfop_target(proj_true);
+	if (be_emit_get_prev_block(true_target) == block) {
 		/* exchange both proj's so the second one can be omitted */
 		const ir_node *t = proj_true;

@@ -866,7 +839,8 @@ static void emit_amd64_jcc(const ir_node *irn)
 	/* emit the true proj */
 	amd64_emitf(proj_true, "j%PX %L", (int)cc);

-	if (get_cfop_target_block(proj_false) == next_block) {
+	ir_node const *const false_target = be_emit_get_cfop_target(proj_false);
+	if (be_emit_get_prev_block(false_target) == block) {
 		if (be_options.verbose_asm)
 			amd64_emitf(proj_false, "/* fallthrough to %L */");
 	} else {

@@ -983,47 +957,24 @@ static void amd64_gen_block(ir_node *block)
 	}
 }

-/**
- * Sets labels for control flow nodes (jump target)
- * TODO: Jump optimization
- */
-static void amd64_gen_labels(ir_node *block, void *env)
-{
-	(void)env;
-	for (int n = get_Block_n_cfgpreds(block); n-- != 0;) {
-		ir_node *const pred = get_Block_cfgpred(block, n);
-		set_irn_link(pred, block);
-	}
-}
-
 void amd64_emit_function(ir_graph *irg)
 {
 	ir_entity *entity = get_irg_entity(irg);
-	ir_node  **blk_sched;
-	size_t     i, n;

 	layout = be_get_irg_stack_layout(irg);

 	/* register all emitter functions */
 	amd64_register_emitters();

-	blk_sched = be_create_block_schedule(irg);
+	ir_node **blk_sched = be_create_block_schedule(irg);

 	be_gas_emit_function_prolog(entity, 4, NULL);

 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
-	irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
-
-	n = ARR_LEN(blk_sched);
-	for (i = 0; i < n; i++) {
-		ir_node *block = blk_sched[i];
-		ir_node *next  = (i + 1) < n ? blk_sched[i + 1] : NULL;
-		set_irn_link(block, next);
-	}
+	be_emit_init_cf_links(blk_sched);

-	for (i = 0; i < n; ++i) {
+	for (size_t i = 0, n = ARR_LEN(blk_sched); i < n; ++i) {
 		ir_node *block = blk_sched[i];
 		amd64_gen_block(block);
 	}
ir/be/arm/arm_emitter.c

@@ -220,20 +220,12 @@ static void emit_constant_name(const ent_or_tv_t *entry)
 	be_emit_irprintf("%sC%u", be_gas_get_private_prefix(), entry->label);
 }

-/**
- * Returns the target block for a control flow node.
- */
-static ir_node *get_cfop_target_block(const ir_node *irn)
-{
-	return (ir_node*)get_irn_link(irn);
-}
-
 /**
  * Emit the target label for a control flow node.
  */
 static void arm_emit_cfop_target(const ir_node *irn)
 {
-	ir_node *block = get_cfop_target_block(irn);
+	ir_node *block = be_emit_get_cfop_target(irn);
 	be_gas_emit_block_name(block);
 }

@@ -417,14 +409,6 @@ static void emit_arm_fConst(const ir_node *irn)
 	arm_emitf(irn, "ldf%m %D0, %C", mode, entry);
 }

-/**
- * Returns the next block in a block schedule.
- */
-static ir_node *sched_next_block(const ir_node *block)
-{
-	return (ir_node*)get_irn_link(block);
-}
-
 /**
  * Emit a Compare with conditional branch.
  */

@@ -451,14 +435,12 @@ static void emit_arm_B(const ir_node *irn)
 	if (cmp_attr->ins_permuted)
 		relation = get_inversed_relation(relation);

-	/* for now, the code works for scheduled and non-schedules blocks */
-	const ir_node *block      = get_nodes_block(irn);
-	const ir_node *next_block = sched_next_block(block);
-
 	assert(relation != ir_relation_false);
 	assert(relation != ir_relation_true);

-	if (get_cfop_target_block(proj_true) == next_block) {
+	ir_node const *const block       = get_nodes_block(irn);
+	ir_node const *const true_target = be_emit_get_cfop_target(proj_true);
+	if (be_emit_get_prev_block(true_target) == block) {
 		/* exchange both proj's so the second one can be omitted */
 		const ir_node *t = proj_true;

@@ -483,7 +465,8 @@ static void emit_arm_B(const ir_node *irn)
 	/* emit the true proj */
 	arm_emitf(irn, "b%s %t", suffix, proj_true);

-	if (get_cfop_target_block(proj_false) == next_block) {
+	ir_node const *const false_target = be_emit_get_cfop_target(proj_false);
+	if (be_emit_get_prev_block(false_target) == block) {
 		if (be_options.verbose_asm) {
 			arm_emitf(irn, "/* fallthrough to %t */", proj_false);
 		}

@@ -583,14 +566,12 @@ static void emit_be_MemPerm(const ir_node *node)
 static void emit_arm_Jmp(const ir_node *node)
 {
-	/* for now, the code works for scheduled and non-schedules blocks */
-	const ir_node *block      = get_nodes_block(node);
-	const ir_node *next_block = sched_next_block(block);
-
-	if (get_cfop_target_block(node) != next_block) {
+	ir_node const *const block  = get_nodes_block(node);
+	ir_node const *const target = be_emit_get_cfop_target(node);
+	if (be_emit_get_prev_block(target) != block) {
 		arm_emitf(node, "b %t", node);
-	} else {
-		if (be_options.verbose_asm) {
-			arm_emitf(node, "/* fallthrough to %t */", node);
-		}
+	} else if (be_options.verbose_asm) {
+		arm_emitf(node, "/* fallthrough to %t */", node);
 	}
 }

@@ -622,7 +603,7 @@ static void arm_register_emitters(void)
 /**
  * emit the block label if needed.
  */
-static void arm_emit_block_header(ir_node *block, ir_node *prev)
+static void arm_emit_block_header(ir_node *block)
 {
 	int  n_cfgpreds = get_Block_n_cfgpreds(block);
 	bool need_label;

@@ -633,7 +614,7 @@ static void arm_emit_block_header(ir_node *block, ir_node *prev)
 		/* we don't need labels for fallthrough blocks, however switch-jmps
 		 * are no fallthroughs */
-		need_label = pred_block != prev
+		need_label = pred_block != be_emit_get_prev_block(block)
 		          || (is_Proj(pred) && is_arm_SwitchJmp(get_Proj_pred(pred)));
 	} else {
 		need_label = true;

@@ -646,28 +627,15 @@ static void arm_emit_block_header(ir_node *block, ir_node *prev)
  * Walks over the nodes in a block connected by scheduling edges
  * and emits code for each node.
  */
-static void arm_gen_block(ir_node *block, ir_node *prev_block)
+static void arm_gen_block(ir_node *block)
 {
-	arm_emit_block_header(block, prev_block);
+	arm_emit_block_header(block);
 	be_dwarf_location(get_irn_dbg_info(block));
 	sched_foreach(block, irn) {
 		be_emit_node(irn);
 	}
 }

-/**
- * Block-walker:
- * Sets labels for control flow nodes (jump target)
- */
-static void arm_gen_labels(ir_node *block, void *env)
-{
-	(void)env;
-	for (int n = get_Block_n_cfgpreds(block); n-- > 0; ) {
-		ir_node *pred = get_Block_cfgpred(block, n);
-		set_irn_link(pred, block);
-	}
-}
-
 static parameter_dbg_info_t *construct_parameter_infos(ir_graph *irg)
 {
 	ir_entity *entity = get_irg_entity(irg);

@@ -708,17 +676,12 @@ void arm_emit_function(ir_graph *irg)
 	be_gas_emit_function_prolog(entity, 4, infos);

 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
-	irg_block_walk_graph(irg, arm_gen_labels, NULL, NULL);
-
-	ir_node *last_block = NULL;
-	for (size_t i = 0, n = ARR_LEN(blk_sched); i < n;) {
-		ir_node *block   = blk_sched[i++];
-		ir_node *next_bl = i < n ? blk_sched[i] : NULL;
-
-		/* set here the link. the emitter expects to find the next block here */
-		set_irn_link(block, next_bl);
-		arm_gen_block(block, last_block);
-		last_block = block;
+	be_emit_init_cf_links(blk_sched);
+
+	for (size_t i = 0, n = ARR_LEN(blk_sched); i < n;) {
+		ir_node *block = blk_sched[i++];
+		arm_gen_block(block);
 	}
 	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
ir/be/beemithlp.c

@@ -50,3 +50,24 @@ void be_emit_pad_comment(void)
 	/* 34 spaces */
 	be_emit_string_len("                                  ", 34 - col);
 }
+
+void be_emit_init_cf_links(ir_node **const block_schedule)
+{
+	ir_graph *const irg = get_irn_irg(block_schedule[0]);
+	assert(ir_resources_reserved(irg) & IR_RESOURCE_IRN_LINK);
+	ir_node *prev = NULL;
+	for (size_t i = 0, n = ARR_LEN(block_schedule); i < n; ++i) {
+		ir_node *const block = block_schedule[i];
+
+		/* Initialize cfop link */
+		for (unsigned n = get_Block_n_cfgpreds(block); n-- > 0; ) {
+			ir_node *pred = get_Block_cfgpred(block, n);
+			set_irn_link(pred, block);
+		}
+
+		/* initialize pred block links */
+		set_irn_link(block, prev);
+		prev = block;
+	}
+}
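The new helper fills the irn link field with two different relations, one for mode_X control-flow nodes and one for blocks. A minimal sketch of the invariants it establishes, which the inline queries in beemithlp.h below rely on (illustrative only, not part of the commit):

	/* Illustrative only (not part of the commit): invariants established by
	 * be_emit_init_cf_links(); "bs" is a hypothetical block-schedule array. */
	static void check_cf_links(ir_node **bs)
	{
		for (size_t i = 0, n = ARR_LEN(bs); i < n; ++i) {
			ir_node *block = bs[i];
			/* block -> previous block in the schedule (NULL for the first) */
			assert(get_irn_link(block) == (i > 0 ? bs[i - 1] : NULL));
			/* control-flow predecessor (mode_X node) -> the block it jumps to */
			for (unsigned p = get_Block_n_cfgpreds(block); p-- > 0; )
				assert(get_irn_link(get_Block_cfgpred(block, p)) == block);
		}
	}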
ir/be/beemithlp.h

@@ -14,8 +14,10 @@
 #ifndef FIRM_BE_BEEMITHLP_H
 #define FIRM_BE_BEEMITHLP_H

+#include <assert.h>
 #include "be.h"
 #include "irop_t.h"
+#include "irnode_t.h"

 /**
  * Emit spaces until the comment position is reached.

@@ -41,4 +43,32 @@ void be_emit_nothing(ir_node const *node);
  */
 void be_emit_node(ir_node const *node);

+/**
+ * Set irn links of blocks to point to the predecessor blocks in the given
+ * blockschedule and set irn_links of mode_X nodes to the block using them.
+ * This function expects that you require the IR_RESOURCE_IRN_LINK prior
+ * to using it.
+ */
+void be_emit_init_cf_links(ir_node **block_schedule);
+
+/**
+ * Returns the target block for a control flow node.
+ * Requires a prior call to be_emit_init_cf_links().
+ */
+static inline ir_node *be_emit_get_cfop_target(ir_node const *const irn)
+{
+	assert(get_irn_mode(irn) == mode_X);
+	return (ir_node*)get_irn_link(irn);
+}
+
+/**
+ * Returns the previous block in the block schedule.
+ * Requires a prior call to be_emit_get_cfop_target().
+ */
+static inline ir_node *be_emit_get_prev_block(ir_node const *const block)
+{
+	assert(is_Block(block));
+	return (ir_node*)get_irn_link(block);
+}
+
 #endif
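These inline queries are what the conditional-branch emitters below use to decide which Proj can become a fallthrough. A condensed sketch of that pattern, assembled from the amd64/arm/ia32 hunks in this commit; the emitter calls and condition-code handling are placeholders, not real libfirm functions:

	/* Condensed sketch of the conditional-branch pattern from the hunks in
	 * this commit; "emit_cond_jump", "emit_jump" and "negate_cc" stand in
	 * for the backend-specific parts. */
	static void sketch_emit_cond_branch(const ir_node *irn,
	                                    const ir_node *proj_true,
	                                    const ir_node *proj_false)
	{
		ir_node const *const block       = get_nodes_block(irn);
		ir_node const *const true_target = be_emit_get_cfop_target(proj_true);

		if (be_emit_get_prev_block(true_target) == block) {
			/* the true target is the fallthrough block: swap the Projs and
			 * negate the condition so the branch goes to the other target */
			const ir_node *t = proj_true;
			proj_true  = proj_false;
			proj_false = t;
			/* cc = negate_cc(cc); */
		}

		/* emit_cond_jump(proj_true, cc); */

		ir_node const *const false_target = be_emit_get_cfop_target(proj_false);
		if (be_emit_get_prev_block(false_target) != block) {
			/* emit_jump(proj_false); */
		} /* else: fallthrough, nothing to emit */
	}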
ir/be/ia32/ia32_emitter.c

@@ -74,12 +74,6 @@ typedef enum get_ip_style_t {
 static int get_ip_style = IA32_GET_IP_THUNK;

-/** Return the next block in Block schedule */
-static ir_node *get_prev_block_sched(const ir_node *block)
-{
-	return (ir_node*)get_irn_link(block);
-}
-
 /** Checks if the current block is a fall-through target. */
 static bool is_fallthrough(const ir_node *cfgpred)
 {

@@ -100,20 +94,18 @@ static bool block_needs_label(const ir_node *block)
 	if (get_Block_entity(block) != NULL)
 		return true;

-	int  n_cfgpreds = get_Block_n_cfgpreds(block);
-	bool need_label = true;
+	int n_cfgpreds = get_Block_n_cfgpreds(block);
 	if (n_cfgpreds == 0) {
-		need_label = 0;
+		return false;
 	} else if (n_cfgpreds == 1) {
 		ir_node *cfgpred       = get_Block_cfgpred(block, 0);
 		ir_node *cfgpred_block = get_nodes_block(cfgpred);
-		if (get_prev_block_sched(block) == cfgpred_block
-		    && is_fallthrough(cfgpred)) {
-			need_label = 0;
-		}
+		if (!is_fallthrough(cfgpred))
+			return true;
+		return be_emit_get_prev_block(block) != cfgpred_block;
+	} else {
+		return true;
 	}
-	return need_label;
 }

@@ -302,21 +294,12 @@ static void ia32_emit_xmm_mode_suffix(ir_node const *const node)
 	be_emit_char(get_xmm_mode_suffix(mode));
 }

-/**
- * Returns the target block for a control flow node.
- */
-static ir_node *get_cfop_target_block(const ir_node *irn)
-{
-	assert(get_irn_mode(irn) == mode_X);
-	return (ir_node*)get_irn_link(irn);
-}
-
 /**
  * Emits the target label for a control flow node.
  */
 static void ia32_emit_cfop_target(const ir_node *node)
 {
-	ir_node *block = get_cfop_target_block(node);
+	ir_node *block = be_emit_get_cfop_target(node);
 	be_gas_emit_block_name(block);
 }

@@ -794,7 +777,7 @@ static void ia32_emit_exc_label(const ir_node *node)
 static bool fallthrough_possible(const ir_node *block, const ir_node *target)
 {
-	return get_prev_block_sched(target) == block;
+	return be_emit_get_prev_block(target) == block;
 }

@@ -806,7 +789,7 @@ static void emit_ia32_Jcc(const ir_node *node)
 	/* get both Projs */
 	ir_node const *proj_true   = get_Proj_for_pn(node, pn_ia32_Jcc_true);
-	ir_node const *target_true = get_cfop_target_block(proj_true);
+	ir_node const *target_true = be_emit_get_cfop_target(proj_true);
 	ir_node const *proj_false  = get_Proj_for_pn(node, pn_ia32_Jcc_false);
 	ir_node const *block       = get_nodes_block(node);
 	if (fallthrough_possible(block, target_true)) {

@@ -817,7 +800,7 @@ static void emit_ia32_Jcc(const ir_node *node)
 		proj_false = t;
 		cc         = x86_negate_condition_code(cc);
 	}
-	const ir_node *target_false = get_cfop_target_block(proj_false);
+	const ir_node *target_false = be_emit_get_cfop_target(proj_false);
 	bool fallthrough = fallthrough_possible(block, target_false);
 	/* if we can't have a fallthrough anyway, put the more likely case first */
 	if (!fallthrough) {

@@ -893,7 +876,7 @@ static void emit_jumptable_target(ir_entity const *const table,
                                   ir_node const *const proj_x)
 {
 	(void)table;
-	ir_node const *const block = get_cfop_target_block(proj_x);
+	ir_node const *const block = be_emit_get_cfop_target(proj_x);
 	be_gas_emit_block_name(block);
 	switch (be_options.pic_style) {
 	case BE_PIC_NONE:

@@ -929,7 +912,7 @@ static void emit_ia32_Jmp(const ir_node *node)
 {
 	/* we have a block schedule */
 	ir_node *block  = get_nodes_block(node);
-	ir_node *target = get_cfop_target_block(node);
+	ir_node *target = be_emit_get_cfop_target(node);
 	if (fallthrough_possible(block, target)) {
 		if (be_options.verbose_asm)
 			ia32_emitf(node, "/* fallthrough to %L */");

@@ -1456,7 +1439,7 @@ static void ia32_emit_align_label(void)
 static bool should_align_block(const ir_node *block)
 {
 	static const double DELTA = .0001;
-	ir_node *prev      = get_prev_block_sched(block);
+	ir_node *prev      = be_emit_get_prev_block(block);
 	double   prev_freq = 0;  /**< execfreq of the fallthrough block */
 	double   jmp_freq  = 0;  /**< execfreq of all non-fallthrough blocks */

@@ -1572,7 +1555,6 @@ static void ia32_gen_labels(ir_node *block, void *data)
 	exc_entry **exc_list = (exc_entry**)data;
 	for (unsigned n = get_Block_n_cfgpreds(block); n-- > 0; ) {
 		ir_node *pred = get_Block_cfgpred(block, n);
-		set_irn_link(pred, block);
 		pred = skip_Proj(pred);
 		if (is_ia32_irn(pred) && get_ia32_exc_label(pred) && exc_list != NULL) {

@@ -1633,15 +1615,9 @@ static void emit_function_text(ir_graph *const irg, exc_entry **const exc_list)
 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 	irg_block_walk_graph(irg, ia32_gen_labels, NULL, exc_list);
-	/* initialize next block links */
-	size_t const n = ARR_LEN(blk_sched);
-	for (size_t i = 0; i < n; ++i) {
-		ir_node *const block = blk_sched[i];
-		ir_node *const prev  = i > 0 ? blk_sched[i - 1] : NULL;
-		set_irn_link(block, prev);
-	}
+	be_emit_init_cf_links(blk_sched);

-	for (size_t i = 0; i < n; ++i) {
+	for (size_t i = 0, n = ARR_LEN(blk_sched); i < n; ++i) {
 		ir_node *const block = blk_sched[i];
 		ia32_gen_block(block);
 	}

@@ -1737,7 +1713,7 @@ static void bemit_relocation(x86_imm32_t const *const imm)
 static void bemit_jmp_destination(ir_node const *const cfop)
 {
 	assert(get_irn_mode(cfop) == mode_X);
-	ir_node const *const dest_block = get_cfop_target_block(cfop);
+	ir_node const *const dest_block = be_emit_get_cfop_target(cfop);
 	unsigned const fragment_num
 		= PTR_TO_INT(ir_nodehashmap_get(void, &block_fragmentnum, dest_block));
 	be_emit_reloc_fragment(4, BEMIT_RELOCATION_RELJUMP, fragment_num, -4);

@@ -2775,7 +2751,7 @@ static void bemit_jmp(ir_node const *const cfop)
 static void bemit_jump(const ir_node *node)
 {
 	ir_node *block  = get_nodes_block(node);
-	ir_node *target = get_cfop_target_block(node);
+	ir_node *target = be_emit_get_cfop_target(node);
 	if (fallthrough_possible(block, target))
 		return;

@@ -2803,7 +2779,7 @@ static void bemit_ia32_jcc(const ir_node *node)
 	/* get both Projs */
 	ir_node const *proj_true   = get_Proj_for_pn(node, pn_ia32_Jcc_true);
-	ir_node const *target_true = get_cfop_target_block(proj_true);
+	ir_node const *target_true = be_emit_get_cfop_target(proj_true);
 	ir_node const *proj_false  = get_Proj_for_pn(node, pn_ia32_Jcc_false);
 	ir_node const *block       = get_nodes_block(node);
 	if (fallthrough_possible(block, target_true)) {

@@ -2815,7 +2791,7 @@ static void bemit_ia32_jcc(const ir_node *node)
 		cc = x86_negate_condition_code(cc);
 	}
-	ir_node const *target_false = get_cfop_target_block(proj_false);
+	ir_node const *target_false = be_emit_get_cfop_target(proj_false);
 	bool const fallthrough = fallthrough_possible(block, target_false);
 	/* if we can't have a fallthrough anyway, put the more likely case first */
 	if (!fallthrough) {

@@ -3324,14 +3300,12 @@ ir_jit_function_t *ia32_emit_jit(ir_jit_segment_t *const segment,
 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 	irg_block_walk_graph(irg, ia32_gen_labels, NULL, NULL);
-	/* initialize next block links */
+	be_emit_init_cf_links(blk_sched);
 	ir_nodehashmap_init(&block_fragmentnum);
 	size_t n = ARR_LEN(blk_sched);
 	for (size_t i = 0; i < n; ++i) {
 		ir_node *block = blk_sched[i];
-		ir_node *prev  = i > 0 ? blk_sched[i - 1] : NULL;
-		set_irn_link(block, prev);
 		assign_block_fragment_num(block, (unsigned)i);
 	}
 	for (size_t i = 0; i < n; ++i) {
ir/be/sparc/sparc_emitter.c

@@ -190,22 +190,12 @@ static void emit_fp_suffix(const ir_mode *mode)
 	}
 }

-static void set_jump_target(ir_node *jump, ir_node *target)
-{
-	set_irn_link(jump, target);
-}
-
-static ir_node *get_jump_target(const ir_node *jump)
-{
-	return (ir_node*)get_irn_link(jump);
-}
-
 /**
  * Returns the target label for a control flow node.
  */
 static void sparc_emit_cfop_target(const ir_node *node)
 {
-	ir_node *block = get_jump_target(node);