Zwinkau / libfirm · Commits · eb38e825

Commit eb38e825, authored Dec 04, 2012 by Matthias Braun

remove Bound node

parent 5d8892b2

Changes: 5 files
ir/ir/irop.c
@@ -387,11 +387,6 @@ static int node_cmp_attr_CopyB(const ir_node *a, const ir_node *b)
 	return node_cmp_exception(a, b);
 }
 
-static int node_cmp_attr_Bound(const ir_node *a, const ir_node *b)
-{
-	return node_cmp_exception(a, b);
-}
-
 /** Compares the attributes of two Div nodes. */
 static int node_cmp_attr_Div(const ir_node *a, const ir_node *b)
 {
@@ -596,7 +591,6 @@ void firm_init_op(void)
 	register_node_cmp_func(op_ASM,     node_cmp_attr_ASM);
 	register_node_cmp_func(op_Alloc,   node_cmp_attr_Alloc);
-	register_node_cmp_func(op_Bound,   node_cmp_attr_Bound);
 	register_node_cmp_func(op_Builtin, node_cmp_attr_Builtin);
 	register_node_cmp_func(op_Call,    node_cmp_attr_Call);
 	register_node_cmp_func(op_Cast,    node_cmp_attr_Cast);
ir/ir/iropt.c
@@ -1193,56 +1193,6 @@ static ir_node *equivalent_node_Proj_CopyB(ir_node *proj)
 	return proj;
 }
 
-/**
- * Optimize Bounds(idx, idx, upper) into idx.
- */
-static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
-{
-	ir_node *oldn  = proj;
-	ir_node *bound = get_Proj_pred(proj);
-	ir_node *idx   = get_Bound_index(bound);
-	ir_node *pred  = skip_Proj(idx);
-	int ret_tuple  = 0;
-
-	if (idx == get_Bound_lower(bound))
-		ret_tuple = 1;
-	else if (is_Bound(pred)) {
-		/*
-		 * idx was Bounds checked previously, it is still valid if
-		 * lower <= pred_lower && pred_upper <= upper.
-		 */
-		ir_node *lower = get_Bound_lower(bound);
-		ir_node *upper = get_Bound_upper(bound);
-		if (get_Bound_lower(pred) == lower &&
-		    get_Bound_upper(pred) == upper) {
-			/*
-			 * One could expect that we simply return the previous
-			 * Bound here. However, this would be wrong, as we could
-			 * add an exception Proj to a new location then.
-			 * So, we must turn in into a tuple.
-			 */
-			ret_tuple = 1;
-		}
-	}
-	if (ret_tuple) {
-		/* Turn Bound into a tuple (mem, jmp, bad, idx) */
-		switch (get_Proj_proj(proj)) {
-		case pn_Bound_M:
-			DBG_OPT_EXC_REM(proj);
-			proj = get_Bound_mem(bound);
-			break;
-		case pn_Bound_res:
-			proj = idx;
-			DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
-			break;
-		default:
-			/* cannot optimize pn_Bound_X_regular, handled in transform ... */
-			break;
-		}
-	}
-	return proj;
-}
-
 /**
  * Does all optimizations on nodes that must be done on its Projs
  * because of creating new nodes.
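The removed equivalent_node_Proj_Bound above encodes when a Bound check is redundant: either the index is literally the lower bound, or the index already passed an earlier Bound check against exactly the same bounds. A minimal plain-C sketch of that decision, written outside the libfirm node API (the idx_info struct and its field names are hypothetical, for illustration only):

/* Hypothetical summary of what is known about an index value; not a libfirm type. */
typedef struct idx_info {
	long lower, upper;        /* bounds of the check being examined       */
	int  already_checked;     /* did the index already pass a check?      */
	long checked_lower;       /* bounds of that earlier check, if any     */
	long checked_upper;
} idx_info;

/* Mirrors the two cases in the removed code that set ret_tuple = 1. */
static int bound_check_is_redundant(long idx, const idx_info *info)
{
	/* case 1: Bounds(idx, idx, upper) -- the index equals the lower bound */
	if (idx == info->lower)
		return 1;

	/* case 2: an earlier check used exactly the same lower and upper bounds */
	if (info->already_checked
	    && info->checked_lower == info->lower
	    && info->checked_upper == info->upper)
		return 1;

	return 0;
}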
@@ -4763,63 +4713,6 @@ static ir_node *transform_node_Proj_CopyB(ir_node *proj)
 	return proj;
 }
 
-/**
- * Optimize Bounds(idx, idx, upper) into idx.
- */
-static ir_node *transform_node_Proj_Bound(ir_node *proj)
-{
-	ir_node *oldn  = proj;
-	ir_node *bound = get_Proj_pred(proj);
-	ir_node *idx   = get_Bound_index(bound);
-	ir_node *pred  = skip_Proj(idx);
-	int ret_tuple  = 0;
-
-	if (idx == get_Bound_lower(bound))
-		ret_tuple = 1;
-	else if (is_Bound(pred)) {
-		/*
-		 * idx was Bounds checked previously, it is still valid if
-		 * lower <= pred_lower && pred_upper <= upper.
-		 */
-		ir_node *lower = get_Bound_lower(bound);
-		ir_node *upper = get_Bound_upper(bound);
-		if (get_Bound_lower(pred) == lower &&
-		    get_Bound_upper(pred) == upper) {
-			/*
-			 * One could expect that we simply return the previous
-			 * Bound here. However, this would be wrong, as we could
-			 * add an exception Proj to a new location then.
-			 * So, we must turn in into a tuple.
-			 */
-			ret_tuple = 1;
-		}
-	}
-	if (ret_tuple) {
-		/* Turn Bound into a tuple (mem, jmp, bad, idx) */
-		switch (get_Proj_proj(proj)) {
-		case pn_Bound_M:
-			DBG_OPT_EXC_REM(proj);
-			proj = get_Bound_mem(bound);
-			break;
-		case pn_Bound_X_except:
-			DBG_OPT_EXC_REM(proj);
-			proj = new_r_Bad(get_irn_irg(proj), mode_X);
-			break;
-		case pn_Bound_res:
-			proj = idx;
-			DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
-			break;
-		case pn_Bound_X_regular:
-			DBG_OPT_EXC_REM(proj);
-			proj = new_r_Jmp(get_nodes_block(bound));
-			break;
-		default:
-			break;
-		}
-	}
-	return proj;
-}
-
 /**
  * Does all optimizations on nodes that must be done on its Projs
  * because of creating new nodes.
@@ -6285,7 +6178,6 @@ void ir_register_opt_node_ops(void)
 	register_equivalent_node_func(op_Shr,   equivalent_node_left_zero);
 	register_equivalent_node_func(op_Shrs,  equivalent_node_left_zero);
 	register_equivalent_node_func(op_Sub,   equivalent_node_Sub);
-	register_equivalent_node_func_proj(op_Bound, equivalent_node_Proj_Bound);
 	register_equivalent_node_func_proj(op_CopyB, equivalent_node_Proj_CopyB);
 	register_equivalent_node_func_proj(op_Div,   equivalent_node_Proj_Div);
 	register_equivalent_node_func_proj(op_Tuple, equivalent_node_Proj_Tuple);

@@ -6317,7 +6209,6 @@ void ir_register_opt_node_ops(void)
 	register_transform_node_func(op_Sub,    transform_node_Sub);
 	register_transform_node_func(op_Switch, transform_node_Switch);
 	register_transform_node_func(op_Sync,   transform_node_Sync);
-	register_transform_node_func_proj(op_Bound, transform_node_Proj_Bound);
 	register_transform_node_func_proj(op_CopyB, transform_node_Proj_CopyB);
 	register_transform_node_func_proj(op_Div,   transform_node_Proj_Div);
 	register_transform_node_func_proj(op_Load,  transform_node_Proj_Load);
ir/ir/irtypes.h
@@ -325,11 +325,6 @@ typedef struct copyb_attr {
 	ir_type     *type;   /**< Type of the copied entity. */
 } copyb_attr;
 
-/** Bound attribute. */
-typedef struct bound_attr {
-	except_attr exc;     /**< The exception attribute. MUST be the first one. */
-} bound_attr;
-
 /** Div attribute. */
 typedef struct div_attr {
 	except_attr exc;     /**< The exception attribute. MUST be the first one. */
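The "MUST be the first one" note on the exc member (in the removed bound_attr, and still in div_attr below) is presumably there because generic handling of fragile operations reinterprets the attribute storage as an except_attr. A small stand-alone sketch of that layout guarantee, with deliberately simplified, hypothetical struct contents rather than the real libfirm fields:

/* Simplified, hypothetical attribute types -- the real structs carry more
 * fields.  The point is only the first-member layout guarantee. */
typedef struct except_attr {
	unsigned pin_state;
} except_attr;

typedef struct some_fragile_attr {
	except_attr exc;    /* first member: same address as the whole struct */
	int         extra;  /* op-specific payload comes afterwards           */
} some_fragile_attr;

/* Generic accessor pattern: valid in C only because exc is the first member,
 * so a pointer to the attribute block is also a pointer to its except_attr. */
static except_attr *get_generic_exception_attr(void *attr_block)
{
	return (except_attr *)attr_block;
}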
@@ -387,7 +382,6 @@ typedef union ir_attr {
 	confirm_attr   confirm;  /**< For Confirm: compare operation and region. */
 	except_attr    except;   /**< For Phi node construction in case of exceptions */
 	copyb_attr     copyb;    /**< For CopyB operation */
-	bound_attr     bound;    /**< For Bound operation */
 	div_attr       div;      /**< For Div operation */
 	mod_attr       mod;      /**< For Mod operation */
 	asm_attr       assem;    /**< For ASM operation. */
ir/ir/irverify.c
@@ -663,28 +663,6 @@ static int verify_node_Proj_CopyB(const ir_node *p)
 	return 1;
 }
 
-/**
- * verify a Proj(Bound) node
- */
-static int verify_node_Proj_Bound(const ir_node *p)
-{
-	ir_mode *mode = get_irn_mode(p);
-	ir_node *n    = get_Proj_pred(p);
-	long     proj = get_Proj_proj(p);
-
-	ASSERT_AND_RET_DBG(
-		(
-			(proj == pn_Bound_M         && mode == mode_M) ||
-			(proj == pn_Bound_X_regular && mode == mode_X) ||
-			(proj == pn_Bound_X_except  && mode == mode_X) ||
-			(proj == pn_Bound_res       && mode == get_irn_mode(get_Bound_index(n)))
-		),
-		"wrong Proj from Bound", 0,
-		show_proj_failure(p);
-	);
-	return 1;
-}
-
 static int verify_node_Proj_fragile(const ir_node *node)
 {
 	ir_node *pred = get_Proj_pred(node);
@@ -1613,28 +1591,6 @@ static int verify_node_CopyB(const ir_node *n)
 	return 1;
 }
 
-/**
- * verify a Bound node
- */
-static int verify_node_Bound(const ir_node *n)
-{
-	ir_mode *mymode  = get_irn_mode(n);
-	ir_mode *op1mode = get_irn_mode(get_Bound_mem(n));
-	ir_mode *op2mode = get_irn_mode(get_Bound_index(n));
-	ir_mode *op3mode = get_irn_mode(get_Bound_lower(n));
-	ir_mode *op4mode = get_irn_mode(get_Bound_upper(n));
-
-	/* Bound: BB x M x int x int x int --> M x X */
-	ASSERT_AND_RET(
-		mymode == mode_T &&
-		op1mode == mode_M &&
-		op2mode == op3mode &&
-		op3mode == op4mode &&
-		mode_is_int(op3mode),
-		"Bound node", 0);
-	return 1;
-}
-
 /**
  * Check dominance.
  * For each usage of a node, it is checked, if the block of the
@@ -2173,7 +2129,6 @@ void ir_register_verify_node_ops(void)
 	register_verify_node_func(op_Alloc, verify_node_Alloc);
 	register_verify_node_func(op_And,   verify_node_And);
 	register_verify_node_func(op_Block, verify_node_Block);
-	register_verify_node_func(op_Bound, verify_node_Bound);
 	register_verify_node_func(op_Call,  verify_node_Call);
 	register_verify_node_func(op_Cast,  verify_node_Cast);
 	register_verify_node_func(op_Cmp,   verify_node_Cmp);

@@ -2213,7 +2168,6 @@ void ir_register_verify_node_ops(void)
 	register_verify_node_func(op_Sync,  verify_node_Sync);
 	register_verify_node_func_proj(op_Alloc, verify_node_Proj_Alloc);
-	register_verify_node_func_proj(op_Bound, verify_node_Proj_Bound);
 	register_verify_node_func_proj(op_Call,  verify_node_Proj_Call);
 	register_verify_node_func_proj(op_Cond,  verify_node_Proj_Cond);
 	register_verify_node_func_proj(op_CopyB, verify_node_Proj_CopyB);
scripts/ir_spec.py
@@ -240,28 +240,6 @@ class Block:
 }
 '''
 
-@op
-class Bound:
-	"""Performs a bounds-check: if lower <= index < upper then return index,
-	otherwise throw an exception."""
-	ins   = [
-		("mem",   "memory dependency"),
-		("index", "value to test"),
-		("lower", "lower bound (inclusive)"),
-		("upper", "upper bound (exclusive)"),
-	]
-	outs  = [
-		("M",         "memory result"),
-		("res",       "the checked index"),
-		("X_regular", "control flow when no exception occurs"),
-		("X_except",  "control flow when exception occured"),
-	]
-	flags       = [ "fragile", "highlevel" ]
-	pinned      = "exception"
-	pinned_init = "op_pin_state_pinned"
-	throws_init = "false"
-	attr_struct = "bound_attr"
-
 @op
 class Builtin:
 	"""performs a backend-specific builtin."""
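The removed spec documents the node's contract: yield the index when lower <= index < upper, otherwise leave through the exception successor. A minimal plain-C sketch of that contract, independent of libfirm's IR construction API (function name and message text are illustrative only):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the check the Bound node expressed: "res" is the index itself
 * when it lies in [lower, upper); otherwise control leaves via X_except. */
static long bound_check(long index, long lower, long upper)
{
	if (index < lower || index >= upper) {
		/* corresponds to the X_except output of the removed node */
		fprintf(stderr, "index %ld out of bounds [%ld, %ld)\n",
		        index, lower, upper);
		exit(EXIT_FAILURE);
	}
	/* corresponds to the X_regular and res outputs */
	return index;
}

int main(void)
{
	long a[4] = { 10, 20, 30, 40 };
	long i    = 2;
	printf("%ld\n", a[bound_check(i, 0, 4)]);   /* prints 30 */
	return 0;
}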