Commit e080458f authored by Götz Lindenmaier

Split common.h into two files, common.h and common_t.h. This way config.h

  as well as the preprocessor flags are no longer visible externally.
  Adapted include directives.
  Bug: dominator construction aborts for BreakTest with flag 3.31.
  Problem: optimization of many subsequent blocks (Jmp-chains)
  where one is inlined (Tuple control flow) in a loop.  Not all
  nodes in the merged blocks are moved to the remaining block.
  Fix:  different calling order to optimize_in_place_2 from
  optimize_in_place_wrapper.
  Bug: dumping the cfg fails when a block is Bad.  Fix: output the Bad node.
  Change: turn off not only cse but all optimizations during
  inlining (a short sketch follows below).

[r336]
parent c8bd46a3
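
The behavioral change noted above, turning off all optimizations rather than only cse while the inlined body is copied, boils down to the save/clear/restore pattern sketched here. This is a minimal sketch, not the full inline_method(): the identifiers get_optimize, set_optimize and get_opt_inline are the libFirm accessors visible in the irgopt.c hunks further down, the two includes are headers that also appear in this diff, and the node-copying body is elided.

    # include "irnode.h"
    # include "irgraph.h"

    /* Hypothetical, abridged variant of inline_method() illustrating the
       new flag handling; the optimize-flag accessors are assumed to be
       visible through the usual libFirm headers. */
    void inline_method_sketch (ir_node *call, ir_graph *called_graph) {
      int rem_opt;

      /* inlining is skipped entirely if optimization is disabled */
      if (!get_optimize() || !get_opt_inline()) return;

      /* remember the global optimize flag and clear it: allocating the
         copied nodes must not trigger cse or other local optimizations */
      rem_opt = get_optimize();
      set_optimize(0);

      /* ... copy the nodes of called_graph and wire them into the
         calling graph in place of the Call node (elided) ... */

      /* restore the caller's setting */
      set_optimize(rem_opt);
    }
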
......@@ -19,7 +19,7 @@
# include "irnode_t.h"
# include "irmode_t.h"
# include "ircons.h"
# include "common.h"
# include "common_t.h"
# include "irvrfy.h"
# include "irop.h"
# include "iropt_t.h"
......@@ -28,7 +28,7 @@
/* memset belongs to string.h */
# include "string.h"
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
/* A stack needed for the automatic Phi node construction in constructor
Phi_in. Redefinition in irgraph.c!! */
struct Phi_in_stack {
......@@ -714,7 +714,7 @@ new_r_Phi0 (ir_graph *irg, ir_node *block, ir_mode *mode)
new_r_Phi_in. The original implementation used the obstack
to model this stack, now it is explicit. This reduces side effects.
*/
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
Phi_in_stack *
new_Phi_in_stack() {
Phi_in_stack *res;
......@@ -775,7 +775,7 @@ alloc_or_pop_from_Phi_in_stack(ir_graph *irg, ir_node *block, ir_mode *mode,
}
return res;
}
#endif /* USE_EXPICIT_PHI_IN_STACK */
#endif /* USE_EXPLICIT_PHI_IN_STACK */
/* Creates a Phi node with a given, fixed array **in of predecessors.
If the Phi node is unnecessary, as the same value reaches the block
......@@ -810,7 +810,7 @@ new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
the in array contains NULLs, there will be missing predecessors in the
returned node.
Is this a possible internal state of the Phi node generation? */
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
res = known = alloc_or_pop_from_Phi_in_stack(irg, block, mode, ins, in);
#else
res = known = new_ir_node (irg, block, op_Phi, mode, ins, in);
......@@ -839,7 +839,7 @@ new_r_Phi_in (ir_graph *irg, ir_node *block, ir_mode *mode,
/* i==ins: there is at most one predecessor, we don't need a phi node. */
if (i==ins) {
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
free_to_Phi_in_stack(res);
#else
obstack_free (current_ir_graph->obst, res);
......@@ -1022,7 +1022,7 @@ get_r_value_internal (ir_node *block, int pos, ir_mode *mode)
it starts the recursion. This causes an Id at the entry of
every block that has no definition of the value! **/
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
/* Just dummies */
Phi_in_stack * new_Phi_in_stack() { return NULL; }
void free_Phi_in_stack(Phi_in_stack *s) { }
......
......@@ -27,7 +27,8 @@
# include "irgwalk.h"
# include "typewalk.h"
# include "irouts.h"
#include "irdom.h"
# include "irdom.h"
# include "common_t.h"
/* Attributes of nodes */
#define DEFAULT_NODE_ATTR ""
......@@ -1059,6 +1060,7 @@ dump_cfg (ir_graph *irg)
/* walk over the blocks in the graph */
irg_block_walk(irg->end, dump_block_to_cfg, NULL, NULL);
dump_ir_node (irg->bad);
dump_dominator_information_flag = ddif;
vcg_close();
......
......@@ -43,20 +43,17 @@ void init_link (ir_node *n, void *env) {
void
optimize_in_place_wrapper (ir_node *n, void *env) {
int start, i;
int i;
ir_node *optimized;
if (get_irn_op(n) == op_Block)
start = 0;
else
start = -1;
/* optimize all sons after recursion, i.e., the sons' sons are
optimized already. */
for (i = start; i < get_irn_arity(n); i++) {
for (i = 0; i < get_irn_arity(n); i++) {
optimized = optimize_in_place_2(get_irn_n(n, i));
set_irn_n(n, i, optimized);
assert(get_irn_op(optimized) != op_Id);
}
if (get_irn_op(n) == op_Block) {
optimized = optimize_in_place_2(n);
if (optimized != n) exchange (n, optimized);
}
}
......@@ -413,7 +410,10 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
int arity, n_ret, n_exc, n_res, i, j, rem_opt;
type *called_frame, *caller_frame;
if (!get_opt_inline()) return;
if (!get_optimize() || !get_opt_inline()) return;
/** Turn off optimizations, this can cause problems when allocating new nodes. **/
rem_opt = get_optimize();
set_optimize(0);
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);
......@@ -428,9 +428,6 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
assert(get_type_tpop(get_Call_type(call)) == type_method);
if (called_graph == current_ir_graph) return;
/** Turn off cse, this can cause problems when allocating new nodes. **/
rem_opt = get_opt_cse();
set_opt_cse(0);
/** Part the Call node into two nodes. Pre_call collects the parameters of
the procedure and later replaces the Start node of the called graph.
......@@ -443,8 +440,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
in[1] = get_Call_mem(call);
in[2] = get_irg_frame(current_ir_graph);
in[3] = get_irg_globals(current_ir_graph);
in[4] = new_Tuple (get_Call_n_params(call),
get_Call_param_arr(call));
in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
pre_call = new_Tuple(5, in);
post_call = call;
......@@ -461,10 +457,10 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
/* Set pre_call as new Start node in link field of the start node of
calling graph and pre_calls block as new block for the start block
of calling graph.
Further mark these nodes so that they are not visited by the
copying. */
calling graph and pre_calls block as new block for the start block
of calling graph.
Further mark these nodes so that they are not visited by the
copying. */
set_irn_link(get_irg_start(called_graph), pre_call);
set_irn_visited(get_irg_start(called_graph),
get_irg_visited(current_ir_graph));/***/
......@@ -476,8 +472,8 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
/* Initialize for compaction of in arrays */
inc_irg_block_visited(current_ir_graph);
/*
set_Block_block_visited(get_irg_start_block(called_graph),
get_irg_block_visited(current_ir_graph) +1 +1); /* count for self edge */
set_Block_block_visited(get_irg_start_block(called_graph),
get_irg_block_visited(current_ir_graph) +1 +1); /* count for self edge */
/*** Replicate local entities of the called_graph ***/
/* copy the entities. */
......@@ -499,7 +495,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
entities. */
/* @@@ endless loops are not copied!! */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
get_irg_frame_type(called_graph));
get_irg_frame_type(called_graph));
/* Repair called_graph */
set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
......@@ -568,11 +564,11 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
ret = get_irn_n(end_bl, i);
if (get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
}
ret = get_irn_n(end_bl, i);
if (get_irn_op(ret) == op_Return) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
}
}
phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
......@@ -606,15 +602,15 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
ir_node *ret;
ret = skip_Proj(get_irn_n(end_bl, i));
if (get_irn_op(ret) == op_Call) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
n_exc++;
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
n_exc++;
} else if (is_fragile_op(ret)) {
/* We rely that all cfops have the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
n_exc++;
/* We rely that all cfops have the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
n_exc++;
} else if (get_irn_op(ret) == op_Raise) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
n_exc++;
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
n_exc++;
}
}
set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
......@@ -638,9 +634,9 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
if (get_irn_op(cf_op) == op_Proj) {
cf_op = get_Proj_pred(cf_op);
if (get_irn_op(cf_op) == op_Tuple) {
cf_op = get_Tuple_pred(cf_op, 1);
assert(get_irn_op(cf_op) == op_Jmp);
break;
cf_op = get_Tuple_pred(cf_op, 1);
assert(get_irn_op(cf_op) == op_Jmp);
break;
}
}
}
......@@ -660,7 +656,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
}
/** Turn cse back on. **/
set_opt_cse(rem_opt);
set_optimize(rem_opt);
}
/********************************************************************/
......
......@@ -22,7 +22,7 @@
ir_graph *current_ir_graph;
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
/* really defined in ircons.c */
typedef struct Phi_in_stack Phi_in_stack;
Phi_in_stack *new_Phi_in_stack();
......@@ -67,7 +67,7 @@ new_ir_graph (entity *ent, int n_loc)
res->visited = 0; /* visited flag, for the ir walker */
res->block_visited=0; /* visited flag, for the 'block'-walker */
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
res->Phi_in_stack = new_Phi_in_stack(); /* A stack needed for automatic Phi
generation */
#endif
......@@ -134,7 +134,7 @@ ir_graph *new_const_code_irg() {
res->n_loc = 1; /* Only the memory. */
res->visited = 0; /* visited flag, for the ir walker */
res->block_visited=0; /* visited flag, for the 'block'-walker */
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
res->Phi_in_stack = NULL;
#endif
res->obst = (struct obstack *) xmalloc (sizeof (struct obstack));
......@@ -175,7 +175,7 @@ ir_graph *new_const_code_irg() {
void free_ir_graph (ir_graph *irg) {
set_entity_irg(irg->ent, NULL);
free(irg->obst);
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
free_Phi_in_stack(irg->Phi_in_stack);
#endif
free(irg);
......
......@@ -13,6 +13,7 @@
# include "obst.h"
# include "pset.h"
# include "irgraph.h"
# include "common_t.h"
#define FRAME_TP_SUFFIX "frame_tp"
......@@ -46,7 +47,7 @@ struct ir_graph {
irg_dom_state dom_state; /* Dominator information */
/** Fields for construction **/
#if USE_EXPICIT_PHI_IN_STACK
#if USE_EXPLICIT_PHI_IN_STACK
struct Phi_in_stack *Phi_in_stack; /* needed for automatic Phi construction */
#endif
int n_loc; /* number of local variable in this
......
......@@ -19,7 +19,7 @@
#include "array.h"
#ifdef DEBUG_libfirm
#include "irprog.h"
#include "irprog_t.h"
#endif
/* some constants fixing the positions of nodes predecessors
......@@ -322,14 +322,16 @@ get_irn_link (ir_node *node) {
return node->link;
}
#ifdef DEBUG_libfirm
/* Outputs a unique number for this node */
inline long
get_irn_node_nr(ir_node *node) {
assert(node);
#ifdef DEBUG_libfirm
return node->node_nr;
}
#else
return 0;
#endif
}
inline tarval *
get_irn_const_attr (ir_node *node)
......@@ -1844,6 +1846,8 @@ skip_Proj (ir_node *node) {
inline ir_node *
skip_Tuple (ir_node *node) {
ir_node *pred;
node = skip_nop(node);
if (get_irn_op(node) == op_Proj) {
pred = skip_nop(get_Proj_pred(node));
if (get_irn_op(pred) == op_Proj) /* nested Tuple ? */
......
......@@ -97,10 +97,9 @@ inline void mark_irn_visited (ir_node *node);
inline int irn_not_visited (ir_node *node);
inline void set_irn_link (ir_node *node, ir_node *link);
inline ir_node *get_irn_link (ir_node *node);
#ifdef DEBUG_libfirm
/* Outputs a unique number for this node */
/* Outputs a unique number for this node if libfirm is compiled for
debugging, else returns 0. */
inline long get_irn_node_nr(ir_node *node);
#endif
/*****/
/* irnode constructor */
......@@ -498,7 +497,6 @@ ir_node *get_fragile_op_mem(ir_node *node);
/*****/
/* Makros for debugging the libfirm */
/*#ifdef DEBUG_libfirm*/
#include "ident.h"
#define DDMSG printf("%s(l.%i)\n", __FUNCTION__, __LINE__)
......@@ -526,6 +524,5 @@ ir_node *get_fragile_op_mem(ir_node *node);
#define DDME(X) xprintf("%s(l.%i) %I: %p\n", __FUNCTION__, __LINE__, \
get_entity_ident(X), (X))
/*#endif*/
# endif /* _IRNODE_H_ */
......@@ -14,8 +14,8 @@
# include "irnode.h"
# include "xprintf.h"
# include "irop_t.h"
#include "irdom_t.h" /* For size of struct dom_info. */
# include "common_t.h"
# include "irdom_t.h" /* For size of struct dom_info. */
/** ir node attributes **/
......
......@@ -85,10 +85,6 @@ void set_irp_type(int pos, type *typ);
/** Functions to access the fields of ir_prog **/
type *get_glob_type(void);
#ifdef DEBUG_libfirm
/* Returns a new, unique number to number nodes or the like. */
int get_irp_new_node_nr();
#endif
/*****/
/***p* irprog/get_const_code_irg
......
......@@ -5,6 +5,7 @@
# define _IRPROG_T_H_
#include "irprog.h"
#include "common_t.h"
struct ir_prog {
firm_kind kind;
......@@ -26,4 +27,9 @@ struct ir_prog {
#endif
};
#ifdef DEBUG_libfirm
/* Returns a new, unique number to number nodes or the like. */
int get_irp_new_node_nr();
#endif
#endif /* ifndef _IRPROG_T_H_ */