Commit 4e3fd9fe authored by Götz Lindenmaier's avatar Götz Lindenmaier
Browse files

some simple optimizations for execution speed

[r2461]
parent 8dc4091a
......@@ -111,7 +111,7 @@ void *set_insert (set *set, const void *key, size_t size, unsigned hash);
* @return a pointer to the set_entry of the inserted element
*
* @note
* It is not possible to insert on element more than once. If an element
* It is not possible to insert an element more than once. If an element
that should be inserted is already in the set, this function does
nothing but return its set_entry.
*/
......
......@@ -21,8 +21,7 @@ SOURCES = $(INSTALL_HEADERS)
SOURCES += Makefile.in \
irouts.c irdom_t.h irdom.c cgana.c \
irloop_t.h irbackedge.c irbackedge_t.h irscc.c irtypeinfo.c irsimpletype.c \
confirmcons.c
irloop_t.h irbackedge.c irbackedge_t.h irscc.c irtypeinfo.c irsimpletype.c
include $(topdir)/MakeRules
......
......@@ -209,11 +209,13 @@ void compute_doms(ir_graph *irg) {
for (i = n_blocks-1; i > 0; i--) { /* Don't iterate the root, it's done. */
int irn_arity;
tmp_dom_info *w = &tdi_list[i];
tmp_dom_info *v;
/* Step 2 */
for (j = 0; j < get_irn_arity(w->block); j++) {
irn_arity = get_irn_arity(w->block);
for (j = 0; j < irn_arity; j++) {
ir_node *pred = get_nodes_Block(get_Block_cfgpred(w->block, j));
tmp_dom_info *u;
......
......@@ -183,15 +183,16 @@ void irg_out_block_walk(ir_node *node,
/* Returns the number of out edges for not yet visited successors. */
static int count_outs(ir_node *n) {
int start, i, res;
int start, i, res, irn_arity;
ir_node *succ;
set_irn_visited(n, get_irg_visited(current_ir_graph));
n->out = (ir_node **) 1; /* Space for array size. */
if ((get_irn_op(n) == op_Block)) start = 0; else start = -1;
res = get_irn_arity(n) - start +1; /* --1 or --0; 1 for array size. */
for (i = start; i < get_irn_arity(n); i++) {
irn_arity = get_irn_arity(n);
res = irn_arity - start +1; /* --1 or --0; 1 for array size. */
for (i = start; i < irn_arity; i++) {
/* Optimize Tuples. They annoy if walking the cfg. */
succ = skip_Tuple(get_irn_n(n, i));
set_irn_n(n, i, succ);
......@@ -205,7 +206,7 @@ static int count_outs(ir_node *n) {
}
static ir_node **set_out_edges(ir_node *n, ir_node **free) {
int n_outs, start, i;
int n_outs, start, i, irn_arity;
ir_node *succ;
set_irn_visited(n, get_irg_visited(current_ir_graph));
......@@ -220,7 +221,8 @@ static ir_node **set_out_edges(ir_node *n, ir_node **free) {
n->out[0] = (ir_node *)0;
if (get_irn_op(n) == op_Block) start = 0; else start = -1;
for (i = start; i < get_irn_arity(n); i++) {
irn_arity = get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
succ = get_irn_n(n, i);
/* Recursion */
if (get_irn_visited(succ) < get_irg_visited(current_ir_graph))
......@@ -327,9 +329,9 @@ void compute_ip_outs(ir_graph *irg) { /*irg_walk_func *pre, irg_walk_func *post,
current_ir_graph = get_irp_irg(i);
e = get_irg_end(current_ir_graph);
if (get_irn_visited(e) < get_irg_visited(current_ir_graph)) {
int j;
/* Don't visit the End node. */
/* for (j = 0; j < get_End_n_keepalives(e); j++)
/* int j;
for (j = 0; j < get_End_n_keepalives(e); j++)
cg_walk_2(get_End_keepalive(e, j), pre, post, env);*/
compute_outs(current_ir_graph);
}
......
......@@ -72,5 +72,6 @@ void free_firm (void) {
finish_tarval();
finish_op();
finish_mode();
finish_tpop();
id_finish();
}
......@@ -48,6 +48,7 @@ turn_into_tuple (ir_node *node, int arity)
INLINE void
exchange (ir_node *old, ir_node *nw)
{
assert(get_irn_op(old)->opar != oparity_dynamic);
ir_node *block = old->in[0];
old->op = op_Id;
......
......@@ -46,13 +46,18 @@ static void init_link (ir_node *n, void *env) {
set_irn_link(n, NULL);
}
#if 0 /* Old version. Avoids Ids.
This is not necessary: we do a postwalk, and get_irn_n
removes ids anyways. So it's much cheaper to call the
optimization less often and use the exchange() algorithm. */
static void
optimize_in_place_wrapper (ir_node *n, void *env) {
int i;
int i, irn_arity;
ir_node *optimized, *old;
for (i = 0; i < get_irn_arity(n); i++) {
/* get?irn_n skips Id nodes, so comparison old != optimized does not
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
/* get_irn_n skips Id nodes, so comparison old != optimized does not
show all optimizations. Therefore always set new predecessor. */
old = get_irn_n(n, i);
optimized = optimize_in_place_2(old);
......@@ -64,6 +69,15 @@ optimize_in_place_wrapper (ir_node *n, void *env) {
if (optimized != n) exchange (n, optimized);
}
}
#else
static void
/* Walker callback: replace n by its locally optimized equivalent.
   The walk is a postwalk and get_irn_n skips Id nodes (see the note on
   the disabled old version above in the file), so optimizing only the
   node itself — not its predecessors — is sufficient and cheaper.
   env: unused; required by the irg walker callback signature. */
optimize_in_place_wrapper (ir_node *n, void *env) {
ir_node *optimized = optimize_in_place_2(n);
if (optimized != n) exchange (n, optimized);
}
#endif
void
local_optimize_graph (ir_graph *irg) {
......@@ -116,7 +130,7 @@ get_new_node (ir_node * n)
in a Block. */
static INLINE int
compute_new_arity(ir_node *b) {
int i, res;
int i, res, irn_arity;
int irg_v, block_v;
irg_v = get_irg_block_visited(current_ir_graph);
......@@ -127,8 +141,8 @@ compute_new_arity(ir_node *b) {
return block_v - irg_v;
} else {
/* compute the number of good predecessors */
res = get_irn_arity(b);
for (i = 0; i < get_irn_arity(b); i++)
res = irn_arity = get_irn_arity(b);
for (i = 0; i < irn_arity; i++)
if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
/* save it in the flag. */
set_Block_block_visited(b, irg_v + res);
......@@ -204,7 +218,7 @@ copy_node (ir_node *n, void *env) {
static void
copy_preds (ir_node *n, void *env) {
ir_node *nn, *block;
int i, j;
int i, j, irn_arity;
nn = get_new_node(n);
......@@ -215,7 +229,8 @@ copy_preds (ir_node *n, void *env) {
if (get_irn_opcode(n) == iro_Block) {
/* Don't copy Bad nodes. */
j = 0;
for (i = 0; i < get_irn_arity(n); i++)
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
......@@ -239,7 +254,8 @@ copy_preds (ir_node *n, void *env) {
block = get_nodes_Block(n);
set_irn_n (nn, -1, get_new_node(block));
j = 0;
for (i = 0; i < get_irn_arity(n); i++)
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++)
if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
......@@ -253,7 +269,8 @@ copy_preds (ir_node *n, void *env) {
if (get_irn_arity(n) == 1)
exchange(n, get_irn_n(n, 0));
} else {
for (i = -1; i < get_irn_arity(n); i++)
irn_arity = get_irn_arity(n);
for (i = -1; i < irn_arity; i++)
set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
}
/* Now the new node is complete. We can add it to the hash table for cse.
......@@ -267,7 +284,7 @@ static void
copy_graph (void) {
ir_node *oe, *ne; /* old end, new end */
ir_node *ka; /* keep alive */
int i;
int i, irn_arity;
oe = get_irg_end(current_ir_graph);
/* copy the end node by hand, allocate dynamic in array! */
......@@ -290,7 +307,8 @@ copy_graph (void) {
/** ... and now the keep alives. **/
/* First pick the not marked block nodes and walk them. We must pick these
first, as otherwise we will overlook blocks reachable from Phis. */
for (i = 0; i < get_irn_arity(oe); i++) {
irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
ka = get_irn_n(oe, i);
if ((get_irn_op(ka) == op_Block) &&
(get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
......@@ -302,7 +320,8 @@ copy_graph (void) {
}
/* Now pick the Phis. Here we will keep all! */
for (i = 0; i < get_irn_arity(oe); i++) {
irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
ka = get_irn_n(oe, i);
if ((get_irn_op(ka) == op_Phi)) {
if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
......@@ -552,7 +571,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
ir_node **cf_pred;
ir_node *ret, *phi;
ir_node *cf_op = NULL, *bl;
int arity, n_ret, n_exc, n_res, i, j, rem_opt;
int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
type *called_frame;
if (!get_optimize() || !get_opt_inline()) return;
......@@ -673,7 +692,8 @@ void inline_method(ir_node *call, ir_graph *called_graph) {
set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
/* -- archive keepalives -- */
for (i = 0; i < get_irn_arity(end); i++)
irn_arity = get_irn_arity(end);
for (i = 0; i < irn_arity; i++)
add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
/* The new end node will die. We need not free as the in array is on the obstack:
......@@ -909,7 +929,7 @@ static pdeq *worklist; /* worklist of ir_node*s */
static void
place_floats_early (ir_node *n)
{
int i, start;
int i, start, irn_arity;
/* we must not run into an infinite loop */
assert (irn_not_visited(n));
......@@ -932,7 +952,8 @@ place_floats_early (ir_node *n)
}
/* find the block for this node. */
for (i = 0; i < get_irn_arity(n); i++) {
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
ir_node *dep = get_irn_n(n, i);
ir_node *dep_block;
if ((irn_not_visited(dep)) &&
......@@ -960,7 +981,8 @@ place_floats_early (ir_node *n)
/* Add predecessors of non floating nodes on worklist. */
start = (get_irn_op(n) == op_Block) ? 0 : -1;
for (i = start; i < get_irn_arity(n); i++) {
irn_arity = get_irn_arity(n);
for (i = start; i < irn_arity; i++) {
ir_node *pred = get_irn_n(n, i);
if (irn_not_visited(pred)) {
pdeq_putr (worklist, pred);
......@@ -1001,9 +1023,10 @@ consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
if (get_irn_op(consumer) == op_Phi) {
/* our consumer is a Phi-node, the effective use is in all those
blocks through which the Phi-node reaches producer */
int i;
int i, irn_arity;
ir_node *phi_block = get_nodes_Block(consumer);
for (i = 0; i < get_irn_arity(consumer); i++) {
irn_arity = get_irn_arity(consumer);
for (i = 0; i < irn_arity; i++) {
if (get_irn_n(consumer, i) == producer) {
block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
}
......
......@@ -24,7 +24,7 @@
# include <stdlib.h>
# include "irnode_t.h"
# include "irgraph.h" /* visited flag */
# include "irgraph_t.h" /* visited flag */
# include "irprog.h"
# include "irgwalk.h"
# include "typewalk.h"
......@@ -117,7 +117,7 @@ irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
{
int i;
assert(node && node->kind==k_ir_node);
#if 0 /* safe */
if (get_irn_visited(node) < get_irg_visited(current_ir_graph)) {
set_irn_visited(node, get_irg_visited(current_ir_graph));
......@@ -130,7 +130,20 @@ irg_walk_2(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
if (post) post(node, env);
}
return;
#else /* faster */
if (node->visited < current_ir_graph->visited) {
set_irn_visited(node, current_ir_graph->visited);
if (pre) pre(node, env);
if (node->op != op_Block)
irg_walk_2(get_irn_n(node, -1), pre, post, env);
for (i = get_irn_arity(node) - 1; i >= 0; --i)
irg_walk_2(get_irn_n(node, i), pre, post, env);
if (post) post(node, env);
}
#endif
}
......@@ -352,12 +365,14 @@ void irg_block_walk(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void
assert(get_irn_opcode(block) == iro_Block);
irg_block_walk_2(block, pre, post, env);
/* keepalive: the endless loops ... */
if (get_irn_op(node) == op_End)
for (i = 0; i < get_irn_arity(node); i++) {
if (get_irn_op(node) == op_End) {
int arity = get_irn_arity(node);
for (i = 0; i < arity; i++) {
pred = get_irn_n(node, i);
if (get_irn_op(pred) == op_Block)
irg_block_walk_2(pred, pre, post, env);
}
}
return;
}
......
......@@ -24,6 +24,7 @@
#include "irbackedge_t.h"
#include "irdump.h"
#include "irflag.h"
#include "irop_t.h"
#ifdef DEBUG_libfirm
#include "irprog_t.h"
......@@ -249,11 +250,6 @@ get_irn_inter_n (ir_node *node, int n) {
If it is a block, the entry -1 is NULL. */
INLINE ir_node *
get_irn_n (ir_node *node, int n) {
/* debug @@@ */
if (-1 > n || get_irn_arity(node) <= n) {
printf("pos: %d, arity: %d ", n, get_irn_arity(node));
DDMN(node);
} /**/
assert(node); assert(-1 <= n && n < get_irn_arity(node));
if (interprocedural_view) return get_irn_inter_n (node, n);
return get_irn_intra_n (node, n);
......@@ -1696,11 +1692,7 @@ set_Cast_type (ir_node *node, type *to_tp) {
INLINE int
is_unop (ir_node *node) {
return ( node->op == op_Minus ||
node->op == op_Abs ||
node->op == op_Not ||
node->op == op_Conv ||
node->op == op_Cast );
return (node->op->opar == oparity_unary);
}
INLINE ir_node *
......@@ -1732,7 +1724,9 @@ set_unop_op (ir_node *node, ir_node *op) {
int
is_binop (ir_node *node) {
return (node->op == op_Add ||
return (node->op->opar == oparity_binary);
/* return (node->op == op_Add ||
node->op == op_Cmp ||
node->op == op_Sub ||
node->op == op_Mul ||
node->op == op_Quot ||
......@@ -1745,27 +1739,13 @@ is_binop (ir_node *node) {
node->op == op_Shl ||
node->op == op_Shr ||
node->op == op_Shrs ||
node->op == op_Rot ||
node->op == op_Cmp );
node->op == op_Rot );
*/
}
INLINE ir_node *
get_binop_left (ir_node *node) {
assert (node->op == op_Add ||
node->op == op_Sub ||
node->op == op_Mul ||
node->op == op_Quot ||
node->op == op_DivMod ||
node->op == op_Div ||
node->op == op_Mod ||
node->op == op_And ||
node->op == op_Or ||
node->op == op_Eor ||
node->op == op_Shl ||
node->op == op_Shr ||
node->op == op_Shrs ||
node->op == op_Rot ||
node->op == op_Cmp );
assert (node->op->opar == oparity_binary);
switch (get_irn_opcode (node)) {
case iro_Add : return get_Add_left(node); break;
......@@ -1789,21 +1769,7 @@ get_binop_left (ir_node *node) {
INLINE void
set_binop_left (ir_node *node, ir_node *left) {
assert (node->op == op_Add ||
node->op == op_Sub ||
node->op == op_Mul ||
node->op == op_Quot ||
node->op == op_DivMod ||
node->op == op_Div ||
node->op == op_Mod ||
node->op == op_And ||
node->op == op_Or ||
node->op == op_Eor ||
node->op == op_Shl ||
node->op == op_Shr ||
node->op == op_Shrs ||
node->op == op_Rot ||
node->op == op_Cmp );
assert (node->op->opar == oparity_binary);
switch (get_irn_opcode (node)) {
case iro_Add : set_Add_left(node, left); break;
......@@ -1827,21 +1793,7 @@ set_binop_left (ir_node *node, ir_node *left) {
INLINE ir_node *
get_binop_right (ir_node *node) {
assert (node->op == op_Add ||
node->op == op_Sub ||
node->op == op_Mul ||
node->op == op_Quot ||
node->op == op_DivMod ||
node->op == op_Div ||
node->op == op_Mod ||
node->op == op_And ||
node->op == op_Or ||
node->op == op_Eor ||
node->op == op_Shl ||
node->op == op_Shr ||
node->op == op_Shrs ||
node->op == op_Rot ||
node->op == op_Cmp );
assert (node->op->opar == oparity_binary);
switch (get_irn_opcode (node)) {
case iro_Add : return get_Add_right(node); break;
......@@ -1865,21 +1817,7 @@ get_binop_right (ir_node *node) {
INLINE void
set_binop_right (ir_node *node, ir_node *right) {
assert (node->op == op_Add ||
node->op == op_Sub ||
node->op == op_Mul ||
node->op == op_Quot ||
node->op == op_DivMod ||
node->op == op_Div ||
node->op == op_Mod ||
node->op == op_And ||
node->op == op_Or ||
node->op == op_Eor ||
node->op == op_Shl ||
node->op == op_Shr ||
node->op == op_Shrs ||
node->op == op_Rot ||
node->op == op_Cmp );
assert (node->op->opar == oparity_binary);
switch (get_irn_opcode (node)) {
case iro_Add : set_Add_right(node, right); break;
......@@ -2317,6 +2255,7 @@ skip_Tuple (ir_node *node) {
return node;
}
#if 0
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
......@@ -2344,6 +2283,40 @@ skip_nop (ir_node *node) {
return node;
}
}
#else
/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
than any other approach, as Id chains are resolved and all point to the real node, or
all id's are self loops. */
extern int opt_normalize;
INLINE ir_node *
/* Compacts Id chains: follows a chain of Id nodes and returns the first
   non-Id node, rewriting the traversed Ids to point directly at it.
   Id cycles are compacted to self-loops, in which case the argument
   itself is returned.  Returns node unchanged if it is NULL, not an Id,
   or if normalization (opt_normalize) is disabled.
   Fix: locals were declared after a statement inside the block, which is
   not valid C90 and inconsistent with the rest of this file — hoisted
   the declarations to the top of the block. */
skip_nop (ir_node *node) {
ir_node *pred;
/* don't assert node !!! */

if (!opt_normalize) return node;

/* Don't use get_Id_pred: We get into an endless loop for
self-referencing Ids. */
if (node && (node->op == op_Id) && (node != (pred = node->in[0+1]))) {
ir_node *rem_pred, *res;

if (pred->op != op_Id) return pred; /* shortcut */

rem_pred = pred;
assert (get_irn_arity (node) > 0);

/* Temporarily make node a self-loop so the recursion terminates on
   Id cycles. */
node->in[0+1] = node;
res = skip_nop(rem_pred);
if (res->op == op_Id) /* self-loop */ return node;

node->in[0+1] = res;
return res;
} else {
return node;
}
}
#endif
INLINE ir_node *
skip_Id (ir_node *node) {
......
......@@ -83,7 +83,7 @@ ir_op *op_FuncCall; ir_op *get_op_FuncCall () { return op_FuncCall; }
ir_op *
new_ir_op (opcode code, const char *name, op_pinned p, int labeled, size_t attr_size)
new_ir_op (opcode code, const char *name, op_pinned p, int labeled, op_arity opar, size_t attr_size)
{
ir_op *res;
......@@ -96,6 +96,7 @@ new_ir_op (opcode code, const char *name, op_pinned p, int labeled, size_t attr_
Set labeled = 1 if the edges should be
enumerated in vcg output, otherwise set
labeled = 0. */
res->opar = opar;
return res;
}
......@@ -106,65 +107,65 @@ void free_ir_op (ir_op *code) {
void
init_op(void)
{
op_Block = new_ir_op (iro_Block, "Block", pinned, 1, sizeof (block_attr));
op_Start = new_ir_op (iro_Start, "Start", pinned, 0, sizeof (start_attr));
op_End = new_ir_op (iro_End, "End", pinned, 0, 0);
op_Jmp = new_ir_op (iro_Jmp, "Jmp", pinned, 0, 0);
op_Cond = new_ir_op (iro_Cond, "Cond", pinned, 1, sizeof(cond_attr));
op_Return = new_ir_op (iro_Return, "Return", pinned, 1, 0);
op_Raise = new_ir_op (iro_Raise, "Raise", pinned, 1, 0);
op_Const = new_ir_op (iro_Const, "Const", floats, 0, sizeof (const_attr));
op_SymConst = new_ir_op (iro_SymConst, "SymConst", floats, 0, sizeof (symconst_attr));
op_Sel = new_ir_op (iro_Sel, "Sel", floats, 1, sizeof (sel_attr));
op_InstOf = new_ir_op (iro_InstOf, "InstOf", floats, 1, sizeof (sel_attr));
op_Call = new_ir_op (iro_Call, "Call", pinned, 1, sizeof (call_attr));
op_Add = new_ir_op (iro_Add, "Add", floats, 0, 0);
op_Minus = new_ir_op (iro_Minus, "Minus", floats, 0, 0);
op_Sub = new_ir_op (iro_Sub, "Sub", floats, 1, 0);
op_Mul = new_ir_op (iro_Mul, "Mul", floats, 0, 0);
op_Quot = new_ir_op (iro_Quot, "Quot", pinned, 1, sizeof(struct irnode **));
op_DivMod = new_ir_op (iro_DivMod, "DivMod", pinned, 1, sizeof(struct irnode **));
op_Div = new_ir_op (iro_Div, "Div", pinned, 1, sizeof(struct irnode **));
op_Mod = new_ir_op (iro_Mod, "Mod", pinned, 1, sizeof(struct irnode **));
op_Abs = new_ir_op (iro_Abs, "Abs", floats, 0, 0);
op_And = new_ir_op (iro_And, "And", floats, 0, 0);
op_Or = new_ir_op (iro_Or, "Or", floats, 0, 0);
op_Eor = new_ir_op (iro_Eor, "Eor", floats, 0, 0);
op_Not = new_ir_op (iro_Not, "Not", floats, 0, 0);
op_Cmp = new_ir_op (iro_Cmp, "Cmp", floats, 1, 0);
op_Shl = new_ir_op (iro_Shl, "Shl", floats, 1, 0);
op_Shr = new_ir_op (iro_Shr, "Shr", floats, 1, 0);
op_Shrs = new_ir_op (iro_Shrs, "Shrs", floats, 1, 0);
op_Rot = new_ir_op (iro_Rot, "Rot", floats, 1, 0);
op_Conv = new_ir_op (iro_Conv, "Conv", floats, 0, 0);
op_Cast = new_ir_op (iro_Cast, "Cast", floats, 0, sizeof (cast_attr));
op_Phi = new_ir_op (iro_Phi, "Phi", pinned, 1, sizeof (int));
op_Load = new_ir_op (iro_Load, "Load", pinned, 1, sizeof(struct irnode **));
op_Store = new_ir_op (iro_Store, "Store", pinned, 1, sizeof(struct irnode **));
op_Alloc = new_ir_op (iro_Alloc, "Alloc", pinned, 1, sizeof (alloc_attr));
op_Free = new_ir_op (iro_Free, "Free", pinned, 1, sizeof (type *));
op_Sync = new_ir_op (iro_Sync, "Sync", pinned, 0, 0);
op_Proj = new_ir_op (iro_Proj, "Proj", floats, 0, sizeof (long));
op_Tuple = new_ir_op (iro_Tuple, "Tuple", floats, 1, 0);
op_Id = new_ir_op (iro_Id, "Id", floats, 0, 0);
op_Bad = new_ir_op (iro_Bad, "Bad", floats, 0, 0);
op_Confirm = new_ir_op (iro_Confirm, "Confirm", floats, 1, sizeof (confirm_attr));
op_Unknown = new_ir_op (iro_Unknown, "Unknown", floats, 0, 0);
op_Filter = new_ir_op (iro_Filter, "Filter", pinned, 1, sizeof(filter_attr));
op_Break = new_ir_op (iro_Break, "Break", pinned, 0, 0);
op_CallBegin = new_ir_op (iro_CallBegin, "CallBegin", pinned, 0, sizeof(callbegin_attr));
op_EndReg = new_ir_op (iro_EndReg, "EndReg", pinned, 0, sizeof(end_attr));
op_EndExcept = new_ir_op (iro_EndExcept, "EndExcept", pinned, 0, sizeof(end_attr));
op_FuncCall = new_ir_op (iro_FuncCall, "FuncCall", floats, 1, sizeof (call_attr));
op_Block = new_ir_op (iro_Block, "Block", pinned, 1, oparity_variable, sizeof (block_attr));
op_Start = new_ir_op (iro_Start, "Start", pinned, 0, oparity_zero, sizeof (start_attr));
op_End = new_ir_op (iro_End, "End", pinned, 0, oparity_dynamic, 0);
op_Jmp = new_ir_op (iro_Jmp, "Jmp", pinned, 0, oparity_zero, 0);
op_Cond = new_ir_op (iro_Cond, "Cond", pinned, 1, oparity_any, sizeof(cond_attr));