Commit b7cb5592 authored by sebastian.buchwald1's avatar sebastian.buchwald1
Browse files

Added a new builtin for saturated increment.

The builtin can be used to generate fast code for unsigned division by constant.

Code generation is supported for the IA32 and SPARC backends.
Since our ARM backend currently has no add-with-carry instruction,
the builtin is not supported on ARM.
The same holds for the AMD64 backend, which does not yet support division.
parent 37f1b0a3
......@@ -347,7 +347,8 @@ typedef enum ir_builtin_kind {
ir_bk_inport, /**< in port */
ir_bk_outport, /**< out port */
ir_bk_inner_trampoline, /**< address of a trampoline for GCC inner functions */
ir_bk_last = ir_bk_inner_trampoline,
ir_bk_saturating_increment, /**< saturating increment */
ir_bk_last = ir_bk_saturating_increment,
} ir_builtin_kind;
/**
......
......@@ -1221,6 +1221,7 @@ static ir_node *gen_Builtin(ir_node *node)
case ir_bk_outport:
case ir_bk_inport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
break;
}
panic("Builtin %s not implemented", get_builtin_kind_name(kind));
......@@ -1254,6 +1255,7 @@ static ir_node *gen_Proj_Builtin(ir_node *proj)
return new_node;
case ir_bk_inport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
break;
}
panic("Builtin %s not implemented", get_builtin_kind_name(kind));
......
......@@ -5243,6 +5243,31 @@ static ir_node *gen_inport(ir_node *node)
return res;
}
/*
* Transform saturating increment.
*/
/**
 * Transform a saturating-increment builtin into IA32 code.
 *
 * Emits "add 1" followed by "sbb 0": sbb subtracts its operand plus the
 * carry flag, so when the increment wraps (carry set) the result is pulled
 * back down by one, clamping the value at the all-ones maximum.
 */
static ir_node *gen_saturating_increment(ir_node *node)
{
	dbg_info *const dbgi    = get_irn_dbg_info(node);
	ir_node  *const block   = be_transform_node(get_nodes_block(node));
	ir_node  *const operand = be_transform_node(get_Builtin_param(node, 0));
	ir_graph *const irg     = get_Block_irg(block);

	/* operand + 1, producing both the sum and the carry flag */
	ir_node *const imm_one = ia32_create_Immediate(irg, NULL, 0, 1);
	ir_node *const add     = new_bd_ia32_Add(dbgi, block, noreg_GP, noreg_GP, nomem, operand, imm_one);
	set_irn_mode(add, mode_T);
	set_ia32_commutative(add);
	SET_IA32_ORIG_NODE(add, node);

	ir_node *const sum   = new_rd_Proj(dbgi, add, mode_Iu, pn_ia32_Add_res);
	ir_node *const carry = new_rd_Proj(dbgi, add, mode_Iu, pn_ia32_Add_flags);

	/* sum - 0 - carry: undoes the increment exactly when it overflowed */
	ir_node *const imm_zero = ia32_create_Immediate(irg, NULL, 0, 0);
	ir_node *const sbb      = new_bd_ia32_Sbb(dbgi, block, noreg_GP, noreg_GP, nomem, sum, imm_zero, carry);
	set_ia32_ls_mode(sbb, mode_Iu);
	SET_IA32_ORIG_NODE(sbb, node);

	return sbb;
}
/**
* Transform a builtin inner trampoline
*/
......@@ -5369,6 +5394,8 @@ static ir_node *gen_Builtin(ir_node *node)
return gen_inport(node);
case ir_bk_inner_trampoline:
return gen_inner_trampoline(node);
case ir_bk_saturating_increment:
return gen_saturating_increment(node);
}
panic("Builtin %s not implemented", get_builtin_kind_name(kind));
}
......@@ -5391,6 +5418,7 @@ static ir_node *gen_Proj_Builtin(ir_node *proj)
case ir_bk_parity:
case ir_bk_popcount:
case ir_bk_bswap:
case ir_bk_saturating_increment:
assert(get_Proj_proj(proj) == pn_Builtin_max+1);
return new_node;
case ir_bk_trap:
......
......@@ -422,7 +422,8 @@ static void sparc_lower_for_target(void)
if (sparc_isa_template.fpu_arch == SPARC_FPU_ARCH_SOFTFLOAT)
lower_floating_point();
lower_builtins(0, NULL);
ir_builtin_kind builtin_exceptions[1] = {ir_bk_saturating_increment};
lower_builtins(1, builtin_exceptions);
ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
......
......@@ -2370,6 +2370,87 @@ static ir_node *gen_Phi(ir_node *node)
return be_transform_phi(node, req);
}
/*
* Transform saturating increment.
*/
/**
 * Transform a saturating-increment builtin into SPARC code.
 *
 * "addcc %op, 1" sets the carry flag on unsigned overflow; the following
 * subx subtracts %g0 plus that carry, so a wrapped sum is reduced by one
 * again and the result saturates at the all-ones maximum.
 */
static ir_node *gen_saturating_increment(ir_node *node)
{
	dbg_info *const dbgi    = get_irn_dbg_info(node);
	ir_node  *const block   = be_transform_node(get_nodes_block(node));
	ir_node  *const operand = be_transform_node(get_Builtin_param(node, 0));

	/* increment and record the carry */
	ir_node *const addcc = new_bd_sparc_AddCC_imm(dbgi, block, operand, NULL, 1);
	ir_node *const sum   = new_rd_Proj(dbgi, addcc, mode_Iu, pn_sparc_AddCC_res);
	ir_node *const carry = new_rd_Proj(dbgi, addcc, mode_Iu, pn_sparc_AddCC_flags);

	/* sum - %g0 - carry */
	ir_graph *const irg = get_Block_irg(block);
	ir_node  *const g0  = get_g0(irg);
	return new_bd_sparc_SubX_reg(dbgi, block, sum, g0, carry);
}
/**
* Transform Builtin node.
*/
/**
 * Transform a Builtin node.
 *
 * Only saturating increment is implemented for SPARC; all other builtin
 * kinds are expected to have been lowered before reaching the backend.
 */
static ir_node *gen_Builtin(ir_node *node)
{
	ir_builtin_kind const kind = get_Builtin_kind(node);
	switch (kind) {
	case ir_bk_saturating_increment:
		return gen_saturating_increment(node);

	case ir_bk_bswap:
	case ir_bk_clz:
	case ir_bk_ctz:
	case ir_bk_debugbreak:
	case ir_bk_ffs:
	case ir_bk_frame_address:
	case ir_bk_inner_trampoline:
	case ir_bk_inport:
	case ir_bk_outport:
	case ir_bk_parity:
	case ir_bk_popcount:
	case ir_bk_prefetch:
	case ir_bk_return_address:
	case ir_bk_trap:
		/* Should not occur in backend. */
		break;
	}
	panic("Builtin %s not implemented", get_builtin_kind_name(kind));
}
/**
* Transform Proj(Builtin) node.
*/
/**
 * Transform Proj(Builtin) node.
 *
 * For saturating increment the transformed builtin directly represents the
 * single value result, so the Proj maps to the transformed node itself.
 */
static ir_node *gen_Proj_Builtin(ir_node *proj)
{
	ir_node         *const builtin     = get_Proj_pred(proj);
	ir_node         *const transformed = be_transform_node(builtin);
	ir_builtin_kind  const kind        = get_Builtin_kind(builtin);
	switch (kind) {
	case ir_bk_saturating_increment:
		assert(get_Proj_proj(proj) == pn_Builtin_max+1);
		return transformed;

	case ir_bk_bswap:
	case ir_bk_clz:
	case ir_bk_ctz:
	case ir_bk_debugbreak:
	case ir_bk_ffs:
	case ir_bk_frame_address:
	case ir_bk_inner_trampoline:
	case ir_bk_inport:
	case ir_bk_outport:
	case ir_bk_parity:
	case ir_bk_popcount:
	case ir_bk_prefetch:
	case ir_bk_return_address:
	case ir_bk_trap:
		/* Should not occur in backend. */
		break;
	}
	panic("Builtin %s not implemented", get_builtin_kind_name(kind));
}
/**
* Transform a Proj from a Load.
*/
......@@ -2623,6 +2704,8 @@ static ir_node *gen_Proj(ir_node *node)
return gen_Proj_ASM(node);
case iro_Alloc:
return gen_Proj_Alloc(node);
case iro_Builtin:
return gen_Proj_Builtin(node);
case iro_Store:
return gen_Proj_Store(node);
case iro_Load:
......@@ -2678,6 +2761,7 @@ static void sparc_register_transformers(void)
be_set_transform_function(op_Add, gen_Add);
be_set_transform_function(op_Alloc, gen_Alloc);
be_set_transform_function(op_And, gen_And);
be_set_transform_function(op_Builtin, gen_Builtin);
be_set_transform_function(op_Call, gen_Call);
be_set_transform_function(op_Cmp, gen_Cmp);
be_set_transform_function(op_Cond, gen_Cond);
......
......@@ -252,6 +252,7 @@ static void symtbl_init(void)
INSERTENUM(tt_builtin_kind, ir_bk_inport);
INSERTENUM(tt_builtin_kind, ir_bk_outport);
INSERTENUM(tt_builtin_kind, ir_bk_inner_trampoline);
INSERTENUM(tt_builtin_kind, ir_bk_saturating_increment);
INSERTENUM(tt_cond_jmp_predicate, COND_JMP_PRED_NONE);
INSERTENUM(tt_cond_jmp_predicate, COND_JMP_PRED_TRUE);
......
......@@ -742,6 +742,7 @@ const char *get_builtin_kind_name(ir_builtin_kind kind)
X(ir_bk_inport);
X(ir_bk_outport);
X(ir_bk_inner_trampoline);
X(ir_bk_saturating_increment);
}
return "<unknown>";
#undef X
......
......@@ -42,6 +42,7 @@ static const char *get_builtin_name(ir_builtin_kind kind)
case ir_bk_inport:
case ir_bk_outport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
break;
}
abort();
......@@ -143,6 +144,7 @@ static void lower_builtin(ir_node *node, void *env)
case ir_bk_inport:
case ir_bk_outport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
/* can't do anything about these, backend will probably fail now */
panic("Can't lower Builtin node of kind %+F", node);
}
......
......@@ -2711,6 +2711,7 @@ static void lower_Builtin(ir_node *builtin, ir_mode *mode)
case ir_bk_inport:
case ir_bk_outport:
case ir_bk_inner_trampoline:
case ir_bk_saturating_increment:
/* Nothing to do. */
return;
case ir_bk_bswap:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment