Commit 27c0106a authored by Robin Redeker's avatar Robin Redeker
Browse files

Added preliminary Conv, Call and Jmp instructions to the amd64 backend.

[r27658]
parent 060aa4c9
......@@ -193,6 +193,96 @@ static void emit_amd64_SymConst(const ir_node *irn)
be_emit_finish_line_gas(irn);
}
/**
 * Returns the successor of @p block in the current block schedule.
 *
 * The schedule is encoded in the blocks' link fields.
 */
static ir_node *sched_next_block(const ir_node *block)
{
	ir_node *next = get_irn_link(block);
	return next;
}
/**
 * Returns the block a control flow node jumps to.
 *
 * The target is stored in the node's link field.
 */
static ir_node *get_cfop_target_block(const ir_node *irn)
{
	ir_node *target = get_irn_link(irn);
	return target;
}
/**
 * Emits the assembler label of the block a control flow node targets.
 */
static void amd64_emit_cfop_target(const ir_node *irn)
{
	be_gas_emit_block_name(get_cfop_target_block(irn));
}
/**
 * Emit a Jmp: an unconditional branch to the node's target block.
 * When the target is the next block in the schedule, the jump is
 * omitted and only a fall-through comment is emitted.
 */
static void emit_amd64_Jmp(const ir_node *node)
{
ir_node *block, *next_block;
/* for now, the code works for scheduled and non-scheduled blocks */
block = get_nodes_block(node);
/* we have a block schedule (stored in the blocks' link fields) */
next_block = sched_next_block(block);
if (get_cfop_target_block(node) != next_block) {
be_emit_cstring("\tjmp ");
amd64_emit_cfop_target(node);
} else {
/* target is the fall-through successor: no jump needed */
be_emit_cstring("\t/* fallthrough to ");
amd64_emit_cfop_target(node);
be_emit_cstring(" */");
}
be_emit_finish_line_gas(node);
}
/**
 * Emits code for a Call node.
 *
 * Only direct calls (to a known entity) are supported; a call with a
 * NULL entity (indirect call) merely emits a FIXME comment for now.
 */
static void emit_be_Call(const ir_node *node)
{
	ir_entity *entity = be_Call_get_entity(node);

	if (entity) {
		be_emit_cstring("\tcall ");
		/* reuse the entity fetched above instead of querying it again */
		be_gas_emit_entity(entity);
		be_emit_finish_line_gas(node);
	} else {
		be_emit_pad_comment();
		be_emit_cstring("/* FIXME: call NULL entity?! */\n");
	}
}
/**
 * Emit a register-to-register copy (be_Copy node).
 */
static void emit_be_Copy(const ir_node *irn)
{
	ir_mode *mode = get_irn_mode(irn);

	/* source and destination register coincide: nothing to emit */
	if (get_in_reg(irn, 0) == get_out_reg(irn, 0))
		return;

	if (mode_is_float(mode))
		panic("emit_be_Copy: move not supported for FP");
	if (!mode_is_data(mode))
		panic("emit_be_Copy: move not supported for this mode");

	be_emit_cstring("\tmov ");
	amd64_emit_source_register(irn, 0);
	be_emit_cstring(", ");
	amd64_emit_dest_register(irn, 0);
	be_emit_finish_line_gas(irn);
}
/**
* Emits code for a return.
......@@ -229,11 +319,16 @@ static void amd64_register_emitters(void)
amd64_register_spec_emitters();
set_emitter(op_amd64_SymConst, emit_amd64_SymConst);
set_emitter(op_amd64_Jmp, emit_amd64_Jmp);
set_emitter(op_be_Return, emit_be_Return);
set_emitter(op_be_Call, emit_be_Call);
set_emitter(op_be_Copy, emit_be_Copy);
set_emitter(op_be_Start, emit_nothing);
set_emitter(op_be_Keep, emit_nothing);
set_emitter(op_be_Barrier, emit_nothing);
set_emitter(op_be_IncSP, emit_nothing);
set_emitter(op_Phi, emit_nothing);
}
typedef void (*emit_func_ptr) (const ir_node *);
......
......@@ -45,6 +45,12 @@
#include "amd64_new_nodes.h"
#include "gen_amd64_regalloc_if.h"
/* Sets the load/store ("input") mode stored in the node's attributes. */
void set_amd64_ls_mode(ir_node *node, ir_mode *mode)
{
	get_amd64_attr(node)->ls_mode = mode;
}
/**
* Dumper interface for dumping amd64 nodes in vcg.
* @param F the output file
......@@ -121,7 +127,6 @@ const amd64_SymConst_attr_t *get_amd64_SymConst_attr_const(const ir_node *node)
return sym_attr;
}
/**
* Returns the argument register requirements of a amd64 node.
*/
......
......@@ -38,6 +38,11 @@
* |___/
***************************************************************************************************/
/**
* Sets the input mode of the node.
*/
void set_amd64_ls_mode(ir_node *n, ir_mode *mode);
/**
* Returns the attributes of an amd64 node.
*/
......
......@@ -35,6 +35,7 @@ struct amd64_attr_t
{
const arch_register_req_t **in_req; /**< register requirements for arguments */
const arch_register_req_t **out_req; /**< register requirements for results */
ir_mode *ls_mode; /**< Stores the "input" mode */
};
struct amd64_immediate_attr_t
......
......@@ -101,9 +101,9 @@ $arch = "amd64";
{ name => "rax", type => 1 },
{ name => "rcx", type => 1 },
{ name => "rdx", type => 1 },
{ name => "rsi", type => 1 },
{ name => "rdi", type => 1 },
{ name => "rbx", type => 2 },
{ name => "rsi", type => 2 },
{ name => "rdi", type => 2 },
{ name => "rbp", type => 2 },
{ name => "rsp", type => 4 }, # stackpointer?
{ name => "r8", type => 1 },
......@@ -114,27 +114,28 @@ $arch = "amd64";
{ name => "r13", type => 2 },
{ name => "r14", type => 2 },
{ name => "r15", type => 2 },
# { name => "gp_NOREG", type => 4 }, # we need a dummy register for NoReg nodes
{ mode => "mode_Iu" }
],
fp => [
{ name => "xmm0", type => 1 },
{ name => "xmm1", type => 1 },
{ name => "xmm2", type => 1 },
{ name => "xmm3", type => 1 },
{ name => "xmm4", type => 1 },
{ name => "xmm5", type => 1 },
{ name => "xmm6", type => 1 },
{ name => "xmm7", type => 1 },
{ name => "xmm8", type => 1 },
{ name => "xmm9", type => 1 },
{ name => "xmm10", type => 1 },
{ name => "xmm11", type => 1 },
{ name => "xmm12", type => 1 },
{ name => "xmm13", type => 1 },
{ name => "xmm14", type => 1 },
{ name => "xmm15", type => 1 },
{ mode => "mode_D" }
]
# fp => [
# { name => "xmm0", type => 1 },
# { name => "xmm1", type => 1 },
# { name => "xmm2", type => 1 },
# { name => "xmm3", type => 1 },
# { name => "xmm4", type => 1 },
# { name => "xmm5", type => 1 },
# { name => "xmm6", type => 1 },
# { name => "xmm7", type => 1 },
# { name => "xmm8", type => 1 },
# { name => "xmm9", type => 1 },
# { name => "xmm10", type => 1 },
# { name => "xmm11", type => 1 },
# { name => "xmm12", type => 1 },
# { name => "xmm13", type => 1 },
# { name => "xmm14", type => 1 },
# { name => "xmm15", type => 1 },
# { mode => "mode_D" }
# ]
);
%emit_templates = (
......@@ -188,7 +189,7 @@ Add => {
out => [ "gp" ] },
in => [ "left", "right" ],
emit => ". mov %S2, %D1\n"
. ". add %S1, %D1\n",
. ". add %S1, %D1\n",
outs => [ "res" ],
mode => "mode_Iu",
},
......@@ -208,4 +209,28 @@ SymConst => {
reg_req => { out => [ "gp" ] },
mode => 'mode_Iu',
},
Conv => {
state => "exc_pinned",
attr => "ir_mode *smaller_mode",
init_attr => "attr->ls_mode = smaller_mode;",
reg_req => { in => [ "gp" ], out => [ "gp" ] },
ins => [ "val" ],
outs => [ "res" ],
mode => 'mode_Iu',
},
Jmp => {
state => "pinned",
op_flags => "X",
reg_req => { out => [ "none" ] },
mode => "mode_X",
},
#NoReg_GP => {
# state => "pinned",
# op_flags => "c|NB|NI",
# reg_req => { out => [ "gp_NOREG:I" ] },
# units => [],
# emit => "",
# latency => 0,
# mode => "mode_Iu",
#},
);
......@@ -32,6 +32,7 @@
#include "irvrfy.h"
#include "ircons.h"
#include "iropt_t.h"
#include "error.h"
#include "debug.h"
#include "../benode.h"
......@@ -49,8 +50,16 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/** holds the current code generator during transformation */
static amd64_code_gen_t *env_cg;
///* its enough to have those once */
//static ir_node *nomem, *noreg_GP;
/* Some support functions: */
/** Returns non-zero if values of the given mode live in gp registers. */
static inline int mode_needs_gp_reg(ir_mode *mode)
{
	if (mode_is_int(mode) || mode_is_reference(mode))
		return 1;
	return 0;
}
/**
* Create a DAG constructing a given Const.
*
......@@ -70,7 +79,7 @@ static ir_node *create_const_graph(ir_node *irn, ir_node *block)
}
value = get_tarval_long(tv);
printf ("TEST GENERATE %d\n", value);
//d// printf ("TEST GENERATE %d\n", value);
return new_bd_amd64_Immediate(dbgi, block, value);
}
......@@ -129,7 +138,173 @@ static ir_node *gen_Add(ir_node *node) {
return res;
}
/** Transforms a generic Jmp into an amd64 Jmp node. */
static ir_node *gen_Jmp(ir_node *node)
{
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_node  *old_block = get_nodes_block(node);
	ir_node  *new_block = be_transform_node(old_block);

	return new_bd_amd64_Jmp(dbgi, new_block);
}
/** Duplicates a be_Call node and marks it as modifying the flags. */
static ir_node *gen_be_Call(ir_node *node)
{
	ir_node *call = be_duplicate_node(node);

	/* a call clobbers the flags register */
	arch_irn_add_flags(call, arch_irn_flags_modify_flags);
	return call;
}
///**
// * Create an And that will zero out upper bits.
// *
// * @param dbgi debug info
// * @param block the basic block
// * @param op the original node
// * param src_bits number of lower bits that will remain
// */
//static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
// int src_bits)
//{
// if (src_bits == 8) {
// return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
// } else if (src_bits == 16) {
// ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
// ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
// return rshift;
// } else {
// panic("zero extension only supported for 8 and 16 bits");
// }
//}
//
///**
// * Generate code for a sign extension.
// */
//static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
// int src_bits)
//{
// int shift_width = 32 - src_bits;
// ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
// ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
// return rshift_node;
//}
//
//static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
// ir_mode *orig_mode)
//{
// int bits = get_mode_size_bits(orig_mode);
// if (bits == 32)
// return op;
//
// if (mode_is_signed(orig_mode)) {
// return gen_sign_extension(dbgi, block, op, bits);
// } else {
// return gen_zero_extension(dbgi, block, op, bits);
// }
//}
//
///**
// * returns true if it is assured, that the upper bits of a node are "clean"
// * which means for a 16 or 8 bit value, that the upper bits in the register
// * are 0 for unsigned and a copy of the last significant bit for signed
// * numbers.
// */
//static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
//{
// (void) transformed_node;
// (void) mode;
// /* TODO */
// return false;
//}
/**
 * Transforms a Phi node: gp-register Phis are forced to mode_Iu, and the
 * register requirement of the result is set accordingly.
 *
 * The Phi keeps its ORIGINAL (untransformed) predecessors for now; Phi
 * loops would otherwise recurse, so the inputs are fixed up later by the
 * backend transform framework.
 */
static ir_node *gen_Phi(ir_node *node)
{
const arch_register_req_t *req;
ir_node *block = be_transform_node(get_nodes_block(node));
ir_graph *irg = current_ir_graph;
dbg_info *dbgi = get_irn_dbg_info(node);
ir_mode *mode = get_irn_mode(node);
ir_node *phi;
if (mode_needs_gp_reg(mode)) {
/* normalize every int/reference Phi to mode_Iu for now */
mode = mode_Iu;
req = amd64_reg_classes[CLASS_amd64_gp].class_req;
} else {
/* non-gp Phi: no register constraint */
req = arch_no_register_req;
}
/* phi nodes allow loops, so we use the old arguments for now
* and fix this later */
/* NOTE(review): the "+ 1" presumably skips the block entry of the
 * in-array — confirm against get_irn_in's layout */
phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node),
get_irn_in(node) + 1);
copy_node_attr(irg, node, phi);
be_duplicate_deps(node, phi);
arch_set_out_register_req(phi, 0, req);
/* make sure the predecessors get transformed/enqueued as well */
be_enqueue_preds(node);
return phi;
}
/**
 * Transforms a Conv node.
 *
 * Float conversions are not implemented yet.  For integer/reference
 * modes an amd64 Conv node is created that carries the smaller of the
 * two modes, from which the needed sign/zero extension can later be
 * derived.  Conversions between equal modes or equal bit sizes are
 * dropped entirely.
 *
 * @return The created amd64 Conv node, or the (transformed) operand if
 *         no conversion is necessary
 */
static ir_node *gen_Conv(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op       = get_Conv_op(node);
	ir_node  *new_op   = be_transform_node(op);
	ir_mode  *src_mode = get_irn_mode(op);
	ir_mode  *dst_mode = get_irn_mode(node);
	dbg_info *dbgi     = get_irn_dbg_info(node);

	if (src_mode == dst_mode)
		return new_op;

	if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
		panic("float not supported yet");
	} else { /* complete in gp registers */
		int      src_bits = get_mode_size_bits(src_mode);
		int      dst_bits = get_mode_size_bits(dst_mode);
		ir_mode *min_mode;

		if (src_bits == dst_bits) {
			/* kill unnecessary conv */
			return new_op;
		}

		/* the smaller mode determines how many bits of the value are
		 * significant (previously an unused min_bits was computed too) */
		min_mode = src_bits < dst_bits ? src_mode : dst_mode;

		/* TODO: skip the Conv when the upper bits of new_op are already
		 * clean, and emit real sign/zero extension instead (compare the
		 * arm backend's gen_sign_extension/gen_zero_extension). */
		return new_bd_amd64_Conv(dbgi, block, new_op, min_mode);
	}
}
/* Boilerplate code for transformation: */
......@@ -137,6 +312,8 @@ static void amd64_pretransform_node(void)
{
amd64_code_gen_t *cg = env_cg;
(void) cg;
// nomem = get_irg_no_mem(current_ir_graph);
}
static void set_transformer(ir_op *op, be_transform_func amd64_transform_func)
......@@ -151,6 +328,10 @@ static void amd64_register_transformers(void)
set_transformer(op_Const, gen_Const);
set_transformer(op_SymConst, gen_SymConst);
set_transformer(op_Add, gen_Add);
set_transformer(op_be_Call, gen_be_Call);
set_transformer(op_Conv, gen_Conv);
set_transformer(op_Jmp, gen_Jmp);
set_transformer(op_Phi, gen_Phi);
}
......
......@@ -198,6 +198,27 @@ static void *amd64_cg_init(be_irg_t *birg)
}
typedef ir_node *(*create_const_node_func) (dbg_info *dbg, ir_node *block);
/**
 * Used to create per-graph unique pseudo nodes.
 *
 * The node is built lazily: the first request constructs it in the start
 * block, pins its register, and caches it in *place; later requests
 * return the cached node.
 */
static inline ir_node *create_const(amd64_code_gen_t *cg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
	ir_node *res = *place;

	if (res == NULL) {
		ir_node *block = get_irg_start_block(cg->irg);
		res = func(NULL, block);
		arch_set_irn_register(res, reg);
		*place = res;
	}
	return res;
}
const arch_isa_if_t amd64_isa_if;
static amd64_isa_t amd64_isa_template = {
......@@ -275,10 +296,8 @@ static const arch_register_class_t *amd64_get_reg_class(unsigned i)
*/
static const arch_register_class_t *amd64_get_reg_class_for_mode(const ir_mode *mode)
{
if (mode_is_float(mode))
return &amd64_reg_classes[CLASS_amd64_fp];
else
return &amd64_reg_classes[CLASS_amd64_gp];
assert(!mode_is_float(mode));
return &amd64_reg_classes[CLASS_amd64_gp];
}
......@@ -406,10 +425,10 @@ static void amd64_get_call_abi(const void *self, ir_type *method_type,
for (i = 0; i < n; i++) {
tp = get_method_param_type(method_type, i);
mode = get_type_mode(tp);
printf ("MODE %p %p XX %d\n", mode, mode_Iu, i);
//d// printf ("MODE %p %p XX %d\n", mode, mode_Iu, i);
if (!no_reg && (i == 0 || i == 1) && mode == mode_Iu) {
printf("TEST%d\n", i);
//d// printf("TEST%d\n", i);
be_abi_call_param_reg(abi, i,
i == 0 ? &amd64_gp_regs[REG_RDI]
: &amd64_gp_regs[REG_RSI],
......
......@@ -41,6 +41,7 @@ struct amd64_code_gen_t {
amd64_isa_t *isa; /**< the isa instance */
be_irg_t *birg; /**< The be-irg (contains additional information about the irg) */
char dump; /**< set to 1 if graphs should be dumped */
ir_node *noreg_gp; /**< unique NoReg_GP node */
};
struct amd64_isa_t {
......@@ -59,4 +60,6 @@ struct amd64_transform_env_t {
ir_mode *mode; /**< The mode of the irn */
};
ir_node *amd64_new_NoReg_gp(amd64_code_gen_t *cg);
#endif
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment