Commit 73adc744 authored by Robin Redeker

amd64: Cmp, Cond and Jcc seem to work for now.

[r27660]
parent c03098df
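A hypothetical C input of the sort this commit targets: the (a < b) test becomes a Cmp/Cond pair in the middle end, which the transformers and emitters below turn into amd64 Cmp and Jcc nodes (the function and variable names are made up for illustration):

    int smaller(int a, int b)
    {
        if (a < b)
            return a;
        return b;
    }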
......@@ -117,9 +117,9 @@ static const arch_register_t *get_out_reg(const ir_node *node, int pos)
void amd64_emit_immediate(const ir_node *node)
{
const amd64_immediate_attr_t *attr = get_amd64_immediate_attr_const (node);
const amd64_attr_t *attr = get_amd64_attr_const (node);
be_emit_char('$');
be_emit_irprintf("0x%X", attr->imm_value);
be_emit_irprintf("0x%X", attr->ext.imm_value);
}
void amd64_emit_source_register(const ir_node *node, int pos)
......@@ -185,7 +185,6 @@ static void emit_amd64_SymConst(const ir_node *irn)
// }
// label = entry->label;
be_gas_emit_entity(attr->entity);
be_emit_char(':');
be_emit_finish_line_gas(irn);
......@@ -242,6 +241,85 @@ static void emit_amd64_Jmp(const ir_node *node)
be_emit_finish_line_gas(node);
}
/**
* Emit a Compare with conditional branch.
*/
static void emit_amd64_Jcc(const ir_node *irn)
{
const ir_edge_t *edge;
const ir_node *proj_true = NULL;
const ir_node *proj_false = NULL;
const ir_node *block;
const ir_node *next_block;
const char *suffix;
const amd64_attr_t *attr = get_irn_generic_attr_const(irn);
int proj_num = attr->ext.pnc;
ir_node *op1 = get_irn_n(irn, 0);
const amd64_attr_t *cmp_attr = get_irn_generic_attr_const(op1);
bool is_signed = !cmp_attr->data.cmp_unsigned;
assert(is_amd64_Cmp(op1));
foreach_out_edge(irn, edge) {
ir_node *proj = get_edge_src_irn(edge);
long nr = get_Proj_proj(proj);
if (nr == pn_Cond_true) {
proj_true = proj;
} else {
proj_false = proj;
}
}
if (cmp_attr->data.ins_permuted) {
proj_num = get_mirrored_pnc(proj_num);
}
/* for now, the code works for scheduled and non-scheduled blocks */
block = get_nodes_block(irn);
/* we have a block schedule */
next_block = sched_next_block(block);
assert(proj_num != pn_Cmp_False);
assert(proj_num != pn_Cmp_True);
if (get_cfop_target_block(proj_true) == next_block) {
/* exchange both proj's so the second one can be omitted */
const ir_node *t = proj_true;
proj_true = proj_false;
proj_false = t;
proj_num = get_negated_pnc(proj_num, mode_Iu);
}
switch (proj_num) {
case pn_Cmp_Eq: suffix = "e"; break;
case pn_Cmp_Lt: suffix = is_signed ? "l" : "b"; break;
case pn_Cmp_Le: suffix = is_signed ? "le" : "be"; break;
case pn_Cmp_Gt: suffix = is_signed ? "g" : "a"; break;
case pn_Cmp_Ge: suffix = is_signed ? "ge" : "ae"; break;
case pn_Cmp_Lg: suffix = "ne"; break;
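/* Leg == less|equal|greater, which is always true for integer compares;
   the "j" prefix printed below plus "mp" yields an unconditional jmp */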
case pn_Cmp_Leg: suffix = "mp"; break;
default: panic("Cmp has unsupported pnc");
}
/* emit the true proj */
be_emit_irprintf("\tj%s ", suffix);
amd64_emit_cfop_target(proj_true);
be_emit_finish_line_gas(proj_true);
if (get_cfop_target_block(proj_false) == next_block) {
be_emit_cstring("\t/* fallthrough to ");
amd64_emit_cfop_target(proj_false);
be_emit_cstring(" */");
be_emit_finish_line_gas(proj_false);
} else {
be_emit_cstring("\tjmp ");
amd64_emit_cfop_target(proj_false);
be_emit_finish_line_gas(proj_false);
}
}
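A rough sketch of the GAS output this emitter and the Cmp emit template further down aim for, assuming a signed less-than whose true and false targets both differ from the fall-through block (register and label names are placeholders):

    cmp %rsi, %rdi    /* amd64_Cmp, emit template ". cmp %S1, %S2" */
    jl .LBB1_2        /* emit_amd64_Jcc: suffix "l" for signed pn_Cmp_Lt */
    jmp .LBB1_3       /* false projection does not fall through */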
/**
* Emits code for a call.
*/
......@@ -320,6 +398,7 @@ static void amd64_register_emitters(void)
set_emitter(op_amd64_SymConst, emit_amd64_SymConst);
set_emitter(op_amd64_Jmp, emit_amd64_Jmp);
set_emitter(op_amd64_Jcc, emit_amd64_Jcc);
set_emitter(op_be_Return, emit_be_Return);
set_emitter(op_be_Call, emit_be_Call);
set_emitter(op_be_Copy, emit_be_Copy);
......@@ -408,11 +487,11 @@ void amd64_gen_routine(const amd64_code_gen_t *cg, ir_graph *irg)
irg_block_walk_graph(irg, amd64_gen_labels, NULL, NULL);
n = ARR_LEN(blk_sched);
for (i = 0; i < n; ++i) {
for (i = 0; i < n; i++) {
ir_node *block = blk_sched[i];
ir_node *prev = i > 0 ? blk_sched[i-1] : NULL;
ir_node *next = (i + 1) < n ? blk_sched[i+1] : NULL;
set_irn_link(block, prev);
set_irn_link(block, next);
}
for (i = 0; i < n; ++i) {
......
......@@ -101,24 +101,6 @@ amd64_attr_t *get_amd64_attr(ir_node *node)
return (amd64_attr_t *)get_irn_generic_attr(node);
}
const amd64_immediate_attr_t *get_amd64_immediate_attr_const(const ir_node *node)
{
const amd64_attr_t *attr = get_amd64_attr_const(node);
const amd64_immediate_attr_t *imm_attr = CONST_CAST_AMD64_ATTR(amd64_immediate_attr_t, attr);
return imm_attr;
}
/*
static amd64_immediate_attr_t *get_amd64_immediate_attr(ir_node *node)
{
amd64_attr_t *attr = get_amd64_attr(node);
amd64_immediate_attr_t *imm_attr = CAST_AMD64_ATTR(amd64_immediate_attr_t, attr);
return imm_attr;
}
*/
const amd64_SymConst_attr_t *get_amd64_SymConst_attr_const(const ir_node *node)
{
const amd64_attr_t *attr = get_amd64_attr_const(node);
......@@ -164,7 +146,8 @@ static void init_amd64_attributes(ir_node *node, arch_irn_flags_t flags,
{
ir_graph *irg = get_irn_irg(node);
struct obstack *obst = get_irg_obstack(irg);
amd64_attr_t *attr = get_amd64_attr(node);
amd64_attr_t *attr = get_amd64_attr(node);
backend_info_t *info;
(void) execution_units;
......@@ -174,15 +157,11 @@ static void init_amd64_attributes(ir_node *node, arch_irn_flags_t flags,
info = be_get_info(node);
info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_res);
memset(info->out_infos, 0, n_res * sizeof(info->out_infos[0]));
}
/**
* Initialize immediate attributes.
*/
static void init_amd64_immediate_attributes(ir_node *node, unsigned imm_value)
{
amd64_immediate_attr_t *attr = get_irn_generic_attr (node);
attr->imm_value = imm_value;
attr->data.ins_permuted = 0;
attr->data.cmp_unsigned = 0;
attr->ext.pnc = 0;
attr->ext.imm_value = 0;
}
/**
......@@ -206,27 +185,13 @@ static int cmp_amd64_attr_SymConst(ir_node *a, ir_node *b)
return 0;
}
/** Compare node attributes for Immediates. */
static int cmp_amd64_attr_immediate(ir_node *a, ir_node *b)
{
const amd64_immediate_attr_t *attr_a = get_amd64_immediate_attr_const(a);
const amd64_immediate_attr_t *attr_b = get_amd64_immediate_attr_const(b);
if (attr_a->imm_value != attr_b->imm_value)
return 1;
return 0;
}
/** Compare common amd64 node attributes. */
static int cmp_amd64_attr(ir_node *a, ir_node *b)
{
const amd64_attr_t *attr_a = get_amd64_attr_const(a);
const amd64_attr_t *attr_b = get_amd64_attr_const(b);
(void) attr_a;
(void) attr_b;
return 0;
return attr_a->ext.imm_value != attr_b->ext.imm_value;
}
/* Include the generated constructor functions */
......
......@@ -49,7 +49,6 @@ void set_amd64_ls_mode(ir_node *n, ir_mode *mode);
amd64_attr_t *get_amd64_attr(ir_node *node);
const amd64_attr_t *get_amd64_attr_const(const ir_node *node);
const amd64_immediate_attr_t *get_amd64_immediate_attr_const(const ir_node *node);
const amd64_SymConst_attr_t *get_amd64_SymConst_attr_const(const ir_node *node);
/**
......
......@@ -28,7 +28,6 @@
#include "../bearch.h"
typedef struct amd64_attr_t amd64_attr_t;
typedef struct amd64_immediate_attr_t amd64_immediate_attr_t;
typedef struct amd64_SymConst_attr_t amd64_SymConst_attr_t;
struct amd64_attr_t
......@@ -36,11 +35,15 @@ struct amd64_attr_t
const arch_register_req_t **in_req; /**< register requirements for arguments */
const arch_register_req_t **out_req; /**< register requirements for results */
ir_mode *ls_mode; /**< Stores the "input" mode */
};
struct amd64_immediate_attr_t
{
unsigned imm_value; /**< the immediate value to load */
struct amd64_attr_data_bitfield {
unsigned ins_permuted : 1; /**< inputs of node have been permuted
(for commutative nodes) */
unsigned cmp_unsigned : 1; /**< compare should be unsigned */
} data;
struct amd64_attr_extended {
pn_Cmp pnc; /**< type of compare operation */
unsigned imm_value; /**< immediate value to use */
} ext;
};
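A minimal access sketch for the merged attribute layout, mirroring the reads done in the emitter above (the node variable is illustrative):

    const amd64_attr_t *attr = get_amd64_attr_const(node);

    if (attr->data.cmp_unsigned) {      /* one-bit flags live in attr->data   */
        /* ... pick the unsigned jcc suffixes ... */
    }
    pn_Cmp   pnc = attr->ext.pnc;       /* condition code and immediate value */
    unsigned imm = attr->ext.imm_value; /* now share the ext sub-struct       */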
struct amd64_SymConst_attr_t
......
......@@ -136,8 +136,28 @@ $arch = "amd64";
# { name => "xmm15", type => 1 },
# { mode => "mode_D" }
# ]
flags => [
{ name => "eflags", type => 0 },
{ mode => "mode_Iu", flags => "manual_ra" }
],
);
$mode_gp = "mode_Iu";
$mode_flags = "mode_Iu";
sub amd64_custom_init_attr {
my $constr = shift;
my $node = shift;
my $name = shift;
my $res = "";
if(defined($node->{modified_flags})) {
$res .= "\tarch_irn_add_flags(res, arch_irn_flags_modify_flags);\n";
}
return $res;
}
$custom_init_attr_func = \&amd64_custom_init_attr;
%emit_templates = (
S1 => "${arch}_emit_source_register(node, 0);",
S2 => "${arch}_emit_source_register(node, 1);",
......@@ -157,18 +177,18 @@ $arch = "amd64";
%init_attr = (
amd64_attr_t =>
"\tinit_amd64_attributes(res, flags, in_reqs, exec_units, n_res);",
amd64_immediate_attr_t =>
"\tinit_amd64_attributes(res, flags, in_reqs, exec_units, n_res);"
. "\tinit_amd64_immediate_attributes(res, imm_value);",
amd64_SymConst_attr_t =>
"\tinit_amd64_attributes(res, flags, in_reqs, exec_units, n_res);"
. "\tinit_amd64_SymConst_attributes(res, entity);",
amd64_condcode_attr_t =>
"\tinit_amd64_attributes(res, flags, in_reqs, exec_units, n_res);"
. "\tinit_amd64_condcode_attributes(res, pnc);",
);
%compare_attr = (
amd64_attr_t => "cmp_amd64_attr",
amd64_immediate_attr_t => "cmp_amd64_attr_immediate",
amd64_SymConst_attr_t => "cmp_amd64_attr_SymConst",
amd64_condcode_attr_t => "cmp_amd64_attr_condcode",
);
%nodes = (
......@@ -191,15 +211,15 @@ Add => {
emit => ". mov %S2, %D1\n"
. ". add %S1, %D1\n",
outs => [ "res" ],
mode => "mode_Iu",
mode => $mode_gp,
},
Immediate => {
op_flags => "c",
attr => "unsigned imm_value",
attr_type => "amd64_immediate_attr_t",
init_attr => "attr->ext.imm_value = imm_value;",
reg_req => { out => [ "gp" ] },
emit => '. movq %C, %D1',
mode => "mode_Iu",
mode => $mode_gp,
},
SymConst => {
op_flags => "c",
......@@ -207,7 +227,7 @@ SymConst => {
attr => "ir_entity *entity",
attr_type => "amd64_SymConst_attr_t",
reg_req => { out => [ "gp" ] },
mode => 'mode_Iu',
mode => $mode_gp,
},
Conv => {
state => "exc_pinned",
......@@ -216,7 +236,7 @@ Conv => {
reg_req => { in => [ "gp" ], out => [ "gp" ] },
ins => [ "val" ],
outs => [ "res" ],
mode => 'mode_Iu',
mode => $mode_gp,
},
Jmp => {
state => "pinned",
......@@ -224,6 +244,31 @@ Jmp => {
reg_req => { out => [ "none" ] },
mode => "mode_X",
},
Cmp => {
irn_flags => "R",
state => "exc_pinned",
reg_req => { in => [ "gp", "gp" ],
out => [ "flags" ] },
ins => [ "left", "right" ],
outs => [ "eflags" ],
emit => '. cmp %S1, %S2',
attr => "int ins_permuted, int cmp_unsigned",
init_attr => "attr->data.ins_permuted = ins_permuted;\n".
"\tattr->data.cmp_unsigned = cmp_unsigned;\n",
mode => $mode_flags,
modified_flags => 1,
},
Jcc => {
state => "pinned",
op_flags => "L|X|Y",
reg_req => { in => [ "eflags" ], out => [ "none", "none" ] },
ins => [ "eflags" ],
outs => [ "false", "true" ],
attr => "pn_Cmp pnc",
init_attr => "attr->ext.pnc = pnc;",
mode => "mode_T",
},
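The spec compiler turns these entries into node constructors; the transformation code further down calls them roughly as follows (a sketch of those call sites; the generated signatures themselves are not part of this diff):

    /* from gen_Cmp(): the two operands plus the two attribute ints */
    ir_node *cmp = new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2,
                                    /* ins_permuted */ false,
                                    /* cmp_unsigned */ is_unsigned);

    /* from gen_Cond(): the flags producer and the pn_Cmp taken from the Proj */
    ir_node *jcc = new_bd_amd64_Jcc(dbgi, block, cmp, pnc);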
#NoReg_GP => {
# state => "pinned",
# op_flags => "c|NB|NI",
......@@ -231,6 +276,6 @@ Jmp => {
# units => [],
# emit => "",
# latency => 0,
# mode => "mode_Iu",
# mode => $mode_gp,
#},
);
......@@ -155,6 +155,57 @@ static ir_node *gen_be_Call(ir_node *node)
return res;
}
static ir_node *gen_Cmp(ir_node *node)
{
ir_node *block = be_transform_node(get_nodes_block(node));
ir_node *op1 = get_Cmp_left(node);
ir_node *op2 = get_Cmp_right(node);
ir_mode *cmp_mode = get_irn_mode(op1);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *new_op1;
ir_node *new_op2;
bool is_unsigned;
if (mode_is_float(cmp_mode)) {
panic("Floating point not implemented yet (in gen_Cmp)!");
}
assert(get_irn_mode(op2) == cmp_mode);
is_unsigned = !mode_is_signed(cmp_mode);
new_op1 = be_transform_node(op1);
// new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
new_op2 = be_transform_node(op2);
// new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
return new_bd_amd64_Cmp(dbgi, block, new_op1, new_op2, false,
is_unsigned);
}
/**
* Transforms a Cond.
*
* @return the created amd64 Jcc node
*/
static ir_node *gen_Cond(ir_node *node)
{
ir_node *selector = get_Cond_selector(node);
ir_mode *mode = get_irn_mode(selector);
ir_node *block;
ir_node *flag_node;
dbg_info *dbgi;
if (mode != mode_b) {
panic ("create_Switch not implemented yet!");
// return gen_SwitchJmp(node);
}
assert(is_Proj(selector));
block = be_transform_node(get_nodes_block(node));
dbgi = get_irn_dbg_info(node);
flag_node = be_transform_node(get_Proj_pred(selector));
return new_bd_amd64_Jcc(dbgi, block, flag_node, get_Proj_proj(selector));
}
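For reference, the middle-end pattern these two transformers expect, written as the construction calls that would produce it (a sketch using libFirm's plain constructors; not part of this commit):

    ir_node *cmp  = new_Cmp(a, b);                      /* tuple of relations */
    ir_node *sel  = new_Proj(cmp, mode_b, pn_Cmp_Lt);   /* selected relation  */
    ir_node *cond = new_Cond(sel);
    ir_node *jmp_true  = new_Proj(cond, mode_X, pn_Cond_true);
    ir_node *jmp_false = new_Proj(cond, mode_X, pn_Cond_false);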
///**
// * Create an And that will zero out upper bits.
......@@ -331,6 +382,8 @@ static void amd64_register_transformers(void)
set_transformer(op_be_Call, gen_be_Call);
set_transformer(op_Conv, gen_Conv);
set_transformer(op_Jmp, gen_Jmp);
set_transformer(op_Cmp, gen_Cmp);
set_transformer(op_Cond, gen_Cond);
set_transformer(op_Phi, gen_Phi);
}
......
......@@ -44,6 +44,7 @@
#include "../bemodule.h"
#include "../begnuas.h"
#include "../belistsched.h"
#include "../beflags.h"
#include "bearch_amd64_t.h"
......@@ -135,11 +136,11 @@ static void amd64_finish_irg(void *self)
dump_ir_graph(irg, "amd64-finished");
}
static void amd64_before_ra(void *self)
{
(void) self;
/* Some stuff you need to do after scheduling but before register allocation */
amd64_code_gen_t *cg = self;
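/* keep flag values usable: ensure nothing that clobbers the eflags register
   class is scheduled between a flag producer (Cmp) and its consumer (Jcc) */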
be_sched_fix_flags(cg->birg, &amd64_reg_classes[CLASS_amd64_flags], 0);
}
static void amd64_after_ra(void *self)
......