Commit 39fa8839 authored by Matthias Braun's avatar Matthias Braun
Browse files

amd64: introduce custom mode for xmm registers

parent cd8650f5
......@@ -680,7 +680,7 @@ static void emit_be_Copy(const ir_node *irn)
return;
}
if (mode_is_float(mode)) {
if (mode == amd64_mode_xmm) {
amd64_emitf(irn, "movapd %^S0, %^D0");
} else if (mode_is_data(mode)) {
amd64_emitf(irn, "mov %^S0, %^D0");
......
......@@ -12,6 +12,7 @@
#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
#include "bearch.h"
#include "bearch_amd64_t.h"
#include "benode.h"
#include "besched.h"
#include "debug.h"
......@@ -102,14 +103,14 @@ static void transform_sub_to_neg_add(ir_node *node,
ir_node *xor_in[] = { in2 };
ir_node *xor = new_bd_amd64_xXorp(dbgi, block, ARRAY_SIZE(xor_in),
xor_in, &xor_attr);
ir_node *neg = new_r_Proj(xor, mode_D, pn_amd64_xXorp_res);
ir_node *neg = new_r_Proj(xor, amd64_mode_xmm, pn_amd64_xXorp_res);
sched_add_before(node, xor);
arch_set_irn_register(neg, in2_reg);
ir_node *in[] = { neg, in1 };
add = new_bd_amd64_xAdds(dbgi, block, ARRAY_SIZE(in), in, attr);
add_res = new_r_Proj(add, mode_D, pn_amd64_xAdds_res);
add_res = new_r_Proj(add, amd64_mode_xmm, pn_amd64_xAdds_res);
} else {
assert(is_amd64_Sub(node));
ir_node *neg = new_bd_amd64_Neg(dbgi, block, in2, attr->base.insn_mode);
......
......@@ -28,6 +28,7 @@
#include "amd64_nodes_attr.h"
#include "amd64_new_nodes.h"
#include "bearch_amd64_t.h"
#include "gen_amd64_regalloc_if.h"
static const char *get_op_mode_string(amd64_op_mode_t mode)
......
......@@ -2,7 +2,7 @@ $arch = "amd64";
$mode_gp = "mode_Lu";
$mode_flags = "mode_Iu";
$mode_xmm = "mode_D"; #TODO 128bit fp-mode
$mode_xmm = "amd64_mode_xmm";
$status_flags = "all"; # TODO
$all_flags = "all";
......
......@@ -240,7 +240,8 @@ static const arch_register_req_t *xmm_xmm_reqs[] = {
/* Returns true when values of the given mode should be kept in general-purpose
 * registers.  Two's-complement integer modes qualify, with one exception:
 * amd64_mode_xmm is currently declared as a 128-bit integer mode (only so firm
 * lets us build constants with it — see amd64_init_types()), yet its values
 * belong in xmm registers, so it must be excluded here. */
static inline bool mode_needs_gp_reg(ir_mode *mode)
{
	/* The unconditional pre-change "return get_mode_arithmetic(mode) ==
	 * irma_twos_complement;" made the xmm check below unreachable; it was a
	 * leftover removed line from the diff and has been dropped. */
	return get_mode_arithmetic(mode) == irma_twos_complement
	    && mode != amd64_mode_xmm; /* mode_xmm is 128bit int at the moment */
}
static bool is_downconv(const ir_node *node)
......@@ -757,7 +758,7 @@ static ir_node *gen_binop_am(ir_node *node, ir_node *op1, ir_node *op2,
if (mode_is_float(mode)) {
arch_set_irn_register_req_out(new_node, 0,
&amd64_requirement_xmm_same_0);
return new_r_Proj(new_node, mode_D, pn_amd64_xSubs_res);
return new_r_Proj(new_node, amd64_mode_xmm, pn_amd64_xSubs_res);
} else {
arch_set_irn_register_req_out(new_node, 0,
&amd64_requirement_gp_same_0);
......@@ -1153,7 +1154,7 @@ static ir_node *gen_Proj_Div(ir_node *const node)
ir_mode *mode;
if (mode_is_float(get_Div_resmode(pred)))
mode = mode_D;
mode = amd64_mode_xmm;
else
mode = mode_gp;
......@@ -1807,7 +1808,7 @@ static ir_node *gen_Proj_Proj_Call(ir_node *node)
if (mode_needs_gp_reg(mode))
mode = mode_gp;
else if (mode_is_float(mode))
mode = mode_D;
mode = amd64_mode_xmm;
x86_free_calling_convention(cconv);
return new_r_Proj(new_call, mode, new_pn);
}
......@@ -1851,7 +1852,7 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node)
if (mode_is_float(mode)) {
load = new_bd_amd64_xMovs(NULL, new_block, ARRAY_SIZE(in),
in, insn_mode, AMD64_OP_ADDR, addr);
value = new_r_Proj(load, mode_D, pn_amd64_xMovs_res);
value = new_r_Proj(load, amd64_mode_xmm, pn_amd64_xMovs_res);
} else if (get_mode_size_bits(mode) < 64 && mode_is_signed(mode)) {
load = new_bd_amd64_Movs(NULL, new_block, ARRAY_SIZE(in),
in, insn_mode, AMD64_OP_ADDR, addr);
......@@ -2028,7 +2029,7 @@ static ir_node *gen_Conv(ir_node *node)
conv = new_bd_amd64_CvtSS2SD(dbgi, block, ARRAY_SIZE(in),
in, insn_mode, AMD64_OP_REG,
addr);
res = new_r_Proj(conv, mode_D, pn_amd64_CvtSS2SD_res);
res = new_r_Proj(conv, amd64_mode_xmm, pn_amd64_CvtSS2SD_res);
} else {
conv = new_bd_amd64_CvtSD2SS(dbgi, block, ARRAY_SIZE(in),
in, insn_mode, AMD64_OP_REG,
......@@ -2092,7 +2093,7 @@ static ir_node *gen_Conv(ir_node *node)
in, insn_mode, AMD64_OP_REG,
addr);
res = new_r_Proj(conv, mode_D, pn_amd64_CvtSI2SD_res);
res = new_r_Proj(conv, amd64_mode_xmm, pn_amd64_CvtSI2SD_res);
}
reqs = reg_reqs;
......@@ -2334,7 +2335,7 @@ static ir_node *gen_Proj_Load(ir_node *node)
switch (get_amd64_irn_opcode(new_load)) {
case iro_amd64_xMovs:
if (pn == pn_Load_res) {
return new_rd_Proj(dbgi, new_load, mode_D, pn_amd64_xMovs_res);
return new_rd_Proj(dbgi, new_load, amd64_mode_xmm, pn_amd64_xMovs_res);
} else if (pn == pn_Load_M) {
return new_rd_Proj(dbgi, new_load, mode_M, pn_amd64_xMovs_M);
}
......
......@@ -47,6 +47,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
ir_mode *amd64_mode_E;
ir_type *amd64_type_E;
ir_mode *amd64_mode_xmm;
static ir_entity *amd64_get_frame_entity(const ir_node *node)
{
......@@ -780,6 +781,10 @@ static void amd64_init_types(void)
set_type_size_bytes(amd64_type_E, 16);
set_type_alignment_bytes(amd64_type_E, 16);
/* use an int128 mode for xmm registers for now, so that firm allows us to
* create constants with the xmm mode... */
amd64_mode_xmm = new_int_mode("x86_xmm", irma_twos_complement, 128, 0, 0);
amd64_backend_params.type_long_double = amd64_type_E;
}
......
......@@ -20,6 +20,7 @@ typedef struct amd64_isa_t {
extern ir_mode *amd64_mode_E;
extern ir_type *amd64_type_E;
extern ir_mode *amd64_mode_xmm;
#define AMD64_REGISTER_SIZE 8
/** power of two stack alignment on calls */
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment