Commit f642eb79 authored by Matthias Braun's avatar Matthias Braun
Browse files

amd64: support PIC code

parent 015ef327
......@@ -196,8 +196,8 @@ static void amd64_emit_immediate(const amd64_imm_t *const imm)
}
}
static void amd64_emit_am(const arch_register_t *const base,
const amd64_imm_t *const imm)
static void amd64_emit_am_old(const arch_register_t *const base,
const amd64_imm_t *const imm)
{
if (base == NULL) {
amd64_emit_immediate(imm);
......@@ -207,6 +207,46 @@ static void amd64_emit_am(const arch_register_t *const base,
}
/**
 * Emit an AT&T-syntax address-mode operand:
 *   symconst+offset(base,index,scale)
 * Any component may be absent; a lone "0" is printed when everything is
 * missing so the operand is never empty.
 */
static void amd64_emit_am(const ir_node *const node,
                          const amd64_am_info_t *const am)
{
	ir_entity *entity = am->symconst;
	if (entity != NULL) {
		be_gas_emit_entity(entity);
	}

	int32_t offset      = am->offset;
	uint8_t base_input  = am->base_input;
	uint8_t index_input = am->index_input;
	if (offset != 0 || (entity == NULL && base_input == NO_INPUT
	                    && index_input == NO_INPUT)) {
		if (entity != NULL) {
			/* force a sign so it reads "sym+4" / "sym-4" */
			be_emit_irprintf("%+d", offset);
		} else {
			be_emit_irprintf("%d", offset);
		}
	}

	if (base_input != NO_INPUT || index_input != NO_INPUT) {
		be_emit_char('(');
		if (base_input == RIP_INPUT) {
			/* PIC: instruction-pointer relative base */
			be_emit_cstring("%rip");
		} else if (base_input != NO_INPUT) {
			const arch_register_t *reg
				= arch_get_irn_register_in(node, base_input);
			emit_register(reg);
		}
		/* FIX: the index register belongs AFTER the comma; the previous
		 * code emitted a comma before the base register and never
		 * emitted the index at all. */
		if (index_input != NO_INPUT) {
			be_emit_char(',');
			const arch_register_t *reg
				= arch_get_irn_register_in(node, index_input);
			emit_register(reg);
			unsigned scale = am->log_scale;
			if (scale > 0)
				be_emit_irprintf(",%u", 1 << scale);
		}
		be_emit_char(')');
	}
}
void amd64_emitf(ir_node const *const node, char const *fmt, ...)
{
va_list ap;
......@@ -257,9 +297,13 @@ end_of_mods:
switch (*fmt++) {
case 'M': {
amd64_attr_t const *const attr = get_amd64_attr_const(node);
arch_register_t const *const base
= arch_get_irn_register_in(node, 0);
amd64_emit_am(base, &attr->imm);
if (mod & EMIT_RESPECT_LS) {
arch_register_t const *const base
= arch_get_irn_register_in(node, 0);
amd64_emit_am_old(base, &attr->imm);
} else {
amd64_emit_am(node, &attr->am);
}
break;
}
default:
......@@ -485,10 +529,10 @@ static void emit_amd64_LoadZ(const ir_node *node)
{
const amd64_attr_t *attr = get_amd64_attr_const(node);
switch (attr->data.insn_mode) {
case INSN_MODE_8: amd64_emitf(node, "movzbq %O(%^S0), %^D0"); break;
case INSN_MODE_16: amd64_emitf(node, "movzwq %O(%^S0), %^D0"); break;
case INSN_MODE_8: amd64_emitf(node, "movzbq %AM, %^D0"); break;
case INSN_MODE_16: amd64_emitf(node, "movzwq %AM, %^D0"); break;
case INSN_MODE_32:
case INSN_MODE_64: amd64_emitf(node, "mov%M %O(%^S0), %D0"); break;
case INSN_MODE_64: amd64_emitf(node, "mov%M %AM, %D0"); break;
default:
panic("invalid insn mode");
}
......
......@@ -11,7 +11,9 @@
#define FIRM_BE_AMD64_AMD64_NODES_ATTR_H
#include <stdint.h>
#include "bearch.h"
#include "compiler.h"
typedef struct amd64_attr_t amd64_attr_t;
typedef struct amd64_SymConst_attr_t amd64_SymConst_attr_t;
......@@ -24,11 +26,36 @@ typedef enum {
INSN_MODE_8
} amd64_insn_mode_t;
/** x86 segment-override selector for an address-mode operand
 * (AMD64_SEGMENT_DEFAULT means no override prefix). */
typedef enum {
	AMD64_SEGMENT_DEFAULT,
	AMD64_SEGMENT_CS,
	AMD64_SEGMENT_SS,
	AMD64_SEGMENT_DS,
	AMD64_SEGMENT_ES,
	AMD64_SEGMENT_FS,
	AMD64_SEGMENT_GS,
} amd64_segment_selector_t;
/** An immediate value: constant offset plus an optional symbolic entity. */
typedef struct amd64_imm_t {
	int64_t offset;
	ir_entity *symconst;  /* may be NULL if the immediate is pure constant */
} amd64_imm_t;
/* Pseudo input numbers for amd64_am_info_t base/index fields.  Real node
 * inputs are small, so the top byte values are free for markers. */
enum {
	NO_INPUT = 0xFF,
	RIP_INPUT = 0xFE, /* can be used as base_input for PIC code */
};
/** Describes an x86 address mode: symconst + offset + base + index<<log_scale. */
typedef struct amd64_am_info_t {
	int64_t offset;        /**< constant displacement */
	ir_entity *symconst;   /**< symbolic displacement (NULL if none) */
	uint8_t base_input;    /**< node input of the base register,
	                            or NO_INPUT / RIP_INPUT */
	uint8_t index_input;   /**< node input of the index register, or NO_INPUT */
	uint8_t mem_input;     /**< node input of the memory dependency */
	unsigned log_scale : 2; /* 0, 1, 2, 3 (giving scale 1, 2, 4, 8) */
	ENUMBF(amd64_segment_selector_t) segment : 4;
} amd64_am_info_t;
struct amd64_attr_t
{
except_attr exc; /**< the exception attribute. MUST be the first one. */
......@@ -42,7 +69,8 @@ struct amd64_attr_t
struct amd64_attr_extended {
ir_relation relation; /**< type of compare operation >*/
} ext;
amd64_imm_t imm;
amd64_imm_t imm;
amd64_am_info_t am;
};
struct amd64_SymConst_attr_t
......
......@@ -308,6 +308,18 @@ Cmp => {
modified_flags => 1,
},
# Lea: address computation using the full x86 address mode (%AM).
# Performs no memory access, hence rematerializable.
Lea => {
	irn_flags => [ "rematerializable" ],
	arity     => "variable",
	outs      => [ "res", "flags", "M" ],
	attr      => "amd64_insn_mode_t insn_mode, amd64_am_info_t am",
	reg_req   => { out => [ "gp", "flags", "none" ] },
	init_attr => "attr->data.insn_mode = insn_mode;\n".
	             "\tattr->am = am;\n",
	emit      => "lea%M %AM, %D0",
	mode      => $mode_gp,
},
Jcc => {
state => "pinned",
op_flags => [ "cfopcode", "forking" ],
......@@ -322,27 +334,26 @@ Jcc => {
LoadZ => {
op_flags => [ "uses_memory" ],
state => "exc_pinned",
reg_req => { in => [ "gp", "none" ],
out => [ "gp", "none" ] },
ins => [ "ptr", "mem" ],
reg_req => { out => [ "gp", "none" ] },
arity => "variable",
outs => [ "res", "M" ],
attr => "amd64_insn_mode_t insn_mode, ir_entity *entity",
attr_type => "amd64_SymConst_attr_t",
init_attr => "attr->base.data.insn_mode = insn_mode;",
emit => "mov%M %O(%^S0), %D0"
attr => "amd64_insn_mode_t insn_mode, amd64_am_info_t am",
attr_type => "amd64_attr_t",
init_attr => "attr->data.insn_mode = insn_mode;\n"
."\tattr->am = am;\n",
},
LoadS => {
op_flags => [ "uses_memory" ],
state => "exc_pinned",
reg_req => { in => [ "gp", "none" ],
out => [ "gp", "none" ] },
ins => [ "ptr", "mem" ],
reg_req => { out => [ "gp", "none" ] },
arity => "variable",
outs => [ "res", "M" ],
attr => "amd64_insn_mode_t insn_mode, ir_entity *entity",
attr_type => "amd64_SymConst_attr_t",
init_attr => "attr->base.data.insn_mode = insn_mode;",
emit => "movs%Mq %O(%^S0), %^D0"
attr => "amd64_insn_mode_t insn_mode, amd64_am_info_t am",
attr_type => "amd64_attr_t",
init_attr => "attr->data.insn_mode = insn_mode;\n"
."\tattr->am = am;\n",
emit => "movs%Mq %AM, %^D0"
},
FrameAddr => {
......@@ -358,14 +369,15 @@ FrameAddr => {
Store => {
op_flags => [ "uses_memory" ],
state => "exc_pinned",
reg_req => { in => [ "gp", "gp", "none" ], out => [ "none" ] },
ins => [ "ptr", "val", "mem" ],
reg_req => { out => [ "none" ] },
arity => "variable",
outs => [ "M" ],
attr => "amd64_insn_mode_t insn_mode, ir_entity *entity",
attr_type => "amd64_SymConst_attr_t",
init_attr => "attr->base.data.insn_mode = insn_mode;",
attr => "amd64_insn_mode_t insn_mode, amd64_am_info_t am",
attr_type => "amd64_attr_t",
init_attr => "attr->data.insn_mode = insn_mode;\n"
."\tattr->am = am;\n",
mode => "mode_M",
emit => "mov%M %S1, %O(%^S0)"
emit => "mov%M %S0, %AM"
},
SwitchJmp => {
......
......@@ -16,6 +16,7 @@
#include "error.h"
#include "debug.h"
#include "tv_t.h"
#include "util.h"
#include "benode.h"
#include "betranshlp.h"
......@@ -23,6 +24,7 @@
#include "bearch_amd64_t.h"
#include "beirg.h"
#include "beabihelper.h"
#include "besched.h"
#include "amd64_nodes_attr.h"
#include "amd64_transform.h"
......@@ -48,6 +50,30 @@ static size_t start_params_offset;
static pmap *node_to_stack;
static be_stackorder_t *stackorder;
/* Requirement for a plain general-purpose register.
 * NOTE(review): field order follows arch_register_req_t (type, class,
 * limited set, same/different constraints, width) — confirm against
 * bearch.h. */
static const arch_register_req_t amd64_requirement_gp = {
	arch_register_req_type_normal,
	&amd64_reg_classes[CLASS_amd64_gp],
	NULL,
	0,
	0,
	1
};
/* Input requirements for address-mode Load/Store nodes.  The callers below
 * pass inputs in the order matching these arrays; the arch_no_requirement
 * entry corresponds to the memory input. */

/* Load without base register: { mem } */
static const arch_register_req_t *am_load_reqs[] = {
	&arch_no_requirement,
};

/* Load with base register: { base, mem } */
static const arch_register_req_t *am_load_base_reqs[] = {
	&amd64_requirement_gp,
	&arch_no_requirement,
};

/* Store with base register: { value, base, mem } */
static const arch_register_req_t *am_store_base_reqs[] = {
	&amd64_requirement_gp,
	&amd64_requirement_gp,
	&arch_no_requirement,
};
static inline int mode_needs_gp_reg(ir_mode *mode)
{
return mode_is_int(mode) || mode_is_reference(mode);
......@@ -103,13 +129,56 @@ static ir_node *gen_Const(ir_node *node)
return new_bd_amd64_Const(dbgi, block, imode, val, NULL);
}
/** How a symbolic reference must be addressed. */
typedef enum reference_mode_t {
	REFERENCE_DIRECT,       /**< reference the symbol directly */
	REFERENCE_IP_RELATIVE,  /**< instruction-pointer relative (PIC) */
	REFERENCE_GOT,          /**< indirectly through a GOT entry */
} reference_mode_t;
/**
 * Decide how a reference to @p entity has to be addressed.
 * Without PIC everything is direct.  With PIC, locally defined
 * (non-mergeable) entities are IP-relative; everything else goes
 * through the global offset table.
 */
static reference_mode_t need_relative_addressing(const ir_entity *entity)
{
	if (!be_options.pic)
		return REFERENCE_DIRECT;
	if (!entity_has_definition(entity))
		return REFERENCE_GOT;
	if ((get_entity_linkage(entity) & IR_LINKAGE_MERGE) != 0)
		return REFERENCE_GOT;
	return REFERENCE_IP_RELATIVE;
}
/**
 * Transform a SymConst into either a direct Const, a RIP-relative Lea,
 * or (for external entities under PIC) a load of the GOT entry.
 */
static ir_node *gen_SymConst(ir_node *node)
{
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_entity *entity = get_SymConst_entity(node);

	/* FIX: removed stale unconditional
	 *   return new_bd_amd64_Const(dbgi, block, INSN_MODE_32, 0, entity);
	 * which made all of the PIC handling below unreachable. */

	/* do we need RIP-relative addressing because of PIC? */
	reference_mode_t mode = need_relative_addressing(entity);
	if (mode == REFERENCE_DIRECT) {
		return new_bd_amd64_Const(dbgi, block, INSN_MODE_64, 0, entity);
	}

	amd64_am_info_t am;
	memset(&am, 0, sizeof(am));
	am.base_input  = RIP_INPUT;
	am.index_input = NO_INPUT;
	if (mode == REFERENCE_IP_RELATIVE) {
		am.symconst = entity;
		ir_node *lea
			= new_bd_amd64_Lea(dbgi, block, 0, NULL, INSN_MODE_64, am);
		return lea;
	} else {
		assert(mode == REFERENCE_GOT);
		/* load the address indirectly through its GOT entry */
		am.symconst = new_got_entry_entity(entity);
		ir_graph *irg  = get_irn_irg(node);
		ir_node  *in[] = { get_irg_no_mem(irg) };
		ir_node  *load
			= new_bd_amd64_LoadZ(dbgi, block, ARRAY_SIZE(in), in,
			                     INSN_MODE_64, am);
		arch_set_irn_register_reqs_in(load, am_load_reqs);
		return new_r_Proj(load, mode_gp, pn_amd64_LoadZ_res);
	}
}
typedef ir_node* (*binop_constructor)(dbg_info *dbgi, ir_node *block,
......@@ -570,8 +639,13 @@ static ir_node *gen_Call(ir_node *node)
/* we need a store if we're here */
mode = mode_gp;
amd64_insn_mode_t imode = get_insn_mode_from_mode(mode);
ir_node *store = new_bd_amd64_Store(dbgi, new_block, new_value, incsp,
new_mem, imode, NULL);
amd64_am_info_t am;
memset(&am, 0, sizeof(am));
am.base_input = 1;
am.index_input = NO_INPUT;
ir_node *in[] = { new_value, incsp, new_mem };
ir_node *store = new_bd_amd64_Store(dbgi, new_block, ARRAY_SIZE(in), in,
imode, am);
set_irn_pinned(store, op_pin_state_floats);
sync_ins[sync_arity++] = store;
}
......@@ -705,17 +779,24 @@ static ir_node *gen_Proj_Proj_Start(ir_node *node)
amd64_insn_mode_t imode = get_insn_mode_from_mode(mode);
/* TODO: use the AM form for the address calculation */
ir_node *addr = new_bd_amd64_FrameAddr(NULL, new_block, base,
param->entity);
amd64_am_info_t am;
memset(&am, 0, sizeof(am));
am.base_input = 0;
am.mem_input = 1;
am.symconst = param->entity;
ir_node *in[] = { base, mem };
ir_node *load;
ir_node *value;
if (get_mode_size_bits(mode) < 64 && mode_is_signed(mode)) {
load = new_bd_amd64_LoadS(NULL, new_block, addr, mem, imode, NULL);
load = new_bd_amd64_LoadS(NULL, new_block, ARRAY_SIZE(in),
in, imode, am);
value = new_r_Proj(load, mode_gp, pn_amd64_LoadS_res);
} else {
load = new_bd_amd64_LoadZ(NULL, new_block, addr, mem, imode, NULL);
load = new_bd_amd64_LoadZ(NULL, new_block, ARRAY_SIZE(in),
in, imode, am);
value = new_r_Proj(load, mode_gp, pn_amd64_LoadZ_res);
}
arch_set_irn_register_reqs_in(load, am_load_base_reqs);
set_irn_pinned(load, op_pin_state_floats);
return value;
}
......@@ -854,12 +935,60 @@ static ir_node *gen_Store(ir_node *node)
} else {
assert(mode_needs_gp_reg(mode) && "unsupported mode for Store");
amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
new_store = new_bd_amd64_Store(dbgi, block, new_ptr, new_val, new_mem, insn_mode, NULL);
amd64_am_info_t am;
memset(&am, 0, sizeof(am));
am.base_input = 1;
am.index_input = NO_INPUT;
ir_node *in[] = { new_val, new_ptr, new_mem };
new_store = new_bd_amd64_Store(dbgi, block, ARRAY_SIZE(in), in,
insn_mode, am);
arch_set_irn_register_reqs_in(new_store, am_store_base_reqs);
}
set_irn_pinned(new_store, get_irn_pinned(node));
return new_store;
}
/**
 * Create a spill: a 64-bit store of @p value into a frame slot,
 * scheduled directly after @p after.  Returns the Store node.
 */
ir_node *amd64_new_spill(ir_node *value, ir_node *after)
{
	ir_node  *block = get_block(after);
	ir_graph *irg   = get_irn_irg(block);
	ir_node  *frame = get_irg_frame(irg);
	ir_node  *nomem = get_irg_no_mem(irg);

	amd64_am_info_t am;
	memset(&am, 0, sizeof(am));
	am.index_input = NO_INPUT;
	am.base_input  = 1;   /* the frame pointer is Store input 1 */

	ir_node *in[]  = { value, frame, nomem };
	ir_node *store = new_bd_amd64_Store(NULL, block, ARRAY_SIZE(in), in,
	                                    INSN_MODE_64, am);
	arch_set_irn_register_reqs_in(store, am_store_base_reqs);
	sched_add_after(after, store);
	return store;
}
/**
 * Create a reload: a 64-bit load of the value spilled by @p spill,
 * scheduled directly before @p before.  Returns the result Proj.
 */
ir_node *amd64_new_reload(ir_node *value, ir_node *spill, ir_node *before)
{
	ir_node  *block = get_block(before);
	ir_graph *irg   = get_irn_irg(block);
	ir_node  *frame = get_irg_frame(irg);
	ir_mode  *mode  = get_irn_mode(value);

	amd64_am_info_t am;
	memset(&am, 0, sizeof(am));
	am.index_input = NO_INPUT;
	am.base_input  = 0;   /* the frame pointer is Load input 0 */

	ir_node *in[] = { frame, spill };
	ir_node *load = new_bd_amd64_LoadZ(NULL, block, ARRAY_SIZE(in), in,
	                                   INSN_MODE_64, am);
	arch_set_irn_register_reqs_in(load, am_load_base_reqs);
	sched_add_before(before, load);
	return new_r_Proj(load, mode, pn_amd64_LoadZ_res);
}
static ir_node *gen_Load(ir_node *node)
{
ir_node *block = be_transform_node(get_nodes_block(node));
......@@ -876,11 +1005,19 @@ static ir_node *gen_Load(ir_node *node)
} else {
assert(mode_needs_gp_reg(mode) && "unsupported mode for Load");
amd64_insn_mode_t insn_mode = get_insn_mode_from_mode(mode);
amd64_am_info_t am;
memset(&am, 0, sizeof(am));
am.base_input = 0;
am.mem_input = 1;
ir_node *in[] = { new_ptr, new_mem };
if (get_mode_size_bits(mode) < 64 && mode_is_signed(mode)) {
new_load = new_bd_amd64_LoadS(dbgi, block, new_ptr, new_mem, insn_mode, NULL);
new_load = new_bd_amd64_LoadS(dbgi, block, ARRAY_SIZE(in), in,
insn_mode, am);
} else {
new_load = new_bd_amd64_LoadZ(dbgi, block, new_ptr, new_mem, insn_mode, NULL);
new_load = new_bd_amd64_LoadZ(dbgi, block, ARRAY_SIZE(in), in,
insn_mode, am);
}
arch_set_irn_register_reqs_in(new_load, am_load_base_reqs);
}
set_irn_pinned(new_load, get_irn_pinned(node));
......
......@@ -12,6 +12,10 @@
void amd64_init_transform(void);
ir_node *amd64_new_spill(ir_node *value, ir_node *after);
ir_node *amd64_new_reload(ir_node *value, ir_node *spill, ir_node *before);
void amd64_transform_graph(ir_graph *irg);
#endif
......@@ -16,6 +16,7 @@
#include "lower_calls.h"
#include "debug.h"
#include "error.h"
#include "util.h"
#include "be_t.h"
#include "bearch.h"
#include "beirg.h"
......@@ -48,18 +49,17 @@ static ir_entity *amd64_get_frame_entity(const ir_node *node)
if (is_amd64_FrameAddr(node)) {
const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_Store(node)) {
const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_LoadS(node) || is_amd64_LoadZ(node)) {
const amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr_const(node);
return attr->entity;
} else if (is_amd64_Store(node) || is_amd64_LoadS(node)
|| is_amd64_LoadZ(node)) {
const amd64_attr_t *attr = get_amd64_attr_const(node);
ir_entity *entity = attr->am.symconst;
if (entity == NULL)
return NULL;
ir_type *parent = get_entity_owner(entity);
if (is_frame_type(parent))
return entity;
}
(void) node;
/* TODO: return the ir_entity assigned to the frame */
return NULL;
}
......@@ -67,12 +67,15 @@ static ir_entity *amd64_get_frame_entity(const ir_node *node)
* This function is called by the generic backend to correct offsets for
* nodes accessing the stack.
*/
/* FIX: the text here contained both the old and the renamed signature
 * back-to-back plus remnants of the old single-branch body, which does
 * not compile; reconstructed the coherent version. */
static void amd64_set_frame_offset(ir_node *node, int offset)
{
	if (is_amd64_FrameAddr(node)) {
		amd64_SymConst_attr_t *attr = get_amd64_SymConst_attr(node);
		attr->fp_offset += offset;
	} else if (is_amd64_Store(node) || is_amd64_LoadS(node)
	        || is_amd64_LoadZ(node)) {
		/* Loads/Stores keep their stack displacement in the
		 * address-mode info. */
		amd64_attr_t *attr = get_amd64_attr(node);
		attr->am.offset += offset;
	}
}
......@@ -108,43 +111,6 @@ static void amd64_before_ra(ir_graph *irg)
be_add_missing_keeps(irg);
}
/**
 * Replace a generic be_Reload by an amd64 64-bit load from the node's
 * frame entity.
 */
static void transform_Reload(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *frame  = get_irg_frame(irg);
	ir_node   *mem    = get_irn_n(node, n_be_Reload_mem);
	ir_mode   *mode   = get_irn_mode(node);
	ir_entity *entity = be_get_frame_entity(node);

	ir_node *load = new_bd_amd64_LoadZ(dbgi, block, frame, mem,
	                                   INSN_MODE_64, entity);
	sched_replace(node, load);

	/* The result Proj inherits the register assigned to the Reload. */
	ir_node *res = new_rd_Proj(dbgi, load, mode, pn_amd64_LoadZ_res);
	arch_set_irn_register(res, arch_get_irn_register(node));
	exchange(node, res);
}
/**
 * Replace a generic be_Spill by an amd64 64-bit store to the node's
 * frame entity.
 */
static void transform_Spill(ir_node *node)
{
	ir_graph  *irg    = get_irn_irg(node);
	ir_node   *block  = get_nodes_block(node);
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *frame  = get_irg_frame(irg);
	ir_node   *nomem  = get_irg_no_mem(irg);
	ir_node   *value  = get_irn_n(node, n_be_Spill_val);
	ir_entity *entity = be_get_frame_entity(node);

	ir_node *store = new_bd_amd64_Store(dbgi, block, frame, value, nomem,
	                                    INSN_MODE_64, entity);
	sched_replace(node, store);
	exchange(node, store);
}
static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
{
dbg_info *dbgi = get_irn_dbg_info(node);
......@@ -261,11 +227,7 @@ static void amd64_after_ra_walker(ir_node *block, void *data)
(void) data;
sched_foreach_reverse_safe(block, node) {
if (be_is_Reload(node)) {
transform_Reload(node);
} else if (be_is_Spill(node)) {
transform_Spill(node);
} else if (be_is_MemPerm(node)) {
if (be_is_MemPerm(node)) {
transform_MemPerm(node);
}
}
......@@ -568,8 +530,8 @@ const arch_isa_if_t amd64_isa_if = {
amd64_end_codegeneration,
amd64_get_call_abi,
NULL, /* mark remat */
be_new_spill,
be_new_reload,
amd64_new_spill,
amd64_new_reload,
amd64_register_saved_by,
NULL, /* handle intrinsics */
......
......@@ -1286,10 +1286,15 @@ char const *be_gas_get_private_prefix(void)
void be_gas_emit_entity(const ir_entity *entity)
{
if (entity->type == get_code_type()) {
if (entity->entity_kind == IR_ENTITY_LABEL) {
ir_label_t label = get_entity_label(entity);
be_emit_irprintf("%s_%lu", be_gas_get_private_prefix(), label);
return;
} else if (entity->entity_kind == IR_ENTITY_GOTENTRY) {
ir_entity *referenced = entity->attr.got.referenced;
be_gas_emit_entity(referenced);
be_emit_cstring("@GOTPCREL");
return;
}
if (get_entity_visibility(entity) == ir_visibility_private) {
......
Markdown is supported
0% or .