Commit ff26d458 authored by Matthias Braun

cleanup, use C99

parent 00b938cf
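The hunks below all apply the same C99 cleanup: variable declarations move from the top of a block to their point of first use, loop counters become for-loop declarations, and the now-redundant temporaries are dropped. A minimal sketch of the pattern; the helper names sum_c89/sum_c99 are hypothetical and not part of libFirm:

#include <stddef.h>

/* Before: C89 style, every declaration grouped at the top of the block. */
static int sum_c89(const int *values, size_t n)
{
	size_t i;
	int    sum;

	sum = 0;
	for (i = 0; i < n; ++i)
		sum += values[i];
	return sum;
}

/* After: C99 style, declare at first use with a loop-scoped counter. */
static int sum_c99(const int *values, size_t n)
{
	int sum = 0;
	for (size_t i = 0; i < n; ++i)
		sum += values[i];
	return sum;
}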
@@ -187,7 +187,6 @@ static void rewrite_unsigned_float_Conv(ir_node *node)
part_block(node);
{
ir_node *block = get_nodes_block(node);
ir_node *unsigned_x = get_Conv_op(node);
ir_mode *mode_u = get_irn_mode(unsigned_x);
@@ -209,25 +208,20 @@ static void rewrite_unsigned_float_Conv(ir_node *node)
ir_node *false_jmp = new_r_Jmp(false_block);
ir_tarval *correction = new_tarval_from_double(4294967296., mode_d);
ir_node *c_const = new_r_Const(irg, correction);
ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const,
mode_d);
ir_node *fadd = new_rd_Add(dbgi, true_block, res, c_const, mode_d);
ir_node *lower_in[2] = { true_jmp, false_jmp };
ir_node *phi_in[2] = { fadd, res };
ir_mode *dest_mode = get_irn_mode(node);
ir_node *phi;
ir_node *res_conv;
set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_d);
ir_node *phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_d);
assert(get_Block_phis(lower_block) == NULL);
set_Block_phis(lower_block, phi);
set_Phi_next(phi, NULL);
res_conv = new_rd_Conv(dbgi, lower_block, phi, dest_mode);
ir_node *res_conv = new_rd_Conv(dbgi, lower_block, phi, dest_mode);
exchange(node, res_conv);
}
}
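For reference, the correction constant used above is 2^32 = 4294967296: when a 32-bit unsigned value is run through the signed int->float path, values with the top bit set come out as x - 2^32, so the rewritten graph adds 2^32 back on that branch. A source-level sketch of the same computation; u32_to_double is a hypothetical helper, not code from the patch:

#include <stdint.h>

/* Unsigned 32-bit -> double via the signed conversion plus a 2^32 fix-up. */
static double u32_to_double(uint32_t x)
{
	double d = (double)(int32_t)x;   /* signed conversion, wraps for x >= 2^31 */
	if ((int32_t)x < 0)
		d += 4294967296.0;           /* the correction from new_tarval_from_double above */
	return d;
}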
/**
@@ -249,7 +243,6 @@ static void rewrite_float_unsigned_Conv(ir_node *node)
part_block(node);
{
ir_node *block = get_nodes_block(node);
ir_node *float_x = get_Conv_op(node);
ir_mode *mode_u = get_irn_mode(node);
@@ -281,26 +274,23 @@ static void rewrite_float_unsigned_Conv(ir_node *node)
ir_node *lower_in[2] = { true_jmp, false_jmp };
ir_node *phi_in[2] = { xorn, converted };
ir_node *phi;
ir_node *res_conv;
set_irn_in(lower_block, ARRAY_SIZE(lower_in), lower_in);
phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_s);
ir_node *phi = new_r_Phi(lower_block, ARRAY_SIZE(phi_in), phi_in, mode_s);
assert(get_Block_phis(lower_block) == NULL);
set_Block_phis(lower_block, phi);
set_Phi_next(phi, NULL);
res_conv = new_rd_Conv(dbgi, lower_block, phi, mode_u);
ir_node *res_conv = new_rd_Conv(dbgi, lower_block, phi, mode_u);
exchange(node, res_conv);
}
}
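The reverse rewrite ends in the xor seen in the phi above. Presumably values below 2^31 take the plain float->signed conversion, while larger values are reduced by 2^31 before the signed conversion and get their top bit restored by the xor; the comparison and subtraction themselves lie outside this hunk. A hedged sketch under that assumption, with double_to_u32 as a hypothetical helper:

#include <stdint.h>

/* Double -> unsigned 32-bit: convert directly when the value fits a signed
 * 32-bit conversion, otherwise shift the range down by 2^31 and set the top
 * bit again afterwards. */
static uint32_t double_to_u32(double d)
{
	if (d < 2147483648.0)
		return (uint32_t)(int32_t)d;
	return (uint32_t)(int32_t)(d - 2147483648.0) ^ 0x80000000u;
}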
static int sparc_rewrite_Conv(ir_node *node, void *ctx)
{
(void) ctx;
ir_mode *to_mode = get_irn_mode(node);
ir_node *op = get_Conv_op(node);
ir_mode *from_mode = get_irn_mode(op);
(void) ctx;
if (mode_is_float(to_mode) && mode_is_int(from_mode)
&& get_mode_size_bits(from_mode) == 32
@@ -320,77 +310,66 @@ static int sparc_rewrite_Conv(ir_node *node, void *ctx)
static void sparc_handle_intrinsics(void)
{
ir_type *tp, *int_tp, *uint_tp;
i_record records[8];
i_record records[3];
size_t n_records = 0;
runtime_rt rt_iMod, rt_uMod;
#define ID(x) new_id_from_chars(x, sizeof(x)-1)
int_tp = new_type_primitive(mode_Is);
uint_tp = new_type_primitive(mode_Iu);
/* we need to rewrite some forms of int->float conversions */
{
i_instr_record *map_Conv = &records[n_records++].i_instr;
map_Conv->kind = INTRINSIC_INSTR;
map_Conv->op = op_Conv;
map_Conv->i_mapper = sparc_rewrite_Conv;
}
/* SPARC has no signed mod instruction ... */
{
i_instr_record *map_Mod = &records[n_records++].i_instr;
tp = new_type_method(2, 1);
set_method_param_type(tp, 0, int_tp);
set_method_param_type(tp, 1, int_tp);
set_method_res_type(tp, 0, int_tp);
rt_iMod.ent = new_entity(get_glob_type(), ID(".rem"), tp);
set_entity_ld_ident(rt_iMod.ent, ID(".rem"));
/* SPARC has no signed mod instruction ... */
ir_type *int_tp = new_type_primitive(mode_Is);
ir_type *mod_tp = new_type_method(2, 1);
set_method_param_type(mod_tp, 0, int_tp);
set_method_param_type(mod_tp, 1, int_tp);
set_method_res_type(mod_tp, 0, int_tp);
runtime_rt rt_iMod;
ident *mod_id = new_id_from_str(".rem");
rt_iMod.ent = new_entity(get_glob_type(), mod_id, mod_tp);
set_entity_ld_ident(rt_iMod.ent, mod_id);
rt_iMod.mode = mode_T;
rt_iMod.res_mode = mode_Is;
rt_iMod.mem_proj_nr = pn_Mod_M;
rt_iMod.regular_proj_nr = pn_Mod_X_regular;
rt_iMod.exc_proj_nr = pn_Mod_X_except;
rt_iMod.res_proj_nr = pn_Mod_res;
set_entity_visibility(rt_iMod.ent, ir_visibility_external);
map_Mod->kind = INTRINSIC_INSTR;
map_Mod->op = op_Mod;
map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
map_Mod->ctx = &rt_iMod;
}
/* ... nor an unsigned mod. */
{
i_instr_record *map_Mod = &records[n_records++].i_instr;
tp = new_type_method(2, 1);
set_method_param_type(tp, 0, uint_tp);
set_method_param_type(tp, 1, uint_tp);
set_method_res_type(tp, 0, uint_tp);
i_instr_record *map_imod = &records[n_records++].i_instr;
map_imod->kind = INTRINSIC_INSTR;
map_imod->op = op_Mod;
map_imod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
map_imod->ctx = &rt_iMod;
rt_uMod.ent = new_entity(get_glob_type(), ID(".urem"), tp);
set_entity_ld_ident(rt_uMod.ent, ID(".urem"));
/* ... nor an unsigned mod. */
ir_type *umod_tp = new_type_method(2, 1);
ir_type *uint_tp = new_type_primitive(mode_Iu);
set_method_param_type(umod_tp, 0, uint_tp);
set_method_param_type(umod_tp, 1, uint_tp);
set_method_res_type(umod_tp, 0, uint_tp);
runtime_rt rt_uMod;
ident *umod_id = new_id_from_str(".urem");
rt_uMod.ent = new_entity(get_glob_type(), umod_id, umod_tp);
set_entity_ld_ident(rt_uMod.ent, umod_id);
rt_uMod.mode = mode_T;
rt_uMod.res_mode = mode_Iu;
rt_uMod.mem_proj_nr = pn_Mod_M;
rt_uMod.regular_proj_nr = pn_Mod_X_regular;
rt_uMod.exc_proj_nr = pn_Mod_X_except;
rt_uMod.res_proj_nr = pn_Mod_res;
set_entity_visibility(rt_uMod.ent, ir_visibility_external);
map_Mod->kind = INTRINSIC_INSTR;
map_Mod->op = op_Mod;
map_Mod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
map_Mod->ctx = &rt_uMod;
}
i_instr_record *map_umod = &records[n_records++].i_instr;
map_umod->kind = INTRINSIC_INSTR;
map_umod->op = op_Mod;
map_umod->i_mapper = (i_mapper_func)i_mapper_RuntimeCall;
map_umod->ctx = &rt_uMod;
assert(n_records < ARRAY_SIZE(records));
assert(n_records <= ARRAY_SIZE(records));
lower_intrinsics(records, n_records, /*part_block_used=*/ true);
}
@@ -430,12 +409,9 @@ static void sparc_end_codegeneration(void *self)
static void sparc_lower_for_target(void)
{
ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
size_t i, n_irgs = get_irp_n_irgs();
lower_calls_with_compounds(LF_RETURN_HIDDEN);
for (i = 0; i < n_irgs; ++i) {
for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
/* Turn all small CopyBs into loads/stores and all bigger CopyBs into
* memcpy calls. */
@@ -447,14 +423,15 @@ static void sparc_lower_for_target(void)
lower_builtins(0, NULL);
for (size_t i = 0; i < n_irgs; ++i) {
ir_mode *mode_gp = sparc_reg_classes[CLASS_sparc_gp].mode;
for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
lower_switch(irg, 4, 256, mode_gp);
}
sparc_lower_64bit();
for (i = 0; i < n_irgs; ++i) {
for (size_t i = 0, n_irgs = get_irp_n_irgs(); i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
ir_lower_mode_b(irg, mode_Iu);
/* TODO: Pass SPARC_MIN_STACKSIZE as addr_delta as soon as
@@ -555,8 +532,8 @@ static ir_node *sparc_new_spill(ir_node *value, ir_node *after)
ir_node *frame = get_irg_frame(irg);
ir_node *mem = get_irg_no_mem(irg);
ir_mode *mode = get_irn_mode(value);
ir_node *store;
ir_node *store;
if (mode_is_float(mode)) {
store = create_stf(NULL, block, value, frame, mem, mode, NULL, 0, true);
} else {
@@ -574,9 +551,8 @@ static ir_node *sparc_new_reload(ir_node *value, ir_node *spill,
ir_graph *irg = get_irn_irg(value);
ir_node *frame = get_irg_frame(irg);
ir_mode *mode = get_irn_mode(value);
ir_node *load;
ir_node *res;
ir_node *load;
if (mode_is_float(mode)) {
load = create_ldf(NULL, block, frame, spill, mode, NULL, 0, true);
} else {
@@ -585,7 +561,7 @@
}
sched_add_before(before, load);
assert((long)pn_sparc_Ld_res == (long)pn_sparc_Ldf_res);
res = new_r_Proj(load, mode, pn_sparc_Ld_res);
ir_node *res = new_r_Proj(load, mode, pn_sparc_Ld_res);
return res;
}
@@ -375,8 +375,7 @@ static bool can_move_up_into_delayslot(const ir_node *node, const ir_node *to)
return false;
/* node must not use any results of 'to' */
int arity = get_irn_arity(node);
for (int i = 0; i < arity; ++i) {
for (int i = 0, arity = get_irn_arity(node); i < arity; ++i) {
ir_node *in = get_irn_n(node, i);
ir_node *skipped = skip_Proj(in);
if (skipped == to)
@@ -417,8 +416,7 @@ static bool can_move_up_into_delayslot(const ir_node *node, const ir_node *to)
} else if (is_sparc_SDiv(to) || is_sparc_UDiv(to)) {
/* node will be inserted between wr and div so it must not overwrite
* anything except the wr input */
int arity = get_irn_arity(to);
for (int i = 0; i < arity; ++i) {
for (int i = 0, arity = get_irn_arity(to); i < arity; ++i) {
assert((long)n_sparc_SDiv_dividend_high == (long)n_sparc_UDiv_dividend_high);
if (i == n_sparc_SDiv_dividend_high)
continue;
@@ -54,12 +54,11 @@ static void kill_unused_stacknodes(ir_node *node)
} else if (is_Phi(node)) {
int arity = get_irn_arity(node);
ir_node **ins = ALLOCAN(ir_node*, arity);
int i;
sched_remove(node);
memcpy(ins, get_irn_in(node), arity*sizeof(ins[0]));
kill_node(node);
for (i = 0; i < arity; ++i)
for (int i = 0; i < arity; ++i)
kill_unused_stacknodes(ins[i]);
}
}
@@ -103,17 +102,12 @@ void sparc_introduce_prolog_epilog(ir_graph *irg)
unsigned frame_size = get_type_size_bytes(frame_type);
/* introduce epilog for every return node */
{
ir_node *end_block = get_irg_end_block(irg);
int arity = get_irn_arity(end_block);
int i;
for (i = 0; i < arity; ++i) {
for (int i = 0, arity = get_irn_arity(end_block); i < arity; ++i) {
ir_node *ret = get_irn_n(end_block, i);
assert(is_sparc_Return(ret));
introduce_epilog(ret);
}
}
while (be_is_Keep(sched_next(schedpoint)))
schedpoint = sched_next(schedpoint);
@@ -172,7 +166,7 @@ static void finish_sparc_Save(ir_node *node)
sparc_attr_t *attr = get_sparc_attr(node);
int offset = attr->immediate_value;
if (! sparc_is_value_imm_encodeable(offset)) {
if (!sparc_is_value_imm_encodeable(offset)) {
ir_node *base = get_irn_n(node, n_sparc_Save_stack);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -198,7 +192,7 @@ static void finish_be_IncSP(ir_node *node)
int offset = be_get_IncSP_offset(node);
/* we might have to break the IncSP apart if the constant has become too big */
if (! sparc_is_value_imm_encodeable(offset) && ! sparc_is_value_imm_encodeable(-offset)) {
if (!sparc_is_value_imm_encodeable(offset) && !sparc_is_value_imm_encodeable(-offset)) {
ir_node *sp = be_get_IncSP_pred(node);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -221,7 +215,7 @@ static void finish_sparc_FrameAddr(ir_node *node)
sparc_attr_t *attr = get_sparc_attr(node);
int offset = attr->immediate_value;
if (! sparc_is_value_imm_encodeable(offset)) {
if (!sparc_is_value_imm_encodeable(offset)) {
ir_node *base = get_irn_n(node, n_sparc_FrameAddr_base);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -241,10 +235,10 @@ static void finish_sparc_Ld(ir_node *node)
int offset = attr->immediate_value;
const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
if (! load_store_attr->is_frame_entity)
if (!load_store_attr->is_frame_entity)
return;
if (! sparc_is_value_imm_encodeable(offset)) {
if (!sparc_is_value_imm_encodeable(offset)) {
ir_node *ptr = get_irn_n(node, n_sparc_Ld_ptr);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -302,10 +296,10 @@ static void finish_sparc_Ldf(ir_node *node)
int offset = attr->immediate_value;
const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
if (! load_store_attr->is_frame_entity)
if (!load_store_attr->is_frame_entity)
return;
if (! sparc_is_value_imm_encodeable(offset)) {
if (!sparc_is_value_imm_encodeable(offset)) {
ir_node *ptr = get_irn_n(node, n_sparc_Ldf_ptr);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -334,10 +328,10 @@ static void finish_sparc_St(ir_node *node)
int offset = attr->immediate_value;
const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
if (! load_store_attr->is_frame_entity)
if (!load_store_attr->is_frame_entity)
return;
if (! sparc_is_value_imm_encodeable(offset)) {
if (!sparc_is_value_imm_encodeable(offset)) {
ir_node *ptr = get_irn_n(node, n_sparc_St_ptr);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -366,10 +360,10 @@ static void finish_sparc_Stf(ir_node *node)
int offset = attr->immediate_value;
const sparc_load_store_attr_t *load_store_attr = get_sparc_load_store_attr_const(node);
if (! load_store_attr->is_frame_entity)
if (!load_store_attr->is_frame_entity)
return;
if (! sparc_is_value_imm_encodeable(offset)) {
if (!sparc_is_value_imm_encodeable(offset)) {
ir_node *ptr = get_irn_n(node, n_sparc_Stf_ptr);
dbg_info *dbgi = get_irn_dbg_info(node);
ir_node *block = get_nodes_block(node);
@@ -395,12 +389,11 @@ static void finish_sparc_Stf(ir_node *node)
static void peephole_be_IncSP(ir_node *node)
{
ir_node *pred;
node = be_peephole_IncSP_IncSP(node);
if (!be_is_IncSP(node))
return;
pred = be_get_IncSP_pred(node);
ir_node *pred = be_get_IncSP_pred(node);
if (is_sparc_Save(pred) && be_has_only_one_user(pred)) {
int offset = -be_get_IncSP_offset(node);
sparc_attr_t *attr = get_sparc_attr(pred);
@@ -570,14 +563,10 @@ static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
{
be_fec_env_t *env = (be_fec_env_t*)data;
const ir_mode *mode;
int align;
ir_entity *entity;
const sparc_load_store_attr_t *attr;
if (be_is_Reload(node) && be_get_frame_entity(node) == NULL) {
mode = get_irn_mode(node);
align = get_mode_size_bytes(mode);
ir_mode *mode = get_irn_mode(node);
unsigned align = get_mode_size_bytes(mode);
be_node_needs_frame_entity(env, node, mode, align);
return;
}
@@ -585,16 +574,16 @@ static void sparc_collect_frame_entity_nodes(ir_node *node, void *data)
if (!is_sparc_Ld(node) && !is_sparc_Ldf(node))
return;
attr = get_sparc_load_store_attr_const(node);
entity = attr->base.immediate_value_entity;
mode = attr->load_store_mode;
const sparc_load_store_attr_t *attr = get_sparc_load_store_attr_const(node);
ir_entity *entity = attr->base.immediate_value_entity;
ir_mode *mode = attr->load_store_mode;
if (entity != NULL)
return;
if (!attr->is_frame_entity)
return;
if (arch_get_irn_flags(node) & sparc_arch_irn_flag_needs_64bit_spillslot)
mode = mode_Lu;
align = get_mode_size_bytes(mode);
unsigned align = get_mode_size_bytes(mode);
be_node_needs_frame_entity(env, node, mode, align);
}
@@ -78,13 +78,10 @@ static ir_entity *create_64_intrinsic_fkt(ir_type *method, const ir_op *op,
const ir_mode *imode,
const ir_mode *omode, void *context)
{
ir_type *glob = get_glob_type();
const char *name;
ident *id;
ir_entity *result;
(void) context;
(void) omode;
const char *name;
if (op == op_Mul) {
name = "__muldi3";
} else if (op == op_Div) {
@@ -118,8 +115,9 @@ static ir_entity *create_64_intrinsic_fkt(ir_type *method, const ir_op *op,
} else {
panic("Can't lower unexpected 64bit operation %s", get_op_name(op));
}
id = new_id_from_str(name);
result = new_entity(glob, id, method);
ident *id = new_id_from_str(name);
ir_type *glob = get_glob_type();
ir_entity *result = new_entity(glob, id, method);
set_entity_ld_ident(result, id);
set_entity_visibility(result, ir_visibility_external);
return result;
@@ -70,8 +70,6 @@ static bool has_fp_conv_attr(const ir_node *node)
*/
static void sparc_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
{
const sparc_attr_t *sparc_attr;
switch (reason) {
case dump_node_opcode_txt:
fprintf(F, "%s", get_irn_opname(n));
@@ -82,7 +80,7 @@ static void sparc_dump_node(FILE *F, const ir_node *n, dump_reason_t reason)
case dump_node_info_txt:
arch_dump_reqs_and_registers(F, n);
sparc_attr = get_sparc_attr_const(n);
const sparc_attr_t *sparc_attr = get_sparc_attr_const(n);
if (sparc_attr->immediate_value_entity) {
ir_fprintf(F, "entity: %+F (offset %d)\n",
sparc_attr->immediate_value_entity,
@@ -227,14 +225,12 @@ static void init_sparc_attributes(ir_node *node, arch_irn_flags_t flags,
const arch_register_req_t **in_reqs,
int n_res)
{
ir_graph *irg = get_irn_irg(node);
struct obstack *obst = get_irg_obstack(irg);
backend_info_t *info;
arch_set_irn_flags(node, flags);
arch_set_irn_register_reqs_in(node, in_reqs);
info = be_get_info(node);
backend_info_t *info = be_get_info(node);
ir_graph *irg = get_irn_irg(node);
struct obstack *obst = get_irg_obstack(irg);
info->out_infos = NEW_ARR_DZ(reg_out_info_t, obst, n_res);
}
@@ -171,19 +171,16 @@ static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
ir_type *frame_type = get_irg_frame_type(irg);
size_t n_frame_members = get_compound_n_members(frame_type);
size_t f;
size_t i;
ir_type *res = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));
/* search for existing value_param entities */
for (f = n_frame_members; f > 0; ) {
ir_entity *member = get_compound_member(frame_type, --f);
size_t num;
for (size_t f = n_frame_members; f-- > 0; ) {
ir_entity *member = get_compound_member(frame_type, f);
if (!is_parameter_entity(member))
continue;
num = get_entity_parameter_number(member);
size_t num = get_entity_parameter_number(member);
if (num == IR_VA_START_PARAMETER_NUMBER) {
if (va_start_entity != NULL)
panic("multiple va_start entities found (%+F,%+F)",
@@ -201,7 +198,7 @@ static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
}
/* calculate offsets/create missing entities */
for (i = 0; i < n_params; ++i) {
for (size_t i = 0; i < n_params; ++i) {
reg_or_stackslot_t *param = &cconv->parameters[i];
ir_entity *entity = param_map[i];
@@ -227,8 +224,8 @@ static ir_type *compute_arg_type(ir_graph *irg, calling_convention_t *cconv,
* original number of parameters */
ir_type *non_lowered = get_higher_type(mtp);
size_t orig_n_params = get_method_n_params(non_lowered);
long offset;
assert(get_method_variadicity(mtp) == variadicity_variadic);
long offset;
if (orig_n_params < n_params) {
assert(param_map[orig_n_params] != NULL);
offset = get_entity_offset(param_map[orig_n_params]);
@@ -247,10 +244,9 @@ void sparc_create_stacklayout(ir_graph *irg, calling_convention_t *cconv)
void sparc_create_stacklayout(ir_graph *irg, calling_convention_t *cconv)
{
be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
ir_type *between_type;
memset(layout, 0, sizeof(*layout));
between_type = new_type_class(new_id_from_str("sparc_between_type"));
ir_type *between_type = new_type_class(new_id_from_str("sparc_between_type"));
if (cconv->omit_fp) {
set_type_size_bytes(between_type, 0);
} else {