Commit 10e58f96 authored by yb9976

Fixed some warnings about unused variables.

parent aa2711d2
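Every hunk below follows the same pattern: a local variable is declared (and often assigned) but never read afterwards, so a build with warnings enabled flags it, and the fix is to drop the declaration and the dead store while keeping any call that is still needed for its side effects. As a rough illustration (a standalone sketch, not libfirm code; the function names and the gcc flags are assumptions made up for this example), the following C program reproduces the warning and the two ways this commit deals with it, including the "(void) attr;" cast emitted by the node generator script.

/* Illustrative sketch only -- not part of libfirm. Build with something like
 *   gcc -Wall -Wextra -Wunused-but-set-variable -c unused_example.c
 * to see the class of warnings this commit silences. */
#include <stdio.h>

static int compute(void)
{
	return 42; /* stands in for a call that must be kept for its side effects */
}

static void before(void)
{
	int result;        /* warning: variable 'result' set but not used */
	result = compute();
	printf("done\n");
}

static void after(void)
{
	compute();         /* keep the call, drop the dead variable and store */
	printf("done\n");
}

/* Generated code can instead silence the warning explicitly, which is what the
 * generator script in this commit does with "(void) attr;". */
static void generated_style(void)
{
	int attr = compute();
	(void) attr;       /* tells the compiler the value is deliberately unused */
}

int main(void)
{
	before();
	after();
	generated_style();
	return 0;
}

With the flags above, only before() should produce a warning; after() and generated_style() compile cleanly, which mirrors the edits in the hunks that follow.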
@@ -171,11 +171,10 @@ static int vrp_update_node(ir_node *node)
 	}
 	case iro_Rotl: {
-		const vrp_attr *vrp_left, *vrp_right;
+		const vrp_attr *vrp_left;
 		const ir_node *right = get_Rotl_right(node);
 		vrp_left = get_vrp_attr(get_Rotl_left(node));
-		vrp_right = get_vrp_attr(get_Rotl_right(node));
 		/* We can only compute this if the right value is a constant*/
 		if (is_Const(right)) {
@@ -187,10 +186,9 @@ static int vrp_update_node(ir_node *node)
 	}
 	case iro_Shl: {
-		const vrp_attr *vrp_left, *vrp_right;
+		const vrp_attr *vrp_left;
 		const ir_node *right = get_Shl_right(node);
 		vrp_left = get_vrp_attr(get_Shl_left(node));
-		vrp_right = get_vrp_attr(get_Shl_right(node));
 		/* We can only compute this if the right value is a constant*/
 		if (is_Const(right)) {
@@ -201,11 +199,10 @@ static int vrp_update_node(ir_node *node)
 	}
 	case iro_Shr: {
-		const vrp_attr *vrp_left, *vrp_right;
+		const vrp_attr *vrp_left;
 		const ir_node *right = get_Shr_right(node);
 		vrp_left = get_vrp_attr(get_Shr_left(node));
-		vrp_right = get_vrp_attr(get_Shr_right(node));
 		/* We can only compute this if the right value is a constant*/
 		if (is_Const(right)) {
@@ -216,11 +213,10 @@ static int vrp_update_node(ir_node *node)
 	}
 	case iro_Shrs: {
-		const vrp_attr *vrp_left, *vrp_right;
+		const vrp_attr *vrp_left;
 		const ir_node *right = get_Shrs_right(node);
 		vrp_left = get_vrp_attr(get_Shrs_left(node));
-		vrp_right = get_vrp_attr(get_Shrs_right(node));
 		/* We can only compute this if the right value is a constant*/
 		if (is_Const(right)) {
......
@@ -361,7 +361,6 @@ static ir_node *gen_Conv(ir_node *node)
 	} else { /* complete in gp registers */
 		int src_bits = get_mode_size_bits(src_mode);
 		int dst_bits = get_mode_size_bits(dst_mode);
-		int min_bits;
 		ir_mode *min_mode;
 		if (src_bits == dst_bits) {
@@ -370,10 +369,8 @@ static ir_node *gen_Conv(ir_node *node)
 		}
 		if (src_bits < dst_bits) {
-			min_bits = src_bits;
 			min_mode = src_mode;
 		} else {
-			min_bits = dst_bits;
 			min_mode = dst_mode;
 		}
......
@@ -329,7 +329,6 @@ static void emit_arm_SymConst(const ir_node *irn)
 {
 	const arm_SymConst_attr_t *attr = get_arm_SymConst_attr_const(irn);
 	sym_or_tv_t key, *entry;
-	unsigned label;
 	key.u.entity = attr->entity;
 	key.is_entity = true;
@@ -339,7 +338,6 @@ static void emit_arm_SymConst(const ir_node *irn)
 		/* allocate a label */
 		entry->label = get_unique_label();
 	}
-	label = entry->label;
 	/* load the symbol indirect */
 	be_emit_cstring("\tldr ");
@@ -368,7 +366,6 @@ static void emit_arm_FrameAddr(const ir_node *irn)
 static void emit_arm_fConst(const ir_node *irn)
 {
 	sym_or_tv_t key, *entry;
-	unsigned label;
 	ir_mode *mode;
 	key.u.tv = get_fConst_value(irn);
@@ -379,7 +376,6 @@ static void emit_arm_fConst(const ir_node *irn)
 		/* allocate a label */
 		entry->label = get_unique_label();
 	}
-	label = entry->label;
 	/* load the tarval indirect */
 	mode = get_irn_mode(irn);
......
@@ -1752,7 +1752,6 @@ static ir_node *get_stack_pointer_for(ir_node *node)
 {
 	/* get predecessor in stack_order list */
 	ir_node *stack_pred = be_get_stack_pred(abihelper, node);
-	ir_node *stack_pred_transformed;
 	ir_node *stack;
 	if (stack_pred == NULL) {
@@ -1762,8 +1761,8 @@ static ir_node *get_stack_pointer_for(ir_node *node)
 		return sp_proj;
 	}
-	stack_pred_transformed = be_transform_node(stack_pred);
-	stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
+	be_transform_node(stack_pred);
+	stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
 	if (stack == NULL) {
 		return get_stack_pointer_for(stack_pred);
 	}
......
@@ -396,7 +396,6 @@ static bool has_real_user(const ir_node *node)
 static ir_node *add_to_keep(ir_node *last_keep,
                             const arch_register_class_t *cls, ir_node *node)
 {
-	const ir_node *op;
 	if (last_keep != NULL) {
 		be_Keep_add_node(last_keep, cls, node);
 	} else {
@@ -410,7 +409,6 @@ static ir_node *add_to_keep(ir_node *last_keep,
 			sched_add_after(schedpoint, last_keep);
 		}
 	}
-	op = skip_Proj_const(node);
 	return last_keep;
 }
......
@@ -842,14 +842,13 @@ static void apply_coloring(co2_cloud_irn_t *ci, col_t col, int depth)
 {
 	const ir_node *irn = ci->inh.irn;
 	int *front = FRONT_BASE(ci, col);
-	int i, ok;
+	int i;
 	struct list_head changed;
 	INIT_LIST_HEAD(&changed);
 	DBG((ci->cloud->env->dbg, LEVEL_2, "%2{firm:indent}setting %+F to %d\n", depth, irn, col));
-	ok = change_color_single(ci->cloud->env, irn, col, &changed, depth);
-	// assert(ok && "Color changing may not fail while committing the coloring");
+	change_color_single(ci->cloud->env, irn, col, &changed, depth);
 	materialize_coloring(&changed);
 	for (i = 0; i < ci->mst_n_childs; ++i) {
......
@@ -87,7 +87,7 @@ ir_node *insert_Perm_after(ir_graph *irg, const arch_register_class_t *cls,
 	ir_nodeset_t live;
 	ir_nodeset_iterator_t iter;
-	ir_node *curr, *irn, *perm, **nodes;
+	ir_node *irn, *perm, **nodes;
 	size_t i, n;
 	DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
@@ -116,7 +116,6 @@ ir_node *insert_Perm_after(ir_graph *irg, const arch_register_class_t *cls,
 	sched_add_after(pos, perm);
 	free(nodes);
-	curr = perm;
 	for (i = 0; i < n; ++i) {
 		ir_node *perm_op = get_irn_n(perm, i);
 		const arch_register_t *reg = arch_get_irn_register(perm_op);
@@ -126,8 +125,6 @@ ir_node *insert_Perm_after(ir_graph *irg, const arch_register_class_t *cls,
 		ir_node *proj = new_r_Proj(perm, mode, i);
 		arch_set_irn_register(proj, reg);
-		curr = proj;
 		be_ssa_construction_init(&senv, irg);
 		be_ssa_construction_add_copy(&senv, perm_op);
 		be_ssa_construction_add_copy(&senv, proj);
......
@@ -501,7 +501,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
 	static const char suffix[] = ".prof";
 	size_t i, num_birgs;
-	int stat_active = 0;
 	be_main_env_t env;
 	char prof_filename[256];
 	be_irg_t *birgs;
@@ -567,8 +566,6 @@ static void be_main_loop(FILE *file_handle, const char *cup_name)
 		num_birgs++;
 	}
-	stat_active = stat_is_active();
 	/* For all graphs */
 	for (i = 0; i < num_birgs; ++i) {
 		be_irg_t *birg = &birgs[i];
......
@@ -641,7 +641,6 @@ ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp,
 {
 	ir_node *irn;
 	ir_node *in[n_be_AddSP_last];
-	const arch_register_class_t *cls;
 	ir_graph *irg;
 	be_node_attr_t *attr;
@@ -661,8 +660,6 @@ ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp,
 	be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp,
 	                             arch_register_req_type_produces_sp);
-	cls = arch_register_get_class(sp);
 	return irn;
 }
......
@@ -663,7 +663,7 @@ static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns)
 	ir_node *irn, *cand = NULL;
 	int max_prio = INT_MIN;
 	int cur_prio = INT_MIN;
-	int reg_fact, cand_reg_fact;
+	int reg_fact;
 	ir_nodeset_iterator_t iter;
 	/* Note: register pressure calculation needs an overhaul, you need correct
 	 * tracking for each register class indidually and weight by each class
@@ -706,7 +706,6 @@ static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns)
 		if (cur_prio > max_prio) {
 			cand = irn;
 			max_prio = cur_prio;
-			cand_reg_fact = reg_fact;
 		}
 		DBG((trace_env->dbg, LEVEL_4, "checked NODE %+F\n", irn));
......
@@ -199,15 +199,14 @@ void be_node_needs_frame_entity(be_fec_env_t *env, ir_node *node,
                                 const ir_mode *mode, int align)
 {
 	ir_node *spillnode = get_memory_edge(node);
-	spill_t *spill;
 	assert(spillnode != NULL);
 	/* walk upwards and collect all phis and spills on this way */
 	if (is_Phi(spillnode)) {
-		spill = collect_memphi(env, spillnode, mode, align);
+		collect_memphi(env, spillnode, mode, align);
 	} else {
-		spill = collect_spill(env, spillnode, mode, align);
+		collect_spill(env, spillnode, mode, align);
 	}
 	ARR_APP1(ir_node *, env->reloads, node);
......
@@ -811,13 +811,11 @@ static void autodetect_arch(void)
 	/* We use the cpuid instruction to detect the CPU features */
 	if (x86_toogle_cpuid()) {
 		cpuid_registers regs;
-		unsigned highest_level;
 		char vendorid[13];
 		x86_cpu_info_t cpu_info;
 		/* get vendor ID */
 		x86_cpuid(&regs, 0);
-		highest_level = regs.r.eax;
 		memcpy(&vendorid[0], &regs.r.ebx, 4);
 		memcpy(&vendorid[4], &regs.r.edx, 4);
 		memcpy(&vendorid[8], &regs.r.ecx, 4);
......
@@ -446,20 +446,12 @@ ir_node *ia32_gen_ASM(ir_node *node)
 	const ir_asm_constraint *in_constraints;
 	const ir_asm_constraint *out_constraints;
 	ident **clobbers;
-	int clobbers_flags = 0;
 	unsigned clobber_bits[N_IA32_CLASSES];
 	int out_size;
 	backend_info_t *info;
 	memset(&clobber_bits, 0, sizeof(clobber_bits));
-	/* workaround for lots of buggy code out there as most people think volatile
-	 * asm is enough for everything and forget the flags (linux kernel, etc.)
-	 */
-	if (get_irn_pinned(node) == op_pin_state_pinned) {
-		clobbers_flags = 1;
-	}
 	arity = get_irn_arity(node);
 	in = ALLOCANZ(ir_node*, arity);
@@ -472,7 +464,6 @@ ir_node *ia32_gen_ASM(ir_node *node)
 		if (strcmp(c, "memory") == 0)
 			continue;
 		if (strcmp(c, "cc") == 0) {
-			clobbers_flags = 1;
 			continue;
 		}
......
@@ -1262,11 +1262,6 @@ static void emit_ia32_SwitchJmp(const ir_node *node)
  */
 static void emit_ia32_Jmp(const ir_node *node)
 {
-	ir_node *block;
-	/* for now, the code works for scheduled and non-schedules blocks */
-	block = get_nodes_block(node);
 	/* we have a block schedule */
 	if (can_be_fallthrough(node)) {
 		ia32_emitf(node, "\t/* fallthrough to %L */\n");
@@ -3370,7 +3365,6 @@ static void bemit_ia32_jcc(const ir_node *node)
 	const ir_node *proj_false;
 	const ir_node *dest_true;
 	const ir_node *dest_false;
-	const ir_node *block;
 	cc = determine_final_cc(node, 0, cc);
@@ -3381,8 +3375,6 @@ static void bemit_ia32_jcc(const ir_node *node)
 	proj_false = get_proj(node, pn_ia32_Jcc_false);
 	assert(proj_false && "Jcc without false Proj");
-	block = get_nodes_block(node);
 	if (can_be_fallthrough(proj_true)) {
 		/* exchange both proj's so the second one can be omitted */
 		const ir_node *t = proj_true;
......
@@ -354,13 +354,11 @@ static void peephole_ia32_Test(ir_node *node)
  */
 static void peephole_ia32_Return(ir_node *node)
 {
-	ir_node *block, *irn;
+	ir_node *irn;
 	if (!ia32_cg_config.use_pad_return)
 		return;
-	block = get_nodes_block(node);
 	/* check if this return is the first on the block */
 	sched_foreach_reverse_from(node, irn) {
 		switch (get_irn_opcode(irn)) {
@@ -662,7 +660,6 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn)
 	int i, maxslot, inc_ofs, ofs;
 	ir_node *node, *pred_sp, *block;
 	ir_node *loads[MAXPUSH_OPTIMIZE];
-	ir_graph *irg;
 	unsigned regmask = 0;
 	unsigned copymask = ~0;
@@ -772,7 +769,6 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn)
 	/* create a new IncSP if needed */
 	block = get_nodes_block(irn);
-	irg = get_irn_irg(irn);
 	if (inc_ofs > 0) {
 		pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
 		sched_add_before(irn, pred_sp);
......
@@ -464,6 +464,7 @@ EOF
 	if (exists($n->{"init_attr"})) {
 		$temp .= "\tattr = (${attr_type}*)get_irn_generic_attr(res);\n";
+		$temp .= "\t(void) attr; /* avoid potential warning */\n";
 		$temp .= "\t".$n->{"init_attr"}."\n";
 	}
......
@@ -1439,7 +1439,6 @@ static ir_node *get_stack_pointer_for(ir_node *node)
 {
 	/* get predecessor in stack_order list */
 	ir_node *stack_pred = be_get_stack_pred(abihelper, node);
-	ir_node *stack_pred_transformed;
 	ir_node *stack;
 	if (stack_pred == NULL) {
@@ -1449,8 +1448,8 @@ static ir_node *get_stack_pointer_for(ir_node *node)
 		return sp_proj;
 	}
-	stack_pred_transformed = be_transform_node(stack_pred);
-	stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
+	be_transform_node(stack_pred);
+	stack = (ir_node*)pmap_get(node_to_stack, stack_pred);
 	if (stack == NULL) {
 		return get_stack_pointer_for(stack_pred);
 	}
......
@@ -605,7 +605,6 @@ int smaller_mode(const ir_mode *sm, const ir_mode *lm)
    and backwards without loss. */
 int values_in_mode(const ir_mode *sm, const ir_mode *lm)
 {
-	int sm_bits, lm_bits;
 	ir_mode_arithmetic arith;
 	assert(sm);
@@ -616,9 +615,6 @@ int values_in_mode(const ir_mode *sm, const ir_mode *lm)
 	if (sm == mode_b)
 		return mode_is_int(lm);
-	sm_bits = get_mode_size_bits(sm);
-	lm_bits = get_mode_size_bits(lm);
 	arith = get_mode_arithmetic(sm);
 	if (arith != get_mode_arithmetic(lm))
 		return 0;
......
@@ -1228,7 +1228,6 @@ static ir_node *equivalent_node_Phi(ir_node *n)
 	int i, n_preds;
 	ir_node *oldn = n;
 	ir_node *block;
-	ir_node *first_val = NULL; /* to shutup gcc */
 	if (!get_opt_optimize() &&
@@ -1237,8 +1236,6 @@ static ir_node *equivalent_node_Phi(ir_node *n)
 	n_preds = get_Phi_n_preds(n);
 	block = get_nodes_block(n);
 	/* Phi of dead Region without predecessors. */
 	if (n_preds == 0)
 		return n;
......
@@ -1056,7 +1056,6 @@ void apply_RII(pbqp_t *pbqp)
 	unsigned col_len;
 	unsigned row_index;
 	unsigned row_len;
-	unsigned node_len;
 	assert(pbqp_node_get_degree(node) == 2);
@@ -1113,7 +1112,6 @@ void apply_RII(pbqp_t *pbqp)
 	row_len = src_vec->len;
 	col_len = tgt_vec->len;
-	node_len = node_vec->len;
 	mat = pbqp_matrix_alloc(pbqp, row_len, col_len);
@@ -1232,14 +1230,12 @@ static void select_column(pbqp_edge_t *edge, unsigned col_index)
 static void select_row(pbqp_edge_t *edge, unsigned row_index)
 {
 	pbqp_matrix_t *mat;
-	pbqp_node_t *src_node;
 	pbqp_node_t *tgt_node;
 	vector_t *tgt_vec;
 	unsigned tgt_len;
 	unsigned tgt_index;
 	unsigned new_infinity = 0;
-	src_node = edge->src;
 	tgt_node = edge->tgt;
 	tgt_vec = tgt_node->costs;
......