Commit 16fc4cdb authored by Christoph Mallon's avatar Christoph Mallon
Browse files

Fix several size_t related signed/unsigned warnings.

[r28330]
parent 6318fb44
......@@ -984,7 +984,7 @@ static void compute_loop_depth(ir_graph *irg, void *env)
/* For callees, we want to remember the Call nodes, too. */
typedef struct ana_entry2 {
	ir_loop **loop_stack;        /**< a stack of ir_loop entries */
	size_t    tos;               /**< the top of stack entry; size_t so it can
	                                  be compared against array lengths without
	                                  signed/unsigned warnings */
	int       recursion_nesting; /**< current recursion nesting depth */
} ana_entry2;
......@@ -1014,9 +1014,9 @@ static ir_loop *pop2(ana_entry2 *e)
*/
/**
 * Check whether loop @p g is already somewhere on the loop stack of @p e.
 *
 * @param e  the walker environment holding the loop stack and its top index
 * @param g  the loop to search for
 * @return   1 if @p g is on the stack, 0 otherwise
 *
 * The index is of type size_t, so the loop counts down by testing against 0
 * and decrementing inside the body — an unsigned counter must never be taken
 * below zero in the loop condition.
 */
static int in_stack(ana_entry2 *e, ir_loop *g)
{
	size_t i;
	for (i = e->tos; i != 0;) {
		if (e->loop_stack[--i] == g) return 1;
	}
	return 0;
}
......
......@@ -151,7 +151,7 @@ static inline int get_irn_dfn(ir_node *n)
/** An IR-node stack */
static ir_node **stack = NULL;
/** The top (index) of the IR-node stack */
static int tos = 0;
static size_t tos = 0;
/**
* Initializes the IR-node stack
......@@ -467,9 +467,10 @@ static int largest_dfn_pred(ir_node *n)
static ir_node *find_tail(ir_node *n)
{
ir_node *m;
int i, res_index = -2;
int res_index = -2;
size_t i;
m = stack[tos-1]; /* tos = top of stack */
m = stack[tos - 1]; /* tos = top of stack */
if (is_head(m, n)) {
res_index = smallest_dfn_pred(m, 0);
if ((res_index == -2) && /* no smallest dfn pred found. */
......@@ -478,16 +479,15 @@ static ir_node *find_tail(ir_node *n)
} else {
if (m == n)
return NULL;
for (i = tos-2; i >= 0; --i) {
m = stack[i];
for (i = tos - 1; i != 0;) {
m = stack[--i];
if (is_head(m, n)) {
res_index = smallest_dfn_pred(m, get_irn_dfn(m) + 1);
if (res_index == -2) /* no smallest dfn pred found. */
res_index = largest_dfn_pred(m);
if ((m == n) && (res_index == -2)) {
i = -1;
i = (size_t)-1;
}
break;
}
......@@ -496,15 +496,15 @@ static ir_node *find_tail(ir_node *n)
/* We should not walk past our selves on the stack: The upcoming nodes
are not in this loop. We assume a loop not reachable from Start. */
if (m == n) {
i = -1;
i = (size_t)-1;
break;
}
}
if (i < 0) {
if (i == (size_t)-1) {
/* A dead loop not reachable from Start. */
for (i = tos-2; i >= 0; --i) {
m = stack[i];
for (i = tos - 1; i != 0;) {
m = stack[--i];
if (is_endless_head(m, n)) {
res_index = smallest_dfn_pred (m, get_irn_dfn(m) + 1);
if (res_index == -2) /* no smallest dfn pred found. */
......
......@@ -939,8 +939,9 @@ static ir_region *acyclic_region_type(struct obstack *obst, ir_region *node)
*/
static void replace_pred(ir_region *succ, ir_region *reg)
{
int i, len = get_region_n_preds(succ);
int have_one = 0;
int have_one = 0;
size_t len = get_region_n_preds(succ);
size_t i;
for (i = 0; i < len; ++i) {
ir_region *pred = get_region_pred(succ, i);
......@@ -971,8 +972,9 @@ static void replace_pred(ir_region *succ, ir_region *reg)
*/
static void replace_succ(ir_region *pred, ir_region *reg)
{
int i, len = get_region_n_succs(pred);
int have_one = 0;
int have_one = 0;
size_t len = get_region_n_succs(pred);
size_t i;
for (i = 0; i < len; ++i) {
ir_region *succ = get_region_succ(pred, i);
......
......@@ -240,10 +240,10 @@ static void rsm_set_reg_value(register_state_mapping_t *rsm,
static ir_node *rsm_create_barrier(register_state_mapping_t *rsm,
ir_node *block)
{
int n_barrier_outs = ARR_LEN(rsm->regs);
size_t n_barrier_outs = ARR_LEN(rsm->regs);
ir_node **in = rsm->value_map;
ir_node *barrier;
int o;
size_t o;
assert(ARR_LEN(rsm->value_map) == n_barrier_outs);
......@@ -409,11 +409,11 @@ ir_node *be_epilog_create_barrier(beabi_helper_env_t *env, ir_node *block)
ir_node *be_epilog_create_return(beabi_helper_env_t *env, dbg_info *dbgi,
ir_node *block)
{
int n_return_in = ARR_LEN(env->epilog.regs);
size_t n_return_in = ARR_LEN(env->epilog.regs);
ir_node **in = env->epilog.value_map;
int n_res = 1; /* TODO */
unsigned pop = 0; /* TODO */
int i;
size_t i;
ir_node *ret;
assert(ARR_LEN(env->epilog.value_map) == n_return_in);
......
......@@ -736,13 +736,13 @@ static void build_affinity_chunks(co_mst_env_t *env)
static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
{
pqueue_t *grow = new_pqueue();
const ir_node *max_node = NULL;
int max_weight = 0;
int i;
pqueue_t *grow = new_pqueue();
ir_node const *max_node = NULL;
int max_weight = 0;
size_t i;
for (i = ARR_LEN(chunk->n) - 1; i >= 0; i--) {
const ir_node *irn = chunk->n[i];
for (i = ARR_LEN(chunk->n); i != 0;) {
const ir_node *irn = chunk->n[--i];
affinity_node_t *an = get_affinity_info(env->co, irn);
int w = 0;
neighb_t *neigh;
......@@ -764,8 +764,8 @@ static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chu
if (max_node) {
bitset_t *visited = bitset_irg_malloc(env->co->irg);
for (i = ARR_LEN(chunk->n) - 1; i >= 0; --i)
bitset_add_irn(visited, chunk->n[i]);
for (i = ARR_LEN(chunk->n); i != 0;)
bitset_add_irn(visited, chunk->n[--i]);
pqueue_put(grow, (void *) max_node, max_weight);
bitset_remv_irn(visited, max_node);
......@@ -1183,7 +1183,11 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
waitq *best_starts = NULL;
col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
bitset_t *visited;
int idx, len, i, nidx, pos;
int i;
size_t idx;
size_t len;
size_t nidx;
size_t pos;
struct list_head changed;
DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
......
......@@ -79,9 +79,9 @@ int be_nodes_equal(ir_node *node1, ir_node *node2)
{
const backend_info_t *info1 = be_get_info(node1);
const backend_info_t *info2 = be_get_info(node2);
int len = ARR_LEN(info1->out_infos);
int arity = get_irn_arity(node1);
int i;
size_t len = ARR_LEN(info1->out_infos);
size_t arity = get_irn_arity(node1);
size_t i;
if (ARR_LEN(info2->out_infos) != len)
return false;
......
......@@ -159,7 +159,7 @@ typedef struct block_sched_env_t {
*/
static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].already_sched;
......@@ -170,7 +170,7 @@ static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
*/
static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].already_sched = 1;
......@@ -264,7 +264,7 @@ static void make_users_ready(block_sched_env_t *env, ir_node *irn)
*/
static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].num_not_sched_user;
......@@ -275,7 +275,7 @@ static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n)
*/
static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].num_not_sched_user = num;
......@@ -286,7 +286,7 @@ static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, in
*/
static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].num_not_sched_user += num;
......
......@@ -1195,9 +1195,9 @@ static void compute_killing_function(rss_t *rss)
ir_nodeset_t y;
ir_nodeset_iterator_t iter;
child_t **sks = NEW_ARR_F(child_t *, 20);
int cur_len = 0;
int cur_size = 20;
int i;
size_t cur_len = 0;
size_t cur_size = 20;
size_t i;
ir_nodeset_init_size(&x, 10);
ir_nodeset_init_size(&y, 10);
......@@ -1218,8 +1218,8 @@ static void compute_killing_function(rss_t *rss)
t = select_child_max_cost(rss, &x, &y, t, cbc);
if (cur_len >= cur_size) {
ARR_EXTO(child_t *, sks, cur_size * 2);
cur_size *= 2;
ARR_EXTO(child_t *, sks, cur_size);
}
DBG((rss->dbg, LEVEL_2, "\t\tinsert child %+F (%.3f) into SKS at pos %d\n", t->irn, t->cost, cur_len));
......@@ -1237,8 +1237,8 @@ static void compute_killing_function(rss_t *rss)
DBG((rss->dbg, LEVEL_2, "\tprocessing SKS for cbc %d:\n", cbc->nr));
/* build killing function */
for (i = cur_len - 1; i >= 0; --i) { /* loop over sks in decreasing cost order */
child_t *t = sks[i];
for (i = cur_len; i != 0;) { /* loop over sks in decreasing cost order */
child_t *t = sks[--i];
rss_irn_t *rt = get_rss_irn(rss, t->irn);
plist_element_t *p_el;
......
......@@ -74,7 +74,7 @@ static ir_node *get_nodeset_node(const ir_nodeset_t *nodeset)
*/
static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].is_root;
......@@ -85,7 +85,7 @@ static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
*/
static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].is_root = 1;
......@@ -96,7 +96,7 @@ static inline void mark_root_node(trace_env_t *env, ir_node *n)
*/
static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].delay;
......@@ -107,7 +107,7 @@ static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
*/
static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].delay = delay;
......@@ -118,7 +118,7 @@ static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t
*/
static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].etime;
......@@ -129,7 +129,7 @@ static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
*/
static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].etime = etime;
......@@ -140,7 +140,7 @@ static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t
*/
static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].num_user;
......@@ -151,7 +151,7 @@ static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
*/
static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].num_user = num_user;
......@@ -162,7 +162,7 @@ static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_u
*/
static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].reg_diff;
......@@ -173,7 +173,7 @@ static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
*/
static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].reg_diff = reg_diff;
......@@ -184,7 +184,7 @@ static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
*/
static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].preorder;
......@@ -195,7 +195,7 @@ static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
*/
static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].preorder = pos;
......@@ -206,7 +206,7 @@ static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
*/
static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
return env->sched_info[idx].critical_path_len;
......@@ -217,7 +217,7 @@ static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
*/
static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
{
int idx = get_irn_idx(n);
unsigned const idx = get_irn_idx(n);
assert(idx < ARR_LEN(env->sched_info));
env->sched_info[idx].critical_path_len = len;
......
......@@ -617,7 +617,7 @@ static void decide_start_workset(const ir_node *block)
}
pressure = be_get_loop_pressure(loop_ana, cls, loop);
assert(ARR_LEN(delayed) <= (signed)pressure);
assert(ARR_LEN(delayed) <= pressure);
free_slots = n_regs - ARR_LEN(starters);
free_pressure_slots = n_regs - (pressure - ARR_LEN(delayed));
free_slots = MIN(free_slots, free_pressure_slots);
......@@ -628,7 +628,8 @@ static void decide_start_workset(const ir_node *block)
DB((dbg, DBG_START, "Loop pressure %d, taking %d delayed vals\n",
pressure, free_slots));
if (free_slots > 0) {
int i;
size_t i;
qsort(delayed, ARR_LEN(delayed), sizeof(delayed[0]), loc_compare);
for (i = 0; i < ARR_LEN(delayed) && free_slots > 0; ++i) {
......
......@@ -591,7 +591,7 @@ static void assign_spillslots(be_fec_env_t *env)
int spillcount = set_count(env->spills);
spill_slot_t *spillslots = ALLOCANZ(spill_slot_t, spillcount);
spill_t *spill;
int i;
size_t i;
/* construct spillslots */
foreach_set(env->spills, spill_t*, spill) {
......
......@@ -1420,7 +1420,7 @@ static const char* emit_asm_operand(const ir_node *node, const char *s)
s += p;
}
if (num < 0 || ARR_LEN(asm_regs) <= num) {
if (num < 0 || ARR_LEN(asm_regs) <= (size_t)num) {
ir_fprintf(stderr,
"Error: Custom assembler references invalid input/output (%+F)\n",
node);
......
......@@ -342,7 +342,7 @@ void add_irp_opcode(ir_op *opcode)
/* Removes opcode from the list of opcodes and shrinks the list by one. */
void remove_irp_opcode(ir_op *opcode)
{
	/* opcode->code is unsigned and ARR_LEN() yields a size_t, so compare
	 * without casting to int — the cast only reintroduced the
	 * signed/unsigned mismatch this code is meant to avoid. */
	assert(opcode->code < ARR_LEN(irp->opcodes));
	irp->opcodes[opcode->code] = NULL;
}
......
......@@ -508,7 +508,7 @@ static unsigned allocate_value_numbers(pset *sels, ir_entity *ent, unsigned vnum
SET_VNUM(sel, key->vnum);
DB((dbg, SET_LEVEL_3, " %+F represents value %u\n", sel, key->vnum));
ARR_EXTO(ir_mode *, *modes, (int)((key->vnum + 15) & ~15));
ARR_EXTO(ir_mode *, *modes, (key->vnum + 15) & ~15);
(*modes)[key->vnum] = get_type_mode(get_entity_type(get_Sel_entity(sel)));
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment