Commit abbf9492 authored by Christoph Mallon's avatar Christoph Mallon
Browse files

Some more cleanup: Put the return type and other specifiers on the same line...

Some more cleanup: Put the return type and other specifiers on the same line as the declarator of a function declaration.

[r27155]
parent 41aa4c0d
......@@ -282,8 +282,7 @@ typedef int (ir_arr_cmp_func_t)(const void *a, const void *b);
 * @note Differs from bsearch(3), which does not give proper insert locations
 * in the case that the element is not contained in the array.
*/
static inline int
ir_arr_bsearch(const void *arr, size_t elm_size, ir_arr_cmp_func_t *cmp, const void *elm)
static inline int ir_arr_bsearch(const void *arr, size_t elm_size, ir_arr_cmp_func_t *cmp, const void *elm)
{
int hi = ARR_LEN(arr);
int lo = 0;
......
......@@ -129,10 +129,8 @@ static inline unsigned *rbitset_w_size_obstack_alloc(struct obstack *obst, unsig
*
* @return the new bitset
*/
static inline
unsigned *rbitset_duplicate_obstack_alloc(struct obstack *obst,
const unsigned *old_bitset,
unsigned size)
static inline unsigned *rbitset_duplicate_obstack_alloc(struct obstack *obst,
const unsigned *old_bitset, unsigned size)
{
unsigned size_bytes = BITSET_SIZE_BYTES(size);
unsigned *res = obstack_alloc(obst, size_bytes);
......
......@@ -262,14 +262,8 @@ op_pin_state is_irn_pinned_in_irg(const ir_node *node);
* @param arity The arity of the new node, <0 if can be changed dynamically.
* @param in An array of arity predecessor nodes.
*/
ir_node *
new_ir_node(dbg_info *db,
ir_graph *irg,
ir_node *block,
ir_op *op,
ir_mode *mode,
int arity,
ir_node *in[]);
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
ir_mode *mode, int arity, ir_node *in[]);
/**
* Return the block the node belongs to. This is only
......
......@@ -45,8 +45,7 @@ COMPILETIME_ASSERT(UINT_MAX == 4294967295U, uintmax)
*
* @note See hacker's delight, page 27.
*/
static inline
int add_saturated(int x, int y)
static inline int add_saturated(int x, int y)
{
int sum = x + y;
/*
......@@ -74,8 +73,8 @@ int add_saturated(int x, int y)
* @param x A 32-bit word.
* @return The number of bits set in x.
*/
static inline
unsigned popcnt(unsigned x) {
static inline unsigned popcnt(unsigned x)
{
x -= ((x >> 1) & 0x55555555);
x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
x = (x + (x >> 4)) & 0x0f0f0f0f;
......@@ -89,8 +88,8 @@ unsigned popcnt(unsigned x) {
* @param x The word.
* @return The number of leading (from the most significant bit) zeros.
*/
static inline
unsigned nlz(unsigned x) {
static inline unsigned nlz(unsigned x)
{
#ifdef USE_X86_ASSEMBLY
unsigned res;
if(x == 0)
......@@ -118,8 +117,8 @@ unsigned nlz(unsigned x) {
* @param x The word.
* @return The number of trailing zeros.
*/
static inline
unsigned ntz(unsigned x) {
static inline unsigned ntz(unsigned x)
{
#ifdef USE_X86_ASSEMBLY
unsigned res;
if(x == 0)
......@@ -162,8 +161,7 @@ unsigned ntz(unsigned x) {
* Returns the biggest power of 2 that is equal or smaller than @p x
* (see hackers delight power-of-2 boundaries, page 48)
*/
static inline
unsigned floor_po2(unsigned x)
static inline unsigned floor_po2(unsigned x)
{
#ifdef USE_X86_ASSEMBLY // in this case nlz is fast
if(x == 0)
......@@ -185,8 +183,7 @@ unsigned floor_po2(unsigned x)
* @remark x has to be <= 0x8000000 of course
* @note see hackers delight power-of-2 boundaries, page 48
*/
static inline
unsigned ceil_po2(unsigned x)
static inline unsigned ceil_po2(unsigned x)
{
if(x == 0)
return 0;
......@@ -209,8 +206,7 @@ unsigned ceil_po2(unsigned x)
/**
* Tests whether @p x is a power of 2
*/
static inline
int is_po2(unsigned x)
static inline int is_po2(unsigned x)
{
	/* Clearing the lowest set bit (x & (x-1)) leaves zero iff at most
	 * one bit was set.  Note: 0 also passes this test. */
	return !(x & (x - 1));
}
......
......@@ -219,8 +219,7 @@ size_t hashset_size(const HashSet *self)
* @note also see comments for hashset_insert()
* @internal
*/
static inline
InsertReturnValue insert_nogrow(HashSet *self, KeyType key)
static inline InsertReturnValue insert_nogrow(HashSet *self, KeyType key)
{
size_t num_probes = 0;
size_t num_buckets = self->num_buckets;
......@@ -270,8 +269,7 @@ InsertReturnValue insert_nogrow(HashSet *self, KeyType key)
* calculate shrink and enlarge limits
* @internal
*/
static inline
void reset_thresholds(HashSet *self)
static inline void reset_thresholds(HashSet *self)
{
self->enlarge_threshold = (size_t) HT_OCCUPANCY_FLT(self->num_buckets);
self->shrink_threshold = (size_t) HT_EMPTY_FLT(self->num_buckets);
......@@ -284,8 +282,7 @@ void reset_thresholds(HashSet *self)
* contains no deleted entries and the element doesn't exist in the hashset yet.
* @internal
*/
static
void insert_new(HashSet *self, unsigned hash, ValueType value)
static void insert_new(HashSet *self, unsigned hash, ValueType value)
{
size_t num_probes = 0;
size_t num_buckets = self->num_buckets;
......@@ -326,8 +323,7 @@ void insert_new(HashSet *self, unsigned hash, ValueType value)
* Resize the hashset
* @internal
*/
static inline
void resize(HashSet *self, size_t new_size)
static inline void resize(HashSet *self, size_t new_size)
{
size_t num_buckets = self->num_buckets;
size_t i;
......@@ -371,8 +367,7 @@ static inline void resize(HashSet *self, size_t new_size);
* grow the hashset if adding 1 more elements would make it too crowded
* @internal
*/
static inline
void maybe_grow(HashSet *self)
static inline void maybe_grow(HashSet *self)
{
size_t resize_to;
......@@ -388,8 +383,7 @@ void maybe_grow(HashSet *self)
* shrink the hashset if it is only sparsely filled
* @internal
*/
static inline
void maybe_shrink(HashSet *self)
static inline void maybe_shrink(HashSet *self)
{
size_t size;
size_t resize_to;
......@@ -516,8 +510,7 @@ void hashset_remove(HashSet *self, ConstKeyType key)
* Initializes hashset with a specific size
* @internal
*/
static inline
void init_size(HashSet *self, size_t initial_size)
static inline void init_size(HashSet *self, size_t initial_size)
{
if (initial_size < 4)
initial_size = 4;
......
......@@ -114,8 +114,7 @@ struct SET {
#ifdef STATS
void
MANGLEP(stats) (SET *table)
void MANGLEP(stats) (SET *table)
{
int nfree = 0;
#ifdef PSET
......@@ -126,8 +125,7 @@ MANGLEP(stats) (SET *table)
table->naccess, table->ncollision, table->nkey, table->ndups, table->max_chain_len, nfree);
}
static inline void
stat_chain_len (SET *table, int chain_len)
static inline void stat_chain_len(SET *table, int chain_len)
{
table->ncollision += chain_len;
if (table->max_chain_len < chain_len) table->max_chain_len = chain_len;
......@@ -149,8 +147,7 @@ stat_chain_len (SET *table, int chain_len)
const char *MANGLEP(tag);
void
MANGLEP(describe) (SET *table)
void MANGLEP(describe) (SET *table)
{
unsigned i, j, collide;
Element *ptr;
......@@ -180,8 +177,7 @@ MANGLEP(describe) (SET *table)
#endif /* !DEBUG */
SET *
(PMANGLE(new)) (MANGLEP(cmp_fun) cmp, int nslots)
SET *(PMANGLE(new)) (MANGLEP(cmp_fun) cmp, int nslots)
{
int i;
SET *table = XMALLOC(SET);
......@@ -221,8 +217,7 @@ SET *
}
void
PMANGLE(del) (SET *table)
void PMANGLE(del) (SET *table)
{
#ifdef DEBUG
MANGLEP(tag) = table->tag;
......@@ -231,8 +226,7 @@ PMANGLE(del) (SET *table)
xfree (table);
}
int
MANGLEP(count) (SET *table)
/* Return the number of keys currently stored in the table (nkey counter). */
int MANGLEP(count) (SET *table)
{
	return table->nkey;
}
......@@ -241,8 +235,7 @@ MANGLEP(count) (SET *table)
* do one iteration step, return 1
* if still data in the set, 0 else
*/
static inline int
iter_step (SET *table)
static inline int iter_step(SET *table)
{
if (++table->iter_j >= SEGMENT_SIZE) {
table->iter_j = 0;
......@@ -257,8 +250,7 @@ iter_step (SET *table)
/*
* finds the first entry in the table
*/
void *
MANGLEP(first) (SET *table)
void * MANGLEP(first) (SET *table)
{
assert (!table->iter_tail);
table->iter_i = 0;
......@@ -274,8 +266,7 @@ MANGLEP(first) (SET *table)
/*
* returns next entry in the table
*/
void *
MANGLEP(next) (SET *table)
void *MANGLEP(next) (SET *table)
{
if (!table->iter_tail)
return NULL;
......@@ -293,8 +284,7 @@ MANGLEP(next) (SET *table)
return table->iter_tail->entry.dptr;
}
void
MANGLEP(break) (SET *table)
/* Abort a running iteration: with iter_tail cleared, the iterator
 * reports end-of-set (MANGLEP(next) returns NULL). */
void MANGLEP(break) (SET *table)
{
	table->iter_tail = NULL;
}
......@@ -302,8 +292,7 @@ MANGLEP(break) (SET *table)
/*
* limit the hash value
*/
static inline unsigned
Hash (SET *table, unsigned h)
static inline unsigned Hash(SET *table, unsigned h)
{
unsigned address;
address = h & (table->maxp - 1); /* h % table->maxp */
......@@ -316,8 +305,7 @@ Hash (SET *table, unsigned h)
* returns non-zero if the number of elements in
* the set is greater then number of segments * MAX_LOAD_FACTOR
*/
static inline int
loaded (SET *table)
static inline int loaded(SET *table)
{
return ( ++table->nkey
> (table->nseg << SEGMENT_SIZE_SHIFT) * MAX_LOAD_FACTOR);
......@@ -331,8 +319,7 @@ loaded (SET *table)
* after all segments were split, table->p is set to zero and
* table->maxp is duplicated.
*/
static void
expand_table (SET *table)
static void expand_table(SET *table)
{
unsigned NewAddress;
int OldSegmentIndex, NewSegmentIndex;
......@@ -389,8 +376,7 @@ expand_table (SET *table)
}
void *
MANGLE(_,_search) (SET *table,
void * MANGLE(_,_search) (SET *table,
const void *key,
#ifndef PSET
size_t size,
......@@ -475,8 +461,7 @@ int pset_default_ptr_cmp(const void *x, const void *y)
return x != y;
}
void *
pset_remove (SET *table, const void *key, unsigned hash)
void *pset_remove(SET *table, const void *key, unsigned hash)
{
unsigned h;
Segment *CurrentSegment;
......@@ -529,15 +514,13 @@ pset_remove (SET *table, const void *key, unsigned hash)
}
void *
(pset_find) (SET *se, const void *key, unsigned hash)
/* Linkable wrapper: the parenthesized declarator suppresses expansion of
 * the like-named function-like macro, which the body then invokes.
 * NOTE(review): assumes pset_find is also a macro -- otherwise this
 * call would recurse; confirm against the header. */
void *(pset_find) (SET *se, const void *key, unsigned hash)
{
	return pset_find (se, key, hash);
}
void *
(pset_insert) (SET *se, const void *key, unsigned hash)
/* Linkable wrapper: the parenthesized declarator suppresses expansion of
 * the like-named function-like macro, which the body then invokes. */
void *(pset_insert) (SET *se, const void *key, unsigned hash)
{
	return pset_insert (se, key, hash);
}
......@@ -559,22 +542,19 @@ void pset_insert_pset_ptr(pset *target, pset *src)
#else /* !PSET */
void *
(set_find) (set *se, const void *key, size_t size, unsigned hash)
/* Linkable wrapper: the parenthesized declarator suppresses expansion of
 * the like-named function-like macro, which the body then invokes. */
void *(set_find) (set *se, const void *key, size_t size, unsigned hash)
{
	return set_find (se, key, size, hash);
}
void *
(set_insert) (set *se, const void *key, size_t size, unsigned hash)
/* Linkable wrapper: the parenthesized declarator suppresses expansion of
 * the like-named function-like macro, which the body then invokes. */
void *(set_insert) (set *se, const void *key, size_t size, unsigned hash)
{
	return set_insert (se, key, size, hash);
}
set_entry *
(set_hinsert) (set *se, const void *key, size_t size, unsigned hash)
/* Linkable wrapper: the parenthesized declarator suppresses expansion of
 * the like-named function-like macro, which the body then invokes. */
set_entry *(set_hinsert) (set *se, const void *key, size_t size, unsigned hash)
{
	return set_hinsert (se, key, size, hash);
}
......
......@@ -84,8 +84,7 @@ struct ir_exec_freq {
unsigned infeasible : 1;
};
static int
cmp_freq(const void *a, const void *b, size_t size)
static int cmp_freq(const void *a, const void *b, size_t size)
{
const freq_t *p = a;
const freq_t *q = b;
......@@ -94,8 +93,7 @@ cmp_freq(const void *a, const void *b, size_t size)
return !(p->irn == q->irn);
}
static freq_t *
set_find_freq(set * set, const ir_node * irn)
static freq_t *set_find_freq(set *set, const ir_node *irn)
{
freq_t query;
......@@ -103,8 +101,7 @@ set_find_freq(set * set, const ir_node * irn)
return set_find(set, &query, sizeof(query), HASH_PTR(irn));
}
static freq_t *
set_insert_freq(set * set, const ir_node * irn)
static freq_t *set_insert_freq(set *set, const ir_node *irn)
{
freq_t query;
......@@ -114,8 +111,7 @@ set_insert_freq(set * set, const ir_node * irn)
return set_insert(set, &query, sizeof(query), HASH_PTR(irn));
}
double
get_block_execfreq(const ir_exec_freq *ef, const ir_node * irn)
double get_block_execfreq(const ir_exec_freq *ef, const ir_node *irn)
{
if (!ef->infeasible) {
set *freqs = ef->set;
......@@ -139,8 +135,7 @@ get_block_execfreq_ulong(const ir_exec_freq *ef, const ir_node *bb)
return res;
}
static double *
solve_lgs(gs_matrix_t *mat, double *x, int size)
static double *solve_lgs(gs_matrix_t *mat, double *x, int size)
{
double init = 1.0 / size;
double dev;
......@@ -181,8 +176,7 @@ solve_lgs(gs_matrix_t *mat, double *x, int size)
/*
* Determine probability that predecessor pos takes this cf edge.
*/
static double
get_cf_probability(ir_node *bb, int pos, double loop_weight)
static double get_cf_probability(ir_node *bb, int pos, double loop_weight)
{
double sum = 0.0;
double cur = 1.0;
......@@ -260,8 +254,7 @@ static void collect_blocks(ir_node *bl, void *data)
set_insert_freq(freqs, bl);
}
ir_exec_freq *
compute_execfreq(ir_graph * irg, double loop_weight)
ir_exec_freq *compute_execfreq(ir_graph *irg, double loop_weight)
{
gs_matrix_t *mat;
int size;
......@@ -433,8 +426,7 @@ compute_execfreq(ir_graph * irg, double loop_weight)
return ef;
}
void
free_execfreq(ir_exec_freq *ef)
void free_execfreq(ir_exec_freq *ef)
{
del_set(ef->set);
unregister_hook(hook_node_info, &ef->hook);
......
......@@ -153,8 +153,7 @@ Cond_prob get_ProjX_probability(ir_node *n)
/* A walker that only visits the nodes we want to see. */
static void
my_irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
static void my_irg_walk_2_both(ir_node *node, irg_walk_func *pre, irg_walk_func *post, void * env)
{
int i;
set_irn_visited(node, current_ir_graph->visited);
......
......@@ -45,8 +45,8 @@ struct _ir_extblk {
* Checks whether a pointer points to a extended basic block.
* Intern version for libFirm.
*/
static inline int
_is_ir_extbb(const void *thing) {
/* Kind check: true iff @p thing carries the k_ir_extblk kind tag. */
static inline int _is_ir_extbb(const void *thing)
{
	return (get_kind(thing) == k_ir_extblk);
}
......@@ -54,8 +54,8 @@ _is_ir_extbb(const void *thing) {
* Gets the visited counter of an extended block.
* Internal version for libFirm.
*/
static inline ir_visited_t
_get_extbb_visited(const ir_extblk *blk) {
/* Read the visited counter stored in @p blk (must not be NULL). */
static inline ir_visited_t _get_extbb_visited(const ir_extblk *blk)
{
	assert(blk);
	return blk->visited;
}
......@@ -64,8 +64,8 @@ _get_extbb_visited(const ir_extblk *blk) {
* Sets the visited counter of an extended block.
* Internal version for libFirm.
*/
static inline void
_set_extbb_visited(ir_extblk *blk, ir_visited_t visited) {
/* Overwrite the visited counter of @p blk (must not be NULL). */
static inline void _set_extbb_visited(ir_extblk *blk, ir_visited_t visited)
{
	assert(blk);
	blk->visited = visited;
}
......@@ -74,8 +74,8 @@ _set_extbb_visited(ir_extblk *blk, ir_visited_t visited) {
* Mark an extended block as visited in a graph.
* Internal version for libFirm.
*/
static inline void
_mark_extbb_visited(ir_extblk *blk) {
/* Stamp @p blk with current_ir_graph's block_visited counter, marking it
 * as seen by the traversal that is currently running. */
static inline void _mark_extbb_visited(ir_extblk *blk)
{
	assert(blk);
	blk->visited = current_ir_graph->block_visited;
}
......@@ -84,8 +84,8 @@ _mark_extbb_visited(ir_extblk *blk) {
* Returns non-zero if an extended was visited.
* Internal version for libFirm.
*/
static inline int
_extbb_visited(const ir_extblk *blk) {
/* True iff @p blk's counter is at least current_ir_graph's block_visited,
 * i.e. the block was already stamped during the current traversal. */
static inline int _extbb_visited(const ir_extblk *blk)
{
	assert(blk);
	return blk->visited >= current_ir_graph->block_visited;
}
......@@ -94,8 +94,8 @@ _extbb_visited(const ir_extblk *blk) {
* Returns non-zero if an extended block was NOT visited.
* Internal version for libFirm.
*/
static inline int
_extbb_not_visited(const ir_extblk *blk) {
/* True iff @p blk's counter is older than current_ir_graph's
 * block_visited, i.e. the block was NOT yet stamped this traversal. */
static inline int _extbb_not_visited(const ir_extblk *blk)
{
	assert(blk);
	return blk->visited < current_ir_graph->block_visited;
}
......@@ -104,8 +104,8 @@ _extbb_not_visited(const ir_extblk *blk) {
* Returns the link field of an extended block.
* Internal version for libFirm.
*/
static inline void *
_get_extbb_link(const ir_extblk *blk) {
/* Read the generic link pointer of @p blk (must not be NULL). */
static inline void *_get_extbb_link(const ir_extblk *blk)
{
	assert(blk);
	return blk->link;
}
......@@ -114,8 +114,8 @@ _get_extbb_link(const ir_extblk *blk) {
* Sets the link field of an extended block.
* Internal version for libFirm.
*/
static inline void
_set_extbb_link(ir_extblk *blk, void *link) {
/* Store @p link in the generic link field of @p blk (must not be NULL). */
static inline void _set_extbb_link(ir_extblk *blk, void *link)
{
	assert(blk);
	blk->link = link;
}
......@@ -123,8 +123,8 @@ _set_extbb_link(ir_extblk *blk, void *link) {
/**
* Return the number of basis blocks of an extended block
*/
static inline int
_get_extbb_n_blocks(const ir_extblk *blk) {
/* Number of basic blocks in @p blk: the length of its blks array. */
static inline int _get_extbb_n_blocks(const ir_extblk *blk)
{
	assert(blk);
	return ARR_LEN(blk->blks);
}
......@@ -132,8 +132,7 @@ _get_extbb_n_blocks(const ir_extblk *blk) {
/**
* Return the i'th basis block of an extended block
*/
static inline ir_node *
_get_extbb_block(const ir_extblk *blk, int pos)
static inline ir_node *_get_extbb_block(const ir_extblk *blk, int pos)
{
assert(blk && 0 <= pos && pos < _get_extbb_n_blocks(blk));
return blk->blks[pos];
......@@ -142,8 +141,7 @@ _get_extbb_block(const ir_extblk *blk, int pos)
/**
* Return the leader basis block of an extended block
*/
static inline ir_node *
_get_extbb_leader(const ir_extblk *blk)
/* The leader is by convention the first entry of the blks array. */
static inline ir_node *_get_extbb_leader(const ir_extblk *blk)
{
	return blk->blks[0];
}
......
......@@ -109,44 +109,44 @@ void mature_loops(ir_loop *loop, struct obstack *obst);
/* -------- inline functions -------- */
static inline int
_is_ir_loop(const void *thing) {
/* Kind check: true iff @p thing carries the k_ir_loop kind tag. */
static inline int _is_ir_loop(const void *thing)
{
	return get_kind(thing) == k_ir_loop;
}
static inline void
_set_irg_loop(ir_graph *irg, ir_loop *loop) {
/* Attach the loop information @p loop to graph @p irg (must not be NULL). */
static inline void _set_irg_loop(ir_graph *irg, ir_loop *loop)
{
	assert(irg);
	irg->loop = loop;
}
static inline ir_loop *
_get_irg_loop(const ir_graph *irg) {
/* Return the loop information stored in graph @p irg.
 * NOTE(review): may be NULL if loop info was never computed -- confirm. */
static inline ir_loop *_get_irg_loop(const ir_graph *irg)
{
	assert(irg);
	return irg->loop;
}
static inline ir_loop *
_get_loop_outer_loop(const ir_loop *loop) {
/* Return the enclosing (outer) loop recorded in @p loop. */
static inline ir_loop *_get_loop_outer_loop(const ir_loop *loop)
{
	assert(_is_ir_loop(loop));
	return loop->outer_loop;
}
static inline int
_get_loop_depth(const ir_loop *loop) {
/* Return the nesting depth recorded in @p loop. */
static inline int _get_loop_depth(const ir_loop *loop)
{
	assert(_is_ir_loop(loop));
	return loop->depth;
}
static inline int
_get_loop_n_sons(const ir_loop *loop) {
/* Return the number of son loops recorded in @p loop. */
static inline int _get_loop_n_sons(const ir_loop *loop)
{
	assert(_is_ir_loop(loop));
	return loop->n_sons;
}
/* Uses temporary information to get the loop */
static inline ir_loop *
_get_irn_loop(const ir_node *n) {
static inline ir_loop *_get_irn_loop(const ir_node *n)
{
return n->