Commit 18489c39 authored by Christoph Mallon

util: Add and use MEMCPY().

parent 79a69c11
@@ -16,6 +16,7 @@
#include "fourcc.h"
#include "pdeq.h"
#include "util.h"
#include "xmalloc.h"
/* Pointer Double Ended Queue */
@@ -401,11 +402,11 @@ void **pdeq_copyl(pdeq *dq, const void **dst)
if (n + p > NDATA) {
/* p is always < NDATA */
size_t nn = NDATA - p;
memcpy((void *) d, &q->data[p], nn * sizeof(void *)); d += nn;
MEMCPY(d, &q->data[p], nn); d += nn;
p = 0; n -= nn;
}
memcpy((void *) d, &q->data[p], n * sizeof(void *)); d += n;
MEMCPY(d, &q->data[p], n); d += n;
q = q->r;
}
@@ -13,6 +13,7 @@
#define FIRM_ADT_UTIL_H
#include <stddef.h>
#include <string.h>
/**
* Returns size of a static array. Warning: This returns invalid values for
@@ -49,4 +50,13 @@
#define QSORT_ARR(base, cmp) QSORT((base), ARR_LEN((base)), (cmp))
static inline void *safe_memcpy(void* const dst, void const* const src, size_t const n)
{
/* Calling memcpy with a null pointer leads to undefined behavior,
* even if we copy zero bytes (C99 7.21.1.p2). */
return n != 0 ? memcpy(dst, src, n) : dst;
}
#define MEMCPY(dst, src, n) safe_memcpy((dst), (src), (n) * sizeof(*(1 ? (dst) : (src))))
#endif
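
A self-contained sketch of how the new macro behaves, restating safe_memcpy() and MEMCPY() from the hunk above so it compiles on its own; main() and its variables are illustrative only, not part of the commit:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Same rationale as the commit's comment: calling memcpy with a null pointer
 * is undefined behavior even when n is 0 (C99 7.21.1p2), so skip the call. */
static inline void *safe_memcpy(void *const dst, void const *const src, size_t const n)
{
	return n != 0 ? memcpy(dst, src, n) : dst;
}

/* n is an element count, not a byte count; the element size is taken from
 * *dst.  The `1 ? (dst) : (src)` operand requires dst and src to have
 * compatible pointer types, so mismatched element types typically draw a
 * compiler diagnostic. */
#define MEMCPY(dst, src, n) safe_memcpy((dst), (src), (n) * sizeof(*(1 ? (dst) : (src))))

int main(void)
{
	int const src[4] = { 1, 2, 3, 4 };
	int       dst[4];
	MEMCPY(dst, src, 4);     /* 4 elements, i.e. 4 * sizeof(int) bytes */

	int *none = NULL;
	MEMCPY(none, none, 0);   /* well-defined: memcpy is never called */

	printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
	return 0;
}
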
@@ -68,7 +68,7 @@ void cg_set_call_callee_arr(ir_node *node, size_t n, ir_entity **arr)
ir_graph *const irg = get_irn_irg(node);
node->attr.call.callee_arr = NEW_ARR_D(ir_entity*, get_irg_obstack(irg), n);
}
memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
MEMCPY(node->attr.call.callee_arr, arr, n);
}
void cg_remove_call_callee_arr(ir_node *node)
@@ -187,8 +187,8 @@ void be_Perm_reduce(ir_node *perm, int new_size, int *map)
ir_node **new_in = ALLOCAN(ir_node*, new_size);
/* save the old register data */
memcpy(old_in_reqs, info->in_reqs, arity * sizeof(old_in_reqs[0]));
memcpy(old_infos, info->out_infos, arity * sizeof(old_infos[0]));
MEMCPY(old_in_reqs, info->in_reqs, arity);
MEMCPY(old_infos, info->out_infos, arity);
/* compose the new in array and set the new register data directly */
for (int i = 0; i < new_size; ++i) {
@@ -209,7 +209,7 @@ ir_node *be_new_MemPerm(ir_node *const block, int n, ir_node *const *const in)
ir_node **real_in = ALLOCAN(ir_node*, n + 1);
real_in[0] = frame;
memcpy(&real_in[1], in, n * sizeof(real_in[0]));
MEMCPY(&real_in[1], in, n);
ir_node *irn = new_ir_node(NULL, irg, block, op_be_MemPerm, mode_T, n+1, real_in);
@@ -305,7 +305,7 @@ ir_node *be_new_CopyKeep(ir_node *bl, ir_node *src, int n, ir_node *in_keep[])
int arity = n+1;
ir_node **in = ALLOCAN(ir_node*, arity);
in[0] = src;
memcpy(&in[1], in_keep, n * sizeof(in[0]));
MEMCPY(&in[1], in_keep, n);
ir_node *irn = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, arity, in);
init_node_attr(irn, arity, 1, arch_irn_flag_schedule_first);
be_node_attr_t *attr = (be_node_attr_t*)get_irn_generic_attr(irn);
@@ -173,7 +173,7 @@ static void mark_as_copy_of(ir_node *copy, ir_node *value)
copy_info->original_value = original;
/* copy over allocation preferences */
memcpy(copy_info->prefs, info->prefs, n_regs * sizeof(copy_info->prefs[0]));
MEMCPY(copy_info->prefs, info->prefs, n_regs);
}
/**
@@ -467,7 +467,7 @@ static void set_congruence_prefs(ir_node *node, void *data)
allocation_info_t *head_info = get_allocation_info(head);
allocation_info_t *info = get_allocation_info(node);
memcpy(info->prefs, head_info->prefs, n_regs * sizeof(info->prefs[0]));
MEMCPY(info->prefs, head_info->prefs, n_regs);
}
static void combine_congruence_classes(void)
@@ -123,7 +123,7 @@ static void workset_copy(workset_t *dest, const workset_t *src)
static void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs)
{
workset->len = count;
memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
MEMCPY(&workset->vals[0], locs, count);
}
/**
@@ -150,7 +150,7 @@ void be_subtract_node_stats(be_node_stats_t *stats, be_node_stats_t *sub)
void be_copy_node_stats(be_node_stats_t *dest, be_node_stats_t *src)
{
memcpy(dest, src, sizeof(be_node_stats_t));
MEMCPY(dest, src, 1);
}
static const char *get_stat_name(enum be_stat_tag_t tag)
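
Note on the hunk above: MEMCPY() takes an element count, so copying one whole structure uses a count of 1 and the byte size falls out of sizeof(*dest). A minimal illustration, repeating the util.h definitions for self-containment; stats_t is a hypothetical stand-in for be_node_stats_t:

#include <stddef.h>
#include <string.h>

static inline void *safe_memcpy(void *const dst, void const *const src, size_t const n)
{
	return n != 0 ? memcpy(dst, src, n) : dst;
}
#define MEMCPY(dst, src, n) safe_memcpy((dst), (src), (n) * sizeof(*(1 ? (dst) : (src))))

typedef struct { unsigned long counts[8]; } stats_t;  /* stand-in for be_node_stats_t */

void copy_stats(stats_t *dest, stats_t const *src)
{
	/* One element of *dest, i.e. sizeof(stats_t) bytes. */
	MEMCPY(dest, src, 1);
}
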
@@ -23,6 +23,7 @@
#include "ia32_architecture.h"
#include "ia32_common_transform.h"
#include "ia32_new_nodes.h"
#include "util.h"
#include "gen_ia32_new_nodes.h"
#include "gen_ia32_regalloc_if.h"
@@ -548,7 +549,7 @@ ir_node *ia32_gen_ASM(ir_node *node)
memcpy(new_in_reg_reqs, in_reg_reqs,
n_ins*sizeof(new_in_reg_reqs[0]));
ir_node **new_in = ALLOCANZ(ir_node*, in_size);
memcpy(new_in, in, n_ins*sizeof(new_in[0]));
MEMCPY(new_in, in, n_ins);
in_reg_reqs = new_in_reg_reqs;
in = new_in;
@@ -1466,7 +1466,7 @@ static void pick_delay_slots(size_t n_blocks, ir_node **blocks)
{
/* create blocklist sorted by execution frequency */
ir_node **sorted_blocks = XMALLOCN(ir_node*, n_blocks);
memcpy(sorted_blocks, blocks, n_blocks*sizeof(sorted_blocks[0]));
MEMCPY(sorted_blocks, blocks, n_blocks);
QSORT(sorted_blocks, n_blocks, cmp_block_execfreqs);
for (size_t i = 0; i < n_blocks; ++i) {
@@ -39,6 +39,7 @@
#include "bestack.h"
#include "beutil.h"
#include "panic.h"
#include "util.h"
static int get_first_same(const arch_register_req_t *req)
{
@@ -133,7 +134,7 @@ static void kill_unused_stacknodes(ir_node *node)
int arity = get_irn_arity(node);
ir_node **ins = ALLOCAN(ir_node*, arity);
sched_remove(node);
memcpy(ins, get_irn_in(node), arity*sizeof(ins[0]));
MEMCPY(ins, get_irn_in(node), arity);
kill_node(node);
for (int i = 0; i < arity; ++i)
@@ -51,7 +51,7 @@ ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
int const r_arity = arity + 1;
ir_node **const r_in = ALLOCAN(ir_node*, r_arity);
r_in[0] = mem;
memcpy(&r_in[1], in, arity * sizeof(r_in[1]));
MEMCPY(&r_in[1], in, arity);
ir_node *res = new_ir_node(db, irg, block, op_ASM, mode_T, r_arity, r_in);
@@ -63,9 +63,9 @@ ir_node *new_rd_ASM(dbg_info *db, ir_node *block, ir_node *mem,
a->clobbers = NEW_ARR_D(ident*, obst, n_clobber);
a->text = text;
memcpy(a->input_constraints, inputs, sizeof(inputs[0]) * arity);
memcpy(a->output_constraints, outputs, sizeof(outputs[0]) * n_outs);
memcpy(a->clobbers, clobber, sizeof(clobber[0]) * n_clobber);
MEMCPY(a->input_constraints, inputs, arity);
MEMCPY(a->output_constraints, outputs, n_outs);
MEMCPY(a->clobbers, clobber, n_clobber);
verify_new_node(irg, res);
res = optimize_node(res);
@@ -99,7 +99,7 @@ ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
res->in = NEW_ARR_F(ir_node *, (arity+1));
else
res->in = NEW_ARR_D(ir_node*, get_irg_obstack(irg), arity + 1);
memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
MEMCPY(&res->in[1], in, arity);
}
res->in[0] = block;
@@ -178,10 +178,7 @@ void set_irn_in(ir_node *const node, int const arity, ir_node *const *const in)
}
fix_backedges(get_irg_obstack(irg), node);
/* Calling memcpy with a null pointer leads to undefined behavior,
* even if we copy zero bytes (C99 7.21.1.p2). */
if (arity > 0)
memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);
MEMCPY(*pOld_in + 1, in, arity);
/* update irg flags */
clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
@@ -7822,7 +7822,7 @@ void ir_normalize_node(ir_node *n)
if (!ins_sorted) {
ir_node **ins = get_irn_in(n)+1;
ir_node **new_ins = XMALLOCN(ir_node*, arity);
memcpy(new_ins, ins, arity*sizeof(ins[0]));
MEMCPY(new_ins, ins, arity);
QSORT(new_ins, arity, cmp_node_nr);
set_irn_in(n, arity, new_ins);
free(new_ins);
@@ -7915,7 +7915,7 @@ ir_node *optimize_node(ir_node *n)
oldn->in = ALLOCAN(ir_node*, n_in);
/* ARG, copy the in array, we need it for statistics */
memcpy(oldn->in, n->in, n_in * sizeof(n->in[0]));
MEMCPY(oldn->in, n->in, n_in);
/* note the inplace edges module */
edges_node_deleted(n);
@@ -415,7 +415,7 @@ int lc_evpprintf(const lc_arg_env_t *env, lc_appendable_t *app, const char *fmt,
lc_arg_t tmp;
name = (char*) malloc(sizeof(char) * (n + 1));
memcpy(name, named, sizeof(char) * n);
MEMCPY(name, named, n);
name[n] = '\0';
tmp.name = name;
@@ -147,7 +147,7 @@ static unsigned optimize_pointless_forks(ir_node *block, unsigned n_cfgpreds,
if (new_cfgpreds == NULL) {
new_cfgpreds = XMALLOCN(ir_node*, n_cfgpreds);
memcpy(new_cfgpreds, cfgpreds, n_cfgpreds*sizeof(cfgpreds[0]));
MEMCPY(new_cfgpreds, cfgpreds, n_cfgpreds);
}
if (is_Cond(cfop)) {
/* replace Cond with Jmp */
@@ -2099,7 +2099,7 @@ static void combine_memop(ir_node *sync, void *data)
unsigned machine_size = be_get_machine_size();
int n_preds = get_Sync_n_preds(sync);
ir_node **new_in = ALLOCAN(ir_node*, n_preds);
memcpy(new_in, get_irn_in(sync)+1, n_preds * sizeof(new_in[0]));
MEMCPY(new_in, get_irn_in(sync) + 1, n_preds);
QSORT(new_in, n_preds, cmp_ptrs);
@@ -1197,7 +1197,7 @@ static int backward_antic(block_t *bl)
int i;
rbitset_copy(env.curr_set, succ_bl->anticL_in, env.rbs_size);
memcpy(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
MEMCPY(env.curr_id_2_memop, succ_bl->id_2_memop_antic, env.rbs_size);
/* Hmm: probably we want kill merges of Loads ans Stores here */
for (i = n - 1; i > 0; --i) {
@@ -1239,7 +1239,7 @@ static int backward_antic(block_t *bl)
}
}
memcpy(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
MEMCPY(bl->id_2_memop_antic, env.curr_id_2_memop, env.rbs_size);
if (! rbitsets_equal(bl->anticL_in, env.curr_set, env.rbs_size)) {
/* changed */
rbitset_copy(bl->anticL_in, env.curr_set, env.rbs_size);
@@ -1577,7 +1577,7 @@ static int insert_Load(block_t *bl)
rbitset_copy(env.curr_set, pred_bl->avail_out, env.rbs_size);
memcpy(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
MEMCPY(env.curr_id_2_memop, pred_bl->id_2_memop_avail, env.rbs_size);
}
if (n > 1) {
@@ -1712,7 +1712,7 @@ static int insert_Load(block_t *bl)
calc_gen_kill_avail(bl);
/* always update the map after gen/kill, as values might have been changed due to RAR/WAR/WAW */
memcpy(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
MEMCPY(bl->id_2_memop_avail, env.curr_id_2_memop, env.rbs_size);
if (!rbitsets_equal(bl->avail_out, env.curr_set, env.rbs_size)) {
/* the avail set has changed */
@@ -387,7 +387,7 @@ pattern_dumper_t *new_vcg_dumper(const char *vcg_name, unsigned max_pattern)
if (res) {
FILE *f;
memcpy(res, &vcg_dump, sizeof(*res));
*res = vcg_dump;
priv = (vcg_private_t *)(res + 1);
memset(priv, 0, sizeof(*priv));
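
The hunk above takes a different route: since source and destination are complete objects of the same type, the memcpy becomes plain structure assignment, which the compiler sizes and type-checks itself. A small sketch of that idea; dumper_t, vcg_template and init_dumper() are hypothetical stand-ins, not names from the commit:

typedef struct { int id; char const *name; } dumper_t;  /* hypothetical stand-in */

static dumper_t const vcg_template = { 1, "vcg" };

static void init_dumper(dumper_t *res)
{
	/* Previously: memcpy(res, &vcg_template, sizeof(*res));
	 * Structure assignment says the same thing without a size expression. */
	*res = vcg_template;
}

int main(void)
{
	dumper_t d;
	init_dumper(&d);
	return d.id == 1 ? 0 : 1;
}
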
@@ -708,10 +708,10 @@ ir_type *clone_type_method(ir_type *tp)
res->size = tp->size;
res->attr.ma.n_params = n_params;
res->attr.ma.params = XMALLOCN(ir_type*, n_params);
memcpy(res->attr.ma.params, tp->attr.ma.params, n_params * sizeof(res->attr.ma.params[0]));
MEMCPY(res->attr.ma.params, tp->attr.ma.params, n_params);
res->attr.ma.n_res = n_res;
res->attr.ma.res_type = XMALLOCN(ir_type*, n_res);
memcpy(res->attr.ma.res_type, tp->attr.ma.res_type, n_res * sizeof(res->attr.ma.res_type[0]));
MEMCPY(res->attr.ma.res_type, tp->attr.ma.res_type, n_res);
res->attr.ma.variadicity = tp->attr.ma.variadicity;
res->attr.ma.properties = tp->attr.ma.properties;
res->attr.ma.irg_calling_conv = tp->attr.ma.irg_calling_conv;