Commit cdbc2a0c authored by Matthias Braun

normalize spelling: optimisation => optimization

parent 3882b249
@@ -205,7 +205,7 @@ FIRM_API const backend_params *be_get_backend_param(void);
  * implementation of boolean values, if-conversion, with target specific
  * settings.
  * The resulting graph is still a "normal" firm-graph on which you can and
- * should perform further architecture-neutral optimisations before be_main.
+ * should perform further architecture-neutral optimizations before be_main.
  */
 FIRM_API void be_lower_for_target(void);
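The comment above prescribes a phase ordering: lower for the target first, then run architecture-neutral optimizations, then enter the backend. A minimal driver sketch, assuming libfirm's usual entry points (be_main's signature and the surrounding driver shape are assumptions, not part of this patch):

    #include <stdio.h>
    #include <libfirm/firm.h>

    static void compile_unit(FILE *out)
    {
        /* ... build and optimize firm graphs ... */
        be_lower_for_target();  /* target-specific lowering; result is still a normal firm graph */
        /* further architecture-neutral optimizations can still run here */
        be_main(out, "unit");   /* assumed entry point that emits code for all graphs */
    }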
@@ -238,7 +238,7 @@ static void peephole_arm_Str_Ldr(ir_node *node)
 /**
  * Register a peephole optimization function.
  */
-static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
+static void register_peephole_optimization(ir_op *op, peephole_opt_func func)
 {
 	assert(op->ops.generic == NULL);
 	op->ops.generic = (op_func)func;
@@ -249,10 +249,10 @@ void arm_peephole_optimization(ir_graph *irg)
 {
 	/* register peephole optimizations */
 	ir_clear_opcodes_generic_func();
-	register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
-	register_peephole_optimisation(op_arm_Str, peephole_arm_Str_Ldr);
-	register_peephole_optimisation(op_arm_Ldr, peephole_arm_Str_Ldr);
-	register_peephole_optimisation(op_arm_FrameAddr, peephole_arm_FrameAddr);
+	register_peephole_optimization(op_be_IncSP, peephole_be_IncSP);
+	register_peephole_optimization(op_arm_Str, peephole_arm_Str_Ldr);
+	register_peephole_optimization(op_arm_Ldr, peephole_arm_Str_Ldr);
+	register_peephole_optimization(op_arm_FrameAddr, peephole_arm_FrameAddr);
 	be_peephole_opt(irg);
 }
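The renamed helper stores each callback in the opcode's generic-function slot (op->ops.generic); the assert explains why every registration batch begins with ir_clear_opcodes_generic_func(). A sketch of the pattern with a made-up opcode and handler (op_arm_Example and peephole_arm_Example are hypothetical):

    /* hypothetical handler: be_peephole_opt() calls it once for every
     * scheduled node whose opcode registered it */
    static void peephole_arm_Example(ir_node *node)
    {
        (void)node;  /* inspect the node and rewrite it if profitable */
    }

    static void example_peephole_pass(ir_graph *irg)
    {
        ir_clear_opcodes_generic_func();  /* clean slate, or the assert fires */
        register_peephole_optimization(op_arm_Example, peephole_arm_Example);
        be_peephole_opt(irg);
    }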
@@ -5,7 +5,7 @@
 /**
  * @file
- * @brief Peephole optimisation framework keeps track of which registers contain which values
+ * @brief Peephole optimization framework keeps track of which registers contain which values
  * @author Matthias Braun
  */
 #include "array_t.h"
@@ -94,7 +94,7 @@ static void set_uses(ir_node *node)
 }
 
 /**
- * must be called from peephole optimisations before a node will be killed
+ * must be called from peephole optimizations before a node will be killed
  * and its users will be redirected to new_node.
  * so bepeephole can update its internal state.
  *
@@ -5,7 +5,7 @@
 /**
  * @file
- * @brief peephole optimisation framework
+ * @brief peephole optimization framework
  * @author Matthias Braun
  */
 #ifndef BEPEEPHOLE_H
@@ -27,12 +27,12 @@ static inline ir_node *be_peephole_get_reg_value(const arch_register_t *reg)
 }
 
 /**
- * Datatype of the generic op handler for optimisation.
+ * Datatype of the generic op handler for optimization.
  */
 typedef void (*peephole_opt_func) (ir_node *node);
 
 /**
- * When doing peephole optimisation use this function instead of plain
+ * When doing peephole optimization use this function instead of plain
  * exchange(), so it can update its internal state. This function also removes
  * the old node from the schedule.
  */
@@ -65,11 +65,11 @@ bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
                     const ir_node *after);
 
 /**
- * Do peephole optimisations. It traverses the schedule of all blocks in
+ * Do peephole optimizations. It traverses the schedule of all blocks in
  * backward direction. The register_values variable indicates which (live)
  * values are stored in which register.
  * The generic op handler is called for each node if it exists. That's where
- * backend specific optimisations should be performed based on the
+ * backend specific optimizations should be performed based on the
  * register-liveness information.
  */
 void be_peephole_opt(ir_graph *irg);
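A sketch of a handler built on this header: be_peephole_get_reg_value() reports which value a register currently holds, and replacements go through the framework's exchange helper described above (its name, be_peephole_exchange(), is taken from libfirm and is an assumption here, as is the whole handler):

    /* hypothetical handler: drop a node whose result is already live in a register */
    static void peephole_example(ir_node *node)
    {
        const arch_register_t *reg = NULL;  /* target-specific register of interest */
        ir_node *value = be_peephole_get_reg_value(reg);
        if (value != NULL)
            be_peephole_exchange(node, value);  /* keeps register state and schedule consistent */
    }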
@@ -1027,7 +1027,7 @@ static void ia32_emit(ir_graph *irg)
 	/*
 	 * Last touchups for the graph before emit: x87 simulation to replace the
 	 * virtual with real x87 instructions, creating a block schedule and
-	 * peephole optimisations.
+	 * peephole optimizations.
 	 */
 	ia32_irg_data_t *irg_data = ia32_get_irg_data(irg);
 	be_stack_layout_t *stack_layout = be_get_irg_stack_layout(irg);
@@ -1055,7 +1055,7 @@ static void ia32_emit(ir_graph *irg)
 		ia32_x87_simulate_graph(irg);
 	}
 
-	/* do peephole optimisations */
+	/* do peephole optimizations */
 	ia32_peephole_optimization(irg);
 
 	be_remove_dead_nodes_from_schedule(irg);
@@ -1678,8 +1678,8 @@ static void ia32_get_call_abi(ir_type *method_type, be_abi_call_t *abi)
 			be_abi_call_param_reg(abi, i, reg, ABI_CONTEXT_BOTH);
 			++regnum;
 		} else {
-			/* Micro optimisation: if the mode is shorter than 4 bytes, load 4 bytes.
-			 * movl has a shorter opcode than mov[sz][bw]l */
+			/* Micro optimization: if the mode is shorter than 4 bytes,
+			 * load 4 bytes. movl has a shorter opcode than mov[sz][bw]l */
 			ir_mode *load_mode = mode;
 
 			if (mode != NULL) {
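The encoding fact behind that micro-optimization, for reference (byte counts from the IA-32 instruction encoding, not from this patch): mov r32, r/m32 uses the single opcode byte 8B, while the widening forms need a two-byte 0F escape. For example, movl 4(%esp), %eax encodes as 8B 44 24 04 (4 bytes), but movzbl 4(%esp), %eax encodes as 0F B6 44 24 04 (5 bytes), so the plain 32-bit load saves a byte.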
@@ -237,7 +237,7 @@ static const lc_opt_table_entry_t ia32_architecture_options[] = {
 	LC_OPT_ENT_ENUM_INT("tune", "optimize for instruction architecture", &opt_arch_var),
 	LC_OPT_ENT_ENUM_INT("fpmath", "select the floating point unit", &fp_unit_var),
 	LC_OPT_ENT_BOOL ("optcc", "optimize calling convention", &opt_cc),
-	LC_OPT_ENT_BOOL ("unsafe_floatconv", "do unsafe floating point controlword optimisations", &opt_unsafe_floatconv),
+	LC_OPT_ENT_BOOL ("unsafe_floatconv", "do unsafe floating point controlword optimizations", &opt_unsafe_floatconv),
 	LC_OPT_ENT_BOOL ("machcode", "output machine code instead of assembler", &emit_machcode),
 	LC_OPT_ENT_BOOL ("soft-float", "equivalent to fpmath=softfloat", &use_softfloat),
 	LC_OPT_ENT_BOOL ("sse", "gcc compatibility", &use_sse),
@@ -767,7 +767,7 @@ static void peephole_be_IncSP(ir_node *node)
 }
 
 /**
- * Peephole optimisation for ia32_Const's
+ * Peephole optimization for ia32_Const's
  */
 static void peephole_ia32_Const(ir_node *node)
 {
@@ -1062,9 +1062,9 @@ static void peephole_ia32_Conv_I2I(ir_node *node)
 }
 
 /**
- * Register a peephole optimisation function.
+ * Register a peephole optimization function.
  */
-static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
+static void register_peephole_optimization(ir_op *op, peephole_opt_func func)
 {
 	assert(op->ops.generic == NULL);
 	op->ops.generic = (op_func)func;
@@ -1079,22 +1079,22 @@ void ia32_peephole_optimization(ir_graph *irg)
 	/* pass 1 */
 	ir_clear_opcodes_generic_func();
-	register_peephole_optimisation(op_ia32_Cmp, peephole_ia32_Cmp);
-	register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
+	register_peephole_optimization(op_ia32_Cmp, peephole_ia32_Cmp);
+	register_peephole_optimization(op_ia32_Lea, peephole_ia32_Lea);
 	if (ia32_cg_config.use_short_sex_eax)
-		register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);
+		register_peephole_optimization(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);
 	if (ia32_cg_config.use_pxor)
-		register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
+		register_peephole_optimization(op_ia32_xZero, peephole_ia32_xZero);
 	if (! ia32_cg_config.use_imul_mem_imm32)
-		register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
+		register_peephole_optimization(op_ia32_IMul, peephole_ia32_Imul_split);
 	be_peephole_opt(irg);
 
 	/* pass 2 */
 	ir_clear_opcodes_generic_func();
-	register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
-	register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
-	register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
-	register_peephole_optimisation(op_be_Return, peephole_ia32_Return);
+	register_peephole_optimization(op_ia32_Const, peephole_ia32_Const);
+	register_peephole_optimization(op_be_IncSP, peephole_be_IncSP);
+	register_peephole_optimization(op_ia32_Test, peephole_ia32_Test);
+	register_peephole_optimization(op_be_Return, peephole_ia32_Return);
 	be_peephole_opt(irg);
 }
@@ -1268,7 +1268,7 @@ static void optimize_conv_conv(ir_node *node)
 		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
 			result_conv = pred_proj;
 		} else {
-			/* no optimisation possible if smaller conv is sign-extend */
+			/* no optimization possible if smaller conv is sign-extend */
 			if (mode_is_signed(pred_mode)) {
 				return;
 			}
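Why a sign-extending smaller Conv blocks the fold, as a minimal C illustration (plain C, not libfirm code; it assumes the common wrapping behavior of out-of-range conversions to signed char): the intermediate signed narrow mode forces a sign extension that a single merged conversion would skip.

    #include <stdio.h>

    int main(void)
    {
        int x = 0x1ff;                                /* 511 */
        unsigned merged  = (unsigned)x;               /* one conv:  0x1ff */
        unsigned stacked = (unsigned)(signed char)x;  /* two convs: (signed char)0x1ff == -1, sign-extends to 0xffffffff */
        printf("%#x vs %#x\n", merged, stacked);      /* 0x1ff vs 0xffffffff */
        return 0;
    }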
@@ -674,7 +674,7 @@ static void peephole_sparc_SubCC(ir_node *node)
 	arch_set_irn_register_out(node, pn_sparc_SubCC_flags, NULL);
 }
 
-static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
+static void register_peephole_optimization(ir_op *op, peephole_opt_func func)
 {
 	assert(op->ops.generic == NULL);
 	op->ops.generic = (op_func) func;
@@ -771,25 +771,25 @@ void sparc_finish_graph(ir_graph *irg)
 	/* perform peephole optimizations */
 	ir_clear_opcodes_generic_func();
-	register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
-	register_peephole_optimisation(op_sparc_FrameAddr, peephole_sparc_FrameAddr);
-	register_peephole_optimisation(op_sparc_RestoreZero,
+	register_peephole_optimization(op_be_IncSP, peephole_be_IncSP);
+	register_peephole_optimization(op_sparc_FrameAddr, peephole_sparc_FrameAddr);
+	register_peephole_optimization(op_sparc_RestoreZero,
 	                               peephole_sparc_RestoreZero);
-	register_peephole_optimisation(op_sparc_Ldf, split_sparc_ldf);
-	register_peephole_optimisation(op_sparc_AddCC, peephole_sparc_AddCC);
-	register_peephole_optimisation(op_sparc_SubCC, peephole_sparc_SubCC);
+	register_peephole_optimization(op_sparc_Ldf, split_sparc_ldf);
+	register_peephole_optimization(op_sparc_AddCC, peephole_sparc_AddCC);
+	register_peephole_optimization(op_sparc_SubCC, peephole_sparc_SubCC);
 	be_peephole_opt(irg);
 
 	/* perform legalizations (mostly fix nodes with too big immediates) */
 	ir_clear_opcodes_generic_func();
-	register_peephole_optimisation(op_be_IncSP, finish_be_IncSP);
-	register_peephole_optimisation(op_sparc_FrameAddr, finish_sparc_FrameAddr);
-	register_peephole_optimisation(op_sparc_Ld, finish_sparc_Ld);
-	register_peephole_optimisation(op_sparc_Ldf, finish_sparc_Ldf);
-	register_peephole_optimisation(op_sparc_Return, finish_sparc_Return);
-	register_peephole_optimisation(op_sparc_Save, finish_sparc_Save);
-	register_peephole_optimisation(op_sparc_St, finish_sparc_St);
-	register_peephole_optimisation(op_sparc_Stf, finish_sparc_Stf);
+	register_peephole_optimization(op_be_IncSP, finish_be_IncSP);
+	register_peephole_optimization(op_sparc_FrameAddr, finish_sparc_FrameAddr);
+	register_peephole_optimization(op_sparc_Ld, finish_sparc_Ld);
+	register_peephole_optimization(op_sparc_Ldf, finish_sparc_Ldf);
+	register_peephole_optimization(op_sparc_Return, finish_sparc_Return);
+	register_peephole_optimization(op_sparc_Save, finish_sparc_Save);
+	register_peephole_optimization(op_sparc_St, finish_sparc_St);
+	register_peephole_optimization(op_sparc_Stf, finish_sparc_Stf);
 	be_peephole_opt(irg);
 
 	heights_free(heights);
@@ -5,7 +5,7 @@
 /**
  * @file
- * @brief Definitions for optimisation flags
+ * @brief Definitions for optimization flags
  * @author Michael Beck, Sebastian Hack
  */
@@ -5620,7 +5620,7 @@ static ir_node *transform_node_Mux(ir_node *n)
 		return n;
 	}
 
-	/* the following optimisations create new mode_b nodes, so only do them
+	/* the following optimizations create new mode_b nodes, so only do them
 	 * before mode_b lowering */
 	if (!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_MODEB_LOWERED)) {
 		if (is_Mux(t)) {
@@ -6766,7 +6766,7 @@ ir_node *optimize_in_place_2(ir_node *n)
 	/* Now we have a legal, useful node. Enter it in hash table for cse.
 	 *
-	 * Note: This is only necessary because some of the optimisations
+	 * Note: This is only necessary because some of the optimizations
 	 * operate in-place (set_XXX_bla, turn_into_tuple, ...) which is considered
 	 * bad practice and should be fixed sometime.
 	 */
@@ -35,7 +35,7 @@ static void unreachable_to_bad(ir_node *node, void *env)
 	ir_graph *irg;
 	int arity;
 	int i;
 
-	/* optimisation: we do not have to do anything inside the unreachable
+	/* optimization: we do not have to do anything inside the unreachable
 	 * code */
 	if (is_block_unreachable(node))
 		return;
@@ -54,7 +54,7 @@ static void unreachable_to_bad(ir_node *node, void *env)
 	int arity;
 	int i;
 	ir_graph *irg;
 
-	/* optimisation: we do not have to do anything inside the unreachable
+	/* optimization: we do not have to do anything inside the unreachable
 	 * code */
 	if (is_block_unreachable(block))
 		return;
@@ -586,7 +586,7 @@ static ir_entity *create_compound_arg_entity(ir_graph *irg, ir_type *type)
 	ident *id = id_unique("$compound_param.%u");
 	ir_entity *entity = new_entity(frame, id, type);
 
 	/* TODO:
-	 * we could do some optimisations here and create a big union type for all
+	 * we could do some optimizations here and create a big union type for all
 	 * different call types in a function */
 	return entity;
 }
@@ -5,7 +5,7 @@
 /**
  * @file
- * @brief conv node optimisation
+ * @brief conv node optimization
  * @author Matthias Braun, Christoph Mallon
  *
  * Try to minimize the number of conv nodes by changing modes of operations.
@@ -135,7 +135,7 @@ static int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
 	}
 
 	costs = 0;
-	// The shift count does not participate in the conv optimisation
+	// The shift count does not participate in the conv optimization
 	arity = is_Shl(node) ? 1 : get_irn_arity(node);
 	for (i = 0; i < arity; ++i) {
 		ir_node *pred = get_irn_n(node, i);
@@ -206,7 +206,7 @@ static ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
 	arity = get_irn_arity(node);
 	ins = ALLOCAN(ir_node *, arity);
 
-	// The shift count does not participate in the conv optimisation
+	// The shift count does not participate in the conv optimization
 	conv_arity = is_Shl(node) ? 1 : arity;
 	for (i = 0; i < conv_arity; i++) {
 		ir_node *pred = get_irn_n(node, i);
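The is_Shl() special case in both hunks above, illustrated in plain C (not libfirm code): in x << n only the shifted operand changes mode, the count keeps its own, which is why cost computation and transformation stop at operand 0.

    #include <stdint.h>

    static uint32_t widen_then_shift(uint16_t x, unsigned n)
    {
        /* widening touches only x; the shift count n needs no conversion */
        return (uint32_t)x << n;
    }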
@@ -6,7 +6,7 @@
 /**
  * @file
  * @author Matthias Braun
- * @brief Init functions for various optimisations
+ * @brief Init functions for various optimizations
  */
 #ifndef FIRM_OPT_INIT_H
 #define FIRM_OPT_INIT_H
@@ -5,7 +5,7 @@
 /**
  * @file
- * @brief parallelizing Load/Store optimisation
+ * @brief parallelizing Load/Store optimization
  * @author Christoph Mallon
  */
 #include "iroptimize.h"