Commit 1f03a3bf authored by upedk

Initial commit.

# Default ignored files
/shelf/
/workspace.xml
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<Languages>
<language minSize="121" name="Python" />
</Languages>
</inspection_tool>
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredPackages">
<value>
<list size="11">
<item index="0" class="java.lang.String" itemvalue="folium" />
<item index="1" class="java.lang.String" itemvalue="scoop" />
<item index="2" class="java.lang.String" itemvalue="branca" />
<item index="3" class="java.lang.String" itemvalue="earthengine-api" />
<item index="4" class="java.lang.String" itemvalue="pyzmq" />
<item index="5" class="java.lang.String" itemvalue="google-auth-httplib2" />
<item index="6" class="java.lang.String" itemvalue="hyperopt" />
<item index="7" class="java.lang.String" itemvalue="httplib2" />
<item index="8" class="java.lang.String" itemvalue="google-api-python-client" />
<item index="9" class="java.lang.String" itemvalue="ortools" />
<item index="10" class="java.lang.String" itemvalue="google-api-core" />
</list>
</value>
</option>
</inspection_tool>
</profile>
</component>
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (ogdsm-dach)" project-jdk-type="Python SDK" />
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/ogdsm-dach.iml" filepath="$PROJECT_DIR$/.idea/ogdsm-dach.iml" />
</modules>
</component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
import numpy as np
class customStats:
"""
Similar to DEAP's default Stats class. Adapted to log stats for multiple objectives
"""
    def __init__(self, key, stat_values):
        self.key = key
        self.stat_values = stat_values
def compile(self, population):
entry = {}
fitness_values = tuple(self.key(elem) for elem in population)
avg_value = np.mean(fitness_values, axis=0)
std_value = np.std(fitness_values, axis=0)
min_value = np.min(fitness_values, axis=0)
max_value = np.max(fitness_values, axis=0)
        for i, value in enumerate(self.stat_values):
            entry[value] = {
                'avg': avg_value[i],
                'std': std_value[i],
                'min': min_value[i],
                'max': max_value[i]
            }
return entry
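# A minimal usage sketch with hypothetical stand-in individuals (in the GA,
# the key extracts each individual's fitness values tuple):
#
#   pop = [type('Ind', (), {'vals': (1.0, 10.0)})(),
#          type('Ind', (), {'vals': (3.0, 30.0)})()]
#   stats = customStats(key=lambda ind: ind.vals,
#                       stat_values=['total_fitness', 'power_overshoot'])
#   stats.compile(pop)
#   # -> {'total_fitness': {'avg': 2.0, 'std': 1.0, 'min': 1.0, 'max': 3.0},
#   #     'power_overshoot': {'avg': 20.0, 'std': 10.0, 'min': 10.0, 'max': 30.0}}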
from deap import tools
import random
import numpy as np
import time
import copy
import deap_dsm_config
# Create DEAP functions in global scope -> necessary for SCOOP
# https://deap.readthedocs.io/en/master/tutorials/basic/part4.html
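# SCOOP pickles these module-level functions over to its worker processes;
# the entry script is then launched through SCOOP, e.g. (hypothetical name):
#   python -m scoop deap_dsm_main.py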
def init_random_seed(job_dict):
"""
Inits individual with random schedule.
- respecting timeframe (not scheduling jobs before release or extending over deadline)
:return: list representing individual
"""
individual = []
start = time.perf_counter()
for job_id, job_details in job_dict.items():
# Set this job's start time in individual
start_time = random.randint(job_details['release'], job_details['deadline']-(job_details['duration']-1))
# Add this job to individual
individual.append(start_time)
print('end init, time: ' + str(time.perf_counter() - start))
return individual
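# Sanity sketch (hypothetical job_dict): a job with release=2, deadline=5 and
# duration=3 occupies three timesteps, so the latest valid start is
# deadline - (duration - 1) = 3 and randint draws from {2, 3}:
#
#   example_jobs = {0: {'release': 2, 'deadline': 5, 'duration': 3,
#                       'demand': 1.0, 'consumer_id': 0}}
#   init_random_seed(example_jobs)  # -> [2] or [3]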
def dsm_mutate(individual, job_dict):
"""
Mutate individual while making sure to only allow valid results of mutation.
This means:
- jobs are mutated in valid timeframe only
Mutate jobs with bad working_hours performance with higher probability
- individuals job performance list is read and applied
:param individual:
:param job_dict:
:return:
"""
    # Loop through all jobs in this individual
    for i in range(len(individual)):
        # Get job details
        job_details = job_dict[i]
        try:
            job_fitness = individual.job_fitness[i]
        except IndexError:
            print('job_fitness index error at index ' + str(i))
            continue
        # Mutate this job's start time with normalized job fitness as probability
        if random.random() < job_fitness:
            individual[i] = random.randint(job_details['release'],
                                           job_details['deadline'] - (job_details['duration'] - 1))
    return individual
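# Illustration (hypothetical numbers): with individual.job_fitness = [0.1, 0.9],
# job 0 keeps its start time ~90% of the time, while job 1 (the one with the
# larger normalized penalty) is re-drawn within its valid window ~90% of the time.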
def eval_fitness(individual, optimization_length, optimization_start, consumers_number, job_dict, power, working_hours,
verbose=False, final_eval=False):
"""
Fitness function of this DEAP GA
:param individual:
:param optimization_length: number of timesteps in which jobs can be scheduled
:param optimization_start: starting timestep relative to start of year
:param consumers_number: number of different consumers
:param job_dict: dict containing job information
:param power: available power
:param working_hours: available working_hours
:param verbose: default False, print eval results if True
    :param final_eval: default False, return a result dict instead of the fitness tuple if True
    :return: tuple of the per-job fitness array and the optimization objectives
"""
eval_start = time.perf_counter()
# Objective variables
power_overshoot = 0 # kWh of demand overshooting availability
possible_hours = 0 # Number of hours of operation during possible hours
blocked_hours = 0 # Number of hours of operation during blocked hours
parallel_schedule_penalty = 0 # Number of hours of double operation of one consumer -> not to be permitted!
job_id = 0
# Initialize power and operation schedule
power_demand = np.zeros(24*365)
operation_schedule = np.full((consumers_number, 24*365), None)
    # Job fitness accumulator: one entry per job, reset to zero for this individual
    total_job_fitness = np.zeros(len(individual))
# ---- Loop through all jobs in this individual
while job_id < len(individual):
# Individual job_fitness variable
job_fitness = 0
# Get job details
start_time = individual[job_id] # Start time is int value from individual list.
job = job_dict[job_id] # get this job's parameters
i = start_time
        # Loop through every timestep of this job (a job occupies 'duration' timesteps)
        while i < start_time + job['duration'] and i < optimization_start + optimization_length:
# ---- Power demand ----
# Add this jobs demand to total demand
power_demand[i] = power_demand[i] + job['demand']
# ---- Consumer operation ----
# Set operation for this consumer in the operation schedule
# ~ 0.15s per individual (full problem) (~35%)
# Check if consumer has already scheduled operation in this timeframe
if operation_schedule[job['consumer_id']][i] is None: # If no operation yet
# Set operation.
# !! CHANGE: Operation marked with job ID -> basis to identify jobs that contribute to power overshoot
operation_schedule[job['consumer_id']][i] = job_id
# Check if in blocked hours
if working_hours[job['consumer_id']][i] == 3: # If blocked
job_fitness = job_fitness + 2 # Update job fitness
blocked_hours = blocked_hours + 1 # Update blocked_hours counter
elif working_hours[job['consumer_id']][i] == 2: # If possible
job_fitness = job_fitness + 1 # Update job_fitness
possible_hours = possible_hours + 1 # Update possible_hours counter
else: # If scheduled parallel, add penalty
parallel_schedule_penalty = parallel_schedule_penalty + 1
# Add job_fitness penalty
job_fitness = job_fitness + 1
i = i + 1
# Save job_fitness of this job.
total_job_fitness[job_id] = job_fitness
job_id = job_id + 1
# ---- End job loop
# Calculate difference of produced power and demand
power_diff = power - power_demand
# Calculate power overshoot
power_overshoot = ((power_diff < 0) * power_diff).sum(axis=0) * (-1)
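    # Worked example of the masking trick above (hypothetical values):
    #   power_diff = [2., -1., -3., 4.]
    #   (power_diff < 0) * power_diff  -> [0., -1., -3., 0.]
    #   -> overshoot = -(-1. + -3.) = 4.0 kWh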
# Update job_fitness with respect to power overshoot
# Loop through power_diff array
t = 0
for power_value in power_diff:
if power_value < 0: # Check if negative -> = power_overshoot
# Select this timestep in the operation schedule
operation = operation_schedule[:,t]
# Loop through all scheduled jobs at this timestep -> get their ID
for operation_job_id in operation:
                if operation_job_id is not None:  # If None -> no operation scheduled
# Add penalty to the job_fitness of the jobs at this timestep
# (inverted, job_fitness is best when 0 and worse when larger)
total_job_fitness[operation_job_id] += (-power_value)
t += 1
    # Normalize job_fitness to a value between 1 (=highest penalty) and 0 (=lowest penalty)
    # This is later used as the mutation probability of every single job
    max_job_fitness = np.max(total_job_fitness)
    if max_job_fitness > 0:  # Guard against division by zero when no penalties occurred
        total_job_fitness = total_job_fitness / max_job_fitness
# Calculate total fitness
total_fitness = power_overshoot * deap_dsm_config.power_objective + \
possible_hours * deap_dsm_config.possible_hours_objective + \
blocked_hours * deap_dsm_config.blocked_hours_objective + \
parallel_schedule_penalty * deap_dsm_config.parallel_schedule_objective
if verbose:
print("power_overshoot: " + str(power_overshoot))
print("possible_hours: " + str(possible_hours))
print("blocked_hours: " + str(blocked_hours))
print("parallel_schedule: " + str(parallel_schedule_penalty))
print("total_fitness: " + str(total_fitness))
if final_eval:
dsm_result_dict = {
"power_overshoot": power_overshoot, # score
"possible_hours": possible_hours, # score
"blocked_hours": blocked_hours, # score
"parallel_schedule_penalty": parallel_schedule_penalty, # score
"total_fitness": total_fitness, # score
            "total_power_demand": power_demand,  # array with total power demand for every timestep
"operation_schedule": operation_schedule # working_hours usage matrix
}
return dsm_result_dict
return total_job_fitness, total_fitness, possible_hours, blocked_hours, power_overshoot, parallel_schedule_penalty
def dsm_var_and(population, toolbox, mut_prob, crossover_prob, job_dict):
"""
Variation adapted from DEAP's varAnd function
- crossover and mutation performed with given probability
returns offspring
"""
    # Create offspring by cloning every individual of this population
offspring = [toolbox.clone(ind) for ind in population]
# Apply crossover and mutation on the offspring
for i in range(1, len(offspring), 2):
if random.random() < crossover_prob:
# Randomly perform crossover on individuals
offspring[i - 1], offspring[i] = toolbox.two_point_crossover(offspring[i - 1], offspring[i])
del offspring[i - 1].fitness.values, offspring[i].fitness.values # Delete fitness values for individuals
for i in range(len(offspring)):
if random.random() < mut_prob:
# Apply custom mutation.
offspring[i] = toolbox.mutate(offspring[i], job_dict=job_dict)
del offspring[i].fitness.values
return offspring
def main_ga(population, toolbox, cxpb, mutpb, ngen, stats, halloffame, job_dict):
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.stat_values if stats else [])
    # Define logbook chapters (one per fitness value), each with stats as headers
    if stats:
        for stat_value in stats.stat_values:
            logbook.chapters[stat_value].header = 'max', 'min', 'avg', 'std'
# ----- EVALUATION -----
# Evaluate the individuals with an invalid fitness
print('start eval')
eval_start = time.perf_counter()
invalid_ind = [ind for ind in population if not ind.fitness.valid]
# Apply fitness function to each individual with invalid fitness by mapping (parallelized)
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
# Assign calculated fitness values to these individuals.
for ind, fit in zip(invalid_ind, fitnesses):
ind.job_fitness = fit[0] # Assign job_fitness -> first value returned by eval
ind.fitness.values = fit[1:] # Assign individual fitness values -> rest returned by eval
print("Eval time: " + str(time.perf_counter() - eval_start))
# Update hall of fame
if halloffame is not None:
halloffame.update(population)
# List to hold evolution of hall of fame = best individual of each generation
hof_evolution = []
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
gen_loop_time = time.perf_counter()
# ------ SELECTION ------- #
        # Tournament selection of len(population) individuals -> offspring has the same size as the population
offspring = toolbox.select_tourn(population, len(population))
# Perform mutation and variation (=varAnd) on regular offspring
offspring = dsm_var_and(offspring, toolbox, cxpb, mutpb, job_dict)
# ----- EVALUATE -----
eval_start = time.perf_counter()
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.job_fitness = fit[0] # Assign job_fitness -> first value returned by eval
ind.fitness.values = fit[1:] # Assign individual fitness values -> rest returned by eval
print("Eval time: " + str(time.perf_counter()-eval_start))
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
# Record best individual seen so far
hof_evolution.append(copy.deepcopy(halloffame))
print('Gen loop time: ' + str(time.perf_counter() - gen_loop_time))
print(logbook.stream)
return population, logbook, hof_evolution
# Input files
input_data_path = './input/System Design Input v08.xlsx'
consumer_data_path = './input/Consumer Data Input v05 one month.xlsx'
# GA parameters
pop_size = 400
n_gen = 1000
tourn_size = 3
crossover_prob = 0.5
mut_prob = 0.5
# Objective weights -> all minimized
power_objective = 3
blocked_hours_objective = 2
possible_hours_objective = 1
parallel_schedule_objective = 100
# timeframe_objective = -1.0 # Not needed since timeframe violations are not possible due to seeding and mutation
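# Example weighting (hypothetical scores): power_overshoot=10, possible_hours=4,
# blocked_hours=2 and one parallel schedule give
# total_fitness = 10*3 + 4*1 + 2*2 + 1*100 = 138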
# General parameters
#optimization_start = 0
#optimization_length = 8760
optimization_start = 2160 # April 1st
optimization_length = 720 # One Month
# Buffer in kW by which scheduled consumption must stay below produced power
power_buffer = 1
from deap import base
from deap import tools
from scoop import futures
import random
import deap_dsm_config
import deap_dsm
import custom_stats
# Creator helper function (see https://github.com/DEAP/deap/issues/57)
creator = None
def set_creator(cr):
global creator
creator = cr
def deap_run(optimization_start, optimization_length, consumers_number, job_dict, power_availability, working_hours_array):
pop_size = deap_dsm_config.pop_size
n_gen = deap_dsm_config.n_gen
tourn_size = deap_dsm_config.tourn_size
crossover_prob = deap_dsm_config.crossover_prob
mut_prob = deap_dsm_config.mut_prob
toolbox = base.Toolbox()
    # Register random seeding function
    toolbox.register("random_seed", deap_dsm.init_random_seed, job_dict)
# Random seed individual
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.random_seed)
# Register population
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", deap_dsm.eval_fitness,
optimization_length=optimization_length,
optimization_start=optimization_start,
consumers_number=consumers_number,
job_dict=job_dict,
power=power_availability,
working_hours=working_hours_array)
toolbox.register("two_point_crossover", tools.cxTwoPoint)
toolbox.register("mutate", deap_dsm.dsm_mutate, job_dict=job_dict)
toolbox.register("select_tourn", tools.selTournament, tournsize=tourn_size)
    # Register SCOOP's parallel map for DEAP
toolbox.register("map", futures.map)
random.seed(64)
pop = toolbox.population(n=pop_size)
hof = tools.HallOfFame(1)
stats = custom_stats.customStats(lambda ind: ind.fitness.values, ['total_fitness', 'possible_hours',
'blocked_hours', 'power_overshoot',
'parallel_schedule'])
pop, logbook, hof_evolution = deap_dsm.main_ga(pop, toolbox, cxpb=crossover_prob, mutpb=mut_prob, ngen=n_gen,
stats=stats, halloffame=hof, job_dict=job_dict)
print('GA Result: \n')
print(logbook)
return pop, hof_evolution, logbook
import datetime
import time
from tqdm import tqdm
import pickle
import os
import shutil
import json
import numpy as np
import tkinter as tk
from tkinter import filedialog
import scenario as c_s
import deap_dsm_config
from deap import creator
from deap import base
from deap_run import set_creator
from deap_run import deap_run
# Creator functions in main to make SCOOP work
set_creator(creator)
# Register fitness
# !!! DEAP's selTournament effectively compares only the first objective -> the weighted sum of all
# objectives is added as total_fitness (first objective) in the eval function, while the raw
# objectives are saved separately to make logging possible
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, -1.0, -1.0, -1.0))
# Create individual
# job_fitness attribute: list holding a fitness rating for each individual job
creator.create("Individual", list, typecode='i', fitness=creator.FitnessMin, job_fitness=[])
t_total_start = time.perf_counter()
if __name__ == '__main__':
run_name = input("Run name ('read' to get cached run): ")
if run_name == 'read': # Read cached files from previous run
root = tk.Tk()
root.withdraw()
cache_folder_path = filedialog.askdirectory(initialdir='./deap_dsm_cache/')
with open(cache_folder_path+'/scenarios_list.pkl', 'rb') as cache_file:
cached_scenarios_list = pickle.load(cache_file)
# Read scenarios input data
with open(cache_folder_path+'/scenarios_data') as scenarios_data:
scenarios_dict = json.load(scenarios_data)
# Read consumption input data
with open(cache_folder_path+'/consumption_data') as consumption_data:
consumption_dict = json.load(consumption_data)
        # Read working hours input data
        with open(cache_folder_path+'/working_hours_data') as working_hours_file:
            working_hours_data = json.load(working_hours_file)
        # Initialize Numpy array with row for every consumer
        working_hours_array = np.zeros((working_hours_data['consumers_number'], 24 * 365))
        for consumer_id, working_hours in working_hours_data['working_hours'].items():
            # Add this consumer's operation times to the array (JSON keys are strings)
            working_hours_array[int(consumer_id)] = working_hours
for scenario in cached_scenarios_list:
# -------- Plot results ----------
scenario.plot_dsm_result(consumers_number=working_hours_data['consumers_number'],
optimization_start=deap_dsm_config.optimization_start,
optimization_length=deap_dsm_config.optimization_length,
job_dict=consumption_dict['job_dict'],
power_availability=scenario.power_availability,
working_hours_array=working_hours_array,
cache_folder_path=cache_folder_path
)
# Declare animation as global.
animation = None