Commit f5a11ced authored by steffen.schotthoefer

tidy up repository

parents 099b6d20 1d4b4c55
Pipeline #115648 failed in 15 minutes and 15 seconds
@@ -29,7 +29,7 @@ CFL_NUMBER = 0.7
% Final time for simulation
TIME_FINAL = 0.3
% Maximal Moment degree
-MAX_MOMENT_SOLVER = 3
+MAX_MOMENT_SOLVER = 1
%
%% Entropy settings
ENTROPY_FUNCTIONAL = MAXWELL_BOLZMANN
@@ -53,12 +53,10 @@ BC_DIRICHLET = ( void )
% Quadrature Rule
%QUAD_TYPE = MONTE_CARLO
QUAD_TYPE = GAUSS_LEGENDRE_TENSORIZED
%
% Quadrature Order
QUAD_ORDER = 8
%
%
% ----- Output ----
%
VOLUME_OUTPUT = (ANALYTIC, MINIMAL, MOMENTS, DUAL_MOMENTS)
......
@@ -12,7 +12,7 @@ def custom_loss1dMBPrime(): # (label, prediction)
def initialize_network():
# Load model
-    model = tf.keras.models.load_model('saved_model_GPU/_EntropyLoss_1_300_M_0', custom_objects={ 'loss':custom_loss1dMBPrime })
+    model = tf.keras.models.load_model('neural_network_model/_EntropyLoss_1_300_M_0', custom_objects={ 'loss':custom_loss1dMBPrime })
# Check its architecture
model.summary()
......
This diff is collapsed.
#!/bin/bash
#SBATCH --ntasks=24
#SBATCH --time=24:00:00
#SBATCH --mem=20gb
#SBATCH --partition=single
#SBATCH --job-name=EntropyTrainerCPU
#SBATCH --output=0_CPU_training_out_%j
#SBATCH --error=0_CPU_training_err_%j
python3 trainNNMK2.py
#!/bin/bash
#SBATCH --ntasks=20
#SBATCH --partition=gpu_4
#SBATCH --time=24:00:00
#SBATCH --mem=20gb
#SBATCH --gres=gpu:1
#SBATCH --job-name=EntropyTrainer
#SBATCH --output=0_GPU_out_%j
#SBATCH --error=0_GPU_err_%j
python3 trainNNMK2.py
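(Both job scripts above are submitted with SLURM's sbatch command; the %j in the --output/--error patterns is expanded to the job id, so each run gets its own pair of log files. The script filenames themselves are not visible in this view, so the exact sbatch invocation is left open.)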
This diff is collapsed.
This diff is collapsed.
'''
Author: Steffen Schotthöfer
Description: This file contains the training routine for the entropy NN
'''
# Imports
import pandas as pd
import random # for generating random numbers
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt # MATLAB like plotting routines
from sklearn.preprocessing import normalize
def main():
    (xDataTrain, yDataTrain, xDataTest, yDataTest) = preprocess_data("trainNN.csv")
    # plot_data(xDataTrain, yDataTrain)
    model = create_model()
    cb_list = create_callbacks()
    # Do the training (pass the early-stopping callbacks created above;
    # they were previously created but never handed to fit)
    history = model.fit(xDataTrain, yDataTrain, validation_split=0.25, epochs=100, batch_size=500, verbose=1,
                        callbacks=cb_list)
    # Evaluation tests
    score = model.evaluate(xDataTest, yDataTest)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    # plot training results
    print_output(history)
    # save model
    model.save('model_M4')
    return 0
# Build the network:
def create_model():
    ''' mark 1 model
    model = tf.keras.models.Sequential([
        keras.layers.Dense(256, activation='relu', input_shape=(4,)),
        keras.layers.Dropout(0.3),
        keras.layers.Dense(512, activation='relu', input_shape=(64,)),
        keras.layers.Dropout(0.3),
        keras.layers.Dense(256, activation='relu', input_shape=(256,)),
        keras.layers.Dropout(0.3),
        keras.layers.Dense(128, activation='relu', input_shape=(128,)),
        keras.layers.Dropout(0.3),
        keras.layers.Dense(4,)
    ])
    '''
    # leakyRelu = tf.keras.layers.LeakyReLU(alpha=0.1)
    model = tf.keras.models.Sequential([
        keras.layers.Dense(256, activation='sigmoid', input_shape=(4,)),
        keras.layers.Dense(512, activation='sigmoid'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(256, activation='sigmoid'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(128, activation='sigmoid'),
        keras.layers.Dense(4, )
    ])
    model.summary()
    model.compile(loss=tf.keras.losses.MeanSquaredError(), optimizer='adam', metrics=['accuracy'])
    return model
# Create Callbacks
def create_callbacks():
    cb_list = [keras.callbacks.EarlyStopping(
        monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto',
        baseline=None, restore_best_weights=False
    )]
    return cb_list
def preprocess_data(filename):
    # read the csv file into a dataframe
    dataFrameInput = pd.read_csv(filename)
    idx = 0
    xDataList = list()
    yDataList = list()
    data = dataFrameInput.values  # numpy array
    # Somehow t = 0 has an odd number of elements, so skip those rows;
    # the remaining rows alternate between input (even idx) and output (odd idx)
    for row in data:
        if row[2] > 0:
            if idx % 2 == 0:
                xDataList.append(row)
            else:
                yDataList.append(row)
            idx = idx + 1
    # merge the lists into (input, output) pairs
    DataList = list()
    for rowX, rowY in zip(xDataList, yDataList):
        DataList.append([rowX, rowY])
    # Shuffle data
    random.shuffle(DataList)
    DataArray = np.asarray(DataList)
    # print(DataArray.shape)
    # Strip off header information, i.e. the first 3 cols
    DataArraySlim = DataArray[:, :, 3:]
    # print(DataArraySlim.shape)
    # split into train and test data (ratio 4:1)
    DataTrain = DataArraySlim[:4 * int(DataArraySlim.shape[0] / 5)]
    DataTest = DataArraySlim[4 * int(DataArraySlim.shape[0] / 5):]
    # Split into x (input) and y (output) data
    xDataTrain = DataTrain[:, 0, :]
    yDataTrain = DataTrain[:, 1, :]
    xDataTest = DataTest[:, 0, :]
    yDataTest = DataTest[:, 1, :]
    # Normalize input
    xDataTrain = normalize(xDataTrain, axis=1, norm='l1')
    xDataTest = normalize(xDataTest, axis=1, norm='l1')
    return (xDataTrain, yDataTrain, xDataTest, yDataTest)
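# --- Sketch: minimal sanity check of the pairing logic above. Not called by
# main; the helper name and the meaning of the three header columns are
# assumptions, since only the slicing is visible in this script. ---
def _check_pairing_logic():
    # one pair of rows: 3 header cols (e.g. id, cell, t > 0) + 4 moment values
    fake = np.asarray([[[0, 0, 0.1, 1.0, 2.0, 3.0, 4.0],    # even idx -> input
                        [1, 0, 0.1, 1.1, 2.1, 3.1, 4.1]]])  # odd idx -> output
    slim = fake[:, :, 3:]    # strip the 3 header columns
    print(slim[:, 0, :])     # x sample: [[1. 2. 3. 4.]]
    print(slim[:, 1, :])     # y sample: [[1.1 2.1 3.1 4.1]]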
def plot_data(xDataTrain, yDataTrain):
    # plot the four moment components of the inputs, then of the outputs
    titles = ['m_0^0', 'm_1^-1', 'm_1^0', 'm_1^1']
    for data in (xDataTrain, yDataTrain):
        fig, axs = plt.subplots(4)
        for i in range(4):
            axs[i].plot(data[:, i])
            axs[i].set_title(titles[i])
            axs[i].set_ylabel('value')
            axs[i].set_xlabel('sample id')
        plt.show()
    return 0
def print_output(history):
    fig, axs = plt.subplots(2)
    print(history.history.keys())
    # summarize history for accuracy
    axs[0].plot(history.history['accuracy'])
    axs[0].plot(history.history['val_accuracy'])
    axs[0].set_title('model accuracy')
    axs[0].set_ylabel('accuracy')
    axs[0].set_xlabel('epoch')
    axs[0].legend(['train_acc', 'val_acc'], loc='upper left')
    # summarize history for loss
    axs[1].plot(history.history['loss'])
    axs[1].plot(history.history['val_loss'])
    axs[1].set_title('model loss')
    axs[1].set_ylabel('loss')
    axs[1].set_xlabel('epoch')
    axs[1].legend(['train_loss', 'val_loss'], loc='upper left')
    plt.show()
    return 0
if __name__ == '__main__':
    main()
### This is a script for training the second NN approach
# Imports
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import math
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint
import random
import json
# Custom Loss: dual entropy objective evaluated at the predicted multiplier
def custom_loss1dMB(u_input, alpha_pred):  # (label, prediction)
    return 4 * math.pi * tf.math.exp(alpha_pred * np.sqrt(1 / (4 * np.pi))) - alpha_pred * u_input

# Custom Loss: squared residual between the reconstructed moment and u_input
def custom_loss1dMBPrime():  # (label, prediction)
    def loss(u_input, alpha_pred):
        return 0.5 * tf.square(4 * math.pi * np.sqrt(1 / (4 * np.pi)) * tf.math.exp(alpha_pred * np.sqrt(1 / (4 * np.pi))) - u_input)
    return loss
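# --- Sketch (illustration only; this helper is an assumption and is not
# called anywhere): the loss above vanishes exactly when the reconstructed
# moment matches the input, i.e. at alpha* = sqrt(4*pi) * ln(u / sqrt(4*pi)).
def _check_loss_minimum(u=2.0):
    alpha_star = np.sqrt(4 * np.pi) * np.log(u / np.sqrt(4 * np.pi))
    loss_fn = custom_loss1dMBPrime()
    print(float(loss_fn(u, alpha_star)))  # ~0.0 up to round-off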
# Build the network:
def create_model():
    # Define the input
    input_ = keras.Input(shape=(1,))
    # Hidden layers
    hidden1 = layers.Dense(4, activation="tanh")(input_)
    hidden2 = layers.Dense(8, activation="tanh")(hidden1)
    hidden3 = layers.Dense(32, activation="tanh")(hidden2)
    hidden4 = layers.Dense(8, activation="tanh")(hidden3)
    hidden5 = layers.Dense(4, activation="tanh")(hidden4)
    # Define the output
    output_ = layers.Dense(1)(hidden5)
    # Create the model
    model = keras.Model(inputs=[input_], outputs=[output_])
    model.summary()
    # alternatives: tf.keras.losses.MeanSquaredError(), custom_loss1dMB
    model.compile(loss=custom_loss1dMBPrime(), optimizer='adam')  # metrics=[custom_loss1dMB, custom_loss1dMBPrime]
    return model
def main():
    print("Create Model")
    model = create_model()
    print("Create Training Data")
    # build training data and shuffle!
    uTrain = np.arange(0.1, 400, 0.000001)
    random.shuffle(uTrain)
    # Create Early Stopping callback
    es = EarlyStopping(monitor='loss', mode='min', min_delta=0.00005, patience=50,
                       verbose=10)  # loss == custom_loss1dMBPrime by model definition
    mc = ModelCheckpoint('saved_model_GPU2/best_model_1_300.h5', monitor='loss', mode='min', save_best_only=True)
    # Train the model
    print("Train Model")
    history = model.fit(uTrain, uTrain, validation_split=0.3, epochs=1500, batch_size=900000, verbose=1,
                        callbacks=[es, mc])
    # save trained model
    print("save model")
    model.save('saved_model_GPU2/_EntropyLoss_1_300_M_0')
    # save history
    print("save history")
    with open('saved_model_GPU2/_EntropyLoss_1_300_M_0_hist.json', 'w') as file:
        json.dump(history.history, file)
    print("history saved")
    # load history like this:
    '''
    with open('saved_model/_EntropyLoss_1_300_M_0_hist.json') as json_file:
        history1 = json.load(json_file)
    '''
    print("Training Sequence successfully finished")
    return 0

if __name__ == '__main__':
    main()
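(Note on the training set above: np.arange(0.1, 400, 0.000001) produces about (400 - 0.1) / 1e-6 ≈ 4.0e8 float64 samples, roughly 3.2 GB, which is consistent with the 20 GB requested in the SLURM scripts; the pure-Python random.shuffle on a NumPy array of that size is also much slower than np.random.shuffle.)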
@@ -371,6 +371,7 @@ void Config::SetPointersNull( void ) {
}
void Config::SetPostprocessing() {
// append '/' to all dirs to allow for simple path addition
if( _logDir[_logDir.size() - 1] != '/' ) _logDir.append( "/" );
if( _outputDir[_outputDir.size() - 1] != '/' ) _outputDir.append( "/" );
......
@@ -194,7 +194,7 @@ VectorVector LineSource_PN::SetupIC() {
for( unsigned j = 0; j < cellMids.size(); ++j ) {
double x = cellMids[j][0];
double y = cellMids[j][1]; // (x- 0.5) * (x- 0.5)
-        psi[j][0] = sqrt( 4 * M_PI ) * 1.0 / ( 4.0 * M_PI * t ) * std::exp( -( x * x + y * y ) / ( 4 * t ) );
+        psi[j][0] = /*sqrt( 4 * M_PI ) * */ 1.0 / ( 4.0 * M_PI * t ) * std::exp( -( x * x + y * y ) / ( 4 * t ) );
}
return psi;
}
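For reference, the initial condition assigned here (with the sqrt(4*pi) scaling factor now commented out) is the two-dimensional heat kernel

    \psi(x, y, t) = \frac{1}{4 \pi t} \exp\!\left( -\frac{x^2 + y^2}{4 t} \right),

the fundamental solution of \psi_t = \Delta \psi, evaluated at the fixed pseudo-time t used in SetupIC.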
This diff is collapsed.
--std=c17
\ No newline at end of file
+-std=c++17
\ No newline at end of file