Commit 0a7c80e4 authored by jbracher

Updating R code.

parent 25af3123
# compute ranking across all evaluation files
# rm(list = ls())
library(dplyr)
library(knitr)
# needs to be run from the code folder
# setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code")
# load all evaluation files, append to each other:
lf <- list.files("../evaluation") %>% sort
# weather variables were not covered on Oct 27, so restrict the first file to DAX:
dat <- read.csv(paste0("../evaluation/", lf[1])) %>% filter(target == "DAX")
for (jj in lf[-1]){
dat <- rbind(dat, read.csv(paste0("../evaluation/", jj)))
}
# omit ensemble/benchmark models from ranking
dat <- dat %>%
......
# evaluate forecasts
# needs to be run from the code folder
# setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code")
library(quantmod)
# source functions:
source("functions.R")
# evaluate all Wednesdays within the last ten days:
forecast_dates0 <- Sys.Date() - 10:2 # as.Date("2021-11-17")
forecast_dates <- forecast_dates0[weekdays(forecast_dates0) %in% c("Mittwoch", "Wednesday")]
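# The helpers qs() and extract_name_from_file() are provided by functions.R, which is not part
# of this commit. As an illustration only, minimal sketches of what they presumably do: a
# standard pinball loss and a parser assuming file names of the form <date>_<model>.csv.
# The exists() guards avoid overriding the sourced definitions.
if(!exists("qs")){
qs <- function(q, y, alpha){
# pinball / linear quantile score for predictive quantile q, observation y, level alpha
# (some definitions include an additional factor of 2)
ifelse(y < q, (1 - alpha)*(q - y), alpha*(y - q))
}
}
if(!exists("extract_name_from_file")){
extract_name_from_file <- function(filename){
# strip the leading "<date>_" and the trailing ".csv" (hypothetical file-name convention)
gsub("\\.csv$", "", sub("^[^_]*_", "", filename))
}
}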
# run through the forecast dates:
for(i in seq_along(forecast_dates)){
forecast_date <- forecast_dates[i]
# the name of the folder where forecasts are stored
week_tag <- gsub("-", "", as.character(forecast_date))
# list files containing this week's forecasts:
files <- list.files(paste0("../", week_tag))
# read in all forecasts:
all_forecasts <- NULL
for(j in seq_along(files)){
dat_temp <- read.csv(paste0("../", week_tag, "/", files[j]),
colClasses = c(forecast_date = "Date"))
# remove row numbers if present:
dat_temp$X <- NULL
# add the model name extracted from the file name:
dat_temp$model <- extract_name_from_file(files[j])
if(!is.null(all_forecasts)){
all_forecasts <- rbind(all_forecasts, dat_temp)
}else{
all_forecasts <- dat_temp
}
}
# add truths to DAX forecasts:
# generate the DAX truth data for a given week:
getSymbols('^GDAXI',src='yahoo', from = as.Date("2021-09-01"))
dat_dax <- data.frame("date" = index(GDAXI), value = GDAXI$GDAXI.Close)
colnames(dat_dax) <- c("date", "value")
dat_dax$date <- as.Date(dat_dax$date)
reference_value <- dat_dax$value[dat_dax$date == forecast_date]
dat_dax$value <- 100*(log(dat_dax$value) - log(reference_value))
truths_dax <- subset(dat_dax, date %in% (forecast_date + c(1, 2, 5, 6, 7)))
truths_dax <- truths_dax[order(truths_dax$date), ]
truths_dax$target <- "DAX"
truths_dax$horizon <- c("1 day", "2 day", "5 day", "6 day", "7 day")[1:nrow(truths_dax)]
truths_dax$date <- NULL
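# illustration of the transform above: if the DAX closes at 16000 on the forecast date and at
# 16160 one day later, the 1-day truth is 100*(log(16160) - log(16000)), approximately 0.995,
# i.e. the cumulative log-return in percent relative to the forecast date.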
# subset forecasts to DAX:
dax_forecasts <- subset(all_forecasts, target == "DAX")
# remove disallowed targets:
dax_forecasts <- subset(dax_forecasts,
horizon %in% c("1 day", "2 day", "5 day", "6 day", "7 day"))
# add truth data:
dax_forecasts <- merge(dax_forecasts, truths_dax, by = c("target", "horizon"), all.x = TRUE)
# add truths to wind forecasts:
truth_wind <- read.csv("../ptsfc_viz/plot_data/wind.csv")
truth_wind$time <- as.POSIXlt(truth_wind$time, tz = "UTC")
wind_forecasts <- subset(all_forecasts, target == "wind")
# remove disallowed targets:
wind_forecasts <- subset(wind_forecasts,
horizon %in% c("36 hour", "48 hour", "60 hour", "72 hour", "84 hour"))
wind_forecasts$forecast_date <- as.POSIXlt(wind_forecasts$forecast_date)
hours_numeric <- as.numeric(gsub(" hour", "", wind_forecasts$horizon))
wind_forecasts$target_end_date <- wind_forecasts$forecast_date + hours_numeric*60*60
wind_forecasts$target_end_date <- as.POSIXlt(wind_forecasts$target_end_date)
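# example: a "36 hour" horizon issued on forecast_date 2021-11-17 00:00 UTC maps to a
# target_end_date of 2021-11-18 12:00 UTC, which is then matched against the observations.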
# add truth data:
wind_forecasts <- merge(wind_forecasts, truth_wind, by.x = "target_end_date", by.y = "time", all.x = TRUE)
# restrict to relevant columns
wind_forecasts <- wind_forecasts[, c(colnames(all_forecasts), "value")]
# add truths to temperature forecasts:
truth_temperature <- read.csv("../ptsfc_viz/plot_data/t2m.csv")
truth_temperature$time <- as.POSIXlt(truth_temperature$time, tz = "UTC")
# remove disallowed targets:
temperature_forecasts <- subset(all_forecasts, target == "temperature")
temperature_forecasts <- subset(temperature_forecasts,
horizon %in% c("36 hour", "48 hour", "60 hour", "72 hour", "84 hour"))
temperature_forecasts$forecast_date <- as.POSIXlt(temperature_forecasts$forecast_date)
hours_numeric <- as.numeric(gsub(" hour", "", temperature_forecasts$horizon))
temperature_forecasts$target_end_date <- temperature_forecasts$forecast_date + hours_numeric*60*60
temperature_forecasts$target_end_date <- as.POSIXlt(temperature_forecasts$target_end_date)
# add truth data:
temperature_forecasts <- merge(temperature_forecasts, truth_temperature, by.x = "target_end_date", by.y = "time", all.x = TRUE)
# restrict to relevant columns
temperature_forecasts <- temperature_forecasts[, c(colnames(all_forecasts), "value")]
# pool together:
all_forecasts <- rbind(dax_forecasts, wind_forecasts, temperature_forecasts)
# in first week only DAX to be scored, use this line instead:
# all_forecasts <- dax_forecasts # only needed in first week
# compute linear quantile scores for different quantile levels:
all_forecasts$qscore_q0.025 <- qs(all_forecasts$q0.025, all_forecasts$value, 0.025)
all_forecasts$qscore_q0.25 <- qs(all_forecasts$q0.25, all_forecasts$value, 0.25)
all_forecasts$qscore_q0.5 <- qs(all_forecasts$q0.5, all_forecasts$value, 0.5)
all_forecasts$qscore_q0.75 <- qs(all_forecasts$q0.75, all_forecasts$value, 0.75)
all_forecasts$qscore_q0.975 <- qs(all_forecasts$q0.975, all_forecasts$value, 0.975)
# average linear quantile score:
all_forecasts$mean_qscore <-
(all_forecasts$qscore_q0.025 +
all_forecasts$qscore_q0.25 +
all_forecasts$qscore_q0.5 +
all_forecasts$qscore_q0.75 +
all_forecasts$qscore_q0.975)/5
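# note: averaging the pinball scores over the five quantile levels gives a rough discrete
# approximation to the CRPS (up to a constant factor), so lower mean_qscore is better.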
# absolute error:
all_forecasts$ae <- abs(all_forecasts$q0.5 - all_forecasts$value)
# coverage of intervals:
all_forecasts$interval_coverage_0.5 <- (all_forecasts$value >= all_forecasts$q0.25 & all_forecasts$value <= all_forecasts$q0.75)
all_forecasts$interval_coverage_0.95 <- (all_forecasts$value >= all_forecasts$q0.025 & all_forecasts$value <= all_forecasts$q0.975)
# re-order columns:
all_forecasts <- all_forecasts[, c("model", colnames(all_forecasts)[colnames(all_forecasts) != "model"])]
# add imputed rows for missing scores:
all_forecasts$scores_imputed <- FALSE
models <- read.csv("../ptsfc_viz/plot_data/list_teams.csv")$model
# run through targets
for(tg in c("DAX", "wind", "temperature")){
# restrict to this target:
sub_tg <- subset(all_forecasts, target == tg)
# check which horizons have been addressed by at least one participant:
horizons_present <- unique(sub_tg$horizon)
# run through these horizons:
for(h in horizons_present){
# subset to this horizon:
sub_tg_h <- subset(sub_tg, horizon == h)
# run through models and check if they are present
for(mod in models){
# if not: add a row with the worst score
if(!mod %in% sub_tg_h$model){
row_to_add <- sub_tg_h[1, ]
row_to_add$model <- mod
row_to_add$mean_qscore <- 1.01*max(sub_tg_h$mean_qscore)
row_to_add$ae <- 1.01*max(sub_tg_h$ae)
row_to_add$scores_imputed <- TRUE
row_to_add[, c("q0.025", "q0.25", "q0.5", "q0.75", "q0.975",
"qscore_q0.025", "qscore_q0.25", "qscore_q0.5", "qscore_q0.75", "qscore_q0.975",
"interval_coverage_0.5", "interval_coverage_0.95")] <- NA
# set coverage to FALSE
row_to_add[, c("interval_coverage_0.5", "interval_coverage_0.95")] <- FALSE
# add to the data frame:
all_forecasts <- rbind(all_forecasts, row_to_add)
}
}
}
}
# write out file:
write.csv(all_forecasts, file = paste0("../evaluation/evaluation_", week_tag, ".csv"), row.names = FALSE)
}
write.csv(all_forecasts, file = paste0("evaluation/evaluation_", wt, ".csv"), row.names = FALSE)
# generate ensemble forecasts
# needs to be run from the code folder
# setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code")
# determine the most recent Wednesday (the forecast date):
forecast_date0 <- Sys.Date() - 0:6
forecast_date <- forecast_date0[weekdays(forecast_date0) %in% c("Mittwoch", "Wednesday")]
wt <- gsub("-", "", as.character(forecast_date))
# list files:
files <- list.files(paste0("../", wt))
# read in all forecasts:
all_forecasts <- read.csv(paste0("../", wt, "/", files[1]))
all_forecasts$X <- NULL
for(i in seq_along(files)[-1]){
dat_temp <- read.csv(paste0("../", wt, "/", files[i]))
dat_temp$X <- NULL
all_forecasts <- rbind(all_forecasts, dat_temp)
}
# remove disallowed targets:
......
# compute mean ensemble (quantile-wise mean across models):
mean_ensemble <- aggregate(cbind(q0.025, q0.25, q0.5, q0.75, q0.975) ~ forecast_date + target + horizon, data = all_forecasts, FUN = mean, na.rm = TRUE)
# restrict to DAX for now:
#mean_ensemble <- subset(mean_ensemble, target == "DAX")
# write out:
write.csv(mean_ensemble, file = paste0("../", wt, "/", date, "_mean_ensemble.csv"))
# compute median ensemble:
median_ensemble <- aggregate(cbind(q0.025, q0.25, q0.5, q0.75, q0.975) ~ forecast_date + target + horizon, data = all_forecasts, FUN = median, na.rm = TRUE)
#median_ensemble <- subset(median_ensemble, target == "DAX")
# write out:
write.csv(median_ensemble, file = paste0("../", wt, "/", date, "_median_ensemble.csv"))
# process data for visualization
library(lubridate)
# to be run from the code folder
# setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code")
source("functions.R")
# get folder names where forecasts are stored
labels_weeks <- list.dirs("..", full.names = FALSE)
labels_weeks <- labels_weeks[grepl("202", labels_weeks)]
# run through folders:
for(lab in labels_weeks){
# identify files
fls <- list.files(paste0("../", lab))
# run through files and read in:
result <- NULL
for(i in seq_along(fls)){
dat_temp <- read.csv(paste0("../", lab, "/", fls[i]),
colClasses = c(forecast_date = "Date"))
# remove row numbers if necessary:
dat_temp$X <- NULL
# add model name:
dat_temp$model <- extract_name_from_file(fls[i])
# add column for target end date / time:
dat_temp$target_end_date <- NA
# re-format forecast time:
dat_temp$forecast_date <- as.POSIXlt(dat_temp$forecast_date, "GMT") + 24*60*60 - 60
# handle DAX forecasts:
subs_DAX <- subset(dat_temp, target == "DAX")
# generate target end date / time:
days_numeric <- as.numeric(gsub(" day", "", subs_DAX$horizon))
subs_DAX$target_end_date <- subs_DAX$forecast_date + 60 + days_numeric*24*60*60 - 6.5*60*60
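# (forecast_date was shifted to 23:59 above, so this places the target end time at 17:30 on the
# respective target day, presumably to line up with the DAX closing value)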
# handle weather forecasts:
subs_weather <- subset(dat_temp, target %in% c("wind", "temperature"))
# generate target end date / time:
hours_numeric <- as.numeric(gsub(" hour", "", subs_weather$horizon))
subs_weather$target_end_date <- subs_weather$forecast_date + 60 + hours_numeric*60*60 - 24*60*60
# put back together:
dat_temp <- rbind(subs_DAX, subs_weather)
# add to data.frame:
if(!is.null(result)){
result <- rbind(result, dat_temp)
}else{
result <- dat_temp
}
}
# get evaluation data:
dat_eval <- NULL
try({
dat_eval <- read.csv(paste0("../evaluation/evaluation_", lab, ".csv"))
}, silent = TRUE)
# write out if non-empty
if(!is.null(result)){
# add evaluation data:
if(!is.null(dat_eval)){
dat_eval <- dat_eval[, c("target", "model", "horizon", "mean_qscore", "ae")]
result <- merge(result, dat_eval, by = c("model", "target", "horizon"), all.x = TRUE)
}else{
result$mean_qscore <- result$ae <- NA
}
# result <- subset(result, target == "DAX" & horizon %in% paste(c(1, 2, 5, 6, 7), "day"))
forecast_date <- as.Date(result$forecast_date[1])
write.csv(result, file = paste0("../ptsfc_viz/plot_data/plot_data_", forecast_date, ".csv"), row.names = FALSE)
}
}
# setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code/")
library(rdwd)
## temperature data
dwd_url <- selectDWD(
name = "Berlin-Tempelhof",
res = "hourly",
per = "recent",
var = "air_temperature"
)
obs_data_t2m <- dataDWD(dwd_url, force = TRUE, overwrite = TRUE)
obs_data_t2m_ts <- obs_data_t2m[,c("MESS_DATUM", "TT_TU")]
names(obs_data_t2m_ts) <- c("time", "value")
obs_data_t2m_ts <- subset(obs_data_t2m_ts,
grepl("00:00:00", as.character(obs_data_t2m_ts$time)) |
grepl("12:00:00", as.character(obs_data_t2m_ts$time)))
obs_data_t2m_ts <- tail(obs_data_t2m_ts, 300)
plot(obs_data_t2m_ts$time, obs_data_t2m_ts$value, type = "l")
write.csv(obs_data_t2m_ts, file = "../ptsfc_viz/plot_data/t2m.csv", row.names = FALSE)
## wind data
dwd_url <- selectDWD(
name = "Berlin-Tempelhof",
res = "hourly",
per = "recent",
var = "wind"
)
obs_data_wind <- dataDWD(dwd_url, force = TRUE, overwrite = TRUE)
obs_data_wind_ts <- obs_data_wind[,c("MESS_DATUM", "F")]
names(obs_data_wind_ts) <- c("time", "value")
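# F is the hourly mean wind speed in m/s; convert to km/h: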
obs_data_wind_ts$value <- obs_data_wind_ts$value*3.6
obs_data_wind_ts <- subset(obs_data_wind_ts,
grepl("00:00:00", as.character(obs_data_wind_ts$time)) |
grepl("12:00:00", as.character(obs_data_wind_ts$time)))
obs_data_wind_ts <- tail(obs_data_wind_ts, 300)
write.csv(obs_data_wind_ts, file = "../ptsfc_viz/plot_data/wind.csv", row.names = FALSE)
plot(obs_data_wind_ts$time, obs_data_wind_ts$value, type = "l")
# summarize evaluation results across all weeks
# needs to be run from the code folder
# setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code")
library(dplyr)
eval_files <- list.files("evaluation")
eval_files <- list.files("../evaluation")
eval_files <- eval_files[grep("evaluation_2", eval_files)]
all_evals <- NULL
# read in all evaluation files:
for(fl in eval_files){
to_add <- read.csv(paste0("evaluation/", fl))
to_add <- read.csv(paste0("../evaluation/", fl))
if(is.null(all_evals)){
all_evals <- to_add
}else{
......
summary_all_evals <- rbind(summary_all_evals2, summary_all_evals3) %>%
# drop benchmark
filter(model != "Benchmark") %>% ungroup
write.csv(summary_all_evals, file = "ptsfc_viz/plot_data/summary_eval.csv", row.names = FALSE)
write.csv(summary_all_evals, file = "../ptsfc_viz/plot_data/summary_eval.csv", row.names = FALSE)
setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/code")
# update the wind data (stored in ptsfc_viz, but also used for evaluation etc):
source("retrieve_weather_data_ts.R")
# update evaluation. This will run the evaluation for all Wednesdays in Sys.Date() - 10:2
source("evaluate_forecasts.R")
# update summary of evaluations:
source("summarize_evaluation.R")
# update rankings:
source("compute_ranking.R")
# generate ensembles:
source("generate_ensembles.R")
# update plot data:
source("process_viz_data.R")
# commit to repo:
# <date>_me*.csv (ensembles)
# all csv files in ptsfc_viz/plot_data
# evaluation/evaluation_<date>.csv