Commit be73f2d4 authored by jbracher's avatar jbracher
Browse files

Adding Fabian's code for ranking.

parent 0865044c
# Script setup: compute weekly participant rankings from per-week evaluation
# CSVs and export them for the visualization pipeline.
# NOTE(review): rm(list = ls()) and a hard-coded absolute setwd() are fragile
# in shared scripts -- consider relative paths or here::here().
rm(list = ls())
library(dplyr)
library(knitr)
setwd("/home/johannes/Documents/Teaching/Ensemble_Seminar/ptsfc_results/evaluation")
# load all evaluation files, append to each other
# (sorting the file names presumably yields chronological order -- verify the
# naming convention of the evaluation files)
lf <- list.files() %>% sort
# weather variables not covered on Oct 27: restrict the first file to the DAX
# target only, then append all remaining evaluation files unchanged.
# Reading the remaining files via lapply() + do.call(rbind, ...) avoids the
# O(n^2) copy cost of growing `dat` with rbind() inside a loop; when lf has a
# single element, do.call(rbind, list()) is NULL and rbind(dat, NULL) == dat,
# matching the original loop's behavior.
dat <- read.csv(lf[1]) %>% filter(target == "DAX")
dat <- rbind(dat, do.call(rbind, lapply(lf[-1], read.csv)))
# omit ensemble/benchmark models from ranking
# (a single alternation regex is equivalent to the three separate
# negated grepl() exclusion conditions)
excluded_pattern <- "ensemble|EMOS|benchmark"
dat <- filter(dat, !grepl(excluded_pattern, model))
# Build the ranking table in three grouped passes over the evaluation data.
# select relevant cols, omit NAs (= cases where truth data is not yet available)
ranking <- dat %>% select(model, target, horizon, mean_qscore) %>% na.omit %>%
# compute mean scores separately for each (model, target, horizon) cell;
# n counts the evaluation cases entering each mean
group_by(model, target, horizon) %>%
summarise(n = n(), mean_qscore = mean(mean_qscore)) %>%
# rank models within each target/horizon combination
# (lower mean_qscore = better = rank 1; rank() averages ties)
ungroup %>% group_by(target, horizon) %>%
mutate(rk = rank(mean_qscore)) %>%
# compute average rank for each participant
ungroup %>% group_by(model) %>%
# summary information:
# - n_eval_cases: nr of evaluation cases as a consistency double check
#   (compared across participants further below)
# - mean ranks separately for each target and overall
summarise(n_eval_cases = sum(n),
mean_rk_wind = mean(rk[target == "wind"]),
mean_rk_temp = mean(rk[target == "temperature"]),
mean_rk_dax = mean(rk[target == "DAX"]),
mean_rk_overall = mean(rk)) %>%
ungroup
# Consistency check: every participant must have been evaluated on the same
# number of cases; a mismatch indicates that score imputation for missing
# submissions did not work.
if (length(unique(ranking$n_eval_cases)) != 1) {
  # Build the message from separate arguments rather than one multi-line
  # string literal: the original embedded a raw newline plus the source
  # file's leading indentation into the error text.
  stop("Nr of evaluation cases differs across participants.\n",
       "Perhaps score imputation didn't work properly?",
       call. = FALSE)
}
# Print a Markdown ranking table, dropping column 2 (n_eval_cases) from the
# display only; the full table is still written to disk below.
kable(ranking[,-2], digits = 2)
# Export the full ranking for the visualization repo.
# NOTE(review): write.csv() includes a row-names column by default -- confirm
# the ptsfc_viz reader expects it, otherwise pass row.names = FALSE.
write.csv(ranking, file = "../ptsfc_viz/plot_data/rankings.csv")
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment