# Global knitr chunk options: echo all code, disable lazy caching, render
# figures as svglite/pdf/png at 300 dpi into figures/, and keep only the
# high-level plot of each chunk.
knitr::opts_chunk$set(
  echo = TRUE,
  cache.lazy = FALSE,
  dev = c("svglite", "pdf", "png"),
  dpi = 300,
  fig.path = 'figures/',
  fig.keep = "high"
)
#added from: https://github.com/yihui/knitr-examples/blob/master/077-wrap-output.Rmd
# Output hook that wraps overly wide chunk output whenever the chunk sets
# the `linewidth` option; otherwise it defers to the default hook.
library(knitr)
hook_output <- knit_hooks$get('output')
knit_hooks$set(output = function(x, options) {
  width <- options$linewidth
  # Only rewrap when the linewidth chunk option is set.
  if (!is.null(width)) {
    lines <- knitr:::split_lines(x)
    # Rewrap only if at least one line exceeds the requested width.
    if (any(nchar(lines) > width)) {
      lines <- strwrap(lines, width = width)
    }
    x <- paste(lines, collapse = '\n')
  }
  hook_output(x, options)
})
library(tidyverse) # A collection of packages for data science. More about it on
# www.tidyverse.com
library(magrittr) # A package that provides pipe operators like %>%
library(lme4) # A package for fitting linear mixed-effects models (LMEMs).
library(glue) # A package that provides interpolated string functions.
library(latex2exp) # A package that converts latex to plotmath expressions.
library(lmerTest) # A package that improves inference for LMEM.
library(emmeans) # A package that allows to conduct post-hoc tests.
library(parameters) # A package that extracts and summarizes model parameters.
library(optimx) # A package providing extra optimizers (used via lmerControl below).
── Attaching packages ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── tidyverse 1.3.1 ── ✔ ggplot2 3.3.3 ✔ purrr 0.3.4 ✔ tibble 3.1.1 ✔ dplyr 1.0.5 ✔ tidyr 1.1.3 ✔ stringr 1.4.0 ✔ readr 1.4.0 ✔ forcats 0.5.1 ── Conflicts ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── tidyverse_conflicts() ── ✖ dplyr::filter() masks stats::filter() ✖ dplyr::lag() masks stats::lag() Attaching package: ‘magrittr’ The following object is masked from ‘package:purrr’: set_names The following object is masked from ‘package:tidyr’: extract Loading required package: Matrix Attaching package: ‘Matrix’ The following objects are masked from ‘package:tidyr’: expand, pack, unpack Attaching package: ‘glue’ The following object is masked from ‘package:dplyr’: collapse Attaching package: ‘lmerTest’ The following object is masked from ‘package:lme4’: lmer The following object is masked from ‘package:stats’: step
Read the partial-grid data
# Load the partial-grid hyperparameter results and inspect the variables.
data_hyperPar <- readRDS("data/data_hyperparameter-marking_partial.rds")
summary(data_hyperPar)
ter sentence_id system decoder_dropout Min. :0.0000 0_0 : 54 Marking:56214 0.2:37476 1st Qu.:0.1000 0_1 : 54 0.4:18738 Median :0.2222 0_10 : 54 Mean :0.2677 0_11 : 54 3rd Qu.:0.3667 0_12 : 54 Max. :7.3750 0_13 : 54 (Other):55890 decoder_dropout_hidden encoder_dropout learning_rate seed 0.2:18738 0.2:18738 1e-04:18738 42:18738 0.4:18738 0.4:18738 3e-04:18738 43:18738 0.6:18738 0.6:18738 5e-04:18738 44:18738 delta_scheme src_length src_length_class (-0.5:0.5):28107 Min. : 2.00 short :20898 (0:1) :28107 1st Qu.:12.00 typical :33966 Median :19.00 very long: 1350 Mean :21.93 3rd Qu.:29.00 Max. :86.00
Analyze the weighting scheme as a fixed effect
# LMEM: TER explained by source-length class, weighting scheme, and their
# interaction, with a random intercept per sentence.
model_delta <- lmer(
  ter ~ src_length_class * delta_scheme + (1 | sentence_id),
  data = data_hyperPar
)
anova(model_delta)
Sum Sq | Mean Sq | NumDF | DenDF | F value | Pr(>F) | |
---|---|---|---|---|---|---|
<dbl> | <dbl> | <int> | <dbl> | <dbl> | <dbl> | |
src_length_class | 6.6285923 | 3.3142961 | 2 | 1038 | 175.61712 | 2.022788e-66 |
delta_scheme | 0.8690301 | 0.8690301 | 1 | 55170 | 46.04796 | 1.165549e-11 |
src_length_class:delta_scheme | 0.9838778 | 0.4919389 | 2 | 55170 | 26.06674 | 4.838431e-12 |
# Estimated marginal means (and pairwise contrasts) for every
# scheme x length-class cell, using asymptotic degrees of freedom.
group_means_delta <- emmeans(model_delta,
                             pairwise ~ delta_scheme:src_length_class,
                             lmer.df = "asymptotic")
# Plot the estimated group means (+/- 1 SE) per length class, with one
# point shape / line type per weighting scheme.
ggplot(as_tibble(group_means_delta$emmeans)) +
  theme_bw() +
  theme(legend.position = c(0.1, .8)) +
  labs(x = "", y = "Estimated Group Mean") +
  geom_pointrange(
    aes(x = src_length_class,
        y = emmean,
        ymin = emmean - SE,
        ymax = emmean + SE,
        shape = delta_scheme),
    alpha = .7
  ) +
  geom_line(
    aes(x = src_length_class,
        y = emmean,
        group = delta_scheme,
        linetype = delta_scheme),
    alpha = .3
  )
# Pairwise scheme contrasts within each source-length class.
emmeans(model_delta,
        pairwise ~ delta_scheme | src_length_class,
        lmer.df = "asymptotic")$contrasts
src_length_class = short: contrast estimate SE df z.ratio p.value ((-0.5:0.5)) - (0:1) 0.001661 0.00190 Inf 0.874 0.3822 src_length_class = typical: contrast estimate SE df z.ratio p.value ((-0.5:0.5)) - (0:1) -0.000988 0.00149 Inf -0.663 0.5075 src_length_class = very long: contrast estimate SE df z.ratio p.value ((-0.5:0.5)) - (0:1) -0.053998 0.00748 Inf -7.221 <.0001 Degrees-of-freedom method: asymptotic
# LMEM on the (-0.5:0.5) scheme only, adding learning rate as a random
# effect nested with the source-length class.
model_lr <-
  lmer(
    ter ~
      src_length_class +
      (1 | sentence_id) +
      # The nested random effect expands to:
      # (1 | learning_rate) + (1 | learning_rate:src_length_class)
      (1 | learning_rate / src_length_class),
    data = select(data_hyperPar,
                  ter, src_length_class, sentence_id, learning_rate, delta_scheme),
    subset = delta_scheme == "(-0.5:0.5)"
  )
# Baseline LMEM on the same subset: only a per-sentence random intercept.
model_basic <-
  lmer(
    ter ~ src_length_class + (1 | sentence_id),
    data = select(data_hyperPar,
                  ter, src_length_class, sentence_id, learning_rate, delta_scheme),
    subset = delta_scheme == "(-0.5:0.5)"
  )
# Likelihood-ratio comparison of the two REML fits (same fixed effects).
anova(model_basic, model_lr, refit = FALSE) #refit = FALSE prevents refitting with ML method
npar | AIC | BIC | logLik | deviance | Chisq | Df | Pr(>Chisq) | |
---|---|---|---|---|---|---|---|---|
<dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | |
model_basic | 5 | -25887.58 | -25846.36 | 12948.79 | -25897.58 | NA | NA | NA |
model_lr | 7 | -26672.00 | -26614.29 | 13343.00 | -26686.00 | 788.4153 | 2 | 6.277444e-172 |
# Show the variance (rather than the SD) of each random-effect component.
print(VarCorr(model_lr), comp = "Variance")
Groups Name Variance sentence_id (Intercept) 0.06762128 src_length_class:learning_rate (Intercept) 0.00039550 learning_rate (Intercept) 0.00043103 Residual 0.01910124
ICC <- function(model) {
  # Intraclass-correlation decomposition for a (g)lmer model.
  #
  # Returns a tibble with one row per variance component — nested random
  # effects such as (1 | a / b) are collapsed into a single "a/b" row —
  # giving the raw variance, the ICC (share of the total variance, rounded
  # to 4 digits), and the ICC in percent, sorted by decreasing ICC.

  # Extract nested random-effect terms like "a/b" from the model formula.
  nested_terms <-
    formula(model) %>%
    as.character() %>%
    str_extract_all("(?<=[|] ?)[[:word:]]+[/][[:word:]]+(?= ?[)])") %>%
    unlist() %>%
    na.omit() %>%
    as.vector() %>%
    str_trim()

  # One row per random-effect group (plus "Residual"), with its variance.
  variances <-
    as_tibble(VarCorr(model)) %>%
    select(effect = grp,
           variance = vcov)

  # Relabel the component effects of a nested term "a/b" (i.e. "a" and
  # "b:a") with the combined label "a/b" so they can be summed below.
  rpl_effect <- function(x, str) {
    x <- str_trim(x)
    outer_term <-
      strsplit(str, "/", fixed = TRUE)[[1]][1] %>%
      str_trim()
    # Anchor on both sides so a name can match only as a whole grouping
    # factor (e.g. "rate" must not match inside "learning_rate"):
    pattern <- paste0("(^|:)", outer_term, "($|:)")
    x[grepl(pattern, x)] <- str
    x
  }
  variances$effect <-
    reduce(.x = nested_terms,
           .f = rpl_effect,
           .init = variances$effect,
           .dir = "forward")

  # Sum variances per (possibly collapsed) effect and derive the ICC.
  variances %>%
    group_by(effect) %>%
    summarize(variance = sum(variance, na.rm = TRUE), .groups = "drop") %>%
    mutate(icc = round(variance / sum(variance, na.rm = TRUE), 4),
           icc_perc = round(icc * 100, 2)) %>%
    arrange(desc(icc))
}
# Variance decomposition: share of TER variance per random effect.
ICC(model_lr)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 0.0676212801 | 0.7724 | 77.24 |
Residual | 0.0191012399 | 0.2182 | 21.82 |
learning_rate/src_length_class | 0.0008265372 | 0.0094 | 0.94 |
We see that still most of the variation is due to sentence differences (about 77%) and only a small amount is due to different learning rates (about 1%).
# Extend model_lr with seed (nested with length class) as a further
# random effect.
model_lrs <-
  lmer(
    ter ~
      src_length_class +
      (1 | sentence_id) +
      (1 | learning_rate / src_length_class) +
      (1 | seed / src_length_class),
    data = select(data_hyperPar, seed,
                  ter, src_length_class, sentence_id, learning_rate, delta_scheme),
    subset = delta_scheme == "(-0.5:0.5)"
  )
boundary (singular) fit: see ?isSingular
# Same model without the seed effects; fit with optimx/nlminb to aid
# convergence.
model_noSeed <-
  lmer(
    ter ~
      src_length_class +
      (1 | sentence_id) +
      (1 | learning_rate / src_length_class),
    data = select(data_hyperPar, seed,
                  ter, src_length_class, sentence_id, learning_rate, delta_scheme),
    subset = delta_scheme == "(-0.5:0.5)",
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb'))
  )
# Counterpart model without the learning-rate effects (seed only).
model_nolr <-
  lmer(
    ter ~
      src_length_class +
      (1 | sentence_id) +
      (1 | seed / src_length_class),
    data = select(data_hyperPar, seed,
                  ter, src_length_class, sentence_id, learning_rate, delta_scheme),
    subset = delta_scheme == "(-0.5:0.5)",
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb'))
  )
Warning message in optwrap(optimizer, devfun, getStart(start, rho$pp), lower = rho$lower, : “convergence code 1 from optimx: none” boundary (singular) fit: see ?isSingular
# Does adding the seed effects improve on the learning-rate-only model?
# (REML fits compared as-is; no ML refit.)
anova(model_lrs, model_noSeed, refit = FALSE)
npar | AIC | BIC | logLik | deviance | Chisq | Df | Pr(>Chisq) | |
---|---|---|---|---|---|---|---|---|
<dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | |
model_noSeed | 7 | -26672.00 | -26614.29 | 13343.00 | -26686.00 | NA | NA | NA |
model_lrs | 9 | -26676.99 | -26602.79 | 13347.49 | -26694.99 | 8.986626 | 2 | 0.01118353 |
# Does adding the learning-rate effects improve on the seed-only model?
anova(model_lrs, model_nolr, refit = FALSE)
npar | AIC | BIC | logLik | deviance | Chisq | Df | Pr(>Chisq) | |
---|---|---|---|---|---|---|---|---|
<dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | <dbl> | |
model_nolr | 7 | -25892.19 | -25834.48 | 12953.09 | -25906.19 | NA | NA | NA |
model_lrs | 9 | -26676.99 | -26602.79 | 13347.49 | -26694.99 | 788.7997 | 2 | 5.179872e-172 |
# ICC table for the model including both learning rate and seed.
ICC(model_lrs)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 6.762072e-02 | 0.7723 | 77.23 |
Residual | 1.909206e-02 | 0.2181 | 21.81 |
learning_rate/src_length_class | 8.262321e-04 | 0.0094 | 0.94 |
seed/src_length_class | 1.326379e-05 | 0.0002 | 0.02 |
Reduce the data to the scheme and columns needed for the following models
# Keep only the (-0.5:0.5) scheme and drop columns no longer needed.
data_hyperPar <- data_hyperPar %>%
  filter(delta_scheme == "(-0.5:0.5)") %>%
  select(-system, -src_length, -delta_scheme)
show partial grid
# Print every distinct hyperparameter combination in the partial grid.
data_hyperPar %>%
  select(-ter, -sentence_id, -src_length_class, -seed) %>%
  distinct() %>%
  head(., n = nrow(.))
decoder_dropout | decoder_dropout_hidden | encoder_dropout | learning_rate |
---|---|---|---|
<fct> | <fct> | <fct> | <fct> |
0.2 | 0.2 | 0.2 | 1e-04 |
0.2 | 0.2 | 0.2 | 3e-04 |
0.2 | 0.6 | 0.6 | 3e-04 |
0.4 | 0.4 | 0.4 | 3e-04 |
0.4 | 0.4 | 0.4 | 1e-04 |
0.2 | 0.2 | 0.2 | 5e-04 |
0.2 | 0.6 | 0.6 | 5e-04 |
0.4 | 0.4 | 0.4 | 5e-04 |
0.2 | 0.6 | 0.6 | 1e-04 |
variance decomposition partial grid
# Variance decomposition over all hyperparameters in the partial grid
# (intercept-only random effects; derivative check disabled for speed).
var_decomp_partial <-
  lmer(
    ter ~
      (1 | sentence_id) +
      (1 | learning_rate) +
      (1 | seed) +
      (1 | decoder_dropout) +
      (1 | decoder_dropout_hidden) +
      (1 | encoder_dropout),
    data = data_hyperPar,
    control = lmerControl(optimizer = "nloptwrap", calc.derivs = FALSE)
  )
Warning message: “Model failed to converge with 1 negative eigenvalue: -1.0e+00”
ICC for partial grid
# ICC table for the partial-grid variance decomposition.
ICC(var_decomp_partial)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 9.049215e-02 | 0.8189 | 81.89 |
Residual | 1.894502e-02 | 0.1714 | 17.14 |
learning_rate | 8.012123e-04 | 0.0073 | 0.73 |
decoder_dropout_hidden | 1.341420e-04 | 0.0012 | 0.12 |
encoder_dropout | 1.178772e-04 | 0.0011 | 0.11 |
seed | 1.330220e-05 | 0.0001 | 0.01 |
decoder_dropout | 1.524030e-09 | 0.0000 | 0.00 |
load full grid data
# Load the full-grid results, restricted to the (-0.5:0.5) scheme.
data_hyperPar_full <- readRDS("data/data_hyperparameter-marking_full.rds") %>%
  filter(delta_scheme == "(-0.5:0.5)") %>%
  select(-system, -src_length, -delta_scheme)
show full grid
# Print all hyperparameter settings of the full grid.
# NOTE(review): group_by(across()) groups by every remaining column and
# prints all rows without deduplicating; the partial/extended chunks use
# distinct() here instead — presumably that was intended here too.
# Confirm before changing, since it would alter the printed output.
data_hyperPar_full %>%
select(-ter, -sentence_id, -src_length_class) %>%
group_by(across())
decoder_dropout | decoder_dropout_hidden | encoder_dropout | learning_rate | seed |
---|---|---|---|---|
<fct> | <fct> | <fct> | <fct> | <fct> |
0.2 | 0.4 | 0.4 | 5e-04 | 42 |
0.4 | 0.2 | 0.6 | 1e-04 | 43 |
0.2 | 0.2 | 0.4 | 1e-04 | 43 |
0.2 | 0.6 | 0.4 | 3e-04 | 42 |
0.2 | 0.2 | 0.2 | 1e-04 | 42 |
0.2 | 0.2 | 0.2 | 3e-04 | 42 |
0.4 | 0.2 | 0.2 | 1e-04 | 43 |
0.4 | 0.4 | 0.2 | 5e-04 | 44 |
0.2 | 0.4 | 0.2 | 1e-04 | 42 |
0.4 | 0.6 | 0.6 | 1e-04 | 43 |
0.4 | 0.4 | 0.2 | 1e-04 | 43 |
0.4 | 0.6 | 0.4 | 3e-04 | 44 |
0.2 | 0.6 | 0.2 | 3e-04 | 42 |
0.4 | 0.2 | 0.6 | 1e-04 | 42 |
0.4 | 0.2 | 0.6 | 3e-04 | 44 |
0.2 | 0.6 | 0.6 | 3e-04 | 44 |
0.4 | 0.6 | 0.6 | 3e-04 | 42 |
0.4 | 0.4 | 0.4 | 3e-04 | 43 |
0.2 | 0.4 | 0.2 | 1e-04 | 44 |
0.4 | 0.6 | 0.4 | 1e-04 | 44 |
0.2 | 0.2 | 0.6 | 5e-04 | 42 |
0.2 | 0.6 | 0.2 | 3e-04 | 43 |
0.2 | 0.2 | 0.6 | 5e-04 | 43 |
0.4 | 0.4 | 0.6 | 1e-04 | 42 |
0.4 | 0.6 | 0.2 | 3e-04 | 44 |
0.2 | 0.4 | 0.6 | 3e-04 | 43 |
0.4 | 0.6 | 0.2 | 3e-04 | 43 |
0.2 | 0.6 | 0.4 | 5e-04 | 42 |
0.2 | 0.6 | 0.4 | 3e-04 | 43 |
0.4 | 0.4 | 0.6 | 3e-04 | 42 |
⋮ | ⋮ | ⋮ | ⋮ | ⋮ |
0.2 | 0.2 | 0.6 | 3e-04 | 43 |
0.4 | 0.4 | 0.6 | 1e-04 | 44 |
0.4 | 0.2 | 0.4 | 1e-04 | 44 |
0.2 | 0.2 | 0.4 | 3e-04 | 43 |
0.2 | 0.2 | 0.4 | 1e-04 | 44 |
0.2 | 0.4 | 0.6 | 5e-04 | 42 |
0.2 | 0.4 | 0.2 | 5e-04 | 44 |
0.4 | 0.2 | 0.4 | 5e-04 | 42 |
0.4 | 0.4 | 0.6 | 5e-04 | 43 |
0.4 | 0.2 | 0.6 | 5e-04 | 44 |
0.2 | 0.2 | 0.4 | 5e-04 | 43 |
0.4 | 0.2 | 0.4 | 3e-04 | 44 |
0.2 | 0.6 | 0.2 | 5e-04 | 43 |
0.4 | 0.4 | 0.6 | 3e-04 | 43 |
0.2 | 0.6 | 0.6 | 1e-04 | 42 |
0.4 | 0.4 | 0.2 | 5e-04 | 43 |
0.2 | 0.6 | 0.4 | 1e-04 | 44 |
0.2 | 0.4 | 0.2 | 5e-04 | 43 |
0.2 | 0.2 | 0.2 | 1e-04 | 44 |
0.2 | 0.2 | 0.6 | 1e-04 | 44 |
0.4 | 0.2 | 0.2 | 1e-04 | 44 |
0.2 | 0.4 | 0.6 | 3e-04 | 42 |
0.2 | 0.6 | 0.2 | 5e-04 | 44 |
0.2 | 0.2 | 0.2 | 1e-04 | 43 |
0.4 | 0.2 | 0.2 | 5e-04 | 44 |
0.4 | 0.4 | 0.4 | 5e-04 | 43 |
0.2 | 0.2 | 0.4 | 3e-04 | 44 |
0.4 | 0.4 | 0.2 | 5e-04 | 42 |
0.4 | 0.6 | 0.6 | 5e-04 | 42 |
0.2 | 0.4 | 0.6 | 1e-04 | 44 |
variance decomposition full grid
# Variance decomposition over all hyperparameters in the full grid.
var_decomp_full <-
  lmer(
    ter ~
      (1 | sentence_id) +
      (1 | learning_rate) +
      (1 | seed) +
      (1 | decoder_dropout) +
      (1 | decoder_dropout_hidden) +
      (1 | encoder_dropout),
    data = data_hyperPar_full,
    control = lmerControl(optimizer = "nloptwrap", calc.derivs = FALSE)
  )
ICC for full grid
# ICC table for the full-grid variance decomposition.
ICC(var_decomp_full)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 1.087316e-01 | 0.8591 | 85.91 |
Residual | 1.730427e-02 | 0.1367 | 13.67 |
learning_rate | 4.133873e-04 | 0.0033 | 0.33 |
encoder_dropout | 9.792333e-05 | 0.0008 | 0.08 |
decoder_dropout | 6.712741e-06 | 0.0001 | 0.01 |
decoder_dropout_hidden | 1.474430e-05 | 0.0001 | 0.01 |
seed | 1.663100e-06 | 0.0000 | 0.00 |
load extended grid data
# Load the extended-grid results, restricted to the (-0.5:0.5) scheme.
data_hyperPar_extended <- readRDS("data/data_hyperparameter-marking_extended.rds") %>%
  filter(delta_scheme == "(-0.5:0.5)") %>%
  select(-system, -src_length, -delta_scheme)
show extended grid
# Print the distinct hyperparameter combinations of the extended grid.
data_hyperPar_extended %>%
  select(-ter, -sentence_id, -src_length_class, -seed) %>%
  distinct()
decoder_dropout | decoder_dropout_hidden | encoder_dropout | learning_rate |
---|---|---|---|
<fct> | <fct> | <fct> | <fct> |
0 | 0.4 | 0 | 5e-04 |
0.6 | 0.4 | 0.4 | 3e-04 |
0.4 | 0.6 | 0 | 0.001 |
0 | 0 | 0.6 | 3e-04 |
0.2 | 0 | 0.6 | 0.001 |
0 | 0.6 | 0.6 | 5e-04 |
0 | 0 | 0.4 | 0.001 |
0.2 | 0.4 | 0.4 | 5e-04 |
0.2 | 0 | 0.4 | 3e-04 |
0.4 | 0.4 | 0.2 | 0.001 |
0.4 | 0.4 | 0 | 3e-04 |
0.4 | 0 | 0.2 | 1e-04 |
0.6 | 0.6 | 0 | 3e-04 |
0 | 0.2 | 0.2 | 3e-04 |
0.6 | 0.6 | 0.4 | 1e-04 |
0.6 | 0.2 | 0.2 | 5e-04 |
0.6 | 0 | 0.4 | 0.001 |
0.6 | 0.2 | 0.4 | 1e-04 |
0.4 | 0.2 | 0.6 | 1e-04 |
0.4 | 0.2 | 0 | 1e-04 |
0 | 0.2 | 0 | 5e-04 |
0.2 | 0.2 | 0.4 | 1e-04 |
0 | 0.4 | 0.6 | 5e-04 |
0.6 | 0.6 | 0 | 0.001 |
0 | 0.6 | 0.6 | 1e-04 |
0.6 | 0.6 | 0.2 | 1e-04 |
0.4 | 0.6 | 0.6 | 0.001 |
0.4 | 0.4 | 0.6 | 0.001 |
0.2 | 0.2 | 0.6 | 0.001 |
0.6 | 0.6 | 0 | 5e-04 |
⋮ | ⋮ | ⋮ | ⋮ |
0.2 | 0 | 0.2 | 3e-04 |
0.6 | 0.2 | 0 | 1e-04 |
0.6 | 0.4 | 0.2 | 5e-04 |
0.4 | 0.2 | 0.4 | 5e-04 |
0.2 | 0.4 | 0 | 5e-04 |
0.4 | 0.2 | 0.4 | 3e-04 |
0.6 | 0.2 | 0.2 | 3e-04 |
0.4 | 0 | 0.4 | 0.001 |
0.2 | 0.6 | 0.2 | 5e-04 |
0 | 0 | 0 | 1e-04 |
0.4 | 0.6 | 0.4 | 5e-04 |
0.2 | 0 | 0.2 | 0.001 |
0.4 | 0.2 | 0.4 | 0.001 |
0 | 0 | 0.4 | 5e-04 |
0.6 | 0.2 | 0.4 | 3e-04 |
0.2 | 0.2 | 0.4 | 3e-04 |
0.6 | 0.2 | 0 | 3e-04 |
0.2 | 0.4 | 0.6 | 1e-04 |
0.6 | 0.4 | 0.2 | 3e-04 |
0.6 | 0.6 | 0.4 | 3e-04 |
0 | 0.4 | 0.6 | 3e-04 |
0 | 0.2 | 0.6 | 0.001 |
0.2 | 0.4 | 0 | 1e-04 |
0.6 | 0 | 0 | 1e-04 |
0 | 0 | 0.6 | 5e-04 |
0.2 | 0.6 | 0.6 | 1e-04 |
0.6 | 0 | 0.4 | 3e-04 |
0 | 0.4 | 0.4 | 5e-04 |
0.4 | 0 | 0.2 | 5e-04 |
0.6 | 0 | 0 | 3e-04 |
variance decomposition extended grid
# Variance decomposition over all hyperparameters in the extended grid.
var_decomp_extended <-
  lmer(
    ter ~
      (1 | sentence_id) +
      (1 | learning_rate) +
      (1 | seed) +
      (1 | decoder_dropout) +
      (1 | decoder_dropout_hidden) +
      (1 | encoder_dropout),
    data = data_hyperPar_extended,
    control = lmerControl(optimizer = "nloptwrap", calc.derivs = FALSE)
  )
ICC for extended grid
# ICC table for the extended-grid variance decomposition.
ICC(var_decomp_extended)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 9.018072e-02 | 0.7684 | 76.84 |
Residual | 2.457837e-02 | 0.2094 | 20.94 |
learning_rate | 2.401299e-03 | 0.0205 | 2.05 |
decoder_dropout | 1.198882e-04 | 0.0010 | 0.10 |
encoder_dropout | 5.429719e-05 | 0.0005 | 0.05 |
decoder_dropout_hidden | 3.144136e-05 | 0.0003 | 0.03 |
seed | 1.553485e-06 | 0.0000 | 0.00 |
# Sentence-only baseline on the partial grid, for comparison of the ICCs.
# Note: this overwrites the earlier var_decomp_partial fit.
var_decomp_partial <-
  lmer(
    ter ~ (1 | sentence_id),
    data = data_hyperPar,
    control = lmerControl(optimizer = "nloptwrap", calc.derivs = FALSE)
  )
ICC(var_decomp_partial)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 0.09046723 | 0.8213 | 82.13 |
Residual | 0.01968357 | 0.1787 | 17.87 |
# Sentence-only baseline on the full grid, for comparison of the ICCs.
# Note: this overwrites the earlier var_decomp_full fit.
var_decomp_full <-
  lmer(
    ter ~ (1 | sentence_id),
    data = data_hyperPar_full,
    control = lmerControl(optimizer = "nloptwrap", calc.derivs = FALSE)
  )
ICC(var_decomp_full)
effect | variance | icc | icc_perc |
---|---|---|---|
<chr> | <dbl> | <dbl> | <dbl> |
sentence_id | 0.1087282 | 0.8603 | 86.03 |
Residual | 0.0176615 | 0.1397 | 13.97 |