Columns — ID: int64 (1–1.07k); Comment: string (8–1.13k chars); Code: string (10–4.28k chars); Label: string (4 classes); Source: string (21 chars); File: string (4–82 chars)
Transform academic background and job category variables from character to factor
class(expertsample$acadgroup)
expertsample$acadgroup <- as.factor(expertsample$acadgroup)
class(expertsample$job_cat)
expertsample$job_cat <- as.factor(expertsample$job_cat)
Data Variable
https://osf.io/u9hkj/
expertsurvey_CDRCCS.R
202
Graphical check for normal distribution of the support differences (means)
diff_support <- bothexpert$BECCS_support - bothexpert$DACCS_support
hist(diff_support)
Visualization
https://osf.io/u9hkj/
expertsurvey_CDRCCS.R
203
Wilcoxon signed-rank test for support (means)
wilcox.test(bothexpert$BECCS_support, bothexpert$DACCS_support, paired=TRUE)
Statistical Test
https://osf.io/u9hkj/
expertsurvey_CDRCCS.R
204
correlations separately for each condition
data_con <- data[data$Cond == "Control", ]
data_ent <- data[data$Cond == "Entitlement", ]
chart.Correlation(data_con[, c("pes","prestige","dominance","benign","malicious","pain")], use = "pairwise.complete.obs", pch = 20, histogram = TRUE)
chart.Correlation(data_ent[, c("pes","prestige","dominance","benign","malicious","pain")], use = "pairwise.complete.obs", pch = 20, histogram = TRUE)
rm(data_con, data_ent)
Data Variable
https://osf.io/sb3kw/
Study2A_analyses.R
205
Estimate partial correlation matrices using FIML (partial correlations and not glasso, because different sample sizes lead to different glasso penalties); a loop-based refactor sketch follows this record
net1hi <- cor1.edhi$cor %>% EBICglasso(., n = cor1.edhi$n)
net2hi <- cor2.edhi$cor %>% EBICglasso(., n = cor2.edhi$n)
net3hi <- cor3.edhi$cor %>% EBICglasso(., n = cor3.edhi$n)
net4hi <- cor4.edhi$cor %>% EBICglasso(., n = cor4.edhi$n)
net5hi <- cor5.edhi$cor %>% EBICglasso(., n = cor5.edhi$n)
net6hi <- cor6.edhi$cor %>% EBICglasso(., n = cor6.edhi$n)
net7hi <- cor7.edhi$cor %>% EBICglasso(., n = cor7.edhi$n)
net1lo <- cor1.edlo$cor %>% EBICglasso(., n = cor1.edlo$n)
net2lo <- cor2.edlo$cor %>% EBICglasso(., n = cor2.edlo$n)
net3lo <- cor3.edlo$cor %>% EBICglasso(., n = cor3.edlo$n)
net4lo <- cor4.edlo$cor %>% EBICglasso(., n = cor4.edlo$n)
net5lo <- cor5.edlo$cor %>% EBICglasso(., n = cor5.edlo$n)
net6lo <- cor6.edlo$cor %>% EBICglasso(., n = cor6.edlo$n)
net7lo <- cor7.edlo$cor %>% EBICglasso(., n = cor7.edlo$n)
Statistical Modeling
https://osf.io/mj5nh/
educationnetworksatleast6.R
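The fourteen near-identical calls above can be collapsed into a loop. A minimal refactor sketch, assuming the cor1.edhi…cor7.edhi and cor1.edlo…cor7.edlo objects from the original script and that EBICglasso() comes from the qgraph package:
library(qgraph)  # EBICglasso()
cors_hi <- list(cor1.edhi, cor2.edhi, cor3.edhi, cor4.edhi, cor5.edhi, cor6.edhi, cor7.edhi)
cors_lo <- list(cor1.edlo, cor2.edlo, cor3.edlo, cor4.edlo, cor5.edlo, cor6.edlo, cor7.edlo)
nets_hi <- lapply(cors_hi, function(co) EBICglasso(co$cor, n = co$n))  # one network per correlation object
nets_lo <- lapply(cors_lo, function(co) EBICglasso(co$cor, n = co$n))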
206
5. Confirmatory factor analysis. To replicate our results without item 1, delete yts_1 from the analyses. 5.1 Yes/no RESIST
dataCFA1 <- na.omit(data.frame(data$id, data$yts_1, data$yts_2, data$yts_3, data$yts_4, data$yts_5, data$yts_6, data$yts_7, data$yts_8, data$yts_9, data$yts_10, data$yts_11, data$yts_12, data$yts_13))
colnames(dataCFA1) <- c("id","yts_1","yts_2","yts_3","yts_4","yts_5","yts_6","yts_7","yts_8","yts_9","yts_10","yts_11","yts_12","yts_13")
model1 <- 'EA =~ yts_1 + yts_2 + yts_3 + yts_4 + yts_5 + yts_6 + yts_7 + yts_8 + yts_9 + yts_10 + yts_11 + yts_12 + yts_13'
fit1 <- cfa(model1, data = dataCFA1, meanstructure = T, std.lv = T, estimator = "WLSMV", ordered = c("yts_1","yts_2","yts_3","yts_4","yts_5","yts_6","yts_7","yts_8","yts_9","yts_10","yts_11","yts_12","yts_13"))
summary(fit1, fit.measures = TRUE, standardized = T)
fitMeasures(fit1, c("chisq","df","pvalue","cfi","tli","rmsea","wrmr","rmsea.ci.lower","rmsea.ci.upper"))
modindices(fit1, sort. = T)
Statistical Modeling
https://osf.io/g2nkw/
YCAS_Scale_development.R
207
Get factor scores and include them in the original data
dataCFA1$factorscore1 <- predict(fit1)
data <- left_join(data, dataCFA1, by = "id")
data$factorscore1
dataCFA3$factorscore3 <- predict(fit3)
data1 <- left_join(data1, dataCFA3, by = "record_id")
data1$factorscore3
dataCFA4$factorscore4 <- predict(fit4)
data1 <- left_join(data1, dataCFA4, by = "record_id")
data1$factorscore4
Data Variable
https://osf.io/g2nkw/
YCAS_Scale_development.R
208
join behDat and demDat
allData = inner_join(behDat, demDat, by = "userCode")
Data Variable
https://osf.io/wcfj3/
1_dataPrep.R
209
Standardize variables (z-scoring)
df_overall = df
df_overall$sWM = scale(df_overall$sWM)
df_overall$rtWM = scale(df_overall$rtWM)
df_overall$rtDivAtt = scale(df_overall$rtDivAtt)
df_overall$rtAlert = scale(df_overall$rtAlert)
df_overall$sRWT = scale(df_overall$sRWT)
Data Variable
https://osf.io/wcfj3/
1_dataPrep.R
210
Remove time points of 29 and above, because not everyone attended that often
df_overall = df_overall %>% filter(Time < 29) %>% ungroup()
Data Variable
https://osf.io/wcfj3/
1_dataPrep.R
211
ensure that userCode is a factor
df_overall$userCode = as.factor(df_overall$userCode)
Data Variable
https://osf.io/wcfj3/
1_dataPrep.R
212
We can also try to cluster the languages based on the feature values they have. We currently use hierarchical clustering. This will normally also reflect the PCA visualization. The output can also be compared with the family grouping of the languages (see the sketch after this record). Generate the clusters
hclust_avg <- hclust(daisy(con_data))
Visualization
https://osf.io/6hx2n/
DM_a_k.R
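A minimal follow-up sketch for the family comparison the comment mentions, assuming con_data is the feature data frame and daisy() comes from the cluster package as above:
library(cluster)                      # daisy()
plot(hclust_avg)                      # dendrogram; should roughly mirror the PCA layout
groups <- cutree(hclust_avg, k = 4)   # hard cluster memberships
table(groups)                         # cluster sizes; cross-tabulate with family labels if available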
213
add squares around the clusters
rect.hclust(hclust_avg, k = 4, border = 1:4)
Visualization
https://osf.io/6hx2n/
DM_a_k.R
214
Extracting distances. We can extract the pairwise distances between the points of the data set. First, we can visualize the distances in a two-dimensional space (see the sketch after this record). Normally, we expect that this visualization matches the output of the PCA. Change the content to a distance matrix
distances <- con_data %>% dist(method = "euclidean")
Visualization
https://osf.io/6hx2n/
DM_a_k.R
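A sketch of the two-dimensional visualization the comment describes, under the assumption that classical multidimensional scaling (base-R cmdscale()) is an acceptable projection of the distance object:
coords <- cmdscale(distances, k = 2)           # project pairwise distances into 2-D
plot(coords, xlab = "Dim 1", ylab = "Dim 2")   # compare against the PCA layout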
215
recodes ethnicity (Decline/Other category is set to missing)
sambdif <- samb %>%
  select(1:19, SAmb_tot, gender, ethnic) %>%
  mutate(ethnic_fac = rec(ethnic, rec = "1=0; 6=NA; else=1")) %>%
  convert(fct(gender, ethnic_fac))
table(sambdif$gender)     # 0 = women, 1 = men
table(sambdif$ethnic_fac) # 0 = EuroAm, 1 = PoC
Data Variable
https://osf.io/ztycp/
Schizotypal Ambivalence.R
216
reorder the list of results to match the alignment of the dendrogram
list.res.reo <- list.res[order.dendrogram(dend1)]
layd <- c(10, 1)
tiff("Figure_hclust2.tif", width = sum(layd), height = 11.5, units = "cm", res = 600, compression = "lzw")
layout(matrix(1:2, nrow = 1), widths = layd)
par(mar = c(3.5, 1, 1.5, 14), mgp = c(2, 0.8, 0))
plot(dend1, cex = 0.8, horiz = T, xlab = "Height")
Visualization
https://osf.io/greqt/
04_clusters2_tvals.R
217
function to read in cleaned data and format it as a tibble that can be used for plotting
read_data_for_plotting <- function(data_path) {
  dat <- read_csv(data_path, col_types = cols())
  dat  # returned as a tibble for plotting (original body truncated here)
}
Visualization
https://osf.io/fb5tw/
plotting_style.R
218
Create a matrix of relevant item loadings to calculate non-refined factor (scale) scores
scaleMatrix <- data.frame(data = matrix(EFA$loadings, ncol = 6))
scaleMatrix <- lapply(scaleMatrix, function(x) ifelse(x >= 0.4, 1, ifelse(x <= -0.4, -1, NA)))
scaleMatrix <- data.frame(data = scaleMatrix, row.names = colnames(mainData[, c(2:56)]))
colnames(scaleMatrix) <- c("Sensory", "CognitiveDemand", "ThreatToSelf", "CrossSettings", "Safety", "States")
Statistical Modeling
https://osf.io/2j47e/
Cluster analysis non-refined factor scores.R
219
get number of patients (from 'table_overview_of_studies.R')
df_long <- left_join(df_long, tab_study_overview %>% select(study_name, N_Patients) %>% rename(Study = study_name) %>% mutate(N_Patients = as.numeric(str_extract(N_Patients, '(\\d*)'))), by = 'Study')
Data Variable
https://osf.io/fykpt/
03_table_comorbidities_by_study.R
220
add error bars with standard error
plot <- plot + geom_errorbar(aes(ymin = lower, ymax = upper), width = .2)
plot <- plot + ggtitle("2a) Average accuracy across four Cue Type conditions")
Visualization
https://osf.io/bfq39/
Code_LMMs_BestPractice_Example_withOutput.R
221
Capture age for each participant
ParticipantAge <- summarize(Summary, count = n(), Age = mean(Age, na.rm = T))
Data Variable
https://osf.io/2uf8j/
Negativity R.R
222
EVENT VALENCE: Compare rated strength of positive & negative events. Checking for outliers. Figure: boxplot; order conditions
EventValenceRatings$Valence <- factor(EventValenceRatings$Valence, levels = c("Positive", "Negative"))
BoxplotFig <- ggplot(EventValenceRatings, aes(x = Valence, y = Strength)) + theme_bw()
BoxplotFig <- BoxplotFig + geom_boxplot(aes(color = Valence, fill = Valence), outlier.size = 2.5, alpha = 0.5)
BoxplotFig <- BoxplotFig + labs(x = "Event Valence", y = "Event Valence Ratings (-50 to 50)") + theme(title = element_text(size = 14, face = 'bold'), axis.title.x = element_text(size = 14), axis.text.x = element_text(face = "bold", size = 14), axis.title.y = element_text(size = 14), axis.text.y = element_text(face = "bold", size = 14), strip.text.x = element_text(size = 14, face = 'bold'), strip.text.y = element_text(size = 14, face = 'bold'))
Visualization
https://osf.io/2uf8j/
Negativity R.R
223
Dotplot valence figure (winsorized & reverse-coded)
EventValenceRatings_MEANFig <- ggplot(data = EventValenceRatings_ByParticipant_MEAN, aes(x = Valence, y = Strength_ReverseCoded))
EventValenceRatings_MEANFig <- EventValenceRatings_MEANFig + stat_summary(fun = mean, geom = "bar", position = "dodge", alpha = 0.5, aes(fill = Valence))
EventValenceRatings_MEANFig <- EventValenceRatings_MEANFig + theme_bw()
EventValenceRatings_MEANFig <- EventValenceRatings_MEANFig + geom_violin(alpha = 0.5, color = 'grey50')
Visualization
https://osf.io/2uf8j/
Negativity R.R
224
Mixed-effects model with EventPosition as a covariate (linear & quadratic). Random-effects structure simplified to converge: ChainPosition removed from ChainID; SocialContext removed from Event
Analysis1GLMER <- glmer(Present ~ SocialContext.C*Valence.C*ChainPosition.C + poly(EventPosition.C, 2) + (1 + Valence.C | ChainID) + (1 | Event), data = Analysis1, family = binomial)
summary(Analysis1GLMER)
Analysis2GLMER <- glmer(Present ~ SocialContext.C*Valence.C*ChainPosition.C + poly(EventPosition.C, 2) + (1 + Valence.C | ChainID) + (1 | Event), data = Analysis2, family = binomial)
summary(Analysis2GLMER)
Statistical Modeling
https://osf.io/2uf8j/
Negativity R.R
225
Mixed-effects model with EventPosition as a covariate (linear & quadratic). Random-effects structure simplified to converge: Valence & ChainPosition removed from ChainID; SocialContext removed from Event
Analysis3GLMER <- glmer(Present ~ SocialContext.C*Valence.C*ChainPosition.C + poly(EventPosition.C, 2) + (1 | ChainID) + (1 | Event), data = Analysis3, family = binomial)
summary(Analysis3GLMER)
Statistical Modeling
https://osf.io/2uf8j/
Negativity R.R
226
VISUALISE BIAS (POSITIVE, NEGATIVE) IN EACH SocialContext. Subtract mean positive from mean negative survival score for each SocialContext. Prediction is that preference for negativity decreases first with communicative intent and then further with social interaction. Calculate means for positive & negative information. Calculate mean positive − mean negative for each chain. Plot. Filter
Bias <- filter(NegativityData, Valence %in% c("Positive", "Negative"))
Visualization
https://osf.io/2uf8j/
Negativity R.R
227
Create a vector of colours based on each participant's mean bias score (positive − negative). Use this to colour the dot points by value
wideBiasMEAN <- mutate(wideBiasMEAN, ColourFig = ifelse(Bias > 1, "#006A40FF", ifelse(Bias < 0, "#F08892FF", "#95828DFF")))
NegativityBiasFig <- ggplot(data = wideBiasMEAN, aes(x = SocialContext, y = Bias))
NegativityBiasFig <- NegativityBiasFig + stat_summary(fun.y = mean, geom = "bar", position = "dodge", alpha = 0.5, fill = "red")
NegativityBiasFig <- NegativityBiasFig + theme_bw()
NegativityBiasFig <- NegativityBiasFig + geom_violin(alpha = 0.5, color = 'grey50')
Visualization
https://osf.io/2uf8j/
Negativity R.R
228
Geom_dotplot with points coloured by Value
NegativityBiasFig <- NegativityBiasFig + geom_dotplot(aes(x=SocialContext, y=Bias, fill=ColourFig), binaxis='y', stackdir='center', dotsize=0.9, alpha = 1, stroke=0.75, colour='grey15') + scale_fill_identity()
Visualization
https://osf.io/2uf8j/
Negativity R.R
229
Add violin geom to give an indication of data normality
ResolutionCollapsedFig <- ResolutionCollapsedFig + geom_violin(alpha = 0.0, colour = 'grey50')
ResolutionALLPosNegFig <- ResolutionALLPosNegFig + geom_violin(alpha = 0.5, color = 'grey50')
Visualization
https://osf.io/2uf8j/
Negativity R.R
230
Run IRT model: 3-dimensional model for math, read and scie, no weights. Seeds and iteration number can be set in the function parameters
mod <- run.irt(pa12_resp_s, items = item.dif)
Statistical Modeling
https://osf.io/8fzns/
3_IRT.R
231
Function to create tick marks on a log scale (a usage sketch follows this record)
log10Tck <- function(side, type) {
  lim <- switch(side, x = par('usr')[1:2], y = par('usr')[3:4], stop('side argument must be "x" or "y"'))
  at <- floor(lim[1]):ceiling(lim[2])
  at <- 0:8
  return(switch(type,
                minor = outer(1:9, 10^(min(at):max(at)))[, 1:8],
                major = 10^at,
                stop('type argument must be "major" or "minor"')))
}
if (saveFigures) cairo_ps(file = '../R_Output/Images/OriginalBFScatter.eps', onefile = TRUE, fallback_resolution = 600, width = 7.3, height = 4.16)
Visualization
https://osf.io/x72cy/
ConfirmatoryAnalyses.R
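A hedged usage sketch for log10Tck(), assuming a plot with a log-scaled x axis is already open (as in the Bayes factor scatter plot in the next record); base-R axis() draws the tick positions the function returns:
axis(1, at = log10Tck('x', 'major'), tcl = 0.4)                # labelled major ticks at powers of 10
axis(1, at = log10Tck('x', 'minor'), tcl = 0.2, labels = NA)   # unlabelled minor ticks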
232
Plot: original Bayes factor and replication effect size
plot(original.bf, replication.effectsizes, xlim = c(1, 10^8), ylim = c(-0.2, 1), axes = FALSE, log = 'x', cex = 2, cex.lab = 1.6, pch = 21, bg = c('grey36', 'grey')[replication.outcomes], xlab = 'Bayes Factor Original Study', ylab = 'Replication Effect Size (r)')
Visualization
https://osf.io/x72cy/
ConfirmatoryAnalyses.R
233
Take the x-axis indices and add a jitter, proportional to the N in each level
myjitter <- jitter(rep(i, length(thisvalues)), amount = levelProportions[i]/6)
points(myjitter, thisvalues, pch = 1, col = rgb(0, 0, 0, .9), cex = 1.5, lwd = 1.5)
}  # closes the loop over levels (loop header not shown)
graphics.off()
rm(myjitter, thislevel, thisvalues, mylevels, levelProportions, nsubjects, subjects)
Data Variable
https://osf.io/x72cy/
ConfirmatoryAnalyses.R
234
Multiple Regression Analysis
multreg.second(Speaking ~ Vocabulary + Grammar + Writing + Reading, corr = correl, n = 100)
multreg.second(Y ~ X1 + X2 + X3, corr = correl, n = 100)
multreg.second(Score ~ Wordcount + CLI + Commas + Stopwords + Linking + WordsSentence, corr = correl, n = 200)
multreg(Score ~ Wordcount + CLI + Commas + Stopwords + Linking + WordsSentence, data = dat1)
lm.out <- lm(Score ~ ., dat1)
Statistical Modeling
https://osf.io/uxdwh/
code.R
235
Dominance Analysis (using dominanceanalysis package)
lm.cov <- lmWithCov(Score ~ Wordcount + CLI + Commas + Stopwords + Linking + WordsSentence, correl)
da <- dominanceAnalysis(lm.cov)
print(da)
Statistical Modeling
https://osf.io/uxdwh/
code.R
236
Functions for cleaning. Function to generate a table with all observations within a variable and their corresponding counts (a usage sketch follows this record)
check_observation <- function(df, column) {
  require(tidyverse)
  check <- df %>% group_by(as.vector(unlist(df[, column]))) %>% count()
  colnames(check)[1] <- column
  return(check)
}
recur.collapse <- function(df, n = ncol(df)) { # collapse function
  combine <- function(x, y) {
    if (!is.na(x)) {
      return(x)
    } else if (!is.na(y)) {
      return(y)
    } else {
      return(NA)
    }
  }
  if (n == 1) {
    return(df[, 1])
  } else {
    return(mapply(combine, df[, n], recur.collapse(df, n - 1)))
  }
}
multi_spread <- function(df, id, key, var) {
  require(gtools)
  require(tidyverse)
  df.list <- lapply(var, function(x) {
    df.temp <- spread(df[, c(id, key, x)], key, x) # spread 1 variable
    cname <- mixedsort(unique(as.vector(unlist(df[, key])))) # sort colnames in alphanumeric order
    df.temp <- df.temp[, c(id, cname)] # reorder columns
    colnames(df.temp)[2:ncol(df.temp)] <- paste(x, colnames(df.temp[, cname]), sep = "_") # add key to column names
    return(df.temp)
  })
  df.output <- df.list[[1]] # initialise
  for (i in 1:(length(var) - 1)) { # combine matrices together
    df.output <- full_join(df.output, df.list[[i + 1]])
  }
  return(as.data.frame(df.output))
}
Data Variable
https://osf.io/6qej7/
ECoach Functions.R
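A hypothetical usage sketch for check_observation(), assuming a data frame df with a gender column; the function simply tallies every distinct value, which is handy for spotting typos before recoding:
check_observation(df, "gender")   # one row per distinct value, with its count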
237
Extract posteriors of effects from Bayesian ANOVA
bestmod <- modBF[1]
chains <- posterior(bestmod, iterations = 50000, columnFilter = "^Subject$")
colnames(chains)
Statistical Modeling
https://osf.io/8abj4/
Exp2.R
238
STATISTICAL TEST: Nonparametric Spearman correlation, non-normalized conditional entropy
cor.test(results$DATE, results$CEDenominationsMotifs, method = "spearman")
rdates <- rev(dates) # dates = centuries BCE
plot(results$DATE, results$CEDenominationsMotifs, xlim = c(6, 4), xaxt = 'n', xlab = "Century BCE", ylab = "H(D|d)", main = "P2: Conditional entropy of denominations given designs")
axis(1, at = rdates, labels = rdates)
Statistical Test
https://osf.io/uckzx/
P2_analysis_oldbins.R
239
Plots with H(D|d), non-normalized conditional entropy. Getting statistics: mean, median, standard deviation among authorities per period for non-normalized conditional entropy of denominations given designs
N <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = length)
MEAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = mean)
MEDIAN <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = median)
SD <- aggregate(CEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = sd)
resultspoleis_summary <- cbind.data.frame(N, MEAN$CEDenominationsMotifs, MEDIAN$CEDenominationsMotifs, SD$CEDenominationsMotifs)
colnames(resultspoleis_summary) <- c("DATE","N","MEAN","MEDIAN","SD")
resultspoleis_summary$SE <- resultspoleis_summary$SD / sqrt(resultspoleis_summary$N)
Visualization
https://osf.io/uckzx/
P2_analysis_oldbins.R
240
Getting statistics: mean, median, standard deviation among authorities per period for normalized conditional entropy of denominations given designs
N <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = length)
MEAN <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = mean)
MEDIAN <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = median)
SD <- aggregate(NormCEDenominationsMotifs ~ DATE, data = resultspoleis, FUN = sd)
Nresultspoleis_summary <- cbind.data.frame(N, MEAN$NormCEDenominationsMotifs, MEDIAN$NormCEDenominationsMotifs, SD$NormCEDenominationsMotifs)
colnames(Nresultspoleis_summary) <- c("DATE","N","MEAN","MEDIAN","SD")
Nresultspoleis_summary$SE <- Nresultspoleis_summary$SD / sqrt(Nresultspoleis_summary$N)
Statistical Modeling
https://osf.io/uckzx/
P2_analysis_oldbins.R
241
Aggregate data, z-standardize all predictor variables for the model
recogfreq <- aggregate(cbind(correct, intrusion, new) ~ id + setsize + rsizeList + rsizeNPL, data = recogdat, FUN = sum)
recogfreq$zsetsize <- (recogfreq$setsize - mean(recogfreq$setsize)) / sd(recogfreq$setsize)
recogfreq$zrsizeList <- (recogfreq$rsizeList - mean(recogfreq$rsizeList)) / sd(recogfreq$rsizeList)
recogfreq$zrsizeNPL <- (recogfreq$rsizeNPL - mean(recogfreq$rsizeNPL)) / sd(recogfreq$rsizeNPL)
for (sc in 1:length(scales)) {
  prior <- paste0("cauchy(0, ", as.character(scales[sc]), ")")
  fixefPrior <- c(set_prior(prior, class = "b"))
  ranefPrior <- set_prior("gamma(1,0.04)", class = "sd")
Data Variable
https://osf.io/qy5sd/
PairsBindingRSS_Stats.R
242
Cronbach's alpha (ingroup)
iden.ing.woOutliers.a <- c("ingroup.value", "ingroup.like", "ingroup.connected")
iden.ing.woOutliers <- data.all[iden.ing.woOutliers.a]
cronbach(iden.ing.woOutliers)
Statistical Test
https://osf.io/9tnmv/
Exp1a_Election_post.R
243
Min and max age, mean and SD of age, counts of men, women, and other
minAge = min(df_preQ$age)
maxAge = max(df_preQ$age)
meanAge = mean(df_preQ$age)
sdAge = sd(df_preQ$age)
females = length(which(df_preQ$gender == "female"))
males = length(which(df_preQ$gender == "male"))
other = length(which(df_preQ$gender == "other"))
Data Variable
https://osf.io/xh36s/
quest_analyses.R
244
Check normality with a Q-Q plot and Shapiro-Wilk test (see the sketch after this record). Build the linear model
model <- lm(FW ~ condition, data = df_postQ)
Statistical Test
https://osf.io/xh36s/
quest_analyses.R
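A minimal sketch of the normality checks the comment names, assuming the model object fitted above; both operate on the residuals:
qqnorm(resid(model)); qqline(resid(model))  # Q-Q plot of the residuals
shapiro.test(resid(model))                  # Shapiro-Wilk test on the residuals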
245
Question 4: Pearson correlation between the different subscales
df_cor <- data.frame(df_postQ$FW, df_postQ$DU, df_postQ$DET)
colnames(df_cor) <- c('FW', 'DU', 'DET')
res_cor <- rcorr(as.matrix(df_cor))
Statistical Test
https://osf.io/xh36s/
quest_analyses.R
246
Visualize correlations. Insignificant correlations are left blank
corrplot(res_cor$r, method = 'number', type="upper", order="hclust", p.mat = res_cor$P, sig.level = 0.05, insig = "blank")
Visualization
https://osf.io/xh36s/
quest_analyses.R
247
Stepwise regression model (a sketch of the full.model it starts from follows this record)
step.model <- stepAIC(full.model, direction = "both", trace = FALSE)
summary(step.model)
Statistical Modeling
https://osf.io/xh36s/
quest_analyses.R
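For context, a hypothetical sketch of the full.model that stepAIC() starts from (its definition is not shown in the snippet); stepAIC() is from the MASS package, and the formula here is an assumption:
library(MASS)
full.model <- lm(FW ~ ., data = df_postQ)   # hypothetical full model over all predictors
step.model <- stepAIC(full.model, direction = "both", trace = FALSE)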
248
Subset the data set to use base2 instead of base1 for the individuals that have the 2nd baseline. Extract values of frogs that have two baselines
base2_frogs <-filter(hormones, condition == "base_02")
Data Variable
https://osf.io/3bpn6/
os_testosterone_analysis.R
249
LMER testosterone by sex and condition with interaction
m1_osT <- lmer(log(t_conc_corr) ~ sex*condition + (1|id), data = hormones)
Statistical Modeling
https://osf.io/3bpn6/
os_testosterone_analysis.R
250
Sex difference in T-level. Model plot with sjPlot
plot_model(m1_osT_1, title = "", axis.title = "Fixed effect estimates", axis.labels = c("Time point\n (back home)", "Sex \n(male)"), dot.size = 5, line.size = 2, transform = NULL, sort.est = TRUE, colors = "gs", vline.color = "darkgrey")+ ylim(-0.5, 1) + theme_sjplot2(24)
Visualization
https://osf.io/3bpn6/
os_testosterone_analysis.R
251
Now the random slopes part of the model, including random intercepts and slopes and their correlation:
pot.terms.with.corr = paste(c(xnames, fe.me[modes[fe.me] != "factor"]), collapse = "+") # get all fixed effects terms together...
pot.terms.with.corr = paste(paste("(1+", pot.terms.with.corr, "|", re, ")", sep = ""), collapse = "+") # ... and paste random effects, brackets and all that
Statistical Modeling
https://osf.io/vjeb3/
diagnostic_fcns.r
252
Supplemental Material: To check whether the participants followed gaze at all, we tested the gaze-following score against chance level by running a one-sample test against zero. Children followed gaze in the additional gaze-following task (M = 3, SD = 1.66; t(29) = 9.89, p < .001, d = 1.8). Correlation GF & difference score
Data$DiffSocCon <- as.numeric(Data$DiffSocCon)
cor.test(Data$DS, Data$DiffSocCon, method = "pearson") # only younger
Statistical Test
https://osf.io/4a9b6/
EXPLORATORY_Gaze Following_ANALYSES.R
253
use network data only for students in classrooms with at least 50% response rate
dat$nwinclude <- 0
dat[which(dat$prop_part > .50 & !is.na(dat$prop_part)), "nwinclude"] <- 1
length(unique(dat[which(dat$nwinclude == 1), "IDTESTGROUP_FDZ"])) # 1708 classes with at least 50% response rate
length(unique(dat[which(dat$nwinclude == 0), "IDTESTGROUP_FDZ"])) # 290 classes excluded for network-related analyses
dat[which(dat$nwinclude == 0), c(nw_covariates, grep("deg", names(dat), value = T))] <- NA # recode sociometric variables to NA for classes with low level of completeness
length(unique(dat[which(dat$nwinclude == 0), "IDSTUD_FDZ"])) # 5450 students without sociometric data
length(unique(dat[which(dat$nwinclude == 1), "IDSTUD_FDZ"])) # 36,920 students with sociometric data
Data Variable
https://osf.io/hu2n8/
01_get_gads.R
254
Reorder factor levels: (this will be useful once we create the plot legend)
dataset$trial_condition <- factor(dataset$trial_condition, levels = c("diff_3sg", "diff_3pl", "same_3sg", "same_3pl"))
dataset$trial_condition_new <- factor(dataset$trial_condition_new, levels = c("different number", "same number"))
Visualization
https://osf.io/37rfb/
prediction_plots.R
255
Prepare dataframe for eye-tracking analysis & graphs (package eyetrackingR required)
eyetrackingr.data <- make_eyetrackingr_data(dataset, participant_column = "participant_number", trial_column = "item_nr", time_column = "time_ms_absolute", trackloss_column = "trackloss", aoi_columns = c("average_target_sample_count_proportion", "average_distractor_sample_count_proportion"), treat_non_aoi_looks_as_missing = TRUE )
Visualization
https://osf.io/37rfb/
prediction_plots.R
256
Remove trials with trackloss > 50% (removed 4 trials)
eyetrackingr.data <- clean_by_trackloss(data = eyetrackingr.data, trial_prop_thresh = .5)
Data Variable
https://osf.io/37rfb/
prediction_plots.R
257
check histogram for binning
hist(quop_use$t1_eff)
quop_use$bins <- cut(quop_use$t1_eff, quantile(quop_use$t1_eff, p = seq(0, 1, length = 29), na.rm = T))
Visualization
https://osf.io/vphyt/
Sentence_Level.R
258
Visualize with forest plots: compare corona vs. pre-corona cohorts
forest(lvd$est[1:16], sei = lvd$se[1:16],
       ilab = cbind(c("Pre-pandemic Cohorts vs. First Pandemic Cohort", rep("", 7), "Pre-pandemic Cohorts vs. Second Pandemic Cohort", rep("", 7)),
                    paste0("T", rep(1:8, 2)),
                    c("Schools Open","???","???","???","Schools Closed","???","???","Alternating Lessons","Schools Open","???","Alternating Lessons","???","???","???","???","Schools Open")),
       ilab.xpos = c(-1.35, -.95, -.65), ylim = c(0, 22), xlim = c(-1.7, 1), slab = NA,
       rows = c(18:11, 8:1), at = c(-.7, -.35, 0, .35, .7),
       xlab = "Latent Variance Differences",
       col = c("gray","gray","gray","gray","black","black","black","black","gray","gray","black","black","black","black","black","gray"))
forest(lmd$est[1:16], sei = lmd$se[1:16],
       ilab = cbind(c("Pre-pandemic Cohorts vs. First Pandemic Cohort", rep("", 7), "Pre-pandemic Cohorts vs. Second Pandemic Cohort", rep("", 7)),
                    paste0("T", rep(1:8, 2)),
                    c("Schools Open","???","???","???","Schools Closed","???","???","Alternating Lessons","Schools Open","???","Alternating Lessons","???","???","???","???","Schools Open")),
       ilab.xpos = c(-1.35, -.95, -.65), ylim = c(0, 22), xlim = c(-1.7, 1), slab = NA,
       rows = c(18:11, 8:1), at = c(-.7, -.35, 0, .35, .7),
       xlab = "Standardized Latent Mean Differences",
       col = c("gray","gray","gray","gray","black","black","black","black","gray","gray","black","black","black","black","black","gray"))
Visualization
https://osf.io/vphyt/
Sentence_Level.R
259
to see how well the distances are measured
stressplot(species_matrix_short.nmds) # R2 = 0.961, linear R2 = 0.891
data.scores <- as.data.frame(scores(species_matrix_short.nmds$points))
data.scores$site <- substr(rownames(data.scores), 1, 6)
data.scores$plots <- substr(rownames(data.scores), 8, 15)
species.scores <- as.data.frame(scores(species_matrix_short.nmds, "species"))
species.scores$species <- rownames(species.scores)
Statistical Test
https://osf.io/uq3cv/
5_NMDS_comparing_dawn_and_morning_assemblage_compositions.R
260
Now a statistical test to see whether the communities are statistically different from one another
species_matrix2 <- species_matrix %>% mutate_if(is.numeric, ~1 * (. > 0))
sp_matrix_with_group <- species_matrix2
rownames(sp_matrix_with_group) <- rownames(species_matrix)
sp_matrix_with_group$grouping <- substr(rownames(sp_matrix_with_group), 7, 13)
ano <- anosim(species_matrix2, distance = 'euclidean', grouping = sp_matrix_with_group$grouping)
summary(ano)
Statistical Test
https://osf.io/uq3cv/
5_NMDS_comparing_dawn_and_morning_assemblage_compositions.R
261
SEM Analysis Section. The following section sets up a series of path models. Note that this is a highly revised version of the SEM section, which originally tried to incorporate a complex latent variable model to simultaneously test all hypotheses. I decided to break it down into smaller, simpler models, and to focus on observed-variable-only models using the composite measures instead. I'm not sure if that's the "best" approach or not, but I think it at least makes for a simpler approach. The code below is thus a mixture of what I originally wrote and the revised models. Let's check differences between the two groups:
t.test(Measures$Age ~ Measures$Normative)
t.test(Measures$Adj_Income ~ Measures$Normative)
t.test(Measures$Parent_Edu ~ Measures$Normative)
t.test(Measures$Political ~ Measures$Normative)
t.test(Measures$Crit_Reflection_Mean ~ Measures$Normative)
t.test(Measures$Efficacy_Mean ~ Measures$Normative)
t.test(Measures$Crit_Action_Mean ~ Measures$Normative)
t.test(Measures$BLM_Activism ~ Measures$Normative)
t.test(Measures$Militarism_Mean ~ Measures$Normative)
t.test(Measures$EDNhComp ~ Measures$Normative)
t.test(Measures$ACEs_Sum ~ Measures$Normative)
t.test(Measures$MEQ_Total ~ Measures$Normative)
t.test(Measures$Discrim_Major_Sum ~ Measures$Normative)
t.test(Measures$Discrim_Everyday_Mean ~ Measures$Normative)
t.test(Measures$BNSS_Mean ~ Measures$Normative)
t.test(Measures$BNSSh_Mean ~ Measures$Normative)
t.test(Measures$Zero_Sum_Mean ~ Measures$Normative)
sjmisc::frq(Measures$Race, out = "v")
Measures %>% group_by(Race) %>% summarise(BLM_Mean = mean(BLM_Activism))
data <- Measures %>%
  filter(Race %in% c("Black or African-American", "White or Caucasian") & !is.na(BLM_Activism)) %>%
  mutate(Black = ifelse(Race == "Black or African-American", 1, 0))
Statistical Modeling
https://osf.io/xhrw6/
4_sem_models.R
262
To assess BLM activism by race, keep in mind that the BLM measure is count data (with a lot of zeros). A t-test therefore is not appropriate. We will use zero-inflated Poisson regression
ggplot(Measures, aes(BLM_Activism)) + geom_histogram()
model.zi = zeroinfl(BLM_Activism ~ Black, data = data, dist = "poisson")
summary(model.zi)
Descriptives3 <- Measures %>%
  select(Normative, Age, Adj_Income, Parent_Edu, Political, ACEs_Sum, EDNhComp, Discrim_Major_Sum, Discrim_Everyday_Mean, MEXQ_Exp, Crit_Reflection_Mean, Efficacy_Mean, Crit_Action_Mean, BLM_Activism, Militarism_Mean) %>%
  psych::describeBy(group = "Normative") %>%
  as.data.frame() %>%
  rownames_to_column(var = "Measure") %>%
  select(-vars)
DescrNonNorm <- as.data.frame(Descriptives3[1]) %>% rownames_to_column(var = "Measure")
DescrNorm <- as.data.frame(Descriptives3[2]) %>% rownames_to_column(var = "Measure")
Statistical Modeling
https://osf.io/xhrw6/
4_sem_models.R
263
Write cleaned covariate files to xlsx
cleaned_group_covariates_data <- list(
  "cleaned_group_participant" = cleaned_group_participant_dat,
  "cleaned_group_instructional" = cleaned_group_instructional_dat,
  "cleaned_group_DV" = cleaned_group_DV_dat,
  "cleaned_group_race_ethnicity" = race_eth
)
write_xlsx(cleaned_group_covariates_data, path = "Cleaned data/Group covariates data tables.xlsx")
Data Variable
https://osf.io/b5ydr/
3-Cleancovariatesforgroupdesigns.R
264
Weighted regression model for Ordinary People subscale score (mean)
m1.ppl.svyglm.fit <- svyglm(scipopppl ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita, design = bar.design.scipopppl, family = gaussian, na.action = na.omit)
m2.ppl.svyglm.fit <- svyglm(scipopppl ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita + polorientation + religiosity, design = bar.design.scipopppl, family = gaussian, na.action = na.omit)
m3.ppl.svyglm.fit <- svyglm(scipopppl ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita + polorientation + religiosity + interestscience + sciliteracy + trustscience + trustscientists, design = bar.design.scipopppl, family = gaussian, na.action = na.omit)
Statistical Modeling
https://osf.io/qj4xr/
03_explaining-scipop-attitudes.R
265
Model assumption checks. Do some assumption checks for multiple linear regression (e.g., see Field, 2012, p. 292). These are: (1) multicollinearity, (2) non-normality/heteroscedasticity of residuals. Assumption checks (1): Multicollinearity. Specify model and inspect GVIFs. Note: The VIF (the usual collinearity diagnostic) may not be applicable to models with dummy regressors constructed from a polytomous categorical variable or polynomial regressors (Fox, 2016: 357). Fox and Monette (1992) introduced the generalized variance inflation factor (GVIF) for these cases. As education.uni/education.comp and languageregion.ger/languageregion.ita are such dummy regressors based on polytomous categorical variables (i.e. education and languageregion), we should compute GVIFs. car::vif() does that automatically. GVIFs of SciPop Score models
car::vif(m1.scipopgoertz.svyglm.fit) %>% as.data.frame %>% rename(GVIF = deparse(substitute(.)))
car::vif(m2.scipopgoertz.svyglm.fit) %>% as.data.frame %>% rename(GVIF = deparse(substitute(.)))
car::vif(m3.scipopgoertz.svyglm.fit) %>% as.data.frame %>% rename(GVIF = deparse(substitute(.)))
Statistical Test
https://osf.io/qj4xr/
03_explaining-scipop-attitudes.R
266
Non-normality/heteroscedasticity of residuals of ppl Score models
distributionchecks(m1.ppl.svyglm.fit)
distributionchecks(m2.ppl.svyglm.fit)
distributionchecks(m3.ppl.svyglm.fit)
Statistical Modeling
https://osf.io/qj4xr/
03_explaining-scipop-attitudes.R
267
ADDITIONAL ANALYSIS B: TESTING A U-SHAPED RELATIONSHIP BETWEEN SCIPOP AND POLITICAL ORIENTATION. 1st test: Quadratic regression on weighted data
bar2019pop$polorientation2 <- bar2019pop$polorientation^2
bar.design <- svydesign(id = ~0, data = bar2019pop, weights = ~weight)
quad.scipopgoertz.svyglm.fit <- svyglm(scipopgoertz ~ age + gender + education.comp + education.uni + sciprox.score + urbanity.log + languageregion.ger + languageregion.ita + polorientation + polorientation2 + religiosity + interestscience + sciliteracy + trustscience + trustscientists, design = bar.design, family = gaussian, na.action = na.omit)
summ(model = quad.scipopgoertz.svyglm.fit, binary.inputs = "full", n.sd = 2, transform.response = F, confint = F, vifs = F, scale = F, model.info = T, model.fit = T, digits = 4)
quad.scipopgoertz.svyglm.fit$aic
m0.scipopgoertz.svyglm.fit <- svyglm(scipopgoertz ~ 1, bar.design, family = gaussian, na.action = na.omit)
anova(m0.scipopgoertz.svyglm.fit, quad.scipopgoertz.svyglm.fit, test = "F", method = "Wald")
Statistical Test
https://osf.io/qj4xr/
03_explaining-scipop-attitudes.R
268
Chi-square to compare vegan status in experienced vs. not experienced, for each advocacy type: graphic video
data %>% xtabs(~ graphic_exp_buc + vegn_bin, .) %>% print() %>% proportions("graphic_exp_buc")
graphic_tmp <- chisq.test(xtabs(~ graphic_exp_buc + vegn_bin, data))
Statistical Test
https://osf.io/3aryn/
5chisquare_Spanish.R
269
Boosted regression trees: CUS BRT plot
CUS.BRT.plot <- ggplot(data = CUS.BRT, aes(x = predictors, y = influence, fill = predictor.type)) + geom_bar(stat = "identity") + geom_hline(yintercept = 5, linetype = 3, size = 1, colour = "gray60") + scale_fill_manual(values = viridis.3) + coord_flip() + labs(x = "Predictor", y = "Relative Influence") + scale_y_continuous(breaks = c(0, 10, 20, 30, 40), limits = c(0, 40)) + scale_x_discrete(limits = BRT.plot.label.limits, labels = BRT.plot.labels) + theme_pubr() + theme(legend.position = c(0.85, 0.825), legend.title = element_blank())
Visualization
https://osf.io/62je8/
DMS-NRSA-CA-QC-Figures.R
270
Sets limits of the plot based on user choice
limits <- matrix(0, 2, 2)
limits <- if (lim == 1) {
  limits <- u.plot.limit(min.age, max.age, lim, min.ylim, max.ylim)
} else if (lim == 2) {
  limits <- u.plot.limit(min.age, max.age, lim, min.ylim, max.ylim)
} else {
  limits <- u.plot.limit(min.xlim, max.xlim, lim, min.ylim, max.ylim)
}
limconc <- uconcplot(limits[1,1], limits[1,2], int) # sets the concordia line based on plot limits
plot_limconc <- limconc[2:(nrow(limconc)-1), ]
Visualization
https://osf.io/p46mb/
U-PbGeochronologyScripts.R
271
BELOW CALCULATES THE ELLIPSES: calculate x/y coords that define the ellipse based on input data including rho
ell.coords = list() # sets up list to write results to
for (n in 1:n) {
  covmat <- cor2cov2((Pb7Ue[n]/2), (Pb6Ue[n]/2), rho[n])
  nn <- 75
  cutoff <- stats::qchisq(1 - 0.05, 2)
  e <- eigen(covmat)
  a <- sqrt(cutoff * abs(e$values[1])) # major axis
  b <- sqrt(cutoff * abs(e$values[2])) # minor axis
  v <- e$vectors[, 1]
  beta <- atan(v[2]/v[1])
  theta <- seq(0, 2 * pi, length = nn)
  out <- matrix(0, nrow = nn, ncol = 3)
  out[, 1] <- Pb7U[n] + a * cos(theta) * cos(beta) - b * sin(theta) * sin(beta)
  out[, 2] <- Pb6U[n] + a * cos(theta) * sin(beta) + b * sin(theta) * cos(beta)
  out[, 3] <- n
Visualization
https://osf.io/p46mb/
U-PbGeochronologyScripts.R
272
Functions for plotting models. Extracts significance from models for the chart legend
extractSign <- function(model, modeltype, MtnRanges, InclMIA = TRUE) {
  coef <- summary(model)$coefficients$cond
  SignDF <- data.frame(mtnrng = MtnRanges, main = NA, int_ao = NA, int_po = NA)
  if (InclMIA) {
    if (modeltype == "int") {
      if (coef[row.names(coef) == "AO:MtnRange1", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == "Coast - North"] <- "I"}
      if (coef[row.names(coef) == "AO:MtnRange2", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == "Coast - South"] <- "I"}
      if (coef[row.names(coef) == "AO:MtnRange3", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == "Columbia - North"] <- "I"}
      if (coef[row.names(coef) == "AO:MtnRange4", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == "Columbia - South"] <- "I"}
      if (coef[row.names(coef) == "AO:MtnRange5", 4] < 0.05) {SignDF$int_ao[SignDF$mtnrng == "Rockies - North"] <- "I"}
      if (coef[row.names(coef) == "MtnRange1:PO", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == "Coast - North"] <- "I"}
      if (coef[row.names(coef) == "MtnRange2:PO", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == "Coast - South"] <- "I"}
      if (coef[row.names(coef) == "MtnRange3:PO", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == "Columbia - North"] <- "I"}
      if (coef[row.names(coef) == "MtnRange4:PO", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == "Columbia - South"] <- "I"}
      if (coef[row.names(coef) == "MtnRange5:PO", 4] < 0.05) {SignDF$int_po[SignDF$mtnrng == "Rockies - North"] <- "I"}
    }
  }
  SignAOMain <- paste("AO: ", format(round(coef[row.names(coef) == "AO", 1], 3), nsmall = 3), " (", format(round(coef[row.names(coef) == "AO", 4], 3), nsmall = 3), ")")
  SignPOMain <- paste("PO: ", format(round(coef[row.names(coef) == "PO", 1], 3), nsmall = 3), " (", format(round(coef[row.names(coef) == "PO", 4], 3), nsmall = 3), ")")
  return(list(SignAOMain = SignAOMain, SignPOMain = SignPOMain, SignDF = SignDF))
}
Visualization
https://osf.io/7xsfj/
Fig05To08.R
273
Outlier coding: M and SD per subject and item, depending on distractor condition and SOA (independent variables)
subj.M.SD = ddply(rawdat, .(subj, cond, soa), summarize, subj.M = mean(RT, na.rm = T), subj.SD = sd(RT, na.rm = T))
tar.M.SD = ddply(rawdat, .(targ_ID, cond, soa), summarize, tar.M = mean(RT, na.rm = T), tar.SD = sd(RT, na.rm = T))
rawdat = merge(rawdat, subj.M.SD, by = c("subj", "cond", "soa"))
rawdat = merge(rawdat, tar.M.SD, by = c("targ_ID", "cond", "soa"))
rawdat$subj.min = (rawdat$subj.M - 2*(rawdat$subj.SD))
rawdat$subj.max = (rawdat$subj.M + 2*(rawdat$subj.SD))
rawdat$tar.min = (rawdat$tar.M - 2*(rawdat$tar.SD))
rawdat$tar.max = (rawdat$tar.M + 2*(rawdat$tar.SD))
Data Variable
https://osf.io/c93vs/
exp01_prep.R
274
Calculate the value of the closest peer estimate and store it in 'closest'
closest<-min(abs(si-firstEstimate))
Data Variable
https://osf.io/rmcuy/
Fig.
275
define Gaussians for updating prior self (firstEstimate)
density_self <- log(dnorm(x, firstEstimate, own_sd ))
Statistical Modeling
https://osf.io/rmcuy/
Fig.
276
1.1 Sample 1 PTSD symptoms (PSSI): Run linear mixed-effects model
a1_model_01_sg_pssi_outcome <- lme(pssi_end ~ pssi_s0 + sg, random = ~ 1 | id, method = "ML", na.action = na.omit, data = data_a1_pssi)
Statistical Modeling
https://osf.io/dgt8x/
04-revisions.R
277
Check assumption of normality of the residuals
qqnorm(resid(a1_model_01_sg_pssi_outcome))
qqnorm(resid(a2_model_01_sg_pssi_outcome))
Statistical Modeling
https://osf.io/dgt8x/
04-revisions.R
278
Calculate contrasts specified in the contrast matrix "contrast_outcome"
a1_model_01_sg_pssi_outcome_contrasts <- glht(a1_model_01_sg_pssi_outcome, contrast_outcome)
a2_model_01_sg_pssi_outcome_contrasts <- glht(a2_model_01_sg_pssi_outcome, contrast_outcome)
Statistical Modeling
https://osf.io/dgt8x/
04-revisions.R
279
Show contrasts without adjusting p-values for multiple comparisons
summary(a1_model_01_sg_pssi_outcome_contrasts, test = adjusted("none"))
summary(a2_model_01_sg_pssi_outcome_contrasts, test = adjusted("none"))
Statistical Test
https://osf.io/dgt8x/
04-revisions.R
280
Select estimates, standard errors and p-values
table_a1_model_01_sg_pssi_outcome_full <- table_a1_model_01_sg_pssi_outcome_raw %>% dplyr::select(beta = estimate, se = std.error, p = p.value)
table_a2_model_01_sg_pssi_outcome_full <- table_a2_model_01_sg_pssi_outcome_raw %>% dplyr::select(beta = estimate, se = std.error, p = p.value)
Data Variable
https://osf.io/dgt8x/
04-revisions.R
281
Calculate pooled standard deviation at baseline
sd_a1_pssi_s0 <- sd(data_a1_pssi$pssi_s0, na.rm = TRUE)
sd_a2_pssi_s0 <- sd(data_a2_pssi$pssi_s0, na.rm = TRUE)
Statistical Modeling
https://osf.io/dgt8x/
04-revisions.R
282
Calculate Cohen's d using the absolute value of the baseline-adjusted difference from the linear mixed-effects model
cohens_d_a1_pssi_end <- a1_pssi_end_diff / sd_a1_pssi_s0
cohens_d_a2_pssi_end <- a2_pssi_end_diff / sd_a2_pssi_s0
Statistical Test
https://osf.io/dgt8x/
04-revisions.R
283
Peer rank means and standard deviations of conditions: unspecified peer rank
round(mean(unspecified$peer.rank), 2)
round(sd(unspecified$peer.rank), 2)
Data Variable
https://osf.io/9tnmv/
Exp4_buddhist_post.R
284
Determines and rounds (to digits as specified in digits) the mean and SD of x, and then collapses them into a single entry
xx = as.character(round(c(mean(x, na.rm = T), sd(x, na.rm = T)), digits = digits))
if (any(grepl(x = xx, pattern = ".", fixed = T)) > 0) {
  xx[!grepl(x = xx, pattern = ".", fixed = T)] = paste(xx[!grepl(x = xx, pattern = ".", fixed = T)], "0", sep = ".")
  xx = matrix(unlist(strsplit(as.character(xx), split = ".", fixed = T)), ncol = 2, byrow = T)
  xx[, 2] = unlist(lapply(xx[, 2], function(x) {
    paste(c(x, paste(c(rep("0", times = digits - nchar(x))), collapse = "")), collapse = "")
  }))
  xx = apply(xx, 1, paste, collapse = ".")
}
paste(xx, collapse = sep)
}
c.tab <- function(x, digits = NA, n.spaces = 1, add.hash = T, incl.fst = F, incl.rownames = T) {
Data Variable
https://osf.io/vjeb3/
helpers.r
285
Wrapper for savePlot with file "clipboard" and type "wmf" (default; others are possible)
savePlot(file = "clipboard", type = type)
}
overdisp.correction <- function(coeffs, disp.param) {
  coeffs[, "Std. Error"] = coeffs[, "Std. Error"] * sqrt(disp.param)
  coeffs[, "z value"] = coeffs[, "Estimate"] / coeffs[, "Std. Error"]
  coeffs[, "Pr(>|z|)"] = 2 * pnorm(q = -abs(coeffs[, "z value"]), mean = 0, sd = 1)
  return(coeffs)
}
merge.ests.ci.stab <- function(coeffs, ci = NULL, tests = NULL, stab = NULL) {
  ires = coeffs
  if (!is.null(ci)) {
    coeffs = cbind(coeffs, ci[rownames(coeffs), ])
  }
  if (!is.null(tests)) {
    xx = outer(rownames(tests), rownames(coeffs), Vectorize(function(tt, e) {
      if (nchar(tt) > nchar(e)) {
        return(0)
      } else {
Visualization
https://osf.io/vjeb3/
helpers.r
286
Add the start time (intra_scope_window[1]) to every start position (scope$start) of the recording timestamp to get the actual start window in milliseconds
starting_times <- df$RecordingTimestamp[scope$start] + as.numeric(intra_scope_window[1])
}
if (intra_scope_window[2] != "end") {
Data Variable
https://osf.io/mp9td/
get_looks.R
287
Set the current trial duration to 0
current_trial_total_duration[[hn]] <- 0
}
current_trial_total_looks[[hn]] <- 0
}
current_first_look_duration[[hn]] <- 0
current_first_look_ending_reason[[hn]] <- ""
first_looks_collection[[hn]]$found_first <- FALSE
first_looks_collection[[hn]]$forced_stop <- FALSE
}
Data Variable
https://osf.io/mp9td/
get_looks.R
288
Return sum log likelihood. Include protection against log(0) problems
return(sum(log(pmax(like, 1e-10))))
} else {
Statistical Modeling
https://osf.io/tbczv/
exp1bDead-MNL-SAT-A.r
289
Plot posterior fit and forward simulation prediction
plot_fits <- function(stan_fit_ex, N_t, N_p, obs_times, pred_times, data_vector, S_0, D_0, M_0, filename) {
  sigma_hat <- median(stan_fit_ex$sigma)
  CO2_flux_ratios_hat_median <- rep(NA, N_t) # Pre-allocate vector for median model fit
  CO2_flux_ratios_pred_median <- rep(NA, N_p)
  for (t in 1:N_t) {
    CO2_flux_ratios_hat_median[t] <- median(stan_fit_ex$CO2_flux_ratios_hat_vector[, t])
  }
  for (t in 1:N_p) {
    CO2_flux_ratios_pred_median[t] <- median(stan_fit_ex$CO2_flux_ratios_new_vector[, t])
  }
Visualization
https://osf.io/7mey8/
stan_CON_adriana_pools5i.r
290
Fit linear mixed-effects models: null model
fitlmer0 <- lmer(rating ~ 1 + (1|subject), data = dat.long1, REML = FALSE)
summary(fitlmer0)
Statistical Modeling
https://osf.io/eg6w5/
experiment1c_analyses.R
291
the mean of each parameter across iterations. Keep dimensions for parameters and subjects
mean.params <- t(apply(sampled$samples$alpha[,,keep],1:2,mean))
Statistical Modeling
https://osf.io/wbyj7/
pmwg-DIC.r
292
Plot the calibration curve (Abs vs. [BSA]) with an appropriate trendline and figure caption (a companion sketch follows this record)
ggplot(bca, aes(x = BSA, y = Abs562)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +   # linear trendline (assumed completion; the original line ended with a dangling "+")
  labs(x = "[BSA]", y = "Absorbance (562 nm)", caption = "BCA calibration curve")
Visualization
https://osf.io/9e3cu/
BCA_activity_answers.R
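A small companion sketch, under the assumption that the bca data frame holds the standards: fitting the calibration line itself gives the intercept and slope needed to convert absorbances of unknowns back to [BSA].
cal <- lm(Abs562 ~ BSA, data = bca)   # calibration line: Abs562 = intercept + slope * [BSA]
coef(cal)                             # intercept and slope of the trendline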
293
generate MPT data:
gendat <- genMPT(theta = pnorm(theta), restrictions = "model/restrictions.txt", numItems = numItems, eqnfile="model/2htsm.eqn")
Data Variable
https://osf.io/s82bw/
04_simulation_continuous_predictor.R
294
Recode confirm (oppose/confirm) and won (win/lost) to factors
DF$confirm <- recode(DF$confirm, `0` = "Confirmed", `1` = "Opposed")
DF$won <- recode(DF$won, `0` = "Lost", `1` = "Won")
DF <- DF %>%
  mutate(confirm = as_factor(confirm)) %>%
  mutate(treatment = as_factor(treatment)) %>%
  mutate(won = as_factor(won)) %>%
  mutate(majmin = as_factor(majmin)) %>%
  mutate(experiment = as_factor(experiment)) %>%
  mutate(evidence = as_factor(evidence))
summary(DF)
Data Variable
https://osf.io/9gjyc/
BRMS + Figure4.R
295
Create a unique ID number for each unique individual
mutate(uniq.id = group_indices(., id, experiment)) %>%
  mutate(uniq.id = as_factor(uniq.id))
summary(Ind_E1)
M_Ind_E1 <- brm(changed ~ won*confirm + trial + (1|uniq.id), data = Ind_E1, family = "bernoulli", iter = 6000, chains = 3, cores = 3, save_all_pars = TRUE, file = "Ind_E1")
summary(M_Ind_E1)
plot(M_Ind_E1, ask = FALSE)
pp_check(M_Ind_E1, check = "distributions")
pp_check(M_Ind_E1, check = "residuals")
pp_check(M_Ind_E1, "error_scatter_avg")
pp_check(M_Ind_E1, check = "scatter")
Data Variable
https://osf.io/9gjyc/
BRMS + Figure4.R
296
generates 95% confidence intervals for each beta coefficient
m.p1_decision.CI <- round(confint(m.p1_decision, parm = "beta_"), 3)
Statistical Test
https://osf.io/uygpq/
Within-paradigm.R
297
exclude people who don't meet the inclusion criteria of having data for at least 3 days in each week
person_weekly_entries <- dat2019 %>%
  group_by(identity_id, woy) %>%
  summarise(n_days = length(identity_id))
person_weekly_entries <- as.data.frame(person_weekly_entries)
person_weekly_entries <- subset(person_weekly_entries, n_days >= 3)
Data Variable
https://osf.io/wyrav/
Dataprep-ParticipantResponses.R
298
Construct a maximal glmer() model. This model contains a fixed within-subjects effect of Ambiguity (effect-coded with 0.5 = amb), and between-subjects fixed effects for Vocabulary and ART test scores, plus random effects by participants and items.
Acc.max <- glmer(accuracy ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CohOnly, family = "binomial", control = glmerControl(optimizer ="bobyqa"))
Statistical Modeling
https://osf.io/hn3bu/
AnalysisCode.R
299
Construct a maximal lmer() model. This model contains a fixed within-subjects effect of Ambiguity (effect-coded with 0.5 = amb), and between-subjects fixed effects for Vocabulary and ART test scores, plus random effects by participants and items. Maximal model with interactions
TrialDwellTime.max <- lmer(logTrialDwellTime ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 + Vocab.Cent + ART.Cent | item), data = Data.CorrTrials, REML=FALSE)
Statistical Modeling
https://osf.io/hn3bu/
AnalysisCode.R
300
First-fixation duration. Construct a maximal lmer() model. Because not all the necessary models converge, remove correlations between random effects for all models for the first-fixation duration measure:
AOIKey.FF.reduced <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 | RECORDING_SESSION_LABEL) + (0 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 | item) + (0 + Vocab.Cent | item) + (0 + ART.Cent | item), data = Data.CorrTrials, REML = FALSE)
AOICohcue.FF.reduced <- lmer(logFirstFixDur ~ 1 + Ambiguity.code + Vocab.Cent + ART.Cent + Ambiguity.code : Vocab.Cent + Ambiguity.code : ART.Cent + (1 | RECORDING_SESSION_LABEL) + (0 + Ambiguity.code | RECORDING_SESSION_LABEL) + (1 | item) + (0 + Vocab.Cent | item) + (0 + ART.Cent | item), data = Data.CorrTrials, REML = FALSE)
Statistical Modeling
https://osf.io/hn3bu/
AnalysisCode.R