id (stringlengths 40–40) | repo_name (stringlengths 5–110) | path (stringlengths 2–233) | content (stringlengths 0–1.03M ⌀) | size (int32 0–60M ⌀) | license (stringclasses, 15 values)
---|---|---|---|---|---
2b8fd89b4749253525141e2bd149d1a3082f1af5 | cran/siatclust | tests/ewkm01.R | library(siatclust)
csv <- "ewkm01.csv"
ds <- read.csv("ewkm01.csv")
set.seed(42)
km <- ewkm(ds, 10)
print(km)
cat(km$iterations, km$restarts, km$total.iterations, "\n")
set.seed(42)
km <- ewkm(ds, 10, maxrestart=50)
print(km)
cat(km$iterations, km$restarts, km$total.iterations, "\n")
set.seed(42)
km <- ewkm(ds, 10, maxrestart=-1)
print(km)
cat(km$iterations, km$restarts, km$total.iterations, "\n")
set.seed(42)
km <- ewkm(ds, 10, maxiter=1000, maxrestart=0)
print(km)
cat(km$iterations, km$restarts, km$total.iterations, "\n")
| 536 | gpl-3.0 |
66daaa8e245e478ca922288c3afa522e72f65730 | lujiec/shiny_group_simulator | run_shiny.R | library(shiny)
library(ggplot2)
library(gridExtra)
setwd('..')
runApp('shiny_app') | 88 | mit |
ff7ab7d6a11ce5a607eb8ef31355ef7a2ccf628a | rho-devel/rho | src/extra/testr/filtered-test-suite/strsplit/tc_strsplit_10.R | expected <- eval(parse(text="list(c(\"A\", \"shell\", \"of\", \"class\", \"documentation\", \"has\", \"been\", \"written\", \"to\", \"the\", \"file\", \"'./myTst2/man/DocLink-class.Rd'.\"))"));
test(id=0, code={
argv <- eval(parse(text="list(\"A shell of class documentation has been written to the file './myTst2/man/DocLink-class.Rd'.\\n\", \"[ \\t\\n]\", FALSE, TRUE, TRUE)"));
.Internal(`strsplit`(argv[[1]], argv[[2]], argv[[3]], argv[[4]], argv[[5]]));
}, o=expected);
| 496 | gpl-2.0 |
0c514ad64a488918937febea02b70f9b4f559e2b | StanfordPsych254/replication_project | subsidiary_analysis/compute_sofer_es.R | rm(list=ls())
library(readr)
library(dplyr)
library(tidyr)
library(lsr)
library(lme4) # needed for lmer() used at the end of this script
sofer <- read_csv("../subsidiary_analysis/Experiment_1_Rps.csv")
d <- sofer %>%
mutate(subid = 1:n()) %>%
gather(datapoint, rating, starts_with("dft")) %>%
separate(datapoint, into = c("foo","DFT","trial_num"), sep = "_") %>%
mutate(condition = ifelse(Trust1Attrc2 == 1, "Trustworthiness", "Attractiveness"),
DFT = factor(DFT)) %>%
select(-foo, -Trust1Attrc2) %>%
arrange(subid)
# Repeated measures
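# (Error(subid / DFT) places DFT in a within-subject error stratum, with
# subid as the random grouping factor, i.e. a standard repeated-measures aov.)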
mod <- aov(rating ~ DFT * condition + Error(subid / DFT), data = d)
summary(mod)
# By subjects
subs <- d %>%
group_by(condition, DFT, subid) %>%
summarise(rating = mean(rating))
mod_subs <- aov(rating ~ DFT * condition, data = subs)
summary(mod_subs)
# By "items"
items <- d %>%
group_by(condition, DFT) %>%
summarise(rating = mean(rating))
mod_items <- aov(rating ~ DFT * condition, data = items)
summary(mod_items)
# linear model by items
items$nDFT <- scale(as.numeric(as.character(items$DFT)), scale=FALSE)
items$condition_e <- factor(items$condition)
contrasts(items$condition_e) <- c(-.5,.5)
mod_lm_items <- lm(rating ~ nDFT * condition_e + I(nDFT^2) * condition_e, data=items)
anova(mod_lm_items)
summary(mod_lm_items)
# standardized linear model by items (refit with scaled predictors)
items$nDFT <- scale(as.numeric(as.character(items$DFT)), scale=TRUE)
items$condition_e <- scale(ifelse(items$condition == "Trustworthiness", 1, 0), scale = F)
#contrasts(items$condition_e) <- c(-.5,.5)
mod_lm_items <- lm(scale(rating) ~ condition_e + nDFT + I(nDFT^2) + condition_e:nDFT + condition_e:I(nDFT^2) , data=items)
anova(mod_lm_items)
summary(mod_lm_items)
etaSquared(mod_lm_items)
# original t value on coefficient
mixedModel <- lmer(rating ~ condition_e + nDFT + I(nDFT^2) + condition_e:nDFT + condition_e:I(nDFT^2)
+ (1 + nDFT + I(nDFT^2)| subid),
data = d %>%
mutate(nDFT = scale(as.numeric(as.character(DFT)), scale=TRUE)) %>%
mutate(condition_e = factor(condition)))
summary(mixedModel)
| 2,052 | mit |
5f04d4c75bdc6afb327e122769448325ab0f1f86 | sorensje/R_helperfunctions | aggregate_into_DF.R | aggregate_into_DF <- function(dataDF, form, fun, newVarName, inNewDF = TRUE){
# convenience function that returns original df w/ new aggregated variable (maintains long form)
# dataDF df to aggregate
# form: formula for aggregation (as character)
# fun: function to sweep over
# newVarName: label for new variable
## inNewDF: boolean indicating whether new ag var should be returned as part of original DF
# is meant to be used when you have a subset, or just don't want to put the new var in your DF right away
# inNewDF == FALSE turns this function into a fancy aggregate w/ renaming of the var
# example: cars2 <- aggregate_into_DF(mpg, 'hwy~cyl+year', mean, 'cylYearAvg')
# xtabs(~ cylYearAvg + cyl, cars2)
# xtabs(~ cyl + year + cylYearAvg, cars2)
if(any(names(dataDF) == newVarName)){
cat('name conflict, original DF returned')
return(dataDF)
}
form <-formula(form)
# check vars in formula are in df
if(!all(attr(terms.formula(form),"term.labels") %in% names(dataDF))){
cat('variables in formula not in DF, original DF returned')
return(dataDF)
}
# agg and rename
aggDF <- aggregate(form, dataDF, fun) # sweep func
names(aggDF) <- c(names(aggDF)[1: (length(aggDF) -1)],newVarName) # rename variable
# should we bother putting new agg var into df?
if(inNewDF == FALSE){
return(aggDF)
}
newDF <- merge(dataDF,aggDF, all.x = TRUE) #combine
if(nrow(newDF) == nrow(dataDF) &
sum(is.na(newDF[, newVarName])) != nrow(dataDF)){
# basic checks: is the new df the expected size, is the new variable not just a string of NAs?
return(newDF)
} else{
cat('error in aggregation, original df returned')
return(dataDF)
}
}
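# A minimal usage sketch (illustrative only; assumes ggplot2 is installed for
# its mpg dataset, as in the example above):
# library(ggplot2)
# cars2 <- aggregate_into_DF(mpg, 'hwy ~ cyl + year', mean, 'cylYearAvg')
# head(cars2[, c("cyl", "year", "hwy", "cylYearAvg")])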
| 1,894 | gpl-2.0 |
1a9d5776fb6b50e88b91021bdefd815a2b099e55 | jannes-m/RQGIS | tests/testthat/test-qgis-prerun.R | context("prerun")
library("raster")
library("sf")
test_that("qgis_session_info yields a list as output", {
skip_on_cran()
info <- qgis_session_info()
# check if the output is a list of length 5 or 6
expect_gt(length(info), 4)
})
test_that("find_algorithms finds QGIS geoalgorithms", {
skip_on_cran()
algs <- find_algorithms()
# just retrieve QGIS geoalgorithms
test <- grep("qgis:", algs, value = TRUE)
# normally there are 101 QGIS geoalgorithms, so check if there are more than
# 50
expect_gt(length(test), 50)
})
test_that("get_extent is working correctly", {
testthat::skip_on_cran()
# test multiple rasters
r1 = raster(extent(0, 1, 1, 2), nrows = 2, ncols = 2)
r2 = raster(extent(-2, 2, 0, 1), nrow = 2, ncols = 3)
ex = get_extent(params = list(list(r1, r2)), "multipleinput")
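  # expected union of the two extents: xmin = -2, xmax = 2, ymin = 0, ymax = 2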
expect_identical(ex, c("-2", "2", "0", "2"))
# test sf objects
# shift random points by 1000 m to the east and north
ps = random_points
st_geometry(random_points) = st_geometry(random_points) + c(1000, 1000)
ex = get_extent(params = list(list(ps, random_points)),
type_name = "multipleinput")
expect_identical(ex, c("795551.3547", "798242.2819", "8932370.0031",
"8935800.4185"))
# test SpatialObjects
ps = as(ps, "Spatial")
random_points = as(random_points, "Spatial")
ex = get_extent(params = list(list(ps, random_points)),
type_name = "multipleinput")
expect_identical(ex, c("795551.3547", "798242.2819", "8932370.0031",
"8935800.4185"))
})
| 1,586 | lgpl-3.0 |
9eebeeca8816b2a51a202ec89e7254f3a1bd15ea | RCollins13/CNValue | plotting_code/AllExampleLoci/SMARCA2/SMARCA2_CNV_tracks.plot.R | #!/usr/bin/env R
#rCNV Map Project
#Spring 2017
#Talkowski Lab & Collaborators
#Copyright (c) 2017 Ryan Collins
#Distributed under terms of the MIT License
#Code to plot various tracks for SMARCA2 figure
####################################
#####Set parameters & load libraries
####################################
WRKDIR <- "/Users/rlc/Desktop/Collins/Talkowski/CNV_DB/rCNV_map/"
options(scipen=1000,stringsAsFactors=F)
nNDD <- 35693
nCTRL <- 38628
window.start <- 1250000
window.end <- 3250000
require(zoo)
######################
#####Set color vectors
######################
cols.CTRL <- c("#A5A6A7","#DCDDDF","#EAEBEC","#F8F8F9")
cols.GERM <- c("#7B2AB3","#B07FD1","#CAAAE1","#E5D4F0")
cols.NEURO <- c("#00BFF4","#66D9F8","#99E5FB","#CCF2FD")
cols.SOMA <- c("#EC008D","#F466BB","#F799D1","#FBCCE8")
cols.CNCR <- c("#FFCB00","#FFCB00","#FFE066","#FFF5CC")
#################################
#####Helper function to read data
#################################
readCNVs <- function(pheno,CNV,VF,filt,fileType="CNV"){
#Read file
if(fileType=="CNV"){
in.path <- paste(WRKDIR,"/plot_data/SMARCA2/",pheno,".",CNV,".",
VF,".GRCh37.",filt,".bed.gz",sep="")
}else{
in.path <- paste(WRKDIR,"/plot_data/SMARCA2/",pheno,".",CNV,".",
VF,".GRCh37.",filt,".100bp_pileups.bed.gz",sep="")
}
dat <- read.table(in.path,header=F)
#Add column names
if(fileType=="CNV"){
colnames(dat) <- c("chr","start","end","CNV_ID","CNV_type","phenos","PMID")
}else{
colnames(dat) <- c("chr","start","end","CNVs")
}
#Return data
return(dat)
}
###################################################
#####Read 100bp pileups & normalize by sample sizes
###################################################
#Cases, E2, all
NDD.DEL.E2.all.pileup <- readCNVs("NDD","DEL","E2","all","pileup")
NDD.DEL.E2.all.pileup$CNVs.norm <- NDD.DEL.E2.all.pileup$CNVs/nNDD
NDD.DUP.E2.all.pileup <- readCNVs("NDD","DUP","E2","all","pileup")
NDD.DUP.E2.all.pileup$CNVs.norm <- NDD.DUP.E2.all.pileup$CNVs/nNDD
#Cases, E2, coding
NDD.DEL.E2.coding.pileup <- readCNVs("NDD","DEL","E2","coding","pileup")
NDD.DEL.E2.coding.pileup$CNVs.norm <- NDD.DEL.E2.coding.pileup$CNVs/nNDD
NDD.DUP.E2.coding.pileup <- readCNVs("NDD","DUP","E2","coding","pileup")
NDD.DUP.E2.coding.pileup$CNVs.norm <- NDD.DUP.E2.coding.pileup$CNVs/nNDD
#Cases, E2, haplosufficient
NDD.DEL.E2.haplosufficient.pileup <- readCNVs("NDD","DEL","E2","haplosufficient","pileup")
NDD.DEL.E2.haplosufficient.pileup$CNVs.norm <- NDD.DEL.E2.haplosufficient.pileup$CNVs/nNDD
NDD.DUP.E2.haplosufficient.pileup <- readCNVs("NDD","DUP","E2","haplosufficient","pileup")
NDD.DUP.E2.haplosufficient.pileup$CNVs.norm <- NDD.DUP.E2.haplosufficient.pileup$CNVs/nNDD
#Controls, E2, all
CTRL.DEL.E2.all.pileup <- readCNVs("CTRL","DEL","E2","all","pileup")
CTRL.DEL.E2.all.pileup$CNVs.norm <- CTRL.DEL.E2.all.pileup$CNVs/nCTRL
CTRL.DUP.E2.all.pileup <- readCNVs("CTRL","DUP","E2","all","pileup")
CTRL.DUP.E2.all.pileup$CNVs.norm <- CTRL.DUP.E2.all.pileup$CNVs/nCTRL
#Controls, E2, coding
CTRL.DEL.E2.coding.pileup <- readCNVs("CTRL","DEL","E2","coding","pileup")
CTRL.DEL.E2.coding.pileup$CNVs.norm <- CTRL.DEL.E2.coding.pileup$CNVs/nCTRL
CTRL.DUP.E2.coding.pileup <- readCNVs("CTRL","DUP","E2","coding","pileup")
CTRL.DUP.E2.coding.pileup$CNVs.norm <- CTRL.DUP.E2.coding.pileup$CNVs/nCTRL
#Controls, E2, haplosufficient
CTRL.DEL.E2.haplosufficient.pileup <- readCNVs("CTRL","DEL","E2","haplosufficient","pileup")
CTRL.DEL.E2.haplosufficient.pileup$CNVs.norm <- CTRL.DEL.E2.haplosufficient.pileup$CNVs/nCTRL
CTRL.DUP.E2.haplosufficient.pileup <- readCNVs("CTRL","DUP","E2","haplosufficient","pileup")
CTRL.DUP.E2.haplosufficient.pileup$CNVs.norm <- CTRL.DUP.E2.haplosufficient.pileup$CNVs/nCTRL
############################################################################
#####Function to plot mirrored pileups of case vs control, del & dup stacked
############################################################################
mirroredPileups <- function(case.del,case.dup,control.del,control.dup,
axis.scale=2/10000){
#Prep dups for stacked plotting
case.dup$stacked <- case.del$CNVs.norm+case.dup$CNVs.norm
control.dup$stacked <- control.del$CNVs.norm+control.dup$CNVs.norm
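  # (stacked = del + dup, so the dup polygon is drawn from the top of the
  # del band up to the combined height)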
#Prep plotting area
stackMax <- max(case.dup$stacked,control.dup$stacked)
par(mar=c(0.4,1.8,0.4,0.2),bty="n")
plot(x=c(window.start,window.end),y=1.05*c(-stackMax,stackMax),type="n",
xaxs="i",yaxs="i",xlab="",ylab="",xaxt="n",yaxt="n")
abline(h=0)
#Add y-axis & gridlines
axis(2,at=seq(0,stackMax,axis.scale),labels=NA)
axis(2,at=seq(0,stackMax,axis.scale),tick=F,line=-0.2,las=2,
labels=10000*seq(0,stackMax,axis.scale))
abline(h=seq(0,stackMax,axis.scale/2),col=cols.CTRL[2])
abline(h=seq(0,stackMax,axis.scale))
axis(2,at=seq(0,-stackMax,-axis.scale),labels=NA)
axis(2,at=seq(0,-stackMax,-axis.scale),tick=F,line=-0.2,las=2,
labels=10000*seq(0,stackMax,axis.scale))
abline(h=seq(0,-stackMax,-axis.scale/2),col=cols.CTRL[2])
abline(h=seq(0,-stackMax,-axis.scale))
#Plot stacked cases (positive y-axis)
polygon(x=c(case.del$start,rev(case.del$start)),
y=c(case.del$CNVs.norm,rep(0,nrow(case.del))),
border=NA,col="red")
polygon(x=c(case.del$start,rev(case.del$start)),
y=c(case.dup$stacked,rev(case.del$CNVs.norm)),
border=NA,col="blue")
# points(x=case.dup$start,y=case.dup$stacked,type="l")
#Plot stacked controls (negative y-axis)
polygon(x=c(control.del$start,rev(control.del$start)),
y=-c(control.del$CNVs.norm,rep(0,nrow(control.del))),
border=NA,col="red")
polygon(x=c(control.del$start,rev(control.del$start)),
y=-c(control.dup$stacked,rev(control.del$CNVs.norm)),
border=NA,col="blue")
# points(x=control.dup$start,y=-control.dup$stacked,type="l")
#Add midline
abline(h=0,lwd=2)
}
#########################################
#####Function to plot odds ratios per bin
#########################################
plotORs <- function(case.del,case.dup,control.del,control.dup,q=0.95){
#Collect sum of del+dup
case.dup$stacked <- case.del$CNVs+case.dup$CNVs
control.dup$stacked <- control.del$CNVs+control.dup$CNVs
#Generate odds ratios
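  # Per-bin 2x2 odds ratio: (case carriers / control carriers) divided by
  # (case non-carriers / control non-carriers), using the nNDD and nCTRL totals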
del.ORs <- (case.del$CNVs/control.del$CNVs)/((nNDD-case.del$CNVs)/(nCTRL-control.del$CNVs))
dup.ORs <- (case.dup$CNVs/control.dup$CNVs)/((nNDD-case.dup$CNVs)/(nCTRL-control.dup$CNVs))
cnv.ORs <- (case.dup$stacked/control.dup$stacked)/((nNDD-case.dup$stacked)/(nCTRL-control.dup$stacked))
#Round infinite odds ratios to max value
allORs <- c(del.ORs,dup.ORs,cnv.ORs)
maxVal <- floor(quantile(allORs[which(!is.infinite(allORs))],probs=q,na.rm=T))
del.ORs[which(is.infinite(del.ORs) | del.ORs>maxVal)] <- maxVal
dup.ORs[which(is.infinite(dup.ORs) | dup.ORs>maxVal)] <- maxVal
cnv.ORs[which(is.infinite(cnv.ORs) | cnv.ORs>maxVal)] <- maxVal
del.ORs[which(is.na(del.ORs))] <- 1
dup.ORs[which(is.na(dup.ORs))] <- 1
cnv.ORs[which(is.na(cnv.ORs))] <- 1
#Smooth odds ratios
del.ORs.smooth <- rollmean(del.ORs,k=100)
dup.ORs.smooth <- rollmean(dup.ORs,k=100)
cnv.ORs.smooth <- rollmean(cnv.ORs,k=100)
#Prep plotting area
par(mar=c(0.4,1.8,0.4,0.2),bty="n")
plot(x=c(window.start,window.end),y=c(1,maxVal),type="n",
xaxs="i",yaxs="i",xlab="",ylab="",xaxt="n",yaxt="n")
abline(h=c(1,maxVal))
#Plot smoothed odds ratios
points(x=case.del$start[-c(1:49,(length(del.ORs)-49):length(del.ORs))],y=del.ORs.smooth,
type="l",lwd=3,col="red")
points(x=case.dup$start[-c(1:49,(length(dup.ORs)-49):length(dup.ORs))],y=dup.ORs.smooth,
type="l",lwd=3,col="blue")
# points(x=case.del$start[-c(1:49,(length(cnv.ORs)-49):length(cnv.ORs))],y=cnv.ORs.smooth,
# type="l",lwd=3,col="black")
#Add axis
axis(2,at=c(1,floor(maxVal)),labels=NA)
axis(2,at=c(1,floor(maxVal)),tick=F,
line=-0.2,las=2,labels=c(1,floor(maxVal)))
}
################
#####Plot tracks
################
#Prepare plotting area -- all CNVs
pdf(paste(WRKDIR,"/rCNV_map_paper/Figures/SMARCA2/SMARCA2_CNV_tracks.all_CNVs.pdf",sep=""),
height=2,width=12)
layout(matrix(c(1,2),nrow=2),heights=c(5,2))
#Mirrored pileup -- all CNVs
mirroredPileups(NDD.DEL.E2.all.pileup,
NDD.DUP.E2.all.pileup,
CTRL.DEL.E2.all.pileup,
CTRL.DUP.E2.all.pileup,
axis.scale=8/10000)
#Plot ORs -- all CNVs
plotORs(NDD.DEL.E2.all.pileup,
NDD.DUP.E2.all.pileup,
CTRL.DEL.E2.all.pileup,
CTRL.DUP.E2.all.pileup,
q=0.90)
#Finish plotting
dev.off()
#Prepare plotting area -- coding CNVs
pdf(paste(WRKDIR,"/rCNV_map_paper/Figures/SMARCA2/SMARCA2_CNV_tracks.coding_CNVs.pdf",sep=""),
height=2,width=12)
layout(matrix(c(1,2),nrow=2),heights=c(5,2))
#Mirrored pileup -- coding CNVs
mirroredPileups(NDD.DEL.E2.coding.pileup,
NDD.DUP.E2.coding.pileup,
CTRL.DEL.E2.coding.pileup,
CTRL.DUP.E2.coding.pileup,
axis.scale=8/10000)
#Plot ORs -- coding CNVs
plotORs(NDD.DEL.E2.coding.pileup,
NDD.DUP.E2.coding.pileup,
CTRL.DEL.E2.coding.pileup,
CTRL.DUP.E2.coding.pileup,
q=0.90)
#Finish plotting
dev.off()
#Prepare plotting area -- haplosufficient CNVs
pdf(paste(WRKDIR,"/rCNV_map_paper/Figures/SMARCA2/SMARCA2_CNV_tracks.haplosufficient_CNVs.pdf",sep=""),
height=2,width=12)
layout(matrix(c(1,2),nrow=2),heights=c(5,2))
#Mirrored pileup -- haplosufficient CNVs
mirroredPileups(NDD.DEL.E2.haplosufficient.pileup,
NDD.DUP.E2.haplosufficient.pileup,
CTRL.DEL.E2.haplosufficient.pileup,
CTRL.DUP.E2.haplosufficient.pileup,
axis.scale=4/10000)
#Plot ORs -- haplosufficient CNVs
plotORs(NDD.DEL.E2.haplosufficient.pileup,
NDD.DUP.E2.haplosufficient.pileup,
CTRL.DEL.E2.haplosufficient.pileup,
CTRL.DUP.E2.haplosufficient.pileup,
q=0.99)
dev.off()
| 10,020 | mit |
00d66c1bd89f15a3205d5ef2b028f22416c8819f | Forever-Peace/GroundControl | Chapters/ch6-surprisal/gamescore_calc_only.R | #THIS SCRIPT JUST CALCULATES GAME SCORE, WITHOUT ANY OF THE PLOTTING FUNCTIONS ETC FOUND IN THE CHAPTER.
#Simply change the filepath here to load the csv (found in the Ground Control github page), then run the entire script. It will take a while.
df = read.csv("rushing_data_stack.csv") #point this to the location of the csv from the github.
# Load Packages -----------------------------------------------------------
library('reshape2')
library("dplyr")
library('doParallel') #the simulation is massive so we're doing this in parallel.
# Munge starting data -----------------------------------------------------
df_rb = df[df$FantPos=='RB',] #restrict to RBs only
#Turn discrete variables into factors
df_rb$week <- factor(df_rb$week)
df_rb$down <- factor(df_rb$down)
df_rb$year <- factor(df_rb$year)
df_rb$FantPos <- factor(df_rb$FantPos)
df_rb$full_name <- factor(df_rb$full_name)
df_rb$nameweekyear <- paste(df_rb$full_name, df_rb$year, df_rb$week, sep="_")
df_rb <- merge(df_rb, dcast(df_rb, full_name+week+year ~ "nameweekyear_atts", length), by=c("full_name","year", "week")) #Find weekly carries.
# Converting distance to probability --------------------------------------
#Kernel Density Estimation
kde_avg <- density(df_rb$rushing_yds, adjust = 5, n=121, from = -20, to = 100)
df_kde <- data.frame(rushing_yds = kde_avg$x, density = kde_avg$y)
df_kde$runprob <- df_kde$density / sum(df_kde$density)
#Combine "mediocre and bad" runs into a single category
lowprob <- sum(df_kde[0:(which.max(df_kde$runprob)-1),]$runprob)
df_kde[0:(which.max(df_kde$runprob)-1),]$runprob <- lowprob
#Add KDE probabilities to database
df_rb <- merge(df_rb, df_kde, by=c("rushing_yds"))
df_rb$yardname <- df_rb$rushing_yds
df_rb[df_rb$yardname<2,]$yardname <- 1 #Rounds bad runs into single category. Numbers determined by shortest yardage below the max probable run.
df_rb$yardname <- factor(df_rb$yardname) #Turns numerical distance values into names of the distances, and saves it as a factor.
df_kde <- df_kde[df_kde$rushing_yds>0,] #Restricts the KDE to same range.
# Generalized Game Surprisal Calculation ----------------------------------
probvector <- df_kde[df_kde$rushing_yds%in%unique(df_rb$yardname),]$runprob
findprob <- function(nameweekyearID) #define function to take a player-game as an input and extract the Game Probability using the multinomial distribution and the probability vector estimated from the KDE.
{
a<-tapply(df_rb[df_rb$nameweekyear==nameweekyearID,]$yardname,df_rb[df_rb$nameweekyear==nameweekyearID,]$yardname,length) #this uses a trick: "yardname" is just "rushing_yds" turned from a number into a name. This function counts the number of times each run distance occurred. Later, we can apply the probability vector to this set when sampling from the multinomial distribution.
a[is.na(a)]<-0
dmultinom(a,prob=probvector)
}
df_s<-dcast(df_rb, full_name+week+year+homefield+defense+Age+nameweekyear+nameweekyear_atts ~ "nameweekyear_prob", value.var = "nameweekyear", fun.aggregate = function(x) findprob(x)) #Apply the game probability function to the database
df_s$surp <- -log2(df_s$nameweekyear_prob) #convert game probability to Game Surprisal.
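#Illustrative only: a game whose stat line has probability 2^-40 under the
#league-wide multinomial scores surp = 40 bits; rarer games score higher.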
#Account for league-average Surprisal ----------------------------------
#Account using league-average simulations. THIS IS HOW WE CALCULATE GAME SCORE IN THE CHAPTER (not loess).
#resampled surprisal at each number of carries
cl <- makeCluster(4) #SET TO NUMBER OF CORES YOU WANT TO USE
registerDoParallel(cl)
#define a function to calculate game surprisal, given a database to sample from (we'll put in df_rb$rushing_yds for "yards") and a number of carries.
resample_yards <- function(yards,samplesize)
{
yardsample = numeric(150000) #populate a vector to fill with game samples. This determines number of simulated games.
for (i in 1:150000) { #if number of simulated games was changed above, change it here too.
a = sample(yards, size=samplesize, replace=T) #sample some carries at random.
b = tapply(a, a, length) #use the name counting trick.
b[is.na(b)]<-0
yardsample[i] <- -log2(dmultinom(b,prob=probvector)) #calculates game surprisal for the sample.
}
yardsample #returns vector of simulated Game Surprisal values.
}
#Define function to apply the game surprisal simulator to every carry size from 1 to 40.
resample_db <- numeric(40)
resample_db <- foreach(n = 1:40, .combine=c) %dopar% {
list(resample_yards(df_rb$yardname, n))
}
stopCluster(cl) #end the CPU cluster.
resample_db_mean <- data.frame(unlist(lapply(resample_db, mean))) #find average of the simulated games at each carry size.
resample_db_mean <- add_rownames(resample_db_mean, "nameweekyear_atts")
names(resample_db_mean)[2] <- "surp_avg"
df_s <- merge(df_s, resample_db_mean, by="nameweekyear_atts") #add these "league average" simulated values to the database.
df_s$gamescore <- df_s$surp - df_s$surp_avg #calculate how much above or below average each game was from the simulated mean.
summary(df_s$gamescore) #there are your Game Score values!
#Save the values into a csv.
#Will automatically put this in your working directory. You can also specify the filepath as part of the filename in the 'file = ' command.
write.table(df_s[c("full_name", "year", "nameweekyear", "nameweekyear_atts", "homefield", "defense", "Age", "surp", "gamescore", "surp_avg")], file = "rb_GameScore.csv", sep = ",")
| 5,412 | gpl-2.0 |
aced0b8bbf6b6d882c308d230c495cb74cf104cd | YvesBas/Tadarida-C | tadaridaC_src/xenocanto/XCwav_mix.R | library(tuneR)
DirW="C:/Users/yvesb/Documents/Tadarida/pollinisateurs/doc_voix_humaine/voix_humaine_database"
ListWrecent=list.files(DirW,pattern=".wav$",full.names=T)
WNrates=c(10,24,50,99)
CoeffSR=1
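# For each white-noise rate below, every wav is mixed with white noise at that
# relative amplitude (rates are percentages of the signal), renormalized to
# 16 bits, and written to a destination folder derived from the source path.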
for (i in 1:length(WNrates))
{
  DestMix=gsub("split202",paste0("wn",WNrates[i],"_202"),DirW)
  dir.create(DestMix)
  for (j in 1:length(ListWrecent))
  {
    Dur=0
    if((file.size(ListWrecent[j]))>50000)
    {
      tempW=readWave(ListWrecent[j])
      Dur=length(tempW@left)/tempW@samp.rate
      if(Dur>0)
      {
        wn=noise(kind="white",duration=length(tempW@left),samp.rate=tempW@samp.rate
                 ,bit=tempW@bit,pcm=tempW@pcm)
        Mix=tempW+wn*WNrates[i]/100
        Mix=normalize(Mix,unit="16")
        Mix@samp.rate=Mix@samp.rate/CoeffSR
        writeWave(Mix,filename=paste0(DestMix,"/",basename(ListWrecent[j])))
        #savewav(Mix,filename=paste0(DestMix,"/",basename(ListWrecent[j])))
      }
    }
    print(paste(j,ListWrecent[j],Dur))
  }
}
| 966 | gpl-3.0 |
76f1318f1226a8e6a61085d948cbafd165883b96 | setempler/miscset | R/scale0.R | #' @name scale0
#' @keywords scale
#' @aliases scaler
#' @author Sven E. Templer
#' @title Scale Numeric Values to Defined Ranges
#' @description
#' Scale numeric values to a range from 0 to 1 with the function
#' \code{scale0} or to a chosen range with \code{scaler}.
#' @param x Numeric vector to transform.
#' @param r Numeric vector of length 2 for range to scale values of
#' \code{x} to.
#' @param b Numeric vector of length 2 to define the border of \code{x}
#' to use as scaling minimum and maximum.
#' @examples
#' #
#'
#' scale0(0:10)
#' scale0(-1:3)
#' scale0(2:3)
#'
#' scaler(0:10)
#' scaler(0:10, 1:2)
#' scaler(0:10, 1:2, c(0, 20))
#'
#' #
#' @rdname scale0
#' @export scale0
scale0 <- function (x) {
xmin <- min(x, na.rm=T)
x <- x - xmin
xmax <- max(x, na.rm=T)
x <- x / xmax
return(x)
}
#' @rdname scale0
#' @export scaler
scaler <- function (x, r = c(0, 1), b = range(x, na.rm = TRUE)) {
rl <- r[1]
ru <- r[2]
bl <- b[1]
bu <- b[2]
if (bl > min(x, na.rm = TRUE))
stop("Lower border in b is greater than minimum of x.")
if (bu < max(x, na.rm = TRUE))
stop("Upper border in b is less than maximum of x.")
(ru - rl) * (x - bl) / (bu - bl) + rl
}
| 1,201 | gpl-3.0 |
7fc9be4ce25cd064720c8f7fa9b4a4c0ceb597aa | SANBI-SA/tools-iuc | tools/limma_voom/limma_voom.R | # This tool takes in a matrix of feature counts as well as gene annotations and
# outputs a table of top expressions as well as various plots for differential
# expression analysis
#
# ARGS: 1.countPath -Path to RData input containing counts
# 2.annoPath -Path to input containing gene annotations
# 3.htmlPath -Path to html file linking to other outputs
# 4.outPath -Path to folder to write all output to
# 5.rdaOpt -String specifying if RData should be saved
# 6.normOpt -String specifying type of normalisation used
# 7.weightOpt -String specifying usage of weights
# 8.contrastData -String containing contrasts of interest
# 9.cpmReq -Float specifying cpm requirement
# 10.sampleReq -Integer specifying cpm requirement
# 11.pAdjOpt -String specifying the p-value adjustment method
# 12.pValReq -Float specifying the p-value requirement
# 13.lfcReq -Float specifying the log-fold-change requirement
# 14.normCounts -String specifying if normalised counts should be output
# 15.factPath -Path to factor information file
# 16.factorData -Strings containing factor names and values if manually input
#
# OUT: Voom Plot
# BCV Plot
# MA Plot
# Expression Table
# HTML file linking to the outputs
#
# Author: Shian Su - [email protected] - Jan 2014
# Modified by: Maria Doyle - Jun 2017
# Record starting time
timeStart <- as.character(Sys.time())
# Load all required libraries
library(methods, quietly=TRUE, warn.conflicts=FALSE)
library(statmod, quietly=TRUE, warn.conflicts=FALSE)
library(splines, quietly=TRUE, warn.conflicts=FALSE)
library(edgeR, quietly=TRUE, warn.conflicts=FALSE)
library(limma, quietly=TRUE, warn.conflicts=FALSE)
library(scales, quietly=TRUE, warn.conflicts=FALSE)
if (packageVersion("limma") < "3.20.1") {
stop("Please update 'limma' to version >= 3.20.1 to run this tool")
}
################################################################################
### Function Declaration
################################################################################
# Function to sanitise contrast equations so there are no whitespaces
# surrounding the arithmetic operators, leading or trailing whitespace
sanitiseEquation <- function(equation) {
equation <- gsub(" *[+] *", "+", equation)
equation <- gsub(" *[-] *", "-", equation)
equation <- gsub(" *[/] *", "/", equation)
equation <- gsub(" *[*] *", "*", equation)
equation <- gsub("^\\s+|\\s+$", "", equation)
return(equation)
}
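# e.g. sanitiseEquation("  mut - wt ") returns "mut-wt" (hypothetical input)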
# Function to sanitise group information
sanitiseGroups <- function(string) {
string <- gsub(" *[,] *", ",", string)
string <- gsub("^\\s+|\\s+$", "", string)
return(string)
}
# Function to change periods to whitespace in a string
unmake.names <- function(string) {
string <- gsub(".", " ", string, fixed=TRUE)
return(string)
}
# Generate output folder and paths
makeOut <- function(filename) {
return(paste0(outPath, "/", filename))
}
# Generating design information
pasteListName <- function(string) {
return(paste0("factors$", string))
}
# Create cata function: default path set, default separator empty and appending
# true by default (Ripped straight from the cat function with altered argument
# defaults)
cata <- function(..., file = htmlPath, sep = "", fill = FALSE, labels = NULL,
append = TRUE) {
if (is.character(file))
if (file == "")
file <- stdout()
else if (substring(file, 1L, 1L) == "|") {
file <- pipe(substring(file, 2L), "w")
on.exit(close(file))
}
else {
file <- file(file, ifelse(append, "a", "w"))
on.exit(close(file))
}
.Internal(cat(list(...), file, sep, fill, labels, append))
}
# Function to write code for html head and title
HtmlHead <- function(title) {
cata("<head>\n")
cata("<title>", title, "</title>\n")
cata("</head>\n")
}
# Function to write code for html links
HtmlLink <- function(address, label=address) {
cata("<a href=\"", address, "\" target=\"_blank\">", label, "</a><br />\n")
}
# Function to write code for html images
HtmlImage <- function(source, label=source, height=600, width=600) {
cata("<img src=\"", source, "\" alt=\"", label, "\" height=\"", height)
cata("\" width=\"", width, "\"/>\n")
}
# Function to write code for html list items
ListItem <- function(...) {
cata("<li>", ..., "</li>\n")
}
TableItem <- function(...) {
cata("<td>", ..., "</td>\n")
}
TableHeadItem <- function(...) {
cata("<th>", ..., "</th>\n")
}
################################################################################
### Input Processing
################################################################################
# Collects arguments from command line
argv <- commandArgs(TRUE)
# Grab arguments
countPath <- as.character(argv[1])
annoPath <- as.character(argv[2])
htmlPath <- as.character(argv[3])
outPath <- as.character(argv[4])
rdaOpt <- as.character(argv[5])
normOpt <- as.character(argv[6])
weightOpt <- as.character(argv[7])
contrastData <- as.character(argv[8])
cpmReq <- as.numeric(argv[9])
sampleReq <- as.numeric(argv[10])
pAdjOpt <- as.character(argv[11])
pValReq <- as.numeric(argv[12])
lfcReq <- as.numeric(argv[13])
normCounts <- as.character(argv[14])
factPath <- as.character(argv[15])
# Process factors
if (as.character(argv[16])=="None") {
factorData <- read.table(factPath, header=TRUE, sep="\t")
factors <- factorData[,-1, drop=FALSE]
} else {
factorData <- list()
for (i in 16:length(argv)) {
newFact <- unlist(strsplit(as.character(argv[i]), split="::"))
factorData <- rbind(factorData, newFact)
} # Factors have the form: FACT_NAME::LEVEL,LEVEL,LEVEL,LEVEL,... The first factor is the Primary Factor.
# Set the row names to be the name of the factor and delete first row
row.names(factorData) <- factorData[, 1]
factorData <- factorData[, -1]
factorData <- sapply(factorData, sanitiseGroups)
factorData <- sapply(factorData, strsplit, split=",")
factorData <- sapply(factorData, make.names)
# Transform factor data into data frame of R factor objects
factors <- data.frame(factorData)
}
# Process other arguments
if (weightOpt=="yes") {
wantWeight <- TRUE
} else {
wantWeight <- FALSE
}
if (rdaOpt=="yes") {
wantRda <- TRUE
} else {
wantRda <- FALSE
}
if (annoPath=="None") {
haveAnno <- FALSE
} else {
haveAnno <- TRUE
}
if (normCounts=="yes") {
wantNorm <- TRUE
} else {
wantNorm <- FALSE
}
#Create output directory
dir.create(outPath, showWarnings=FALSE)
# Split up contrasts seperated by comma into a vector then sanitise
contrastData <- unlist(strsplit(contrastData, split=","))
contrastData <- sanitiseEquation(contrastData)
contrastData <- gsub(" ", ".", contrastData, fixed=TRUE)
bcvOutPdf <- makeOut("bcvplot.pdf")
bcvOutPng <- makeOut("bcvplot.png")
mdsOutPdf <- makeOut("mdsplot.pdf")
mdsOutPng <- makeOut("mdsplot.png")
voomOutPdf <- makeOut("voomplot.pdf")
voomOutPng <- makeOut("voomplot.png")
maOutPdf <- character() # Initialise character vector
maOutPng <- character()
topOut <- character()
for (i in 1:length(contrastData)) {
maOutPdf[i] <- makeOut(paste0("maplot_", contrastData[i], ".pdf"))
maOutPng[i] <- makeOut(paste0("maplot_", contrastData[i], ".png"))
topOut[i] <- makeOut(paste0("limma-voom_", contrastData[i], ".tsv"))
} # Save output paths for each contrast as vectors
normOut <- makeOut("limma-voom_normcounts.tsv")
rdaOut <- makeOut("RData.rda")
sessionOut <- makeOut("session_info.txt")
# Initialise data for html links and images, data frame with columns Label and
# Link
linkData <- data.frame(Label=character(), Link=character(),
stringsAsFactors=FALSE)
imageData <- data.frame(Label=character(), Link=character(),
stringsAsFactors=FALSE)
# Initialise vectors for storage of up/down/neutral regulated counts
upCount <- numeric()
downCount <- numeric()
flatCount <- numeric()
# Read in counts and geneanno data
counts <- read.table(countPath, header=TRUE, sep="\t", stringsAsFactors=FALSE)
row.names(counts) <- counts[, 1]
counts <- counts[ , -1]
countsRows <- nrow(counts)
if (haveAnno) {
geneanno <- read.table(annoPath, header=TRUE, sep="\t", stringsAsFactors=FALSE)
}
################################################################################
### Data Processing
################################################################################
# Extract counts and annotation data
data <- list()
data$counts <- counts
if (haveAnno) {
data$genes <- geneanno
} else {
data$genes <- data.frame(GeneID=row.names(counts))
}
# Filter out genes that do not have a required cpm in a required number of
# samples
preFilterCount <- nrow(data$counts)
sel <- rowSums(cpm(data$counts) > cpmReq) >= sampleReq
data$counts <- data$counts[sel, ]
data$genes <- data$genes[sel, ,drop = FALSE]
postFilterCount <- nrow(data$counts)
filteredCount <- preFilterCount-postFilterCount
# Creating naming data
samplenames <- colnames(data$counts)
sampleanno <- data.frame("sampleID"=samplenames, factors)
# Generating the DGEList object "data"
data$samples <- sampleanno
data$samples$lib.size <- colSums(data$counts)
data$samples$norm.factors <- 1
row.names(data$samples) <- colnames(data$counts)
data <- new("DGEList", data)
factorList <- sapply(names(factors), pasteListName)
formula <- "~0"
for (i in 1:length(factorList)) {
formula <- paste(formula,factorList[i], sep="+")
}
formula <- formula(formula)
design <- model.matrix(formula)
for (i in 1:length(factorList)) {
colnames(design) <- gsub(factorList[i], "", colnames(design), fixed=TRUE)
}
# Calculating normalising factor, estimating dispersion
data <- calcNormFactors(data, method=normOpt)
#data <- estimateDisp(data, design=design, robust=TRUE)
# Generate contrasts information
contrasts <- makeContrasts(contrasts=contrastData, levels=design)
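# e.g. a contrastData entry "mut-wt" (hypothetical level names in the design)
# becomes a contrast column testing mut minus wt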
# Name rows of factors according to their sample
row.names(factors) <- names(data$counts)
################################################################################
### Data Output
################################################################################
# BCV Plot
#png(bcvOutPng, width=600, height=600)
#plotBCV(data, main="BCV Plot")
#imageData[1, ] <- c("BCV Plot", "bcvplot.png")
#invisible(dev.off())
#pdf(bcvOutPdf)
#plotBCV(data, main="BCV Plot")
#invisible(dev.off())
if (wantWeight) {
# Creating voom data object and plot
png(voomOutPng, width=1000, height=600)
vData <- voomWithQualityWeights(data, design=design, plot=TRUE)
imageData[1, ] <- c("Voom Plot", "voomplot.png")
invisible(dev.off())
pdf(voomOutPdf, width=14)
vData <- voomWithQualityWeights(data, design=design, plot=TRUE)
linkData[1, ] <- c("Voom Plot (.pdf)", "voomplot.pdf")
invisible(dev.off())
# Generating fit data and top table with weights
wts <- vData$weights
voomFit <- lmFit(vData, design, weights=wts)
} else {
# Creating voom data object and plot
png(voomOutPng, width=600, height=600)
vData <- voom(data, design=design, plot=TRUE)
imageData[1, ] <- c("Voom Plot", "voomplot.png")
invisible(dev.off())
pdf(voomOutPdf)
vData <- voom(data, design=design, plot=TRUE)
linkData[1, ] <- c("Voom Plot (.pdf)", "voomplot.pdf")
invisible(dev.off())
# Generate voom fit
voomFit <- lmFit(vData, design)
}
# Save normalised counts (log2cpm)
if (wantNorm) {
norm_counts <- data.frame(vData$genes, vData$E)
write.table (norm_counts, file=normOut, row.names=FALSE, sep="\t")
linkData <- rbind(linkData, c("limma-voom_normcounts.tsv", "limma-voom_normcounts.tsv"))
}
# Fit linear model and estimate dispersion with eBayes
voomFit <- contrasts.fit(voomFit, contrasts)
voomFit <- eBayes(voomFit)
# Plot MDS
labels <- names(counts)
png(mdsOutPng, width=600, height=600)
# Currently only using a single factor
plotMDS(vData, labels=labels, col=as.numeric(factors[, 1]), cex=0.8)
imgName <- "Voom Plot"
imgAddr <- "mdsplot.png"
imageData <- rbind(imageData, c(imgName, imgAddr))
invisible(dev.off())
pdf(mdsOutPdf)
plotMDS(vData, labels=labels, cex=0.5)
linkName <- paste0("MDS Plot (.pdf)")
linkAddr <- paste0("mdsplot.pdf")
linkData <- rbind(linkData, c(linkName, linkAddr))
invisible(dev.off())
for (i in 1:length(contrastData)) {
status = decideTests(voomFit[, i], adjust.method=pAdjOpt, p.value=pValReq,
lfc=lfcReq)
sumStatus <- summary(status)
# Collect counts for differential expression
upCount[i] <- sumStatus["1",]
downCount[i] <- sumStatus["-1",]
flatCount[i] <- sumStatus["0",]
# Write top expressions table
top <- topTable(voomFit, coef=i, number=Inf, sort.by="P")
write.table(top, file=topOut[i], row.names=FALSE, sep="\t")
linkName <- paste0("limma-voom_", contrastData[i], ".tsv")
linkAddr <- paste0("limma-voom_", contrastData[i], ".tsv")
linkData <- rbind(linkData, c(linkName, linkAddr))
# Plot MA (log ratios vs mean average) using limma package on weighted
pdf(maOutPdf[i])
limma::plotMA(voomFit, status=status, coef=i,
main=paste("MA Plot:", unmake.names(contrastData[i])),
col=alpha(c("firebrick", "blue"), 0.4), values=c("1", "-1"),
xlab="Average Expression", ylab="logFC")
abline(h=0, col="grey", lty=2)
linkName <- paste0("MA Plot_", contrastData[i], " (.pdf)")
linkAddr <- paste0("maplot_", contrastData[i], ".pdf")
linkData <- rbind(linkData, c(linkName, linkAddr))
invisible(dev.off())
png(maOutPng[i], height=600, width=600)
limma::plotMA(voomFit, status=status, coef=i,
main=paste("MA Plot:", unmake.names(contrastData[i])),
col=alpha(c("firebrick", "blue"), 0.4), values=c("1", "-1"),
xlab="Average Expression", ylab="logFC")
abline(h=0, col="grey", lty=2)
imgName <- paste0("MA Plot_", contrastData[i])
imgAddr <- paste0("maplot_", contrastData[i], ".png")
imageData <- rbind(imageData, c(imgName, imgAddr))
invisible(dev.off())
}
sigDiff <- data.frame(Up=upCount, Flat=flatCount, Down=downCount)
row.names(sigDiff) <- contrastData
# Save relevant items as rda object
if (wantRda) {
if (wantWeight) {
save(data, status, vData, labels, factors, wts, voomFit, top, contrasts,
design,
file=rdaOut, ascii=TRUE)
} else {
save(data, status, vData, labels, factors, voomFit, top, contrasts, design,
file=rdaOut, ascii=TRUE)
}
linkData <- rbind(linkData, c("RData (.rda)", "RData.rda"))
}
# Record session info
writeLines(capture.output(sessionInfo()), sessionOut)
linkData <- rbind(linkData, c("Session Info", "session_info.txt"))
# Record ending time and calculate total run time
timeEnd <- as.character(Sys.time())
timeTaken <- capture.output(round(difftime(timeEnd,timeStart), digits=3))
timeTaken <- gsub("Time difference of ", "", timeTaken, fixed=TRUE)
################################################################################
### HTML Generation
################################################################################
# Clear file
cat("", file=htmlPath)
cata("<html>\n")
cata("<body>\n")
cata("<h3>Limma-voom Analysis Output:</h3>\n")
cata("PDF copies of JPEGS available in 'Plots' section.<br />\n")
if (wantWeight) {
HtmlImage(imageData$Link[1], imageData$Label[1], width=1000)
} else {
HtmlImage(imageData$Link[1], imageData$Label[1])
}
for (i in 2:nrow(imageData)) {
HtmlImage(imageData$Link[i], imageData$Label[i])
}
cata("<h4>Differential Expression Counts:</h4>\n")
cata("<table border=\"1\" cellpadding=\"4\">\n")
cata("<tr>\n")
TableItem()
for (i in colnames(sigDiff)) {
TableHeadItem(i)
}
cata("</tr>\n")
for (i in 1:nrow(sigDiff)) {
cata("<tr>\n")
TableHeadItem(unmake.names(row.names(sigDiff)[i]))
for (j in 1:ncol(sigDiff)) {
TableItem(as.character(sigDiff[i, j]))
}
cata("</tr>\n")
}
cata("</table>")
cata("<h4>Plots:</h4>\n")
for (i in 1:nrow(linkData)) {
if (grepl(".pdf", linkData$Link[i])) {
HtmlLink(linkData$Link[i], linkData$Label[i])
}
}
cata("<h4>Tables:</h4>\n")
for (i in 1:nrow(linkData)) {
if (grepl(".tsv", linkData$Link[i])) {
HtmlLink(linkData$Link[i], linkData$Label[i])
}
}
if (wantRda) {
cata("<h4>R Data Object:</h4>\n")
for (i in 1:nrow(linkData)) {
if (grepl(".rda", linkData$Link[i])) {
HtmlLink(linkData$Link[i], linkData$Label[i])
}
}
}
cata("<p>Alt-click links to download file.</p>\n")
cata("<p>Click floppy disc icon associated history item to download ")
cata("all files.</p>\n")
cata("<p>.tsv files can be viewed in Excel or any spreadsheet program.</p>\n")
cata("<h4>Additional Information</h4>\n")
cata("<ul>\n")
if (cpmReq!=0 && sampleReq!=0) {
tempStr <- paste("Genes without more than", cpmReq,
"CPM in at least", sampleReq, "samples are insignificant",
"and filtered out.")
ListItem(tempStr)
filterProp <- round(filteredCount/preFilterCount*100, digits=2)
tempStr <- paste0(filteredCount, " of ", preFilterCount," (", filterProp,
"%) genes were filtered out for low expression.")
ListItem(tempStr)
}
ListItem(normOpt, " was the method used to normalise library sizes.")
if (wantWeight) {
ListItem("Weights were applied to samples.")
} else {
ListItem("Weights were not applied to samples.")
}
if (pAdjOpt!="none") {
if (pAdjOpt=="BH" || pAdjOpt=="BY") {
tempStr <- paste0("MA-Plot highlighted genes are significant at FDR ",
"of ", pValReq," and exhibit log2-fold-change of at ",
"least ", lfcReq, ".")
ListItem(tempStr)
} else if (pAdjOpt=="holm") {
tempStr <- paste0("MA-Plot highlighted genes are significant at adjusted ",
"p-value of ", pValReq," by the Holm(1979) ",
"method, and exhibit log2-fold-change of at least ",
lfcReq, ".")
ListItem(tempStr)
}
} else {
tempStr <- paste0("MA-Plot highlighted genes are significant at p-value ",
"of ", pValReq," and exhibit log2-fold-change of at ",
"least ", lfcReq, ".")
ListItem(tempStr)
}
cata("</ul>\n")
cata("<h4>Summary of experimental data:</h4>\n")
cata("<p>*CHECK THAT SAMPLES ARE ASSOCIATED WITH CORRECT GROUP(S)*</p>\n")
cata("<table border=\"1\" cellpadding=\"3\">\n")
cata("<tr>\n")
TableHeadItem("SampleID")
TableHeadItem(names(factors)[1]," (Primary Factor)")
if (ncol(factors) > 1) {
  for (i in names(factors)[2:length(names(factors))]) {
    TableHeadItem(i)
  }
}
cata("</tr>\n")
for (i in 1:nrow(factors)) {
cata("<tr>\n")
TableHeadItem(row.names(factors)[i])
for (j in 1:ncol(factors)) {
TableItem(as.character(unmake.names(factors[i, j])))
}
cata("</tr>\n")
}
cata("</table>")
cit <- character()
link <- character()
link[1] <- paste0("<a href=\"",
"http://www.bioconductor.org/packages/release/bioc/",
"vignettes/limma/inst/doc/usersguide.pdf",
"\">", "limma User's Guide", "</a>.")
link[2] <- paste0("<a href=\"",
"http://www.bioconductor.org/packages/release/bioc/",
"vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf",
"\">", "edgeR User's Guide", "</a>")
cit[1] <- paste("Please cite the following paper for this tool:")
cit[2] <- paste("Liu R, Holik AZ, Su S, Jansz N, Chen K, Leong HS, Blewitt ME,",
"Asselin-Labat ML, Smyth GK, Ritchie ME (2015). Why weight? ",
"Modelling sample and observational level variability improves power ",
"in RNA-seq analyses. Nucleic Acids Research, 43(15), e97.")
cit[3] <- paste("Please cite the paper below for the limma software itself.",
"Please also try to cite the appropriate methodology articles",
"that describe the statistical methods implemented in limma,",
"depending on which limma functions you are using. The",
"methodology articles are listed in Section 2.1 of the",
link[1],
"Cite no. 3 only if sample weights were used.")
cit[4] <- paste("Smyth GK (2005). Limma: linear models for microarray data.",
"In: 'Bioinformatics and Computational Biology Solutions using",
"R and Bioconductor'. R. Gentleman, V. Carey, S. doit,.",
"Irizarry, W. Huber (eds), Springer, New York, pages 397-420.")
cit[5] <- paste("Please cite the first paper for the software itself and the",
"other papers for the various original statistical methods",
"implemented in edgeR. See Section 1.2 in the", link[2],
"for more detail.")
cit[6] <- paste("Robinson MD, McCarthy DJ and Smyth GK (2010). edgeR: a",
"Bioconductor package for differential expression analysis",
"of digital gene expression data. Bioinformatics 26, 139-140")
cit[7] <- paste("Robinson MD and Smyth GK (2007). Moderated statistical tests",
"for assessing differences in tag abundance. Bioinformatics",
"23, 2881-2887")
cit[8] <- paste("Robinson MD and Smyth GK (2008). Small-sample estimation of",
"negative binomial dispersion, with applications to SAGE data.",
"Biostatistics, 9, 321-332")
cit[9] <- paste("McCarthy DJ, Chen Y and Smyth GK (2012). Differential",
"expression analysis of multifactor RNA-Seq experiments with",
"respect to biological variation. Nucleic Acids Research 40,",
"4288-4297")
cit[10] <- paste("Law CW, Chen Y, Shi W, and Smyth GK (2014). Voom:",
"precision weights unlock linear model analysis tools for",
"RNA-seq read counts. Genome Biology 15, R29.")
cit[11] <- paste("Ritchie ME, Diyagama D, Neilson J, van Laar R,",
"Dobrovic A, Holloway A and Smyth GK (2006).",
"Empirical array quality weights for microarray data.",
"BMC Bioinformatics 7, Article 261.")
cata("<h3>Citations</h3>\n")
cata(cit[1], "\n")
cata("<br>\n")
cata(cit[2], "\n")
cata("<h4>limma</h4>\n")
cata(cit[3], "\n")
cata("<ol>\n")
ListItem(cit[4])
ListItem(cit[10])
ListItem(cit[11])
cata("</ol>\n")
cata("<h4>edgeR</h4>\n")
cata(cit[5], "\n")
cata("<ol>\n")
ListItem(cit[6])
ListItem(cit[7])
ListItem(cit[8])
ListItem(cit[9])
cata("</ol>\n")
cata("<p>Please report problems or suggestions to: [email protected]</p>\n")
for (i in 1:nrow(linkData)) {
if (grepl("session_info", linkData$Link[i])) {
HtmlLink(linkData$Link[i], linkData$Label[i])
}
}
cata("<table border=\"0\">\n")
cata("<tr>\n")
TableItem("Task started at:"); TableItem(timeStart)
cata("</tr>\n")
cata("<tr>\n")
TableItem("Task ended at:"); TableItem(timeEnd)
cata("</tr>\n")
cata("<tr>\n")
TableItem("Task run time:"); TableItem(timeTaken)
cata("</tr>\n")
cata("</table>\n")
cata("</body>\n")
cata("</html>")
| 23,219 | mit |
e80d8b85766c2a2bef500f8de3a1cc4987a3f9c5 | sgagnon/runQuestionnaires | scripts/tools/analysis.R | working_dir = "~/Experiments/AssocMem/data/"
setwd(working_dir)
d0=read.csv("Source_HitRate_n=9.csv")
summary(d0)
with(d0, interaction.plot(ShockCond, TrialType, Source_HitRate))
fit <- aov(Source_HitRate~(ShockCond*TrialType)+Error(Subid/(ShockCond*TrialType)),
data=d0)
summary(fit)
library(ggplot2)
p <- ggplot(d0, aes(factor(ShockCond), Source_HitRate))
p + geom_boxplot(aes(fill=factor(TrialType))) +
geom_jitter(aes(colour = TrialType))
#########################################
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
require(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
  # This does the summary; it's not easy to understand...
datac <- ddply(data, groupvars, .drop=.drop,
.fun= function(xx, col, na.rm) {
c( N = length2(xx[,col], na.rm=na.rm),
mean = mean (xx[,col], na.rm=na.rm),
sd = sd (xx[,col], na.rm=na.rm)
)
},
measurevar,
na.rm
)
# Rename the "mean" column
datac <- rename(datac, c("mean"=measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
normDataWithin <- function(data=NULL, idvar, measurevar, betweenvars=NULL,
na.rm=FALSE, .drop=TRUE) {
require(plyr)
# Measure var on left, idvar + between vars on right of formula.
data.subjMean <- ddply(data, c(idvar, betweenvars), .drop=.drop,
.fun = function(xx, col, na.rm) {
c(subjMean = mean(xx[,col], na.rm=na.rm))
},
measurevar,
na.rm
)
# Put the subject means with original data
data <- merge(data, data.subjMean)
# Get the normalized data in a new column
measureNormedVar <- paste(measurevar, "_norm", sep="")
data[,measureNormedVar] <- data[,measurevar] - data[,"subjMean"] +
mean(data[,measurevar], na.rm=na.rm)
# Remove this subject mean column
data$subjMean <- NULL
return(data)
}
summarySEwithin <- function(data=NULL, measurevar, betweenvars=NULL, withinvars=NULL,
idvar=NULL, na.rm=FALSE, conf.interval=.95, .drop=TRUE) {
# Ensure that the betweenvars and withinvars are factors
factorvars <- vapply(data[, c(betweenvars, withinvars), drop=FALSE],
FUN=is.factor, FUN.VALUE=logical(1))
if (!all(factorvars)) {
nonfactorvars <- names(factorvars)[!factorvars]
message("Automatically converting the following non-factors to factors: ",
paste(nonfactorvars, collapse = ", "))
data[nonfactorvars] <- lapply(data[nonfactorvars], factor)
}
# Get the means from the un-normed data
datac <- summarySE(data, measurevar, groupvars=c(betweenvars, withinvars),
na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
# Drop all the unused columns (these will be calculated with normed data)
datac$sd <- NULL
datac$se <- NULL
datac$ci <- NULL
# Norm each subject's data
ndata <- normDataWithin(data, idvar, measurevar, betweenvars, na.rm, .drop=.drop)
# This is the name of the new column
measurevar_n <- paste(measurevar, "_norm", sep="")
# Collapse the normed data - now we can treat between and within vars the same
ndatac <- summarySE(ndata, measurevar_n, groupvars=c(betweenvars, withinvars),
na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
# Apply correction from Morey (2008) to the standard error and confidence interval
# Get the product of the number of conditions of within-S variables
nWithinGroups <- prod(vapply(ndatac[,withinvars, drop=FALSE], FUN=nlevels,
FUN.VALUE=numeric(1)))
correctionFactor <- sqrt( nWithinGroups / (nWithinGroups-1) )
# Apply the correction factor
ndatac$sd <- ndatac$sd * correctionFactor
ndatac$se <- ndatac$se * correctionFactor
ndatac$ci <- ndatac$ci * correctionFactor
# Combine the un-normed means with the normed results
merge(datac, ndatac)
}
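# Illustrative note (not executed by this script): with two within-subject
# factors of two levels each, nWithinGroups = 2 * 2 = 4, so the Morey (2008)
# correction factor is sqrt(4 / (4 - 1)) ~= 1.15, inflating the normed SE
# and CI by roughly 15%.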
#########################################
d0c <- summarySEwithin(d0, measurevar="Source_HitRate", withinvars=c("ShockCond","TrialType"), idvar="Subid")
ggplot(d0c, aes(x=factor(TrialType), y=Source_HitRate, fill=ShockCond)) +
geom_bar(position=position_dodge(), stat="identity",
colour="black", # Use black outlines,
size=1) + # Thinner lines
geom_errorbar(aes(ymin=Source_HitRate-se, ymax=Source_HitRate+se),
size=.8, # Thinner lines
width=.25,
position=position_dodge(.9)) +
xlab("Trial Type") +
ylab("Source Hit Rate") +
scale_fill_hue(name="Shock Condition", # Legend label, use darker colors
breaks=c("Safe","Threat"),
labels=c("Safe", "Threat")) +
  ggtitle("The Effect of Shock Condition and\nTrial Type on Source Hit Rates") +
scale_fill_manual(values=c("#CCCCCC","#FFFFFF")) #+
#scale_fill_brewer(palette="Set1") +
#scale_fill_hue(c=90, l=80)
obj_safe_hit <- subset(d0, ShockCond=="Safe" & TrialType=="Object",
select=Source_HitRate)
place_safe_hit <- subset(d0, ShockCond=="Safe" & TrialType=="Place",
select=Source_HitRate)
obj_threat_hit <- subset(d0, ShockCond=="Threat" & TrialType=="Object",
select=Source_HitRate)
place_threat_hit <- subset(d0, ShockCond=="Threat" & TrialType=="Place",
select=Source_HitRate)
data <- data.frame(obj_safe_hit, place_safe_hit, obj_threat_hit, place_threat_hit)
ctab <- cor(data)
library(ellipse)
plotcorr(ctab)
colorfun <- colorRamp(c("#CC0000","white","#3366CC"), space="Lab")
plotcorr(ctab, col=rgb(colorfun((ctab+1)/2), maxColorValue=255))
ord <- order(ctab[1,])
xc <- ctab[ord, ord]
colors <- c("#A50F15","#DE2D26","#FB6A4A","#FCAE91","#FEE5D9","white",
"#EFF3FF","#BDD7E7","#6BAED6","#3182BD","#08519C")
plotcorr(xc, col=colors[5*xc + 6])
| 6,618 | bsd-3-clause |
af75da5ecc7411ff80c4402698c6ac4d6d5d4963 | gavinsimpson/analogue | R/crossval.wa.R | ## crossval method for wa()
`crossval.wa` <- function(obj, method = c("LOO","kfold","bootstrap"),
nboot = 100, nfold = 10, folds = 5,
verbose = getOption("verbose"), ...) {
method <- match.arg(method)
X <- obj$orig.x
ENV <- obj$orig.env
N <- NROW(X)
M <- NCOL(X)
tolOpts <- obj$options.tol
Dtype <- obj$deshrink
if(identical(method, "LOO")) {
pred <- numeric(N)
nr <- N-1 ## number of rows - 1 for LOO
if(verbose) {
writeLines("\n LOO Cross-validation:")
pb <- txtProgressBar(min = 0, max = nr, style = 3)
on.exit(close(pb))
on.exit(cat("\n"), add = TRUE)
}
for(i in seq_along(pred)) {
if(verbose)
setTxtProgressBar(pb, i)
opt <- w.avg(X[-i, ], ENV[-i])
if(obj$tol.dw)
pred[i] <- predWAT(X, ENV, i, opt, tolOpts, nr, M,
Dtype)
else
pred[i] <- predWA(X, ENV, i, opt, Dtype)
}
}
if(identical(method, "kfold")) {
oob.pred <- matrix(NA, ncol = folds, nrow = N)
if(verbose) {
            writeLines(paste0("\n   ", folds, " repeats of ", nfold, "-fold Cross-validation:"))
pb <- txtProgressBar(min = 0, max = folds, style = 3)
on.exit(close(pb))
on.exit(cat("\n"), add = TRUE)
}
ind <- as.integer(rep(seq_len(nfold), length = N)) ## k-fold group indicator
        ## repeat the k-fold CV 'folds' times
for(i in seq_len(folds)) {
if(verbose)
setTxtProgressBar(pb, i)
## do a k-fold CV
            pind <- ind[sample.int(N, N, replace = FALSE)] ## shuffle group membership (without replacement)
for(k in as.integer(seq_len(nfold))) {
sel <- pind == k ## sel is samples in leave out group
N.oob <- sum(sel) ## N in leave out group
N.mod <- sum(!sel) ## N in the model
sel <- which(sel) ## convert to indices
opt <- w.avg(X[-sel, , drop = FALSE], ENV[-sel])
if(obj$tol.dw) {
oob.pred[sel, i] <- predWAT(X, ENV, sel, opt, tolOpts,
N.mod, M, Dtype)
} else {
oob.pred[sel, i] <- predWA(X, ENV, sel, opt, Dtype)
}
}
}
pred <- rowMeans(oob.pred, na.rm = TRUE)
}
if(identical(method, "bootstrap")) {
oob.pred <- matrix(NA, ncol = nboot, nrow = N)
if(verbose) {
writeLines("\n Bootstrap Cross-validation:")
pb <- txtProgressBar(min = 0, max = nboot, style = 3)
on.exit(close(pb))
on.exit(cat("\n"), add = TRUE)
}
ind <- seq_len(N) ## indicator for samples
for(i in seq_len(nboot)) {
if(verbose)
setTxtProgressBar(pb, i)
bSamp <- sample.int(N, N, replace = TRUE)
sel <- which(!ind %in% bSamp) ## need indices!!!
N.oob <- NROW(X[sel, , drop = FALSE])
N.mod <- N - N.oob
opt <- w.avg(X[-sel, , drop = FALSE], ENV[-sel])
if(obj$tol.dw)
oob.pred[sel, i] <- predWAT(X, ENV, sel, opt, tolOpts,
N.mod, M, Dtype)
else
oob.pred[sel, i] <- predWA(X, ENV, sel, opt, Dtype)
}
pred <- rowMeans(oob.pred, na.rm = TRUE)
}
resid <- ENV - pred
out <- list(fitted.values = pred, residuals = resid)
performance <- data.frame(R2 = cor(pred, ENV)^2,
avgBias = mean(resid),
maxBias = unname(maxBias(resid, ENV)),
RMSEP = sqrt(mean(resid^2)),
RMSEP2 = NA,
s1 = NA,
s2 = NA)
if(identical(method, "bootstrap") ||
(identical(method, "kfold") && folds > 1)) {
        ## s1: average within-sample SD of the CV predictions (variability component)
        performance$s1 <- sqrt(mean(apply(oob.pred, 1, sd, na.rm = TRUE)^2))
        ## s2: RMSEP of the averaged predictions; RMSEP2 combines the two components
        performance$s2 <- sqrt(mean(resid^2))
        performance$RMSEP2 <- sqrt(performance$s1^2 + performance$s2^2)
}
out$performance <- performance
.call <- match.call()
.call[[1]] <- as.name("crossval")
out$call <- .call
out$CVparams <- list(method = method, nboot = nboot, nfold = nfold,
folds = folds)
class(out) <- "crossval"
out
}
`predWAT` <- function(X, ENV, i, optima, tolOpts, nr, nc,
deSh) {
tol <- w.tol(X[-i, ], ENV[-i], optima, tolOpts$useN2)
tol <- fixUpTol(tol, tolOpts$na.tol, tolOpts$small.tol,
tolOpts$min.tol, tolOpts$f, ENV[-i])
wa.env <- WATpred(X[-i, ], optima, tol, nr, nc)
p <- WATpred(X[i, , drop = FALSE], optima, tol, 1, nc)
deMod <- deshrink(ENV[-i], wa.env, deSh)
deshrinkPred(p, coef(deMod), deSh)
}
`predWA` <- function(X, ENV, i, optima, deSh) {
wa.env <- WApred(X[-i, ], optima)
p <- WApred(X[i, , drop = FALSE], optima)
deMod <- deshrink(ENV[-i], wa.env, deSh)
deshrinkPred(p, coef(deMod), deSh)
}
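## Example usage (sketch only; assumes a weighted-averaging model fitted
## with wa() from this package):
## mod <- wa(spp, env)
## cv <- crossval(mod, method = "bootstrap", nboot = 100)
## cv$performance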
| 5,200 | gpl-2.0 |
700f8bffc889de7467003d8554e1dd3c47c1de3e | mjharrigan/flux | src/etoh.R | #!/usr/bin/env Rscript
################################################################################
# etoh.R
#
# USAGE: Rscript etoh.R --args cfg = file.cfg
#
################################################################################
# Estimation procedure for heavy alcohol user (ETOH) initiation, remittance, and
# relapse parameters.
#
# DATA SOURCE: National Health Interview Survey (NHIS) 2012 Data
#
################################################################################
# Initiation probability is dependent on age. No initiation probabilities are
# assumed.
#
# Remittance probability found to be 58% w/in first 36 months of initiation
# (from literature). This converts to a monthly probability of 0.024. This
# is for informational purposes only. No remittance value is assumed for
# the simulation.
#
# Relapse probability found to be 46% w/in first 12 months of remittance
# (from literature). This converts to a monthly probability of 0.050. This
# is for informational purposes only. No Relapse value is assumed for
# the simulation.
################################################################################
# A quasi Newtonian optimization method with box constraints is used to find
# the parameters to minimize the objective function. The objective function
# is a least squares of the observed and estimated prevalences of ETOH and
# Former ETOH.
################################################################################
# LOAD PACKAGES AND OPTIONS
################################################################################
options(scipen = 10)
require(common)
################################################################################
# FUNCTION DEFINITIONS
################################################################################
LifeTable <- function(sqliteFile) {
require(RSQLite)
driver <- dbDriver("SQLite")
con <- dbConnect(driver, dbname = sqliteFile)
qry <- dbSendQuery(con, "SELECT * FROM life_table")
res <- fetch(qry, n = -1)
dbHasCompleted(qry)
dbClearResult(qry)
dbDisconnect(con)
dataGen <- split(res, res$gender)
# assume 51% female and 49% male
prob <- (dataGen$female[, 2]*0.51 + dataGen$male[, 2]*0.49)
data <- data.frame(age_months = dataGen$male$age_months, prob = prob)
return(data)
}
SMR <- function(sqliteFile) {
require(RSQLite)
driver <- dbDriver("SQLite")
con <- dbConnect(driver, dbname = sqliteFile)
qry <- dbSendQuery(con, "SELECT * FROM smr_by_gender_risk")
res <- fetch(qry, n = -1)
dbHasCompleted(qry)
dbClearResult(qry)
dbDisconnect(con)
return(res)
}
ProbToRate <- function(prob) { return(-log(1 - prob)) }
RateToProb <- function(rate) { return(1 - (1/exp(rate))) }
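# Sanity checks of the literature figures quoted in the header (illustrative
# only; not used below):
# RateToProb(ProbToRate(0.58) / 36) # ~0.024 monthly remittance probability
# RateToProb(ProbToRate(0.46) / 12) # ~0.050 monthly relapse probability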
FormatPlotData <- function(simCnts, binnedCnts) {
smoothData <- SmoothBinnedData(binnedCnts)
interval <- findInterval(simCnts$age, seq(18, 90, by = 2))
binnedSimCnts <- do.call(rbind, lapply(split(simCnts[, 2:6], interval),
function(x) unlist(lapply(x, mean))))
binnedSimCnts <- cbind(ages = names(Ages(binnedCnts$ages)),
as.data.frame(binnedSimCnts))
names(binnedSimCnts)[names(binnedSimCnts) == "former"] = "former.etoh"
indx <- match(names(binnedCnts), names(binnedSimCnts))
binnedSimCnts <- binnedSimCnts[, indx]
data <- rbind(cbind(binnedCnts, source = "NHIS Data"),
cbind(smoothData, source = "NHIS Data Smoothed"),
cbind(binnedSimCnts, source = "Model Output"))
return(data)
}
ProbPlot <- function(data, plotLab) {
require(ggplot2)
plot <- ggplot(data = data, aes(x = x, y = y))
plot <- plot + geom_line() + xlab(plotLab$x) + ylab(plotLab$y)
plot <- plot + ggtitle(plotLab$title)
}
ETOHPlot <- function(data) {
require(ggplot2)
etohPlot <- ggplot(data = data, aes(x = ages,
group = source,
colour = source,
linetype = source,
shape = source))
etohPlot <- etohPlot + geom_point(aes(y = 100 * etoh / total))
valPnt <- c("NHIS Data" = 19, "NHIS Data Smoothed" = NA, "Model Output" = NA)
etohPlot <- etohPlot + scale_shape_manual(values = valPnt)
etohPlot <- etohPlot + geom_line(aes(y = 100 * etoh / total))
valLine <- c("NHIS Data" = 0, "NHIS Data Smoothed" = 2, "Model Output" = 1)
etohPlot <- etohPlot + scale_linetype_manual(values = valLine)
xAxisSeq <- seq(1, length(ages), 4)
etohPlot <- etohPlot + scale_x_discrete(breaks = ages[xAxisSeq])
etohPlot <- etohPlot + ggtitle("ETOH") + xlab("Age")
etohPlot <- etohPlot + ylim(0, 20) + ylab("Percent")
return(etohPlot)
}
FormerPlot <- function(data) {
require(ggplot2)
formerPlot <- ggplot(data = data, aes(x = ages,
group = source,
colour = source,
linetype = source,
shape = source))
formerPlot <- formerPlot + geom_point(aes(y = 100 * former.etoh / total))
valPnt <- c("NHIS Data" = 19, "NHIS Data Smoothed" = NA, "Model Output" = NA)
formerPlot <- formerPlot + scale_shape_manual(values = valPnt)
formerPlot <- formerPlot + geom_line(aes(y = 100 * former.etoh / total))
valLine <- c("NHIS Data" = 0, "NHIS Data Smoothed" = 2, "Model Output" = 1)
formerPlot <- formerPlot + scale_linetype_manual(values = valLine)
xAxisSeq <- seq(1, length(ages), 4)
formerPlot <- formerPlot + scale_x_discrete(breaks = ages[xAxisSeq])
formerPlot <- formerPlot + ggtitle("Former ETOH") + xlab("Age")
formerPlot <- formerPlot + ylim(0, 20) + ylab("Percent")
return(formerPlot)
}
LifeTableBinned <- function(data) {
data <- data[data$age_months < 90*12, ]
intervals <- findInterval(data$age_months / 12, seq(0, 1200, by = 2))
binnedData <- split(data, intervals)
groupedProb <- unlist(lapply(binnedData, function(x) mean(x$prob)))
ageBin <- unlist(lapply(split(data$age_months/12, intervals),
function(x) paste(x[1], x[2], sep = "-")))
data <- data.frame(ages = ageBin, mortality = groupedProb)
return(data)
}
SliceFn <- function(cnt) { return(cnt[, names(cnt) != "age"]) }
AgeFormat <- function(vec) {
return((12 * as.integer(vec[1])):(12 * (as.integer(vec[2]) + 1) - 1))
}
Ages <- function(binnedAges) {
  ages <- lapply(strsplit(as.character(binnedAges), "-", fixed = TRUE),
                 AgeFormat)
names(ages) <- as.character(binnedAges)
return(ages)
}
SmoothBinnedData <- function(binnedCnts) {
smoothETOH <- loess.smooth(x = 1:34,
y = 100 * binnedCnts$etoh / binnedCnts$total,
span = 0.15,
evaluation = 34)
smoothFormer <- loess.smooth(x = 1:34,
y = 100 * binnedCnts$former.etoh /
binnedCnts$total,
span = 0.2,
evaluation = 34)
numETOH <- smoothETOH$y * binnedCnts$total / 100
numFormer <- smoothFormer$y * binnedCnts$total / 100
numNone <- binnedCnts$total - numETOH - numFormer
binnedCnts$etoh <- numETOH
binnedCnts$former.etoh <- numFormer
binnedCnts$none <- numNone
return(binnedCnts)
}
ETOH <- function(age, binnedCnts) {
indx <- which(age == binnedCnts$ages)
if (length(indx) == 0)
stop("Out of age range")
binnedCnts <- SmoothBinnedData(binnedCnts)
return(100 * binnedCnts$etoh[indx] / binnedCnts$total[indx])
}
FormerETOH <- function(age, binnedCnts) {
indx <- which(age == binnedCnts$ages)
if (length(indx) == 0)
stop("Out of age range")
binnedCnts <- SmoothBinnedData(binnedCnts)
return(100 * binnedCnts$former.etoh[indx] / binnedCnts$total[indx])
}
StateTransitions <- function(ages, person, transProb, smr)
{
counts <- data.frame(age = ages, none = 0, etoh = 0, former = 0, dead = 0)
set <- c(0, 1)
for (age in ages) {
bool <- age == counts$age
indx <- which(transProb$age == age)
init <- transProb$init[indx]
remit <- transProb$remit[indx]
relapse <- transProb$relapse[indx]
mortality <- transProb$mortality[indx]
switch(person,
none = {
counts$none[bool] <- counts$none[bool] + 1
prob <- c(1 - mortality, mortality)
draw <- sample(set, size = 1, prob = prob)
if (draw == 0) {
draw <- sample(set, size = 1, prob = c(1 - init, init))
if (draw == 0) {
person <- "none"
} else {
person <- "etoh"
}
} else {
person = "dead"
}
}
, etoh = {
counts$etoh[bool] <- counts$etoh[bool] + 1
mortProb <- smr$ETOH * mortality
prob <- c(1 - mortProb, mortProb)
draw <- sample(set, size = 1, prob = prob)
if (draw == 0) {
draw <- sample(set, size = 1, prob = c(1 - remit, remit))
if (draw == 0) {
person <- "etoh"
} else {
person <- "former"
}
} else {
person <- "dead"
}
}
, former = {
counts$former[bool] <- counts$former[bool] + 1
mortProb <- smr$"Former ETOH" * mortality
prob <- c(1 - mortProb, mortProb)
draw <- sample(set, size = 1, prob = prob)
if (draw == 0) {
draw <- sample(set, size = 1, prob = c(1 - relapse, relapse))
if (draw == 0) {
person <- "former"
} else {
person <- "etoh"
}
} else {
person <- "dead"
}
}
, dead = {
counts$dead[bool] <- counts$dead[bool] + 1
person <- "dead"
})
}
return(counts)
}
RunPerson <- function(person, ages, transProb, smr) {
counts <- StateTransitions(ages, person, transProb, smr)
return(counts)
}
objective <- function(par, initPopulation, ages, lifeTbl, seed) {
init <- par[1]
remit <- par[2]
relapse <- par[3]
prob <- list(init = init, remit = remit, relapse = relapse)
newPop <- PopulationGeneration(initPopulation, lifeTbl, prob)
newETOH <- newPop$etoh
newFormer <- newPop$former
nTotalSurvive <- sum(unlist(newPop))
percent <- list(etoh = 100 * newETOH / nTotalSurvive,
former = 100 * newFormer / nTotalSurvive)
  # NB: binnedCnts is read from the global environment (loaded below)
  min <- sum((percent$etoh - ETOH(ages, binnedCnts))^2,
             (percent$former - FormerETOH(ages, binnedCnts))^2)
return(min)
}
PopulationGeneration <- function(population, lifeTbl, transProb) {
smr <- lifeTbl$smr
mortality <- lifeTbl$mortality
init <- transProb$init
remit <- transProb$remit
relapse <- transProb$relapse
nNone <- population$none
nETOH <- population$etoh
nFormer <- population$former
nNoneSurvive <- nNone * (1 - mortality)
nETOHSurvive <- nETOH * (1 - (mortality*smr$ETOH))
nFormerSurvive <- nFormer * (1 - (mortality*smr$"Former ETOH"))
nTotalSurvive <- nNoneSurvive + nETOHSurvive + nFormerSurvive
nInit <- init * nNoneSurvive
nRelapse <- relapse * nFormerSurvive
nRemit <- remit * nETOHSurvive
newETOH <- nETOHSurvive + nInit - nRemit + nRelapse
newFormer <- nFormerSurvive + nRemit - nRelapse
newNone <- nTotalSurvive - newFormer - newETOH
return(list(none = newNone, etoh = newETOH, former = newFormer))
}
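# Minimal sketch of one deterministic cohort update (hypothetical numbers,
# shown for illustration only; not part of the estimation below):
# pop <- list(none = 900, etoh = 80, former = 20)
# mort <- list(mortality = 0.001, smr = list(ETOH = 2, "Former ETOH" = 1.5))
# tp <- list(init = 0.02, remit = 0.3, relapse = 0.05)
# PopulationGeneration(pop, mort, tp)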
################################################################################
# MAIN ENTRY POINT
#
################################################################################
# get arguments and store config file location
################################################################################
usage <- 'USAGE: Rscript etoh.R --args cfg = file.cfg'
args <- ParseArgs(commandArgs(trailingOnly = T))
if (is.null(args$cfg))
print(usage)
cfg <- ReadCFG(args$cfg)
################################################################################
# get input from config file
################################################################################
initPercentETOH <- cfg$InitialPercentETOH
initPercentFormer <- cfg$InitialPercentFormerETOH
nPpl <- cfg$NumberofPeople
seed <- cfg$RandomSeed
NHISInputData <- cfg$NHISInputData
sqlFile <- cfg$SQLiteDBFile
probOutput <- cfg$FittedProbabilityOutput
figureOutput <- cfg$Figures
################################################################################
# load input dataset
################################################################################
data <- SafeLoad(NHISInputData)
binnedCnts <- data$binnedCnts
rm(data)
################################################################################
# get life table (mortality) data
################################################################################
lifeTbl <- LifeTableBinned(LifeTable(sqlFile))
smr <- SMR(sqlFile)
smr <- lapply(split(smr$smr, smr$risk), mean)
################################################################################
# calculate the starting population numbers for the optimization procedure
# and the simulation
################################################################################
initETOH <- nPpl * initPercentETOH / 100
initFormer <- nPpl * initPercentFormer / 100
initNone <- nPpl - initETOH - initFormer
initPopulation <- list(none = initNone, etoh = initETOH, former = initFormer)
initSimPopulation <- c(rep("none", round(initNone)),
rep("etoh", round(initETOH)),
rep("former", round(initFormer)))
# get the binned age groups
ages <- binnedCnts$ages
# initial transition probability storage structure
prob <- data.frame(age = unlist(Ages(ages)),
init = as.numeric(NA),
remit = as.numeric(NA),
relapse = as.numeric(NA),
mortality = as.numeric(NA))
# initial parameter guesses for the optimizer
init <- 0.02
remit <- 0.3
relapse <- 0.05
# set the random number generator seed
set.seed(seed)
for (i in 1:length(ages)) {
print(paste("Run", i))
# get monthly mortality
monthlyMort <- lifeTbl$mortality[as.character(ages[i]) == lifeTbl$ages]
  simAge <- AgeFormat(unlist(strsplit(as.character(ages[i]), "-", fixed = TRUE)))
  # convert the monthly mortality to a probability for the whole time step
  mort <- RateToProb(ProbToRate(monthlyMort) * length(simAge))
personMort <- list(mortality = mort, smr = smr)
##############################################################################
# optimization procedure
##############################################################################
# initial parameters
par <- c(init, remit, relapse)
lower <- c(0.001, 0, 0.05)
upper <- c(0.2, 0.8, 0.8)
scale <- c(init, remit, relapse)
control <- list(parscale = scale)
res <- optim(par, fn = objective, initPopulation = initPopulation,
seed = seed, ages = ages[i], lifeTbl = personMort,
method = "L-BFGS-B", lower = lower, upper = upper,
control = control)
if (res$convergence != 0)
print(paste(res$message, "on run:", i))
  # update initial parameters for the next age bin
init <- res$par[1]
remit <- res$par[2]
relapse <- res$par[3]
# calculate the starting population for next time step
transProb <- list(init = init, remit = remit, relapse = relapse)
initPopulation <- PopulationGeneration(initPopulation, personMort, transProb)
##############################################################################
# calculate and store monthly probabilities
##############################################################################
scaledInit <- RateToProb(ProbToRate(init) / length(simAge))
scaledRemit <- RateToProb(ProbToRate(remit) / length(simAge))
scaledRelapse <- RateToProb(ProbToRate(relapse) / length(simAge))
prob$init[simAge == prob$age] <- scaledInit
prob$remit[simAge == prob$age] <- scaledRemit
prob$relapse[simAge == prob$age] <- scaledRelapse
prob$mortality[simAge == prob$age] <- monthlyMort
}
################################################################################
# calculate smoothed probabilities
################################################################################
initProb <- loess.smooth(prob$age, prob$init, span = 0.2,
evaluation = length(prob$age))
remitProb <- loess.smooth(prob$age, prob$remit, span = 0.2,
evaluation = length(prob$age))
relapseProb <- loess.smooth(prob$age, prob$relapse, span = 0.2,
evaluation = length(prob$age))
################################################################################
# output smoothed probabilities to csv
################################################################################
prob <- data.frame(age = prob$age / 12,
init = initProb$y,
remit = remitProb$y,
relapse = relapseProb$y,
mortality = prob$mortality)
write.csv(prob, file = probOutput)
################################################################################
# plot smoothed probabilities
################################################################################
pdf(figureOutput)
plotLab <- list(x = "Age",
y = "Probability",
title = "Monthly Initiation")
print(ProbPlot(as.data.frame(initProb), plotLab))
plotLab <- list(x = "Age",
y = "Probability",
title = "Monthly Remittance")
print(ProbPlot(as.data.frame(remitProb), plotLab))
plotLab <- list(x = "Age",
y = "Probability",
title = "Monthly Relapse")
print(ProbPlot(as.data.frame(relapseProb), plotLab))
################################################################################
# use smoothed probabilities to run full cohort
################################################################################
simCnts <- lapply(initSimPopulation, RunPerson, ages = prob$age,
transProb = prob, smr = smr)
simCnts <- data.frame(age = prob$age, Reduce("+", lapply(simCnts, SliceFn)))
total <- simCnts$none + simCnts$etoh + simCnts$former
simCnts <- cbind(simCnts, total = total)
################################################################################
# plot results from full cohort run
################################################################################
data <- FormatPlotData(simCnts, binnedCnts)
print(ETOHPlot(data))
print(FormerPlot(data))
dev.off()
| 18,969 | mit |
2262140e6ed2cae58984943da628cb1cfab45906 | kmillar/cxxr | src/extra/testr/filtered-test-suite/attr/tc_attr_35.R | expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(8.85169533448293e-238, 2.77884205079773e-237, 8.5330427463164e-242, 7.89244209468013e-215, 6.74732964729372e-231, 1.30818670504849e-217, 1.39113376416096e-208, 1.35683278955814e-215, 7.74002099666521e-219, 3.64254537730231e-220, 6.75916981442421e-296, 0), .Dim = c(1L, 12L), .Dimnames = list(NULL, c(\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"))), \"class\")"));
do.call(`attr`, argv);
}, o=expected);
| 635 | gpl-2.0 |
2262140e6ed2cae58984943da628cb1cfab45906 | kmillar/rho | src/extra/testr/filtered-test-suite/attr/tc_attr_35.R | expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(8.85169533448293e-238, 2.77884205079773e-237, 8.5330427463164e-242, 7.89244209468013e-215, 6.74732964729372e-231, 1.30818670504849e-217, 1.39113376416096e-208, 1.35683278955814e-215, 7.74002099666521e-219, 3.64254537730231e-220, 6.75916981442421e-296, 0), .Dim = c(1L, 12L), .Dimnames = list(NULL, c(\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"))), \"class\")"));
do.call(`attr`, argv);
}, o=expected);
| 635 | gpl-2.0 |
2262140e6ed2cae58984943da628cb1cfab45906 | rho-devel/rho | src/extra/testr/filtered-test-suite/attr/tc_attr_35.R | expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(8.85169533448293e-238, 2.77884205079773e-237, 8.5330427463164e-242, 7.89244209468013e-215, 6.74732964729372e-231, 1.30818670504849e-217, 1.39113376416096e-208, 1.35683278955814e-215, 7.74002099666521e-219, 3.64254537730231e-220, 6.75916981442421e-296, 0), .Dim = c(1L, 12L), .Dimnames = list(NULL, c(\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"))), \"class\")"));
do.call(`attr`, argv);
}, o=expected);
| 635 | gpl-2.0 |
2262140e6ed2cae58984943da628cb1cfab45906 | ArunChauhan/cxxr | src/extra/testr/filtered-test-suite/attr/tc_attr_35.R | expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(8.85169533448293e-238, 2.77884205079773e-237, 8.5330427463164e-242, 7.89244209468013e-215, 6.74732964729372e-231, 1.30818670504849e-217, 1.39113376416096e-208, 1.35683278955814e-215, 7.74002099666521e-219, 3.64254537730231e-220, 6.75916981442421e-296, 0), .Dim = c(1L, 12L), .Dimnames = list(NULL, c(\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"))), \"class\")"));
do.call(`attr`, argv);
}, o=expected);
| 635 | gpl-2.0 |
2262140e6ed2cae58984943da628cb1cfab45906 | cxxr-devel/cxxr | src/extra/testr/filtered-test-suite/attr/tc_attr_35.R | expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(8.85169533448293e-238, 2.77884205079773e-237, 8.5330427463164e-242, 7.89244209468013e-215, 6.74732964729372e-231, 1.30818670504849e-217, 1.39113376416096e-208, 1.35683278955814e-215, 7.74002099666521e-219, 3.64254537730231e-220, 6.75916981442421e-296, 0), .Dim = c(1L, 12L), .Dimnames = list(NULL, c(\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"))), \"class\")"));
do.call(`attr`, argv);
}, o=expected);
| 635 | gpl-2.0 |
2262140e6ed2cae58984943da628cb1cfab45906 | krlmlr/cxxr | src/extra/testr/filtered-test-suite/attr/tc_attr_35.R | expected <- eval(parse(text="NULL"));
test(id=0, code={
argv <- eval(parse(text="list(structure(c(8.85169533448293e-238, 2.77884205079773e-237, 8.5330427463164e-242, 7.89244209468013e-215, 6.74732964729372e-231, 1.30818670504849e-217, 1.39113376416096e-208, 1.35683278955814e-215, 7.74002099666521e-219, 3.64254537730231e-220, 6.75916981442421e-296, 0), .Dim = c(1L, 12L), .Dimnames = list(NULL, c(\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"))), \"class\")"));
do.call(`attr`, argv);
}, o=expected);
| 635 | gpl-2.0 |
092d23c63fb20c7f36370fc045f4669c017cb7a4 | dvera/gyro | R/bedtoolsCoverage.R | #' Calculate coverage of a set of bed files over intervals in a single bed file.
#'
#' \code{bedtoolsCoverage} is a wrapper for \code{bedtools intersect -c}.
#'
#' @param bedFiles A character vector of paths to bed files.
#' @param windowfile A string specifying the path to the bed file within which to calculate coverage of beds.
#' @param windowsize Positive integer indicating size of windows used to resize overlapping windows to comply with bedGraph format restriction against overlapping intervals. This only needs to be specified if windows are overlapping and a bedGraph-compliant file is desired.
#' @param stepsize Positive integer indicating size of the steps used to resize overlapping windows to comply with bedGraph format restriction against overlapping intervals. This only needs to be specified if windows are overlapping and a bedGraph-compliant file is desired.
#' @param scalar A number to multiply the read counts by. If "rpm", the read counts will be multiplied by a 1000000/(number of reads) to yield reads-per-million (rpm).
#' @param threads A positive integer specifying how many samples to process simultaneously.
bedtoolsCoverage <-
function( bedFiles , windowfile , windowsize=25 , stepsize=windowsize , scalar="rpm" , threads=getOption("threads",1L) ){
    # check that only one window file was given
if(length(windowfile) > 1){stop("bedtoolsCoverage can only take 1 window file")}
#get base name
bedname<-basename(removeext(bedFiles))
winname<-basename(removeext(windowfile))
outname<-paste0(basename(removeext(bedFiles)),"_",winname,".bg")
overlapsize<-(windowsize-stepsize)/2
lsub=floor(overlapsize)
#calculate scaling factor
if(scalar=="rpm"){
scalar=1000000 / filelines( bedFiles, threads=threads )
}
#calculate coverage over windowfile
cmdString <- (paste(
"bedtools intersect -sorted -c",
"-b",bedFiles,
"-a",windowfile,
"| awk '{print $1,$2",
if(stepsize<windowsize){
paste("+",lsub)
},
if(stepsize<windowsize){
paste(",$2+",lsub,"+",stepsize)
} else{",$3"},
",$4*",scalar,
"}' OFS='\\t' >",
outname
))
res<-cmdRun(cmdString, threads)
return(outname)
}
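# Example usage (hypothetical file names; assumes bedtools is on the PATH):
# bgs <- bedtoolsCoverage(c("sample1.bed", "sample2.bed"),
#                         windowfile = "genome_w50_s25.bed",
#                         windowsize = 50, stepsize = 25, scalar = "rpm")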
| 2,127 | mit |
e0474f41cf57d4032a9c3c989108c2a059351d79 | mariodeng/FirebrowseR | R/Metadata.ClinicalNames.R | #' Retrieve names of all TCGA clinical data elements (CDEs).
#'
#' Retrieve names of all patient-level clinical data elements (CDES) available in TCGA, unioned across all disease cohorts. A CDE will be listed here only when it has a value other than NA for at least 1 patient case in any disease cohort. For more information on how these CDEs are processed see our <a href="https://confluence.broadinstitute.org/display/GDAC/Documentation">pipeline documentation</a>.
#'
#' @param format Format of result. Default value is json. While json,tsv,csv are available.
#'
#' @export
Metadata.ClinicalNames = function(format = "json"
){
parameters = list(format = format)
validate.Parameters(params = parameters)
url = build.Query(parameters = parameters,
invoker = "Metadata",
method = "ClinicalNames")
ret = download.Data(url, format)
return(ret)
}
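# Example usage (sketch; queries the live Firebrowse API):
# cdes <- Metadata.ClinicalNames(format = "csv")
# head(cdes)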
| 962 | mit |
1488db29eb764dc221de9057684e7cc8c47a0ff9 | apache/arrow | r/R/feather.R | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' Write a Feather file (an Arrow IPC file)
#'
#' Feather provides binary columnar serialization for data frames.
#' It is designed to make reading and writing data frames efficient,
#' and to make sharing data across data analysis languages easy.
#' [write_feather()] can write both the Feather Version 1 (V1),
#' a legacy version available starting in 2016, and the Version 2 (V2),
#' which is the Apache Arrow IPC file format.
#' The default version is V2.
#' V1 files are distinct from Arrow IPC files and lack many features,
#' such as the ability to store all Arrow data types, and compression support.
#' [write_ipc_file()] can only write V2 files.
#'
#' @param x `data.frame`, [RecordBatch], or [Table]
#' @param sink A string file path, URI, or [OutputStream], or path in a file
#' system (`SubTreeFileSystem`)
#' @param version integer Feather file version, Version 1 or Version 2. Version 2 is the default.
#' @param chunk_size For V2 files, the number of rows that each chunk of data
#' should have in the file. Use a smaller `chunk_size` when you need faster
#' random row access. Default is 64K. This option is not supported for V1.
#' @param compression Name of compression codec to use, if any. Default is
#' "lz4" if LZ4 is available in your build of the Arrow C++ library, otherwise
#' "uncompressed". "zstd" is the other available codec and generally has better
#' compression ratios in exchange for slower read and write performance.
#' "lz4" is shorthand for the "lz4_frame" codec.
#' See [codec_is_available()] for details.
#' `TRUE` and `FALSE` can also be used in place of "default" and "uncompressed".
#' This option is not supported for V1.
#' @param compression_level If `compression` is "zstd", you may
#' specify an integer compression level. If omitted, the compression codec's
#' default compression level is used.
#'
#' @return The input `x`, invisibly. Note that if `sink` is an [OutputStream],
#' the stream will be left open.
#' @export
#' @seealso [RecordBatchWriter] for lower-level access to writing Arrow IPC data.
#' @seealso [Schema] for information about schemas and metadata handling.
#' @examples
#' # We recommend the ".arrow" extension for Arrow IPC files (Feather V2).
#' tf1 <- tempfile(fileext = ".feather")
#' tf2 <- tempfile(fileext = ".arrow")
#' tf3 <- tempfile(fileext = ".arrow")
#' on.exit({
#' unlink(tf1)
#' unlink(tf2)
#' unlink(tf3)
#' })
#' write_feather(mtcars, tf1, version = 1)
#' write_feather(mtcars, tf2)
#' write_ipc_file(mtcars, tf3)
#' @include arrow-object.R
write_feather <- function(x,
sink,
version = 2,
chunk_size = 65536L,
compression = c("default", "lz4", "lz4_frame", "uncompressed", "zstd"),
compression_level = NULL) {
# Handle and validate options before touching data
version <- as.integer(version)
assert_that(version %in% 1:2)
if (isTRUE(compression)) compression <- "default"
if (isFALSE(compression)) compression <- "uncompressed"
# TODO(ARROW-17221): if (missing(compression)), we could detect_compression(sink) here
compression <- match.arg(compression)
chunk_size <- as.integer(chunk_size)
assert_that(chunk_size > 0)
if (compression == "default") {
if (version == 2 && codec_is_available("lz4")) {
compression <- "lz4"
} else {
compression <- "uncompressed"
}
}
if (is.null(compression_level)) {
    # Use -1 as a sentinel for "default"
compression_level <- -1L
}
compression_level <- as.integer(compression_level)
# Now make sure that options make sense together
if (version == 1) {
if (chunk_size != 65536L) {
stop("Feather version 1 does not support the 'chunk_size' option", call. = FALSE)
}
if (compression != "uncompressed") {
stop("Feather version 1 does not support the 'compression' option", call. = FALSE)
}
if (compression_level != -1L) {
stop("Feather version 1 does not support the 'compression_level' option", call. = FALSE)
}
}
if (compression != "zstd" && compression_level != -1L) {
stop("Can only specify a 'compression_level' when 'compression' is 'zstd'", call. = FALSE)
}
# Finally, add 1 to version because 2 means V1 and 3 means V2 :shrug:
  version <- version + 1L
  # "lz4" is a convenience alias for the "lz4_frame" codec
if (compression == "lz4") {
compression <- "lz4_frame"
}
compression <- compression_from_name(compression)
x_out <- x
x <- as_writable_table(x)
if (!inherits(sink, "OutputStream")) {
sink <- make_output_stream(sink)
on.exit(sink$close())
}
ipc___WriteFeather__Table(sink, x, version, chunk_size, compression, compression_level)
invisible(x_out)
}
#' @rdname write_feather
#' @export
write_ipc_file <- function(x,
sink,
chunk_size = 65536L,
compression = c("default", "lz4", "lz4_frame", "uncompressed", "zstd"),
compression_level = NULL) {
  # Rebuild this call as write_feather(..., version = 2) and evaluate it in
  # the caller's frame so argument expressions are resolved there
  mc <- match.call()
  mc$version <- 2
  mc[[1]] <- get("write_feather", envir = asNamespace("arrow"))
  eval.parent(mc)
}
#' Read a Feather file (an Arrow IPC file)
#'
#' Feather provides binary columnar serialization for data frames.
#' It is designed to make reading and writing data frames efficient,
#' and to make sharing data across data analysis languages easy.
#' [read_feather()] can read both the Feather Version 1 (V1), a legacy version available starting in 2016,
#' and the Version 2 (V2), which is the Apache Arrow IPC file format.
#' [read_ipc_file()] is an alias of [read_feather()].
#'
#' @inheritParams read_ipc_stream
#' @inheritParams read_delim_arrow
#' @inheritParams make_readable_file
#'
#' @return A `data.frame` if `as_data_frame` is `TRUE` (the default), or an
#' Arrow [Table] otherwise
#'
#' @export
#' @seealso [FeatherReader] and [RecordBatchReader] for lower-level access to reading Arrow IPC data.
#' @examples
#' # We recommend the ".arrow" extension for Arrow IPC files (Feather V2).
#' tf <- tempfile(fileext = ".arrow")
#' on.exit(unlink(tf))
#' write_feather(mtcars, tf)
#' df <- read_feather(tf)
#' dim(df)
#' # Can select columns
#' df <- read_feather(tf, col_select = starts_with("d"))
read_feather <- function(file, col_select = NULL, as_data_frame = TRUE, mmap = TRUE) {
if (!inherits(file, "RandomAccessFile")) {
# Compression is handled inside the IPC file format, so we don't need
# to detect from the file extension and wrap in a CompressedInputStream
# TODO: Why is this the only read_format() functions that allows passing
# mmap to make_readable_file?
file <- make_readable_file(file, mmap)
on.exit(file$close())
}
reader <- FeatherReader$create(file)
col_select <- enquo(col_select)
columns <- if (!quo_is_null(col_select)) {
sim_df <- as.data.frame(reader$schema)
indices <- eval_select(col_select, sim_df)
names(reader)[indices]
}
out <- tryCatch(
reader$Read(columns),
error = read_compressed_error
)
if (isTRUE(as_data_frame)) {
out <- as.data.frame(out)
}
out
}
#' @rdname read_feather
#' @export
read_ipc_file <- read_feather
#' @title FeatherReader class
#' @rdname FeatherReader
#' @name FeatherReader
#' @docType class
#' @usage NULL
#' @format NULL
#' @description This class enables you to interact with Feather files. Create
#' one to connect to a file or other InputStream, and call `Read()` on it to
#' make an `arrow::Table`. See its usage in [`read_feather()`].
#'
#' @section Factory:
#'
#' The `FeatherReader$create()` factory method instantiates the object and
#' takes the following argument:
#'
#' - `file` an Arrow file connection object inheriting from `RandomAccessFile`.
#'
#' @section Methods:
#'
#' - `$Read(columns)`: Returns a `Table` of the selected columns, a vector of
#' integer indices
#' - `$column_names`: Active binding, returns the column names in the Feather file
#' - `$schema`: Active binding, returns the schema of the Feather file
#' - `$version`: Active binding, returns `1` or `2`, according to the Feather
#' file version
#'
#' @export
#' @include arrow-object.R
FeatherReader <- R6Class("FeatherReader",
inherit = ArrowObject,
public = list(
Read = function(columns) {
ipc___feather___Reader__Read(self, columns)
},
print = function(...) {
cat("FeatherReader:\n")
print(self$schema)
invisible(self)
}
),
active = list(
# versions are officially 2 for V1 and 3 for V2 :shrug:
version = function() ipc___feather___Reader__version(self) - 1L,
column_names = function() names(self$schema),
schema = function() ipc___feather___Reader__schema(self)
)
)
#' @export
names.FeatherReader <- function(x) x$column_names
FeatherReader$create <- function(file) {
assert_is(file, "RandomAccessFile")
ipc___feather___Reader__Open(file)
}
| 9,702 | apache-2.0 |
1488db29eb764dc221de9057684e7cc8c47a0ff9 | kou/arrow | r/R/feather.R | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' Write a Feather file (an Arrow IPC file)
#'
#' Feather provides binary columnar serialization for data frames.
#' It is designed to make reading and writing data frames efficient,
#' and to make sharing data across data analysis languages easy.
#' [write_feather()] can write both the Feather Version 1 (V1),
#' a legacy version available starting in 2016, and the Version 2 (V2),
#' which is the Apache Arrow IPC file format.
#' The default version is V2.
#' V1 files are distinct from Arrow IPC files and lack many features,
#' such as the ability to store all Arrow data types, and compression support.
#' [write_ipc_file()] can only write V2 files.
#'
#' @param x `data.frame`, [RecordBatch], or [Table]
#' @param sink A string file path, URI, or [OutputStream], or path in a file
#' system (`SubTreeFileSystem`)
#' @param version integer Feather file version, Version 1 or Version 2. Version 2 is the default.
#' @param chunk_size For V2 files, the number of rows that each chunk of data
#' should have in the file. Use a smaller `chunk_size` when you need faster
#' random row access. Default is 64K. This option is not supported for V1.
#' @param compression Name of compression codec to use, if any. Default is
#' "lz4" if LZ4 is available in your build of the Arrow C++ library, otherwise
#' "uncompressed". "zstd" is the other available codec and generally has better
#' compression ratios in exchange for slower read and write performance.
#' "lz4" is shorthand for the "lz4_frame" codec.
#' See [codec_is_available()] for details.
#' `TRUE` and `FALSE` can also be used in place of "default" and "uncompressed".
#' This option is not supported for V1.
#' @param compression_level If `compression` is "zstd", you may
#' specify an integer compression level. If omitted, the compression codec's
#' default compression level is used.
#'
#' @return The input `x`, invisibly. Note that if `sink` is an [OutputStream],
#' the stream will be left open.
#' @export
#' @seealso [RecordBatchWriter] for lower-level access to writing Arrow IPC data.
#' @seealso [Schema] for information about schemas and metadata handling.
#' @examples
#' # We recommend the ".arrow" extension for Arrow IPC files (Feather V2).
#' tf1 <- tempfile(fileext = ".feather")
#' tf2 <- tempfile(fileext = ".arrow")
#' tf3 <- tempfile(fileext = ".arrow")
#' on.exit({
#' unlink(tf1)
#' unlink(tf2)
#' unlink(tf3)
#' })
#' write_feather(mtcars, tf1, version = 1)
#' write_feather(mtcars, tf2)
#' write_ipc_file(mtcars, tf3)
#' @include arrow-object.R
write_feather <- function(x,
sink,
version = 2,
chunk_size = 65536L,
compression = c("default", "lz4", "lz4_frame", "uncompressed", "zstd"),
compression_level = NULL) {
# Handle and validate options before touching data
version <- as.integer(version)
assert_that(version %in% 1:2)
if (isTRUE(compression)) compression <- "default"
if (isFALSE(compression)) compression <- "uncompressed"
# TODO(ARROW-17221): if (missing(compression)), we could detect_compression(sink) here
compression <- match.arg(compression)
chunk_size <- as.integer(chunk_size)
assert_that(chunk_size > 0)
if (compression == "default") {
if (version == 2 && codec_is_available("lz4")) {
compression <- "lz4"
} else {
compression <- "uncompressed"
}
}
if (is.null(compression_level)) {
    # Use -1 as a sentinel for "default"
compression_level <- -1L
}
compression_level <- as.integer(compression_level)
# Now make sure that options make sense together
if (version == 1) {
if (chunk_size != 65536L) {
stop("Feather version 1 does not support the 'chunk_size' option", call. = FALSE)
}
if (compression != "uncompressed") {
stop("Feather version 1 does not support the 'compression' option", call. = FALSE)
}
if (compression_level != -1L) {
stop("Feather version 1 does not support the 'compression_level' option", call. = FALSE)
}
}
if (compression != "zstd" && compression_level != -1L) {
stop("Can only specify a 'compression_level' when 'compression' is 'zstd'", call. = FALSE)
}
# Finally, add 1 to version because 2 means V1 and 3 means V2 :shrug:
  version <- version + 1L
  # "lz4" is a convenience alias for the "lz4_frame" codec
if (compression == "lz4") {
compression <- "lz4_frame"
}
compression <- compression_from_name(compression)
x_out <- x
x <- as_writable_table(x)
if (!inherits(sink, "OutputStream")) {
sink <- make_output_stream(sink)
on.exit(sink$close())
}
ipc___WriteFeather__Table(sink, x, version, chunk_size, compression, compression_level)
invisible(x_out)
}
#' @rdname write_feather
#' @export
write_ipc_file <- function(x,
sink,
chunk_size = 65536L,
compression = c("default", "lz4", "lz4_frame", "uncompressed", "zstd"),
compression_level = NULL) {
  # Rebuild this call as write_feather(..., version = 2) and evaluate it in
  # the caller's frame so argument expressions are resolved there
  mc <- match.call()
  mc$version <- 2
  mc[[1]] <- get("write_feather", envir = asNamespace("arrow"))
  eval.parent(mc)
}
#' Read a Feather file (an Arrow IPC file)
#'
#' Feather provides binary columnar serialization for data frames.
#' It is designed to make reading and writing data frames efficient,
#' and to make sharing data across data analysis languages easy.
#' [read_feather()] can read both the Feather Version 1 (V1), a legacy version available starting in 2016,
#' and the Version 2 (V2), which is the Apache Arrow IPC file format.
#' [read_ipc_file()] is an alias of [read_feather()].
#'
#' @inheritParams read_ipc_stream
#' @inheritParams read_delim_arrow
#' @inheritParams make_readable_file
#'
#' @return A `data.frame` if `as_data_frame` is `TRUE` (the default), or an
#' Arrow [Table] otherwise
#'
#' @export
#' @seealso [FeatherReader] and [RecordBatchReader] for lower-level access to reading Arrow IPC data.
#' @examples
#' # We recommend the ".arrow" extension for Arrow IPC files (Feather V2).
#' tf <- tempfile(fileext = ".arrow")
#' on.exit(unlink(tf))
#' write_feather(mtcars, tf)
#' df <- read_feather(tf)
#' dim(df)
#' # Can select columns
#' df <- read_feather(tf, col_select = starts_with("d"))
read_feather <- function(file, col_select = NULL, as_data_frame = TRUE, mmap = TRUE) {
if (!inherits(file, "RandomAccessFile")) {
# Compression is handled inside the IPC file format, so we don't need
# to detect from the file extension and wrap in a CompressedInputStream
# TODO: Why is this the only read_format() functions that allows passing
# mmap to make_readable_file?
file <- make_readable_file(file, mmap)
on.exit(file$close())
}
reader <- FeatherReader$create(file)
col_select <- enquo(col_select)
columns <- if (!quo_is_null(col_select)) {
sim_df <- as.data.frame(reader$schema)
indices <- eval_select(col_select, sim_df)
names(reader)[indices]
}
out <- tryCatch(
reader$Read(columns),
error = read_compressed_error
)
if (isTRUE(as_data_frame)) {
out <- as.data.frame(out)
}
out
}
#' @rdname read_feather
#' @export
read_ipc_file <- read_feather
#' @title FeatherReader class
#' @rdname FeatherReader
#' @name FeatherReader
#' @docType class
#' @usage NULL
#' @format NULL
#' @description This class enables you to interact with Feather files. Create
#' one to connect to a file or other InputStream, and call `Read()` on it to
#' make an `arrow::Table`. See its usage in [`read_feather()`].
#'
#' @section Factory:
#'
#' The `FeatherReader$create()` factory method instantiates the object and
#' takes the following argument:
#'
#' - `file` an Arrow file connection object inheriting from `RandomAccessFile`.
#'
#' @section Methods:
#'
#' - `$Read(columns)`: Returns a `Table` of the selected columns, a vector of
#' integer indices
#' - `$column_names`: Active binding, returns the column names in the Feather file
#' - `$schema`: Active binding, returns the schema of the Feather file
#' - `$version`: Active binding, returns `1` or `2`, according to the Feather
#' file version
#'
#' @export
#' @include arrow-object.R
FeatherReader <- R6Class("FeatherReader",
inherit = ArrowObject,
public = list(
Read = function(columns) {
ipc___feather___Reader__Read(self, columns)
},
print = function(...) {
cat("FeatherReader:\n")
print(self$schema)
invisible(self)
}
),
active = list(
# versions are officially 2 for V1 and 3 for V2 :shrug:
version = function() ipc___feather___Reader__version(self) - 1L,
column_names = function() names(self$schema),
schema = function() ipc___feather___Reader__schema(self)
)
)
#' @export
names.FeatherReader <- function(x) x$column_names
FeatherReader$create <- function(file) {
assert_is(file, "RandomAccessFile")
ipc___feather___Reader__Open(file)
}
| 9,702 | apache-2.0 |
572a7be744e216f1346e00a5c6addf8a435f79c0 | jukiewiczm/renjin | packages/grid/src/main/R/renjinStubs.R |
# Renjin stub: returns an integer vector of zeros, one element per unit
validUnits <- function(units) {
  integer(length(units))
}
| 87 | gpl-3.0 |
572a7be744e216f1346e00a5c6addf8a435f79c0 | hlin09/renjin | packages/grid/src/main/R/renjinStubs.R |
# Renjin stub: returns an integer vector of zeros, one element per unit
validUnits <- function(units) {
  integer(length(units))
}
| 87 | gpl-3.0 |
7223aea80129539b0e0a5784023a29875517fc71 | nathanlazar/BaTFLED3D | R/im_2_mat.R | #' Plot heatmaps of two matrices in red and blue
#'
#' Displays two heatmaps of matrices using red and blue colors. Options to scale
#' and sort as well as any other graphical parameters with ... Sorting attempts to match
#' columns between the two matrices using their correlation over rows. If \code{sort==TRUE}
#' then the new ordering for the second matrix is returned.
#'
#' @importFrom grDevices colorRampPalette
#' @importFrom stats sd
#' @importFrom graphics image
#'
#' @export
#' @param x1 matrix
#' @param x2 matrix
#' @param high string of either 'red' or 'blue' used to show higher values
#' @param xaxt string indicating how to display the x axis. Suppress x axis with 'n'
#' @param yaxt string indicating how to display the y axis. Suppress y axis with 'n'
#' @param scale character; 'col' z-scales each column to mean zero and
#'   standard deviation one, 'all' z-scales each whole matrix; any other
#'   value leaves the matrices unscaled.
#' @param absol logical indicating whether to take absolute value of the entries
#' before plotting
#' @param sort logical indicating whether the columns of the matrix should
#' be sorted in decreasing order of their means
#' @param center logical indicating wether to center ranges for x and y around zero
#' @param main1 string to be used as the main title for the first matrix image
#' @param main2 string to be used as the main title for the second matrix image
#' @param ... other graphical parameters passed to image
#'
#' @return If \code{sort==TRUE} the ordering of the second matrix used to match columns.
#'
#' @examples
#' par(mfrow=c(1,2))
#' im_2_mat(matrix(1:12, nrow=3, ncol=4), matrix(13:24, nrow=3, ncol=4), sort=FALSE, scale='none')
#' im_2_mat(matrix(1:12, nrow=3, ncol=4), matrix(13:24, nrow=3, ncol=4), sort=TRUE, scale='none')
#' im_2_mat(matrix(1:12, nrow=3, ncol=4), matrix(13:24, nrow=3, ncol=4), sort=TRUE, scale='col')
#' im_2_mat(matrix(1:12, nrow=3, ncol=4), matrix(13:24, nrow=3, ncol=4), sort=FALSE,
#'          scale='none', center=TRUE)
im_2_mat <- function(x1, x2, high='red', xaxt='n', yaxt='n', scale='col',
absol=FALSE, sort=TRUE, center=FALSE, main1='', main2='', ...) {
# display images of rotated matrices with the option to sort columns
# by absolute magnitude, zlimits are set to the max and min of both matrices
rwb <- colorRampPalette(c('red', 'white', 'blue'), space='rgb')
# Remove constant columns if present
if(sum(apply(x1, 2, function(x) all(x == rep(1,nrow(x1)))))>0) {
x1 <- x1[,colnames(x1)!='const',drop=FALSE]
    const1 <- TRUE
} else const1 <- FALSE
if(sum(apply(x2, 2, function(x) all(x == rep(1,nrow(x2)))))>0) {
x2 <- x2[,colnames(x2)!='const',drop=FALSE]
const2 <- TRUE
} else const2 <- FALSE
if(scale=='col') {
# Safe version of scale (if sd of columns = 0, set to minimum value)
x1.col.means <- apply(x1, 2, mean)
x1.col.sds <- apply(x1, 2, sd)
x1.col.sds[x1.col.sds==0] <- 1e-300
x1 <- sweep(x1, MARGIN=2, STATS=x1.col.means, FUN='-')
x1 <- sweep(x1, MARGIN=2, STATS=x1.col.sds, FUN='/')
x2.col.means <- apply(x2, 2, mean)
x2.col.sds <- apply(x2, 2, sd)
x2.col.sds[x2.col.sds==0] <- 1e-300
x2 <- sweep(x2, MARGIN=2, STATS=x2.col.means, FUN='-')
x2 <- sweep(x2, MARGIN=2, STATS=x2.col.sds, FUN='/')
} else if(scale=='all') {
x1 <- (x1 - mean(x1))/sd(x1)
x2 <- (x2 - mean(x2))/sd(x2)
}
if(absol) {x1 <- abs(x1); x2 <- abs(x2)}
# Rearrange columns
if(sort) {
if(!all(dim(x1)==dim(x2))) {
if(sum(grepl('const', rownames(x1))) + sum(grepl('const', rownames(x2)))==1) {
      x1 <- x1[!grepl('const', rownames(x1)),]
      x2 <- x2[!grepl('const', rownames(x2)),]
} else {
      print('Cannot sort: matrices are different sizes')
sort <- FALSE
}
} else {
# Make a correlation matrix between the columns of the two matrices
cor.mat <- cor(x2,x1)
reorder <- apply(abs(cor.mat), 2, which.max)
reorder[which(duplicated(reorder))] <- (1:ncol(x1))[!(1:ncol(x1) %in% reorder)]
x2 <- x2[,reorder]
cor.mat <- cor(x2, x1)
cor <- diag(cor.mat)
for(i in 1:ncol(x2))
if(cor[i] < 0) {
x2[,i] <- -x2[,i]
reorder[i] <- -reorder[i]
}
}
}
if(center) {
zlim=c(-max(c(abs(x1), abs(x2))), max(c(abs(x1), abs(x2))))
} else {
zlim <- range(x1, x2, na.rm=TRUE)
}
# Add back in constant columns
if(const1)
x1 <- cbind(const=1, x1)
if(const2) {
x2 <- cbind(const=1, x2)
if(sort) {
reorder[reorder > 0] <- reorder[reorder > 0] + 1
reorder[reorder < 0] <- reorder[reorder < 0] - 1
reorder <- c(const=1, reorder)
}
}
if(high=='blue') {
    image(rot(x1), col=rwb(256), zlim=zlim, xaxt=xaxt, yaxt=yaxt, main=main1, ...)
    image(rot(x2), col=rwb(256), zlim=zlim, xaxt=xaxt, yaxt=yaxt, main=main2, ...)
} else if(high=='red') {
    image(rot(x1), col=rev(rwb(256)), zlim=zlim, xaxt=xaxt, yaxt=yaxt, main=main1, ...)
    image(rot(x2), col=rev(rwb(256)), zlim=zlim, xaxt=xaxt, yaxt=yaxt, main=main2, ...)
}
if(sort) {
return(reorder)
} else return(1:ncol(x2))
}
| 5,158 | mit |
696b261bcf6c6a2a78bfa8525f8bde85c69fa721 | CodeGit/SequenceImp | dependencies-bin/macosx/bin/R/lib/R/library/ShortRead/unitTests/test_SRList.R | test_SRList_construction <- function() {
srl <- SRList()
checkTrue(validObject(srl))
checkEquals(0, length(srl))
srl <- SRList(list())
checkTrue(validObject(srl))
checkEquals(0, length(srl))
srl <- SRList(list(1))
checkTrue(validObject(srl))
checkEquals(1, length(srl))
srl <- SRList(list(1, 2))
checkTrue(validObject(srl))
checkEquals(2, length(srl))
checkEquals(1, length(srl[[1]]))
srl <- SRList(1, 2)
checkTrue(validObject(srl))
checkEquals(2, length(srl))
checkEquals(1, length(srl[[1]]))
}
| 568 | gpl-3.0 |
a0e6bac81cf7f18e0f0ba0c5dc8858371b6a15e9 | cchacua/m2-migrations | scripts/rsql/queries.R | # To see secure folder:
# SHOW VARIABLES LIKE "secure_file_priv";
# mysql -h "localhost" -u "root" -p
# PATSTAT
dbListTables(patstat)
dbListFields(patstat, "TLS201_APPLN")
dbListFields(patstat, "TLS211_PAT_PUBLN")
dbListFields(patstat, "TLS211_SAMPLE")
# rs<-dbGetQuery(patstat, "SELECT APPLN_ID FROM TLS201_APPLN LIMIT 0, 10;")
# rs<-as.data.frame(rs)
# rs$APPLN_ID<-as.numeric(rs$APPLN_ID)
#q1<- dbSendQuery(patstat, "SELECT APPLN_ID, DOCDB_FAMILY_ID, DOCDB_FAMILY_SIZE, INPADOC_FAMILY_ID, NB_APPLICANTS, NB_INVENTORS FROM TLS201_APPLN LIMIT 1000, 1100;")
q1<- dbSendQuery(patstat, "SELECT TLS201_APPLN.APPLN_ID, TLS201_APPLN.DOCDB_FAMILY_ID, TLS201_APPLN.DOCDB_FAMILY_SIZE, TLS201_APPLN.INPADOC_FAMILY_ID, TLS201_APPLN.NB_APPLICANTS, TLS201_APPLN.NB_INVENTORS FROM TLS201_APPLN LIMIT 1000, 1100;")
apn<-dbFetch(q1)
dbClearResult(q1)
q2<- dbSendQuery(patstat, "SELECT patstat2016b.TLS201_APPLN.APPLN_ID, patstat2016b.TLS201_APPLN.DOCDB_FAMILY_ID, patstat2016b.TLS201_APPLN.DOCDB_FAMILY_SIZE, patstat2016b.TLS201_APPLN.INPADOC_FAMILY_ID, patstat2016b.TLS201_APPLN.NB_APPLICANTS, patstat2016b.TLS201_APPLN.NB_INVENTORS FROM patstat2016b.TLS201_APPLN LIMIT 1000, 1100;")
apn<-dbFetch(q2)
dbClearResult(q2)
q2<- dbSendQuery(patstat, "SELECT * FROM patstat2016b.TLS211_PAT_PUBLN
WHERE patstat2016b.TLS211_PAT_PUBLN.PUBLN_AUTH='US' LIMIT 1000, 1100;")
apn<-dbFetch(q2)
dbClearResult(q2)
q2<- dbSendQuery(patstat, "SELECT * FROM patstat2016b.TLS211_SAMPLE
LIMIT 1000, 1100;")
apn<-dbFetch(q2)
dbClearResult(q2)
q2<- dbSendQuery(patstat, "SELECT CONCAT(PUBLN_AUTH,PUBLN_NR) AS pub_number FROM patstat2016b.TLS211_PAT_PUBLN LIMIT 1000, 1100;")
apn211<-dbFetch(q2)
dbClearResult(q2)
q3<- dbSendQuery(patstat, "SELECT patstat2016b.TLS201_APPLN.APPLN_ID,
patstat2016b.TLS211_PAT_PUBLN.PUBLN_NR,
patstat2016b.TLS201_APPLN.DOCDB_FAMILY_ID,
patstat2016b.TLS201_APPLN.DOCDB_FAMILY_SIZE,
patstat2016b.TLS201_APPLN.INPADOC_FAMILY_ID,
patstat2016b.TLS201_APPLN.NB_APPLICANTS,
patstat2016b.TLS201_APPLN.NB_INVENTORS
FROM patstat2016b.TLS201_APPLN
INNER JOIN patstat2016b.TLS211_PAT_PUBLN
ON patstat2016b.TLS201_APPLN.APPLN_ID=patstat2016b.TLS211_PAT_PUBLN.APPLN_ID
LIMIT 0, 100;")
apn<-dbFetch(q3, n=100)
dbClearResult(q3)
# Riccaboni
dbListTables(riccaboni)
dbListFields(riccaboni, "t01")
q2<- dbSendQuery(riccaboni, "SELECT RIGHT(riccaboni.t01.pat, LENGTH(riccaboni.t01.pat)-2) AS pat, riccaboni.t01.invs, riccaboni.t01.localInvs, riccaboni.t01.apps, riccaboni.t01.yr, riccaboni.t01.classes, riccaboni.t01.wasComplete FROM riccaboni.t01 LIMIT 0, 100;")
apn2<-dbFetch(q2)
dbClearResult(q2)
q3<- dbSendQuery(riccaboni, "SELECT * FROM riccaboni.t01;")
ric.pn<-dbFetch(q3)
dbClearResult(q3)
q3<- dbSendQuery(riccaboni, "SELECT riccaboni.t01.pat FROM riccaboni.t01;")
ric.pn<-dbFetch(q3)
dbClearResult(q3)
save(ric.pn, file="../output/riccabonit01pat.RData")
ric.pn<-as.data.frame(ric.pn)
q3<- dbSendQuery(patstat, "SELECT *
FROM patstat2016b.TLS211_PAT_PUBLN
LIMIT 0, 500;")
apn<-dbFetch(q3, n=500)
dbClearResult(q3) | 3,462 | gpl-3.0 |
a7034d95277f8c7f29fc568a060c5edaf5538df9 | aegorenkov/science_hackday_conservation | cvtest.R | library(boot)
setwd("~/DCHackday")
data.model <- read.csv("datafinal.csv")
# inverse-logit: converts a linear predictor to a probability
odds <- function(x) {
  exp(x)/(exp(x)+1)
}
form = formula(protected~ Percent_of_HUC_Rare + MeanPrecip)
fold = 10
x <- na.omit(data.model)
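# randomly shuffle the rows and assign each to one of 'fold' roughly
# equal-sized groups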
n <- nrow(x)
prop <- n%/%fold
newseq <- rank(runif(n))
k <- as.factor((newseq - 1) %/% prop + 1)
y <- unlist(strsplit(as.character(form), ""))[2]
vec.accuracy <- vector(length = fold)
for (i in seq(fold)) {
  # fit on the training folds only, then predict the held-out fold
  fit <- glm(form, family = binomial("logit"), data = x[k != i, ])
  fcast <- odds(predict(fit, newdata = x[k == i, ]))
cm <- table(x$protected[k == i], ifelse(fcast > .4, 1, 0))
accuracy <- (cm[1, 1] + cm[2, 2])/sum(cm)
#accuracy <- sum((x$protected[k == i]-fcast)**2, na.rm= TRUE)
vec.accuracy[i] <- accuracy
}
avg.accuracy <- mean(vec.accuracy)
avg.error <- 1 - avg.accuracy
cv <- data.frame(Accuracy = avg.accuracy,
Error = avg.error)
cv | 885 | mit |
696b261bcf6c6a2a78bfa8525f8bde85c69fa721 | CodeGit/SequenceImp | dependencies-bin/linux/bin/R/lib64/R/library/ShortRead/unitTests/test_SRList.R | test_SRList_construction <- function() {
srl <- SRList()
checkTrue(validObject(srl))
checkEquals(0, length(srl))
srl <- SRList(list())
checkTrue(validObject(srl))
checkEquals(0, length(srl))
srl <- SRList(list(1))
checkTrue(validObject(srl))
checkEquals(1, length(srl))
srl <- SRList(list(1, 2))
checkTrue(validObject(srl))
checkEquals(2, length(srl))
checkEquals(1, length(srl[[1]]))
srl <- SRList(1, 2)
checkTrue(validObject(srl))
checkEquals(2, length(srl))
checkEquals(1, length(srl[[1]]))
}
| 568 | gpl-3.0 |
696b261bcf6c6a2a78bfa8525f8bde85c69fa721 | CodeGit/SequenceImp | dependencies-bin/windows/bin/R/library/ShortRead/unitTests/test_SRList.R | test_SRList_construction <- function() {
srl <- SRList()
checkTrue(validObject(srl))
checkEquals(0, length(srl))
srl <- SRList(list())
checkTrue(validObject(srl))
checkEquals(0, length(srl))
srl <- SRList(list(1))
checkTrue(validObject(srl))
checkEquals(1, length(srl))
srl <- SRList(list(1, 2))
checkTrue(validObject(srl))
checkEquals(2, length(srl))
checkEquals(1, length(srl[[1]]))
srl <- SRList(1, 2)
checkTrue(validObject(srl))
checkEquals(2, length(srl))
checkEquals(1, length(srl[[1]]))
}
| 568 | gpl-3.0 |
c29b0a495d8eac83cc473b84df69cd985109f1c2 | nsmader/acs-constructicon | pickacs/R/measure.R | measure <- function(obj) {
UseMethod("measure")
}
measure.default <- function(obj) {
warning("Unknown class: ", class(x))
}
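# Dispatch sketch (hypothetical class; no specific method is defined yet, so
# the call falls through to measure.default):
# obj <- structure(list(), class = "acs_table")
# measure(obj)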
| 132 | agpl-3.0 |
f4048b383d0ec5fcb68e8cb697e726fcbfbe676f | SergeyMirvoda/da2016 | classwork2/vectors.R | # Numeric and character vectors
num_vector <- c(1, 10, 49)
char_vector <- c("a", "b", "c")
# Fill in the logical vector (example values; any three TRUE/FALSE entries work)
bool_vector <- c(TRUE, FALSE, TRUE)
# Select values from the numeric vector using the logical vector
num_vector[bool_vector]
# Repeat the same for the character vector
# Build a logical vector from the numeric one by picking elements greater than 10
x<-num_vector>10
# Use the vector x to select data from the numeric vector
# Write the same without using the intermediate variable x
# Come up with a similar example for the character vector
| 902 | unlicense |
f4048b383d0ec5fcb68e8cb697e726fcbfbe676f | SergeyMirvoda/MD-DA-2017 | classwork2/vectors.R | # Numeric and character vectors
num_vector <- c(1, 10, 49)
char_vector <- c("a", "b", "c")
# Fill in the logical vector (example values; any three TRUE/FALSE entries work)
bool_vector <- c(TRUE, FALSE, TRUE)
# Select values from the numeric vector using the logical vector
num_vector[bool_vector]
# Repeat the same for the character vector
# Build a logical vector from the numeric one by picking elements greater than 10
x<-num_vector>10
# Use the vector x to select data from the numeric vector
# Write the same without using the intermediate variable x
# Come up with a similar example for the character vector
| 902 | mit |
06601c1037b821479f2af97648a4f1d6849afc32 | tudob/wfg | R/wfgTrafos.R | # Transformations
# the function-names are those in the wfg-paper plus prefix wfg
# in the spec the user uses only the part after the "_" (no b/s/r-kind)
# conventions used:
# a parameter-default of =NA shows that there is a default (which will be calculated on demand) as opposed to a required value
# function naming here is as in wfg-paper plus prefix wfg (but tNone does not exist)
# source("R/wfgUtil.R")
#' WFG Transformations
#'
#' tNone is a transformation that changes nothing. It is used to move the entries cursor along to change later entries.\cr
#' tPoly is the polynomial bias transformation.\cr
#' tFlat creates a region in search space in which all points have the same objective values.\cr
#' tParam is the parameter-dependent transformation.\cr
#' tLinear creates a linear shift of the true optimum.\cr
#' tDecept creates regions in the search space that have a sub-optimal value but larger area.\cr
#' tMulti creates many local optima.\cr
#' tSum creates a dependence between different search-space entries.\cr
#' tNonsep creates a dependence between objectives.\cr
#'
#' @param y
#' The value of the search space entry to which to apply this to.
#' \cr
#' @param alpha
#' tPoly: alpha>1 biases toward 0, <1 biases toward 1.
#' \cr
#' @param value
#' tFlat: The value of the flat region. The adjacent regions interpolate to this value.
#' @param from
#' tFlat: The region of the search space that is flat, same for every dimension.\cr
#' tSum: The entries which are made dependent on each other
#' @param to
#' see from
#' \cr
#' @param y.prime
#' tParam: Is set by the system, it does not occur in the parameters the user writes in the specification
#' @param factor
#' tParam: Determines the slope of the ramp.
#' @param starter
#' tParam: The influence region.
#' @param ender
#' see starter
#' \cr
#' @param zero.loc
#' tLinear: The location of the true optimum.
#' \cr
#' @param opti.loc
#' tDecept and tMulti: The location of the true optimum.
#' @param aperture
#' tDecept: The size of the opening around the true optimum.
#' @param deceptive.value
#' tDecept: The value of the sub-optimal areas.
#' \cr
#' @param num.minima
#' tMulti: The number of the local optima.
#' @param hill.size
#' tMulti: The size of the hills between the local optima.
#' \cr
#' @param i
#' tSum: i, k, M are set by the system, they do not occur in the parameters the user writes in the specification
#' @param k
#' see i
#' @param M
#' see i
#' @param weights
#' tSum: Optional for the weighted sum.
#' \cr
#' @param degree
#' tNonsep: Degree of nonseparability.
#' \cr
#' @return The modified value.
#' @export
wfgTrafos = function() {} # placeholder
#' @rdname wfgTrafos
#' @export
tNone = function(y) { return(y) }
attr(tNone, "type") = "wfgTrafo"
attr(tNone, "name") = "tNone"
#' @rdname wfgTrafos
#' @export
tPoly = function(y, alpha=0.02) { # alpha >1~<1 bias toward 0~1 (1.0 would be no change). default taken from wfg9 example
if (alpha<=0) stop("alpha has to be greater than 0")
res = y^alpha
return(to01(res))
}
attr(tPoly, "type") = "wfgTrafo"
attr(tPoly, "name") = "tPoly"
#' @rdname wfgTrafos
#' @export
tFlat = function(y, value=0.8, from=0.75, to=0.85) { # defaults taken from wfg1 example
if (wfg.verbose) cat("flat ", value, from, to, "\n")
if (from>=to) stop("flat region should have: from < to")
if (value<0 | value>1) stop("'value' should be between 0 and 1")
if (from<0 | from>1) stop("'from' should be between 0 and 1")
if (to<0 | to>1) stop("'to' should be between 0 and 1")
# the following are difficult in practice, maybe disallow 0 and 1 alltogether? ie require 0<from<to<1
if (from==0 & !( value==0 & to!=1)) stop("disallowed combination of parameter-values. from=0 requires value=0 and to!=1")
if (to==1 & !(value==1 & from!=0)) stop("disallowed combination of parameter-values. to=1 requires value=1 and from!=0")
A = value
B = from
C = to
res = A + min(0, floor(y-B)) * A*(B-y)/B - min(0, floor(C-y)) * (1-A)*(y-C)/(1-C)
return(to01(res))
}
attr(tFlat, "type") = "wfgTrafo"
attr(tFlat, "name") = "tFlat"
#' @rdname wfgTrafos
#' @export
tParam = function(y, y.prime=NA, factor=0.98/49.98, starter=0.02, ender=50) { # defaults taken from paper wfg7,8,9 (c++: 0.5, 2, 10)
# y.prime's value is set in wfgTrafo()
A = factor
B = starter
C = ender
if (A<=0 | A>=1) stop("A should be such that 0<A<1")
if (B<=0) stop("B should be such that 0<B")
if (C<=B) stop("B, C should be such that B<C")
# for u() this implementation uses the identity
v = function(y.prime) {
A - (1-2*y.prime) * abs( floor(0.5-y.prime) + A )
}
res = y^( B+(C-B)*v(y.prime) )
return(to01(res))
}
attr(tParam, "type") = "wfgTrafo"
attr(tParam, "name") = "tParam"
#' @rdname wfgTrafos
#' @export
tLinear = function(y, zero.loc=0.35) { # default taken from wfg1 example
A = zero.loc
if (A<=0 || A>=1) stop("zero-location A should be within (0, 1)")
res = abs(y-A)/abs( floor(A-y)+A )
return(to01(res))
}
attr(tLinear, "type") = "wfgTrafo"
attr(tLinear, "name") = "tLinear"
#' @rdname wfgTrafos
#' @export
tDecept = function(y, opti.loc=0.35, aperture=0.001, deceptive.value=0.05) {
# always optimumValue==0 and 2 deceptive points.
# opti.loc: the location of the true optimum
# aperture: size around the true optimum. (defaults taken from wfg9 example)
# deceptive.value: the value of the local minimum of the 2 deceptive points
A = opti.loc
B = aperture
C = deceptive.value
if (A<=0 | A>=1) stop("optimum-location should be between 0 and 1 both excluding")
if (B<=0) stop("aperture around the true optimum should be >0")
if (B>0.25) stop("aperture around the true optimum should be much smaller than 1")
if (C<=0) stop("deceptive.value should be >0")
if (C>0.5) stop("deceptive.value should be much smaller than 1")
if (A-B<=0) stop("this combination of optimum-location and aperture around the true optimum does not fit in the (0, 1) interval")
if (A+B>=1) stop("this combination of optimum-location and aperture around the true optimum does not fit in the (0, 1) interval")
res1 = ( floor(y-A+B)*(1-C+(A-B)/B) ) / (A-B)
res2 = ( floor(A+B-y)*(1-C+(1-A-B)/B) ) / (1-A-B)
res = 1 + (abs(y-A)-B) * ( res1+res2+1/B)
return(to01(res))
}
attr(tDecept, "type") = "wfgTrafo"
attr(tDecept, "name") = "tDecept"
#' @rdname wfgTrafos
#' @export
tMulti = function(y, num.minima=30, hill.size=95, opti.loc=0.35) { # defaults taken from wfg9 example
A = num.minima
B = hill.size
C = opti.loc
if (A<=0 | floor(A)!=A) stop("num.minima has to be a natural number")
if (B<0) stop("hill.size have to be >=0")
if ( (4*A+2)*pi < 4*B ) stop("hill.size is too large. num.minima and hill.size should be such that (4*num.minima+2)*pi >= 4*hillsize")
if (C<=0 | C>=1) stop("optimum-location should be in the interval (0,1)")
res1 = abs(y-C) / (2* (floor(C-y)+C) )
res2 = (4*A+2)*pi*(0.5-res1)
res = ( 1+cos(res2)+4*B*res1^2 ) / (B+2)
return(to01(res))
}
attr(tMulti, "type") = "wfgTrafo"
attr(tMulti, "name") = "tMulti"
#' @rdname wfgTrafos
#' @export
tSum = function(y, i, k, M, from=NA, to=NA, weights=NA) { # i, k, M: for system only (dont specify it). defaults for from/to see paper wfg1 and wfgTransformation.R
n = length(y)
w = weights
if(i!=M) {
if (is.na(from)) from = (i-1)*k/(M-1)+1 # defaults from wfg1
if (is.na(to)) to = i*k/(M-1)
} else {
if (is.na(from)) from = k+1 # defaults from wfg1
if (is.na(to)) to = n
}
if (any(is.na(w)) && !all(is.na(w))) stop("some weights are NA. Either none or all have to be defined.")
if (all(is.na(w))) { # default from wfg1
if(i!=M) {
w = 2*( ((i-1)*k/(M-1)+1) : (i*k/(M-1)) )
} else {
w = 2*( (k+1):n )
}
#alternative? w = seq(0.5, 1.5, length.out=to-from+1) # is this a useful default? (all 1-weights would not be)
}
y2 = y[from:to]
if (all(is.na(w))) w = rep(1, length(y2))
if (length(y2)!=length(w)) {
if (length(y2)%%length(w)!=0) stop( paste("weights should have length ", length(y2), "or", length(y2),"should be a multiple" ))
if (wfg.verbose) cat("note the weights", w, "will be extended to length ", to-from+1, "\n")
}
# the wfg-paper requires that all weights be >0
if (any(w<0)) stop("weights should all be >=0")
if (sum(w)==0) stop("at least one weight should be >0")
res = sum(w*y2)/sum(w)
return(to01(res))
}
attr(tSum, "type") = "wfgTrafo"
attr(tSum, "name") = "tSum"
#' @rdname wfgTrafos
#' @export
tNonsep = function(y, degree=NA) { # degree of nonseparability. degree's default is computed in wfgTransformation()
A = degree
# the best default degree-of-non-separability is the maximum: length(y), it obeys the constraint
if (is.na(A)) A = length(y)
if (A<=0 | floor(A)!=A) stop(paste("degree of nonseparability has to be an integer greater than 0 (it is ", A, ")"))
accumOuter = 0
for(j in 1:length(y)) {
accum = y[j]
if (0<=A-2) for(i in 0:(A-2)) {
accum = accum + abs( y[j] - y[(1+j+i)%%length(y) +1] )
}
accumOuter = accumOuter + accum
}
temp = ceiling(A/2)
res = accumOuter / ( length(y)/A*temp * (1+2*A-2*temp) )
return(to01(res))
}
attr(tNonsep, "type") = "wfgTrafo"
attr(tNonsep, "name") = "tNonsep"
| 9,321 | mit |
f357c84594fc4f05c29b8384951fa2ac705e8947 | mjsmith037/mjsmith037.github.io | talks/ESA_08.2019/Code/NO-histogram.R | library(magrittr)
library(tidygraph)
library(ggraph)
library(tidyverse)
library(broom)
library(kableExtra)
tidy_p_value <- function(pvals) {
ifelse(pvals < 0.001, "< 0.001", format(pvals, digits=0, nsmall=3))
}
tidy_t_test_long <- function(dat, grouping, variable) {
factor_levels <- unique(dat[[grouping]])
t.test(filter(dat, !!sym(grouping) == factor_levels[1]) %>% .[[variable]],
filter(dat, !!sym(grouping) == factor_levels[2]) %>% .[[variable]]) %>%
tidy() %>%
select(estimate1, estimate2, p.value) %>%
set_names(c(factor_levels, "p.value"))
}
tidy_ks_test_long <- function(dat, grouping, variable) {
factor_levels <- unique(dat[[grouping]])
ks.test(filter(dat, !!sym(grouping) == factor_levels[1]) %>% .[[variable]],
filter(dat, !!sym(grouping) == factor_levels[2]) %>% .[[variable]]) %>%
tidy() %>%
mutate(p.value = tidy_p_value(p.value))
}
OVERLAP_LINK_CUTOFF <- 0.75
theme_set(theme_bw())
my_cols <- c("Bacteria -> Bacteria"="#006989",
"Bacteria -> Fungi"="#6a526b",
"Fungi -> Bacteria"="#6a526b",
"Fungi -> Fungi"="#9b1d20")
raw_network_data <- read_csv("~/Research/CompetetiveTradeoff/Data/complete_competetive_index.csv",
col_types="ccccccccdddddd") %>%
filter(leaf_x == leaf_y) %>%
mutate(isolate_x = str_c(isolate_x, type_x, sep="."),
isolate_y = str_c(isolate_y, type_y, sep=".")) %>%
do(bind_rows(transmute(., treatment = treatment_x,
leaf = leaf_x,
row_type = type_x,
row_isolate = isolate_x,
col_type = type_y,
col_isolate = isolate_y,
overlap = x_on_y_pw),
transmute(., treatment = treatment_x,
leaf = leaf_x,
row_type = type_y,
row_isolate = isolate_y,
col_type = type_x,
col_isolate = isolate_x,
overlap = y_on_x_pw))) %>%
mutate(row_type = str_extract_all(row_isolate, "\\w+$") %>% str_replace_all(c("bacto"="Bacteria",
"fungi"="Fungi")),
col_type = str_extract_all(col_isolate, "\\w+$") %>% str_replace_all(c("bacto"="Bacteria",
"fungi"="Fungi")),
type=str_c(row_type, " -> ", col_type),
treatment=str_replace_all(treatment, c("C"="Control", "N"="NPK Supplemented")))
ggplot(raw_network_data) +
aes(x=overlap) +
geom_histogram(bins=50, colour="#595959") +
facet_grid(treatment~row_type) +
scale_x_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c("0","0.25","0.5","0.75","1")) +
xlab("Niche Overlap") + ylab("Count") +
theme(legend.position="none",
plot.margin=margin(5.5, 150, 5.5, 150))
ggsave("../Figures/NO-histogram_aggregated.svg", width=8.67, height=4.5)
# build the per-type histogram once, then save it with and without the cutoff line
p <- ggplot(raw_network_data) +
  aes(x=overlap, fill=type, colour=type) +
  geom_histogram(bins=50) +
  facet_grid(treatment~type) +
  scale_colour_manual(values=my_cols) + scale_fill_manual(values=my_cols) +
  scale_x_continuous(breaks=c(0,0.25,0.5,0.75,1), labels=c("0","0.25","0.5","0.75","1")) +
  xlab("Niche Overlap") + ylab("Count") +
  theme(legend.position="none")
ggsave("../Figures/NO-histogram_no_line.svg", p, width=8.67, height=4.5)
ggsave("../Figures/NO-histogram.svg", p + geom_vline(aes(xintercept=OVERLAP_LINK_CUTOFF), size=0.75), width=8.67, height=4.5)
full_join(
raw_network_data %>% group_by(type) %>% do(tidy_t_test_long(., "treatment", "overlap")),
raw_network_data %>% group_by(type) %>% do(tidy_ks_test_long(., "treatment", "overlap")),
by="type"
) %>%
ungroup() %>%
select(-statistic, -alternative, -method) %>%
rename_all(. %>% str_replace_all(c("\\.x$"="_t_test", "\\.y$"="_ks_test"))) %>%
mutate(type = str_replace_all(type, "->", "$\\\\rightarrow$")) %>%
mutate_if(is.numeric, function(x) str_c("$", format(x, digits=3, nsmall=3), "$")) %>%
set_names(c("Interaction Type", "NPK Supplemented", "Control", "T-test p-value", "KS-test p-value")) %>%
kable(row.names=FALSE, escape=FALSE)
| 4,325 | mit |
6b2e0643f064c2e59773b18f22c273d86bc7a3c9 | cfwstem/faculty-numbers | read-data.R | utkfacultydata<- read.csv('https://docs.google.com/spreadsheets/d/1--oEDQy-DIircR-xfdwLRQ0E_9MuFCF1We423l4YrYM/pub?output=csv')
| 128 | gpl-3.0 |
619790f457cbb6e7bfa5731bd3b692bca4b37451 | BenjiHu/CSCI2963-Labs | lab7/months.R | months
remove("months")
for(mon in month.name){
print(mon)
} | 62 | mit |
40b8521a1bca6eb2bed2863347666a02930e61da | mumtahena/bild_signatures | code/Archive/example_assign_corr.R | ## running ASSIGN for raf sing
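## NOTE (assumption): basedir, c_gfp, c_raf, c_test, merge_drop() and
## assign_easy_multi() are expected to be defined by earlier scripts in this
## repository; they are not created in this file.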
trainingLabelr<-list(control=list(raf=1:12),raf=13:18)
sub_dir<-paste(basedir,"raf_25_gene_list",sep='/')
dir.create( sub_dir)
assign_easy_multi(trainingData = cbind(c_gfp,c_raf),test=c_test,trainingLabel1 = trainingLabelr,g=25,out_dir_base = sub_dir,single = 1)
sub_dir<-paste(basedir,"raf_50_gene_list",sep='/')
dir.create( sub_dir)
assign_easy_multi(trainingData = cbind(c_gfp,c_raf),test=c_test,trainingLabel1 = trainingLabelr,g=50,out_dir_base = sub_dir,single = 1)
## accumulating the ASSIGN prediction files in this block
setwd("~/Dropbox/bild_signatures/icbp_15_april_assign_adap_/")
filenames<-system("ls *gene_list/*/pathway_activity_testset*", intern=TRUE)
filenames
for(i in 1:length(filenames))
{
  f <- read.csv(filenames[i], header=1, row.names=1) # reading in the files one at a time
colnames(f)<-paste(filenames[i],colnames(f),sep='/')
if(i!=1){
print(i)
data_icbp<-cbind(data_icbp,f)
}
else{
data_icbp<-f
}
}
#head(data_icbp)
#dim(data_icbp)
colnames(data_icbp)<-gsub(pattern = "/pathway_activity_testset.csv",replacement = "",x = colnames(data_icbp))
head(data_icbp)
rownames(data_icbp)[1:7]<-c("184A1","184B5","21MT1","21MT2","21NT","21PT","600MPE")
setwd("~/Dropbox/bild_signatures//Datasets")
drugs<-read.delim("ICBP_drugs.txt", header=1, sep='\t',row.names=1)
icbp_drug<-merge_drop(data_icbp,drugs)
colnames(icbp_drug)
cor_mat=p_mat=matrix(0,length(filenames),90)
rownames(cor_mat)=rownames(p_mat)=colnames(icbp_drug)[1:length(filenames)]
colnames(cor_mat)=colnames(p_mat)=colnames(icbp_drug)[(length(filenames)+11):ncol(icbp_drug)]
for(i in 1:length(filenames)){
for(j in 1:90){
temp=cor.test(icbp_drug[,i],icbp_drug[,(j+length(filenames)+10)],use="pairwise",method="spearman")
print(j)
print(temp)
cor_mat[i,j]=temp$estimate
p_mat[i,j]=temp$p.value
}
}
write.table(cor_mat,"single_cor_drug_mat_4_21.txt",col.names = NA,quote=F,sep='\t')
colnames(p_mat)=paste(colnames(p_mat),"p_value",sep="_")
write.table(p_mat,"single_p_drug_mat_4_21.txt",col.names = NA,quote=F,sep='\t')
cor_p_mat<-cbind(cor_mat,p_mat)
order(colnames(cor_p_mat))
cor_p_mat<-cor_p_mat[,order(colnames(cor_p_mat))]
write.table(cor_p_mat,"single_cor_p_mat_4_21.txt",col.names = NA,quote=F,sep='\t')
| 2,274 | mit |
7e6d4e7146f6a12e90065ecead5115b94d21122b | cran/RSAGA | R/RSAGA-core.R | #' Internal functions that determine OS-specific path in which modules might be located.
#'
#' @name rsaga.get.modules.path
#' @rdname rsaga.get.modules.path
#' @param sysname character: name of the operating system, determined by default by [base::Sys.info()]: e.g., `"Windows"`, `"Linux"`, `"Darwin"` (for Mac OSX), or `"FreeBSD"`
#' @param saga.path character: path with SAGA GIS binaries, as determined (e.g.) by `rsaga.default.path`
#' @param root root path to SAGA GIS installation
#' @param cmd name of the SAGA command line program
#' @export
rsaga.get.modules.path = function(sysname = Sys.info()["sysname"], saga.path, root, cmd)
{
modules = NULL
if (sysname == "Windows") {
# Module folder changed with SAGA Version 3+
if (file.exists(file.path(saga.path, "modules"))) {
modules = file.path(saga.path, "modules")
} else if (file.exists(file.path(saga.path, "tools"))){
modules = file.path(saga.path, "tools")
}
# Stop if no modules folder found
if (is.null(modules)) {
stop("SAGA modules not found in the following windows default paths.\n",
paste0(saga.path, "/modules"), "\n", paste0(saga.path, "/tools"))
}
} else { # Linux, Unix, MacOS
# Look in likely locations for modules folder
module.defaults.paths = c("/usr/lib/x86_64-linux-gnu/saga", "/usr/lib/saga",
"/usr/lib64/saga", "/usr/local/lib/saga", "/usr/local/lib64/saga",
"/usr/local/Cellar/saga-gis-lts/2.3.2/lib/saga", Sys.getenv("SAGA_MLB")[[1]])
for (pa in module.defaults.paths) {
if (dir.exists(file.path(pa))) {
modules = pa
}
}
# Search for modules folder
if(is.null(modules)) {
modules = list.files(path = root, pattern = "libio_gdal", recursive = TRUE, full.names = TRUE)[1]
# Clean modules path
modules = gsub("/libio_gdal.*", x = modules, replacement = "")
}
if (is.null(modules)) {
stop("SAGA modules not found")
}
}
return(modules)
}
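# Usage sketch (hypothetical install location):
# rsaga.get.modules.path("Windows", saga.path = "C:/Progra~1/SAGA-GIS")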
#' Internal function that sets the RSAGA Geoprocessing Environment manually
#' @name rsaga.set.env
#' @param workspace path of the working directory for SAGA; defaults to the current directory (`"."`).
#' @param cmd name of the SAGA command line program; defaults to `saga_cmd.exe`, its name under Windows
#' @param path path in which to find `cmd`; `rsaga.env` is usually able to find SAGA on your system if it is installed; see Details.
#' @param modules path in which to find SAGA libraries; see Details
#' @param version optional character string: SAGA GIS (API) version, e.g. `"2.0.8"`; if missing, a call to [rsaga.get.version()] is used to determine version number of SAGA API
#' @param cores optional numeric argument, or `NA`: number of cores used by SAGA GIS; supported only by SAGA GIS 2.1.0 (and higher), ignored otherwise (with a warning). Multicore-enabled SAGA GIS modules such as the one used by [rsaga.pisr()] seem to run in multicore mode by default when this argument is not specified, therefore `cores` should only be specified to use a smaller number of cores than available on a machine.
#' @param parallel optional logical argument (default: `FALSE`): if `TRUE`, run RSAGA functions that are capable of parallel processing in parallel mode; note that this is completely independent of the behaviour of SAGA GIS (which can be controlled using the `cores` argument); currently only some RSAGA functions support parallel processing (e.g., [pick.from.ascii.grid()] or [rsaga.get.modules()]). `parallel=TRUE` requires that a parallel backend such as \pkg{doSNOW} or \pkg{doMC} is available and has been started prior to calling any parallelized RSAGA function, otherwise warnings may be generated
#' @export
#'
rsaga.set.env = function(workspace = NULL, cmd = NULL, path = NULL, modules = NULL,
version = NA, cores = NULL, parallel = NULL) {
  env = list(workspace = workspace, cmd = cmd, path = path, modules = modules,
             version = version, cores = cores, parallel = parallel)
return(env)
}
#' Function to set up RSAGA geoprocessing environment:
#' Set up the RSAGA Geoprocessing Environment
#'
#' `rsaga.env` creates a list with system-dependent information on SAGA path, module path and data (working) directory. This kind of a list is required by most RSAGA geoprocessing functions and is referred to as the 'RSAGA geoprocessing environment.'
#' @name rsaga.env
#' @param workspace path of the working directory for SAGA; defaults to the current directory (`"."`).
#' @param cmd name of the SAGA command line program; defaults to `saga_cmd.exe`, its name under Windows
#' @param path path in which to find `cmd`; `rsaga.env` is usually able to find SAGA on your system if it is installed; see Details.
#' @param modules path in which to find SAGA libraries; see Details
#' @param version optional character string: SAGA GIS (API) version, e.g. `"2.0.8"`; if missing, a call to [rsaga.get.version()] is used to determine version number of SAGA API
#' @param cores optional numeric argument, or `NA`: number of cores used by SAGA GIS; supported only by SAGA GIS 2.1.0 (and higher), ignored otherwise (with a warning). Multicore-enabled SAGA GIS modules such as the one used by [rsaga.pisr()] seem to run in multicore mode by default when this argument is not specified, therefore `cores` should only be specified to use a smaller number of cores than available on a machine.
#' @param parallel optional logical argument (default: `FALSE`): if `TRUE`, run RSAGA functions that are capable of parallel processing in parallel mode; note that this is completely independent of the behaviour of SAGA GIS (which can be controlled using the `cores` argument); currently only some RSAGA functions support parallel processing (e.g., [pick.from.ascii.grid()] or [rsaga.get.modules()]). `parallel=TRUE` requires that a parallel backend such as \pkg{doSNOW} or \pkg{doMC} is available and has been started prior to calling any parallelized RSAGA function, otherwise warnings may be generated
#' @param root optional root path to SAGA GIS installation. It is used if RSAGA performs a search for the SAGA command line program (see Details). If left empty, on Windows `C:/` is used, on Linux `/usr` and on Mac OS `/usr/local/Cellar`.
#' @param lib.prefix character string: a possible (platform-dependent) prefix for SAGA GIS library names; if missing (recommended), a call to [rsaga.lib.prefix()] tries to determine the correct prefix, e.g. `""` on Windows systems and `"lib"` on non-Windows systems with SAGA GIS pre-2.1.0. Try specifying `""` or `"lib"` manually if this causes problems, and contact the package maintainer if the detection mechanism fails on your system (indicate your `Sys.info()["sysname"]` and your SAGA GIS version)
#'
#' @details IMPORTANT: Unlike R functions such as [options()], which changes and saves settings somewhere in a global variable, [rsaga.env()] does not actually 'save' any settings, it simply creates a list that can (and has to) be passed to other `rsaga.*` functions. See example below.
#'
#' We strongly recommend installing SAGA GIS on Windows in `C:/Program Files/SAGA-GIS`, `C:/Program Files (x86)/SAGA-GIS`,`C:/SAGA-GIS`, `C:/OSGeo4W64/apps/saga-lts` or `C:/OSGeo4W64/apps/saga`.
#' If you use a standalone version of SAGA GIS in a different path, please refer to section 2 below.
#'
#' There are three ways to create a RSAGA environment with `rsaga.env`:
#'
#' 1) No paths to the SAGA command line program and to the SAGA modules are specified by the user through the arguments `path` and `modules`.
#' On Windows `rsaga.env` tries to find the SAGA command line program in the following folders
#' `C:/Progra~1/SAGA-GIS`, `C:/Progra~2/SAGA-GIS`, `C:/SAGA-GIS`, `C:/OSGeo4W64/apps/saga-lts` and `C:/OSGeo4W64/apps/saga`.
#' If this fails, an attempt is made to find the SAGA command line program by searching `C:/`
#' (the drive letter can be changed with the `root` argument).
#' The subfolder `tools` (SAGA Version < 3.0.0 subfolder `modules`) is checked for the SAGA module libraries.
#' On Unix systems `rsaga.env` tries to find the SAGA command line program in various default paths.
#' Additionally, on Unix systems the PATH environment variable is checked for the path to the SAGA command line program
#' and the SAGA_MLB environment variable is checked for the SAGA module libraries.
#' If this fails, a search for the SAGA command line program and the module libraries is performed on `/usr`.
#' If no SAGA command line program can be found, please specify the paths as described in section 2.
#'
#' 2) The user specifies both the path to the SAGA command line program and
#' to the SAGA module libraries. Both paths are checked if they are valid. Use this if SAGA GIS is located in a non-standard path
#' or if you use more than one SAGA GIS version.
#'
#' 3) The user specifies only the path to the SAGA command line program. A search for the SAGA modules is performed as described in section 1.
#'
#' @return A list with components `workspace`, `cmd`, `path`, `modules`, `version`, `cores` and `parallel` with values as passed to `rsaga.env` or default values as described in the Details section.
#' @author Alexander Brenning and Marc Becker
#' @note Note that the default `workspace` is `"."`, not `getwd()`; i.e. the default SAGA workspace folder is not fixed, it changes each time you change the R working directory using `setwd`.
#' @seealso [rsaga.get.version()]
#' @examples
#' \dontrun{
#' # Check the default RSAGA environment on your computer:
#' myenv <- rsaga.env()
#' myenv
#' # SAGA data in C:/sagadata, binaries in C:/SAGA-GIS, modules in C:/SAGA-GIS/modules:
#' myenv <- rsaga.env(workspace="C:/sagadata", path="C:/SAGA-GIS")
#' # Unix: SAGA in /usr/bin (instead of the default /usr/local/bin),
#' # and modules in /use/lib/saga:
#' # myenv <- rsaga.env(path="/usr/bin")
#' # Use the 'myenv' environment for SAGA geoprocessing:
#' rsaga.hillshade("dem","hillshade",env=myenv)
#' # ...creates (or overwrites) grid "C:/sagadata/hillshade.sgrd"
#' # derived from digital elevation model "C:/sagadata/dem.sgrd"
#'
#' # Same calculation with different SAGA version:
#' # (I keep several versions in SAGA-GIS_2.0.x folders:)
#' myenv05 = rsaga.env(path = "C:/Progra~1/SAGA-GIS_2.0.5")
#' rsaga.hillshade("dem","hillshade205",env=myenv05)
#' }
#' @keywords spatial interface
#' @export
#'
rsaga.env = function(path = NULL, modules = NULL, workspace = ".",
cmd = ifelse(Sys.info()["sysname"] == "Windows", "saga_cmd.exe", "saga_cmd"),
version = NULL, cores, parallel = FALSE, root = NULL, lib.prefix)
{
# Set root path depending on operating system
if (is.null(root)) {
if (Sys.info()["sysname"] == "Windows") {
root = "C:/"
} else if (Sys.info()["sysname"] == "Darwin") {
root = "/usr/local/Cellar"
} else {
root = '/usr'
}
}
# Clean user specified paths
if (!is.null(path)) {
path = gsub(pattern = "\\/$", x = path, "")
}
if (!is.null(modules)) {
modules = gsub(pattern = "\\/$", x = modules, "")
}
workspace = gsub(pattern = "\\/$", x = workspace, "")
# Check workspace
if (!dir.exists(workspace)) {
stop("Invalid workspace path ", workspace)
}
# Check paths specified by user. Option 2 und 3 s. details
if (!is.null(path)) {
cat("Verify specified path to SAGA command line program...\n")
# Check if SAGA command line path is valid
if (!file.exists(file.path(path, cmd))) {
stop("SAGA command line program ", cmd, " not found in the specified path:\n", path)
}
# Check if modules exists
if (!is.null(modules)) {
cat("Verify specified path to SAGA modules...\n")
# Check if modules path is valid
if (!file.exists(file.path(modules))) {
stop("SAGA modules not found in the specified path:\n ", modules)
}
cat("Done")
      # If modules path is empty but cmd path is set, try to find modules path
} else {
cat("Found SAGA command line program. Search for not specified SAGA modules path... \n")
# Search for modules path
modules = rsaga.get.modules.path(saga.path = path)
# Check if modules path is valid
if (!file.exists(modules)) {
stop("SAGA modules not found\n")
}
cat("Done\n")
}
}
# Searching for SAGA command line program in default paths. Option 1 s. details
if (is.null(path)) {
cat("Search for SAGA command line program and modules... \n")
# Try to find SAGA command line program in windows default paths
if (Sys.info()["sysname"] == "Windows") {
# Windows defaults paths
windows.defaults.paths = c("C:/Progra~1/SAGA-GIS", "C:/Progra~2/SAGA-GIS", "C:/SAGA-GIS",
"C:/OSGeo4W64/apps/saga", "C:/OSGeo4W64/apps/saga-ltr")
# Check if one path is valid
for (pa in windows.defaults.paths) {
if (file.exists(file.path(pa, cmd))) {
path = pa
}
}
# If no default path is correct, search for SAGA GIS on entire drive
if (is.null(path)) {
cat("SAGA command line program not found in the following default paths:\n",
paste0(windows.defaults.paths, collapse = "\n"),
"\nSearch on the entire hard drive...\n", sep="")
# Search starts in root directory
path.list = list.files(path = root, pattern = cmd, recursive = TRUE, full.names = TRUE)
# Remove cmd name from path
path.list = gsub(paste0(".{",nchar(cmd),"}$"), "", path.list)
# Stop if no saga_cmd.exe is found
if (length(path.list) == 0) {
stop("SAGA command line program not found on ", root, "\n")
}
# If more than one version is available, select SAGA GIS with highest version number
if (length(path.list) >= 2) {
cat(length(path.list), "SAGA GIS versions detected. Latest version is selected.\n")
version.numbers = c()
for (pa in path.list) {
# Dummy enviroment to get the version number
dummy.env = rsaga.set.env(path = pa, cmd = cmd, workspace = ".",
modules = paste0(pa, "/NULL"))
version.numbers = c(version.numbers, rsaga.get.version(dummy.env))
}
# Select path with the latest SAGA GIS version
path = path.list[which(version.numbers == max(version.numbers))]
# Choose one if multiple versions with the same version number are available
path = path[1]
}
}
} else {
      # Unix default paths and path from PATH environment variable
unix.defaults.paths = c("/usr/bin", "/usr/local/bin",
"/usr/local/Cellar/saga-gis-lts/2.3.2/bin",
sub(paste0('/', cmd), '', system2('which', args = cmd, stdout = TRUE)))
# Check if one path is valid
for (pa in unix.defaults.paths) {
if (file.exists(file.path(pa, cmd))) {
path = pa
}
}
# If no default path is correct, search for SAGA GIS on entire drive
if(is.null(path)) {
# Try to find SAGA command line program on other os
path = list.files(path = root, pattern = paste0(cmd,"$"), recursive = TRUE,
full.names = TRUE)[1]
# Remove cmd name from path
path = gsub(paste0(".{",nchar(cmd),"}$"), '', path)
# Stop if no saga_cmd is found
if (is.na(path)) {
stop("SAGA command line program not found on ", root, "\n")
}
}
}
# Try to find modules path
modules = rsaga.get.modules.path(saga.path = path, root = root, cmd = cmd)
cat("Done\n")
}
  # cmd has inherited a 'sysname' name from the result of Sys.info()
cmd = unname(cmd)
# (optional) number of cores:
if (missing(cores)) {
cores = NA
} else {
stopifnot(cores >= 0)
}
# Set RSAGA geoprocessing environment
env = rsaga.set.env(workspace = workspace, cmd = cmd, path = path, modules = modules,
version = NA, cores = cores, parallel = parallel)
# Determine SAGA API version, if not specified by caller
if (missing(version)) {
version = rsaga.get.version(env = env)
env$version = version
}
# Check cores
if (!is.na(env$cores) & (is.na(env$version) | (substr(env$version, 1, 4) == "2.0."))) {
warning("'cores' argument not supported by SAGA GIS versions <2.1.0; ignoring 'cores' argument\n",
"Use SAGA GIS 2.1.0+ for multicore geoprocessing.")
env$cores = NA
}
if (missing(lib.prefix)) {
lib.prefix = rsaga.lib.prefix(env = env)
env$lib.prefix = lib.prefix
}
return(env)
}
#' Determine prefix for SAGA GIS library names
#'
#' Internal function that determines the possible prefix for SAGA GIS library names - relevant for non-Windows SAGA GIS pre-2.1.0.
#'
#' @name rsaga.lib.prefix
#' @param env list, setting up a SAGA geoprocessing environment as created by [rsaga.env()].
#' @details Some non-Windows versions of `saga_cmd` require library names with a `"lib"` prefix, e.g. `libio_grid` instead of `io_grid`. This function, which is called by [rsaga.env()] tries to guess this behaviour based on the operating system and SAGA GIS version.
#' @return A character string, either `""` or `"lib"`.
#' @seealso [rsaga.env()]
#' @examples
#' \dontrun{
#' env = rsaga.env()
#' # obtained by a call to rsaga.lib.prefix:
#' env$lib.prefix
#'
#' # more explicitly:
#' rsaga.lib.prefix(env=env)
#' }
#' @keywords spatial interface
#' @export
rsaga.lib.prefix = function(env) {
lib.prefix = "lib"
if ((Sys.info()["sysname"] == "Windows")) {
lib.prefix = ""
} else if ((Sys.info()["sysname"] == "Darwin")) {
lib.prefix = ""
} else if (!is.na(env$version)) {
if (substr(env$version,1,4) == "2.1." | substr(env$version,1,4) == "2.2.")
lib.prefix = ""
}
return(lib.prefix)
}
#' Determine SAGA GIS version
#'
#' Determine SAGA GIS version.
#'
#' @name rsaga.get.version
#' @param env list, setting up a SAGA geoprocessing environment as created by [rsaga.env()]. Note that `version=NA` ensures that [rsaga.env()] won't call `rsaga.get.version` itself.
#' @param ... additional arguments to [rsaga.geoprocessor()]
#' @details The function first attempts to determine the SAGA version directly through a system call `saga_cmd --version`, which is supported by SAGA GIS 2.0.8+. If this fails, `saga_cmd -h` is called, and it is attempted to extract the version number of the SAGA API from the output generated, which works for 2.0.4 - 2.0.7.
#' @return A character string defining the SAGA GIS (API) version. E.g., `"2.0.8"`.
#' @seealso [rsaga.env()]
#' @examples
#' \dontrun{
#' myenv <- rsaga.env()
#' myenv$version
#' # rsaga.env actually calls rsaga.get.version:
#' rsaga.get.version()
#'
#' # I keep several versions of SAGA GIS in SAGA-GIS_2.0.x folders:
#' myenv05 = rsaga.env(path = "C:/Progra~1/SAGA-GIS_2.0.5", version = NA)
#' # Check if it's really version 2.0.5 as suggested by the folder name:
#' rsaga.get.version(env = myenv05)
#' }
#' @keywords spatial interface
#' @export
rsaga.get.version = function(env = rsaga.env(version=NA), ...)
{
version = NA
reduce.version = function(x) {
x = gsub(" ", "", x)
# reduce e.g. 2.1.0(32bit) to 2.1.0; added 2013-05-28
x = strsplit(x, '(', fixed=TRUE)[[1]][1]
return(x)
}
# Added 27-Dec-2011:
# saga_cmd --version (only works in SAGA GIS 2.0.8+)
out = rsaga.geoprocessor(lib = NULL, prefix = "--version", show.output.on.console = FALSE,
flags = NULL, warn = -1, env = env, check.parameters = FALSE, ...)
if (all(out != "error: module library not found [--version]")) {
if (length(out >= 1)) {
if (any(sel <- (substr(out,1,9) == "SAGA API:"))) {
# This first option was mentioned by Volker Wichmann on [saga-gis-developer]
# although SAGA GIS 2.0.8 for Windows uses a different output format:
out = gsub("SAGA API:", "", out[sel][1], fixed = TRUE)
out = reduce.version(out)
return(out)
} else if (any(sel <- (substr(out,1,13) == "SAGA Version:"))) {
# Output format used by SAGA GIS 2.0.8 for Windows:
# SAGA Version: 2.0.8
out = gsub("SAGA Version:", "", out[sel][1], fixed = TRUE)
out = reduce.version(out)
return(out)
}
}
}
# End added code
# Older SAGA GIS versions:
# ------------------------
  # TODO: check whether this parsing also works on Unix
# Retrieve basic help page of saga_cmd:
out = rsaga.geoprocessor(lib = NULL, prefix = "-h", show.output.on.console = FALSE,
flags = NULL, warn = -1, env = env, check.parameters = FALSE, ...)
# Process the help page line by line in order to find lines starting
# with "SAGA API " (or "SAGA CMD ", if no "SAGA API " line available)
for (i in 1:length(out)) {
if (substr(out[i],1,9) == "SAGA API ") {
if (as.numeric(substr(out[i],10,10)) > 0) {
version = gsub(" ", "", substr(out[i],10,nchar(out[i])), fixed = TRUE)
break
}
} else if (substr(out[i],1,9) == "SAGA CMD ") {
if ( is.na(version) & any( as.character(0:9) == substr(out[i],10,10) ) ) {
version = gsub(" ", "", substr(out[i],10,nchar(out[i])), fixed = TRUE)
# no 'break' here because we're still hoping to find info on SAGA API version
# SAGA 2.0.4 only shows SAGA CMD version = 2.0.4, however *some* versions
# distinguish between SAGA API and SAGA CMD versions.
}
}
}
return(version)
}
#' Find SAGA libraries and modules
#'
#' These functions list the SAGA libraries (`rsaga.get.libraries`) and modules (`rsaga.get.lib.modules`, `rsaga.get.modules`) available in a SAGA installation, and allow to perform a full-text search among these functions.
#' @name rsaga.get.modules
#' @param text character string to be searched for in the names of available libraries and/or modules
#' @param search.libs logical (default `TRUE`); see `search.modules`
#' @param search.modules logical (default `TRUE`): should `text` be searched for in library and/or module names?
#' @param ignore.case logical (default `FALSE`): should the text search in library/module names be case sensitive?
#' @param lib character string with the name of the library in which to look for modules
#' @param libs character vector with the names of libraries in which to look for modules; if missing, all libraries will be processed
#' @param module module name or numeric code
#' @param modules optional list: result of `rsaga.get.modules`; if missing, a list of available modules will be retrieved using that function
#' @param env a SAGA geoprocessing environment as created by [rsaga.env()]
#' @param path path of SAGA library files (`modules` subfolder in the SAGA installation folder); defaults to the path determined by [rsaga.env()].
#' @param dll file extension of dynamic link libraries
#' @param interactive logical (default `FALSE`): should modules be returned that can only be executed in interactive mode (i.e. using SAGA GUI)?
#' @param parallel logical (defaults to `env$parallel`): if `TRUE`, run in parallel mode; requires a parallel backend such as \pkg{doSNOW} or \pkg{doMC}
#' @param ... currently only `interactive` to be passed on to `rsaga.get.lib.modules`
#' @return `rsaga.get.libraries` returns a character vector with the names of all SAGA libraries available in the folder `env$modules`.
#'
#' `rsaga.get.lib.modules` returns a `data.frame` with:
#' \describe{
#' \item{name}{the names of all modules in library `lib`,}
#' \item{code}{their numeric identifiers,}
#' \item{interactive}{a logical variable indicating whether a module can only be executed in interactive (SAGA GUI) mode.}
#' }
#'
#' `rsaga.get.modules` returns a list with, for each SAGA library in `libs`, a `data.frame` with module information as given by `rsaga.get.lib.modules`. If `libs` is missing, all modules in all libraries will be retrieved.
#'
#' @note For information on the usage of SAGA command line modules, see [rsaga.get.usage()], or [rsaga.html.help()] (in SAGA GIS 2.1.0+), or the RSAGA interface function, if available.
#' @seealso [rsaga.get.usage()], [rsaga.html.help()], [rsaga.geoprocessor()], [rsaga.env()]
#' @examples
#' \dontrun{
#' # make sure that 'rsaga.env' can find 'saga_cmd.exe'
#' # before running this:
#' rsaga.get.libraries()
#' # list all modules in my favorite libraries:
#' rsaga.get.modules(c("io_grid", "grid_tools", "ta_preprocessor",
#' "ta_morphometry", "ta_lighting", "ta_hydrology"))
#' # list *all* modules (quite a few!):
#' # rsaga.get.modules(interactive=TRUE)
#'
#' # find modules that remove sink from DEMs:
#' rsaga.search.modules("sink")
#' # find modules that close gaps (no-data areas) in grids:
#' rsaga.search.modules("gap")
#' }
#' @keywords spatial interface
#' @import plyr
#' @export
rsaga.get.modules = function(libs, env = rsaga.env(),
interactive = FALSE, parallel = env$parallel)
{
if (missing(libs)) libs = rsaga.get.libraries(env$modules)
op = options(warn = -1) # llply would generate two warnings
on.exit(options(op))
res = llply(libs, .fun=rsaga.get.lib.modules, env = env,
interactive = interactive, .parallel = parallel)
names(res) = libs
return(res)
}
#' @rdname rsaga.get.modules
#' @name rsaga.get.libraries
#' @export
rsaga.get.libraries = function(path = rsaga.env()$modules, dll)
{
if (missing(dll)) {
dll = .Platform$dynlib.ext
if (Sys.info()["sysname"] == "Darwin") dll = ".dylib"
}
dllnames = dir(path,paste("^.*\\",dll,"$",sep=""))
if (Sys.info()["sysname"] != "Windows") ### %in% c("Linux","Darwin","FreeBSD"))
if (all(substr(dllnames,1,3) == "lib"))
dllnames = substr(dllnames, 4, nchar(dllnames)) # remove the "lib"
return( gsub(dll,"",dllnames,fixed=TRUE ) )
}
#' @rdname rsaga.get.modules
#' @name rsaga.get.lib.modules
#' @export
rsaga.get.lib.modules = function(lib, env=rsaga.env(), interactive=FALSE)
{
res = NULL
# changed by Rainer Hurling, 2013-07-23:
###if ( lib == "opencv" & (is.na(env$version) | (env$version == "2.0.4" | env$version == "2.0.5" | env$version == "2.0.6")) ) {
if ( lib == "opencv" & env$version %in% c(NA,"2.0.4","2.0.5","2.0.6") ) {
warning("skipping library 'opencv' because it produces an error\n",
" when requesting its module listing in SAGA version 2.0.4 - 2.0.6)")
# return an empty data.frame of the same format as in the successful situation:
return( data.frame( code = numeric(), name = character(), interactive = logical() ) )
}
rawres = rsaga.geoprocessor(lib, module=NULL, env=env,
intern=TRUE, show.output.on.console=FALSE, flags=NULL, invisible=TRUE,
reduce.intern=FALSE, check.module.exists=FALSE, check.parameters = FALSE, warn = -1)
wh = which( gsub(" ","",tolower(rawres)) %in% c("availablemodules:","executablemodules:","modules:", "tools:") )
# Fix for SAGA 6.2.0
# It outputs additionally the tool chains of the library. The list also starts with the string 'Tools:'
if(length(wh) == 2) {
empty_elements = which(rawres == '')
empty_elements = empty_elements[empty_elements > wh[1]]
wh_2 = empty_elements[1]-1
wh = wh[1]
} else {
wh_2 = length(rawres)
}
if (length(wh) > 0) {
rawres = rawres[ (wh[length(wh)]+1) : wh_2 ]
rawres = rawres[ rawres != "" ]
rawres = rawres[ rawres != "type -h or --help for further information" ]
# inserted tolower() for SAGA 2.1.0 RC1:
rawres = rawres[ tolower(rawres) != "error: module" ]
rawres = rawres[ tolower(rawres) != "error: tool" ]
rawres = rawres[ tolower(rawres) != "error: select a tool" ]
}
if (length(wh) > 0) {
# String to split output changed from '\t- ' to '\t' with SAGA version 2.3.1
if (any(c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8",
"2.1.0","2.1.1","2.1.2","2.1.3","2.1.4",
"2.2.0","2.2.1","2.2.2","2.2.3") == env$version)) {
rawres = strsplit(rawres,"\t- ")
} else {
rawres = strsplit(rawres,"\t")
}
mcodes = c()
mnames = c()
minteracs = c()
for (descr in rawres) {
# Remove box brackets from SAGA > 6.2 output
descr[1] <- gsub(']', '', descr[1], fixed = TRUE)
descr[1] <- gsub('[', '', descr[1], fixed = TRUE)
mygrep = c( grep("[",descr[1],fixed=TRUE), grep("]",descr[1],fixed=TRUE),
grep("[interactive]",descr[2],fixed=TRUE) )
minterac = (length(mygrep) > 0)
# skip interactive modules if only interactive ones are allowed:
if (!minterac | interactive) {
mcode = gsub("[","",gsub("]","",gsub(" ","",descr[1]),fixed=TRUE),fixed=TRUE)
mname = gsub("[interactive] ","",descr[2],fixed=TRUE)
mcodes = c(mcodes, as.numeric(mcode))
mnames = c(mnames, mname)
minteracs = c(minteracs, minterac)
}
}
#if (length(mcodes) > 0)
res = data.frame(code=mcodes, name=mnames, interactive=minteracs)
}
return(res)
}
#' @rdname rsaga.get.modules
#' @name rsaga.module.exists
#' @export
rsaga.module.exists = function(libs, module, env = rsaga.env(), ...) {
if (missing(libs)) libs = rsaga.get.libraries(env$modules)
wh = "name"
if (is.numeric(module)) wh = "code"
for (i in 1:length(libs)) {
modules = rsaga.get.lib.modules(libs[i], env = env, ...)
if (!is.null(modules))
if (any(modules[,wh] == module))
return(TRUE)
}
return(FALSE)
}
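# e.g. (assuming a local SAGA installation can be found by rsaga.env()):
# rsaga.module.exists("ta_morphometry", 0)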
#' @rdname rsaga.get.modules
#' @name rsaga.search.modules
#' @export
rsaga.search.modules = function(text, modules, search.libs=TRUE, search.modules=TRUE,
env=rsaga.env(), ignore.case=TRUE, ...)
{
pattern = paste("^.*",text,sep="")
lib = NULL
mod = NULL
if (search.libs) {
lib.nm = rsaga.get.libraries(path=env$modules)
wh.lib = grep(pattern,lib.nm,ignore.case=ignore.case)
lib = lib.nm[wh.lib]
}
if (search.modules) {
if (missing(modules))
modules = rsaga.get.modules(env=env,...)
mod.nm = unlist(sapply(modules,function(x) if (is.atomic(x)) NULL else as.character(x$name)),use.names=FALSE)
mod.libs = sapply(modules,function(x) if (is.atomic(x)) 0 else nrow(x))
mod.libs = rep(names(mod.libs),mod.libs)
wh.mod = grep(pattern,mod.nm,ignore.case=ignore.case)
mod = data.frame( lib=mod.libs[wh.mod], module=mod.nm[wh.mod] )
}
return( list( lib = lib, modules = mod ) )
}
#' Usage of SAGA command line modules
#'
#' `rsaga.get.usage` provides information on the usage of and arguments required by SAGA command line modules.
#'
#' @name rsaga.get.usage
#' @param lib name of the SAGA library
#' @param module name or numeric identifier of SAGA module in library `lib`
#' @param env a SAGA geoprocessing environment as created by [rsaga.env()]
#' @param show logical (default: `TRUE`); display usage in the R console?
#'
#' @details This function is intended to provide information required to use the
#' [rsaga.geoprocessor()] and for writing your own high-level interface
#' function for SAGA modules. R--SAGA interfaces already exist for some SAGA modules,
#' e.g. [rsaga.hillshade()], [rsaga.local.morphometry()], but there
#' are many more.
#' @return The character vector with usage information is invisibly returned.
#' @seealso [rsaga.html.help()], [rsaga.geoprocessor()], [rsaga.env()], [rsaga.get.modules()]
#' @examples
#' \dontrun{
#' rsaga.get.usage("io_grid",1)
#' rsaga.get.usage("ta_preprocessor",2)
#' rsaga.get.usage("ta_morphometry",0)
#' # in SAGA GIS 2.1.0+, compare:
#' rsaga.html.help("io_grid",1)
#' # etc.
#' }
#' @keywords spatial interface
#' @export
rsaga.get.usage = function(lib, module, env=rsaga.env(), show=TRUE)
{
if (is.function(lib))
lib = deparse(substitute(lib))
if (substr(lib,1,6)=="rsaga.") {
if (lib=="rsaga.fill.sinks") {
warning("'rsaga.fill.sinks' uses three modules from the 'ta_preprocessor' library:\n",
" for 'method=\"planchon.darboux.2001\"': module 2\n",
" for 'method=\"wang.liu.2006\"': module 3\n",
" for 'method=\"xxl.wang.liu.2006\"': module 4\n",
"using 'module=2'\n")
}
lib = switch(lib,
rsaga.close.gaps = list(lib="grid_tools", module=7),
rsaga.esri.to.sgrd = list(lib="io_grid", module=1),
rsaga.sgrd.to.esri = list(lib="io_grid", module=0),
rsaga.parallel.processing = list(lib="ta_hydrology", module=0),
rsaga.local.morphometry = list(lib="ta_morphometry", module=0),
rsaga.slope = list(lib="ta_morphometry", module=0),
rsaga.aspect = list(lib="ta_morphometry", module=0),
rsaga.curvature = list(lib="ta_morphometry", module=0),
rsaga.plan.curvature = list(lib="ta_morphometry", module=0),
rsaga.profile.curvature = list(lib="ta_morphometry", module=0),
rsaga.sink.route = list(lib="ta_preprocessor", module=0),
rsaga.sink.removal = list(lib="ta_preprocessor", module=1),
rsaga.fill.sinks = list(lib="ta_preprocessor", module=2),
rsaga.contour = list(lib="shapes_grid", module=5),
rsaga.hillshade = list(lib="ta_lighting", module=0),
#rsaga.solar.radiation = list(lib="ta_lighting", module=2),
#rsaga.insolation = list(lib="ta_lighting", module=3),
rsaga.filter.simple = list(lib="grid_filter", module=0),
rsaga.filter.gauss = list(lib="grid_filter", module=1) )
module = lib$module
lib = lib$lib
}
res = NULL
usage = rsaga.geoprocessor(lib, module, param = list(h=""), env = env,
intern = TRUE, show.output.on.console = FALSE, flags = NULL,
check.module.exists = FALSE, check.parameters = FALSE, warn = -1)
skip = 0
while ((length(usage)>(1+skip)) & (substr(usage[1+skip],1,6)!="Usage:")) {
if (substr(usage[1+skip],1,8) %in%
c("SAGA CMD","Copyrigh","library ","module n","________")) {
skip = skip + 1
} else {
if (skip == 0) {
usage = usage[ 2 : length(usage) ]
} else {
usage = usage[ c(1:skip, (skip+2):length(usage)) ]
}
}
}
if (length(usage) > 1) {
res = usage[ 1 : (length(usage)-1) ]
if (substr(res[length(res)],1,6)=="______") {
res = c(res, "Usage description not available (interactive module?)")
warning("usage description not available for module ",
module, " in library ", lib, " (interactive module?)")
}
} else
warning("usage description not available for module ",
module, "\nin library ", lib, " (interactive module?)")
if (show) {
if (!is.null(res))
cat(paste(res,collapse="\n"),"\n\n")
}
invisible(res)
}
#' HTML help on a SAGA module or library
#'
#' This function opens SAGA's HTML documentation for the specified library or module. Works with SAGA GIS 2.1.0(+), for earlier versions a web page with the SAGA GIS wiki is displayed.
#'
#' @name rsaga.html.help
#' @param lib name of the SAGA library, or one of the `rsaga.` module functions such as [rsaga.hillshade()]
#' @param module name or numeric identifier of SAGA module in library `lib`; `module=NULL` takes you to the main help page of the SAGA library `lib`
#' @param use.program.folder logical; if `TRUE` (the default), attempt to write SAGA GIS documentation to a `"help"` subfolder of `env$path`; the `"help"` folder is created if it doesn't exist. If `FALSE`, create SAGA GIS documentation files in this R session's temporary folder as obtained using `tempdir()`
#' @param env a SAGA geoprocessing environment as created by [rsaga.env()]
#' @param ... additional arguments to [browseURL()]
#' @details Requires SAGA GIS 2.1.0(+), with earlier versions use [rsaga.get.usage()].
#' @examples
#' \dontrun{
#' # Requires SAGA GIS 2.1.0+:
#' rsaga.html.help("io_grid")
#' rsaga.html.help("io_grid",0)
#' rsaga.html.help("io_grid","Import ESRI Arc/Info Grid")
#' }
#' @seealso [rsaga.get.usage()], [rsaga.geoprocessor()], [rsaga.env()]
#' @keywords utilities interface
#' @export
rsaga.html.help = function(lib, module=NULL, use.program.folder = TRUE, env=rsaga.env(), ...)
{
if (env$version == "2.1.0" | env$version == "2.1.1" | env$version == "2.1.2" |
env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
# Convert character string module names to integer code, if possible:
if (!is.null(module)) {
if (is.character(module)) {
modules = rsaga.get.lib.modules(lib, env=env)
if (any(modules$name == module))
module = modules$code[ which(modules$name == module)[1] ]
}
}
env$workspace = file.path(env$path, "help")
# check if help file already exists:
fnm = file.path(env$workspace,lib,lib)
if (!is.null(module)) {
if (is.numeric(module)) {
fnm = paste(fnm, "_", formatC(module,width=2,flag="0"), sep="")
}
}
fnm = paste(fnm, ".html", sep="")
# Create help files in the SAGA GIS program folder if they don't yet exist,
# or in this R session's temporary folder if need be:
if(!file.exists(fnm)) {
if (!file.exists(env$workspace)) {
if (use.program.folder)
use.program.folder = dir.create(env$workspace)
if (!use.program.folder) {
temp.workspace = file.path(tempdir(),"help")
if (!file.exists(temp.workspace)) dir.create(temp.workspace)
env$workspace = temp.workspace
}
}
}
# Updated path and file name (env$workspace may have changed):
fnm = file.path(env$workspace,lib,lib)
if (!is.null(module)) {
if (is.numeric(module)) {
fnm = paste(fnm, "_", formatC(module,width=2,flag="0"), sep="")
}
}
fnm = paste(fnm, ".html", sep="")
# Create help files in the SAGA GIS program folder if they don't yet exist:
if (!file.exists(fnm)) {
cat("Calling SAGA GIS to create documentation files in the program folder...\n")
rsaga.geoprocessor(lib = NULL, module = NULL, prefix = "-d", env = env, check.parameters = FALSE)
}
# Issue a warning if documentation file still can't be accessed:
if (!file.exists(fnm)) {
warning("Can't create or can't find suitable documentation file\n",
"for module ", module, " in library ", lib, ".\n",
"Possible reasons:\n",
"- No writing permission for folder ", env$workspace, "\n",
"- Module or library does not exist\n")
return()
}
utils::browseURL( paste("file://",fnm,sep=""), ...)
} else {
warning("rsaga.html.help only available for SAGA GIS 2.1.0+.\n",
"Redirecting you to the SAGA GIS wiki...")
url = "http://sourceforge.net/apps/trac/saga-gis/wiki"
utils::browseURL(url, ...)
}
return()
}
#' Generic R interface for SAGA modules
#'
#' This function is the workhorse of the R--SAGA interface: It calls the SAGA command line tool to run SAGA modules and pass arguments.
#'
#' @name rsaga.geoprocessor
#' @param lib Name of the SAGA library to be called (see Details).
#' @param module Number (`>=0`) or name of the module to be called within the library `lib` (see Details).
#' @param param A list of named arguments to be passed to the SAGA module (see Examples).
#' @param show.output.on.console a logical (default: `TRUE`), indicates whether to capture the output of the command and show it on the R console (see [system()]).
#' @param invisible a logical, indicates whether the command window should be visible on the screen.
#' @param intern a logical, indicates whether to make the output of the command an R object
#' @param prefix optional character string: prefix such as `"-h"` used in the `saga_cmd` call; mostly for internal purposes; call `saga_cmd -h` from the command line for details; see also `flags`
#' @param flags optional character string indicating any command line flags; supported only by SAGA GIS 2.1.0 (and higher), quietly ignored otherwise: `"q"`: no progress report (the default for `show.output.on.console=TRUE`); `"r"`: no messages report; `"s"`: silent mode, i.e. no progress and no messages report (the default for `show.output.on.console=FALSE`); other flag options probably not relevant within RSAGA
#' @param cores optional numeric argument, or `NA`: number of cores used by SAGA GIS; supported only by SAGA GIS 2.1.0 (and higher), ignored otherwise (with a warning); overwrites the `cores` setting specified in the `env` argument (see [rsaga.env()]). Multicore-enabled SAGA GIS modules such as the one used by [rsaga.pisr()] seem to run in multicore mode by default when this argument is not specified, therefore `cores` should only be specified to use a smaller number of cores than available on a machine.
#' @param env A SAGA geoprocessing environment, i.e. a list with information on the SAGA and SAGA modules paths and the name of the working directory in which to look for input and output files. (Defaults: see [rsaga.env()].)
#' @param display.command Display the DOS command line for executing the SAGA module (including all the arguments to be passed). Default: `FALSE`.
#' @param reduce.intern If `intern=TRUE`, reduce the text output of SAGA returned to R by eliminating redundant lines showing the progress of module execution etc. (default: `TRUE`).
#' @param check.module.exists logical (default: `TRUE`): call [rsaga.module.exists()] to determine if the specified module can be called in the current SAGA installation
#' @param warn logical (default: `TRUE`): for internal purposes - can be used to suppress warning messages generated by failed SAGA_CMD calls; currently used by [rsaga.get.lib.modules()] and related functions; see [options()] argument `warn` for details
#' @param argsep character (default: `" "`; currently for internal use): defines the character symbol used as a separator between each argument name and argument value passed to `saga_cmd`. SAGA GIS 2.1.0 (RC1) seems to move toward `"="` as a separator, but `" "` still works and some modules (e.g. the one used by `rsaga.pisr`) don't seem to work with `argsep="="`. Future releases of RSAGA may change the default `argsep` value and/or delete or ignore this argument and/or move it to [rsaga.env()].
#'
#' @param check.parameters logical (default: `TRUE`): Check if correct parameters are used.
#'
#' @param ... Additional arguments to be passed to [base::system()].
#'
#' @details This workhorse function establishes the interface between the SAGA command line program and R by submitting a system call. This is a low-level function that may be used for directly accessing SAGA; specific functions such as `rsaga.hillshade` are intended to be more user-friendly interfaces to the most frequently used SAGA modules. These higher-level interfaces support default values for the arguments and perform some error checking; they should therefore be preferred if available.
#'
#' A warning is issued if the SAGA GIS version in use is not among the versions this RSAGA release has been tested with.
#'
#' @return The type of object returned depends on the `intern` argument passed to [system()].
#'
#' If `intern=FALSE`, a numerical error/success code is returned, where a value of `0` corresponds to success and a non-zero value indicates an error. Note however that the function always returns a success value of `0` if `wait=FALSE`, i.e. if it does not wait for SAGA to finish.
#'
#' If `intern=TRUE` (default), the console output of SAGA is returned as a character vector. This character vector lists the input file names and modules arguments, and gives a more or less detailed report of the function's progress. Redundant information can be cancelled out by setting `reduce.intern=TRUE`.
#'
#' @references Brenning, A., 2008. Statistical geocomputing combining R and
#' SAGA: The example of landslide susceptibility analysis with
#' generalized additive models. In J. Boehner, T. Blaschke and
#' L. Montanarella (eds.), SAGA - Seconds Out (= Hamburger
#' Beitraege zur Physischen Geographie und
#' Landschaftsoekologie, vol. 19), p. 23-32.
#'
#' @author Alexander Brenning (R interface); Olaf Conrad and the SAGA development team (SAGA development)
#' @note Existing output files will be overwritten by SAGA without prompting!
#'
#' If a terrain analysis function is not directly interfaced by one of the RSAGA functions, you might still find it in the growing set of SAGA libraries and modules. The names of all libraries available in your SAGA installation can be obtained using [rsaga.get.libraries()] (or by checking the directory listing of the `modules` folder in the SAGA directory). The names and numeric codes of all available modules (globally or within a specific library) are retrieved by [rsaga.get.modules()]. Full-text search in library and module names is performed by [rsaga.search.modules()]. For information on the usage of SAGA command line modules, see [rsaga.get.usage()], or the RSAGA interface function if available.
#'
#' `display.command=TRUE` is mainly intended for debugging purposes to check if all arguments are passed correctly to SAGA CMD.
#' @seealso [rsaga.env()], [rsaga.get.libraries()], [rsaga.get.modules()], [rsaga.search.modules()], [rsaga.get.usage()]; [rsaga.esri.wrapper()] for a wrapper for ESRI ASCII/binary grids; [rsaga.hillshade()] and other higher-level functions.
#' @examples
#' \dontrun{
#' rsaga.hillshade("dem","hillshade",exaggeration=2)
#' # using the RSAGA geoprocessor:
#' rsaga.geoprocessor("ta_lighting",0,list(ELEVATION="dem.sgrd",SHADE="hillshade",EXAGGERATION=2))
#' # equivalent DOS command line call:
#' # saga_cmd.exe ta_lighting 0 -ELEVATION dem.sgrd -SHADE hillshade -EXAGGERATION 2
#' }
#' @keywords spatial interface
#' @export
#' @importFrom magrittr "%>%"
rsaga.geoprocessor = function(
lib, module = NULL, param = list(),
show.output.on.console = TRUE, invisible = TRUE, intern = TRUE,
prefix = NULL, flags = ifelse(show.output.on.console,"q","s"), cores,
env = rsaga.env(), display.command = FALSE, reduce.intern = TRUE,
check.module.exists = TRUE, warn = options("warn")$warn,
argsep = " ", check.parameters = TRUE, ... )
{
# Issue warning if using SAGA GIS version that has not been tested with RSAGA:
if (!is.null(env$version)) {
if (!is.na(env$version)) {
if (!any(c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8",
"2.1.0","2.1.1","2.1.2","2.1.3","2.1.4",
"2.2.0","2.2.1","2.2.2","2.2.3", "2.3.1",
"2.3.2", "3.0.0", "4.0.0", "4.0.1", "4.1.0",
"5.0.0", "6.0.0", "6.1.0", "6.2.0", "6.3.0", "6.4.0", "7.0.0", NULL) == env$version))
warning("This RSAGA version has been tested with SAGA GIS versions 2.3.1 - 6.3.0.\n",
"You seem to be using SAGA GIS ", env$version, ", which may cause problems due to\n",
"changes in names and definitions of SAGA module arguments, etc.", sep = "" )
}
}
# Number of cores for multicore processing:
if (!missing(cores)) env$cores = cores
if (!is.na(env$cores) & (substr(env$version,1,4) == "2.0.")) {
warning("'cores' argument not supported by SAGA GIS versions <2.1.0; ignoring 'cores' argument\n",
"Use SAGA GIS 2.1.0+ for multicore geoprocessing.")
env$cores = NA
}
if (is.na(env$version) | (substr(env$version,1,4) == "2.0.")) {
if (argsep != " ")
warning("To my knowledge, SAGA GIS 2.0.x only supports argsep=' '.\nUse different argsep values at own risk.")
} else {
if (!(argsep %in% c(" ","=")))
warning("To my knowledge, SAGA GIS only supports argsep=' ' or (partially also) '='.\nUse different argsep values at own risk.")
}
# Change working directory:
old.wd = getwd()
on.exit(setwd(old.wd))
setwd(env$workspace)
# Set environment variables SAGA and SAGA_MLB:
# (This might be redundant, but it probably won't hurt. Might also be version specific.)
if ((Sys.info()["sysname"] != "Windows") | is.na(env$version) | (env$version == "2.0.4")) {
old.saga = Sys.getenv("SAGA", unset = NA)
old.saga.mlb = Sys.getenv("SAGA_MLB", unset = NA)
on.exit(if (is.na(old.saga)) Sys.unsetenv("SAGA") else Sys.setenv(SAGA=old.saga), add = TRUE)
on.exit(if (is.na(old.saga.mlb)) Sys.unsetenv("SAGA_MLB") else Sys.setenv(SAGA_MLB=old.saga.mlb), add=TRUE)
Sys.setenv(SAGA=env$path, SAGA_MLB=env$modules)
}
# Core part of system call:
command = shQuote( paste( env$path, .Platform$file.sep, env$cmd, sep="" ) )
# Prefix e.g. -h or --help for general help (this is currently used by rsaga.get.version)
if (!is.null(prefix))
command = paste( command, prefix, sep = " " )
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9"))) {
if (!is.null(flags))
command = paste( command, " -f=", flags, sep = "" )
if (!is.null(lib) & !is.null(module) & (length(param)>0)) {
if (!is.na(env$cores)) {
command = paste( command, " --cores=", env$cores, sep="" )
}
}
}
# Library - in the case of unix systems (until 2.0.9), it must be preceded by 'lib' -
# but not in the case of Mac OSX:
###add.lib = (Sys.info()["sysname"] != "Windows") & (Sys.info()["sysname"] != "Darwin")
if (!is.null(lib)) {
# From 2.1.0 on, UNIX-like systems do not have preceding 'lib' any more
if (is.null(env$lib.prefix) |
!(env$version %in% c("2.0.4", "2.0.5", "2.0.6",
"2.0.7", "2.0.8", "2.0.9"))) env$lib.prefix = ""
command = paste( command, " ", env$lib.prefix, lib, sep = "")
}
if (!is.null(lib) & !is.null(module)) {
if (check.module.exists) {
ex = rsaga.module.exists(lib, module, env=env)
if (!ex) {
cat("Module '", module, "' not found in SAGA library '", lib, "'.\n",
"Check if module name has changed (or is misspelled)?\n", sep = "")
cat("The following (non-interactive) modules currently exist in this SAGA library:\n\n")
print(rsaga.get.modules(lib, env=env, interactive = FALSE))
cat("\n")
stopifnot(rsaga.module.exists(lib,module, env=env))
}
}
# Check parameters
if(check.parameters) {
# Get console output
# Warning is intentionally generated
res <- suppressWarnings(
system(paste0(command, " \"", module, "\""), intern = TRUE))
# Get parameters
i <- grep("Usage:", res)
# Extract parameter list
param_list <-
unlist(stringr::str_extract_all(res[i], "\\[(.*?)\\]")) %>%
stringr::str_remove("\\[-") %>%
stringr::str_remove("\\]") %>%
stringr::str_remove("<(.*?)>") %>%
stringr::str_remove("\\s")
      # Stop with an informative error message if unknown parameters were passed
      if(!all(names(param) %in% param_list)) {
        # Flag which of the supplied parameter names are valid
        valid_parameters <- names(param) %in% param_list
        # Get all supplied parameter names
        parameters <- names(param)
        stop(paste0("Wrong parameters used: ",
                    paste(parameters[!valid_parameters], collapse = " "),
                    ". ",
                    "Possible parameters: ",
                    paste(param_list, collapse = " ")))
      }
}
if (is.character(module)) module = shQuote(module)
command = paste(command, module)
if (length(param)>0) {
# Logical arguments are treated in a special way with SAGA versions below 2.1.3:
# They are simply omitted if their value is FALSE.
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9",
"2.1.0","2.1.1","2.1.2"))) {
i = 1
while (i<=length(param)) {
if (is.logical(param[[i]])) {
if (!param[[i]]) {
param[[i]] = "false"
i = i - 1
} else param[[i]] = "true"
}
i = i + 1
}
} else {
i = 1
while (i<=length(param)) {
if (is.logical(param[[i]])) {
if (!param[[i]]) {
param[[i]] = NULL
i = i - 1
} else param[[i]] = ""
}
i = i + 1
}
}
# Argument names:
nm = names(param)
# Argument values:
val = as.character(unlist(param))
# line added by Johan v.d.W.:
# Put quotes around non-void argument values:
val[ nchar(val) > 0 ] = shQuote( val[ nchar(val) > 0 ] )
# Add saga_cmd arguments to the command line call:
param = paste("-",nm, argsep, val,sep="",collapse=" ")
command = paste(command, param)
}
}
if (display.command) cat(command,"\n")
if (Sys.info()["sysname"] == "Windows") {
# Some rsaga core calls need to suppress warnings
# related to non-zero exit codes of saga_cmd:
oldwarn = options("warn")$warn
on.exit(options(warn = oldwarn), add = TRUE)
options(warn = warn)
# Actual saga_cmd call:
res = system( command, intern=intern,
show.output.on.console=show.output.on.console,
invisible=invisible, ...)
options(warn = oldwarn)
} else {
oldwarn = options("warn")$warn
on.exit(options(warn = oldwarn), add = TRUE)
options(warn = warn)
    # Suppress error messages that appear when using rsaga.get.lib.modules
if (warn == -1){
res = system( command, intern=intern, ignore.stderr = TRUE, ...)
} else {
res = system( command, intern=intern, ...)
}
# 'show.output.on.console' and 'invisible' only work under Windows
options(warn = oldwarn)
}
if (intern) {
if (reduce.intern) {
remove = grep("\r",res,fixed=TRUE)
if (length(remove) > 0)
res = res[ -remove ]
remove = grep("^.*##.*##",res)
if (length(remove) > 0)
res = res[ -remove ]
if (any(remove <- res=="go...")) res = res[!remove]
if (any(remove <- res=="okay")) res = res[!remove]
if (any(remove <- substr(res,1,7)=="type -h")) res = res[!remove]
if (any(remove <- substr(res,1,7)=="_______")) res = res[!remove]
}
if (show.output.on.console)
cat(res,sep="\n")
}
if (intern) {
invisible(res)
} else return(res)
}
#' Use RSAGA functions for ESRI grids
#'
#' This wrapper converts input grid files provided in ESRI binary (.flt) or ASCII (.asc) formats to SAGA's (version 2) grid format, calls the RSAGA geoprocessing function, and converts the output grids back to the ESRI grid format. Conversion can also be limited to either input or output grids.
#' @name rsaga.esri.wrapper
#' @param fun function: one of the RSAGA geoprocessing functions, such as [rsaga.close.gaps()] or [rsaga.hillshade()] etc.
#' @param in.esri logical: are input grids provided as ESRI grids (`in.esri=TRUE`) or as SAGA grids?
#' @param out.esri logical: should output grids be converted to ESRI grids?
#' @param env RSAGA environment as returned by [rsaga.env()]
#' @param esri.workspace directory for the input and output ESRI ASCII/binary grids
#' @param format output file format, either `"ascii"` (default; equivalent: `format=1`) for ASCII grids or `"binary"` (equivalent: `0`) for binary ESRI grids (`.flt`).
#' @param georef character: `"corner"` (equivalent numeric code: `0`) or `"center"` (default; equivalent: `1`). Determines whether the georeference will be related to the center or corner of its extreme lower left grid cell.
#' @param prec number of digits when writing floating point values to ASCII grid files (only relevant if `out.esri=TRUE`).
#' @param esri.extension extension for input/output ESRI grids: defaults to `.asc` for `format="ascii"`, and to `.flt` for `format="binary"`
#' @param condensed.res logical: return only results of the RSAGA geoprocessing function `fun` (`condensed.res=TRUE`), or include the results of the import and export operations, i.e. the calls to [rsaga.esri.to.sgrd()] and [rsaga.sgrd.to.esri()]? (see Value)
#' @param clean.up logical: delete intermediate SAGA grid files?
#' @param intern `intern` argument to be passed to [rsaga.geoprocessor()]; see Value
#' @param ... additional arguments for `fun`; NOTE: ESRI ASCII/float raster file names should NOT include the file extension (.asc, .flt); the file extension is defined by the `esri.extension` and `format` arguments!
#' @details ESRI ASCII/float raster file names should NOT include the file extension (.asc, .flt); the file extension is defined by the `esri.extension` and `format` arguments!
#' @return The object returned depends on the `condensed.res` arguments and the `intern` argument passed to the [rsaga.geoprocessor()].
#'
#' If `condensed.res=TRUE` and `intern=FALSE`, a single numerical error code (0: success) is returned. If `condensed.res=TRUE` and `intern=TRUE` (default), a character vector with the module's console output is returned (invisibly).
#'
#' If `condensed.res=FALSE` the result is a list with components `in.res`, `geoproc.res` and `out.res`. Each of these components is either an error code (for `intern=FALSE`) or (for `intern=TRUE`) a character vector with the console output of the input ([rsaga.esri.to.sgrd()]), the geoprocessing (`fun`), and the output conversion ([rsaga.sgrd.to.esri()]) step, respectively. For `in.esri=FALSE` or `out.esri=FALSE`, the corresponding component is `NULL`.
#' @note Note that the intermediate grids as well as the output grids may overwrite existing files with the same file names without prompting the user. See example below.
#' @seealso [rsaga.esri.to.sgrd()], [rsaga.sgrd.to.esri()], [rsaga.geoprocessor()], [rsaga.env()]
#' @examples
#' \dontrun{
#' rsaga.esri.wrapper(rsaga.hillshade,in.dem="dem",out.grid="hshd",condensed.res=FALSE,intern=FALSE)
#' # if successful, returns list(in.res=0,geoproc.res=0,out.res=0),
#' # and writes hshd.asc; intermediate files dem.sgrd, dem.hgrd, dem.sdat,
#' # hshd.sgrd, hshd.hgrd, and hshd.sdat are deleted.
#' # hshd.asc is overwritten if it already existed.
#' }
#' @keywords spatial interface
#' @export
rsaga.esri.wrapper = function(fun, in.esri=TRUE, out.esri=TRUE,
env=rsaga.env(), esri.workspace=env$workspace,
format="ascii", georef="corner", prec=5, esri.extension,
condensed.res=TRUE, clean.up=TRUE, intern=TRUE, ...)
{
in.res = NULL
geoproc.res = NULL
out.res = NULL
format = match.arg.ext(format,choices=c("binary","ascii"),base=0,ignore.case=TRUE,numeric=TRUE)
if (missing(esri.extension))
esri.extension = c(".flt",".asc")[format+1]
args = list(...)
argnms = names(args)
in.ok = TRUE
if (in.esri) {
wh = grep("^in\\.",names(args))
if (length(wh)==0) {
warning("'in.esri' is TRUE, but the geoprocessing function does not have an 'in.*' grid argument")
} else {
in.args = args[wh]
in.res = rsaga.esri.to.sgrd(in.grids=set.file.extension(unlist(in.args),esri.extension),
intern=intern, show.output.on.console=FALSE,
out.sgrds=unlist(in.args), in.path=esri.workspace, env=env) # more args to geoproc
if (!intern) in.ok = all(in.res==0)
}
}
geoproc.ok = TRUE
if (in.ok) {
geoproc.res = fun(env=env,intern=intern,...)
if (!intern) geoproc.ok = all(geoproc.res==0)
}
  if (clean.up && in.esri && length(wh) > 0) { # in.args exists only if ESRI input grids were converted
del.files = set.file.extension(in.args,"")
del.files = unlist(lapply(as.list(del.files), function(x) paste(x,c("sgrd","hgrd","sdat","mgrd"),sep="")))
unlink(del.files)
}
out.ok = TRUE
if (out.esri & in.ok & geoproc.ok) {
wh = grep("^out\\.",names(args))
if (length(wh)==0) {
warning("'out.esri' is TRUE, but the geoprocessing function does not have an 'out.*' grid argument")
} else {
out.args = args[wh]
out.res = rsaga.sgrd.to.esri(in.sgrds=unlist(out.args),
out.grids=set.file.extension(out.args,unlist(esri.extension)),
out.path=esri.workspace, env=env, intern=intern, show.output.on.console=FALSE,
format=format, georef=georef, prec=prec) # more args to geoproc
if (!intern) out.ok = all(out.res==0)
if (clean.up) {
del.files = set.file.extension(out.args,"")
del.files = unlist(lapply(as.list(del.files), function(x) paste(x,c("sgrd","hgrd","sdat","mgrd"),sep="")))
unlink(del.files)
}
}
}
res = list( in.res=in.res, geoproc.res=geoproc.res, out.res=out.res )
if (condensed.res) {
if (intern) {
res = geoproc.res
} else res = max(abs(unlist(res)))
}
if (intern) {
invisible(res)
} else return( res )
}
#' Create a copy of a SAGA grid file
#'
#' Creates a copy of a SAGA grid file, optionally overwriting the target file if it already exists. Intended mainly for internal use by RSAGA functions, currently in particular [rsaga.inverse.distance()].
#'
#' @param in.grid name of a SAGA GIS grid file; file extension can be omitted
#' @param out.grid name of a SAGA GIS grid file; file extension can be omitted
#' @param overwrite logical; if `TRUE` (the default), overwrite `out.grid` if it already exists; if `FALSE` and the `out.grid` already exists, copying will be skipped without causing an error.
#' @param env a SAGA geoprocessing environment as created by [rsaga.env()]
#' @note SAGA grid files consist of three (or more) individual files with file extensions `.mgrd`, `.sgrd` and `.sdat`. The files with these three file extensions are copied, any additional files (e.g. a history file) are ignored.
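#' @examples
#' \dontrun{
#' # hypothetical grid names: copies dem.mgrd, dem.sgrd and dem.sdat
#' # to dem_backup.* within the rsaga.env() workspace
#' rsaga.copy.sgrd("dem", "dem_backup")
#' }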
#' @keywords spatial interface
#' @export
rsaga.copy.sgrd = function(in.grid, out.grid, overwrite = TRUE, env = rsaga.env())
{
in.grid = set.file.extension(in.grid,".sgrd")
out.grid = set.file.extension(out.grid,".sgrd")
stopifnot(in.grid != out.grid)
old.wd = getwd()
setwd(env$workspace)
stopifnot(file.exists(in.grid))
in.files = c(
set.file.extension(in.grid,".mgrd"),
set.file.extension(in.grid,".sgrd"),
set.file.extension(in.grid,".sdat") )
out.files = c(
set.file.extension(out.grid,".mgrd"),
set.file.extension(out.grid,".sgrd"),
set.file.extension(out.grid,".sdat") )
res = c()
for (i in 1:length(in.files)) {
res[i] = file.copy(in.files[i], out.files[i], overwrite = overwrite)
}
setwd(old.wd)
return(all(res))
}
| 65,948 | gpl-2.0 |
f381e4dcfd3cff5937cbe24efe7a3096ce7eaf4f | radfordneal/pqR | src/library/base/R/pmax.R | # File src/library/base/R/pmax.R
# Part of the R package, http://www.R-project.org
# Modifications for pqR Copyright (c) 2013, 2017, 2018 Radford M. Neal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
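# Quick illustration of the "parallel" max/min semantics implemented below,
# with recycling of the shorter argument:
#   pmax(c(1, 5, 3), 4)  # -> c(4, 5, 4)
#   pmin(c(1, 5, 3), 4)  # -> c(1, 4, 3)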
pmin.int <- function(..., na.rm = FALSE) .Internal(pmin(na.rm, ...))
pmax.int <- function(..., na.rm = FALSE) .Internal(pmax(na.rm, ...))
pmax <- function (..., na.rm = FALSE)
{
# Handle the typical case of two arguments specially for speed.
if (!missing(..2) && missing(..3)
&& is.atomic(a<-..1) && !is.object(a)
&& is.atomic(b<-..2) && !is.object(b)) {
mmm <- .Internal (pmax (na.rm, a, b))
if (!is.null(attributes(a)))
mostattributes(mmm) <- attributes(a)
}
else { # the general case
elts <- list(...)
if (length (elts) == 0L)
stop("no arguments")
if (all(vapply(elts, function(x) is.atomic(x) && !is.object(x), NA)))
mmm <- .Internal(pmax(na.rm, ...))
else {
mmm <- elts[[1L]]
attr(mmm, "dim") <- NULL # dim<- would drop names
has.na <- FALSE
for (each in elts[-1L]) {
attr(each, "dim") <- NULL
l1 <- length(each); l2 <- length(mmm)
if(l2 < l1) {
if (l2 && l1 %% l2)
warning("an argument will be fractionally recycled")
mmm <- rep(mmm, length.out = l1)
} else if(l1 && l1 < l2) {
if (l2 %% l1)
warning("an argument will be fractionally recycled")
each <- rep(each, length.out = l2)
}
nas.mmm <- is.na(mmm)
nas.each <- is.na(each)
if(has.na || (has.na <- any(nas.mmm) || any(nas.each))) {
mmm[nas.mmm] <- each[nas.mmm]
each[nas.each] <- mmm[nas.each]
}
change <- mmm < each
change <- change & !is.na(change)
mmm[change] <- each[change]
if (has.na && !na.rm) mmm[nas.mmm | nas.each] <- NA
}
}
if (!is.null(attributes(elts[[1]])))
mostattributes(mmm) <- attributes(elts[[1]])
}
get_rm(mmm)
}
pmin <- function (..., na.rm = FALSE)
{
# Handle the typical case of two arguments specially for speed.
if (!missing(..2) && missing(..3)
&& is.atomic(a<-..1) && !is.object(a)
&& is.atomic(b<-..2) && !is.object(b)) {
mmm <- .Internal (pmin (na.rm, a, b))
if (!is.null(attributes(a)))
mostattributes(mmm) <- attributes(a)
}
else { # the general case
elts <- list(...)
if (length (elts) == 0L)
stop("no arguments")
if (all(vapply(elts, function(x) is.atomic(x) && !is.object(x), NA)))
mmm <- .Internal(pmin(na.rm, ...))
else {
mmm <- elts[[1L]]
attr(mmm, "dim") <- NULL # dim<- would drop names
has.na <- FALSE
for (each in elts[-1L]) {
attr(each, "dim") <- NULL
l1 <- length(each); l2 <- length(mmm)
if(l2 < l1) {
if (l2 && l1 %% l2)
warning("an argument will be fractionally recycled")
mmm <- rep(mmm, length.out = l1)
} else if(l1 && l1 < l2) {
if (l2 %% l1)
warning("an argument will be fractionally recycled")
each <- rep(each, length.out = l2)
}
nas.mmm <- is.na(mmm)
nas.each <- is.na(each)
if(has.na || (has.na <- any(nas.mmm) || any(nas.each))) {
mmm[nas.mmm] <- each[nas.mmm]
each[nas.each] <- mmm[nas.each]
}
change <- mmm > each
change <- change & !is.na(change)
mmm[change] <- each[change]
if (has.na && !na.rm) mmm[nas.mmm | nas.each] <- NA
}
}
if (!is.null(attributes(elts[[1]])))
mostattributes(mmm) <- attributes(elts[[1]])
}
get_rm(mmm)
}
| 4,825 | gpl-2.0 |
a6c083ba7e1e85a3a93e9015fbcc23a97241de3a | wenjie2wang/splines2 | inst/examples/ex-ibs.R | library(splines2)
x <- seq.int(0, 1, 0.01)
knots <- c(0.2, 0.4, 0.7, 0.9)
ibsMat <- ibs(x, knots = knots, degree = 1, intercept = TRUE)
## get the corresponding B-splines by bSpline()
bsMat0 <- bSpline(x, knots = knots, degree = 1, intercept = TRUE)
## or by the deriv() method
bsMat <- deriv(ibsMat)
stopifnot(all.equal(bsMat0, bsMat, check.attributes = FALSE))
## plot B-spline basis with their corresponding integrals
op <- par(mfrow = c(1, 2))
matplot(x, bsMat, type = "l", ylab = "B-spline basis")
abline(v = knots, lty = 2, col = "gray")
matplot(x, ibsMat, type = "l", ylab = "Integral of B-spline basis")
abline(v = knots, lty = 2, col = "gray")
## reset to previous plotting settings
par(op)
| 704 | gpl-3.0 |
152ee95bde40e963d4d5df01029384e7ef986b11 | mkienzle/NSW-sea-garfish-stock-assessment | docs/docs/Script/CalculatePopulationTrendsWithUncertainties.R | # CREATED 13 Dec 2016
# MODIFIED 19 May 2019
# PURPOSE estimate abundance and recruitment point estimates and uncertainties
# METHOD according to S. Brandt, Data Analysis, 1999 Springer-Verlag
#        The x% confidence region of the log-likelihood is contained within the minimum log-likelihood plus chi-square(0.95, 4 df)
# Calculating the recruitment from as many points as possible in this region, we use the extreme values to give the 95% confidence
# interval of the recruitment estimate
x <- 95 # 95% CI
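# With x = 95 and 4 estimated parameters, the cutoff applied below is
# 0.5 * qchisq(0.95, df = 4), i.e. about 4.74 log-likelihood units above the minimum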
# Use the best fitted model
source("FitModels.R")
# define some variables
n.year <- nrow(nb.at.age.tmp)
n.cohort <- nrow(nb.at.age.tmp) + ncol(nb.at.age.tmp) - 1
n.par <- length(result2$par) # Number of parameters in the best model
# Create a data.frame to hold the resample results
n.resample <- 1e4
resample.results <- as.data.frame(matrix(nrow = n.resample, ncol = n.par + 1 + n.cohort + n.year))
dimnames(resample.results)[[2]] <- c(paste("par", 1:n.par, sep=""), "log.lik", paste("rec", 1:n.cohort, sep = ""),
paste("Biomass", 1:n.year, sep = ""))
# Fix the number of SDs defining the range of values to search around the mean of each parameter
n.sigma <- 2
for(resample in 1:n.resample){
print(paste(resample,"/",n.resample,sep=""))
# Create a set of random parameters
rand.par <- rep(NA,n.par)
for(i in 1:n.par) rand.par[i] <- runif(1, min = result2$par[i] -n.sigma * errors2[i], max = result2$par[i] + n.sigma * errors2[i])
resample.results[resample, 1:n.par] <- rand.par
resample.results[resample, 5] <- ll.model2(rand.par, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf)
### Estimate recruitment
P <- prob.for.ll.model2(rand.par, effort = effort, catchability.scaling.factor = csf)
prob <- P
#print(Coaa2Caaa(prob))
est.nb.at.age.in.catch <- outer(estimated.nb.in.catch, rep(1, 6)) * nb.at.age / outer(rowSums(nb.at.age), rep(1, 6))
est.rec <- rowSums(Caaa2Coaa(est.nb.at.age.in.catch), na.rm = T) / rowSums(P, na.rm = T)
resample.results[resample, grep("rec", names(resample.results))] <- est.rec
### Estimate biomass
# Estimated number in the catch (at age)
prop.at.age.from.agesample <- nb.at.age / outer(rowSums(nb.at.age), rep(1, ncol(nb.at.age)))
catch.at.age <- outer(estimated.nb.in.catch, rep(1, ncol(nb.at.age))) * prop.at.age.from.agesample
s.at.age.model2 <-rbind( outer(rep(1,6), c(rand.par[3], rep(1,5))),
outer(rep(1,nrow(effort)-6), c(rand.par[4], rep(1,5))))
F <- rand.par[1] * csf * (effort * s.at.age.model2)
M <- rand.par[2]
#mu <- F/(F+M) * (1-exp(-(F+M)))# Quinn and Deriso (1999) (Eq. 8.58)
#N.at.age <- catch.at.age / mu
N.at.age <- catch.at.age / F
prop.in.population <- N.at.age / outer(rowSums(N.at.age), rep(1, ncol(nb.at.age)))
biomass.at.age <- N.at.age * weight.at.age
resample.results[resample, grep("Biomass", names(resample.results))] <- rowSums(biomass.at.age * 1e-3)
}
# Select the subset of simulated data that fall within the chi-square (x/100, 4df) distance from the minimum log-likelihood
#resample.results.x <- subset(resample.results, (log.lik - result2$value) <= (0.5 * qchisq(x/100, df = 11 * 6 - 4 )))
resample.results.x <- subset(resample.results, (log.lik - result2$value) <= (0.5 * qchisq(x/100, df = 4 )))
# Save the results
write.csv(resample.results.x, file = paste("Results/Data/ProfileLikelihoodOfRecruitmentEstimates-", format(Sys.time(), "%b%d%Y-%H-%M-%S"), ".csv", sep=""))
write.csv(resample.results.x, file = "Results/Data/ProfileLikelihoodOfRecruitmentEstimates.csv")
| 3,560 | gpl-3.0 |
152ee95bde40e963d4d5df01029384e7ef986b11 | mkienzle/NSW-sea-garfish-stock-assessment | Script/CalculatePopulationTrendsWithUncertainties.R | # CREATED 13 Dec 2016
# MODIFIED 19 May 2019
# PURPOSE estimate abundance and recruitment point estimates and uncertainties
# METHOD according to S. Brandt, Data Analysis, 1999 Springer-Verlag
#        The x% confidence region of the log-likelihood is contained within the minimum log-likelihood plus chi-square(0.95, 4 df)
# Calculating the recruitment from as many points as possible in this region, we use the extreme values to give the 95% confidence
# interval of the recruitment estimate
x <- 95 # 95% CI
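# With x = 95 and 4 estimated parameters, the cutoff applied below is
# 0.5 * qchisq(0.95, df = 4), i.e. about 4.74 log-likelihood units above the minimum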
# Use the best fitted model
source("FitModels.R")
# define some variables
n.year <- nrow(nb.at.age.tmp)
n.cohort <- nrow(nb.at.age.tmp) + ncol(nb.at.age.tmp) - 1
n.par <- length(result2$par) # Number of parameters in the best model
# Create a data.frame to hold the resample results
n.resample <- 1e4
resample.results <- as.data.frame(matrix(nrow = n.resample, ncol = n.par + 1 + n.cohort + n.year))
dimnames(resample.results)[[2]] <- c(paste("par", 1:n.par, sep=""), "log.lik", paste("rec", 1:n.cohort, sep = ""),
paste("Biomass", 1:n.year, sep = ""))
# Fix the number of SDs defining the range of values to search around the mean of each parameter
n.sigma <- 2
for(resample in 1:n.resample){
print(paste(resample,"/",n.resample,sep=""))
# Create a set of random parameters
rand.par <- rep(NA,n.par)
for(i in 1:n.par) rand.par[i] <- runif(1, min = result2$par[i] -n.sigma * errors2[i], max = result2$par[i] + n.sigma * errors2[i])
resample.results[resample, 1:n.par] <- rand.par
resample.results[resample, 5] <- ll.model2(rand.par, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf)
### Estimate recruitment
P <- prob.for.ll.model2(rand.par, effort = effort, catchability.scaling.factor = csf)
prob <- P
#print(Coaa2Caaa(prob))
est.nb.at.age.in.catch <- outer(estimated.nb.in.catch, rep(1, 6)) * nb.at.age / outer(rowSums(nb.at.age), rep(1, 6))
est.rec <- rowSums(Caaa2Coaa(est.nb.at.age.in.catch), na.rm = T) / rowSums(P, na.rm = T)
resample.results[resample, grep("rec", names(resample.results))] <- est.rec
### Estimate biomass
# Estimated number in the catch (at age)
prop.at.age.from.agesample <- nb.at.age / outer(rowSums(nb.at.age), rep(1, ncol(nb.at.age)))
catch.at.age <- outer(estimated.nb.in.catch, rep(1, ncol(nb.at.age))) * prop.at.age.from.agesample
s.at.age.model2 <-rbind( outer(rep(1,6), c(rand.par[3], rep(1,5))),
outer(rep(1,nrow(effort)-6), c(rand.par[4], rep(1,5))))
F <- rand.par[1] * csf * (effort * s.at.age.model2)
M <- rand.par[2]
#mu <- F/(F+M) * (1-exp(-(F+M)))# Quinn and Deriso (1999) (Eq. 8.58)
#N.at.age <- catch.at.age / mu
N.at.age <- catch.at.age / F
prop.in.population <- N.at.age / outer(rowSums(N.at.age), rep(1, ncol(nb.at.age)))
biomass.at.age <- N.at.age * weight.at.age
resample.results[resample, grep("Biomass", names(resample.results))] <- rowSums(biomass.at.age * 1e-3)
}
# Select the subset of simulated data that fall within the chi-square (x/100, 4df) distance from the minimum log-likelihood
#resample.results.x <- subset(resample.results, (log.lik - result2$value) <= (0.5 * qchisq(x/100, df = 11 * 6 - 4 )))
resample.results.x <- subset(resample.results, (log.lik - result2$value) <= (0.5 * qchisq(x/100, df = 4 )))
# Save the results
write.csv(resample.results.x, file = paste("Results/Data/ProfileLikelihoodOfRecruitmentEstimates-", format(Sys.time(), "%b%d%Y-%H-%M-%S"), ".csv", sep=""))
write.csv(resample.results.x, file = "Results/Data/ProfileLikelihoodOfRecruitmentEstimates.csv")
| 3,560 | gpl-3.0 |
8ec6c23c15e6f034693b7b067e4d6472fd70ab58 | karmatarap/e107_Project_Test_Area | r/helper_functions/parse-binary_data.R | #' Parse binary data
#'
#' @description Parses the data with binary annotation.
#'
#' @author Alexander Noll
#'
#' @return data frame
#'
#' @examples parse_binary_data()
#' @import dplyr readr stringr
parse_binary_data <- function(type) {
library(dplyr)
library(purrr)
library(readr)
library(stringr)
# Load data
tweets_file <- "../data/download_tweets/binary_tweets_downloaded.tsv"
# Parse tweets
individual_tweets <-
read_file(tweets_file) %>%
str_replace_all("\r", "") %>%
str_split(., "\n") %>%
unlist()
tweets_frame <-
individual_tweets %>%
str_split_fixed("\t", 4) %>%
as.data.frame() %>%
rename(tweet_id = V1,
user_id = V2,
is_AE = V3,
tweet_text = V4
) %>%
filter(is_AE != "") %>%
mutate(is_AE = as.factor(plyr::revalue(is_AE, c("0" = "No", "1" = "Yes"))))
tweets_frame
} | 987 | mit |
1f2815a8826cd0903c2abd9fa3963bd5af6dd9e1 | hcommenges/mapfitR | GarminFIT_script.R | ###########################################
######## Explore Garmin FIT tracks
######## Script
###########################################
# load packages ----
library(lubridate)
library(ISOweek)
library(ggplot2)
library(leaflet)
library(sf)
library(dplyr)
# load functions ----
source(file = "GarminFIT_fct.R")
# sync raw files ----
SyncFiles()
# load data ----
listActivities <- readRDS(file = "listactivities.Rds")
# update data ----
listActivities <- AddNewrun(listActivities)
# view one run ----
ViewRun(listActivities, date = "2017-09-03")
# view one run among a set of superimposed tracks ----
ViewRunOverlaid(listActivities, date = "2017-09-03")
# view totals ----
ViewTotals(listActivities)
| 710 | agpl-3.0 |
a3e0e73a9ac966c61aba18d5a7a274cc48b95ad9 | WLOGSolutions/RSuite | packages/RSuite/R/28_repo_manager_dir.R | #----------------------------------------------------------------------------
# RSuite
# Copyright (c) 2017, WLOG Solutions
#
# Repo manager working on directory. Created by rsuite_repo_adapter_dir.
#----------------------------------------------------------------------------
#'
#' Create repo manager to manager repository in directory.
#'
#' @param path path to repository folder (type: character)
#' @param types package types to manage (type: character)
#' @param rver R version to manage repository for. Can be NA if managing only
#' source packages (type: character).
#'
#' @return object of type rsuite_repo_manager
#'
#' @keywords internal
#' @noRd
#'
repo_manager_dir_create <- function(path, types, rver) {
assert( (is.na(rver) && all(types == "source")) || is_nonempty_char1(rver),
"Non empty character(1) expected for rver")
assert(is_nonempty_char1(path), "Non empty character(1) expected for path")
if (!dir.exists(path)) {
success <- dir.create(path, recursive = TRUE)
assert(success, "Failed to create repository at %s", path)
}
path <- rsuite_fullUnifiedPath(path)
result <- list(
path = path,
types = types,
rver = rver
)
class(result) <- c("rsuite_repo_manager_dir", "rsuite_repo_manager")
return(result)
}
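# A minimal usage sketch (internal API; the path, types and R version below
# are hypothetical):
#   mgr <- repo_manager_dir_create(path = "~/local_repo",
#                                  types = c("source", "win.binary"),
#                                  rver = "3.5")
#   repo_manager_init(mgr)  # creates contrib folders and PACKAGES indices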
#'
#' Implementation of repo_manager_get_info for rsuite_repo_manager_dir.
#'
#' @keywords internal
#' @noRd
#'
repo_manager_get_info.rsuite_repo_manager_dir <- function(repo_manager) {
return(list(
types = repo_manager$types,
rver = repo_manager$rver,
url = path2local_url(repo_manager$path) # from 99_rpatches.R
))
}
#'
#' Implementation of repo_manager_init for rsuite_repo_manager_dir.
#'
#' @keywords internal
#' @noRd
#'
repo_manager_init.rsuite_repo_manager_dir <- function(repo_manager, types) {
if (missing(types)) {
types <- repo_manager$types
}
repo_path <- repo_manager$path
was_inited <- TRUE
for (tp in types) {
tp_path <- rsuite_contrib_url(repo_path, type = tp, rver = repo_manager$rver)
if (!dir.exists(tp_path)) {
success <- dir.create(tp_path, recursive = TRUE)
assert(success, "Failed to initialize repository for %s at %s", tp, repo_path)
}
if (!file.exists(file.path(tp_path, "PACKAGES"))) {
rsuite_write_PACKAGES(tp_path, tp)
was_inited <- FALSE
}
}
return(invisible(was_inited))
}
#'
#' Implementation of repo_manager_upload for rsuite_repo_manager_dir.
#'
#' @keywords internal
#' @noRd
#'
repo_manager_upload.rsuite_repo_manager_dir <- function(repo_manager, src_dir, types) {
if (missing(types)) {
types <- repo_manager$types
}
for (tp in types) {
src_path <- rsuite_contrib_url(src_dir, type = tp, rver = repo_manager$rver)
if (!dir.exists(src_path)) {
pkg_loginfo("No package files found in %s.", src_path)
next
}
dst_path <- rsuite_contrib_url(repo_manager$path, type = tp, rver = repo_manager$rver)
if (!dir.exists(dst_path)) {
dir.create(dst_path, recursive = TRUE)
}
pkg_loginfo("Copying package files from %s into %s ...", src_path, dst_path)
for (f in list.files(src_path)) {
if (grepl("^PACKAGES", f)) {
next
}
success <- file.copy(from = file.path(src_path, f), to = file.path(dst_path, f),
overwrite = TRUE)
assert(success, "Failed to copy %s into %s.", f, dst_path)
}
rsuite_write_PACKAGES(dst_path, tp)
pkg_loginfo("... done")
}
}
#'
#' Implementation of repo_manager_remove for rsuite_repo_manager_dir.
#'
#' @keywords internal
#' @noRd
#'
repo_manager_remove.rsuite_repo_manager_dir <- function(repo_manager, toremove, type) {
path <- rsuite_contrib_url(repo_manager$path, type = type, rver = repo_manager$rver)
if (!dir.exists(path)) {
return(data.frame(Package = as.character(), Version = as.character()))
}
toremove$Removed <- unlist(
lapply(X = sprintf("%s_%s.*", toremove$Package, toremove$Version),
FUN = function(pattern) {
file <- list.files(path = path, pattern = pattern, full.names = TRUE)
if (length(file) > 0) {
unlink(file, force = TRUE) == 0
} else {
FALSE
}
})
)
rsuite_write_PACKAGES(path, type)
res <- toremove[toremove$Removed, c("Package", "Version")]
return(res)
}
#'
#' Implementation of repo_manager_destroy for rsuite_repo_manager_dir.
#'
#' @keywords internal
#' @noRd
#'
repo_manager_destroy.rsuite_repo_manager_dir <- function(repo_manager) {
# noop
}
| 4,733 | apache-2.0 |
35fc9ff795ce73636b806120d134c8f3577eb56e | marchtaylor/fishdynr | R/growth_soVB2.R | #' @title seasonally oscillating von Bertalanffy growth function (using L0)
#' @description \code{growth_soVB2} describes the growth as a function of age (t)
#' using the seasonally oscillating von Bertalanffy growth function (Somers 1988).
#' The function differs slightly from \code{\link[fishdynr]{growth_soVB}}
#' by using \code{L0} instead of \code{t0} to describe the growth function origin.
#'
#' @param Linf Infinite length
#' @param K growth constant
#' @param t age
#' @param L0 (hypothetical) length at time zero
#' @param ts summer point. Time of year (between 0 and 1) when growth oscillation
#' cycle begins (sine wave term becomes positive). Note that this definition
#' differs from some interpretations of the model (see Somers 1998)
#' @param C oscillation strength. Varies between 0 and 1.
#'
#' @examples
#' t <- seq(0,5,0.1)
#' L <- growth_soVB2(Linf=100, K=0.5, t=t, L0=10, ts=0.5, C=0.5)
#' plot(t, L, t="l", ylim=c(0,110), xaxs="i", yaxs="i")
#' points(0,10); abline(h=100, lty=2, col=8)
#'
#' @export
#'
growth_soVB2 <- function(Linf, K, t, L0, ts, C){
Linf - (Linf - L0) * exp(-(
K*(t)
+ (((C*K)/(2*pi))*sin(2*pi*(t-ts)))
))
} | 1,173 | mit |
aeb543bf12b27c3fc955aea28540b05c95c5f8f3 | woobe/h2o | R/tests/testdir_jira/runit_v_11_datemanipulation.R | #
# date parsing and field extraction tests
#
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
datetest <- function(conn){
Log.info('uploading date testing dataset')
hdf <- h2o.importFile(conn, normalizePath(locate('smalldata/jira/v-11.csv')))
# df should be 5 columns: ds1:5
Log.info('data as loaded into h2o:')
Log.info(head(hdf))
# NB: columns 1,5 are currently unsupported as date types
# that is, h2o cannot understand:
# 1 integer days since epoch (or since any other date);
# 2 dates formatted as %d/%m/%y (in strptime format strings)
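  # e.g. a column of plain integers such as 16000 (days since epoch) or
  # strings such as "05/03/15" would not be auto-parsed as dates here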
Log.info('adding date columns')
  # NB: h2o recognizes date formats automagically; if it doesn't recognize one, you're out of luck
Log.info('extracting year and month from posix date objects')
hdf$year2 <- year(hdf$ds2)
hdf$year3 <- year(hdf$ds3)
hdf$year4 <- year(hdf$ds4)
hdf$mon2 <- month(hdf$ds2)
hdf$mon3 <- month(hdf$ds3)
hdf$mon4 <- month(hdf$ds4)
hdf$idx2 <- year(hdf$ds2) * 12 + month(hdf$ds2)
hdf$idx3 <- year(hdf$ds3) * 12 + month(hdf$ds3)
hdf$idx4 <- year(hdf$ds4) * 12 + month(hdf$ds4)
cc <- colnames(hdf)
nn <- c( paste('year', 2:4, sep=''), paste('month', 2:4, sep=''), paste('idx', 2:4, sep='') )
cc[ (length(cc) - length(nn) + 1):length(cc) ] <- nn
colnames(hdf) <- cc
Log.info('pulling year/month indices local')
ldf <- as.data.frame( hdf )
# build the truth using R internal date fns
rdf <- read.csv(locate('smalldata/jira/v-11.csv'))
rdf$days1 <- as.Date(rdf$ds1, origin='1970-01-01')
rdf$days2 <- as.Date(rdf$ds2, format='%Y-%m-%d')
rdf$days3 <- as.Date(rdf$ds3, format='%d-%b-%y')
rdf$days4 <- as.Date(rdf$ds4, format='%d-%B-%Y')
rdf$days5 <- as.Date(rdf$ds5, format='%d/%m/%y')
months <- data.frame(lapply(rdf[,6:10], function(x) as.POSIXlt(x)$mon))
years <- data.frame(lapply(rdf[,6:10], function(x) as.POSIXlt(x)$year))
idx <- 12*years + months
Log.info('testing correctness')
expect_that( ldf$year2, equals(years[,2]) )
expect_that( ldf$year3, equals(years[,3]) )
expect_that( ldf$year4, equals(years[,4]) )
expect_that( ldf$month2, equals(months[,2]) )
expect_that( ldf$month3, equals(months[,3]) )
expect_that( ldf$month4, equals(months[,4]) )
expect_that( ldf$idx2, equals(idx[,2]) )
expect_that( ldf$idx3, equals(idx[,3]) )
expect_that( ldf$idx4, equals(idx[,4]) )
testEnd()
}
doTest('date testing', datetest)
| 2,436 | apache-2.0 |
858fc2ecec7cd26a172fca60b4c5c40661a1fe26 | sorensje/R_helperfunctions | zeropad.R | zeropad <- function(x){
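  # Pad a single-digit number with a leading zero for fixed-width output,
  # e.g. zeropad(7) returns "07" while zeropad(12) returns "12".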
if (x %/% 10 < 1){
padded_x <- paste('0',x,sep="")
}
else{
padded_x <- as.character(x)
}
return(padded_x)
} | 151 | gpl-2.0 |
035618c2b2c6e91a9571eb833f30563a40dd9d30 | h2oai/h2o-3 | h2o-r/demos/rdemo.supervised.large.R | library(h2o)
h2o.init()
runAll = function(data_path,split_ratio,response,predictors,flag,test_file_path){
#remove all keys from h2o prior to running the function
h2o.rm(as.character(grep(pattern = "", x = h2o.ls()$key,value=T)));
print(paste("Parse : ",data_path,sep=''));
time = system.time(data <- h2o.importFile(data_path, destination_frame="data.hex"));
print(paste("time to parse the file:",time[3]))
if(flag%in% c("B","M") ){
data[,response] <- as.factor(data[,response]);
}
if(is.null(test_file_path) ){
rnd <- h2o.runif(data, 1234)
train <- h2o.assign(data[rnd < split_ratio,], key="train.hex")
test <- h2o.assign(data[rnd >= split_ratio,], key="test.hex")
}else{
train = h2o.assign(data,key="train.hex") #ssign does not work on kdd'98 so parsing the file again
#train <- h2o.importFile(data_path,key="train.hex");
test <- h2o.importFile(test_file_path,key="test.hex");
if(flag%in% c("B","M") ){
train[,response] <- as.factor(train[,response])
test[,response] <- as.factor(test[,response])
}
}
print(paste("Dimension of train set: ",dim(train),sep=''));
print(paste("Dimension of test set: ",dim(test),sep=''));
print("Summary of response column: ")
print(summary(train[,response]));
h2o.rm("data.hex")
print(h2o.ls())
myY = response
myX = predictors
#function for performance measure
perf = function(my_model,newdata,flag){
perf = h2o.performance(model=my_model,data=newdata)
print(perf)
# if( flag%in% c("B","M")){
# if(flag == "B"){
# print(paste("AUC : ",h2o.auc(perf),sep =''))
# }
# print(paste("LogLoss : ",h2o.logloss(perf),sep =''))
# print("Confusion Matrix :")
# print(h2o.confusionMatrix(perf))
# }else{
# print(paste("MeanSquaredError : ",h2o.mse(perf),sep =''))
# }
}
#algo = c("h2o.gbm","h2o.glm","h2o.randomForest","h2o.deeplearning")
  # eval(expr) does not work, so we have to write a model object for each algo
print(paste("Run GLM:"))
if(flag =="B"){
#glm does not chk the type of the response column, will have to specifically specify the family as binomial
time = system.time(my_model <- h2o.glm(x=myX,y=myY,training_frame=train,validation_frame=test,family="binomial"))
print(paste("Time taken to run GLM:",time[3]))
perf(my_model,test,flag)
}else{
train[,myY] <- as.numeric(train[,myY])
test[,myY] <- as.numeric(test[,myY])
time = system.time(my_model <- h2o.glm(x=myX,y=myY,training_frame=train,validation_frame=test))
print(paste("Time taken to run GLM:",time[3]))
perf(my_model,test,flag ="G") # bec no multinomial in GLM instead runs gaussian
}
print(paste("Run GLM with lambda search:"))
if(flag =="B"){
#glm does not chk the type of the response column, will have to specifically specify the family as binomial
time = system.time(my_model <- h2o.glm(x=myX,y=myY,training_frame=train,validation_frame=test,family="binomial",lambda_search=T))
print(paste("Time taken to run GLM with lambda search:",time[3]))
perf(my_model,test,flag)
}else{
time = system.time(my_model <- h2o.glm(x=myX,y=myY,training_frame=train,lambda_search=T))
print(paste("Time taken to run GLM with lambda search:",time[3]))
perf(my_model,test,flag ="G") # bec no multinomial in GLM instead runs gaussian
}
print(paste("Run DRF:"))
time = system.time(my_model <- h2o.randomForest(x=myX,y=myY,training_frame=train,validation_frame=test))
print(paste("Time taken to run DRF:",time[3]))
perf(my_model,test,flag)
print(paste("Run GBM:"))
time = system.time(my_model <- h2o.gbm(x=myX,y=myY,training_frame=train,validation_frame=test))
print(paste("Time taken to run GBM:",time[3]))
perf(my_model,test,flag)
print(paste("Run DL:"))
time = system.time(my_model <- h2o.deeplearning(x=myX,y=myY,training_frame=train,validation_frame=test))
print(paste("Time taken to run DL:",time[3]))
perf(my_model,test,flag)
}
smokeTest = T
if (smokeTest) {
runAll(
data_path= h2o:::.h2o.locate("smalldata/logreg/prostate.csv"),
split_ratio = .8,
response = 2,
predictors = 3:8,
flag = "B",
test_file_path =NULL)
runAll(
data_path= h2o:::.h2o.locate("smalldata/logreg/prostate.csv"),
split_ratio = .8,
response = 4,
predictors = c(2:3,5:8),
flag = "M",
test_file_path =NULL)
runAll(
data_path= h2o:::.h2o.locate("smalldata/logreg/prostate.csv"),
split_ratio = .8,
response = 3,
predictors = c(1:2,4:8),
flag = "G",
test_file_path =NULL)
}
if (!smokeTest) {
#-------------------------------------Binary Response
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/HIGGS.csv",
split_ratio = .8,
response = 1,
predictors = 2:22,
flag = "B",
test_file_path =NULL)
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/chicagoCrimes.csv",
split_ratio = .8,
response = 9,
predictors = c(1,2,3,4,5,6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22),
flag = "B",
test_file_path =NULL)
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/airlines/airlines_all.csv",
split_ratio = .8,
response = 31,
predictors = c(1,2,3,4,6,8,9,10,13,17,18,19,22,24),
flag = "B",
test_file_path =NULL)
#---------------------------------------Multinomial Response (glm will assume continuous)
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/mnist/train.csv.gz",
split_ratio = .8,
response = 785,
predictors = 1:784,
flag = "M",
test_file_path= "hdfs://mr-0xd6.0xdata.loc/datasets/mnist/test.csv.gz")
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/covtype.data",
split_ratio = .8,
response = 55,
predictors = 1:54,
flag = "M",
test_file_path = NULL)
#-----------------------------------------Continuous Response
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/cup98LRN_z.csv",
split_ratio = .8,
response = 473,
predictors = c(7,9,14,16,18,41,42,65,70,72,90,159,188,239,251,269,288,354,363,371,372,390,410,427,451,458,465,466),
flag = "G",
test_file_path= "hdfs://mr-0xd6.0xdata.loc/datasets/cup98VAL_z.csv")
runAll(
data_path= "hdfs://mr-0xd6.0xdata.loc/datasets/citibike-nyc/2014-07.csv",
split_ratio = .8,
response = 1,
predictors = 2:15,
flag = "G",
test_file_path ="hdfs://mr-0xd6.0xdata.loc/datasets/citibike-nyc/2014-08.csv")
#-----------------------------------------
}
| 6,561 | apache-2.0 |
9410009ce819a05cad8a6ff232c23186882d4472 | FTSRG/mondo-sam | reporting/shiny/filters/DataFilter.R | library("R.oo", , quietly = TRUE)
#' DataFilter class
setConstructorS3(name = "DataFilter", abstract = TRUE, function(selections = NULL){
extend(Object(), "DataFilter",
.selections = selections,
.container = NULL,
.selectedState = NULL,
.allCurrentStates = NULL,
.allStates = NULL,
.selectedStateStorage = NULL,
.allCurrentStatesStorage = NULL)
})
setMethodS3(name = "getState", class = "DataFilter", function(this){
if (is.null(this$.selectedState)){
throw("Null state in DataFilter - getState")
}
this$.selectedState
})
setMethodS3(name = "setState", class = "DataFilter", function(this, state){
if (is.null(state)){
throw("Null value in DataFilter - setState")
}
this$.selectedState <- state
})
setMethodS3(name = "getAllStates", class = "DataFilter", function(this){
if (is.null(this$.allStates)){
throw("Null states in DataFilter - getAllStates")
}
this$.allStates
})
setMethodS3(name = "setAllStates", class = "DataFilter", function(this, states){
if (is.null(states)){
throw("Null value in DataFilter - setAllStates")
}
this$.allStates <- states
})
setMethodS3("name = getSelections", class = "DataFilter", function(this){
this$.selections
})
setMethodS3(name = "setContainer", class = "DataFilter", function(this, container){
if (is.null(container)){
throw("Null container value in DataFilter - setContainer")
}
this$.container = container
})
setMethodS3(name = "enable", class = "DataFilter", function(this, filterName){
if(filterName %in% this$.selections$getSelections() == FALSE){
return(FALSE)
}
return(TRUE)
})
setMethodS3(name = "storeState", class = "DataFilter", function(this){
this$.selectedStateStorage <- this$.selectedState
this$.allCurrentStatesStorage <- this$.allCurrentStates
})
setMethodS3(name = "popState", class = "DataFilter", function(this){
this$.selectedState <- this$.selectedStateStorage
this$.allCurrentStates <- this$.allCurrentStatesStorage
})
setMethodS3(name = "notifyView", class = "DataFilter", abstract = TRUE, function(this, observers){})
setMethodS3(name = "notifyNextView", class = "DataFilter", abstract = TRUE, function(this, observers){})
setMethodS3(name = "getIdentifier", class = "DataFilter", abstract = TRUE, function(this){})
setMethodS3(name = "update", class = "DataFilter", function(this){
result <- this$.container$.result
id <- this$.container$getFrameID(this$getIdentifier())
if (id == "ID"){
uniqueStates <- this$.allStates
}
else{
uniqueStates <- unique(result$getSubFrame(id)[[this$getIdentifier()]])
}
this$.allCurrentStates <- c()
for(state in uniqueStates){
this$.allCurrentStates <- c(state, this$.allCurrentStates)
}
if (is.null(this$.selectedState)){
this$.selectedState <- this$.allCurrentStates[1]
}
else if (this$.selectedState %in% this$.allCurrentStates == FALSE){
this$.selectedState <- this$.allCurrentStates[1]
}
})
setMethodS3(name = "updateNext", class = "DataFilter", abstract = TRUE, function(this){})
setMethodS3(name = "display", class = "DataFilter", abstract = TRUE, function(this){})
| 3,190 | epl-1.0 |
adcded77e2798b3a199fb23e99075b1be188c015 | pik-piam/magclass | R/read.magpie.R | #' Read MAgPIE-object from file
#'
#' Reads a MAgPIE-file and converts it to a 3D array of the structure
#' (cells,years,datacolumn)
#'
#' This function reads from 13 different MAgPIE file\_types. "rds" is
#' an R-default format for storing R objects. "cs2" or "cs2b" is the new standard
#' format for cellular data with or without
#' header and the first columns (year,regiospatial) or only (regiospatial),
#' "csv" is the standard format for regional data with or without header
#' and the first columns (year,region,cellnumber) or only (region,cellnumber).
#' "cs3" is a format similar to csv and cs2, but with the difference that it supports
#' multidimensional data in a format which can be read by GAMS, "put" is a
#' newly supported format which is mostly used for the REMIND-MAgPIE coupling.
#' This format is only partly supported at the moment. "asc" is the AsciiGrid
#' format (for example used for ArcGIS data). "nc" is the netCDF format (only
#' "nc" files written by write.magpie can be read). All these variants are
#' read without further specification. "magpie" (.m) and "magpie zipped" (.mz)
#' are new formats developed to allow a less storage intensive management of
#' MAgPIE-data. The only difference between both formats is that .mz is gzipped
#' whereas .m is not compressed. So .mz needs less memory, whereas .m might
#' have a higher compatibility to other languages. \cr\cr Since library version
#' 1.4 read.magpie can also read regional or global MAgPIE csv-files.
#'
#' @param file_name file name including file ending (wildcards are supported).
#' Optionally also the full path can be specified here (instead of splitting it
#' to file\_name and file\_folder)
#' @param file_folder folder the file is located in (alternatively you can also
#' specify the full path in file\_name - wildcards are supported)
#' @param file_type format the data is stored in. Currently 13 formats are
#' available: "rds" (recommended compressed format),
#' "cs2" & "cs2b" (cellular standard MAgPIE format), "csv" (regional standard
#' MAgPIE format), "cs3" (multidimensional format compatible to GAMS), "cs4"
#' (alternative multidimensional format compatible to GAMS, in contrast to cs3
#' it can also handle sparse data), "cs5" (more generalized version of cs4),
#' "csvr", "cs2r", "cs3r" and "cs4r" which are
#' the same formats as the previous mentioned ones with the only difference
#' that they have a REMIND compatible format, "m" (binary MAgPIE format
#' "magpie"), "mz" (compressed binary MAgPIE format "magpie zipped") "put"
#' (format used primarily for the REMIND-MAgPIE coupling) and "asc",
#' (ASCII-Grid format as used by ArcGIS). If file\_type=NULL the file ending
#' of the file\_name is used as format. If the format differs from the formats
#' mentioned, the standard MAgPIE format is assumed.
#' @param as.array Should the input be transformed to an array? This can be
#' useful for regional or global inputs, but all advantages of the magpie-class
#' are lost.
#' @param comment.char character: a character vector of length one containing a
#' single character or an empty string. Use "" to turn off the interpretation
#' of comments altogether. If a comment is found it will be stored in
#' attr(,"comment"). In text files the comment has to be at the beginning of
#' the file in order to be recognized by read.magpie.
#' @param check.names logical. If TRUE then the names of the variables in the
#' data frame are checked to ensure that they are syntactically valid variable
#' names. Same functionality as in read.table.
#' @param ... additional arguments passed to specific read functions (e.g.
#' \code{varname} for specifying the variable to be read in from a multi-variable
#' NCDF file.)
#' @return \item{x}{MAgPIE-object}
#' @note
#'
#' The binary MAgPIE formats .m and .mz have the following content/structure
#' (you only have to care for that if you want to implement
#' read.magpie/write.magpie functions in other languages): \cr \cr
#' [ FileFormatVersion | Current file format version number (currently 6) | integer | 2 Byte ] \cr
#' [ ncharComment | Number of character bytes of the file comment | integer | 4 Byte ] \cr
#' [ nbyteMetadata | Number of bytes of the serialized metadata | integer | 4 Byte ] \cr
#' [ ncharSets | Number of character bytes of all set names + 2 delimiters | integer | 2 Byte] \cr
#' [ nyears | Number of years | integer | 2 Byte ]\cr
#' [ yearList | All years of the dataset (0, if year is not present) | integer | 2*nyears Byte ] \cr
#' [ ncells | Number of cells | integer | 4 Byte ]\cr
#' [ nchar_cell | Number of character bytes of all regionnames + (nreg-1) for delimiters | integer | 4 Byte ] \cr
#' [ cells | Cell names saved as cell1\\cell2 (\\n is the delimiter) | character | 1*nchar_cell Byte ] \cr
#' [ nelem | Total number of data elements | integer | 4 Byte ] \cr
#' [ ncharData | Number of char. bytes of all datanames + (ndata - 1) for delimiters | integer | 4 Byte ] \cr
#' [ datanames | Names saved in the format data1\\ndata2 (\\n as del.) | character | 1*ncharData Byte ] \cr
#' [ data | Data of the MAgPIE array in vectorized form | numeric | 4*nelem Byte ] \cr
#' [ comment | Comment with additional information about the data | character | 1*ncharComment Byte ] \cr
#' [ sets | Set names with \\n as delimiter | character | 1*ncharSets Byte] \cr
#' [ metadata | serialized metadata information | bytes | 1*nbyteMetadata Byte] \cr
#'
#' @author Jan Philipp Dietrich, Stephen Bi, Florian Humpenoeder
#' @seealso \code{"\linkS4class{magpie}"}, \code{\link{write.magpie}}
#' @importFrom methods is new
#' @importFrom utils read.csv capture.output toBibtex
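#' @examples
#' \dontrun{
#' ## illustrative sketch -- file names are hypothetical, not shipped data
#' x <- read.magpie("land_use.cs3")
#' a <- read.magpie("cellular_data.mz", as.array = TRUE)
#' }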
#' @export
read.magpie <- function(file_name, file_folder = "", file_type = NULL, as.array = FALSE, # nolint
comment.char = "*", check.names = FALSE, ...) { # nolint
.buildFileName <- function(fileName, fileFolder) {
fileName <- paste0(fileFolder, fileName)
fileNameOut <- Sys.glob(fileName)
if (length(fileNameOut) > 1) {
fileNameOut <- fileNameOut[1]
warning("File name ", fileName, " is ambiguous, only first alternative is used!")
} else if (length(fileNameOut) == 0) {
stop("File ", fileName, " does not exist!")
}
return(fileNameOut)
}
fileName <- .buildFileName(file_name, file_folder)
.getFileType <- function(fileType, fileName) {
# if file-type is not mentioned file-ending is used as file-type
fileType <- ifelse(is.null(fileType), tail(strsplit(fileName, "\\.")[[1]], 1), fileType)
allowedTypes <- c("rds", "m", "mz", "csv", "cs2", "cs2b", "cs3", "cs4", "cs5", "csvr", "cs2r", "cs3r",
"cs4r", "put", "asc", "nc")
if (!(fileType %in% allowedTypes)) stop("Unknown file type: ", fileType)
return(fileType)
}
fileType <- .getFileType(file_type, fileName)
if (fileType %in% c("m", "mz")) {
readMagpie <- readMagpieMZ(fileName, compressed = (fileType == "mz"))
} else if (fileType == "rds") {
readMagpie <- readRDS(fileName)
if (!is.magpie(readMagpie)) stop("File does not contain a magpie object!")
} else if (fileType %in% c("cs3", "cs3r")) {
x <- read.csv(fileName, comment.char = comment.char, check.names = check.names, stringsAsFactors = TRUE)
datacols <- grep("^dummy\\.?[0-9]*$", colnames(x))
xdimnames <- lapply(x[datacols], function(x) return(as.character(unique(x))))
xdimnames[[length(xdimnames) + 1]] <- colnames(x)[-datacols]
names(xdimnames) <- NULL
tmparr <- array(NA, dim = sapply(xdimnames, length), dimnames = xdimnames) # nolint
for (i in xdimnames[[length(xdimnames)]]) {
j <- sapply(cbind(x[datacols], i), as.character) # nolint
.duplicates_check(j)
tmparr[j] <- x[, i]
}
readMagpie <- as.magpie(tmparr)
if (length(grep("^[A-Z]+_[0-9]+$", getCells(readMagpie))) == ncells(readMagpie)) {
getCells(readMagpie) <- sub("_", ".", getCells(readMagpie))
}
attr(readMagpie, "comment") <- .readComment(fileName, commentChar = comment.char)
} else if (fileType %in% c("cs4", "cs4r")) {
x <- read.csv(fileName, comment.char = comment.char, header = FALSE,
check.names = check.names, stringsAsFactors = TRUE)
readMagpie <- as.magpie(x, tidy = TRUE)
attr(readMagpie, "comment") <- .readComment(fileName, commentChar = comment.char)
} else if (fileType == "cs5") {
.metaExtract <- function(fileName, commentChar) {
comment <- .readComment(fileName, commentChar = commentChar)
m <- grep("^META ", comment)
metadata <- comment[m]
comment <- comment[-m]
pattern <- "^META (.*?):(.*)$"
mNames <- sub(pattern, "\\1", metadata)
mData <- strsplit(sub(pattern, "\\2", metadata), ", ")
names(mData) <- mNames
return(list(comment = comment, metadata = mData))
}
m <- .metaExtract(fileName, comment.char)
x <- read.csv(fileName, comment.char = comment.char, header = FALSE,
check.names = check.names, stringsAsFactors = FALSE)
colnames(x) <- m$metadata$names
readMagpie <- as.magpie(x, tidy = TRUE,
spatial = grep(".spat", m$metadata$dimtype, fixed = TRUE),
temporal = grep(".temp", m$metadata$dimtype, fixed = TRUE),
data = grep(".data", m$metadata$dimtype, fixed = TRUE))
attr(readMagpie, "comment") <- m$comment
} else if (fileType %in% c("asc", "nc", "grd", "tif")) {
if (!requireNamespace("raster", quietly = TRUE)) stop("The package \"raster\" is required!")
if (fileType == "nc") {
if (!requireNamespace("ncdf4", quietly = TRUE)) {
stop("The package \"ncdf4\" is required!")
}
nc <- ncdf4::nc_open(fileName)
var <- names(nc[["var"]])
vdim <- vapply(nc[["var"]], function(x) return(x$ndims), integer(1))
var <- var[vdim > 0]
ncdf4::nc_close(nc)
tmp <- list()
for (v in var) {
suppressSpecificWarnings({
warning <- capture.output(tmp[[v]] <- raster::brick(fileName, varname = v, ...))
}, "partial match of 'group' to 'groups'", fixed = TRUE)
if (length(warning) > 0) {
tmp[[v]] <- NULL
next
}
name <- sub("^X([0-9]*)$", "y\\1", names(tmp[[v]]), perl = TRUE)
if (length(name) == 1 && name == "layer") name <- "y0"
names(tmp[[v]]) <- paste0(name, "..", v)
}
readMagpie <- as.magpie(raster::stack(tmp))
} else {
readMagpie <- as.magpie(raster::brick(fileName, ...))
}
} else {
readMagpie <- readMagpieOther(fileName, fileType, comment.char = comment.char, check.names = check.names)
}
if (as.array) readMagpie <- as.array(readMagpie)[, , ]
return(readMagpie)
}
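## Illustrative sketch (not part of magclass): read the leading header fields
## of a binary .m file exactly as laid out in the @note above. Field order and
## sizes follow the documented layout; byte order is assumed to be the native
## endianness of the writing platform.
readMagpieHeader <- function(fileName) { # hypothetical helper name
  con <- file(fileName, "rb")
  on.exit(close(con))
  list(fileFormatVersion = readBin(con, "integer", n = 1, size = 2),
       ncharComment      = readBin(con, "integer", n = 1, size = 4),
       nbyteMetadata     = readBin(con, "integer", n = 1, size = 4),
       ncharSets         = readBin(con, "integer", n = 1, size = 2),
       nyears            = readBin(con, "integer", n = 1, size = 2))
}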
| 10,761 | lgpl-3.0 |
73c10f480ead65a507ff46fb0ebecc6f359388cd | harmonlab/angio-pulse | R_scripts/polyploidy_analyses.R | require(geiger)
require(phylo)
## USING MLE tree
setwd("~/Documents/tank/angiosperms/angio-pulse") # change to match local directory
dat=get(load("spermatophyta_AToL_639_PL_MEDUSA_BATCH.effectsize.rda"))$summary
# only consider upshifts
dat=dat[which(dat$r>0),]
# get the tree
phy=get(load("spermatophyta_AToL_639_PL_MEDUSA_BATCH.familial_MLE.rda"))$phy
shiftNode=match(rownames(dat), c(phy$tip.label, phy$node.label)) # nodes where shifts occur
shiftNode<-numeric(dim(dat)[1])
for(i in 1:dim(dat)[1]) {
nameToMatch<-rownames(dat)[i]
if(nameToMatch %in% phy$node.label) {
theMatch<-which(phy$node.label==nameToMatch)
shiftNode[i]<-theMatch + length(phy$tip.label) # aligns phy$node.label with numbers in edge matrix
} else {
theMatch<-which(phy$tip.label==nameToMatch)
shiftNode[i]<-theMatch
}
}
# polyploidy events from tank
polyploidy=read.csv("data_files/Polyploidization.Dec2014.LJH.csv", header=TRUE)
# this returns all combinations of alternate placements for
# the polyploidy events - there are 6 x 2 x 3 = 36 combinations
return_all_combinations<-function(polyploidy) {
res<-list()
counter<-1
for(i in 3:8) # loop through WGD 6
for(j in 11:12) # loop through the second ambiguous WGD
for(k in 15:17) { # loop through WGD 9
rowSelections<-c(1, 2, i, 9, 10, j, 13, 14, k)
res[[counter]]<-polyploidy[rowSelections,]
counter <- counter + 1
}
res
}
# get them
wgdComb<-return_all_combinations(polyploidy)
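# sanity check (illustrative): 6 placements for the first ambiguous WGD,
# 2 and 3 for the other two, as stated above
stopifnot(length(wgdComb) == 6 * 2 * 3) # 36 combinations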
# randomly assign n polyploidy events to branches in the tree -
# this is the null model
randomPolyploidyNodes<-function(phy, n) {
sample(phy$edge[,2], size=n, replace=FALSE)
}
# test for exact matches
exactMatches<-numeric(length(wgdComb))
randomExactMatches<-matrix(nrow=1000, ncol=length(wgdComb))
# loop through all combinations
for(i in 1:length(wgdComb)) {
# get the ith one
pp<-wgdComb[[i]]
# left and right tips used to get the nodes
pl<-as.character(pp$left_tip)
pr<-as.character(pp$right_tip)
polyNode<-numeric(length=nrow(pp))
for(j in 1:nrow(pp)) {
if(pl[j]==pr[j]) { # this is a tip
ww<-which(phy$tip.label==pl[j])
polyNode[j]<-ww
} else { # this is an internal node
pair<-c(pl[j], pr[j])
polyNode[j]<-getMRCA(phy, pair)
}
}
exactMatches[i]<-sum(!is.na(match(polyNode, shiftNode))) # count exact matches
for(j in 1:1000) { # repeat for 1000 random assignments of polyploidy
randomPolys<-randomPolyploidyNodes(phy, length(polyNode))
randomExactMatches[j,i] <- sum(!is.na(match(shiftNode, randomPolys)))
}
}
# calculate p-values
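# the (count + 1)/(N + 1) form is the standard permutation p-value
# correction: the observed statistic is counted among the null draws,
# which keeps p > 0 even when no random draw reaches it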
pExact<-numeric(length(exactMatches))
for(i in 1:length(exactMatches)) {
pExact[i]<-(sum(randomExactMatches[,i] >= exactMatches[i])+1)/1001
}
# six are significant
pExact < 0.05
# now repeat using downstream distances
tipDistances<-numeric(length(wgdComb))
randomTipDistances <-matrix(nrow=1000, ncol=length(wgdComb))
library(phytools)
p2<-phy
p2$edge.length[]<-1
lookupDistance<-dist.nodes(p2)
# eliminate tip node WGDs
polyploidy2<-polyploidy[c(-9, -13, -14),]
# this returns all combinations of alternate placements for
# the polyploidy events - there are 6 x 2 x 3 = 36 combinations
# this one deals with polyploidy2, which has tip WGDs left out
return_all_combinations2<-function(polyploidy2) {
res<-list()
counter<-1
for(i in 3:8) # loop through WGD 6
for(j in 10:11) # loop through the second ambiguous WGD
for(k in 12:14) { # loop through WGD 9
rowSelections<-c(1, 2, i, 9, j, k)
res[[counter]]<-polyploidy2[rowSelections,]
counter <- counter + 1
}
res
}
# get them
wgdComb2<-return_all_combinations2(polyploidy2)
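# closeCount: for each polyploidy node in pn, find the nearest shift node
# among its descendants (distances are in node steps, since p2 has unit
# branch lengths) and count how many WGDs fall within `cutoff` steps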
closeCount<-function(pn, sn, cutoff=3, print=F) {
mdists<-numeric(length(pn))
# gather descendants of poly nodes pn
for(i in 1:length(pn)) {
pd<-getDescendants(phy, pn[i])
targets<-pd[which(pd %in% sn)]
targetDistance<-lookupDistance[pn[i],targets]
if(length(targetDistance)!=0) {
mdists[i]<-min(targetDistance)
} else mdists[i]<-Inf
}
testStat<-sum(mdists <= cutoff)
if(print) cat(pn[which(mdists <= cutoff)], "\n")
testStat
}
# test for close matches
testStat<-numeric(length(wgdComb2))
nullDist<-matrix(nrow=1000, ncol=length(wgdComb2))
# loop through all combinations
for(i in 1:length(wgdComb2)) {
# get the ith one
pp<-wgdComb2[[i]]
# left and right tips used to get the nodes
pl<-as.character(pp$left_tip)
pr<-as.character(pp$right_tip)
polyNode<-numeric(length=nrow(pp))
for(j in 1:nrow(pp)) {
if(pl[j]==pr[j]) { # this is a tip
ww<-which(phy$tip.label==pl[j])
polyNode[j]<-ww
} else { # this is an internal node
pair<-c(pl[j], pr[j])
polyNode[j]<-getMRCA(phy, pair)
}
}
testStat[i]<-closeCount(polyNode, shiftNode, print=T)
for(j in 1:1000) { # repeat for 1000 random assignments of polyploidy
randomPolys<-randomPolyploidyNodes(phy, length(polyNode))
nullDist[j,i] <- closeCount(randomPolys, shiftNode)
}
cat(i, "\n")
}
# calculate p-values
pClose<-numeric(length(testStat))
for(i in 1:length(testStat)) {
pClose[i]<-(sum(nullDist[,i] >= testStat[i])+1)/1001
}
pClose
# retest with different cutoff values
allPValues<-matrix(nrow=5, ncol=length(wgdComb2))
for(ct in 1:5) {
testStat<-numeric(length(wgdComb2))
nullDist<-matrix(nrow=1000, ncol=length(wgdComb2))
# loop through all combinations
for(i in 1:length(wgdComb2)) {
# get the ith one
pp<-wgdComb2[[i]]
# left and right tips used to get the nodes
pl<-as.character(pp$left_tip)
pr<-as.character(pp$right_tip)
polyNode<-numeric(length=nrow(pp))
for(j in 1:nrow(pp)) {
if(pl[j]==pr[j]) { # this is a tip
ww<-which(phy$tip.label==pl[j])
polyNode[j]<-ww
} else { # this is an internal node
pair<-c(pl[j], pr[j])
polyNode[j]<-getMRCA(phy, pair)
}
}
testStat[i]<-closeCount(polyNode, shiftNode, cutoff= ct)
for(j in 1:1000) { # repeat for 1000 random assignments of polyploidy
randomPolys<-randomPolyploidyNodes(phy, length(polyNode))
nullDist[j,i] <- closeCount(randomPolys, shiftNode, cutoff= ct)
}
cat(i, " ")
}
# calculate p-values
for(i in 1:length(testStat)) {
allPValues[ct,i]<-(sum(nullDist[,i] >= testStat[i])+1)/1001
}
cat("\n", ct, " is done\n")
}
rownames(allPValues)<-1:5
colnames(allPValues)<-paste("set", 1:36)
write.csv(allPValues, file="allThePValues.csv")
# time distance calculations for table
# these are the crown distances
lookupTimeDistance<-dist.nodes(phy)
p3<-phy
p3$node.label<-NULL
lookupBt<-branching.times(p3)
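# getBranchPairDistance: time separating two branches measured three ways -
# crown-to-crown (midDist), the minimum possible given uncertainty in
# placement along each branch (older crown to younger stem), and the
# maximum possible (older stem to younger crown)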
getBranchPairDistance<-function(phy, node1, node2) {
# crown-to-crown: that's easy
midDist<-lookupTimeDistance[node1, node2]
age1<-lookupBt[which(names(lookupBt)==node1)]
age2<-lookupBt[which(names(lookupBt)==node2)]
#fixing problem when these are tips
if(length(age1)==0) age1<-0
if(length(age2)==0) age2<-0
ww<-which(phy$edge[,2]==node1)
n1anc<-phy$edge[ww,1]
ww<-which(phy$edge[,2]==node2)
n2anc<-phy$edge[ww,1]
ancAge1<-lookupBt[which(names(lookupBt)==n1anc)]
ancAge2<-lookupBt[which(names(lookupBt)==n2anc)]
if(age1 > age2) {
oldCrown<-age1
youngCrown<-age2
oldStem<-ancAge1
youngStem<-ancAge2
} else {
oldCrown<-age2
youngCrown<-age1
oldStem<-ancAge2
youngStem<-ancAge1
}
# minimum possible: old crown to young stem
minDist<-oldCrown - youngStem
# maximum possible: old stem to young crown
maxDist<-oldStem - youngCrown
res<-c(midDist, minDist, maxDist)
names(res)<- c("midDist", "minDist", "maxDist")
res
}
getNodePairDistance<-function(phy, node1, node2) {
nodeDist<-lookupDistance[node1, node2]
age1<-lookupBt[which(names(lookupBt)==node1)]
age2<-lookupBt[which(names(lookupBt)==node2)]
#fixing problem when these are tips
if(length(age1)==0) age1<-0
if(length(age2)==0) age2<-0
ww<-which(phy$edge[,2]==node1)
n1anc<-phy$edge[ww,1]
ww<-which(phy$edge[,2]==node2)
n2anc<-phy$edge[ww,1]
ancAge1<-lookupBt[which(names(lookupBt)==n1anc)]
ancAge2<-lookupBt[which(names(lookupBt)==n2anc)]
if(age1 > age2) {
oldCrown<-age1
youngCrown<-age2
oldStem<-ancAge1
youngStem<-ancAge2
} else {
oldCrown<-age2
youngCrown<-age1
oldStem<-ancAge2
youngStem<-ancAge1
}
# minimum possible: old crown to young stem
minDist<-oldCrown - youngStem
# maximum possible: old stem to young crown
maxDist<-oldStem - youngCrown
res<-c(nodeDist, minDist, maxDist)
names(res)<- c("nodeDist", "minDist", "maxDist")
res
}
# test
gg<-getBranchPairDistance(phy, 379, 381)
allDistances<-matrix(nrow=dim(polyploidy)[1], ncol=5)
colnames(allDistances)<- c("match","midDist", "minDist", "maxDist", "nodeDist")
findClosestShift<-function(pn, snodes) {
pd<-getDescendants(phy, pn)
targets<-pd[which(pd %in% snodes)]
targetDistance<-lookupTimeDistance[pn,targets]
if(length(targetDistance)!=0) {
closeDist<-min(targetDistance)
names(closeDist)<-targets[which(targetDistance==min(targetDistance))]
} else closeDist<-Inf
closeDist
}
for(i in 1:dim(polyploidy)[1]) {
pl<-as.character(polyploidy[i,"left_tip"])
pr<-as.character(polyploidy[i,"right_tip"])
if(pl==pr) { # this is a tip
ww<-which(phy$tip.label==pl)
polyNode<-ww
} else { # this is an internal node
pair<-c(pl, pr)
polyNode<-getMRCA(phy, pair)
}
cs<-findClosestShift(polyNode, shiftNode)
if(cs < Inf) {
targetNode<-as.numeric(names(cs))
thisResult<-getBranchPairDistance(phy, polyNode, targetNode)
thisNodeDis<-lookupDistance[polyNode, targetNode]
allDistances[i,]<-c(targetNode, thisResult,thisNodeDis)
}
else {
allDistances[i,]<-c(NA, Inf, Inf, Inf, NA)
}
}
timeResult<-cbind(polyploidy, allDistances)
write.csv(timeResult, file="timeresult.csv") | 9,554 | gpl-2.0 |
fe632709746c52681c0186fc369b085ca33c8669 | AlDanial/cloc | tests/inputs/utilities.R | # from https://github.com/lme4/lme4
if(getRversion() < "2.15")
paste0 <- function(...) paste(..., sep = '')
### Utilities for parsing and manipulating mixed-model formulas
##' From the result of \code{\link{findbars}} applied to a model formula and
##' and the evaluation frame, create the model matrix, etc. associated with
##' random-effects terms. See the description of the returned value for a
##' detailed list.
##'
##' @title Create Z, Lambda, Lind, etc.
##' @param bars a list of parsed random-effects terms
##' @param fr a model frame in which to evaluate these terms
##' @return a list with components
##' \item{Zt}{transpose of the sparse model matrix for the random effects}
##' \item{Lambdat}{transpose of the sparse relative covariance factor}
##' \item{Lind}{an integer vector of indices determining the mapping of the
##' elements of the \code{theta} to the \code{"x"} slot of \code{Lambdat}}
##' \item{theta}{initial values of the covariance parameters}
##' \item{lower}{lower bounds on the covariance parameters}
##' \item{flist}{list of grouping factors used in the random-effects terms}
##' \item{cnms}{a list of column names of the random effects according to
##' the grouping factors}
##' @importFrom Matrix sparseMatrix rBind drop0
##' @importMethodsFrom Matrix coerce
##' @family utilities
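##' @examples
##' \dontrun{
##' ## sketch using lme4's sleepstudy data
##' form <- Reaction ~ Days + (Days | Subject)
##' fr <- model.frame(subbars(form), lme4::sleepstudy)
##' rt <- mkReTrms(findbars(form), fr)
##' names(rt)
##' }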
##' @export
mkReTrms <- function(bars, fr) {
if (!length(bars))
stop("No random effects terms specified in formula")
stopifnot(is.list(bars), vapply(bars, is.language, NA),
inherits(fr, "data.frame"))
names(bars) <- barnames(bars)
term.names <- unlist(lapply(bars, function(x) paste(deparse(x),collapse=" ")))
## auxiliary {named, for easier inspection}:
mkBlist <- function(x) {
frloc <- fr
## convert grouping variables to factors as necessary
## TODO: variables that are *not* in the data frame are
## not converted -- these could still break, e.g. if someone
## tries to use the : operator
for (i in all.vars(x[[3]])) {
if (!is.null(frloc[[i]])) frloc[[i]] <- factor(frloc[[i]])
}
if (is.null(ff <- tryCatch(eval(substitute(factor(fac),
list(fac = x[[3]])), frloc),
error=function(e) NULL)))
stop("couldn't evaluate grouping factor ",
deparse(x[[3]])," within model frame:",
" try adding grouping factor to data ",
"frame explicitly if possible")
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ",
deparse(x[[3]]))
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute( ~ foo, list(foo = x[[2]]))), frloc)
nc <- ncol(mm)
nseq <- seq_len(nc)
sm <- as(ff, "sparseMatrix")
if (nc > 1)
sm <- do.call(rBind, lapply(nseq, function(i) sm))
## hack for NA values contained in factor (FIXME: test elsewhere for consistency?)
sm@x[] <- t(mm[!is.na(ff),])
## When nc > 1 switch the order of the rows of sm
## so the random effects for the same level of the
## grouping factor are adjacent.
if (nc > 1)
sm <- sm[as.vector(matrix(seq_len(nc * nl),
ncol = nl, byrow = TRUE)),]
list(ff = ff, sm = sm, nl = nl, cnms = colnames(mm))
}
blist <- lapply(bars, mkBlist)
nl <- vapply(blist, `[[`, 0L, "nl") # no. of levels per term
# (in lmer jss: \ell_i)
## order terms stably by decreasing number of levels in the factor
if (any(diff(nl) > 0)) {
ord <- rev(order(nl))
blist <- blist[ord]
nl <- nl[ord]
}
Ztlist <- lapply(blist, "[[", "sm")
Zt <- do.call(rBind, Ztlist)
names(Ztlist) <- term.names
q <- nrow(Zt)
## Create and install Lambdat, Lind, etc. This must be done after
## any potential reordering of the terms.
cnms <- lapply(blist, "[[", "cnms") # list of column names of the
# model matrix per term
nc <- vapply(cnms, length, 0L) # no. of columns per term
# (in lmer jss: p_i)
nth <- as.integer((nc * (nc+1))/2) # no. of parameters per term
# (in lmer jss: ??)
nb <- nc * nl # no. of random effects per term
# (in lmer jss: q_i)
stopifnot(sum(nb) == q)
boff <- cumsum(c(0L, nb)) # offsets into b
thoff <- cumsum(c(0L, nth)) # offsets into theta
### FIXME: should this be done with cBind and avoid the transpose
### operator? In other words should Lambdat be generated directly
### instead of generating Lambda first then transposing?
Lambdat <-
t(do.call(sparseMatrix,
do.call(rBind,
lapply(seq_along(blist), function(i)
{
mm <- matrix(seq_len(nb[i]), ncol = nc[i],
byrow = TRUE)
dd <- diag(nc[i])
ltri <- lower.tri(dd, diag = TRUE)
ii <- row(dd)[ltri]
jj <- col(dd)[ltri]
dd[cbind(ii, jj)] <- seq_along(ii) # FIXME: this line unnecessary?
data.frame(i = as.vector(mm[, ii]) + boff[i],
j = as.vector(mm[, jj]) + boff[i],
x = as.double(rep.int(seq_along(ii),
rep.int(nl[i], length(ii))) +
thoff[i]))
}))))
thet <- numeric(sum(nth))
ll <- list(Zt=Matrix::drop0(Zt), theta=thet, Lind=as.integer(Lambdat@x),
Gp=unname(c(0L, cumsum(nb))))
## lower bounds on theta elements are 0 if on diagonal, else -Inf
ll$lower <- -Inf * (thet + 1)
ll$lower[unique(diag(Lambdat))] <- 0
ll$theta[] <- is.finite(ll$lower) # initial values of theta are 0 off-diagonal, 1 on
Lambdat@x[] <- ll$theta[ll$Lind] # initialize elements of Lambdat
ll$Lambdat <- Lambdat
# massage the factor list
fl <- lapply(blist, "[[", "ff")
# check for repeated factors
fnms <- names(fl)
if (length(fnms) > length(ufn <- unique(fnms))) {
fl <- fl[match(ufn, fnms)]
asgn <- match(fnms, ufn)
} else asgn <- seq_along(fl)
names(fl) <- ufn
fl <- do.call(data.frame, c(fl, check.names = FALSE))
attr(fl, "assign") <- asgn
ll$flist <- fl
ll$cnms <- cnms
ll$Ztlist <- Ztlist
ll
} ## {mkReTrms}
##' Create an lmerResp, glmResp or nlsResp instance
##'
##' @title Create an lmerResp, glmResp or nlsResp instance
##' @param fr a model frame
##' @param REML logical scalar, value of REML for an lmerResp instance
##' @param family the optional glm family (glmResp only)
##' @param nlenv the nonlinear model evaluation environment (nlsResp only)
##' @param nlmod the nonlinear model function (nlsResp only)
##' @param ... where to look for response information if \code{fr} is missing.
##' Can contain a model response, \code{y}, offset, \code{offset}, and weights,
##' \code{weights}.
##' @return an lmerResp or glmResp or nlsResp instance
##' @family utilities
##' @export
mkRespMod <- function(fr, REML=NULL, family = NULL, nlenv = NULL, nlmod = NULL, ...) {
if(!missing(fr)){
y <- model.response(fr)
offset <- model.offset(fr)
weights <- model.weights(fr)
N <- n <- nrow(fr)
etastart_update <- model.extract(fr, "etastart")
} else {
fr <- list(...)
y <- fr$y
N <- n <- if(is.matrix(y)) nrow(y) else length(y)
offset <- fr$offset
weights <- fr$weights
etastart_update <- fr$etastart
}
## FIXME: may need to add X, or pass it somehow, if we want to use glm.fit
##y <- model.response(fr)
if(length(dim(y)) == 1) {
## avoid problems with 1D arrays, but keep names
nm <- rownames(y)
dim(y) <- NULL
if(!is.null(nm)) names(y) <- nm
}
rho <- new.env()
rho$y <- if (is.null(y)) numeric(0) else y
if (!is.null(REML)) rho$REML <- REML
rho$etastart <- fr$etastart
rho$mustart <- fr$mustart
##N <- n <- nrow(fr)
if (!is.null(nlenv)) {
stopifnot(is.language(nlmod),
is.environment(nlenv),
is.numeric(val <- eval(nlmod, nlenv)),
length(val) == n,
## FIXME? Restriction, not present in ole' nlme():
is.matrix(gr <- attr(val, "gradient")),
mode(gr) == "numeric",
nrow(gr) == n,
!is.null(pnames <- colnames(gr)))
N <- length(gr)
rho$mu <- as.vector(val)
rho$sqrtXwt <- as.vector(gr)
rho$gam <-
unname(unlist(lapply(pnames,
function(nm) get(nm, envir=nlenv))))
}
if (!is.null(offset)) {
if (length(offset) == 1L) offset <- rep.int(offset, N)
stopifnot(length(offset) == N)
rho$offset <- unname(offset)
} else rho$offset <- rep.int(0, N)
if (!is.null(weights)) {
stopifnot(length(weights) == n, all(weights >= 0))
rho$weights <- unname(weights)
} else rho$weights <- rep.int(1, n)
if (is.null(family)) {
if (is.null(nlenv)) return(do.call(lmerResp$new, as.list(rho)))
return(do.call(nlsResp$new,
c(list(nlenv=nlenv,
nlmod=substitute(~foo, list(foo=nlmod)),
pnames=pnames), as.list(rho))))
}
stopifnot(inherits(family, "family"))
## need weights for initializing evaluation
rho$nobs <- n
## allow trivial objects, e.g. for simulation
if (length(y)>0) eval(family$initialize, rho)
family$initialize <- NULL # remove clutter from str output
ll <- as.list(rho)
ans <- do.call("new", c(list(Class="glmResp", family=family),
ll[setdiff(names(ll), c("m", "nobs", "mustart"))]))
if (length(y)>0) ans$updateMu(if (!is.null(es <- etastart_update)) es else
family$linkfun(get("mustart", rho)))
ans
}
##' From the right hand side of a formula for a mixed-effects model,
##' determine the pairs of expressions that are separated by the
##' vertical bar operator. Also expand the slash operator in grouping
##' factor expressions and expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Determine random-effects expressions from a formula
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return pairs of expressions that were separated by vertical bars
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @example
##' findbars(f1 <- Reaction ~ Days + (Days|Subject))
##' ## => list( Days | Subject )
##' findbars(y ~ Days + (1|Subject) + (0+Days|Subject))
##' ## => list of length 2: list ( 1 | Subject , 0+Days|Subject)
##' findbars(~ 1 + (1|batch/cask))
##' ## => list of length 2: list ( 1 | cask:batch , 1 | batch)
##' identical(findbars(~ 1 + (Days || Subject)),
##' findbars(~ 1 + (1|Subject) + (0+Days|Subject)))
##' \dontshow{
##' stopifnot(identical(findbars(f1),
##' list(expression(Days | Subject)[[1]])))
##' }
##' @family utilities
##' @keywords models utilities
##' @export
findbars <- function(term)
{
## Recursive function applied to individual terms
fb <- function(term)
{
if (is.name(term) || !is.language(term)) return(NULL)
if (term[[1]] == as.name("(")) return(fb(term[[2]]))
stopifnot(is.call(term))
if (term[[1]] == as.name('|')) return(term)
if (length(term) == 2) return(fb(term[[2]]))
c(fb(term[[2]]), fb(term[[3]]))
}
## Expand any slashes in the grouping factors returned by fb
expandSlash <- function(bb)
{
## Create the interaction terms for nested effects
makeInteraction <- function(x)
{
if (length(x) < 2) return(x)
trm1 <- makeInteraction(x[[1]])
trm11 <- if(is.list(trm1)) trm1[[1]] else trm1
list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1)
}
## Return the list of '/'-separated terms
slashTerms <- function(x)
{
if (!("/" %in% all.names(x))) return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
}
if (!is.list(bb))
expandSlash(list(bb))
else
unlist(lapply(bb, function(x) {
if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]])))
## lapply(unlist(...)) - unlist returns a flattened list
lapply(unlist(makeInteraction(trms)),
function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))
else x
}))
}## {expandSlash}
modterm <- expandDoubleVerts(
if(is(term, "formula")) term[[length(term)]] else term)
expandSlash(fb(modterm))
}
##' From the right hand side of a formula for a mixed-effects model,
##' expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Expand terms with \code{'||'} notation into separate \code{'|'} terms
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return the modified term
##' @family utilities
##' @keywords models utilities
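##' @examples
##' ## (x || g) expands into independent terms (1 | g) + (0 + x | g)
##' expandDoubleVerts( y ~ x + (x || g) )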
##' @export
expandDoubleVerts <- function(term)
{
expandDoubleVert <- function(term) {
frml <- formula(paste0("~", deparse(term[[2]])))
## need term.labels not all.vars to capture interactions too:
newtrms <- paste0("0+", attr(terms(frml), "term.labels"))
if(attr(terms(frml), "intercept")!=0)
newtrms <- c("1", newtrms)
as.formula(paste("~(",
paste(vapply(newtrms, function(trm)
paste0(trm, "|", deparse(term[[3]])), ""),
collapse=")+("), ")"))[[2]]
}
if (!is.name(term) && is.language(term)) {
if (term[[1]] == as.name("(")) {
term[[2]] <- expandDoubleVerts(term[[2]])
}
stopifnot(is.call(term))
if (term[[1]] == as.name('||'))
return( expandDoubleVert(term) )
## else :
term[[2]] <- expandDoubleVerts(term[[2]])
if (length(term) != 2) {
if(length(term) == 3)
term[[3]] <- expandDoubleVerts(term[[3]])
}
}
term
}
##' Remove the random-effects terms from a mixed-effects formula,
##' thereby producing the fixed-effects formula.
##'
##' @title Omit terms separated by vertical bars in a formula
##' @param term the right-hand side of a mixed-model formula
##' @return the fixed-effects part of the formula
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' nobars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
nobars <- function(term)
{
if (!any(c('|','||') %in% all.names(term))) return(term)
if (is.call(term) && term[[1]] == as.name('|')) return(NULL)
if (is.call(term) && term[[1]] == as.name('||')) return(NULL)
if (length(term) == 2) {
nb <- nobars(term[[2]])
if (is.null(nb)) return(NULL)
term[[2]] <- nb
return(term)
}
nb2 <- nobars(term[[2]])
nb3 <- nobars(term[[3]])
if (is.null(nb2)) return(nb3)
if (is.null(nb3)) return(nb2)
term[[2]] <- nb2
term[[3]] <- nb3
term
}
##' Substitute the '+' function for the '|' and '||' function in a mixed-model
##' formula. This provides a formula suitable for the current
##' model.frame function.
##'
##' @title "Sub[stitute] Bars"
##' @param term a mixed-model formula
##' @return the formula with all | and || operators replaced by +
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' subbars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days + (Days + Subject)
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
subbars <- function(term)
{
if (is.name(term) || !is.language(term)) return(term)
if (length(term) == 2) {
term[[2]] <- subbars(term[[2]])
return(term)
}
stopifnot(length(term) >= 3)
if (is.call(term) && term[[1]] == as.name('|'))
term[[1]] <- as.name('+')
if (is.call(term) && term[[1]] == as.name('||'))
term[[1]] <- as.name('+')
for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
term
}
##' @param bars result of findbars
barnames <- function(bars) {
unlist(lapply(bars, function(x) deparse(x[[3]])))
}
##' Does every level of f1 occur in conjunction with exactly one level
##' of f2? The function is based on converting a triplet sparse matrix
##' to a compressed column-oriented form in which the nesting can be
##' quickly evaluated.
##'
##' @title Is f1 nested within f2?
##'
##' @param f1 factor 1
##' @param f2 factor 2
##'
##' @return TRUE if factor 1 is nested within factor 2
##' @examples
##' with(Pastes, isNested(cask, batch)) ## => FALSE
##' with(Pastes, isNested(sample, batch)) ## => TRUE
##' @export
isNested <- function(f1, f2)
{
f1 <- as.factor(f1)
f2 <- as.factor(f2)
stopifnot(length(f1) == length(f2))
k <- length(levels(f1))
sm <- as(new("ngTMatrix",
i = as.integer(f2) - 1L,
j = as.integer(f1) - 1L,
Dim = c(length(levels(f2)), k)),
"CsparseMatrix")
all(sm@p[2:(k+1L)] - sm@p[1:k] <= 1L)
}
subnms <- function(form, nms) {
## Recursive function applied to individual terms
sbnm <- function(term)
{
if (is.name(term)) {
if (any(term == nms)) 0 else term
} else switch(length(term),
term, ## 1
{ ## 2
term[[2]] <- sbnm(term[[2]])
term
},
{ ## 3
term[[2]] <- sbnm(term[[2]])
term[[3]] <- sbnm(term[[3]])
term
})
}
sbnm(form)
}
## Check for a constant term (a literal 1) in an expression
##
## In the mixed-effects part of a nonlinear model formula, a constant
## term is not meaningful because every term must be relative to a
## nonlinear model parameter. This function recursively checks the
## expressions in the formula for a constant, calling stop() if
## such a term is encountered.
## @title Check for constant terms.
## @param expr an expression
## @return NULL. The function is executed for its side effect.
chck1 <- function(expr) {
if ((le <- length(expr)) == 1) {
if (is.numeric(expr) && expr == 1)
stop("1 is not meaningful in a nonlinear model formula")
return()
} else
for (j in seq_len(le)[-1]) Recall(expr[[j]])
}
## ---> ../man/nlformula.Rd --- Manipulate a nonlinear model formula
##' @param mc matched call from the caller, with arguments 'formula','start',...
##' @return a list with components "respMod", "frame", "X", "reTrms"
nlformula <- function(mc) {
start <- eval(mc$start, parent.frame(2L))
if (is.numeric(start)) start <- list(nlpars = start)
stopifnot(is.numeric(nlpars <- start$nlpars),
vapply(nlpars, length, 0L) == 1L,
length(pnames <- names(nlpars)) == length(nlpars),
length(form <- as.formula(mc$formula)) == 3L,
is(nlform <- eval(form[[2]]), "formula"),
pnames %in%
(av <- all.vars(nlmod <- as.call(nlform[[lnl <- length(nlform)]]))))
## MM{FIXME}: fortune(106) even twice in here!
nlform[[lnl]] <- parse(text= paste(setdiff(all.vars(form), pnames), collapse=' + '))[[1]]
nlform <- eval(nlform)
environment(nlform) <- environment(form)
m <- match(c("data", "subset", "weights", "na.action", "offset"),
names(mc), 0)
mc <- mc[c(1, m)]
mc$drop.unused.levels <- TRUE
mc[[1]] <- as.name("model.frame")
mc$formula <- nlform
fr <- eval(mc, parent.frame(2L))
n <- nrow(fr)
nlenv <- list2env(fr, parent=parent.frame(2L))
lapply(pnames, function(nm) nlenv[[nm]] <- rep.int(nlpars[[nm]], n))
respMod <- mkRespMod(fr, nlenv=nlenv, nlmod=nlmod)
chck1(meform <- form[[3L]])
pnameexpr <- parse(text=paste(pnames, collapse='+'))[[1]]
nb <- nobars(meform)
fe <- eval(substitute(~ 0 + nb + pnameexpr))
environment(fe) <- environment(form)
    frE <- do.call(rbind, lapply(seq_along(nlpars), function(i) fr)) # rbind length(nlpars) copies of the frame
for (nm in pnames) # convert these variables in fr to indicators
frE[[nm]] <- as.numeric(rep(nm == pnames, each = n))
X <- model.matrix(fe, frE)
rownames(X) <- NULL
reTrms <- mkReTrms(lapply(findbars(meform),
function(expr) {
expr[[2]] <- substitute(0+foo, list(foo=expr[[2]]))
expr
}), frE)
list(respMod=respMod, frame=fr, X=X, reTrms=reTrms, pnames=pnames)
} ## {nlformula}
##--> ../man/mkMerMod.Rd ---Create a merMod object
##' @param rho the environment of the objective function
##' @param opt the value returned by the optimizer
##' @param reTrms reTrms list from the calling function
mkMerMod <- function(rho, opt, reTrms, fr, mc, lme4conv=NULL) {
if(missing(mc)) mc <- match.call()
stopifnot(is.environment(rho),
is(pp <- rho$pp, "merPredD"),
is(resp <- rho$resp, "lmResp"),
is.list(opt), "par" %in% names(opt),
c("conv","fval") %in% substr(names(opt),1,4), ## "conv[ergence]", "fval[ues]"
is.list(reTrms), c("flist", "cnms", "Gp", "lower") %in% names(reTrms),
length(rcl <- class(resp)) == 1)
n <- nrow(pp$V)
p <- ncol(pp$V)
dims <- c(N=nrow(pp$X), n=n, p=p, nmp=n-p,
nth=length(pp$theta), q=nrow(pp$Zt),
nAGQ=rho$nAGQ,
compDev=rho$compDev,
## 'use scale' in the sense of whether dispersion parameter should
## be reported/used (*not* whether theta should be scaled by sigma)
useSc=(rcl != "glmResp" ||
!resp$family$family %in% c("poisson","binomial")),
reTrms=length(reTrms$cnms),
spFe=0L,
REML=if (rcl=="lmerResp") resp$REML else 0L,
GLMM=(rcl=="glmResp"),
NLMM=(rcl=="nlsResp"))
storage.mode(dims) <- "integer"
fac <- as.numeric(rcl != "nlsResp")
if (trivial.y <- (length(resp$y)==0)) {
## trivial model
sqrLenU <- wrss <- pwrss <- NA
} else {
sqrLenU <- pp$sqrL(fac)
wrss <- resp$wrss()
pwrss <- wrss + sqrLenU
}
weights <- resp$weights
beta <- pp$beta(fac)
#sigmaML <- pwrss/sum(weights)
sigmaML <- pwrss/n
if (rcl != "lmerResp") {
pars <- opt$par
if (length(pars) > length(pp$theta)) beta <- pars[-(seq_along(pp$theta))]
}
cmp <- c(ldL2=pp$ldL2(), ldRX2=pp$ldRX2(), wrss=wrss,
ussq=sqrLenU, pwrss=pwrss,
drsum=if (rcl=="glmResp" && !trivial.y) resp$resDev() else NA,
REML=if (rcl=="lmerResp" && resp$REML != 0L && !trivial.y)
opt$fval else NA,
## FIXME: construct 'REML deviance' here?
dev=if (rcl=="lmerResp" && resp$REML != 0L || trivial.y) NA else opt$fval,
sigmaML=sqrt(unname(if (!dims["useSc"] || trivial.y) NA else sigmaML)),
sigmaREML=sqrt(unname(if (rcl!="lmerResp" || trivial.y) NA else sigmaML*(dims['n']/dims['nmp']))),
tolPwrss=rho$tolPwrss)
## TODO: improve this hack to get something in frame slot (maybe need weights, etc...)
if(missing(fr)) fr <- data.frame(resp$y)
new(switch(rcl, lmerResp="lmerMod", glmResp="glmerMod", nlsResp="nlmerMod"),
call=mc, frame=fr, flist=reTrms$flist, cnms=reTrms$cnms,
Gp=reTrms$Gp, theta=pp$theta, beta=beta,
u=if (trivial.y) rep(NA_real_,nrow(pp$Zt)) else pp$u(fac),
lower=reTrms$lower, devcomp=list(cmp=cmp, dims=dims),
pp=pp, resp=resp,
optinfo = list (optimizer= attr(opt,"optimizer"),
control = attr(opt,"control"),
derivs = attr(opt,"derivs"),
conv = list(opt=opt$conv, lme4=lme4conv),
feval = if (is.null(opt$feval)) NA else opt$feval,
warnings = attr(opt,"warnings"), val = opt$par)
)
}## {mkMerMod}
## generic argument checking
## 'type': name of calling function ("glmer", "lmer", "nlmer")
##
checkArgs <- function(type,...) {
l... <- list(...)
if (isTRUE(l...[["sparseX"]])) warning("sparseX = TRUE has no effect at present")
## '...' handling up front, safe-guarding against typos ("familiy") :
if(length(l... <- list(...))) {
if (!is.null(l...[["family"]])) { # call glmer if family specified
## we will only get here if 'family' is *not* in the arg list
warning("calling lmer with family() is deprecated: please use glmer() instead")
type <- "glmer"
}
## Check for method argument which is no longer used
## (different meanings/hints depending on glmer vs lmer)
if (!is.null(method <- l...[["method"]])) {
msg <- paste("Argument", sQuote("method"), "is deprecated.")
if (type=="lmer") msg <- paste(msg,"Use the REML argument to specify ML or REML estimation.")
if (type=="glmer") msg <- paste(msg,"Use the nAGQ argument to specify Laplace (nAGQ=1) or adaptive",
"Gauss-Hermite quadrature (nAGQ>1). PQL is no longer available.")
warning(msg)
l... <- l...[names(l...) != "method"]
}
if(length(l...)) {
warning("extra argument(s) ",
paste(sQuote(names(l...)), collapse=", "),
" disregarded")
}
}
}
## check formula and data: return an environment suitable for evaluating
## the formula.
## (1) if data is specified, return it
## (2) otherwise, if formula has an environment, use it
## (3) otherwise [e.g. if formula was passed as a string], try to use parent.frame(2)
## if #3 is true *and* the user is doing something tricky with nested functions,
## this may fail ...
checkFormulaData <- function(formula,data,checkLHS=TRUE,debug=FALSE) {
dataName <- deparse(substitute(data))
missingData <- inherits(tryCatch(eval(data), error=function(e)e), "error")
## data not found (this *should* only happen with garbage input,
## OR when strings used as formulae -> drop1/update/etc.)
##
## alternate attempt (fails)
##
## ff <- sys.frames()
## ex <- substitute(data)
## ii <- rev(seq_along(ff))
## for(i in ii) {
## ex <- eval(substitute(substitute(x, env=sys.frames()[[n]]),
## env = list(x = ex, n=i)))
## }
## origName <- deparse(ex)
## missingData <- !exists(origName)
## (!dataName=="NULL" && !exists(dataName))
if (missingData) {
varex <- function(v,env) exists(v,envir=env,inherits=FALSE)
allvars <- all.vars(as.formula(formula))
allvarex <- function(vvec=allvars,...) { all(sapply(vvec,varex,...)) }
if (allvarex(env=(ee <- environment(formula)))) {
stop("'data' not found, but variables found in environment of formula: ",
"try specifying 'formula' as a formula rather ",
"than a string in the original model")
} else stop("'data' not found, and some variables missing from formula environment")
} else {
if (is.null(data)) {
if (!is.null(ee <- environment(formula))) {
## use environment of formula
denv <- ee
} else {
## e.g. no environment, e.g. because formula is a character vector
## parent.frame(2L) works because [g]lFormula (our calling environment)
## has been called within [g]lmer with env=parent.frame(1L)
## If you call checkFormulaData in some other bizarre way such that
## parent.frame(2L) is *not* OK, you deserve what you get
## calling checkFormulaData directly from the global
## environment should be OK, since trying to go up beyond the global
## environment keeps bringing you back to the global environment ...
denv <- parent.frame(2L)
}
} else {
## data specified
denv <- list2env(data)
}
}
## FIXME: set enclosing environment of denv to environment(formula), or parent.frame(2L) ?
if (debug) {
cat("Debugging parent frames in checkFormulaData:\n")
## find global environment -- could do this with sys.nframe() ?
glEnv <- 1
while (!identical(parent.frame(glEnv),.GlobalEnv)) {
glEnv <- glEnv+1
}
## where are vars?
for (i in 1:glEnv) {
OK <- allvarex(env=parent.frame(i))
cat("vars exist in parent frame ",i)
if (i==glEnv) cat(" (global)")
cat(" ",OK,"\n")
}
cat("vars exist in env of formula ",allvarex(env=denv),"\n")
} ## if (debug)
stopifnot(!checkLHS || length(as.formula(formula,env=denv)) == 3) ## check for two-sided formula
return(denv)
}
## checkFormulaData <- function(formula,data) {
## ee <- environment(formula)
## if (is.null(ee)) {
## ee <- parent.frame(2)
## }
## if (missing(data)) data <- ee
## stopifnot(length(as.formula(formula,env=as.environment(data))) == 3)
## return(data)
## }
##' Not exported; for tests (and examples) that can be slow;
##' Use if(lme4:::testLevel() >= 1.) ..... see ../README.md
testLevel <- function()
if(nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")) &&
is.finite(s <- as.numeric(s))) s else 1
##' General conditional variance-covariance matrix
##'
##' Experimental function for estimating the variance-covariance
##' matrix of the random effects, conditional on the observed data
##' and at the (RE)ML estimate of the fixed effects and covariance
##' parameters. Applicable for any Lambda matrix, but slower than
##' other block-by-block methods.
##' Not exported.
##'
##' TODO:
##' (1) Write up a quick note on the theory (e.g. Laplace approximation).
##' (2) Figure out how to convert between full q-by-q matrix, and
##' the format currently in the postVar attributes of the
##' elements of the output of ranef.
##' (3) Test.
##' (4) Do we need to think carefully about the differences
##' between REML and ML, beyond just multiplying by a different
##' sigma^2 estimate?
##'
##' @param object \code{merMod} object
##' @return Sparse covariance matrix
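##' @examples
##' \dontrun{
##' ## sketch: full q-by-q conditional covariance of a fitted model
##' fm <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
##' cv <- condVar(fm)
##' }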
condVar <- function(object) {
s2 <- sigma(object)^2
Lamt <- getME(object,"Lambdat")
L <- getME(object,"L")
## never do it this way! fortune("SOOOO")
#V <- solve(L, system = "A")
#V <- chol2inv(L)
#s2*crossprod(Lamt, V) %*% Lamt
LL <- solve(L, Lamt, system = "A")
s2 * crossprod(Lamt, LL)
}
mkMinimalData <- function(formula) {
vars <- all.vars(formula)
nVars <- length(vars)
matr <- matrix(0, 2, nVars)
data <- as.data.frame(matr)
setNames(data, vars)
}
##' Make template for mixed model parameters
mkParsTemplate <- function(formula, data){
if(missing(data)) data <- mkMinimalData(formula)
mfRanef <- model.frame( subbars(formula), data)
mmFixef <- model.matrix(nobars(formula) , data)
reTrms <- mkReTrms(findbars(formula), mfRanef)
cnms <- reTrms$cnms
thetaNamesList <- mapply(mkPfun(), names(cnms), cnms)
thetaNames <- unlist(thetaNamesList)
betaNames <- colnames(mmFixef)
list(beta = setNames(numeric(length( betaNames)), betaNames),
theta = setNames(reTrms$theta, thetaNames),
sigma = 1)
}
##' Make template for mixed model data
##'
##' Useful for simulating balanced designs and for
##' getting started on unbalanced simulations
##'
##' @param formula formula
##' @param data data -- not necessary
##' @param nGrps number of groups per grouping factor
##' @param rfunc function for generating covariate data
##' @param ... additional parameters for rfunc
mkDataTemplate <- function(formula, data,
nGrps = 2, nPerGrp = 1,
rfunc = NULL, ...){
if(missing(data)) data <- mkMinimalData(formula)
grpFacNames <- unique(barnames(findbars(formula)))
varNames <- all.vars(formula)
covariateNames <- setdiff(varNames, grpFacNames)
nGrpFac <- length(grpFacNames)
nCov <- length(covariateNames)
grpFac <- gl(nGrps, nPerGrp)
grpDat <- expand.grid(replicate(nGrpFac, grpFac, simplify = FALSE))
colnames(grpDat) <- grpFacNames
nObs <- nrow(grpDat)
if(is.null(rfunc)) rfunc <- function(n, ...) rep(0, n)
params <- c(list(nObs), list(...))
covDat <- as.data.frame(replicate(nCov, do.call(rfunc, params),
simplify = FALSE))
colnames(covDat) <- covariateNames
cbind(grpDat, covDat)
}
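## hedged usage sketch for the templates above (internal helpers):
## str(mkDataTemplate(y ~ x + (x | g), nGrps = 3, nPerGrp = 2, rfunc = rnorm))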
| 33,475 | gpl-2.0 |
fe632709746c52681c0186fc369b085ca33c8669 | richarddavis/cloc | testcode/utilities.R | # from https://github.com/lme4/lme4
if(getRversion() < "2.15")
paste0 <- function(...) paste(..., sep = '')
### Utilities for parsing and manipulating mixed-model formulas
##' From the result of \code{\link{findbars}} applied to a model formula and
##' and the evaluation frame, create the model matrix, etc. associated with
##' random-effects terms. See the description of the returned value for a
##' detailed list.
##'
##' @title Create Z, Lambda, Lind, etc.
##' @param bars a list of parsed random-effects terms
##' @param fr a model frame in which to evaluate these terms
##' @return a list with components
##' \item{Zt}{transpose of the sparse model matrix for the random effects}
##' \item{Lambdat}{transpose of the sparse relative covariance factor}
##' \item{Lind}{an integer vector of indices determining the mapping of the
##' elements of the \code{theta} to the \code{"x"} slot of \code{Lambdat}}
##' \item{theta}{initial values of the covariance parameters}
##' \item{lower}{lower bounds on the covariance parameters}
##' \item{flist}{list of grouping factors used in the random-effects terms}
##' \item{cnms}{a list of column names of the random effects according to
##' the grouping factors}
##' @importFrom Matrix sparseMatrix rBind drop0
##' @importMethodsFrom Matrix coerce
##' @family utilities
##' @export
mkReTrms <- function(bars, fr) {
if (!length(bars))
stop("No random effects terms specified in formula")
stopifnot(is.list(bars), vapply(bars, is.language, NA),
inherits(fr, "data.frame"))
names(bars) <- barnames(bars)
term.names <- unlist(lapply(bars, function(x) paste(deparse(x),collapse=" ")))
## auxiliary {named, for easier inspection}:
mkBlist <- function(x) {
frloc <- fr
## convert grouping variables to factors as necessary
## TODO: variables that are *not* in the data frame are
## not converted -- these could still break, e.g. if someone
## tries to use the : operator
for (i in all.vars(x[[3]])) {
if (!is.null(frloc[[i]])) frloc[[i]] <- factor(frloc[[i]])
}
if (is.null(ff <- tryCatch(eval(substitute(factor(fac),
list(fac = x[[3]])), frloc),
error=function(e) NULL)))
stop("couldn't evaluate grouping factor ",
deparse(x[[3]])," within model frame:",
" try adding grouping factor to data ",
"frame explicitly if possible")
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ",
deparse(x[[3]]))
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute( ~ foo, list(foo = x[[2]]))), frloc)
nc <- ncol(mm)
nseq <- seq_len(nc)
sm <- as(ff, "sparseMatrix")
if (nc > 1)
sm <- do.call(rBind, lapply(nseq, function(i) sm))
## hack for NA values contained in factor (FIXME: test elsewhere for consistency?)
sm@x[] <- t(mm[!is.na(ff),])
## When nc > 1 switch the order of the rows of sm
## so the random effects for the same level of the
## grouping factor are adjacent.
if (nc > 1)
sm <- sm[as.vector(matrix(seq_len(nc * nl),
ncol = nl, byrow = TRUE)),]
list(ff = ff, sm = sm, nl = nl, cnms = colnames(mm))
}
blist <- lapply(bars, mkBlist)
nl <- vapply(blist, `[[`, 0L, "nl") # no. of levels per term
# (in lmer jss: \ell_i)
## order terms stably by decreasing number of levels in the factor
if (any(diff(nl) > 0)) {
ord <- rev(order(nl))
blist <- blist[ord]
nl <- nl[ord]
}
Ztlist <- lapply(blist, "[[", "sm")
Zt <- do.call(rBind, Ztlist)
names(Ztlist) <- term.names
q <- nrow(Zt)
## Create and install Lambdat, Lind, etc. This must be done after
## any potential reordering of the terms.
cnms <- lapply(blist, "[[", "cnms") # list of column names of the
# model matrix per term
nc <- vapply(cnms, length, 0L) # no. of columns per term
# (in lmer jss: p_i)
nth <- as.integer((nc * (nc+1))/2) # no. of parameters per term
# (in lmer jss: ??)
nb <- nc * nl # no. of random effects per term
# (in lmer jss: q_i)
stopifnot(sum(nb) == q)
boff <- cumsum(c(0L, nb)) # offsets into b
thoff <- cumsum(c(0L, nth)) # offsets into theta
### FIXME: should this be done with cBind and avoid the transpose
### operator? In other words should Lambdat be generated directly
### instead of generating Lambda first then transposing?
Lambdat <-
t(do.call(sparseMatrix,
do.call(rBind,
lapply(seq_along(blist), function(i)
{
mm <- matrix(seq_len(nb[i]), ncol = nc[i],
byrow = TRUE)
dd <- diag(nc[i])
ltri <- lower.tri(dd, diag = TRUE)
ii <- row(dd)[ltri]
jj <- col(dd)[ltri]
dd[cbind(ii, jj)] <- seq_along(ii) # FIXME: this line unnecessary?
data.frame(i = as.vector(mm[, ii]) + boff[i],
j = as.vector(mm[, jj]) + boff[i],
x = as.double(rep.int(seq_along(ii),
rep.int(nl[i], length(ii))) +
thoff[i]))
}))))
thet <- numeric(sum(nth))
ll <- list(Zt=Matrix::drop0(Zt), theta=thet, Lind=as.integer(Lambdat@x),
Gp=unname(c(0L, cumsum(nb))))
## lower bounds on theta elements are 0 if on diagonal, else -Inf
ll$lower <- -Inf * (thet + 1)
ll$lower[unique(diag(Lambdat))] <- 0
ll$theta[] <- is.finite(ll$lower) # initial values of theta are 0 off-diagonal, 1 on
Lambdat@x[] <- ll$theta[ll$Lind] # initialize elements of Lambdat
ll$Lambdat <- Lambdat
# massage the factor list
fl <- lapply(blist, "[[", "ff")
# check for repeated factors
fnms <- names(fl)
if (length(fnms) > length(ufn <- unique(fnms))) {
fl <- fl[match(ufn, fnms)]
asgn <- match(fnms, ufn)
} else asgn <- seq_along(fl)
names(fl) <- ufn
fl <- do.call(data.frame, c(fl, check.names = FALSE))
attr(fl, "assign") <- asgn
ll$flist <- fl
ll$cnms <- cnms
ll$Ztlist <- Ztlist
ll
} ## {mkReTrms}
##' Create an lmerResp, glmResp or nlsResp instance
##'
##' @title Create an lmerResp, glmResp or nlsResp instance
##' @param fr a model frame
##' @param REML logical scalar, value of REML for an lmerResp instance
##' @param family the optional glm family (glmResp only)
##' @param nlenv the nonlinear model evaluation environment (nlsResp only)
##' @param nlmod the nonlinear model function (nlsResp only)
##' @param ... where to look for response information if \code{fr} is missing.
##' Can contain a model response, \code{y}, offset, \code{offset}, and weights,
##' \code{weights}.
##' @return an lmerResp or glmResp or nlsResp instance
##' @family utilities
##' @export
mkRespMod <- function(fr, REML=NULL, family = NULL, nlenv = NULL, nlmod = NULL, ...) {
if(!missing(fr)){
y <- model.response(fr)
offset <- model.offset(fr)
weights <- model.weights(fr)
N <- n <- nrow(fr)
etastart_update <- model.extract(fr, "etastart")
} else {
fr <- list(...)
y <- fr$y
N <- n <- if(is.matrix(y)) nrow(y) else length(y)
offset <- fr$offset
weights <- fr$weights
etastart_update <- fr$etastart
}
## FIXME: may need to add X, or pass it somehow, if we want to use glm.fit
##y <- model.response(fr)
if(length(dim(y)) == 1) {
## avoid problems with 1D arrays, but keep names
nm <- rownames(y)
dim(y) <- NULL
if(!is.null(nm)) names(y) <- nm
}
rho <- new.env()
rho$y <- if (is.null(y)) numeric(0) else y
if (!is.null(REML)) rho$REML <- REML
rho$etastart <- fr$etastart
rho$mustart <- fr$mustart
##N <- n <- nrow(fr)
if (!is.null(nlenv)) {
stopifnot(is.language(nlmod),
is.environment(nlenv),
is.numeric(val <- eval(nlmod, nlenv)),
length(val) == n,
## FIXME? Restriction, not present in ole' nlme():
is.matrix(gr <- attr(val, "gradient")),
mode(gr) == "numeric",
nrow(gr) == n,
!is.null(pnames <- colnames(gr)))
N <- length(gr)
rho$mu <- as.vector(val)
rho$sqrtXwt <- as.vector(gr)
rho$gam <-
unname(unlist(lapply(pnames,
function(nm) get(nm, envir=nlenv))))
}
if (!is.null(offset)) {
if (length(offset) == 1L) offset <- rep.int(offset, N)
stopifnot(length(offset) == N)
rho$offset <- unname(offset)
} else rho$offset <- rep.int(0, N)
if (!is.null(weights)) {
stopifnot(length(weights) == n, all(weights >= 0))
rho$weights <- unname(weights)
} else rho$weights <- rep.int(1, n)
if (is.null(family)) {
if (is.null(nlenv)) return(do.call(lmerResp$new, as.list(rho)))
return(do.call(nlsResp$new,
c(list(nlenv=nlenv,
nlmod=substitute(~foo, list(foo=nlmod)),
pnames=pnames), as.list(rho))))
}
stopifnot(inherits(family, "family"))
## need weights for initializing evaluation
rho$nobs <- n
## allow trivial objects, e.g. for simulation
if (length(y)>0) eval(family$initialize, rho)
family$initialize <- NULL # remove clutter from str output
ll <- as.list(rho)
ans <- do.call("new", c(list(Class="glmResp", family=family),
ll[setdiff(names(ll), c("m", "nobs", "mustart"))]))
if (length(y)>0) ans$updateMu(if (!is.null(es <- etastart_update)) es else
family$linkfun(get("mustart", rho)))
ans
}
##' From the right hand side of a formula for a mixed-effects model,
##' determine the pairs of expressions that are separated by the
##' vertical bar operator. Also expand the slash operator in grouping
##' factor expressions and expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Determine random-effects expressions from a formula
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return pairs of expressions that were separated by vertical bars
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @example
##' findbars(f1 <- Reaction ~ Days + (Days|Subject))
##' ## => list( Days | Subject )
##' findbars(y ~ Days + (1|Subject) + (0+Days|Subject))
##' ## => list of length 2: list ( 1 | Subject , 0+Days|Subject)
##' findbars(~ 1 + (1|batch/cask))
##' ## => list of length 2: list ( 1 | cask:batch , 1 | batch)
##' identical(findbars(~ 1 + (Days || Subject)),
##' findbars(~ 1 + (1|Subject) + (0+Days|Subject)))
##' \dontshow{
##' stopifnot(identical(findbars(f1),
##' list(expression(Days | Subject)[[1]])))
##' }
##' @family utilities
##' @keywords models utilities
##' @export
findbars <- function(term)
{
## Recursive function applied to individual terms
fb <- function(term)
{
if (is.name(term) || !is.language(term)) return(NULL)
if (term[[1]] == as.name("(")) return(fb(term[[2]]))
stopifnot(is.call(term))
if (term[[1]] == as.name('|')) return(term)
if (length(term) == 2) return(fb(term[[2]]))
c(fb(term[[2]]), fb(term[[3]]))
}
## Expand any slashes in the grouping factors returned by fb
expandSlash <- function(bb)
{
## Create the interaction terms for nested effects
makeInteraction <- function(x)
{
if (length(x) < 2) return(x)
trm1 <- makeInteraction(x[[1]])
trm11 <- if(is.list(trm1)) trm1[[1]] else trm1
list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1)
}
## Return the list of '/'-separated terms
slashTerms <- function(x)
{
if (!("/" %in% all.names(x))) return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
}
if (!is.list(bb))
expandSlash(list(bb))
else
unlist(lapply(bb, function(x) {
if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]])))
## lapply(unlist(...)) - unlist returns a flattened list
lapply(unlist(makeInteraction(trms)),
function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))
else x
}))
}## {expandSlash}
modterm <- expandDoubleVerts(
if(is(term, "formula")) term[[length(term)]] else term)
expandSlash(fb(modterm))
}
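## As the Note above says, findbars() recurses over individual terms, so a
## bare right-hand-side expression gives the same answer as the full formula,
## e.g. (sketch, not run):
## findbars(quote(Days + (Days | Subject)))  # list(Days | Subject)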
##' From the right hand side of a formula for a mixed-effects model,
##' expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Expand terms with \code{'||'} notation into separate \code{'|'} terms
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return the modified term
##' @family utilities
##' @keywords models utilities
##' @export
expandDoubleVerts <- function(term)
{
expandDoubleVert <- function(term) {
frml <- formula(paste0("~", deparse(term[[2]])))
## need term.labels not all.vars to capture interactions too:
newtrms <- paste0("0+", attr(terms(frml), "term.labels"))
if(attr(terms(frml), "intercept")!=0)
newtrms <- c("1", newtrms)
as.formula(paste("~(",
paste(vapply(newtrms, function(trm)
paste0(trm, "|", deparse(term[[3]])), ""),
collapse=")+("), ")"))[[2]]
}
if (!is.name(term) && is.language(term)) {
if (term[[1]] == as.name("(")) {
term[[2]] <- expandDoubleVerts(term[[2]])
}
stopifnot(is.call(term))
if (term[[1]] == as.name('||'))
return( expandDoubleVert(term) )
## else :
term[[2]] <- expandDoubleVerts(term[[2]])
if (length(term) != 2) {
if(length(term) == 3)
term[[3]] <- expandDoubleVerts(term[[3]])
}
}
term
}
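## Usage sketch (not run; the variable names x and g are illustrative only):
## expandDoubleVerts(~ x + (x || g))
## ## ~x + ((1 | g) + (0 + x | g))   i.e. uncorrelated intercept and slope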
##' Remove the random-effects terms from a mixed-effects formula,
##' thereby producing the fixed-effects formula.
##'
##' @title Omit terms separated by vertical bars in a formula
##' @param term the right-hand side of a mixed-model formula
##' @return the fixed-effects part of the formula
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' nobars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
nobars <- function(term)
{
if (!any(c('|','||') %in% all.names(term))) return(term)
if (is.call(term) && term[[1]] == as.name('|')) return(NULL)
if (is.call(term) && term[[1]] == as.name('||')) return(NULL)
if (length(term) == 2) {
nb <- nobars(term[[2]])
if (is.null(nb)) return(NULL)
term[[2]] <- nb
return(term)
}
nb2 <- nobars(term[[2]])
nb3 <- nobars(term[[3]])
if (is.null(nb2)) return(nb3)
if (is.null(nb3)) return(nb2)
term[[2]] <- nb2
term[[3]] <- nb3
term
}
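## Usage sketch (not run): double-bar terms are removed too, and NULL is
## returned when nothing fixed remains:
## nobars(y ~ x + (x || g))  # y ~ x
## nobars(~ (1 | g))         # NULL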
##' Substitute the '+' function for the '|' and '||' function in a mixed-model
##' formula. This provides a formula suitable for the current
##' model.frame function.
##'
##' @title "Sub[stitute] Bars"
##' @param term a mixed-model formula
##' @return the formula with all | and || operators replaced by +
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' subbars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days + (Days + Subject)
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
subbars <- function(term)
{
if (is.name(term) || !is.language(term)) return(term)
if (length(term) == 2) {
term[[2]] <- subbars(term[[2]])
return(term)
}
stopifnot(length(term) >= 3)
if (is.call(term) && term[[1]] == as.name('|'))
term[[1]] <- as.name('+')
if (is.call(term) && term[[1]] == as.name('||'))
term[[1]] <- as.name('+')
for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
term
}
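## Usage sketch (not run): '||' is substituted just like '|', e.g.
## subbars(y ~ (x || g))  # y ~ (x + g)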
##' @param bars result of findbars
barnames <- function(bars) {
unlist(lapply(bars, function(x) deparse(x[[3]])))
}
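## A small illustration (not run; g and h are throwaway names):
## barnames(findbars(y ~ (1 | g) + (x | h)))  # c("g", "h")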
##' Does every level of f1 occur in conjunction with exactly one level
##' of f2? The function is based on converting a triplet sparse matrix
##' to a compressed column-oriented form in which the nesting can be
##' quickly evaluated.
##'
##' @title Is f1 nested within f2?
##'
##' @param f1 factor 1
##' @param f2 factor 2
##'
##' @return TRUE if factor 1 is nested within factor 2
##' @examples
##' with(Pastes, isNested(cask, batch)) ## => FALSE
##' with(Pastes, isNested(sample, batch)) ## => TRUE
##' @export
isNested <- function(f1, f2)
{
f1 <- as.factor(f1)
f2 <- as.factor(f2)
stopifnot(length(f1) == length(f2))
k <- length(levels(f1))
sm <- as(new("ngTMatrix",
i = as.integer(f2) - 1L,
j = as.integer(f1) - 1L,
Dim = c(length(levels(f2)), k)),
"CsparseMatrix")
all(sm@p[2:(k+1L)] - sm@p[1:k] <= 1L)
}
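## A self-contained check (not run; assumes the Matrix package is loaded for
## the sparse-matrix classes used above):
## f1 <- factor(c("a", "a", "b", "b", "c", "c"))  # e.g. samples
## f2 <- factor(c("x", "x", "x", "x", "y", "y"))  # e.g. batches
## isNested(f1, f2)  # TRUE : each level of f1 meets exactly one level of f2
## isNested(f2, f1)  # FALSE: level "x" of f2 meets both "a" and "b" of f1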
subnms <- function(form, nms) {
## Recursive function applied to individual terms
sbnm <- function(term)
{
if (is.name(term)) {
if (any(term == nms)) 0 else term
} else switch(length(term),
term, ## 1
{ ## 2
term[[2]] <- sbnm(term[[2]])
term
},
{ ## 3
term[[2]] <- sbnm(term[[2]])
term[[3]] <- sbnm(term[[3]])
term
})
}
sbnm(form)
}
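## Usage sketch (not run): the named variables are replaced by the constant 0,
## e.g. with the illustrative names below:
## subnms(quote(a + b + c), "b")  # a + 0 + c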
## Check for a constant term (a literal 1) in an expression
##
## In the mixed-effects part of a nonlinear model formula, a constant
## term is not meaningful because every term must be relative to a
## nonlinear model parameter. This function recursively checks the
## expressions in the formula for a constant, calling stop() if
## such a term is encountered.
## @title Check for constant terms.
## @param expr an expression
## @return NULL. The function is executed for its side effect.
chck1 <- function(expr) {
if ((le <- length(expr)) == 1) {
if (is.numeric(expr) && expr == 1)
stop("1 is not meaningful in a nonlinear model formula")
return()
} else
for (j in seq_len(le)[-1]) Recall(expr[[j]])
}
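## e.g. (sketch, not run):
## chck1(quote(a + b * c))   # OK, returns NULL invisibly
## try(chck1(quote(a + 1)))  # error: 1 is not meaningful in a nonlinear ...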
## ---> ../man/nlformula.Rd --- Manipulate a nonlinear model formula
##' @param mc matched call from the caller, with arguments 'formula','start',...
##' @return a list with components "respMod", "frame", "X", "reTrms"
nlformula <- function(mc) {
start <- eval(mc$start, parent.frame(2L))
if (is.numeric(start)) start <- list(nlpars = start)
stopifnot(is.numeric(nlpars <- start$nlpars),
vapply(nlpars, length, 0L) == 1L,
length(pnames <- names(nlpars)) == length(nlpars),
length(form <- as.formula(mc$formula)) == 3L,
is(nlform <- eval(form[[2]]), "formula"),
pnames %in%
(av <- all.vars(nlmod <- as.call(nlform[[lnl <- length(nlform)]]))))
## MM{FIXME}: fortune(106) even twice in here!
nlform[[lnl]] <- parse(text= paste(setdiff(all.vars(form), pnames), collapse=' + '))[[1]]
nlform <- eval(nlform)
environment(nlform) <- environment(form)
m <- match(c("data", "subset", "weights", "na.action", "offset"),
names(mc), 0)
mc <- mc[c(1, m)]
mc$drop.unused.levels <- TRUE
mc[[1]] <- as.name("model.frame")
mc$formula <- nlform
fr <- eval(mc, parent.frame(2L))
n <- nrow(fr)
nlenv <- list2env(fr, parent=parent.frame(2L))
lapply(pnames, function(nm) nlenv[[nm]] <- rep.int(nlpars[[nm]], n))
respMod <- mkRespMod(fr, nlenv=nlenv, nlmod=nlmod)
chck1(meform <- form[[3L]])
pnameexpr <- parse(text=paste(pnames, collapse='+'))[[1]]
nb <- nobars(meform)
fe <- eval(substitute(~ 0 + nb + pnameexpr))
environment(fe) <- environment(form)
    frE <- do.call(rbind, lapply(seq_along(nlpars), function(i) fr)) # rbind length(nlpars) copies of the frame
for (nm in pnames) # convert these variables in fr to indicators
frE[[nm]] <- as.numeric(rep(nm == pnames, each = n))
X <- model.matrix(fe, frE)
rownames(X) <- NULL
reTrms <- mkReTrms(lapply(findbars(meform),
function(expr) {
expr[[2]] <- substitute(0+foo, list(foo=expr[[2]]))
expr
}), frE)
list(respMod=respMod, frame=fr, X=X, reTrms=reTrms, pnames=pnames)
} ## {nlformula}
##--> ../man/mkMerMod.Rd ---Create a merMod object
##' @param rho the environment of the objective function
##' @param opt the value returned by the optimizer
##' @param reTrms reTrms list from the calling function
mkMerMod <- function(rho, opt, reTrms, fr, mc, lme4conv=NULL) {
if(missing(mc)) mc <- match.call()
stopifnot(is.environment(rho),
is(pp <- rho$pp, "merPredD"),
is(resp <- rho$resp, "lmResp"),
is.list(opt), "par" %in% names(opt),
c("conv","fval") %in% substr(names(opt),1,4), ## "conv[ergence]", "fval[ues]"
is.list(reTrms), c("flist", "cnms", "Gp", "lower") %in% names(reTrms),
length(rcl <- class(resp)) == 1)
n <- nrow(pp$V)
p <- ncol(pp$V)
dims <- c(N=nrow(pp$X), n=n, p=p, nmp=n-p,
nth=length(pp$theta), q=nrow(pp$Zt),
nAGQ=rho$nAGQ,
compDev=rho$compDev,
## 'use scale' in the sense of whether dispersion parameter should
## be reported/used (*not* whether theta should be scaled by sigma)
useSc=(rcl != "glmResp" ||
!resp$family$family %in% c("poisson","binomial")),
reTrms=length(reTrms$cnms),
spFe=0L,
REML=if (rcl=="lmerResp") resp$REML else 0L,
GLMM=(rcl=="glmResp"),
NLMM=(rcl=="nlsResp"))
storage.mode(dims) <- "integer"
fac <- as.numeric(rcl != "nlsResp")
if (trivial.y <- (length(resp$y)==0)) {
## trivial model
sqrLenU <- wrss <- pwrss <- NA
} else {
sqrLenU <- pp$sqrL(fac)
wrss <- resp$wrss()
pwrss <- wrss + sqrLenU
}
weights <- resp$weights
beta <- pp$beta(fac)
#sigmaML <- pwrss/sum(weights)
sigmaML <- pwrss/n
if (rcl != "lmerResp") {
pars <- opt$par
if (length(pars) > length(pp$theta)) beta <- pars[-(seq_along(pp$theta))]
}
cmp <- c(ldL2=pp$ldL2(), ldRX2=pp$ldRX2(), wrss=wrss,
ussq=sqrLenU, pwrss=pwrss,
drsum=if (rcl=="glmResp" && !trivial.y) resp$resDev() else NA,
REML=if (rcl=="lmerResp" && resp$REML != 0L && !trivial.y)
opt$fval else NA,
## FIXME: construct 'REML deviance' here?
dev=if (rcl=="lmerResp" && resp$REML != 0L || trivial.y) NA else opt$fval,
sigmaML=sqrt(unname(if (!dims["useSc"] || trivial.y) NA else sigmaML)),
sigmaREML=sqrt(unname(if (rcl!="lmerResp" || trivial.y) NA else sigmaML*(dims['n']/dims['nmp']))),
tolPwrss=rho$tolPwrss)
## TODO: improve this hack to get something in frame slot (maybe need weights, etc...)
if(missing(fr)) fr <- data.frame(resp$y)
new(switch(rcl, lmerResp="lmerMod", glmResp="glmerMod", nlsResp="nlmerMod"),
call=mc, frame=fr, flist=reTrms$flist, cnms=reTrms$cnms,
Gp=reTrms$Gp, theta=pp$theta, beta=beta,
u=if (trivial.y) rep(NA_real_,nrow(pp$Zt)) else pp$u(fac),
lower=reTrms$lower, devcomp=list(cmp=cmp, dims=dims),
pp=pp, resp=resp,
optinfo = list (optimizer= attr(opt,"optimizer"),
control = attr(opt,"control"),
derivs = attr(opt,"derivs"),
conv = list(opt=opt$conv, lme4=lme4conv),
feval = if (is.null(opt$feval)) NA else opt$feval,
warnings = attr(opt,"warnings"), val = opt$par)
)
}## {mkMerMod}
## generic argument checking
## 'type': name of calling function ("glmer", "lmer", "nlmer")
##
checkArgs <- function(type,...) {
l... <- list(...)
if (isTRUE(l...[["sparseX"]])) warning("sparseX = TRUE has no effect at present")
## '...' handling up front, safe-guarding against typos ("familiy") :
if(length(l... <- list(...))) {
if (!is.null(l...[["family"]])) { # call glmer if family specified
## we will only get here if 'family' is *not* in the arg list
warning("calling lmer with family() is deprecated: please use glmer() instead")
type <- "glmer"
}
## Check for method argument which is no longer used
## (different meanings/hints depending on glmer vs lmer)
if (!is.null(method <- l...[["method"]])) {
msg <- paste("Argument", sQuote("method"), "is deprecated.")
if (type=="lmer") msg <- paste(msg,"Use the REML argument to specify ML or REML estimation.")
if (type=="glmer") msg <- paste(msg,"Use the nAGQ argument to specify Laplace (nAGQ=1) or adaptive",
"Gauss-Hermite quadrature (nAGQ>1). PQL is no longer available.")
warning(msg)
l... <- l...[names(l...) != "method"]
}
if(length(l...)) {
warning("extra argument(s) ",
paste(sQuote(names(l...)), collapse=", "),
" disregarded")
}
}
}
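## Usage sketch (not run):
## checkArgs("lmer", method = "ML")
## ## -> warning: Argument 'method' is deprecated. Use the REML argument to
## ##    specify ML or REML estimation.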
## check formula and data: return an environment suitable for evaluating
## the formula.
## (1) if data is specified, return it
## (2) otherwise, if formula has an environment, use it
## (3) otherwise [e.g. if formula was passed as a string], try to use parent.frame(2)
## if #3 is true *and* the user is doing something tricky with nested functions,
## this may fail ...
checkFormulaData <- function(formula,data,checkLHS=TRUE,debug=FALSE) {
dataName <- deparse(substitute(data))
missingData <- inherits(tryCatch(eval(data), error=function(e)e), "error")
## data not found (this *should* only happen with garbage input,
## OR when strings used as formulae -> drop1/update/etc.)
##
## alternate attempt (fails)
##
## ff <- sys.frames()
## ex <- substitute(data)
## ii <- rev(seq_along(ff))
## for(i in ii) {
## ex <- eval(substitute(substitute(x, env=sys.frames()[[n]]),
## env = list(x = ex, n=i)))
## }
## origName <- deparse(ex)
## missingData <- !exists(origName)
## (!dataName=="NULL" && !exists(dataName))
    ## define the variable-lookup helpers up front so that both the
    ## missing-data branch and the debug block below can use them
    varex <- function(v,env) exists(v,envir=env,inherits=FALSE)
    allvars <- all.vars(as.formula(formula))
    allvarex <- function(vvec=allvars,...) { all(sapply(vvec,varex,...)) }
    if (missingData) {
        if (allvarex(env=(ee <- environment(formula)))) {
stop("'data' not found, but variables found in environment of formula: ",
"try specifying 'formula' as a formula rather ",
"than a string in the original model")
} else stop("'data' not found, and some variables missing from formula environment")
} else {
if (is.null(data)) {
if (!is.null(ee <- environment(formula))) {
## use environment of formula
denv <- ee
} else {
## e.g. no environment, e.g. because formula is a character vector
## parent.frame(2L) works because [g]lFormula (our calling environment)
## has been called within [g]lmer with env=parent.frame(1L)
## If you call checkFormulaData in some other bizarre way such that
## parent.frame(2L) is *not* OK, you deserve what you get
## calling checkFormulaData directly from the global
## environment should be OK, since trying to go up beyond the global
## environment keeps bringing you back to the global environment ...
denv <- parent.frame(2L)
}
} else {
## data specified
denv <- list2env(data)
}
}
## FIXME: set enclosing environment of denv to environment(formula), or parent.frame(2L) ?
if (debug) {
cat("Debugging parent frames in checkFormulaData:\n")
## find global environment -- could do this with sys.nframe() ?
glEnv <- 1
while (!identical(parent.frame(glEnv),.GlobalEnv)) {
glEnv <- glEnv+1
}
## where are vars?
for (i in 1:glEnv) {
OK <- allvarex(env=parent.frame(i))
cat("vars exist in parent frame ",i)
if (i==glEnv) cat(" (global)")
cat(" ",OK,"\n")
}
cat("vars exist in env of formula ",allvarex(env=denv),"\n")
} ## if (debug)
stopifnot(!checkLHS || length(as.formula(formula,env=denv)) == 3) ## check for two-sided formula
return(denv)
}
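## Usage sketch (not run; the data frame below is illustrative):
## d <- data.frame(y = 1:4, x = c(0.1, 0.5, 0.2, 0.9), g = gl(2, 2))
## env <- checkFormulaData(y ~ x + (1 | g), d)
## exists("y", envir = env)  # TRUE: the data were wrapped in an environment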
## checkFormulaData <- function(formula,data) {
## ee <- environment(formula)
## if (is.null(ee)) {
## ee <- parent.frame(2)
## }
## if (missing(data)) data <- ee
## stopifnot(length(as.formula(formula,env=as.environment(data))) == 3)
## return(data)
## }
##' Not exported; for tests (and examples) that can be slow;
##' Use if(lme4:::testLevel() >= 1.) ..... see ../README.md
testLevel <- function()
if(nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")) &&
is.finite(s <- as.numeric(s))) s else 1
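## e.g. (sketch, not run):
## Sys.setenv(LME4_TEST_LEVEL = "2"); testLevel()  # 2
## Sys.unsetenv("LME4_TEST_LEVEL"); testLevel()    # 1, the default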
##' General conditional variance-covariance matrix
##'
##' Experimental function for estimating the variance-covariance
##' matrix of the random effects, conditional on the observed data
##' and at the (RE)ML estimate of the fixed effects and covariance
##' parameters. Applicable for any Lambda matrix, but slower than
##' other block-by-block methods.
##' Not exported.
##'
##' TODO:
##' (1) Write up a quick note on the theory (e.g. Laplace approximation).
##' (2) Figure out how to convert between full q-by-q matrix, and
##' the format currently in the postVar attributes of the
##' elements of the output of ranef.
##' (3) Test.
##' (4) Do we need to think carefully about the differences
##' between REML and ML, beyond just multiplying by a different
##' sigma^2 estimate?
##'
##' @param object \code{merMod} object
##' @return Sparse covariance matrix
condVar <- function(object) {
s2 <- sigma(object)^2
Lamt <- getME(object,"Lambdat")
L <- getME(object,"L")
## never do it this way! fortune("SOOOO")
#V <- solve(L, system = "A")
#V <- chol2inv(L)
#s2*crossprod(Lamt, V) %*% Lamt
LL <- solve(L, Lamt, system = "A")
s2 * crossprod(Lamt, LL)
}
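## Usage sketch (not run; needs a fitted merMod object, e.g. from lmer()):
## fm <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
## V  <- condVar(fm)  # sparse q-by-q matrix; here q = 36 (2 effects x 18 subjects)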
mkMinimalData <- function(formula) {
vars <- all.vars(formula)
nVars <- length(vars)
matr <- matrix(0, 2, nVars)
data <- as.data.frame(matr)
setNames(data, vars)
}
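## e.g. (sketch, not run):
## mkMinimalData(y ~ x + (1 | g))  # 2-row, all-zero data frame: columns y, x, g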
##' Make template for mixed model parameters
mkParsTemplate <- function(formula, data){
if(missing(data)) data <- mkMinimalData(formula)
mfRanef <- model.frame( subbars(formula), data)
mmFixef <- model.matrix(nobars(formula) , data)
reTrms <- mkReTrms(findbars(formula), mfRanef)
cnms <- reTrms$cnms
thetaNamesList <- mapply(mkPfun(), names(cnms), cnms)
thetaNames <- unlist(thetaNamesList)
betaNames <- colnames(mmFixef)
list(beta = setNames(numeric(length( betaNames)), betaNames),
theta = setNames(reTrms$theta, thetaNames),
sigma = 1)
}
##' Make template for mixed model data
##'
##' Useful for simulating balanced designs and for
##' getting started on unbalanced simulations
##'
##' @param formula formula
##' @param data optional data; a minimal template is created if missing
##' @param nGrps number of groups per grouping factor
##' @param nPerGrp number of observations per group
##' @param rfunc function for generating covariate data
##' @param ... additional parameters for rfunc
mkDataTemplate <- function(formula, data,
nGrps = 2, nPerGrp = 1,
rfunc = NULL, ...){
if(missing(data)) data <- mkMinimalData(formula)
grpFacNames <- unique(barnames(findbars(formula)))
varNames <- all.vars(formula)
covariateNames <- setdiff(varNames, grpFacNames)
nGrpFac <- length(grpFacNames)
nCov <- length(covariateNames)
grpFac <- gl(nGrps, nPerGrp)
grpDat <- expand.grid(replicate(nGrpFac, grpFac, simplify = FALSE))
colnames(grpDat) <- grpFacNames
nObs <- nrow(grpDat)
if(is.null(rfunc)) rfunc <- function(n, ...) rep(0, n)
params <- c(list(nObs), list(...))
covDat <- as.data.frame(replicate(nCov, do.call(rfunc, params),
simplify = FALSE))
colnames(covDat) <- covariateNames
cbind(grpDat, covDat)
}
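## Usage sketch (not run):
## mkDataTemplate(y ~ x + (1 | g), nGrps = 3, nPerGrp = 2)
## ## 6 rows: grouping factor g = 1,1,2,2,3,3 plus zero-filled covariate
## ## columns y and x (pass rfunc, e.g. rfunc = rnorm, for random covariates)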
| 33,475 | gpl-2.0 |
fe632709746c52681c0186fc369b085ca33c8669 | garbear/cloc | testcode/utilities.R | # from https://github.com/lme4/lme4
if(getRversion() < "2.15")
paste0 <- function(...) paste(..., sep = '')
### Utilities for parsing and manipulating mixed-model formulas
##' From the result of \code{\link{findbars}} applied to a model formula and
##' and the evaluation frame, create the model matrix, etc. associated with
##' random-effects terms. See the description of the returned value for a
##' detailed list.
##'
##' @title Create Z, Lambda, Lind, etc.
##' @param bars a list of parsed random-effects terms
##' @param fr a model frame in which to evaluate these terms
##' @return a list with components
##' \item{Zt}{transpose of the sparse model matrix for the random effects}
##' \item{Lambdat}{transpose of the sparse relative covariance factor}
##' \item{Lind}{an integer vector of indices determining the mapping of the
##' elements of the \code{theta} to the \code{"x"} slot of \code{Lambdat}}
##' \item{theta}{initial values of the covariance parameters}
##' \item{lower}{lower bounds on the covariance parameters}
##' \item{flist}{list of grouping factors used in the random-effects terms}
##' \item{cnms}{a list of column names of the random effects according to
##' the grouping factors}
##' @importFrom Matrix sparseMatrix rBind drop0
##' @importMethodsFrom Matrix coerce
##' @family utilities
##' @export
mkReTrms <- function(bars, fr) {
if (!length(bars))
stop("No random effects terms specified in formula")
stopifnot(is.list(bars), vapply(bars, is.language, NA),
inherits(fr, "data.frame"))
names(bars) <- barnames(bars)
term.names <- unlist(lapply(bars, function(x) paste(deparse(x),collapse=" ")))
## auxiliary {named, for easier inspection}:
mkBlist <- function(x) {
frloc <- fr
## convert grouping variables to factors as necessary
## TODO: variables that are *not* in the data frame are
## not converted -- these could still break, e.g. if someone
## tries to use the : operator
for (i in all.vars(x[[3]])) {
if (!is.null(frloc[[i]])) frloc[[i]] <- factor(frloc[[i]])
}
if (is.null(ff <- tryCatch(eval(substitute(factor(fac),
list(fac = x[[3]])), frloc),
error=function(e) NULL)))
stop("couldn't evaluate grouping factor ",
deparse(x[[3]])," within model frame:",
" try adding grouping factor to data ",
"frame explicitly if possible")
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ",
deparse(x[[3]]))
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute( ~ foo, list(foo = x[[2]]))), frloc)
nc <- ncol(mm)
nseq <- seq_len(nc)
sm <- as(ff, "sparseMatrix")
if (nc > 1)
sm <- do.call(rBind, lapply(nseq, function(i) sm))
## hack for NA values contained in factor (FIXME: test elsewhere for consistency?)
sm@x[] <- t(mm[!is.na(ff),])
## When nc > 1 switch the order of the rows of sm
## so the random effects for the same level of the
## grouping factor are adjacent.
if (nc > 1)
sm <- sm[as.vector(matrix(seq_len(nc * nl),
ncol = nl, byrow = TRUE)),]
list(ff = ff, sm = sm, nl = nl, cnms = colnames(mm))
}
blist <- lapply(bars, mkBlist)
nl <- vapply(blist, `[[`, 0L, "nl") # no. of levels per term
# (in lmer jss: \ell_i)
## order terms stably by decreasing number of levels in the factor
if (any(diff(nl) > 0)) {
ord <- rev(order(nl))
blist <- blist[ord]
nl <- nl[ord]
}
Ztlist <- lapply(blist, "[[", "sm")
Zt <- do.call(rBind, Ztlist)
names(Ztlist) <- term.names
q <- nrow(Zt)
## Create and install Lambdat, Lind, etc. This must be done after
## any potential reordering of the terms.
cnms <- lapply(blist, "[[", "cnms") # list of column names of the
# model matrix per term
nc <- vapply(cnms, length, 0L) # no. of columns per term
# (in lmer jss: p_i)
nth <- as.integer((nc * (nc+1))/2) # no. of parameters per term
# (in lmer jss: ??)
nb <- nc * nl # no. of random effects per term
# (in lmer jss: q_i)
stopifnot(sum(nb) == q)
boff <- cumsum(c(0L, nb)) # offsets into b
thoff <- cumsum(c(0L, nth)) # offsets into theta
### FIXME: should this be done with cBind and avoid the transpose
### operator? In other words should Lambdat be generated directly
### instead of generating Lambda first then transposing?
Lambdat <-
t(do.call(sparseMatrix,
do.call(rBind,
lapply(seq_along(blist), function(i)
{
mm <- matrix(seq_len(nb[i]), ncol = nc[i],
byrow = TRUE)
dd <- diag(nc[i])
ltri <- lower.tri(dd, diag = TRUE)
ii <- row(dd)[ltri]
jj <- col(dd)[ltri]
dd[cbind(ii, jj)] <- seq_along(ii) # FIXME: this line unnecessary?
data.frame(i = as.vector(mm[, ii]) + boff[i],
j = as.vector(mm[, jj]) + boff[i],
x = as.double(rep.int(seq_along(ii),
rep.int(nl[i], length(ii))) +
thoff[i]))
}))))
thet <- numeric(sum(nth))
ll <- list(Zt=Matrix::drop0(Zt), theta=thet, Lind=as.integer(Lambdat@x),
Gp=unname(c(0L, cumsum(nb))))
## lower bounds on theta elements are 0 if on diagonal, else -Inf
ll$lower <- -Inf * (thet + 1)
ll$lower[unique(diag(Lambdat))] <- 0
ll$theta[] <- is.finite(ll$lower) # initial values of theta are 0 off-diagonal, 1 on
Lambdat@x[] <- ll$theta[ll$Lind] # initialize elements of Lambdat
ll$Lambdat <- Lambdat
# massage the factor list
fl <- lapply(blist, "[[", "ff")
# check for repeated factors
fnms <- names(fl)
if (length(fnms) > length(ufn <- unique(fnms))) {
fl <- fl[match(ufn, fnms)]
asgn <- match(fnms, ufn)
} else asgn <- seq_along(fl)
names(fl) <- ufn
fl <- do.call(data.frame, c(fl, check.names = FALSE))
attr(fl, "assign") <- asgn
ll$flist <- fl
ll$cnms <- cnms
ll$Ztlist <- Ztlist
ll
} ## {mkReTrms}
##' Create an lmerResp, glmResp or nlsResp instance
##'
##' @title Create an lmerResp, glmResp or nlsResp instance
##' @param fr a model frame
##' @param REML logical scalar, value of REML for an lmerResp instance
##' @param family the optional glm family (glmResp only)
##' @param nlenv the nonlinear model evaluation environment (nlsResp only)
##' @param nlmod the nonlinear model function (nlsResp only)
##' @param ... where to look for response information if \code{fr} is missing.
##' Can contain a model response, \code{y}, offset, \code{offset}, and weights,
##' \code{weights}.
##' @return an lmerResp or glmResp or nlsResp instance
##' @family utilities
##' @export
mkRespMod <- function(fr, REML=NULL, family = NULL, nlenv = NULL, nlmod = NULL, ...) {
if(!missing(fr)){
y <- model.response(fr)
offset <- model.offset(fr)
weights <- model.weights(fr)
N <- n <- nrow(fr)
etastart_update <- model.extract(fr, "etastart")
} else {
fr <- list(...)
y <- fr$y
N <- n <- if(is.matrix(y)) nrow(y) else length(y)
offset <- fr$offset
weights <- fr$weights
etastart_update <- fr$etastart
}
## FIXME: may need to add X, or pass it somehow, if we want to use glm.fit
##y <- model.response(fr)
if(length(dim(y)) == 1) {
## avoid problems with 1D arrays, but keep names
nm <- rownames(y)
dim(y) <- NULL
if(!is.null(nm)) names(y) <- nm
}
rho <- new.env()
rho$y <- if (is.null(y)) numeric(0) else y
if (!is.null(REML)) rho$REML <- REML
rho$etastart <- fr$etastart
rho$mustart <- fr$mustart
##N <- n <- nrow(fr)
if (!is.null(nlenv)) {
stopifnot(is.language(nlmod),
is.environment(nlenv),
is.numeric(val <- eval(nlmod, nlenv)),
length(val) == n,
## FIXME? Restriction, not present in ole' nlme():
is.matrix(gr <- attr(val, "gradient")),
mode(gr) == "numeric",
nrow(gr) == n,
!is.null(pnames <- colnames(gr)))
N <- length(gr)
rho$mu <- as.vector(val)
rho$sqrtXwt <- as.vector(gr)
rho$gam <-
unname(unlist(lapply(pnames,
function(nm) get(nm, envir=nlenv))))
}
if (!is.null(offset)) {
if (length(offset) == 1L) offset <- rep.int(offset, N)
stopifnot(length(offset) == N)
rho$offset <- unname(offset)
} else rho$offset <- rep.int(0, N)
if (!is.null(weights)) {
stopifnot(length(weights) == n, all(weights >= 0))
rho$weights <- unname(weights)
} else rho$weights <- rep.int(1, n)
if (is.null(family)) {
if (is.null(nlenv)) return(do.call(lmerResp$new, as.list(rho)))
return(do.call(nlsResp$new,
c(list(nlenv=nlenv,
nlmod=substitute(~foo, list(foo=nlmod)),
pnames=pnames), as.list(rho))))
}
stopifnot(inherits(family, "family"))
## need weights for initializing evaluation
rho$nobs <- n
## allow trivial objects, e.g. for simulation
if (length(y)>0) eval(family$initialize, rho)
family$initialize <- NULL # remove clutter from str output
ll <- as.list(rho)
ans <- do.call("new", c(list(Class="glmResp", family=family),
ll[setdiff(names(ll), c("m", "nobs", "mustart"))]))
if (length(y)>0) ans$updateMu(if (!is.null(es <- etastart_update)) es else
family$linkfun(get("mustart", rho)))
ans
}
##' From the right hand side of a formula for a mixed-effects model,
##' determine the pairs of expressions that are separated by the
##' vertical bar operator. Also expand the slash operator in grouping
##' factor expressions and expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Determine random-effects expressions from a formula
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return pairs of expressions that were separated by vertical bars
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @example
##' findbars(f1 <- Reaction ~ Days + (Days|Subject))
##' ## => list( Days | Subject )
##' findbars(y ~ Days + (1|Subject) + (0+Days|Subject))
##' ## => list of length 2: list ( 1 | Subject , 0+Days|Subject)
##' findbars(~ 1 + (1|batch/cask))
##' ## => list of length 2: list ( 1 | cask:batch , 1 | batch)
##' identical(findbars(~ 1 + (Days || Subject)),
##' findbars(~ 1 + (1|Subject) + (0+Days|Subject)))
##' \dontshow{
##' stopifnot(identical(findbars(f1),
##' list(expression(Days | Subject)[[1]])))
##' }
##' @family utilities
##' @keywords models utilities
##' @export
findbars <- function(term)
{
## Recursive function applied to individual terms
fb <- function(term)
{
if (is.name(term) || !is.language(term)) return(NULL)
if (term[[1]] == as.name("(")) return(fb(term[[2]]))
stopifnot(is.call(term))
if (term[[1]] == as.name('|')) return(term)
if (length(term) == 2) return(fb(term[[2]]))
c(fb(term[[2]]), fb(term[[3]]))
}
## Expand any slashes in the grouping factors returned by fb
expandSlash <- function(bb)
{
## Create the interaction terms for nested effects
makeInteraction <- function(x)
{
if (length(x) < 2) return(x)
trm1 <- makeInteraction(x[[1]])
trm11 <- if(is.list(trm1)) trm1[[1]] else trm1
list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1)
}
## Return the list of '/'-separated terms
slashTerms <- function(x)
{
if (!("/" %in% all.names(x))) return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
}
if (!is.list(bb))
expandSlash(list(bb))
else
unlist(lapply(bb, function(x) {
if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]])))
## lapply(unlist(...)) - unlist returns a flattened list
lapply(unlist(makeInteraction(trms)),
function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))
else x
}))
}## {expandSlash}
modterm <- expandDoubleVerts(
if(is(term, "formula")) term[[length(term)]] else term)
expandSlash(fb(modterm))
}
##' From the right hand side of a formula for a mixed-effects model,
##' expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Expand terms with \code{'||'} notation into separate \code{'|'} terms
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return the modified term
##' @family utilities
##' @keywords models utilities
##' @export
expandDoubleVerts <- function(term)
{
expandDoubleVert <- function(term) {
frml <- formula(paste0("~", deparse(term[[2]])))
## need term.labels not all.vars to capture interactions too:
newtrms <- paste0("0+", attr(terms(frml), "term.labels"))
if(attr(terms(frml), "intercept")!=0)
newtrms <- c("1", newtrms)
as.formula(paste("~(",
paste(vapply(newtrms, function(trm)
paste0(trm, "|", deparse(term[[3]])), ""),
collapse=")+("), ")"))[[2]]
}
if (!is.name(term) && is.language(term)) {
if (term[[1]] == as.name("(")) {
term[[2]] <- expandDoubleVerts(term[[2]])
}
stopifnot(is.call(term))
if (term[[1]] == as.name('||'))
return( expandDoubleVert(term) )
## else :
term[[2]] <- expandDoubleVerts(term[[2]])
if (length(term) != 2) {
if(length(term) == 3)
term[[3]] <- expandDoubleVerts(term[[3]])
}
}
term
}
##' Remove the random-effects terms from a mixed-effects formula,
##' thereby producing the fixed-effects formula.
##'
##' @title Omit terms separated by vertical bars in a formula
##' @param term the right-hand side of a mixed-model formula
##' @return the fixed-effects part of the formula
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' nobars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
nobars <- function(term)
{
if (!any(c('|','||') %in% all.names(term))) return(term)
if (is.call(term) && term[[1]] == as.name('|')) return(NULL)
if (is.call(term) && term[[1]] == as.name('||')) return(NULL)
if (length(term) == 2) {
nb <- nobars(term[[2]])
if (is.null(nb)) return(NULL)
term[[2]] <- nb
return(term)
}
nb2 <- nobars(term[[2]])
nb3 <- nobars(term[[3]])
if (is.null(nb2)) return(nb3)
if (is.null(nb3)) return(nb2)
term[[2]] <- nb2
term[[3]] <- nb3
term
}
##' Substitute the '+' function for the '|' and '||' function in a mixed-model
##' formula. This provides a formula suitable for the current
##' model.frame function.
##'
##' @title "Sub[stitute] Bars"
##' @param term a mixed-model formula
##' @return the formula with all | and || operators replaced by +
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' subbars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days + (Days + Subject)
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
subbars <- function(term)
{
if (is.name(term) || !is.language(term)) return(term)
if (length(term) == 2) {
term[[2]] <- subbars(term[[2]])
return(term)
}
stopifnot(length(term) >= 3)
if (is.call(term) && term[[1]] == as.name('|'))
term[[1]] <- as.name('+')
if (is.call(term) && term[[1]] == as.name('||'))
term[[1]] <- as.name('+')
for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
term
}
##' @param bars result of findbars
barnames <- function(bars) {
unlist(lapply(bars, function(x) deparse(x[[3]])))
}
##' Does every level of f1 occur in conjunction with exactly one level
##' of f2? The function is based on converting a triplet sparse matrix
##' to a compressed column-oriented form in which the nesting can be
##' quickly evaluated.
##'
##' @title Is f1 nested within f2?
##'
##' @param f1 factor 1
##' @param f2 factor 2
##'
##' @return TRUE if factor 1 is nested within factor 2
##' @examples
##' with(Pastes, isNested(cask, batch)) ## => FALSE
##' with(Pastes, isNested(sample, batch)) ## => TRUE
##' @export
isNested <- function(f1, f2)
{
f1 <- as.factor(f1)
f2 <- as.factor(f2)
stopifnot(length(f1) == length(f2))
k <- length(levels(f1))
sm <- as(new("ngTMatrix",
i = as.integer(f2) - 1L,
j = as.integer(f1) - 1L,
Dim = c(length(levels(f2)), k)),
"CsparseMatrix")
all(sm@p[2:(k+1L)] - sm@p[1:k] <= 1L)
}
subnms <- function(form, nms) {
## Recursive function applied to individual terms
sbnm <- function(term)
{
if (is.name(term)) {
if (any(term == nms)) 0 else term
} else switch(length(term),
term, ## 1
{ ## 2
term[[2]] <- sbnm(term[[2]])
term
},
{ ## 3
term[[2]] <- sbnm(term[[2]])
term[[3]] <- sbnm(term[[3]])
term
})
}
sbnm(form)
}
## Check for a constant term (a literal 1) in an expression
##
## In the mixed-effects part of a nonlinear model formula, a constant
## term is not meaningful because every term must be relative to a
## nonlinear model parameter. This function recursively checks the
## expressions in the formula for a a constant, calling stop() if
## such a term is encountered.
## @title Check for constant terms.
## @param expr an expression
## @return NULL. The function is executed for its side effect.
chck1 <- function(expr) {
if ((le <- length(expr)) == 1) {
if (is.numeric(expr) && expr == 1)
stop("1 is not meaningful in a nonlinear model formula")
return()
} else
for (j in seq_len(le)[-1]) Recall(expr[[j]])
}
## ---> ../man/nlformula.Rd --- Manipulate a nonlinear model formula
##' @param mc matched call from the caller, with arguments 'formula','start',...
##' @return a list with components "respMod", "frame", "X", "reTrms"
nlformula <- function(mc) {
start <- eval(mc$start, parent.frame(2L))
if (is.numeric(start)) start <- list(nlpars = start)
stopifnot(is.numeric(nlpars <- start$nlpars),
vapply(nlpars, length, 0L) == 1L,
length(pnames <- names(nlpars)) == length(nlpars),
length(form <- as.formula(mc$formula)) == 3L,
is(nlform <- eval(form[[2]]), "formula"),
pnames %in%
(av <- all.vars(nlmod <- as.call(nlform[[lnl <- length(nlform)]]))))
## MM{FIXME}: fortune(106) even twice in here!
nlform[[lnl]] <- parse(text= paste(setdiff(all.vars(form), pnames), collapse=' + '))[[1]]
nlform <- eval(nlform)
environment(nlform) <- environment(form)
m <- match(c("data", "subset", "weights", "na.action", "offset"),
names(mc), 0)
mc <- mc[c(1, m)]
mc$drop.unused.levels <- TRUE
mc[[1]] <- as.name("model.frame")
mc$formula <- nlform
fr <- eval(mc, parent.frame(2L))
n <- nrow(fr)
nlenv <- list2env(fr, parent=parent.frame(2L))
lapply(pnames, function(nm) nlenv[[nm]] <- rep.int(nlpars[[nm]], n))
respMod <- mkRespMod(fr, nlenv=nlenv, nlmod=nlmod)
chck1(meform <- form[[3L]])
pnameexpr <- parse(text=paste(pnames, collapse='+'))[[1]]
nb <- nobars(meform)
fe <- eval(substitute(~ 0 + nb + pnameexpr))
environment(fe) <- environment(form)
frE <- do.call(rbind, lapply(seq_along(nlpars), function(i) fr)) # rbind s copies of the frame
for (nm in pnames) # convert these variables in fr to indicators
frE[[nm]] <- as.numeric(rep(nm == pnames, each = n))
X <- model.matrix(fe, frE)
rownames(X) <- NULL
reTrms <- mkReTrms(lapply(findbars(meform),
function(expr) {
expr[[2]] <- substitute(0+foo, list(foo=expr[[2]]))
expr
}), frE)
list(respMod=respMod, frame=fr, X=X, reTrms=reTrms, pnames=pnames)
} ## {nlformula}
##--> ../man/mkMerMod.Rd ---Create a merMod object
##' @param rho the environment of the objective function
##' @param opt the value returned by the optimizer
##' @param reTrms reTrms list from the calling function
mkMerMod <- function(rho, opt, reTrms, fr, mc, lme4conv=NULL) {
if(missing(mc)) mc <- match.call()
stopifnot(is.environment(rho),
is(pp <- rho$pp, "merPredD"),
is(resp <- rho$resp, "lmResp"),
is.list(opt), "par" %in% names(opt),
c("conv","fval") %in% substr(names(opt),1,4), ## "conv[ergence]", "fval[ues]"
is.list(reTrms), c("flist", "cnms", "Gp", "lower") %in% names(reTrms),
length(rcl <- class(resp)) == 1)
n <- nrow(pp$V)
p <- ncol(pp$V)
dims <- c(N=nrow(pp$X), n=n, p=p, nmp=n-p,
nth=length(pp$theta), q=nrow(pp$Zt),
nAGQ=rho$nAGQ,
compDev=rho$compDev,
## 'use scale' in the sense of whether dispersion parameter should
## be reported/used (*not* whether theta should be scaled by sigma)
useSc=(rcl != "glmResp" ||
!resp$family$family %in% c("poisson","binomial")),
reTrms=length(reTrms$cnms),
spFe=0L,
REML=if (rcl=="lmerResp") resp$REML else 0L,
GLMM=(rcl=="glmResp"),
NLMM=(rcl=="nlsResp"))
storage.mode(dims) <- "integer"
fac <- as.numeric(rcl != "nlsResp")
if (trivial.y <- (length(resp$y)==0)) {
## trivial model
sqrLenU <- wrss <- pwrss <- NA
} else {
sqrLenU <- pp$sqrL(fac)
wrss <- resp$wrss()
pwrss <- wrss + sqrLenU
}
weights <- resp$weights
beta <- pp$beta(fac)
#sigmaML <- pwrss/sum(weights)
sigmaML <- pwrss/n
if (rcl != "lmerResp") {
pars <- opt$par
if (length(pars) > length(pp$theta)) beta <- pars[-(seq_along(pp$theta))]
}
cmp <- c(ldL2=pp$ldL2(), ldRX2=pp$ldRX2(), wrss=wrss,
ussq=sqrLenU, pwrss=pwrss,
drsum=if (rcl=="glmResp" && !trivial.y) resp$resDev() else NA,
REML=if (rcl=="lmerResp" && resp$REML != 0L && !trivial.y)
opt$fval else NA,
## FIXME: construct 'REML deviance' here?
dev=if (rcl=="lmerResp" && resp$REML != 0L || trivial.y) NA else opt$fval,
sigmaML=sqrt(unname(if (!dims["useSc"] || trivial.y) NA else sigmaML)),
sigmaREML=sqrt(unname(if (rcl!="lmerResp" || trivial.y) NA else sigmaML*(dims['n']/dims['nmp']))),
tolPwrss=rho$tolPwrss)
## TODO: improve this hack to get something in frame slot (maybe need weights, etc...)
if(missing(fr)) fr <- data.frame(resp$y)
new(switch(rcl, lmerResp="lmerMod", glmResp="glmerMod", nlsResp="nlmerMod"),
call=mc, frame=fr, flist=reTrms$flist, cnms=reTrms$cnms,
Gp=reTrms$Gp, theta=pp$theta, beta=beta,
u=if (trivial.y) rep(NA_real_,nrow(pp$Zt)) else pp$u(fac),
lower=reTrms$lower, devcomp=list(cmp=cmp, dims=dims),
pp=pp, resp=resp,
optinfo = list (optimizer= attr(opt,"optimizer"),
control = attr(opt,"control"),
derivs = attr(opt,"derivs"),
conv = list(opt=opt$conv, lme4=lme4conv),
feval = if (is.null(opt$feval)) NA else opt$feval,
warnings = attr(opt,"warnings"), val = opt$par)
)
}## {mkMerMod}
## generic argument checking
## 'type': name of calling function ("glmer", "lmer", "nlmer")
##
checkArgs <- function(type,...) {
l... <- list(...)
if (isTRUE(l...[["sparseX"]])) warning("sparseX = TRUE has no effect at present")
## '...' handling up front, safe-guarding against typos ("familiy") :
if(length(l... <- list(...))) {
if (!is.null(l...[["family"]])) { # call glmer if family specified
## we will only get here if 'family' is *not* in the arg list
warning("calling lmer with family() is deprecated: please use glmer() instead")
type <- "glmer"
}
## Check for method argument which is no longer used
## (different meanings/hints depending on glmer vs lmer)
if (!is.null(method <- l...[["method"]])) {
msg <- paste("Argument", sQuote("method"), "is deprecated.")
if (type=="lmer") msg <- paste(msg,"Use the REML argument to specify ML or REML estimation.")
if (type=="glmer") msg <- paste(msg,"Use the nAGQ argument to specify Laplace (nAGQ=1) or adaptive",
"Gauss-Hermite quadrature (nAGQ>1). PQL is no longer available.")
warning(msg)
l... <- l...[names(l...) != "method"]
}
if(length(l...)) {
warning("extra argument(s) ",
paste(sQuote(names(l...)), collapse=", "),
" disregarded")
}
}
}
## check formula and data: return an environment suitable for evaluating
## the formula.
## (1) if data is specified, return it
## (2) otherwise, if formula has an environment, use it
## (3) otherwise [e.g. if formula was passed as a string], try to use parent.frame(2)
## if #3 is true *and* the user is doing something tricky with nested functions,
## this may fail ...
checkFormulaData <- function(formula,data,checkLHS=TRUE,debug=FALSE) {
dataName <- deparse(substitute(data))
missingData <- inherits(tryCatch(eval(data), error=function(e)e), "error")
## data not found (this *should* only happen with garbage input,
## OR when strings used as formulae -> drop1/update/etc.)
##
## alternate attempt (fails)
##
## ff <- sys.frames()
## ex <- substitute(data)
## ii <- rev(seq_along(ff))
## for(i in ii) {
## ex <- eval(substitute(substitute(x, env=sys.frames()[[n]]),
## env = list(x = ex, n=i)))
## }
## origName <- deparse(ex)
## missingData <- !exists(origName)
## (!dataName=="NULL" && !exists(dataName))
if (missingData) {
varex <- function(v,env) exists(v,envir=env,inherits=FALSE)
allvars <- all.vars(as.formula(formula))
allvarex <- function(vvec=allvars,...) { all(sapply(vvec,varex,...)) }
if (allvarex(env=(ee <- environment(formula)))) {
stop("'data' not found, but variables found in environment of formula: ",
"try specifying 'formula' as a formula rather ",
"than a string in the original model")
} else stop("'data' not found, and some variables missing from formula environment")
} else {
if (is.null(data)) {
if (!is.null(ee <- environment(formula))) {
## use environment of formula
denv <- ee
} else {
## e.g. no environment, e.g. because formula is a character vector
## parent.frame(2L) works because [g]lFormula (our calling environment)
## has been called within [g]lmer with env=parent.frame(1L)
## If you call checkFormulaData in some other bizarre way such that
## parent.frame(2L) is *not* OK, you deserve what you get
## calling checkFormulaData directly from the global
## environment should be OK, since trying to go up beyond the global
## environment keeps bringing you back to the global environment ...
denv <- parent.frame(2L)
}
} else {
## data specified
denv <- list2env(data)
}
}
## FIXME: set enclosing environment of denv to environment(formula), or parent.frame(2L) ?
if (debug) {
cat("Debugging parent frames in checkFormulaData:\n")
## find global environment -- could do this with sys.nframe() ?
glEnv <- 1
while (!identical(parent.frame(glEnv),.GlobalEnv)) {
glEnv <- glEnv+1
}
## where are vars?
for (i in 1:glEnv) {
OK <- allvarex(env=parent.frame(i))
cat("vars exist in parent frame ",i)
if (i==glEnv) cat(" (global)")
cat(" ",OK,"\n")
}
cat("vars exist in env of formula ",allvarex(env=denv),"\n")
} ## if (debug)
stopifnot(!checkLHS || length(as.formula(formula,env=denv)) == 3) ## check for two-sided formula
return(denv)
}
## checkFormulaData <- function(formula,data) {
## ee <- environment(formula)
## if (is.null(ee)) {
## ee <- parent.frame(2)
## }
## if (missing(data)) data <- ee
## stopifnot(length(as.formula(formula,env=as.environment(data))) == 3)
## return(data)
## }
##' Not exported; for tests (and examples) that can be slow;
##' Use if(lme4:::testLevel() >= 1.) ..... see ../README.md
testLevel <- function()
if(nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")) &&
is.finite(s <- as.numeric(s))) s else 1
##' General conditional variance-covariance matrix
##'
##' Experimental function for estimating the variance-covariance
##' matrix of the random effects, conditional on the observed data
##' and at the (RE)ML estimate of the fixed effects and covariance
##' parameters. Applicable for any Lambda matrix, but slower than
##' other block-by-block methods.
##' Not exported.
##'
##' TODO:
##' (1) Write up quite note on theory (e.g. Laplace approximation).
##' (2) Figure out how to convert between full q-by-q matrix, and
##' the format currently in the postVar attributes of the
##' elements of the output of ranef.
##' (3) Test.
##' (4) Do we need to think carefully about the differences
##' between REML and ML, beyond just multiplying by a different
##' sigma^2 estimate?
##'
##' @param object \code{merMod} object
##' @return Sparse covariance matrix
condVar <- function(object) {
s2 <- sigma(object)^2
Lamt <- getME(object,"Lambdat")
L <- getME(object,"L")
## never do it this way! fortune("SOOOO")
#V <- solve(L, system = "A")
#V <- chol2inv(L)
#s2*crossprod(Lamt, V) %*% Lamt
LL <- solve(L, Lamt, system = "A")
s2 * crossprod(Lamt, LL)
}
mkMinimalData <- function(formula) {
vars <- all.vars(formula)
nVars <- length(vars)
matr <- matrix(0, 2, nVars)
data <- as.data.frame(matr)
setNames(data, vars)
}
##' Make template for mixed model parameters
mkParsTemplate <- function(formula, data){
if(missing(data)) data <- mkMinimalData(formula)
mfRanef <- model.frame( subbars(formula), data)
mmFixef <- model.matrix(nobars(formula) , data)
reTrms <- mkReTrms(findbars(formula), mfRanef)
cnms <- reTrms$cnms
thetaNamesList <- mapply(mkPfun(), names(cnms), cnms)
thetaNames <- unlist(thetaNamesList)
betaNames <- colnames(mmFixef)
list(beta = setNames(numeric(length( betaNames)), betaNames),
theta = setNames(reTrms$theta, thetaNames),
sigma = 1)
}
##' Make template for mixed model data
##'
##' Useful for simulating balanced designs and for
##' getting started on unbalanced simulations
##'
##' @param formula formula
##' @param data data -- not necessary
##' @param nGrps number of groups per grouping factor
##' @param rfunc function for generating covariate data
##' @param ... additional parameters for rfunc
mkDataTemplate <- function(formula, data,
nGrps = 2, nPerGrp = 1,
rfunc = NULL, ...){
if(missing(data)) data <- mkMinimalData(formula)
grpFacNames <- unique(barnames(findbars(formula)))
varNames <- all.vars(formula)
covariateNames <- setdiff(varNames, grpFacNames)
nGrpFac <- length(grpFacNames)
nCov <- length(covariateNames)
grpFac <- gl(nGrps, nPerGrp)
grpDat <- expand.grid(replicate(nGrpFac, grpFac, simplify = FALSE))
colnames(grpDat) <- grpFacNames
nObs <- nrow(grpDat)
if(is.null(rfunc)) rfunc <- function(n, ...) rep(0, n)
params <- c(list(nObs), list(...))
covDat <- as.data.frame(replicate(nCov, do.call(rfunc, params),
simplify = FALSE))
colnames(covDat) <- covariateNames
cbind(grpDat, covDat)
}
| 33,475 | gpl-2.0 |
fe632709746c52681c0186fc369b085ca33c8669 | kindkaktus/cloc | testcode/utilities.R | # from https://github.com/lme4/lme4
if(getRversion() < "2.15")
paste0 <- function(...) paste(..., sep = '')
### Utilities for parsing and manipulating mixed-model formulas
##' From the result of \code{\link{findbars}} applied to a model formula and
##' and the evaluation frame, create the model matrix, etc. associated with
##' random-effects terms. See the description of the returned value for a
##' detailed list.
##'
##' @title Create Z, Lambda, Lind, etc.
##' @param bars a list of parsed random-effects terms
##' @param fr a model frame in which to evaluate these terms
##' @return a list with components
##' \item{Zt}{transpose of the sparse model matrix for the random effects}
##' \item{Lambdat}{transpose of the sparse relative covariance factor}
##' \item{Lind}{an integer vector of indices determining the mapping of the
##' elements of the \code{theta} to the \code{"x"} slot of \code{Lambdat}}
##' \item{theta}{initial values of the covariance parameters}
##' \item{lower}{lower bounds on the covariance parameters}
##' \item{flist}{list of grouping factors used in the random-effects terms}
##' \item{cnms}{a list of column names of the random effects according to
##' the grouping factors}
##' @importFrom Matrix sparseMatrix rBind drop0
##' @importMethodsFrom Matrix coerce
##' @family utilities
##' @export
mkReTrms <- function(bars, fr) {
if (!length(bars))
stop("No random effects terms specified in formula")
stopifnot(is.list(bars), vapply(bars, is.language, NA),
inherits(fr, "data.frame"))
names(bars) <- barnames(bars)
term.names <- unlist(lapply(bars, function(x) paste(deparse(x),collapse=" ")))
## auxiliary {named, for easier inspection}:
mkBlist <- function(x) {
frloc <- fr
## convert grouping variables to factors as necessary
## TODO: variables that are *not* in the data frame are
## not converted -- these could still break, e.g. if someone
## tries to use the : operator
for (i in all.vars(x[[3]])) {
if (!is.null(frloc[[i]])) frloc[[i]] <- factor(frloc[[i]])
}
if (is.null(ff <- tryCatch(eval(substitute(factor(fac),
list(fac = x[[3]])), frloc),
error=function(e) NULL)))
stop("couldn't evaluate grouping factor ",
deparse(x[[3]])," within model frame:",
" try adding grouping factor to data ",
"frame explicitly if possible")
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ",
deparse(x[[3]]))
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute( ~ foo, list(foo = x[[2]]))), frloc)
nc <- ncol(mm)
nseq <- seq_len(nc)
sm <- as(ff, "sparseMatrix")
if (nc > 1)
sm <- do.call(rBind, lapply(nseq, function(i) sm))
## hack for NA values contained in factor (FIXME: test elsewhere for consistency?)
sm@x[] <- t(mm[!is.na(ff),])
## When nc > 1 switch the order of the rows of sm
## so the random effects for the same level of the
## grouping factor are adjacent.
if (nc > 1)
sm <- sm[as.vector(matrix(seq_len(nc * nl),
ncol = nl, byrow = TRUE)),]
list(ff = ff, sm = sm, nl = nl, cnms = colnames(mm))
}
blist <- lapply(bars, mkBlist)
nl <- vapply(blist, `[[`, 0L, "nl") # no. of levels per term
# (in lmer jss: \ell_i)
## order terms stably by decreasing number of levels in the factor
if (any(diff(nl) > 0)) {
ord <- rev(order(nl))
blist <- blist[ord]
nl <- nl[ord]
}
Ztlist <- lapply(blist, "[[", "sm")
Zt <- do.call(rBind, Ztlist)
names(Ztlist) <- term.names
q <- nrow(Zt)
## Create and install Lambdat, Lind, etc. This must be done after
## any potential reordering of the terms.
cnms <- lapply(blist, "[[", "cnms") # list of column names of the
# model matrix per term
nc <- vapply(cnms, length, 0L) # no. of columns per term
# (in lmer jss: p_i)
nth <- as.integer((nc * (nc+1))/2) # no. of parameters per term
# (in lmer jss: ??)
nb <- nc * nl # no. of random effects per term
# (in lmer jss: q_i)
stopifnot(sum(nb) == q)
boff <- cumsum(c(0L, nb)) # offsets into b
thoff <- cumsum(c(0L, nth)) # offsets into theta
### FIXME: should this be done with cBind and avoid the transpose
### operator? In other words should Lambdat be generated directly
### instead of generating Lambda first then transposing?
Lambdat <-
t(do.call(sparseMatrix,
do.call(rBind,
lapply(seq_along(blist), function(i)
{
mm <- matrix(seq_len(nb[i]), ncol = nc[i],
byrow = TRUE)
dd <- diag(nc[i])
ltri <- lower.tri(dd, diag = TRUE)
ii <- row(dd)[ltri]
jj <- col(dd)[ltri]
dd[cbind(ii, jj)] <- seq_along(ii) # FIXME: this line unnecessary?
data.frame(i = as.vector(mm[, ii]) + boff[i],
j = as.vector(mm[, jj]) + boff[i],
x = as.double(rep.int(seq_along(ii),
rep.int(nl[i], length(ii))) +
thoff[i]))
}))))
thet <- numeric(sum(nth))
ll <- list(Zt=Matrix::drop0(Zt), theta=thet, Lind=as.integer(Lambdat@x),
Gp=unname(c(0L, cumsum(nb))))
## lower bounds on theta elements are 0 if on diagonal, else -Inf
ll$lower <- -Inf * (thet + 1)
ll$lower[unique(diag(Lambdat))] <- 0
ll$theta[] <- is.finite(ll$lower) # initial values of theta are 0 off-diagonal, 1 on
Lambdat@x[] <- ll$theta[ll$Lind] # initialize elements of Lambdat
ll$Lambdat <- Lambdat
# massage the factor list
fl <- lapply(blist, "[[", "ff")
# check for repeated factors
fnms <- names(fl)
if (length(fnms) > length(ufn <- unique(fnms))) {
fl <- fl[match(ufn, fnms)]
asgn <- match(fnms, ufn)
} else asgn <- seq_along(fl)
names(fl) <- ufn
fl <- do.call(data.frame, c(fl, check.names = FALSE))
attr(fl, "assign") <- asgn
ll$flist <- fl
ll$cnms <- cnms
ll$Ztlist <- Ztlist
ll
} ## {mkReTrms}
##' Create an lmerResp, glmResp or nlsResp instance
##'
##' @title Create an lmerResp, glmResp or nlsResp instance
##' @param fr a model frame
##' @param REML logical scalar, value of REML for an lmerResp instance
##' @param family the optional glm family (glmResp only)
##' @param nlenv the nonlinear model evaluation environment (nlsResp only)
##' @param nlmod the nonlinear model function (nlsResp only)
##' @param ... where to look for response information if \code{fr} is missing.
##' Can contain a model response, \code{y}, offset, \code{offset}, and weights,
##' \code{weights}.
##' @return an lmerResp or glmResp or nlsResp instance
##' @family utilities
##' @export
mkRespMod <- function(fr, REML=NULL, family = NULL, nlenv = NULL, nlmod = NULL, ...) {
if(!missing(fr)){
y <- model.response(fr)
offset <- model.offset(fr)
weights <- model.weights(fr)
N <- n <- nrow(fr)
etastart_update <- model.extract(fr, "etastart")
} else {
fr <- list(...)
y <- fr$y
N <- n <- if(is.matrix(y)) nrow(y) else length(y)
offset <- fr$offset
weights <- fr$weights
etastart_update <- fr$etastart
}
## FIXME: may need to add X, or pass it somehow, if we want to use glm.fit
##y <- model.response(fr)
if(length(dim(y)) == 1) {
## avoid problems with 1D arrays, but keep names
nm <- rownames(y)
dim(y) <- NULL
if(!is.null(nm)) names(y) <- nm
}
rho <- new.env()
rho$y <- if (is.null(y)) numeric(0) else y
if (!is.null(REML)) rho$REML <- REML
rho$etastart <- fr$etastart
rho$mustart <- fr$mustart
##N <- n <- nrow(fr)
if (!is.null(nlenv)) {
stopifnot(is.language(nlmod),
is.environment(nlenv),
is.numeric(val <- eval(nlmod, nlenv)),
length(val) == n,
## FIXME? Restriction, not present in ole' nlme():
is.matrix(gr <- attr(val, "gradient")),
mode(gr) == "numeric",
nrow(gr) == n,
!is.null(pnames <- colnames(gr)))
N <- length(gr)
rho$mu <- as.vector(val)
rho$sqrtXwt <- as.vector(gr)
rho$gam <-
unname(unlist(lapply(pnames,
function(nm) get(nm, envir=nlenv))))
}
if (!is.null(offset)) {
if (length(offset) == 1L) offset <- rep.int(offset, N)
stopifnot(length(offset) == N)
rho$offset <- unname(offset)
} else rho$offset <- rep.int(0, N)
if (!is.null(weights)) {
stopifnot(length(weights) == n, all(weights >= 0))
rho$weights <- unname(weights)
} else rho$weights <- rep.int(1, n)
if (is.null(family)) {
if (is.null(nlenv)) return(do.call(lmerResp$new, as.list(rho)))
return(do.call(nlsResp$new,
c(list(nlenv=nlenv,
nlmod=substitute(~foo, list(foo=nlmod)),
pnames=pnames), as.list(rho))))
}
stopifnot(inherits(family, "family"))
## need weights for initializing evaluation
rho$nobs <- n
## allow trivial objects, e.g. for simulation
if (length(y)>0) eval(family$initialize, rho)
family$initialize <- NULL # remove clutter from str output
ll <- as.list(rho)
ans <- do.call("new", c(list(Class="glmResp", family=family),
ll[setdiff(names(ll), c("m", "nobs", "mustart"))]))
if (length(y)>0) ans$updateMu(if (!is.null(es <- etastart_update)) es else
family$linkfun(get("mustart", rho)))
ans
}
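## A minimal sketch (assumes 'sleepstudy'; the lmerResp reference class is
## defined elsewhere in lme4, and lmer() itself passes the rank of X as the
## REML argument for REML fits -- 0L below requests an ML response module):
## fr <- model.frame(Reaction ~ Days, sleepstudy)
## rr <- mkRespMod(fr, REML = 0L)
## head(rr$y)    ## the response; weights/offset default to 1/0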
##' From the right hand side of a formula for a mixed-effects model,
##' determine the pairs of expressions that are separated by the
##' vertical bar operator. Also expand the slash operator in grouping
##' factor expressions and expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Determine random-effects expressions from a formula
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return pairs of expressions that were separated by vertical bars
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' findbars(f1 <- Reaction ~ Days + (Days|Subject))
##' ## => list( Days | Subject )
##' findbars(y ~ Days + (1|Subject) + (0+Days|Subject))
##' ## => list of length 2: list ( 1 | Subject , 0+Days|Subject)
##' findbars(~ 1 + (1|batch/cask))
##' ## => list of length 2: list ( 1 | cask:batch , 1 | batch)
##' identical(findbars(~ 1 + (Days || Subject)),
##' findbars(~ 1 + (1|Subject) + (0+Days|Subject)))
##' \dontshow{
##' stopifnot(identical(findbars(f1),
##' list(expression(Days | Subject)[[1]])))
##' }
##' @family utilities
##' @keywords models utilities
##' @export
findbars <- function(term)
{
## Recursive function applied to individual terms
fb <- function(term)
{
if (is.name(term) || !is.language(term)) return(NULL)
if (term[[1]] == as.name("(")) return(fb(term[[2]]))
stopifnot(is.call(term))
if (term[[1]] == as.name('|')) return(term)
if (length(term) == 2) return(fb(term[[2]]))
c(fb(term[[2]]), fb(term[[3]]))
}
## Expand any slashes in the grouping factors returned by fb
expandSlash <- function(bb)
{
## Create the interaction terms for nested effects
makeInteraction <- function(x)
{
if (length(x) < 2) return(x)
trm1 <- makeInteraction(x[[1]])
trm11 <- if(is.list(trm1)) trm1[[1]] else trm1
list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1)
}
## Return the list of '/'-separated terms
slashTerms <- function(x)
{
if (!("/" %in% all.names(x))) return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
}
if (!is.list(bb))
expandSlash(list(bb))
else
unlist(lapply(bb, function(x) {
if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]])))
## lapply(unlist(...)) - unlist returns a flattened list
lapply(unlist(makeInteraction(trms)),
function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))
else x
}))
}## {expandSlash}
modterm <- expandDoubleVerts(
if(is(term, "formula")) term[[length(term)]] else term)
expandSlash(fb(modterm))
}
##' From the right hand side of a formula for a mixed-effects model,
##' expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Expand terms with \code{'||'} notation into separate \code{'|'} terms
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return the modified term
##' @family utilities
##' @keywords models utilities
##' @export
expandDoubleVerts <- function(term)
{
expandDoubleVert <- function(term) {
frml <- formula(paste0("~", deparse(term[[2]])))
## need term.labels not all.vars to capture interactions too:
newtrms <- paste0("0+", attr(terms(frml), "term.labels"))
if(attr(terms(frml), "intercept")!=0)
newtrms <- c("1", newtrms)
as.formula(paste("~(",
paste(vapply(newtrms, function(trm)
paste0(trm, "|", deparse(term[[3]])), ""),
collapse=")+("), ")"))[[2]]
}
if (!is.name(term) && is.language(term)) {
if (term[[1]] == as.name("(")) {
term[[2]] <- expandDoubleVerts(term[[2]])
}
stopifnot(is.call(term))
if (term[[1]] == as.name('||'))
return( expandDoubleVert(term) )
## else :
term[[2]] <- expandDoubleVerts(term[[2]])
if (length(term) != 2) {
if(length(term) == 3)
term[[3]] <- expandDoubleVerts(term[[3]])
}
}
term
}
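## For instance, a double-bar term expands into independent intercept and
## slope terms (a sketch):
## expandDoubleVerts(y ~ x + (x || g))
## ## => y ~ x + ((1 | g) + (0 + x | g))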
##' Remove the random-effects terms from a mixed-effects formula,
##' thereby producing the fixed-effects formula.
##'
##' @title Omit terms separated by vertical bars in a formula
##' @param term the right-hand side of a mixed-model formula
##' @return the fixed-effects part of the formula
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' nobars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
nobars <- function(term)
{
if (!any(c('|','||') %in% all.names(term))) return(term)
if (is.call(term) && term[[1]] == as.name('|')) return(NULL)
if (is.call(term) && term[[1]] == as.name('||')) return(NULL)
if (length(term) == 2) {
nb <- nobars(term[[2]])
if (is.null(nb)) return(NULL)
term[[2]] <- nb
return(term)
}
nb2 <- nobars(term[[2]])
nb3 <- nobars(term[[3]])
if (is.null(nb2)) return(nb3)
if (is.null(nb3)) return(nb2)
term[[2]] <- nb2
term[[3]] <- nb3
term
}
##' Substitute the '+' function for the '|' and '||' function in a mixed-model
##' formula. This provides a formula suitable for the current
##' model.frame function.
##'
##' @title "Sub[stitute] Bars"
##' @param term a mixed-model formula
##' @return the formula with all | and || operators replaced by +
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' subbars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days + (Days + Subject)
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
subbars <- function(term)
{
if (is.name(term) || !is.language(term)) return(term)
if (length(term) == 2) {
term[[2]] <- subbars(term[[2]])
return(term)
}
stopifnot(length(term) >= 3)
if (is.call(term) && term[[1]] == as.name('|'))
term[[1]] <- as.name('+')
if (is.call(term) && term[[1]] == as.name('||'))
term[[1]] <- as.name('+')
for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
term
}
##' @param bars result of findbars
barnames <- function(bars) {
unlist(lapply(bars, function(x) deparse(x[[3]])))
}
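## e.g. (sketch):
## barnames(findbars(y ~ x + (1 | g) + (x | h)))   ## => c("g", "h")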
##' Does every level of f1 occur in conjunction with exactly one level
##' of f2? The function is based on converting a triplet sparse matrix
##' to a compressed column-oriented form in which the nesting can be
##' quickly evaluated.
##'
##' @title Is f1 nested within f2?
##'
##' @param f1 factor 1
##' @param f2 factor 2
##'
##' @return TRUE if factor 1 is nested within factor 2
##' @examples
##' with(Pastes, isNested(cask, batch)) ## => FALSE
##' with(Pastes, isNested(sample, batch)) ## => TRUE
##' @export
isNested <- function(f1, f2)
{
f1 <- as.factor(f1)
f2 <- as.factor(f2)
stopifnot(length(f1) == length(f2))
k <- length(levels(f1))
sm <- as(new("ngTMatrix",
i = as.integer(f2) - 1L,
j = as.integer(f1) - 1L,
Dim = c(length(levels(f2)), k)),
"CsparseMatrix")
all(sm@p[2:(k+1L)] - sm@p[1:k] <= 1L)
}
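## A self-contained sketch (no data set needed):
## f1 <- factor(c("a", "a", "b", "c")); f2 <- factor(c(1, 1, 1, 2))
## isNested(f1, f2)   ## TRUE: each level of f1 pairs with a single level of f2
## isNested(f2, f1)   ## FALSE: level 1 of f2 occurs with both "a" and "b"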
subnms <- function(form, nms) {
## Recursive function applied to individual terms
sbnm <- function(term)
{
if (is.name(term)) {
if (any(term == nms)) 0 else term
} else switch(length(term),
term, ## 1
{ ## 2
term[[2]] <- sbnm(term[[2]])
term
},
{ ## 3
term[[2]] <- sbnm(term[[2]])
term[[3]] <- sbnm(term[[3]])
term
})
}
sbnm(form)
}
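## e.g. (sketch; 'nms' may be a character vector of variable names):
## subnms(y ~ x + z, nms = "z")   ## => y ~ x + 0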
## Check for a constant term (a literal 1) in an expression
##
## In the mixed-effects part of a nonlinear model formula, a constant
## term is not meaningful because every term must be relative to a
## nonlinear model parameter. This function recursively checks the
## expressions in the formula for a constant, calling stop() if
## such a term is encountered.
## @title Check for constant terms.
## @param expr an expression
## @return NULL. The function is executed for its side effect.
chck1 <- function(expr) {
if ((le <- length(expr)) == 1) {
if (is.numeric(expr) && expr == 1)
stop("1 is not meaningful in a nonlinear model formula")
return()
} else
for (j in seq_len(le)[-1]) Recall(expr[[j]])
}
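## e.g. (sketch):
## chck1(quote(a + b * c))   ## NULL, silently
## chck1(quote(a + 1))       ## error: 1 is not meaningful ...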
## ---> ../man/nlformula.Rd --- Manipulate a nonlinear model formula
##' @param mc matched call from the caller, with arguments 'formula','start',...
##' @return a list with components "respMod", "frame", "X", "reTrms"
nlformula <- function(mc) {
start <- eval(mc$start, parent.frame(2L))
if (is.numeric(start)) start <- list(nlpars = start)
stopifnot(is.numeric(nlpars <- start$nlpars),
vapply(nlpars, length, 0L) == 1L,
length(pnames <- names(nlpars)) == length(nlpars),
length(form <- as.formula(mc$formula)) == 3L,
is(nlform <- eval(form[[2]]), "formula"),
pnames %in%
(av <- all.vars(nlmod <- as.call(nlform[[lnl <- length(nlform)]]))))
## MM{FIXME}: fortune(106) even twice in here!
nlform[[lnl]] <- parse(text= paste(setdiff(all.vars(form), pnames), collapse=' + '))[[1]]
nlform <- eval(nlform)
environment(nlform) <- environment(form)
m <- match(c("data", "subset", "weights", "na.action", "offset"),
names(mc), 0)
mc <- mc[c(1, m)]
mc$drop.unused.levels <- TRUE
mc[[1]] <- as.name("model.frame")
mc$formula <- nlform
fr <- eval(mc, parent.frame(2L))
n <- nrow(fr)
nlenv <- list2env(fr, parent=parent.frame(2L))
lapply(pnames, function(nm) nlenv[[nm]] <- rep.int(nlpars[[nm]], n))
respMod <- mkRespMod(fr, nlenv=nlenv, nlmod=nlmod)
chck1(meform <- form[[3L]])
pnameexpr <- parse(text=paste(pnames, collapse='+'))[[1]]
nb <- nobars(meform)
fe <- eval(substitute(~ 0 + nb + pnameexpr))
environment(fe) <- environment(form)
    frE <- do.call(rbind, lapply(seq_along(nlpars), function(i) fr)) # rbind length(nlpars) copies of the frame
for (nm in pnames) # convert these variables in fr to indicators
frE[[nm]] <- as.numeric(rep(nm == pnames, each = n))
X <- model.matrix(fe, frE)
rownames(X) <- NULL
reTrms <- mkReTrms(lapply(findbars(meform),
function(expr) {
expr[[2]] <- substitute(0+foo, list(foo=expr[[2]]))
expr
}), frE)
list(respMod=respMod, frame=fr, X=X, reTrms=reTrms, pnames=pnames)
} ## {nlformula}
##--> ../man/mkMerMod.Rd ---Create a merMod object
##' @param rho the environment of the objective function
##' @param opt the value returned by the optimizer
##' @param reTrms reTrms list from the calling function
mkMerMod <- function(rho, opt, reTrms, fr, mc, lme4conv=NULL) {
if(missing(mc)) mc <- match.call()
stopifnot(is.environment(rho),
is(pp <- rho$pp, "merPredD"),
is(resp <- rho$resp, "lmResp"),
is.list(opt), "par" %in% names(opt),
c("conv","fval") %in% substr(names(opt),1,4), ## "conv[ergence]", "fval[ues]"
is.list(reTrms), c("flist", "cnms", "Gp", "lower") %in% names(reTrms),
length(rcl <- class(resp)) == 1)
n <- nrow(pp$V)
p <- ncol(pp$V)
dims <- c(N=nrow(pp$X), n=n, p=p, nmp=n-p,
nth=length(pp$theta), q=nrow(pp$Zt),
nAGQ=rho$nAGQ,
compDev=rho$compDev,
## 'use scale' in the sense of whether dispersion parameter should
## be reported/used (*not* whether theta should be scaled by sigma)
useSc=(rcl != "glmResp" ||
!resp$family$family %in% c("poisson","binomial")),
reTrms=length(reTrms$cnms),
spFe=0L,
REML=if (rcl=="lmerResp") resp$REML else 0L,
GLMM=(rcl=="glmResp"),
NLMM=(rcl=="nlsResp"))
storage.mode(dims) <- "integer"
fac <- as.numeric(rcl != "nlsResp")
if (trivial.y <- (length(resp$y)==0)) {
## trivial model
sqrLenU <- wrss <- pwrss <- NA
} else {
sqrLenU <- pp$sqrL(fac)
wrss <- resp$wrss()
pwrss <- wrss + sqrLenU
}
weights <- resp$weights
beta <- pp$beta(fac)
#sigmaML <- pwrss/sum(weights)
sigmaML <- pwrss/n
if (rcl != "lmerResp") {
pars <- opt$par
if (length(pars) > length(pp$theta)) beta <- pars[-(seq_along(pp$theta))]
}
cmp <- c(ldL2=pp$ldL2(), ldRX2=pp$ldRX2(), wrss=wrss,
ussq=sqrLenU, pwrss=pwrss,
drsum=if (rcl=="glmResp" && !trivial.y) resp$resDev() else NA,
REML=if (rcl=="lmerResp" && resp$REML != 0L && !trivial.y)
opt$fval else NA,
## FIXME: construct 'REML deviance' here?
dev=if (rcl=="lmerResp" && resp$REML != 0L || trivial.y) NA else opt$fval,
sigmaML=sqrt(unname(if (!dims["useSc"] || trivial.y) NA else sigmaML)),
sigmaREML=sqrt(unname(if (rcl!="lmerResp" || trivial.y) NA else sigmaML*(dims['n']/dims['nmp']))),
tolPwrss=rho$tolPwrss)
## TODO: improve this hack to get something in frame slot (maybe need weights, etc...)
if(missing(fr)) fr <- data.frame(resp$y)
new(switch(rcl, lmerResp="lmerMod", glmResp="glmerMod", nlsResp="nlmerMod"),
call=mc, frame=fr, flist=reTrms$flist, cnms=reTrms$cnms,
Gp=reTrms$Gp, theta=pp$theta, beta=beta,
u=if (trivial.y) rep(NA_real_,nrow(pp$Zt)) else pp$u(fac),
lower=reTrms$lower, devcomp=list(cmp=cmp, dims=dims),
pp=pp, resp=resp,
optinfo = list (optimizer= attr(opt,"optimizer"),
control = attr(opt,"control"),
derivs = attr(opt,"derivs"),
conv = list(opt=opt$conv, lme4=lme4conv),
feval = if (is.null(opt$feval)) NA else opt$feval,
warnings = attr(opt,"warnings"), val = opt$par)
)
}## {mkMerMod}
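## Typical (internal) use, sketched via lme4's modular interface; lFormula(),
## mkLmerDevfun() and optimizeLmer() are defined elsewhere in the package:
## lmod <- lFormula(Reaction ~ Days + (Days | Subject), sleepstudy)
## devfun <- do.call(mkLmerDevfun, lmod)
## opt <- optimizeLmer(devfun)
## fit <- mkMerMod(environment(devfun), opt, lmod$reTrms, fr = lmod$fr)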
## generic argument checking
## 'type': name of calling function ("glmer", "lmer", "nlmer")
##
checkArgs <- function(type,...) {
l... <- list(...)
if (isTRUE(l...[["sparseX"]])) warning("sparseX = TRUE has no effect at present")
## '...' handling up front, safe-guarding against typos ("familiy") :
if(length(l... <- list(...))) {
if (!is.null(l...[["family"]])) { # call glmer if family specified
## we will only get here if 'family' is *not* in the arg list
warning("calling lmer with family() is deprecated: please use glmer() instead")
type <- "glmer"
}
## Check for method argument which is no longer used
## (different meanings/hints depending on glmer vs lmer)
if (!is.null(method <- l...[["method"]])) {
msg <- paste("Argument", sQuote("method"), "is deprecated.")
if (type=="lmer") msg <- paste(msg,"Use the REML argument to specify ML or REML estimation.")
if (type=="glmer") msg <- paste(msg,"Use the nAGQ argument to specify Laplace (nAGQ=1) or adaptive",
"Gauss-Hermite quadrature (nAGQ>1). PQL is no longer available.")
warning(msg)
l... <- l...[names(l...) != "method"]
}
if(length(l...)) {
warning("extra argument(s) ",
paste(sQuote(names(l...)), collapse=", "),
" disregarded")
}
}
}
## check formula and data: return an environment suitable for evaluating
## the formula.
## (1) if data is specified, return it
## (2) otherwise, if formula has an environment, use it
## (3) otherwise [e.g. if formula was passed as a string], try to use parent.frame(2)
## if #3 is true *and* the user is doing something tricky with nested functions,
## this may fail ...
checkFormulaData <- function(formula,data,checkLHS=TRUE,debug=FALSE) {
dataName <- deparse(substitute(data))
missingData <- inherits(tryCatch(eval(data), error=function(e)e), "error")
## data not found (this *should* only happen with garbage input,
## OR when strings used as formulae -> drop1/update/etc.)
##
## alternate attempt (fails)
##
## ff <- sys.frames()
## ex <- substitute(data)
## ii <- rev(seq_along(ff))
## for(i in ii) {
## ex <- eval(substitute(substitute(x, env=sys.frames()[[n]]),
## env = list(x = ex, n=i)))
## }
## origName <- deparse(ex)
## missingData <- !exists(origName)
## (!dataName=="NULL" && !exists(dataName))
    ## helper functions: these are also needed in the debug block below,
    ## so define them before branching on whether 'data' was found
    varex <- function(v,env) exists(v,envir=env,inherits=FALSE)
    allvars <- all.vars(as.formula(formula))
    allvarex <- function(vvec=allvars,...) { all(sapply(vvec,varex,...)) }
    if (missingData) {
if (allvarex(env=(ee <- environment(formula)))) {
stop("'data' not found, but variables found in environment of formula: ",
"try specifying 'formula' as a formula rather ",
"than a string in the original model")
} else stop("'data' not found, and some variables missing from formula environment")
} else {
if (is.null(data)) {
if (!is.null(ee <- environment(formula))) {
## use environment of formula
denv <- ee
} else {
## e.g. no environment, e.g. because formula is a character vector
## parent.frame(2L) works because [g]lFormula (our calling environment)
## has been called within [g]lmer with env=parent.frame(1L)
## If you call checkFormulaData in some other bizarre way such that
## parent.frame(2L) is *not* OK, you deserve what you get
## calling checkFormulaData directly from the global
## environment should be OK, since trying to go up beyond the global
## environment keeps bringing you back to the global environment ...
denv <- parent.frame(2L)
}
} else {
## data specified
denv <- list2env(data)
}
}
## FIXME: set enclosing environment of denv to environment(formula), or parent.frame(2L) ?
if (debug) {
cat("Debugging parent frames in checkFormulaData:\n")
## find global environment -- could do this with sys.nframe() ?
glEnv <- 1
while (!identical(parent.frame(glEnv),.GlobalEnv)) {
glEnv <- glEnv+1
}
## where are vars?
for (i in 1:glEnv) {
OK <- allvarex(env=parent.frame(i))
cat("vars exist in parent frame ",i)
if (i==glEnv) cat(" (global)")
cat(" ",OK,"\n")
}
cat("vars exist in env of formula ",allvarex(env=denv),"\n")
} ## if (debug)
stopifnot(!checkLHS || length(as.formula(formula,env=denv)) == 3) ## check for two-sided formula
return(denv)
}
## checkFormulaData <- function(formula,data) {
## ee <- environment(formula)
## if (is.null(ee)) {
## ee <- parent.frame(2)
## }
## if (missing(data)) data <- ee
## stopifnot(length(as.formula(formula,env=as.environment(data))) == 3)
## return(data)
## }
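## e.g. (sketch): returns an environment in which 'formula' can be evaluated
## dd <- data.frame(y = 1:4, x = 1:4, g = rep(1:2, 2))
## ee <- checkFormulaData(y ~ x + (1 | g), data = dd)
## exists("y", envir = ee)   ## TRUE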
##' Not exported; for tests (and examples) that can be slow;
##' Use if(lme4:::testLevel() >= 1.) ..... see ../README.md
testLevel <- function()
if(nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")) &&
is.finite(s <- as.numeric(s))) s else 1
##' General conditional variance-covariance matrix
##'
##' Experimental function for estimating the variance-covariance
##' matrix of the random effects, conditional on the observed data
##' and at the (RE)ML estimate of the fixed effects and covariance
##' parameters. Applicable for any Lambda matrix, but slower than
##' other block-by-block methods.
##' Not exported.
##'
##' TODO:
##' (1) Write up a quick note on the theory (e.g. Laplace approximation).
##' (2) Figure out how to convert between full q-by-q matrix, and
##' the format currently in the postVar attributes of the
##' elements of the output of ranef.
##' (3) Test.
##' (4) Do we need to think carefully about the differences
##' between REML and ML, beyond just multiplying by a different
##' sigma^2 estimate?
##'
##' @param object \code{merMod} object
##' @return Sparse covariance matrix
condVar <- function(object) {
s2 <- sigma(object)^2
Lamt <- getME(object,"Lambdat")
L <- getME(object,"L")
## never do it this way! fortune("SOOOO")
#V <- solve(L, system = "A")
#V <- chol2inv(L)
#s2*crossprod(Lamt, V) %*% Lamt
LL <- solve(L, Lamt, system = "A")
s2 * crossprod(Lamt, LL)
}
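## Sketch (assumes a fitted merMod; lmer() and getME() are defined elsewhere
## in lme4):
## fm <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
## cv <- condVar(fm)                    ## sparse q-by-q conditional covariance
## nrow(cv) == nrow(getME(fm, "Zt"))    ## TRUE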
mkMinimalData <- function(formula) {
vars <- all.vars(formula)
nVars <- length(vars)
matr <- matrix(0, 2, nVars)
data <- as.data.frame(matr)
setNames(data, vars)
}
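## e.g. (sketch):
## mkMinimalData(y ~ x + (1 | g))
## ## => a 2-row data.frame with all-zero columns y, x and g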
##' Make template for mixed model parameters
mkParsTemplate <- function(formula, data){
if(missing(data)) data <- mkMinimalData(formula)
mfRanef <- model.frame( subbars(formula), data)
mmFixef <- model.matrix(nobars(formula) , data)
reTrms <- mkReTrms(findbars(formula), mfRanef)
cnms <- reTrms$cnms
thetaNamesList <- mapply(mkPfun(), names(cnms), cnms)
thetaNames <- unlist(thetaNamesList)
betaNames <- colnames(mmFixef)
list(beta = setNames(numeric(length( betaNames)), betaNames),
theta = setNames(reTrms$theta, thetaNames),
sigma = 1)
}
##' Make template for mixed model data
##'
##' Useful for simulating balanced designs and for
##' getting started on unbalanced simulations
##'
##' @param formula formula
##' @param data data -- not necessary
##' @param nGrps number of groups per grouping factor
##' @param rfunc function for generating covariate data
##' @param ... additional parameters for rfunc
mkDataTemplate <- function(formula, data,
nGrps = 2, nPerGrp = 1,
rfunc = NULL, ...){
if(missing(data)) data <- mkMinimalData(formula)
grpFacNames <- unique(barnames(findbars(formula)))
varNames <- all.vars(formula)
covariateNames <- setdiff(varNames, grpFacNames)
nGrpFac <- length(grpFacNames)
nCov <- length(covariateNames)
grpFac <- gl(nGrps, nPerGrp)
grpDat <- expand.grid(replicate(nGrpFac, grpFac, simplify = FALSE))
colnames(grpDat) <- grpFacNames
nObs <- nrow(grpDat)
if(is.null(rfunc)) rfunc <- function(n, ...) rep(0, n)
params <- c(list(nObs), list(...))
covDat <- as.data.frame(replicate(nCov, do.call(rfunc, params),
simplify = FALSE))
colnames(covDat) <- covariateNames
cbind(grpDat, covDat)
}
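## Sketch of a balanced template (column names follow the formula; rnorm is
## just one possible generator for the numeric columns):
## mkDataTemplate(y ~ x + (1 | g), nGrps = 3, nPerGrp = 2, rfunc = rnorm)
## ## => a 6-row data.frame with factor column g and numeric columns y, x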
| 33,475 | gpl-2.0 |
a92686491c2980bb92b6421720b4152e64670197 | dabrze/swirl_courses | Programowanie_w_R/vapply_i_tapply/initLesson.R | # Path to installed lesson
.lessonpath <- file.path(path.package("swirl.pl"), "Courses", "Programowanie_w_R",
"lapply_i_sapply")
# Path to dataset
.datapath <- file.path(.lessonpath, "flag.data.txt")
# Load dataset
flags <- read.csv(.datapath, header=FALSE)
# Set column names
colnames(flags) <- c("name", "landmass", "zone", "area", "population",
"language", "religion", "bars", "stripes", "colours",
"red", "green", "blue", "gold", "white", "black",
"orange", "mainhue", "circles", "crosses", "saltires",
"quarters", "sunstars", "crescent", "triangle",
"icon", "animate", "text", "topleft", "botright")
# Path to dataset info
.infopath <- file.path(.lessonpath, "flag.names.txt")
# Function for user to open info
viewinfo <- function() {
file.edit(.infopath)
return(.infopath)
}
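# e.g. typing viewinfo() at the console opens flag.names.txt (the dataset
# codebook) in the editor and returns its path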
# Dummy function to advance the user past a question whose correct
# answer yields an error
ok <- function() {
invisible()
} | 1,067 | gpl-3.0 |
eba7fd47dce215000ec40ffc15aeb7bc692a18fe | nick-ulle/RCompilerExamples | tests/testthat/test_knn.R | # Description:
# Tests for the knn example.
test_that("compiled order function is correct", {
module = compile_knn()
.order = module[[".order"]]
x = c(1.3, -1.2, 2.1, 3.5, 7.1, -20, 4)
result = .llvm(.order, x, length(x))
target = order(x)
expect_equal(result, target)
x = c(-0.33, 2.02, -1.7, -1.01, -0.99, -0.79, -1.59)
result = .llvm(.order, x, length(x))
target = order(x)
expect_equal(result, target)
})
test_that("compiled which.max function is correct", {
module = compile_knn()
.which.max = module[[".which.max"]]
x = c(1L, 3L, 15L, 3L)
result = .llvm(.which.max, x, length(x))
target = which.max(x)
expect_equal(result, target)
x = c(5L, 9L, 10L, 3L, 8L, 12L, 5L, 11L)
result = .llvm(.which.max, x, length(x))
target = which.max(x)
expect_equal(result, target)
x = -c(1L, 2L, 3L, 4L)
result = .llvm(.which.max, x, length(x))
target = which.max(x)
expect_equal(result, target)
})
test_that("compiled knn function is correct", {
module = compile_knn()
knn = module[[".knn"]]
distances = matrix(c(0.5, 0.72, 1.5, 23.1, 12.3, 14.1, 0.2, 1.9), 4, 2)
labels = c(1L, 1L, 2L, 2L)
result = .llvm(knn, distances, labels, 2L, 2L, nrow(distances),
ncol(distances))
target = c(1L, 2L)
expect_equal(result, target)
})
test_that("compiled knn function is correct for iris data", {
# This is a "realistic" test of 3-nearest neighbors on the iris data.
distance = compile_distance()[[".distance"]]
knn = compile_knn()[[".knn"]]
set.seed(40)
train_idx = sample.int(nrow(iris), 50)
train = iris[train_idx, 1:4]
test = iris[-train_idx, 1:4]
labels = as.integer(iris$Species)[train_idx]
# Computed with:
#
# target = class::knn(train, test, labels, 3L)
#
target = c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 2L, 3L, 3L, 3L,
3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 2L, 3L, 3L, 3L, 3L, 2L, 3L,
3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L)
train = t(train)
test = t(test)
distances = .llvm(distance, train, test, nrow(train), ncol(train),
ncol(test), 2L)
result = .llvm(knn, distances, labels, 3L, 3L, ncol(train), ncol(test))
expect_equal(result, target)
})
| 2,426 | mit |
fde8b72a8959b0b477abaf7018d436e493491701 | benjjneb/dada2 | R/paired.R | ################################################################################
#' Merge denoised forward and reverse reads.
#'
#' This function attempts to merge each denoised pair of forward and reverse reads,
#' rejecting any pairs which do not sufficiently overlap or which contain too many
#' (>0 by default) mismatches in the overlap region. Note: This function assumes that
#' the fastq files for the forward and reverse reads were in the same order.
#'
#' @param dadaF (Required). A \code{\link{dada-class}} object, or a list of such objects.
#' The \code{\link{dada-class}} object(s) generated by denoising the forward reads.
#'
#' @param derepF (Required). \code{character} or \code{\link{derep-class}}.
#' The file path(s) to the fastq file(s), or a directory containing fastq file(s) corresponding to the
#' forward reads of the samples to be merged. Compressed file formats such as .fastq.gz and .fastq.bz2 are supported.
#' A \code{\link{derep-class}} object (or list thereof) returned by \code{link{derepFastq}} can also be provided.
#' These \code{\link{derep-class}} object(s) or fastq files should correspond to those used
#' as input to the \code{\link{dada}} function when denoising the forward reads.
#'
#' @param dadaR (Required). A \code{\link{dada-class}} object, or a list of such objects.
#' The \code{\link{dada-class}} object(s) generated by denoising the reverse reads.
#'
#' @param derepR (Required). \code{character} or \code{\link{derep-class}}.
#' The file path(s) to the fastq file(s), or a directory containing fastq file(s) corresponding to the
#' reverse reads of the samples to be merged. Compressed file formats such as .fastq.gz and .fastq.bz2 are supported.
#' A \code{\link{derep-class}} object (or list thereof) returned by \code{link{derepFastq}} can also be provided.
#' These \code{\link{derep-class}} object(s) or fastq files should correspond to those used
#' as input to the \code{\link{dada}} function when denoising the reverse reads.
#'
#' @param minOverlap (Optional). Default 12.
#' The minimum length of the overlap required for merging the forward and reverse reads.
#'
#' @param maxMismatch (Optional). Default 0.
#' The maximum mismatches allowed in the overlap region.
#'
#' @param returnRejects (Optional). Default FALSE.
#' If TRUE, the pairs that that were rejected based on mismatches in the overlap
#' region are retained in the return \code{data.frame}.
#'
#' @param propagateCol (Optional). \code{character}. Default \code{character(0)}.
#' The return data.frame will include values from columns in the $clustering \code{data.frame}
#' of the provided \code{\link{dada-class}} objects with the provided names.
#'
#' @param justConcatenate (Optional). Default FALSE.
#' If TRUE, the forward and reverse-complemented reverse read are concatenated rather than merged,
#' with a NNNNNNNNNN (10 Ns) spacer inserted between them.
#'
#' @param trimOverhang (Optional). Default FALSE.
#' If TRUE, "overhangs" in the alignment between the forwards and reverse read are trimmed off.
#' "Overhangs" are when the reverse read extends past the start of the forward read, and vice-versa,
#' as can happen when reads are longer than the amplicon and read into the other-direction primer region.
#'
#' @param verbose (Optional). Default FALSE.
#' If TRUE, a summary of the function results are printed to standard output.
#'
#' @param ... (Optional). Further arguments to pass on to \code{\link{nwalign}}.
#' By default, \code{mergePairs} uses alignment parameters that heavily penalize mismatches and gaps
#' when aligning the forward and reverse sequences.
#'
#' @return A \code{data.frame}, or a list of \code{data.frames}.
#'
#' The return \code{data.frame}(s) has a row for each unique pairing of forward/reverse denoised sequences,
#' and the following columns:
#' \itemize{
#' \item{\code{$abundance}: Number of reads corresponding to this forward/reverse combination.}
#' \item{\code{$sequence}: The merged sequence.}
#' \item{\code{$forward}: The index of the forward denoised sequence.}
#' \item{\code{$reverse}: The index of the reverse denoised sequence.}
#' \item{\code{$nmatch}: Number of matching nts in the overlap region.}
#' \item{\code{$nmismatch}: Number of mismatches in the overlap region.}
#' \item{\code{$nindel}: Number of indels in the overlap region.}
#' \item{\code{$prefer}: The sequence used for the overlap region. 1=forward; 2=reverse.}
#' \item{\code{$accept}: TRUE if overlap between forward and reverse denoised sequences was at least
#' \code{minOverlap} and had at most \code{maxMismatch} differences. FALSE otherwise.}
#' \item{\code{$...}: Additional columns specified in \code{propagateCol}.}
#' }
#' A list of data.frames are returned if a list of input objects was provided.
#'
#' @seealso \code{\link{derepFastq}}, \code{\link{dada}}, \code{\link{fastqPairedFilter}}
#' @export
#'
#' @importFrom methods is
#'
#' @examples
#' fnF <- system.file("extdata", "sam1F.fastq.gz", package="dada2")
#' fnR = system.file("extdata", "sam1R.fastq.gz", package="dada2")
#' dadaF <- dada(fnF, selfConsist=TRUE)
#' dadaR <- dada(fnR, selfConsist=TRUE)
#' merger <- mergePairs(dadaF, fnF, dadaR, fnR)
#' merger <- mergePairs(dadaF, fnF, dadaR, fnR, returnRejects=TRUE, propagateCol=c("n0", "birth_ham"))
#' merger <- mergePairs(dadaF, fnF, dadaR, fnR, justConcatenate=TRUE)
#'
mergePairs <- function(dadaF, derepF, dadaR, derepR, minOverlap = 12, maxMismatch=0, returnRejects=FALSE, propagateCol=character(0), justConcatenate=FALSE, trimOverhang=FALSE, verbose=FALSE, ...) {
# Validate input
if(is(dadaF, "dada")) dadaF <- list(dadaF)
if(is(dadaR, "dada")) dadaR <- list(dadaR)
if(is(derepF, "derep")) derepF <- list(derepF)
else if(is(derepF, "character") && length(derepF)==1 && dir.exists(derepF)) derepF <- parseFastqDirectory(derepF)
if(is(derepR, "derep")) derepR <- list(derepR)
else if(is(derepR, "character") && length(derepR)==1 && dir.exists(derepR)) derepR <- parseFastqDirectory(derepR)
if( !(is.list.of(dadaF, "dada") && is.list.of(dadaR, "dada")) ) {
stop("dadaF and dadaR must be provided as dada-class objects or lists of dada-class objects.")
}
if( !( (is.list.of(derepF, "derep") || is(derepF, "character")) &&
(is.list.of(derepR, "derep") || is(derepR, "character")) )) {
stop("derepF and derepR must be provided as derep-class objects or as character vectors of filenames.")
}
# Perform merging
nrecs <- c(length(dadaF), length(derepF), length(dadaR), length(derepR))
if(length(unique(nrecs))>1) stop("The dadaF/derepF/dadaR/derepR arguments must be the same length.")
rval <- lapply(seq_along(dadaF), function (i) {
mapF <- getDerep(derepF[[i]])$map
mapR <- getDerep(derepR[[i]])$map
if(!(is.integer(mapF) && is.integer(mapR))) stop("Incorrect format of $map in derep-class arguments.")
# if(any(is.na(rF)) || any(is.na(rR))) stop("Non-corresponding maps and dada-outputs.")
if(!(length(mapF) == length(mapR) &&
max(mapF, na.rm=TRUE) == length(dadaF[[i]]$map) &&
max(mapR, na.rm=TRUE) == length(dadaR[[i]]$map))) {
stop("Non-corresponding derep-class and dada-class objects.")
}
rF <- dadaF[[i]]$map[mapF]
rR <- dadaR[[i]]$map[mapR]
pairdf <- data.frame(sequence = "", abundance=0, forward=rF, reverse=rR)
ups <- unique(pairdf) # The unique forward/reverse pairs of denoised sequences
keep <- !is.na(ups$forward) & !is.na(ups$reverse)
ups <- ups[keep, ]
if (nrow(ups)==0) {
outnames <- c("sequence", "abundance", "forward", "reverse",
"nmatch", "nmismatch", "nindel", "prefer", "accept")
ups <- data.frame(matrix(ncol = length(outnames), nrow = 0))
names(ups) <- outnames
if(verbose) {
message("No paired-reads (in ZERO unique pairings) successfully merged out of ", nrow(pairdf), " pairings) input.")
}
return(ups)
} else {
Funqseq <- unname(as.character(dadaF[[i]]$clustering$sequence[ups$forward]))
Runqseq <- rc(unname(as.character(dadaR[[i]]$clustering$sequence[ups$reverse])))
if (justConcatenate == TRUE) {
# Simply concatenate the sequences together
ups$sequence <- mapply(function(x,y) paste0(x,"NNNNNNNNNN", y),
Funqseq, Runqseq, SIMPLIFY=FALSE);
ups$nmatch <- 0
ups$nmismatch <- 0
ups$nindel <- 0
ups$prefer <- NA
ups$accept <- TRUE
} else {
# Align forward and reverse reads.
# Use unbanded N-W align to compare forward/reverse
# Adjusting align params to prioritize zero-mismatch merges
tmp <- getDadaOpt(c("MATCH", "MISMATCH", "GAP_PENALTY"))
if(maxMismatch==0) {
setDadaOpt(MATCH=1L, MISMATCH=-64L, GAP_PENALTY=-64L)
} else {
setDadaOpt(MATCH=1L, MISMATCH=-8L, GAP_PENALTY=-8L)
}
alvecs <- mapply(function(x,y) nwalign(x,y,band=-1,...), Funqseq, Runqseq, SIMPLIFY=FALSE)
setDadaOpt(tmp)
outs <- t(sapply(alvecs, function(x) C_eval_pair(x[1], x[2])))
ups$nmatch <- outs[,1]
ups$nmismatch <- outs[,2]
ups$nindel <- outs[,3]
ups$prefer <- 1 + (dadaR[[i]]$clustering$n0[ups$reverse] > dadaF[[i]]$clustering$n0[ups$forward])
ups$accept <- (ups$nmatch >= minOverlap) & ((ups$nmismatch + ups$nindel) <= maxMismatch)
        # Make the merged sequence with C_pair_consensus. Its final arguments
        # indicate whether 1:forward or 2:reverse takes precedence in the
        # overlap and whether overhangs are trimmed; indels are stripped from
        # the returned sequence. C_pair_consensus is only used here.
        ups$sequence <- mapply(C_pair_consensus, sapply(alvecs,`[`,1), sapply(alvecs,`[`,2), ups$prefer, trimOverhang);
}
# Add abundance and sequence to the output data.frame
tab <- table(pairdf$forward, pairdf$reverse)
ups$abundance <- tab[cbind(ups$forward, ups$reverse)]
ups$sequence[!ups$accept] <- ""
# Add columns from forward/reverse clustering
propagateCol <- propagateCol[propagateCol %in% colnames(dadaF[[i]]$clustering)]
for(col in propagateCol) {
ups[,paste0("F.",col)] <- dadaF[[i]]$clustering[ups$forward,col]
ups[,paste0("R.",col)] <- dadaR[[i]]$clustering[ups$reverse,col]
}
# Sort output by abundance and name
ups <- ups[order(ups$abundance, decreasing=TRUE),]
rownames(ups) <- NULL
if(verbose) {
message(sum(ups$abundance[ups$accept]), " paired-reads (in ", sum(ups$accept), " unique pairings) successfully merged out of ", sum(ups$abundance), " (in ", nrow(ups), " pairings) input.")
}
if(!returnRejects) { ups <- ups[ups$accept,] }
if(any(duplicated(ups$sequence))) {
message("Duplicate sequences in merged output.")
}
return(ups)
}
})
# Construct returns
if(!is.null(names(dadaF))) names(rval) <- names(dadaF)
if(length(rval) == 1) rval <- rval[[1]]
return(rval)
}
#' @importFrom ShortRead FastqStreamer
#' @importFrom ShortRead id
#' @importFrom ShortRead yield
#' @importFrom ShortRead trimTails
sameOrder <- function(fnF, fnR, qualityType = "Auto") {
matched <- TRUE
fF <- FastqStreamer(fnF)
on.exit(close(fF))
fR <- FastqStreamer(fnR)
on.exit(close(fR), add=TRUE)
while( length(suppressWarnings(fqF <- yield(fF, qualityType = qualityType)))
&& length(suppressWarnings(fqR <- yield(fR, qualityType = qualityType))) ) {
idF <- trimTails(id(fqF), 1, " ")
idR <- trimTails(id(fqR), 1, " ")
matched <- matched && all(idF == idR)
}
return(matched)
}
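# A usage sketch with the package's bundled example files (the same paths as
# in the mergePairs() examples above):
# fnF <- system.file("extdata", "sam1F.fastq.gz", package="dada2")
# fnR <- system.file("extdata", "sam1R.fastq.gz", package="dada2")
# sameOrder(fnF, fnR)   # TRUE when forward/reverse record ids pair up in order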
isMatch <- function(al, minOverlap, verbose=FALSE) {
out <- C_eval_pair(al[1], al[2]) # match, mismatch, indel
if(verbose) { cat("Match/mismatch/indel:", out, "\n") }
if(out[1] >= minOverlap && out[2] == 0 && out[3] == 0) {
return(TRUE);
} else {
return(FALSE);
}
}
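# Minimal sketch (hypothetical sequences; nwalign() is the dada2 aligner
# referred to in the mergePairs() documentation above):
# al <- nwalign("TACGTACGTACGTACG", "ACGTACGTACGTACGA", band=-1)
# isMatch(al, minOverlap=12, verbose=TRUE)   # prints match/mismatch/indel counts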
| 12,097 | lgpl-3.0 |
fe632709746c52681c0186fc369b085ca33c8669 | cosmicexplorer/cloc-code | testcode/utilities.R | # from https://github.com/lme4/lme4
if(getRversion() < "2.15")
paste0 <- function(...) paste(..., sep = '')
### Utilities for parsing and manipulating mixed-model formulas
##' From the result of \code{\link{findbars}} applied to a model formula and
##' and the evaluation frame, create the model matrix, etc. associated with
##' random-effects terms. See the description of the returned value for a
##' detailed list.
##'
##' @title Create Z, Lambda, Lind, etc.
##' @param bars a list of parsed random-effects terms
##' @param fr a model frame in which to evaluate these terms
##' @return a list with components
##' \item{Zt}{transpose of the sparse model matrix for the random effects}
##' \item{Lambdat}{transpose of the sparse relative covariance factor}
##' \item{Lind}{an integer vector of indices determining the mapping of the
##' elements of the \code{theta} to the \code{"x"} slot of \code{Lambdat}}
##' \item{theta}{initial values of the covariance parameters}
##' \item{lower}{lower bounds on the covariance parameters}
##' \item{flist}{list of grouping factors used in the random-effects terms}
##' \item{cnms}{a list of column names of the random effects according to
##' the grouping factors}
##' @importFrom Matrix sparseMatrix rBind drop0
##' @importMethodsFrom Matrix coerce
##' @family utilities
##' @export
mkReTrms <- function(bars, fr) {
if (!length(bars))
stop("No random effects terms specified in formula")
stopifnot(is.list(bars), vapply(bars, is.language, NA),
inherits(fr, "data.frame"))
names(bars) <- barnames(bars)
term.names <- unlist(lapply(bars, function(x) paste(deparse(x),collapse=" ")))
## auxiliary {named, for easier inspection}:
mkBlist <- function(x) {
frloc <- fr
## convert grouping variables to factors as necessary
## TODO: variables that are *not* in the data frame are
## not converted -- these could still break, e.g. if someone
## tries to use the : operator
for (i in all.vars(x[[3]])) {
if (!is.null(frloc[[i]])) frloc[[i]] <- factor(frloc[[i]])
}
if (is.null(ff <- tryCatch(eval(substitute(factor(fac),
list(fac = x[[3]])), frloc),
error=function(e) NULL)))
stop("couldn't evaluate grouping factor ",
deparse(x[[3]])," within model frame:",
" try adding grouping factor to data ",
"frame explicitly if possible")
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ",
deparse(x[[3]]))
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute( ~ foo, list(foo = x[[2]]))), frloc)
nc <- ncol(mm)
nseq <- seq_len(nc)
sm <- as(ff, "sparseMatrix")
if (nc > 1)
sm <- do.call(rBind, lapply(nseq, function(i) sm))
## hack for NA values contained in factor (FIXME: test elsewhere for consistency?)
sm@x[] <- t(mm[!is.na(ff),])
## When nc > 1 switch the order of the rows of sm
## so the random effects for the same level of the
## grouping factor are adjacent.
if (nc > 1)
sm <- sm[as.vector(matrix(seq_len(nc * nl),
ncol = nl, byrow = TRUE)),]
list(ff = ff, sm = sm, nl = nl, cnms = colnames(mm))
}
blist <- lapply(bars, mkBlist)
nl <- vapply(blist, `[[`, 0L, "nl") # no. of levels per term
# (in lmer jss: \ell_i)
## order terms stably by decreasing number of levels in the factor
if (any(diff(nl) > 0)) {
ord <- rev(order(nl))
blist <- blist[ord]
nl <- nl[ord]
}
Ztlist <- lapply(blist, "[[", "sm")
Zt <- do.call(rBind, Ztlist)
names(Ztlist) <- term.names
q <- nrow(Zt)
## Create and install Lambdat, Lind, etc. This must be done after
## any potential reordering of the terms.
cnms <- lapply(blist, "[[", "cnms") # list of column names of the
# model matrix per term
nc <- vapply(cnms, length, 0L) # no. of columns per term
# (in lmer jss: p_i)
nth <- as.integer((nc * (nc+1))/2) # no. of parameters per term
# (in lmer jss: ??)
nb <- nc * nl # no. of random effects per term
# (in lmer jss: q_i)
stopifnot(sum(nb) == q)
boff <- cumsum(c(0L, nb)) # offsets into b
thoff <- cumsum(c(0L, nth)) # offsets into theta
### FIXME: should this be done with cBind and avoid the transpose
### operator? In other words should Lambdat be generated directly
### instead of generating Lambda first then transposing?
Lambdat <-
t(do.call(sparseMatrix,
do.call(rBind,
lapply(seq_along(blist), function(i)
{
mm <- matrix(seq_len(nb[i]), ncol = nc[i],
byrow = TRUE)
dd <- diag(nc[i])
ltri <- lower.tri(dd, diag = TRUE)
ii <- row(dd)[ltri]
jj <- col(dd)[ltri]
dd[cbind(ii, jj)] <- seq_along(ii) # FIXME: this line unnecessary?
data.frame(i = as.vector(mm[, ii]) + boff[i],
j = as.vector(mm[, jj]) + boff[i],
x = as.double(rep.int(seq_along(ii),
rep.int(nl[i], length(ii))) +
thoff[i]))
}))))
thet <- numeric(sum(nth))
ll <- list(Zt=Matrix::drop0(Zt), theta=thet, Lind=as.integer(Lambdat@x),
Gp=unname(c(0L, cumsum(nb))))
## lower bounds on theta elements are 0 if on diagonal, else -Inf
ll$lower <- -Inf * (thet + 1)
ll$lower[unique(diag(Lambdat))] <- 0
ll$theta[] <- is.finite(ll$lower) # initial values of theta are 0 off-diagonal, 1 on
Lambdat@x[] <- ll$theta[ll$Lind] # initialize elements of Lambdat
ll$Lambdat <- Lambdat
# massage the factor list
fl <- lapply(blist, "[[", "ff")
# check for repeated factors
fnms <- names(fl)
if (length(fnms) > length(ufn <- unique(fnms))) {
fl <- fl[match(ufn, fnms)]
asgn <- match(fnms, ufn)
} else asgn <- seq_along(fl)
names(fl) <- ufn
fl <- do.call(data.frame, c(fl, check.names = FALSE))
attr(fl, "assign") <- asgn
ll$flist <- fl
ll$cnms <- cnms
ll$Ztlist <- Ztlist
ll
} ## {mkReTrms}
##' Create an lmerResp, glmResp or nlsResp instance
##'
##' @title Create an lmerResp, glmResp or nlsResp instance
##' @param fr a model frame
##' @param REML logical scalar, value of REML for an lmerResp instance
##' @param family the optional glm family (glmResp only)
##' @param nlenv the nonlinear model evaluation environment (nlsResp only)
##' @param nlmod the nonlinear model function (nlsResp only)
##' @param ... where to look for response information if \code{fr} is missing.
##' Can contain a model response, \code{y}, offset, \code{offset}, and weights,
##' \code{weights}.
##' @return an lmerResp or glmResp or nlsResp instance
##' @family utilities
##' @export
mkRespMod <- function(fr, REML=NULL, family = NULL, nlenv = NULL, nlmod = NULL, ...) {
if(!missing(fr)){
y <- model.response(fr)
offset <- model.offset(fr)
weights <- model.weights(fr)
N <- n <- nrow(fr)
etastart_update <- model.extract(fr, "etastart")
} else {
fr <- list(...)
y <- fr$y
N <- n <- if(is.matrix(y)) nrow(y) else length(y)
offset <- fr$offset
weights <- fr$weights
etastart_update <- fr$etastart
}
## FIXME: may need to add X, or pass it somehow, if we want to use glm.fit
##y <- model.response(fr)
if(length(dim(y)) == 1) {
## avoid problems with 1D arrays, but keep names
nm <- rownames(y)
dim(y) <- NULL
if(!is.null(nm)) names(y) <- nm
}
rho <- new.env()
rho$y <- if (is.null(y)) numeric(0) else y
if (!is.null(REML)) rho$REML <- REML
rho$etastart <- fr$etastart
rho$mustart <- fr$mustart
##N <- n <- nrow(fr)
if (!is.null(nlenv)) {
stopifnot(is.language(nlmod),
is.environment(nlenv),
is.numeric(val <- eval(nlmod, nlenv)),
length(val) == n,
## FIXME? Restriction, not present in ole' nlme():
is.matrix(gr <- attr(val, "gradient")),
mode(gr) == "numeric",
nrow(gr) == n,
!is.null(pnames <- colnames(gr)))
N <- length(gr)
rho$mu <- as.vector(val)
rho$sqrtXwt <- as.vector(gr)
rho$gam <-
unname(unlist(lapply(pnames,
function(nm) get(nm, envir=nlenv))))
}
if (!is.null(offset)) {
if (length(offset) == 1L) offset <- rep.int(offset, N)
stopifnot(length(offset) == N)
rho$offset <- unname(offset)
} else rho$offset <- rep.int(0, N)
if (!is.null(weights)) {
stopifnot(length(weights) == n, all(weights >= 0))
rho$weights <- unname(weights)
} else rho$weights <- rep.int(1, n)
if (is.null(family)) {
if (is.null(nlenv)) return(do.call(lmerResp$new, as.list(rho)))
return(do.call(nlsResp$new,
c(list(nlenv=nlenv,
nlmod=substitute(~foo, list(foo=nlmod)),
pnames=pnames), as.list(rho))))
}
stopifnot(inherits(family, "family"))
## need weights for initializing evaluation
rho$nobs <- n
## allow trivial objects, e.g. for simulation
if (length(y)>0) eval(family$initialize, rho)
family$initialize <- NULL # remove clutter from str output
ll <- as.list(rho)
ans <- do.call("new", c(list(Class="glmResp", family=family),
ll[setdiff(names(ll), c("m", "nobs", "mustart"))]))
if (length(y)>0) ans$updateMu(if (!is.null(es <- etastart_update)) es else
family$linkfun(get("mustart", rho)))
ans
}
##' From the right hand side of a formula for a mixed-effects model,
##' determine the pairs of expressions that are separated by the
##' vertical bar operator. Also expand the slash operator in grouping
##' factor expressions and expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Determine random-effects expressions from a formula
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return pairs of expressions that were separated by vertical bars
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' findbars(f1 <- Reaction ~ Days + (Days|Subject))
##' ## => list( Days | Subject )
##' findbars(y ~ Days + (1|Subject) + (0+Days|Subject))
##' ## => list of length 2: list ( 1 | Subject , 0+Days|Subject)
##' findbars(~ 1 + (1|batch/cask))
##' ## => list of length 2: list ( 1 | cask:batch , 1 | batch)
##' identical(findbars(~ 1 + (Days || Subject)),
##' findbars(~ 1 + (1|Subject) + (0+Days|Subject)))
##' \dontshow{
##' stopifnot(identical(findbars(f1),
##' list(expression(Days | Subject)[[1]])))
##' }
##' @family utilities
##' @keywords models utilities
##' @export
findbars <- function(term)
{
## Recursive function applied to individual terms
fb <- function(term)
{
if (is.name(term) || !is.language(term)) return(NULL)
if (term[[1]] == as.name("(")) return(fb(term[[2]]))
stopifnot(is.call(term))
if (term[[1]] == as.name('|')) return(term)
if (length(term) == 2) return(fb(term[[2]]))
c(fb(term[[2]]), fb(term[[3]]))
}
## Expand any slashes in the grouping factors returned by fb
expandSlash <- function(bb)
{
## Create the interaction terms for nested effects
makeInteraction <- function(x)
{
if (length(x) < 2) return(x)
trm1 <- makeInteraction(x[[1]])
trm11 <- if(is.list(trm1)) trm1[[1]] else trm1
list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1)
}
## Return the list of '/'-separated terms
slashTerms <- function(x)
{
if (!("/" %in% all.names(x))) return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
}
if (!is.list(bb))
expandSlash(list(bb))
else
unlist(lapply(bb, function(x) {
if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]])))
## lapply(unlist(...)) - unlist returns a flattened list
lapply(unlist(makeInteraction(trms)),
function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))
else x
}))
}## {expandSlash}
modterm <- expandDoubleVerts(
if(is(term, "formula")) term[[length(term)]] else term)
expandSlash(fb(modterm))
}
##' From the right hand side of a formula for a mixed-effects model,
##' expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Expand terms with \code{'||'} notation into separate \code{'|'} terms
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return the modified term
##' @family utilities
##' @keywords models utilities
##' @export
expandDoubleVerts <- function(term)
{
expandDoubleVert <- function(term) {
frml <- formula(paste0("~", deparse(term[[2]])))
## need term.labels not all.vars to capture interactions too:
newtrms <- paste0("0+", attr(terms(frml), "term.labels"))
if(attr(terms(frml), "intercept")!=0)
newtrms <- c("1", newtrms)
as.formula(paste("~(",
paste(vapply(newtrms, function(trm)
paste0(trm, "|", deparse(term[[3]])), ""),
collapse=")+("), ")"))[[2]]
}
if (!is.name(term) && is.language(term)) {
if (term[[1]] == as.name("(")) {
term[[2]] <- expandDoubleVerts(term[[2]])
}
stopifnot(is.call(term))
if (term[[1]] == as.name('||'))
return( expandDoubleVert(term) )
## else :
term[[2]] <- expandDoubleVerts(term[[2]])
if (length(term) != 2) {
if(length(term) == 3)
term[[3]] <- expandDoubleVerts(term[[3]])
}
}
term
}
##' Remove the random-effects terms from a mixed-effects formula,
##' thereby producing the fixed-effects formula.
##'
##' @title Omit terms separated by vertical bars in a formula
##' @param term the right-hand side of a mixed-model formula
##' @return the fixed-effects part of the formula
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' nobars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
nobars <- function(term)
{
if (!any(c('|','||') %in% all.names(term))) return(term)
if (is.call(term) && term[[1]] == as.name('|')) return(NULL)
if (is.call(term) && term[[1]] == as.name('||')) return(NULL)
if (length(term) == 2) {
nb <- nobars(term[[2]])
if (is.null(nb)) return(NULL)
term[[2]] <- nb
return(term)
}
nb2 <- nobars(term[[2]])
nb3 <- nobars(term[[3]])
if (is.null(nb2)) return(nb3)
if (is.null(nb3)) return(nb2)
term[[2]] <- nb2
term[[3]] <- nb3
term
}
##' Substitute the '+' function for the '|' and '||' function in a mixed-model
##' formula. This provides a formula suitable for the current
##' model.frame function.
##'
##' @title "Sub[stitute] Bars"
##' @param term a mixed-model formula
##' @return the formula with all | and || operators replaced by +
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' subbars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days + (Days + Subject)
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
subbars <- function(term)
{
if (is.name(term) || !is.language(term)) return(term)
if (length(term) == 2) {
term[[2]] <- subbars(term[[2]])
return(term)
}
stopifnot(length(term) >= 3)
if (is.call(term) && term[[1]] == as.name('|'))
term[[1]] <- as.name('+')
if (is.call(term) && term[[1]] == as.name('||'))
term[[1]] <- as.name('+')
for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
term
}
##' @param bars result of findbars
barnames <- function(bars) {
unlist(lapply(bars, function(x) deparse(x[[3]])))
}
##' Does every level of f1 occur in conjunction with exactly one level
##' of f2? The function is based on converting a triplet sparse matrix
##' to a compressed column-oriented form in which the nesting can be
##' quickly evaluated.
##'
##' @title Is f1 nested within f2?
##'
##' @param f1 factor 1
##' @param f2 factor 2
##'
##' @return TRUE if factor 1 is nested within factor 2
##' @examples
##' with(Pastes, isNested(cask, batch)) ## => FALSE
##' with(Pastes, isNested(sample, batch)) ## => TRUE
##' @export
isNested <- function(f1, f2)
{
f1 <- as.factor(f1)
f2 <- as.factor(f2)
stopifnot(length(f1) == length(f2))
k <- length(levels(f1))
sm <- as(new("ngTMatrix",
i = as.integer(f2) - 1L,
j = as.integer(f1) - 1L,
Dim = c(length(levels(f2)), k)),
"CsparseMatrix")
all(sm@p[2:(k+1L)] - sm@p[1:k] <= 1L)
}
subnms <- function(form, nms) {
## Recursive function applied to individual terms
sbnm <- function(term)
{
if (is.name(term)) {
if (any(term == nms)) 0 else term
} else switch(length(term),
term, ## 1
{ ## 2
term[[2]] <- sbnm(term[[2]])
term
},
{ ## 3
term[[2]] <- sbnm(term[[2]])
term[[3]] <- sbnm(term[[3]])
term
})
}
sbnm(form)
}
## Check for a constant term (a literal 1) in an expression
##
## In the mixed-effects part of a nonlinear model formula, a constant
## term is not meaningful because every term must be relative to a
## nonlinear model parameter. This function recursively checks the
## expressions in the formula for a constant, calling stop() if
## such a term is encountered.
## @title Check for constant terms.
## @param expr an expression
## @return NULL. The function is executed for its side effect.
chck1 <- function(expr) {
if ((le <- length(expr)) == 1) {
if (is.numeric(expr) && expr == 1)
stop("1 is not meaningful in a nonlinear model formula")
return()
} else
for (j in seq_len(le)[-1]) Recall(expr[[j]])
}
## ---> ../man/nlformula.Rd --- Manipulate a nonlinear model formula
##' @param mc matched call from the caller, with arguments 'formula','start',...
##' @return a list with components "respMod", "frame", "X", "reTrms"
nlformula <- function(mc) {
start <- eval(mc$start, parent.frame(2L))
if (is.numeric(start)) start <- list(nlpars = start)
stopifnot(is.numeric(nlpars <- start$nlpars),
vapply(nlpars, length, 0L) == 1L,
length(pnames <- names(nlpars)) == length(nlpars),
length(form <- as.formula(mc$formula)) == 3L,
is(nlform <- eval(form[[2]]), "formula"),
pnames %in%
(av <- all.vars(nlmod <- as.call(nlform[[lnl <- length(nlform)]]))))
## MM{FIXME}: fortune(106) even twice in here!
nlform[[lnl]] <- parse(text= paste(setdiff(all.vars(form), pnames), collapse=' + '))[[1]]
nlform <- eval(nlform)
environment(nlform) <- environment(form)
m <- match(c("data", "subset", "weights", "na.action", "offset"),
names(mc), 0)
mc <- mc[c(1, m)]
mc$drop.unused.levels <- TRUE
mc[[1]] <- as.name("model.frame")
mc$formula <- nlform
fr <- eval(mc, parent.frame(2L))
n <- nrow(fr)
nlenv <- list2env(fr, parent=parent.frame(2L))
lapply(pnames, function(nm) nlenv[[nm]] <- rep.int(nlpars[[nm]], n))
respMod <- mkRespMod(fr, nlenv=nlenv, nlmod=nlmod)
chck1(meform <- form[[3L]])
pnameexpr <- parse(text=paste(pnames, collapse='+'))[[1]]
nb <- nobars(meform)
fe <- eval(substitute(~ 0 + nb + pnameexpr))
environment(fe) <- environment(form)
    frE <- do.call(rbind, lapply(seq_along(nlpars), function(i) fr)) # rbind length(nlpars) copies of the frame
for (nm in pnames) # convert these variables in fr to indicators
frE[[nm]] <- as.numeric(rep(nm == pnames, each = n))
X <- model.matrix(fe, frE)
rownames(X) <- NULL
reTrms <- mkReTrms(lapply(findbars(meform),
function(expr) {
expr[[2]] <- substitute(0+foo, list(foo=expr[[2]]))
expr
}), frE)
list(respMod=respMod, frame=fr, X=X, reTrms=reTrms, pnames=pnames)
} ## {nlformula}
##--> ../man/mkMerMod.Rd ---Create a merMod object
##' @param rho the environment of the objective function
##' @param opt the value returned by the optimizer
##' @param reTrms reTrms list from the calling function
mkMerMod <- function(rho, opt, reTrms, fr, mc, lme4conv=NULL) {
if(missing(mc)) mc <- match.call()
stopifnot(is.environment(rho),
is(pp <- rho$pp, "merPredD"),
is(resp <- rho$resp, "lmResp"),
is.list(opt), "par" %in% names(opt),
c("conv","fval") %in% substr(names(opt),1,4), ## "conv[ergence]", "fval[ues]"
is.list(reTrms), c("flist", "cnms", "Gp", "lower") %in% names(reTrms),
length(rcl <- class(resp)) == 1)
n <- nrow(pp$V)
p <- ncol(pp$V)
dims <- c(N=nrow(pp$X), n=n, p=p, nmp=n-p,
nth=length(pp$theta), q=nrow(pp$Zt),
nAGQ=rho$nAGQ,
compDev=rho$compDev,
## 'use scale' in the sense of whether dispersion parameter should
## be reported/used (*not* whether theta should be scaled by sigma)
useSc=(rcl != "glmResp" ||
!resp$family$family %in% c("poisson","binomial")),
reTrms=length(reTrms$cnms),
spFe=0L,
REML=if (rcl=="lmerResp") resp$REML else 0L,
GLMM=(rcl=="glmResp"),
NLMM=(rcl=="nlsResp"))
storage.mode(dims) <- "integer"
fac <- as.numeric(rcl != "nlsResp")
if (trivial.y <- (length(resp$y)==0)) {
## trivial model
sqrLenU <- wrss <- pwrss <- NA
} else {
sqrLenU <- pp$sqrL(fac)
wrss <- resp$wrss()
pwrss <- wrss + sqrLenU
}
weights <- resp$weights
beta <- pp$beta(fac)
#sigmaML <- pwrss/sum(weights)
sigmaML <- pwrss/n
if (rcl != "lmerResp") {
pars <- opt$par
if (length(pars) > length(pp$theta)) beta <- pars[-(seq_along(pp$theta))]
}
cmp <- c(ldL2=pp$ldL2(), ldRX2=pp$ldRX2(), wrss=wrss,
ussq=sqrLenU, pwrss=pwrss,
drsum=if (rcl=="glmResp" && !trivial.y) resp$resDev() else NA,
REML=if (rcl=="lmerResp" && resp$REML != 0L && !trivial.y)
opt$fval else NA,
## FIXME: construct 'REML deviance' here?
dev=if (rcl=="lmerResp" && resp$REML != 0L || trivial.y) NA else opt$fval,
sigmaML=sqrt(unname(if (!dims["useSc"] || trivial.y) NA else sigmaML)),
sigmaREML=sqrt(unname(if (rcl!="lmerResp" || trivial.y) NA else sigmaML*(dims['n']/dims['nmp']))),
tolPwrss=rho$tolPwrss)
## TODO: improve this hack to get something in frame slot (maybe need weights, etc...)
if(missing(fr)) fr <- data.frame(resp$y)
new(switch(rcl, lmerResp="lmerMod", glmResp="glmerMod", nlsResp="nlmerMod"),
call=mc, frame=fr, flist=reTrms$flist, cnms=reTrms$cnms,
Gp=reTrms$Gp, theta=pp$theta, beta=beta,
u=if (trivial.y) rep(NA_real_,nrow(pp$Zt)) else pp$u(fac),
lower=reTrms$lower, devcomp=list(cmp=cmp, dims=dims),
pp=pp, resp=resp,
optinfo = list (optimizer= attr(opt,"optimizer"),
control = attr(opt,"control"),
derivs = attr(opt,"derivs"),
conv = list(opt=opt$conv, lme4=lme4conv),
feval = if (is.null(opt$feval)) NA else opt$feval,
warnings = attr(opt,"warnings"), val = opt$par)
)
}## {mkMerMod}
## generic argument checking
## 'type': name of calling function ("glmer", "lmer", "nlmer")
##
checkArgs <- function(type,...) {
l... <- list(...)
if (isTRUE(l...[["sparseX"]])) warning("sparseX = TRUE has no effect at present")
## '...' handling up front, safe-guarding against typos ("familiy") :
if(length(l... <- list(...))) {
if (!is.null(l...[["family"]])) { # call glmer if family specified
## we will only get here if 'family' is *not* in the arg list
warning("calling lmer with family() is deprecated: please use glmer() instead")
type <- "glmer"
}
## Check for method argument which is no longer used
## (different meanings/hints depending on glmer vs lmer)
if (!is.null(method <- l...[["method"]])) {
msg <- paste("Argument", sQuote("method"), "is deprecated.")
if (type=="lmer") msg <- paste(msg,"Use the REML argument to specify ML or REML estimation.")
if (type=="glmer") msg <- paste(msg,"Use the nAGQ argument to specify Laplace (nAGQ=1) or adaptive",
"Gauss-Hermite quadrature (nAGQ>1). PQL is no longer available.")
warning(msg)
l... <- l...[names(l...) != "method"]
}
if(length(l...)) {
warning("extra argument(s) ",
paste(sQuote(names(l...)), collapse=", "),
" disregarded")
}
}
}
## check formula and data: return an environment suitable for evaluating
## the formula.
## (1) if data is specified, return it
## (2) otherwise, if formula has an environment, use it
## (3) otherwise [e.g. if formula was passed as a string], try to use parent.frame(2)
## if #3 is true *and* the user is doing something tricky with nested functions,
## this may fail ...
checkFormulaData <- function(formula,data,checkLHS=TRUE,debug=FALSE) {
dataName <- deparse(substitute(data))
missingData <- inherits(tryCatch(eval(data), error=function(e)e), "error")
## data not found (this *should* only happen with garbage input,
## OR when strings used as formulae -> drop1/update/etc.)
##
## alternate attempt (fails)
##
## ff <- sys.frames()
## ex <- substitute(data)
## ii <- rev(seq_along(ff))
## for(i in ii) {
## ex <- eval(substitute(substitute(x, env=sys.frames()[[n]]),
## env = list(x = ex, n=i)))
## }
## origName <- deparse(ex)
## missingData <- !exists(origName)
## (!dataName=="NULL" && !exists(dataName))
if (missingData) {
varex <- function(v,env) exists(v,envir=env,inherits=FALSE)
allvars <- all.vars(as.formula(formula))
allvarex <- function(vvec=allvars,...) { all(sapply(vvec,varex,...)) }
if (allvarex(env=(ee <- environment(formula)))) {
stop("'data' not found, but variables found in environment of formula: ",
"try specifying 'formula' as a formula rather ",
"than a string in the original model")
} else stop("'data' not found, and some variables missing from formula environment")
} else {
if (is.null(data)) {
if (!is.null(ee <- environment(formula))) {
## use environment of formula
denv <- ee
} else {
## e.g. no environment, e.g. because formula is a character vector
## parent.frame(2L) works because [g]lFormula (our calling environment)
## has been called within [g]lmer with env=parent.frame(1L)
## If you call checkFormulaData in some other bizarre way such that
## parent.frame(2L) is *not* OK, you deserve what you get
## calling checkFormulaData directly from the global
## environment should be OK, since trying to go up beyond the global
## environment keeps bringing you back to the global environment ...
denv <- parent.frame(2L)
}
} else {
## data specified
denv <- list2env(data)
}
}
## FIXME: set enclosing environment of denv to environment(formula), or parent.frame(2L) ?
if (debug) {
cat("Debugging parent frames in checkFormulaData:\n")
## find global environment -- could do this with sys.nframe() ?
glEnv <- 1
while (!identical(parent.frame(glEnv),.GlobalEnv)) {
glEnv <- glEnv+1
}
## where are vars?
for (i in 1:glEnv) {
OK <- allvarex(env=parent.frame(i))
cat("vars exist in parent frame ",i)
if (i==glEnv) cat(" (global)")
cat(" ",OK,"\n")
}
cat("vars exist in env of formula ",allvarex(env=denv),"\n")
} ## if (debug)
stopifnot(!checkLHS || length(as.formula(formula,env=denv)) == 3) ## check for two-sided formula
return(denv)
}
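## Illustration (not run): a minimal sketch with hypothetical names, for the
## usual case where 'data' is supplied:
##   f <- "y ~ x + (1 | g)"                    # formula given as a string
##   d <- data.frame(y = 1:2, x = 1:2, g = c("a", "b"))
##   ee <- checkFormulaData(f, d)              # an environment built from d
##   ls(ee)                                    ## "g" "x" "y"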
## checkFormulaData <- function(formula,data) {
## ee <- environment(formula)
## if (is.null(ee)) {
## ee <- parent.frame(2)
## }
## if (missing(data)) data <- ee
## stopifnot(length(as.formula(formula,env=as.environment(data))) == 3)
## return(data)
## }
##' Not exported; for tests (and examples) that can be slow;
##' Use if(lme4:::testLevel() >= 1.) ..... see ../README.md
testLevel <- function()
if(nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")) &&
is.finite(s <- as.numeric(s))) s else 1
##' General conditional variance-covariance matrix
##'
##' Experimental function for estimating the variance-covariance
##' matrix of the random effects, conditional on the observed data
##' and at the (RE)ML estimate of the fixed effects and covariance
##' parameters. Applicable for any Lambda matrix, but slower than
##' other block-by-block methods.
##' Not exported.
##'
##' TODO:
##' (1) Write up a quick note on theory (e.g. Laplace approximation).
##' (2) Figure out how to convert between full q-by-q matrix, and
##' the format currently in the postVar attributes of the
##' elements of the output of ranef.
##' (3) Test.
##' (4) Do we need to think carefully about the differences
##' between REML and ML, beyond just multiplying by a different
##' sigma^2 estimate?
##'
##' @param object \code{merMod} object
##' @return Sparse covariance matrix
condVar <- function(object) {
s2 <- sigma(object)^2
Lamt <- getME(object,"Lambdat")
L <- getME(object,"L")
## never do it this way! fortune("SOOOO")
#V <- solve(L, system = "A")
#V <- chol2inv(L)
#s2*crossprod(Lamt, V) %*% Lamt
LL <- solve(L, Lamt, system = "A")
s2 * crossprod(Lamt, LL)
}
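## Illustration (not run), assuming a model fitted to lme4's sleepstudy data;
## condVar is not exported, hence the ':::' :
##   fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
##   cv <- lme4:::condVar(fm1)
##   dim(cv)    ## 36 x 36: 2 effects x 18 subjects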
mkMinimalData <- function(formula) {
vars <- all.vars(formula)
nVars <- length(vars)
matr <- matrix(0, 2, nVars)
data <- as.data.frame(matr)
setNames(data, vars)
}
##' Make template for mixed model parameters
mkParsTemplate <- function(formula, data){
if(missing(data)) data <- mkMinimalData(formula)
mfRanef <- model.frame( subbars(formula), data)
mmFixef <- model.matrix(nobars(formula) , data)
reTrms <- mkReTrms(findbars(formula), mfRanef)
cnms <- reTrms$cnms
thetaNamesList <- mapply(mkPfun(), names(cnms), cnms)
thetaNames <- unlist(thetaNamesList)
betaNames <- colnames(mmFixef)
list(beta = setNames(numeric(length( betaNames)), betaNames),
theta = setNames(reTrms$theta, thetaNames),
sigma = 1)
}
##' Make template for mixed model data
##'
##' Useful for simulating balanced designs and for
##' getting started on unbalanced simulations
##'
##' @param formula formula
##' @param data data -- not necessary
##' @param nGrps number of groups per grouping factor
##' @param rfunc function for generating covariate data
##' @param ... additional parameters for rfunc
mkDataTemplate <- function(formula, data,
nGrps = 2, nPerGrp = 1,
rfunc = NULL, ...){
if(missing(data)) data <- mkMinimalData(formula)
grpFacNames <- unique(barnames(findbars(formula)))
varNames <- all.vars(formula)
covariateNames <- setdiff(varNames, grpFacNames)
nGrpFac <- length(grpFacNames)
nCov <- length(covariateNames)
grpFac <- gl(nGrps, nPerGrp)
grpDat <- expand.grid(replicate(nGrpFac, grpFac, simplify = FALSE))
colnames(grpDat) <- grpFacNames
nObs <- nrow(grpDat)
if(is.null(rfunc)) rfunc <- function(n, ...) rep(0, n)
params <- c(list(nObs), list(...))
covDat <- as.data.frame(replicate(nCov, do.call(rfunc, params),
simplify = FALSE))
colnames(covDat) <- covariateNames
cbind(grpDat, covDat)
}
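## Illustration (not run), with hypothetical variables y, x and grouping
## factor g; rnorm is just one choice of rfunc:
##   head(mkDataTemplate(y ~ x + (x | g), nGrps = 3, nPerGrp = 2, rfunc = rnorm))
##   ## a balanced 6-row frame with columns g, y and x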
| 33,475 | gpl-2.0 |
fe632709746c52681c0186fc369b085ca33c8669 | nexB/cloc-mirror | testcode/utilities.R | # from https://github.com/lme4/lme4
if(getRversion() < "2.15")
paste0 <- function(...) paste(..., sep = '')
### Utilities for parsing and manipulating mixed-model formulas
##' From the result of \code{\link{findbars}} applied to a model formula and
##' and the evaluation frame, create the model matrix, etc. associated with
##' random-effects terms. See the description of the returned value for a
##' detailed list.
##'
##' @title Create Z, Lambda, Lind, etc.
##' @param bars a list of parsed random-effects terms
##' @param fr a model frame in which to evaluate these terms
##' @return a list with components
##' \item{Zt}{transpose of the sparse model matrix for the random effects}
##' \item{Lambdat}{transpose of the sparse relative covariance factor}
##' \item{Lind}{an integer vector of indices determining the mapping of the
##' elements of the \code{theta} to the \code{"x"} slot of \code{Lambdat}}
##' \item{theta}{initial values of the covariance parameters}
##' \item{lower}{lower bounds on the covariance parameters}
##' \item{flist}{list of grouping factors used in the random-effects terms}
##' \item{cnms}{a list of column names of the random effects according to
##' the grouping factors}
##' @importFrom Matrix sparseMatrix rBind drop0
##' @importMethodsFrom Matrix coerce
##' @family utilities
##' @export
mkReTrms <- function(bars, fr) {
if (!length(bars))
stop("No random effects terms specified in formula")
stopifnot(is.list(bars), vapply(bars, is.language, NA),
inherits(fr, "data.frame"))
names(bars) <- barnames(bars)
term.names <- unlist(lapply(bars, function(x) paste(deparse(x),collapse=" ")))
## auxiliary {named, for easier inspection}:
mkBlist <- function(x) {
frloc <- fr
## convert grouping variables to factors as necessary
## TODO: variables that are *not* in the data frame are
## not converted -- these could still break, e.g. if someone
## tries to use the : operator
for (i in all.vars(x[[3]])) {
if (!is.null(frloc[[i]])) frloc[[i]] <- factor(frloc[[i]])
}
if (is.null(ff <- tryCatch(eval(substitute(factor(fac),
list(fac = x[[3]])), frloc),
error=function(e) NULL)))
stop("couldn't evaluate grouping factor ",
deparse(x[[3]])," within model frame:",
" try adding grouping factor to data ",
"frame explicitly if possible")
if (all(is.na(ff)))
stop("Invalid grouping factor specification, ",
deparse(x[[3]]))
nl <- length(levels(ff))
mm <- model.matrix(eval(substitute( ~ foo, list(foo = x[[2]]))), frloc)
nc <- ncol(mm)
nseq <- seq_len(nc)
sm <- as(ff, "sparseMatrix")
if (nc > 1)
sm <- do.call(rBind, lapply(nseq, function(i) sm))
## hack for NA values contained in factor (FIXME: test elsewhere for consistency?)
sm@x[] <- t(mm[!is.na(ff),])
## When nc > 1 switch the order of the rows of sm
## so the random effects for the same level of the
## grouping factor are adjacent.
if (nc > 1)
sm <- sm[as.vector(matrix(seq_len(nc * nl),
ncol = nl, byrow = TRUE)),]
list(ff = ff, sm = sm, nl = nl, cnms = colnames(mm))
}
blist <- lapply(bars, mkBlist)
nl <- vapply(blist, `[[`, 0L, "nl") # no. of levels per term
# (in lmer jss: \ell_i)
## order terms stably by decreasing number of levels in the factor
if (any(diff(nl) > 0)) {
ord <- rev(order(nl))
blist <- blist[ord]
nl <- nl[ord]
}
Ztlist <- lapply(blist, "[[", "sm")
Zt <- do.call(rBind, Ztlist)
names(Ztlist) <- term.names
q <- nrow(Zt)
## Create and install Lambdat, Lind, etc. This must be done after
## any potential reordering of the terms.
cnms <- lapply(blist, "[[", "cnms") # list of column names of the
# model matrix per term
nc <- vapply(cnms, length, 0L) # no. of columns per term
# (in lmer jss: p_i)
nth <- as.integer((nc * (nc+1))/2) # no. of parameters per term
# (in lmer jss: ??)
nb <- nc * nl # no. of random effects per term
# (in lmer jss: q_i)
stopifnot(sum(nb) == q)
boff <- cumsum(c(0L, nb)) # offsets into b
thoff <- cumsum(c(0L, nth)) # offsets into theta
### FIXME: should this be done with cBind and avoid the transpose
### operator? In other words should Lambdat be generated directly
### instead of generating Lambda first then transposing?
Lambdat <-
t(do.call(sparseMatrix,
do.call(rBind,
lapply(seq_along(blist), function(i)
{
mm <- matrix(seq_len(nb[i]), ncol = nc[i],
byrow = TRUE)
dd <- diag(nc[i])
ltri <- lower.tri(dd, diag = TRUE)
ii <- row(dd)[ltri]
jj <- col(dd)[ltri]
dd[cbind(ii, jj)] <- seq_along(ii) # FIXME: this line unnecessary?
data.frame(i = as.vector(mm[, ii]) + boff[i],
j = as.vector(mm[, jj]) + boff[i],
x = as.double(rep.int(seq_along(ii),
rep.int(nl[i], length(ii))) +
thoff[i]))
}))))
thet <- numeric(sum(nth))
ll <- list(Zt=Matrix::drop0(Zt), theta=thet, Lind=as.integer(Lambdat@x),
Gp=unname(c(0L, cumsum(nb))))
## lower bounds on theta elements are 0 if on diagonal, else -Inf
ll$lower <- -Inf * (thet + 1)
ll$lower[unique(diag(Lambdat))] <- 0
ll$theta[] <- is.finite(ll$lower) # initial values of theta are 0 off-diagonal, 1 on
Lambdat@x[] <- ll$theta[ll$Lind] # initialize elements of Lambdat
ll$Lambdat <- Lambdat
# massage the factor list
fl <- lapply(blist, "[[", "ff")
# check for repeated factors
fnms <- names(fl)
if (length(fnms) > length(ufn <- unique(fnms))) {
fl <- fl[match(ufn, fnms)]
asgn <- match(fnms, ufn)
} else asgn <- seq_along(fl)
names(fl) <- ufn
fl <- do.call(data.frame, c(fl, check.names = FALSE))
attr(fl, "assign") <- asgn
ll$flist <- fl
ll$cnms <- cnms
ll$Ztlist <- Ztlist
ll
} ## {mkReTrms}
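## Illustration (not run), assuming lme4's sleepstudy data is available:
##   fr <- model.frame(subbars(Reaction ~ Days + (Days | Subject)), sleepstudy)
##   rt <- mkReTrms(findbars(Reaction ~ Days + (Days | Subject)), fr)
##   dim(rt$Zt)   ## 36 x 180: (2 effects x 18 subjects) by 180 observations
##   rt$cnms      ## $Subject: "(Intercept)" "Days"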
##' Create an lmerResp, glmResp or nlsResp instance
##'
##' @title Create an lmerResp, glmResp or nlsResp instance
##' @param fr a model frame
##' @param REML logical scalar, value of REML for an lmerResp instance
##' @param family the optional glm family (glmResp only)
##' @param nlenv the nonlinear model evaluation environment (nlsResp only)
##' @param nlmod the nonlinear model function (nlsResp only)
##' @param ... where to look for response information if \code{fr} is missing.
##' Can contain a model response, \code{y}, offset, \code{offset}, and weights,
##' \code{weights}.
##' @return an lmerResp or glmResp or nlsResp instance
##' @family utilities
##' @export
mkRespMod <- function(fr, REML=NULL, family = NULL, nlenv = NULL, nlmod = NULL, ...) {
if(!missing(fr)){
y <- model.response(fr)
offset <- model.offset(fr)
weights <- model.weights(fr)
N <- n <- nrow(fr)
etastart_update <- model.extract(fr, "etastart")
} else {
fr <- list(...)
y <- fr$y
N <- n <- if(is.matrix(y)) nrow(y) else length(y)
offset <- fr$offset
weights <- fr$weights
etastart_update <- fr$etastart
}
## FIXME: may need to add X, or pass it somehow, if we want to use glm.fit
##y <- model.response(fr)
if(length(dim(y)) == 1) {
## avoid problems with 1D arrays, but keep names
nm <- rownames(y)
dim(y) <- NULL
if(!is.null(nm)) names(y) <- nm
}
rho <- new.env()
rho$y <- if (is.null(y)) numeric(0) else y
if (!is.null(REML)) rho$REML <- REML
rho$etastart <- fr$etastart
rho$mustart <- fr$mustart
##N <- n <- nrow(fr)
if (!is.null(nlenv)) {
stopifnot(is.language(nlmod),
is.environment(nlenv),
is.numeric(val <- eval(nlmod, nlenv)),
length(val) == n,
## FIXME? Restriction, not present in ole' nlme():
is.matrix(gr <- attr(val, "gradient")),
mode(gr) == "numeric",
nrow(gr) == n,
!is.null(pnames <- colnames(gr)))
N <- length(gr)
rho$mu <- as.vector(val)
rho$sqrtXwt <- as.vector(gr)
rho$gam <-
unname(unlist(lapply(pnames,
function(nm) get(nm, envir=nlenv))))
}
if (!is.null(offset)) {
if (length(offset) == 1L) offset <- rep.int(offset, N)
stopifnot(length(offset) == N)
rho$offset <- unname(offset)
} else rho$offset <- rep.int(0, N)
if (!is.null(weights)) {
stopifnot(length(weights) == n, all(weights >= 0))
rho$weights <- unname(weights)
} else rho$weights <- rep.int(1, n)
if (is.null(family)) {
if (is.null(nlenv)) return(do.call(lmerResp$new, as.list(rho)))
return(do.call(nlsResp$new,
c(list(nlenv=nlenv,
nlmod=substitute(~foo, list(foo=nlmod)),
pnames=pnames), as.list(rho))))
}
stopifnot(inherits(family, "family"))
## need weights for initializing evaluation
rho$nobs <- n
## allow trivial objects, e.g. for simulation
if (length(y)>0) eval(family$initialize, rho)
family$initialize <- NULL # remove clutter from str output
ll <- as.list(rho)
ans <- do.call("new", c(list(Class="glmResp", family=family),
ll[setdiff(names(ll), c("m", "nobs", "mustart"))]))
if (length(y)>0) ans$updateMu(if (!is.null(es <- etastart_update)) es else
family$linkfun(get("mustart", rho)))
ans
}
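## Illustration (not run), assuming lme4's sleepstudy data is available; with
## no family, nlenv or nlmod an lmerResp reference object is returned:
##   rr <- mkRespMod(model.frame(Reaction ~ Days, sleepstudy))
##   head(rr$y)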
##' From the right hand side of a formula for a mixed-effects model,
##' determine the pairs of expressions that are separated by the
##' vertical bar operator. Also expand the slash operator in grouping
##' factor expressions and expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Determine random-effects expressions from a formula
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return pairs of expressions that were separated by vertical bars
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' findbars(f1 <- Reaction ~ Days + (Days|Subject))
##' ## => list( Days | Subject )
##' findbars(y ~ Days + (1|Subject) + (0+Days|Subject))
##' ## => list of length 2: list ( 1 | Subject , 0+Days|Subject)
##' findbars(~ 1 + (1|batch/cask))
##' ## => list of length 2: list ( 1 | cask:batch , 1 | batch)
##' identical(findbars(~ 1 + (Days || Subject)),
##' findbars(~ 1 + (1|Subject) + (0+Days|Subject)))
##' \dontshow{
##' stopifnot(identical(findbars(f1),
##' list(expression(Days | Subject)[[1]])))
##' }
##' @family utilities
##' @keywords models utilities
##' @export
findbars <- function(term)
{
## Recursive function applied to individual terms
fb <- function(term)
{
if (is.name(term) || !is.language(term)) return(NULL)
if (term[[1]] == as.name("(")) return(fb(term[[2]]))
stopifnot(is.call(term))
if (term[[1]] == as.name('|')) return(term)
if (length(term) == 2) return(fb(term[[2]]))
c(fb(term[[2]]), fb(term[[3]]))
}
## Expand any slashes in the grouping factors returned by fb
expandSlash <- function(bb)
{
## Create the interaction terms for nested effects
makeInteraction <- function(x)
{
if (length(x) < 2) return(x)
trm1 <- makeInteraction(x[[1]])
trm11 <- if(is.list(trm1)) trm1[[1]] else trm1
list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1)
}
## Return the list of '/'-separated terms
slashTerms <- function(x)
{
if (!("/" %in% all.names(x))) return(x)
if (x[[1]] != as.name("/"))
stop("unparseable formula for grouping factor")
list(slashTerms(x[[2]]), slashTerms(x[[3]]))
}
if (!is.list(bb))
expandSlash(list(bb))
else
unlist(lapply(bb, function(x) {
if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]])))
## lapply(unlist(...)) - unlist returns a flattened list
lapply(unlist(makeInteraction(trms)),
function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))
else x
}))
}## {expandSlash}
modterm <- expandDoubleVerts(
if(is(term, "formula")) term[[length(term)]] else term)
expandSlash(fb(modterm))
}
##' From the right hand side of a formula for a mixed-effects model,
##' expand terms with the double vertical bar operator
##' into separate, independent random effect terms.
##'
##' @title Expand terms with \code{'||'} notation into separate \code{'|'} terms
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @param term a mixed-model formula
##' @return the modified term
##' @family utilities
##' @keywords models utilities
##' @export
expandDoubleVerts <- function(term)
{
expandDoubleVert <- function(term) {
frml <- formula(paste0("~", deparse(term[[2]])))
## need term.labels not all.vars to capture interactions too:
newtrms <- paste0("0+", attr(terms(frml), "term.labels"))
if(attr(terms(frml), "intercept")!=0)
newtrms <- c("1", newtrms)
as.formula(paste("~(",
paste(vapply(newtrms, function(trm)
paste0(trm, "|", deparse(term[[3]])), ""),
collapse=")+("), ")"))[[2]]
}
if (!is.name(term) && is.language(term)) {
if (term[[1]] == as.name("(")) {
term[[2]] <- expandDoubleVerts(term[[2]])
}
stopifnot(is.call(term))
if (term[[1]] == as.name('||'))
return( expandDoubleVert(term) )
## else :
term[[2]] <- expandDoubleVerts(term[[2]])
if (length(term) != 2) {
if(length(term) == 3)
term[[3]] <- expandDoubleVerts(term[[3]])
}
}
term
}
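## Illustration (not run): a double-bar term expands into separate,
## independent single-bar terms, roughly as printed below:
##   expandDoubleVerts(~ x + (x || g))
##   ## => ~x + ((1 | g) + (0 + x | g))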
##' Remove the random-effects terms from a mixed-effects formula,
##' thereby producing the fixed-effects formula.
##'
##' @title Omit terms separated by vertical bars in a formula
##' @param term the right-hand side of a mixed-model formula
##' @return the fixed-effects part of the formula
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' nobars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
nobars <- function(term)
{
if (!any(c('|','||') %in% all.names(term))) return(term)
if (is.call(term) && term[[1]] == as.name('|')) return(NULL)
if (is.call(term) && term[[1]] == as.name('||')) return(NULL)
if (length(term) == 2) {
nb <- nobars(term[[2]])
if (is.null(nb)) return(NULL)
term[[2]] <- nb
return(term)
}
nb2 <- nobars(term[[2]])
nb3 <- nobars(term[[3]])
if (is.null(nb2)) return(nb3)
if (is.null(nb3)) return(nb2)
term[[2]] <- nb2
term[[3]] <- nb3
term
}
##' Substitute the '+' function for the '|' and '||' function in a mixed-model
##' formula. This provides a formula suitable for the current
##' model.frame function.
##'
##' @title "Sub[stitute] Bars"
##' @param term a mixed-model formula
##' @return the formula with all | and || operators replaced by +
##' @section Note: This function is called recursively on individual
##' terms in the model, which is why the argument is called \code{term} and not
##' a name like \code{form}, indicating a formula.
##' @examples
##' subbars(Reaction ~ Days + (Days|Subject)) ## => Reaction ~ Days + (Days + Subject)
##' @seealso \code{\link{formula}}, \code{\link{model.frame}}, \code{\link{model.matrix}}.
##' @family utilities
##' @keywords models utilities
##' @export
subbars <- function(term)
{
if (is.name(term) || !is.language(term)) return(term)
if (length(term) == 2) {
term[[2]] <- subbars(term[[2]])
return(term)
}
stopifnot(length(term) >= 3)
if (is.call(term) && term[[1]] == as.name('|'))
term[[1]] <- as.name('+')
if (is.call(term) && term[[1]] == as.name('||'))
term[[1]] <- as.name('+')
for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
term
}
##' @param bars result of findbars
barnames <- function(bars) {
unlist(lapply(bars, function(x) deparse(x[[3]])))
}
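## Illustration (not run):
##   barnames(findbars(y ~ (1 | g) + (x | h)))   ## => c("g", "h")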
##' Does every level of f1 occur in conjunction with exactly one level
##' of f2? The function is based on converting a triplet sparse matrix
##' to a compressed column-oriented form in which the nesting can be
##' quickly evaluated.
##'
##' @title Is f1 nested within f2?
##'
##' @param f1 factor 1
##' @param f2 factor 2
##'
##' @return TRUE if factor 1 is nested within factor 2
##' @examples
##' with(Pastes, isNested(cask, batch)) ## => FALSE
##' with(Pastes, isNested(sample, batch)) ## => TRUE
##' @export
isNested <- function(f1, f2)
{
f1 <- as.factor(f1)
f2 <- as.factor(f2)
stopifnot(length(f1) == length(f2))
k <- length(levels(f1))
sm <- as(new("ngTMatrix",
i = as.integer(f2) - 1L,
j = as.integer(f1) - 1L,
Dim = c(length(levels(f2)), k)),
"CsparseMatrix")
all(sm@p[2:(k+1L)] - sm@p[1:k] <= 1L)
}
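## Mechanism illustration (not run), with hypothetical factors: each column
## of 'sm' corresponds to one level of f1, so nesting holds exactly when
## every column has at most one nonzero entry, i.e. consecutive column
## pointers in sm@p differ by at most 1:
##   isNested(c("s1", "s1", "s2"), c("b1", "b1", "b2"))   ## TRUE
##   isNested(c("s1", "s1", "s2"), c("b1", "b2", "b2"))   ## FALSE: s1 meets b1 and b2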
subnms <- function(form, nms) {
## Recursive function applied to individual terms
sbnm <- function(term)
{
if (is.name(term)) {
if (any(term == nms)) 0 else term
} else switch(length(term),
term, ## 1
{ ## 2
term[[2]] <- sbnm(term[[2]])
term
},
{ ## 3
term[[2]] <- sbnm(term[[2]])
term[[3]] <- sbnm(term[[3]])
term
})
}
sbnm(form)
}
## Check for a constant term (a literal 1) in an expression
##
## In the mixed-effects part of a nonlinear model formula, a constant
## term is not meaningful because every term must be relative to a
## nonlinear model parameter. This function recursively checks the
## expressions in the formula for a constant, calling stop() if
## such a term is encountered.
## @title Check for constant terms.
## @param expr an expression
## @return NULL. The function is executed for its side effect.
chck1 <- function(expr) {
if ((le <- length(expr)) == 1) {
if (is.numeric(expr) && expr == 1)
stop("1 is not meaningful in a nonlinear model formula")
return()
} else
for (j in seq_len(le)[-1]) Recall(expr[[j]])
}
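## Illustration (not run):
##   chck1(quote(Asym + xmid | Subject))   # fine: no literal 1
##   chck1(quote(1 | Subject))             # error: 1 is not meaningful ...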
## ---> ../man/nlformula.Rd --- Manipulate a nonlinear model formula
##' @param mc matched call from the caller, with arguments 'formula','start',...
##' @return a list with components "respMod", "frame", "X", "reTrms"
nlformula <- function(mc) {
start <- eval(mc$start, parent.frame(2L))
if (is.numeric(start)) start <- list(nlpars = start)
stopifnot(is.numeric(nlpars <- start$nlpars),
vapply(nlpars, length, 0L) == 1L,
length(pnames <- names(nlpars)) == length(nlpars),
length(form <- as.formula(mc$formula)) == 3L,
is(nlform <- eval(form[[2]]), "formula"),
pnames %in%
(av <- all.vars(nlmod <- as.call(nlform[[lnl <- length(nlform)]]))))
## MM{FIXME}: fortune(106) even twice in here!
nlform[[lnl]] <- parse(text= paste(setdiff(all.vars(form), pnames), collapse=' + '))[[1]]
nlform <- eval(nlform)
environment(nlform) <- environment(form)
m <- match(c("data", "subset", "weights", "na.action", "offset"),
names(mc), 0)
mc <- mc[c(1, m)]
mc$drop.unused.levels <- TRUE
mc[[1]] <- as.name("model.frame")
mc$formula <- nlform
fr <- eval(mc, parent.frame(2L))
n <- nrow(fr)
nlenv <- list2env(fr, parent=parent.frame(2L))
lapply(pnames, function(nm) nlenv[[nm]] <- rep.int(nlpars[[nm]], n))
respMod <- mkRespMod(fr, nlenv=nlenv, nlmod=nlmod)
chck1(meform <- form[[3L]])
pnameexpr <- parse(text=paste(pnames, collapse='+'))[[1]]
nb <- nobars(meform)
fe <- eval(substitute(~ 0 + nb + pnameexpr))
environment(fe) <- environment(form)
    frE <- do.call(rbind, lapply(seq_along(nlpars), function(i) fr)) # rbind length(nlpars) copies of the frame
for (nm in pnames) # convert these variables in fr to indicators
frE[[nm]] <- as.numeric(rep(nm == pnames, each = n))
X <- model.matrix(fe, frE)
rownames(X) <- NULL
reTrms <- mkReTrms(lapply(findbars(meform),
function(expr) {
expr[[2]] <- substitute(0+foo, list(foo=expr[[2]]))
expr
}), frE)
list(respMod=respMod, frame=fr, X=X, reTrms=reTrms, pnames=pnames)
} ## {nlformula}
##--> ../man/mkMerMod.Rd ---Create a merMod object
##' @param rho the environment of the objective function
##' @param opt the value returned by the optimizer
##' @param reTrms reTrms list from the calling function
mkMerMod <- function(rho, opt, reTrms, fr, mc, lme4conv=NULL) {
if(missing(mc)) mc <- match.call()
stopifnot(is.environment(rho),
is(pp <- rho$pp, "merPredD"),
is(resp <- rho$resp, "lmResp"),
is.list(opt), "par" %in% names(opt),
c("conv","fval") %in% substr(names(opt),1,4), ## "conv[ergence]", "fval[ues]"
is.list(reTrms), c("flist", "cnms", "Gp", "lower") %in% names(reTrms),
length(rcl <- class(resp)) == 1)
n <- nrow(pp$V)
p <- ncol(pp$V)
dims <- c(N=nrow(pp$X), n=n, p=p, nmp=n-p,
nth=length(pp$theta), q=nrow(pp$Zt),
nAGQ=rho$nAGQ,
compDev=rho$compDev,
## 'use scale' in the sense of whether dispersion parameter should
## be reported/used (*not* whether theta should be scaled by sigma)
useSc=(rcl != "glmResp" ||
!resp$family$family %in% c("poisson","binomial")),
reTrms=length(reTrms$cnms),
spFe=0L,
REML=if (rcl=="lmerResp") resp$REML else 0L,
GLMM=(rcl=="glmResp"),
NLMM=(rcl=="nlsResp"))
storage.mode(dims) <- "integer"
fac <- as.numeric(rcl != "nlsResp")
if (trivial.y <- (length(resp$y)==0)) {
## trivial model
sqrLenU <- wrss <- pwrss <- NA
} else {
sqrLenU <- pp$sqrL(fac)
wrss <- resp$wrss()
pwrss <- wrss + sqrLenU
}
weights <- resp$weights
beta <- pp$beta(fac)
#sigmaML <- pwrss/sum(weights)
sigmaML <- pwrss/n
if (rcl != "lmerResp") {
pars <- opt$par
if (length(pars) > length(pp$theta)) beta <- pars[-(seq_along(pp$theta))]
}
cmp <- c(ldL2=pp$ldL2(), ldRX2=pp$ldRX2(), wrss=wrss,
ussq=sqrLenU, pwrss=pwrss,
drsum=if (rcl=="glmResp" && !trivial.y) resp$resDev() else NA,
REML=if (rcl=="lmerResp" && resp$REML != 0L && !trivial.y)
opt$fval else NA,
## FIXME: construct 'REML deviance' here?
dev=if (rcl=="lmerResp" && resp$REML != 0L || trivial.y) NA else opt$fval,
sigmaML=sqrt(unname(if (!dims["useSc"] || trivial.y) NA else sigmaML)),
sigmaREML=sqrt(unname(if (rcl!="lmerResp" || trivial.y) NA else sigmaML*(dims['n']/dims['nmp']))),
tolPwrss=rho$tolPwrss)
## TODO: improve this hack to get something in frame slot (maybe need weights, etc...)
if(missing(fr)) fr <- data.frame(resp$y)
new(switch(rcl, lmerResp="lmerMod", glmResp="glmerMod", nlsResp="nlmerMod"),
call=mc, frame=fr, flist=reTrms$flist, cnms=reTrms$cnms,
Gp=reTrms$Gp, theta=pp$theta, beta=beta,
u=if (trivial.y) rep(NA_real_,nrow(pp$Zt)) else pp$u(fac),
lower=reTrms$lower, devcomp=list(cmp=cmp, dims=dims),
pp=pp, resp=resp,
optinfo = list (optimizer= attr(opt,"optimizer"),
control = attr(opt,"control"),
derivs = attr(opt,"derivs"),
conv = list(opt=opt$conv, lme4=lme4conv),
feval = if (is.null(opt$feval)) NA else opt$feval,
warnings = attr(opt,"warnings"), val = opt$par)
)
}## {mkMerMod}
## generic argument checking
## 'type': name of calling function ("glmer", "lmer", "nlmer")
##
checkArgs <- function(type,...) {
l... <- list(...)
if (isTRUE(l...[["sparseX"]])) warning("sparseX = TRUE has no effect at present")
## '...' handling up front, safe-guarding against typos ("familiy") :
if(length(l... <- list(...))) {
if (!is.null(l...[["family"]])) { # call glmer if family specified
## we will only get here if 'family' is *not* in the arg list
warning("calling lmer with family() is deprecated: please use glmer() instead")
type <- "glmer"
}
## Check for method argument which is no longer used
## (different meanings/hints depending on glmer vs lmer)
if (!is.null(method <- l...[["method"]])) {
msg <- paste("Argument", sQuote("method"), "is deprecated.")
if (type=="lmer") msg <- paste(msg,"Use the REML argument to specify ML or REML estimation.")
if (type=="glmer") msg <- paste(msg,"Use the nAGQ argument to specify Laplace (nAGQ=1) or adaptive",
"Gauss-Hermite quadrature (nAGQ>1). PQL is no longer available.")
warning(msg)
l... <- l...[names(l...) != "method"]
}
if(length(l...)) {
warning("extra argument(s) ",
paste(sQuote(names(l...)), collapse=", "),
" disregarded")
}
}
}
## check formula and data: return an environment suitable for evaluating
## the formula.
## (1) if data is specified, return it
## (2) otherwise, if formula has an environment, use it
## (3) otherwise [e.g. if formula was passed as a string], try to use parent.frame(2)
## if #3 is true *and* the user is doing something tricky with nested functions,
## this may fail ...
checkFormulaData <- function(formula,data,checkLHS=TRUE,debug=FALSE) {
dataName <- deparse(substitute(data))
missingData <- inherits(tryCatch(eval(data), error=function(e)e), "error")
## data not found (this *should* only happen with garbage input,
## OR when strings used as formulae -> drop1/update/etc.)
##
## alternate attempt (fails)
##
## ff <- sys.frames()
## ex <- substitute(data)
## ii <- rev(seq_along(ff))
## for(i in ii) {
## ex <- eval(substitute(substitute(x, env=sys.frames()[[n]]),
## env = list(x = ex, n=i)))
## }
## origName <- deparse(ex)
## missingData <- !exists(origName)
## (!dataName=="NULL" && !exists(dataName))
if (missingData) {
varex <- function(v,env) exists(v,envir=env,inherits=FALSE)
allvars <- all.vars(as.formula(formula))
allvarex <- function(vvec=allvars,...) { all(sapply(vvec,varex,...)) }
if (allvarex(env=(ee <- environment(formula)))) {
stop("'data' not found, but variables found in environment of formula: ",
"try specifying 'formula' as a formula rather ",
"than a string in the original model")
} else stop("'data' not found, and some variables missing from formula environment")
} else {
if (is.null(data)) {
if (!is.null(ee <- environment(formula))) {
## use environment of formula
denv <- ee
} else {
## e.g. no environment, e.g. because formula is a character vector
## parent.frame(2L) works because [g]lFormula (our calling environment)
## has been called within [g]lmer with env=parent.frame(1L)
## If you call checkFormulaData in some other bizarre way such that
## parent.frame(2L) is *not* OK, you deserve what you get
## calling checkFormulaData directly from the global
## environment should be OK, since trying to go up beyond the global
## environment keeps bringing you back to the global environment ...
denv <- parent.frame(2L)
}
} else {
## data specified
denv <- list2env(data)
}
}
## FIXME: set enclosing environment of denv to environment(formula), or parent.frame(2L) ?
if (debug) {
cat("Debugging parent frames in checkFormulaData:\n")
## find global environment -- could do this with sys.nframe() ?
glEnv <- 1
while (!identical(parent.frame(glEnv),.GlobalEnv)) {
glEnv <- glEnv+1
}
## where are vars?
for (i in 1:glEnv) {
OK <- allvarex(env=parent.frame(i))
cat("vars exist in parent frame ",i)
if (i==glEnv) cat(" (global)")
cat(" ",OK,"\n")
}
cat("vars exist in env of formula ",allvarex(env=denv),"\n")
} ## if (debug)
stopifnot(!checkLHS || length(as.formula(formula,env=denv)) == 3) ## check for two-sided formula
return(denv)
}
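## Illustration (not run): a minimal sketch with hypothetical names, for the
## usual case where 'data' is supplied:
##   f <- "y ~ x + (1 | g)"                    # formula given as a string
##   d <- data.frame(y = 1:2, x = 1:2, g = c("a", "b"))
##   ee <- checkFormulaData(f, d)              # an environment built from d
##   ls(ee)                                    ## "g" "x" "y"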
## checkFormulaData <- function(formula,data) {
## ee <- environment(formula)
## if (is.null(ee)) {
## ee <- parent.frame(2)
## }
## if (missing(data)) data <- ee
## stopifnot(length(as.formula(formula,env=as.environment(data))) == 3)
## return(data)
## }
##' Not exported; for tests (and examples) that can be slow;
##' Use if(lme4:::testLevel() >= 1.) ..... see ../README.md
testLevel <- function()
if(nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")) &&
is.finite(s <- as.numeric(s))) s else 1
##' General conditional variance-covariance matrix
##'
##' Experimental function for estimating the variance-covariance
##' matrix of the random effects, conditional on the observed data
##' and at the (RE)ML estimate of the fixed effects and covariance
##' parameters. Applicable for any Lambda matrix, but slower than
##' other block-by-block methods.
##' Not exported.
##'
##' TODO:
##' (1) Write up a quick note on theory (e.g. Laplace approximation).
##' (2) Figure out how to convert between full q-by-q matrix, and
##' the format currently in the postVar attributes of the
##' elements of the output of ranef.
##' (3) Test.
##' (4) Do we need to think carefully about the differences
##' between REML and ML, beyond just multiplying by a different
##' sigma^2 estimate?
##'
##' @param object \code{merMod} object
##' @return Sparse covariance matrix
condVar <- function(object) {
s2 <- sigma(object)^2
Lamt <- getME(object,"Lambdat")
L <- getME(object,"L")
## never do it this way! fortune("SOOOO")
#V <- solve(L, system = "A")
#V <- chol2inv(L)
#s2*crossprod(Lamt, V) %*% Lamt
LL <- solve(L, Lamt, system = "A")
s2 * crossprod(Lamt, LL)
}
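## Illustration (not run), assuming a model fitted to lme4's sleepstudy data;
## condVar is not exported, hence the ':::' :
##   fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
##   cv <- lme4:::condVar(fm1)
##   dim(cv)    ## 36 x 36: 2 effects x 18 subjects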
mkMinimalData <- function(formula) {
vars <- all.vars(formula)
nVars <- length(vars)
matr <- matrix(0, 2, nVars)
data <- as.data.frame(matr)
setNames(data, vars)
}
##' Make template for mixed model parameters
mkParsTemplate <- function(formula, data){
if(missing(data)) data <- mkMinimalData(formula)
mfRanef <- model.frame( subbars(formula), data)
mmFixef <- model.matrix(nobars(formula) , data)
reTrms <- mkReTrms(findbars(formula), mfRanef)
cnms <- reTrms$cnms
thetaNamesList <- mapply(mkPfun(), names(cnms), cnms)
thetaNames <- unlist(thetaNamesList)
betaNames <- colnames(mmFixef)
list(beta = setNames(numeric(length( betaNames)), betaNames),
theta = setNames(reTrms$theta, thetaNames),
sigma = 1)
}
##' Make template for mixed model data
##'
##' Useful for simulating balanced designs and for
##' getting started on unbalanced simulations
##'
##' @param formula formula
##' @param data data -- not necessary
##' @param nGrps number of groups per grouping factor
##' @param rfunc function for generating covariate data
##' @param ... additional parameters for rfunc
mkDataTemplate <- function(formula, data,
nGrps = 2, nPerGrp = 1,
rfunc = NULL, ...){
if(missing(data)) data <- mkMinimalData(formula)
grpFacNames <- unique(barnames(findbars(formula)))
varNames <- all.vars(formula)
covariateNames <- setdiff(varNames, grpFacNames)
nGrpFac <- length(grpFacNames)
nCov <- length(covariateNames)
grpFac <- gl(nGrps, nPerGrp)
grpDat <- expand.grid(replicate(nGrpFac, grpFac, simplify = FALSE))
colnames(grpDat) <- grpFacNames
nObs <- nrow(grpDat)
if(is.null(rfunc)) rfunc <- function(n, ...) rep(0, n)
params <- c(list(nObs), list(...))
covDat <- as.data.frame(replicate(nCov, do.call(rfunc, params),
simplify = FALSE))
colnames(covDat) <- covariateNames
cbind(grpDat, covDat)
}
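## Illustration (not run), with hypothetical variables y, x and grouping
## factor g; rnorm is just one choice of rfunc:
##   head(mkDataTemplate(y ~ x + (x | g), nGrps = 3, nPerGrp = 2, rfunc = rnorm))
##   ## a balanced 6-row frame with columns g, y and x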
| 33,475 | gpl-2.0 |
ba8818e94cff901dd5fa651e2a9b6388c267492d | jackwasey/icd | R/who.R | # TODO: only download (and cache) WHO data as needed, rather than forcing user to wait minutes to download everything on first use.
#' Functions to get the WHO ICD-10 English 2016 and French 2008 data
#' @param resource Fragment of URL with specific ICD-10 resource requested
#' @param year Four-digit year as integer or character
#' @template lang
#' @return
#' \code{.dl_icd10who_memoise} returns the JSON data, or fails with NULL
#' @keywords internal datasets
#' @noRd
.dl_icd10who_memoise <- function(resource,
year = 2016,
lang = "en") {
# WHO changed the URL from https://apps.who.int/classifications to
# https://icd.who.int/browse10 . Nothing complicated: I set this (if unset) in
# zzz.R on package load. If there is another change, the user can update this
# with a package update.
# memoise package has given me problems and crashes. DIY
mem_file_name <- paste(
"WHO", year, lang,
gsub("JsonGetChildrenConcepts\\?ConceptId=|(&|\\?)useHtml=false", "", resource),
"json",
sep = "."
)
mem_dir <- file.path(get_icd_data_dir(), "memoise")
dir.create(mem_dir, showWarnings = FALSE)
mem_path <- file.path(mem_dir, mem_file_name)
if (file.exists(mem_path)) {
.trc(paste(
"Have memoised data for ", year, lang, resource,
"from", mem_path
))
readRDS(mem_path)
} else {
res <- .dl_icd10who_json(year, lang, resource)
.trc(paste(
"Saving memoised data for ", year, lang, resource,
"in", mem_path
))
saveRDS(res, mem_path, version = 2)
res
}
}
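## Illustration (not run): for resource
## "JsonGetChildrenConcepts?ConceptId=U85&useHtml=false", year 2016 and lang
## "en", the gsub above reduces the resource to "U85", so the cache file is
## "WHO.2016.en.U85.json" under file.path(get_icd_data_dir(), "memoise").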
.dl_icd10who_json <- function(year, lang, resource) {
json_url <- paste(
getOption("icd.who_url"),
year,
lang,
resource,
sep = "/"
)
if (.offline() && !.interact()) {
msg <- "Offline and not interactive, so not attempting WHO data download."
.absent_action_switch(msg)
return(NULL)
}
.msg("Getting WHO data with JSON: ", json_url)
http_response <- httr::RETRY("GET", json_url)
  if ((hs <- http_response$status_code) >= 400) {
    .msg("trying once more")
    http_response <- httr::RETRY("GET", json_url)
    if ((hs <- http_response$status_code) >= 400) {
      stop(
        "Unable to fetch resource: ", json_url,
        " with HTTP status ", hs, ". Check your internet connection, ",
        "retry later, then file an issue at: ",
        "https://github.com/jackwasey/icd/issues ."
      )
    }
  } # end 400+
  json_data <- httr::content(http_response, as = "text", encoding = "UTF-8")
  jsonlite::fromJSON(json_data, simplifyDataFrame = TRUE)
}
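## Illustration (not run): with the default option value
## "https://icd.who.int/browse10" (set on package load),
##   .dl_icd10who_json(2016, "en", "JsonGetRootConcepts?useHtml=false")
## requests
##   https://icd.who.int/browse10/2016/en/JsonGetRootConcepts?useHtml=false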
#' Use WHO API to discover chapters
#'
#' Of note, the \code{WHO} package does not provide access to classifications, just
#' WHO summary data.
#' @keywords internal
#' @noRd
.dl_icd10who_chapter_names <- function(ver = "icd10",
year = 2016,
lang = "en") {
.dl_icd10who_children(
ver = ver,
year = year,
lang = lang
)[["label"]]
}
#' Get the children of a concept (ICD-10 chapter, code or range)
#' @param concept_id \code{NULL} for root, concept string for any leaf or
#' intermediate.
#' @examples
#' .dl_icd10who_children("XXII")
#' .dl_icd10who_children("U84")
#' # U85 is a leaf node, returns no children as empty list
#' .dl_icd10who_children("U82-U85")
#' # https://icd.who.int/browse10/2016/en#/U85
#' .dl_icd10who_children("U85")
#' # https://icd.who.int/browse10/2016/en#/P90
#' .dl_icd10who_children("P90-P96")
#' .dl_icd10who_children("P90")
#' @keywords internal
#' @noRd
.dl_icd10who_children <- function(concept_id = NULL, ...) {
resource <- if (is.null(concept_id)) {
"JsonGetRootConcepts?useHtml=false"
} else {
paste0(
"JsonGetChildrenConcepts?ConceptId=",
concept_id,
"&useHtml=false"
)
}
.dl_icd10who_memoise(resource = resource, ...)
}
#' Use public interface to fetch ICD-10 WHO data for a given version
#'
#' The user may call this function to install the full WHO ICD-10 definition on
#' their machine, after which it will be available to \CRANpkg{icd}.
#' @param concept_id This is the id for the code or code group, e.g.,
#' \sQuote{XI} (Chapter 6), \sQuote{T90--T98} (A sub-chapter), \sQuote{E01} (A
#' sub-sub-chapter). You cannot query a single code with this interface.
#' @param year integer 4-digit year
#' @param lang Currently it seems only 'en' works
#' @param ... further arguments passed to self recursively, or
#' \code{.dl_icd10who_memoise}
#' @examples
#' \dontrun{
#' .dl_icd10who_walk(year = 2016, lang = "en", concept_id = "B20-B24")
#' }
#' @keywords internal
#' @noRd
.dl_icd10who_walk <- function(concept_id = NULL,
year = 2016,
lang = "en",
hier_code = character(),
hier_desc = character(),
...) {
.dbg(
".dl_icd10who_memoise with concept_id = ",
ifelse(is.null(concept_id), "NULL", concept_id)
)
.dbg(paste(hier_code, collapse = " -> "))
if (.offline()) {
.msg("Returning NULL because offline")
return()
}
tree_json <- .dl_icd10who_children(
concept_id = concept_id,
year = year,
lang = lang,
...
)
if (is.null(tree_json)) {
warning(
"Unable to retrieve results for concept_id: ", concept_id,
"so returning NULL. Try re-running the command."
)
return()
}
.dbg("hier level = ", length(hier_code))
new_hier <- length(hier_code) + 1
# parallel mclapply is about 2-3x as fast, but may get throttled for multiple
# connections, and error handling and debugging is much harder.
all_new_rows <- lapply(
seq_len(nrow(tree_json)),
function(branch) {
new_rows <- data.frame(
code = character(),
leaf = logical(),
desc = character(),
three_digit = character(),
major = character(),
sub_sub_chapter = character(),
sub_chapter = character(),
chapter = character()
)
# might be looping through chapters, sub-chapters, etc.
child_code <- tree_json[branch, "ID"]
child_desc <- tree_json[branch, "label"]
is_leaf <- tree_json[branch, "isLeaf"]
# for each level, if not defined by arguments, then assign next possible
hier_code[new_hier] <- child_code
hier_desc[new_hier] <- child_desc
sub_sub_chapter <- NA
re_chap_or_sub_chap <- "(^[XVI]+$)|(^.+-.+$)"
hier_three_digit_idx <- which(nchar(hier_code) == 3 &
!grepl(re_chap_or_sub_chap, hier_code))
if (length(hier_code) >= 3 && nchar(hier_code[3]) > 3) {
sub_sub_chapter <- hier_desc[3]
}
this_child_up_hier <- grepl(re_chap_or_sub_chap, child_code)
three_digit <- hier_code[hier_three_digit_idx]
major <- hier_desc[hier_three_digit_idx]
if (!this_child_up_hier && !is.na(three_digit)) {
new_item <- data.frame(
code = child_code,
leaf = is_leaf,
desc = child_desc,
three_digit = three_digit,
major = major,
sub_sub_chapter = sub_sub_chapter,
sub_chapter = hier_desc[2],
chapter = hier_desc[1],
stringsAsFactors = FALSE
)
stopifnot(child_code %nin% new_rows$code)
new_rows <- rbind(new_rows, new_item)
}
if (!is_leaf) {
.dbg(
paste(new_rows$code, collapse = ", "),
" not a leaf, so recursing"
)
recursed_rows <- .dl_icd10who_walk(
concept_id = child_code,
year = year,
lang = lang,
hier_code = hier_code,
hier_desc = hier_desc,
...
)
stopifnot(!any(recursed_rows$code %in% new_rows$code))
new_rows <- rbind(new_rows, recursed_rows)
} # not leaf
new_rows
}
) # lapply loop
if (.verbose() > 1) {
.dbg(
"leaving recursion with length(all_new_rows) = ",
length(all_new_rows)
)
if (length(all_new_rows$code)) {
.trc(paste(all_new_rows$code, collapse = ", "), print = TRUE)
}
}
# just return the rows (we are recursing so can't save anything in this
# function). Parser can do this.
if (!all(vapply(all_new_rows, is.data.frame, logical(1))) ||
!all(vapply(all_new_rows, ncol, integer(1)) == ncol(all_new_rows[[1]]))
) {
stop(
"Error when downloading WHO ICD data. ",
"(Concept ID = ", concept_id, ") ",
"This may be a temporary download failure. Please re-try the command."
)
}
do.call(rbind, all_new_rows)
}
.dl_icd10who_finalize <- function(dat, year, lang) {
dat[["code"]] <- sub(pattern = "\\.", replacement = "", x = dat[["code"]])
for (col_name in c(
"chapter",
"sub_chapter",
"sub_sub_chapter",
"major",
"desc"
)) {
dat[[col_name]] <- sub("[^ ]+ ", "", dat[[col_name]])
}
# First, if three digit doesn't match code, then drop the row, as these are
# incorrectly assimilated rows.
thr <- get_major.icd10(dat$code)
dat <- dat[dat$three_digit == thr, ]
dat$three_digit <- factor_sorted_levels(as.icd10who(dat$three_digit))
# Then I think any remaining rows are plain duplicates
dat <- dat[!duplicated(dat$code), ]
dat <- dat[order(dat$code), ]
rownames(dat) <- NULL
var_name <- paste0("icd10who", year, ifelse(lang == "en", "", lang))
dat$code <- as.icd10who(dat$code)
.save_in_cache(var_name, x = dat)
invisible(dat)
}
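## Illustration of the clean-up above (not run):
##   sub("\\.", "", "A01.0")   ## => "A010": dot removed from the code
##   sub("[^ ]+ ", "", "A01 Typhoid and paratyphoid fevers")
##   ## => "Typhoid and paratyphoid fevers": leading code stripped from labels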
.parse_icd10who2016 <- function(...) {
if (!.confirm_download()) {
return()
}
.dl_icd10who_finalize(
.dl_icd10who_walk(year = 2016, lang = "en", ...),
2016, "en"
)
}
.parse_icd10who2008fr <- function(...) {
if (!.confirm_download()) {
return()
}
.dl_icd10who_finalize(
.dl_icd10who_walk(year = 2008, lang = "fr", ...),
2008,
"fr"
)
}
.downloading_who_message <- function() {
message(paste(
"Downloading or parsing cached WHO ICD data.",
"This may take a few minutes.",
"Data is cached, so repeating the command will return immediately,",
"or finish caching, then return."
))
}
| 10,104 | gpl-3.0 |
d0e8a92d7271387a234583b2ef181bc631558a53 | kyoren/https-github.com-h2oai-h2o-3 | h2o-r/tests/testdir_algos/glm/runit_NOPASS_GLM_forloop_attack_medium.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
# Constants and setters
bools <- c(TRUE, FALSE)
set_x <- function(cols) {
  # Half the time use all candidate columns; otherwise randomly drop
  # elements (myX shrinks as it goes, so later drops are position-shifted),
  # retrying until at least one column remains.
  if(sample(bools,1)) {
    while (TRUE){
      myX <- cols
      for(i in 1:length(cols))
        if (sample(bools, 1))
          myX <- myX[-i]
      if(length(myX) > 0)
        break
    }
    return(myX)
  } else
    cols
}
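# Illustration (not run): set_x(3:9) either returns 3:9 unchanged or a random
# non-empty subset of it, e.g. c(3, 5, 8)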
set_y <- function(col) return(col)
set_training_frame <- function(frame) return(frame)
set_validation_frame <- function(frame) return(frame)
set_max_iterations <- function() sample.int(50,1)
set_beta_epsilon <- function() runif(1)
set_solver <- function() sample(c("AUTO", "IRLSM", "L_BFGS", "COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT"),1)
set_standardize <- function() sample(bools,1)
set_family <- function(family) return(family)
set_link <- function(family) {
if(identical(family, "gaussian"))
return(sample(c("identity", "log", "inverse"), 1))
else if (identical(family, "binomial"))
return("logit")
else if (identical(family, "poisson"))
return(sample(c("log", "identity"),1 ))
else if (identical(family, "gamma"))
return(sample(c("identity", "log", "inverse"),1))
}
set_tweedie_variance_power <- function() {}
set_tweedie_link_power <- function() {}
set_alpha <- function() runif(1)
set_prior <- function() runif(1)
set_lambda <- function() {}
set_lambda_search <- function() sample(bools,1)
set_nlambdas <- function() sample(2:10,1)
set_lambda_min_ratio <- function() {}
set_beta_constraints <- function(standardize, cols, frame, ignored) {
name <- list()
lower_bound <- list()
upper_bound <- list()
if (!is.null(ignored) && any(colnames(frame)[cols] %in% ignored))
cols <- cols[-which(colnames(frame)[cols] %in% ignored)]
for (n in cols) {
# If enum column => create Colname.Class
if (is.factor(frame[,n])) {
# (standardize == T) => (use_all_factor_levels == T) => all factors acceptable
if(is.null(standardize) || standardize)
enums <- paste(names(frame)[n],h2o.levels(frame, n), sep = ".")
# (standardize == F) => (use_all_factor_levels == F) => first factor dropped
else
enums <- paste(names(frame)[n],h2o.levels(frame, n), sep = ".")[-1]
name <- c(name, enums)
for(e in enums) {
l <- runif(1,-1,1)
u <- runif(1) + l
lower_bound <- c(lower_bound, l)
upper_bound <- c(upper_bound, u)
}
} else {
name <- c(name, names(frame)[n])
l <- runif(1,-1,1)
u <- runif(1) + l
lower_bound <- c(lower_bound, l)
upper_bound <- c(upper_bound, u)
}
}
return(data.frame(names = unlist(name),
lower_bounds = unlist(lower_bound),
upper_bounds = unlist(upper_bound)))
}
set_offset_column <- function(cols, frame)
while(1) {
val <- sample(names(frame)[cols], 1)
if(!is.factor(frame[,val]))
return(val)
}
set_weights_column <- function(col) return("weights")
randomParams <- function(family, train, test, x, y) {
parms <- list()
parm_set <- function(parm, required = FALSE, dep = TRUE, ...) {
if (!dep)
return(NULL)
if (required || sample(bools,1)) {
val <- do.call(paste0("set_", parm), list(...))
if (!is.null(val))
if (identical(val, "weights")) {
Log.info(paste0(sub("_", " ", parm), ":"))
print(weights.train)
} else if (is.vector(val)) {
Log.info(paste0(sub("_", " ", parm), ": ",val))
} else if (class(val) == "Frame") {
Log.info(paste0(sub("_", " ", parm), ": "))
} else if (inherits(val, "data.frame")) {
Log.info(paste0(sub("_", " ", parm), ": "))
print(val)
} else {
Log.info(paste0(sub("_", " ", parm), ": ",val)) }
return(val)
}
return(NULL)
}
weights.train <- runif(nrow(train), min = 0, max = 10)
weights.test <- runif(nrow(test), min = 0, max = 10)
train$weights <- as.h2o(weights.train)
test$weights <- as.h2o(weights.test)
parms$x <- parm_set("x", required = TRUE, cols = x)
parms$y <- parm_set("y", required = TRUE, col = y)
parms$family <- parm_set("family", family = family, required = TRUE)
parms$training_frame <- parm_set("training_frame", required = TRUE, frame = train)
parms$validation_frame <- parm_set("validation_frame", frame = test)
parms$max_iterations <- parm_set("max_iterations")
parms$beta_epsilon <- parm_set("beta_epsilon")
parms$solver <- parm_set("solver")
parms$standardize <- parm_set("standardize")
parms$link <- parm_set("link", family = family)
# parms$tweedie_variance_power <- parm_set("tweedie_variance_power")
# parms$tweedie_link_power <- parm_set("tweedie_link_power")
parms$alpha <- parm_set("alpha")
parms$prior <- parm_set("prior", dep = identical(family, "binomial"))
# parms$lambda <- parm_set("lambda")
parms$lambda_search <- parm_set("lambda_search")
parms$nlambdas <- parm_set("nlambdas", dep = !is.null(parms$lambda_search) && parms$lambda_search)
# parms$lambda_min_ratio <- parm_set("lambda_min_ratio")
#parms$offset_column <- parm_set("offset_column", cols = x, frame = train)
#parms$weights_column <- parm_set("weights_column")
parms$beta_constraints <- parm_set("beta_constraints", standardize = parms$standardize,
cols = parms$x, frame = train, ignored = parms$offset_column)
t <- system.time(hh <- do.call("h2o.glm", parms))
print(hh)
print("#########################################################################################")
print("")
print(t)
print("")
}
test.glm.rand_attk_forloop <- function() {
Log.info("Import and data munging...")
pros.hex <- h2o.uploadFile(locate("smalldata/prostate/prostate.csv"))
pros.hex[,2] <- as.factor(pros.hex[,2])
pros.hex[,4] <- as.factor(pros.hex[,4])
pros.hex[,5] <- as.factor(pros.hex[,5])
pros.hex[,6] <- as.factor(pros.hex[,6])
pros.hex[,9] <- as.factor(pros.hex[,9])
p.sid <- h2o.runif(pros.hex)
pros.train <- h2o.assign(pros.hex[p.sid > .2, ], "pros.train")
pros.test <- h2o.assign(pros.hex[p.sid <= .2, ], "pros.test")
cars.hex <- h2o.uploadFile(locate("smalldata/junit/cars.csv"))
cars.hex[,3] <- as.factor(cars.hex[,3])
c.sid <- h2o.runif(cars.hex)
cars.train <- h2o.assign(cars.hex[c.sid > .2, ], "cars.train")
cars.test <- h2o.assign(cars.hex[c.sid <= .2, ], "cars.test")
Log.info("### Binomial ###")
for(i in 1:10)
randomParams("binomial", pros.train, pros.test, 3:9, 2)
Log.info("### Gaussian ###")
for(i in 1:10)
randomParams("gaussian", cars.train, cars.test, 3:7, 2)
Log.info("### Poisson ###")
for(i in 1:10)
randomParams("poisson", cars.train, cars.test, 3:7, 2)
Log.info("### Gamma ###")
for(i in 1:10)
randomParams("gamma", cars.train, cars.test, 3:7, 2)
}
doTest("Checking GLM in Random Attack For Loops", test.glm.rand_attk_forloop)
| 6,943 | apache-2.0 |
300efa20cd1230fd1e6f534413021473a44021c9 | caus-am/aci | R/utils/next_colex_comb.R | next_colex_comb<-function(x) {
  #For a 0-1 vector, gives the next combination in colexicographic order.
  #Can be used to quickly iterate over all subsets of a given size:
  #just start with (1,1,...,1,0,...,0); when the enumeration is exhausted, NA is returned.
  #The underlying mechanism is to find the lowest block of ones
  #and move its highest bit one position up.
  #The rest of the block is then moved to the low end of the word.
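  # Worked example: subsets of size 2 from 4 elements are visited as
  #   c(1,1,0,0) -> c(1,0,1,0) -> c(0,1,1,0) -> c(1,0,0,1)
  #   -> c(0,1,0,1) -> c(0,0,1,1) -> NA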
j=1;
for ( i in index(1,(length(x)-1)) ) {
if ( x[i] == 1 ) {
if ( x[i + 1] == 0 ) {
x[i]<-0;x[i+1]=1;
return(x); #switch bit to left
} else {
x[i]<-0;x[j]=1;j<-j+1;
}
}
}
return(NA)
}
 | 617 | bsd-2-clause |
329b1c1c8c86b8530af11a063810f68ee4841d66 | heggy231/R_Bootcamp_DataWeek_2015 | 8_-_Data_Manipulation.R |
# title: "8 - Data Manipulation"
author: "Joseph Rickert"
date: "September 28, 2015"
# In this script we will show some basic data wrangling.
## Fetch some data from Yahoo Finance
# Go to http://finance.yahoo.com/q/hp?s=IBM+Historical+Prices and copy the link to the table.
# Then read the data directly from the URL into an R data frame.
url <- "http://bit.ly/1NZKUFd"
IBM.stock <- read.table(url,header=TRUE,sep=",")
head(IBM.stock)
# Having taken the trouble to fetch the data from the web we will then show how to write it to disk and
# read it back into a different data frame.
write.csv(IBM.stock,file="IBM.stock.csv",row.names=FALSE)
IBM_too <- read.csv("IBM.stock.csv")
## Augment the data frame
# Here are two ways to add a new variable, Volatility, to a data frame. The first uses "$" to index into the data frame;
# the second uses the within function.
IBM.stock$Volatility<- (IBM.stock$High - IBM.stock$Low)/IBM.stock$Open
head(IBM.stock)
# Alternative using within
IBM.stock2 <- within(IBM.stock,{Volatility = (High - Low)/Open})
head(IBM.stock2)
## Prune the data frame
# We show two ways to prune the data frame so that it only includes prices after January 1, 2000.
# Note that both methods use row, column indexing into the data frame IBM.stock[row,column].
# The first thing we do is check to see what data types the variables are. Noticing that Date is a factor, we will
# make it a date as we build a new data frame.
sapply(IBM.stock,class) # Note that Date is a factor
IBM.stock.2000 <- IBM.stock[as.Date(IBM.stock$Date) > as.Date('2000-01-01'),]
head(IBM.stock.2000)
tail(IBM.stock.2000)
# The second method uses the which() function. First, we build a simple example to show how which() works.
xx <- 1:10
which(xx > 5)
#which(as.Date(IBM.stock$Date) > as.Date('2000-01-01'))
IBM.stock2.2000 <- IBM.stock[which(as.Date(IBM.stock$Date) > as.Date('2000-01-01')),]
tail(IBM.stock2.2000)
## Aggregate data
# Here we will aggregate daily observations to form a monthly series.
# First we will create new year and month variables by extracting the relevant information from the Date variable
IBM.stock.2000$Month <- substr(IBM.stock.2000$Date,6,7) # Add variable Month to data frame
IBM.stock.2000$Year <- substr(IBM.stock.2000$Date,1,4) # Add variable Year to the data frame
head(IBM.stock.2000)
# Make a new data frame to hold the aggregated monthly prices.
IBM.stock.month <- aggregate(.~Month+Year,data=IBM.stock.2000,mean) # The dot in the formula stands for everything
head(IBM.stock.month)
# Make a date variable in IBM.stock.month and assign everything to the first of the month
IBM.stock.month$Date <- as.Date(paste(IBM.stock.month$Year,IBM.stock.month$Month,'01',sep='-'))
head(IBM.stock.month)
# Now sort the data frame to get the latest data first
IBM.stock.month <- IBM.stock.month[with(IBM.stock.month,order(-as.integer(Year),Month)),]
head(IBM.stock.month)
## Merge data
# We will merge the data frame containing the aggregated monthly stock prices from the year 2000
# to the present with a new data frame containing dividend data. First we get the dividend data.
url2 <- "http://ichart.finance.yahoo.com/table.csv?s=IBM&a=00&b=2&c=1962&d=11&e=22&f=2011&g=v&ignore=.csv"
IBM.div <- read.table(url2,header=TRUE,sep=",")
#write.csv(IBM.div,"IBM.div.csv",row.names=FALSE)
head(IBM.div)
#
class(IBM.stock.month$Date)
class(IBM.div$Date)
# Make the IBM.div date into a proper date object
IBM.div$Date <- as.Date(IBM.div$Date)
class(IBM.div$Date)
# Next we write a function to create a column for the merge, picking the last dividend dispersed before each date.
fcn <- function(x){
as.character(IBM.div$Date[min(which(IBM.div$Date < x))])
}
IBM.stock.month$divDate <- sapply(IBM.stock.month$Date,fcn)
IBM.stock.month$divDate <- as.Date(IBM.stock.month$divDate)
head(IBM.stock.month)
# Do the merge
IBM <- merge(IBM.stock.month,IBM.div,by.x='divDate',by.y='Date')
head(IBM)
class(IBM$divDate)
IBM1 <- IBM[order(-as.integer(as.Date(IBM$divDate))),]
head(IBM1)
## Is there an easier way to merge?
# Let's try using the join function from the plyr package.
library(plyr)
head(IBM.stock.month)
head(IBM.div)
names(IBM.div)[1] <- "divDate"
#
IBM2 <- join(IBM.stock.month,IBM.div,by='divDate')
head(IBM2)
# Sort both data frames to compare them.
IBMs <- IBM[order(-as.integer(as.Date(IBM$Date))),]
head(IBMs)
IBM2s <- IBM2[order(-as.integer(as.Date(IBM2$Date))),]
head(IBM2s)
## Comparing data frames
# Have a look at the vignette for the compare package:
# http://cran.r-project.org/web/packages/compare/vignettes/compare-intro.pdf
library(compare)
comparison <- compare(IBMs,IBM2s,allowAll=TRUE)
comparison$result
## Reshaping a data frame
# Finally, let's look at using the reshape function to make "wide" and "long" versions of the data set.
IBM.wide <- reshape(IBM[,c("Year","Month","Close")],idvar="Year",timevar="Month",direction="wide")
IBM.wide
IBM.long <- reshape(IBM.wide,idvar="Year",timevar="Month",direction="long")
head(IBM.long, n=20)
# The dplyr Package
# So far, we have been looking mostly at base R data manipulation techniques.
# But for the past year or so "state-of-the-art" data wrangling with R has been accomplished through Hadley Wickham's dplyr package. Let's look at Hadley's tutorial in his online vignette.
# http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html
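# A minimal dplyr sketch (assuming dplyr is installed; note that plyr, loaded above,
# masks some dplyr verbs, so load order matters in a live session):
# library(dplyr)
# IBM.stock %>%
#   filter(as.Date(Date) > as.Date('2000-01-01')) %>%
#   mutate(Month = substr(Date, 6, 7), Year = substr(Date, 1, 4)) %>%
#   group_by(Year, Month) %>%
#   summarise(Close = mean(Close))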
| 5,394 | mit |
329b1c1c8c86b8530af11a063810f68ee4841d66 | joseph-rickert/R_Bootcamp_DataWeek_2015 | 8_-_Data_Manipulation.R |
# title: "8 - Data Manipulation"
# author: "Joseph Rickert"
# date: "September 28, 2015"
# In this script we will show some basic data wrangling.
## Fetch some data from Yahoo Finance
# Go to http://finance.yahoo.com/q/hp?s=IBM+Historical+Prices and copy the link to the table.
# Then read the data directly from the URL into an R data frame.
url <- "http://bit.ly/1NZKUFd"
IBM.stock <- read.table(url,header=TRUE,sep=",")
head(IBM.stock)
# Having taken the trouble to fetch the data from the web we will then show how to write it to disk and
# read it back into a different data frame.
write.csv(IBM.stock,file="IBM.stock.csv",row.names=FALSE)
IBM_too <- read.csv("IBM.stock.csv")
## Augment the data frame
# Here are two ways to add a new variable, Volatility, to a data frame. The first uses "$" to index into the data frame;
# the second uses the within() function.
IBM.stock$Volatility<- (IBM.stock$High - IBM.stock$Low)/IBM.stock$Open
head(IBM.stock)
# Alternative using within
IBM.stock2 <- within(IBM.stock,{Volatility = (High - Low)/Open})
head(IBM.stock2)
## Prune the data frame
# We show two ways to prune the data frame so that it only includes prices after January 1, 2000.
# Note that both methods use row, column indexing into the data frame IBM.stock[row,column].
# The first thing we do is check to see what data types the variables are. Noticing that Date is a factor, we will
# make it a date as we build a new data frame.
sapply(IBM.stock,class) # Note that Date is a factor
IBM.stock.2000 <- IBM.stock[as.Date(IBM.stock$Date) > as.Date('2000-01-01'),]
head(IBM.stock.2000)
tail(IBM.stock.2000)
# The second method uses the which() function. First, we build a simple example to show how which() works.
xx <- 1:10
which(xx > 5)
#which(as.Date(IBM.stock$Date) > as.Date('2000-01-01'))
IBM.stock2.2000 <- IBM.stock[which(as.Date(IBM.stock$Date) > as.Date('2000-01-01')),]
tail(IBM.stock2.2000)
## Aggregate data
# Here we will aggregate daily observations to form a monthly series.
# First we will create new year and month variables by extracting the relevant information from the Date variable
IBM.stock.2000$Month <- substr(IBM.stock.2000$Date,6,7) # Add variable Month to data frame
IBM.stock.2000$Year <- substr(IBM.stock.2000$Date,1,4) # Add variable Year to the data frame
head(IBM.stock.2000)
# Make a new data frame to hold the aggregated monthly prices.
IBM.stock.month <- aggregate(.~Month+Year,data=IBM.stock.2000,mean) # The dot in the formula stands for everything
head(IBM.stock.month)
# Make a date variable in IBM.stock.month and assign everything to the first of the month
IBM.stock.month$Date <- as.Date(paste(IBM.stock.month$Year,IBM.stock.month$Month,'01',sep='-'))
head(IBM.stock.month)
# Now sort the data frame to get the latest data first
IBM.stock.month <- IBM.stock.month[with(IBM.stock.month,order(-as.integer(Year),Month)),]
head(IBM.stock.month)
## Merge data
# We will merge the data frame containing the aggregated monthly stock prices from the year 2000
# to the present with a new data frame containing dividend data. First we get the dividend data.
url2 <- "http://ichart.finance.yahoo.com/table.csv?s=IBM&a=00&b=2&c=1962&d=11&e=22&f=2011&g=v&ignore=.csv"
IBM.div <- read.table(url2,header=TRUE,sep=",")
#write.csv(IBM.div,"IBM.div.csv",row.names=FALSE)
head(IBM.div)
#
class(IBM.stock.month$Date)
class(IBM.div$Date)
# Make the IBM.div date into a proper date object
IBM.div$Date <- as.Date(IBM.div$Date)
class(IBM.div$Date)
# Next we write a function to create a column for the merge, picking the last dividend disbursed before each date.
fcn <- function(x){
as.character(IBM.div$Date[min(which(IBM.div$Date < x))])
}
IBM.stock.month$divDate <- sapply(IBM.stock.month$Date,fcn)
IBM.stock.month$divDate <- as.Date(IBM.stock.month$divDate)
head(IBM.stock.month)
# Do the merge
IBM <- merge(IBM.stock.month,IBM.div,by.x='divDate',by.y='Date')
head(IBM)
class(IBM$divDate)
IBM1 <- IBM[order(-as.integer(as.Date(IBM$divDate))),]
head(IBM1)
## Is there an easier way to merge?
# Let's try using the join function from the plyr package.
library(plyr)
head(IBM.stock.month)
head(IBM.div)
names(IBM.div)[1] <- "divDate"
#
IBM2 <- join(IBM.stock.month,IBM.div,by='divDate')
head(IBM2)
# Sort both data frames to compare them.
IBMs <- IBM[order(-as.integer(as.Date(IBM$Date))),]
head(IBMs)
IBM2s <- IBM2[order(-as.integer(as.Date(IBM2$Date))),]
head(IBM2s)
## Comparing data frames
# Have a look at the vignette for the compare package:
# http://cran.r-project.org/web/packages/compare/vignettes/compare-intro.pdf
library(compare)
comparison <- compare(IBMs,IBM2s,allowAll=TRUE)
comparison$result
## Reshaping a data frame
# Finally, let's look at using the reshape function to make "wide" and "long" versions of the data set.
IBM.wide <- reshape(IBM[,c("Year","Month","Close")],idvar="Year",timevar="Month",direction="wide")
IBM.wide
IBM.long <- reshape(IBM.wide,idvar="Year",timevar="Month",direction="long")
head(IBM.long, n=20)
# The dplyr Package
# So far, we have been looking mostly at base R data manipulation techniques.
# But for the past year or so "state-of-the-art" data wrangling with R has been accomplished through Hadley Wickham's dplyr package. Let's look at Hadley's tutorial in his online vignette.
# http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html
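# A minimal dplyr sketch (assuming dplyr is installed; note that plyr, loaded above,
# masks some dplyr verbs, so load order matters in a live session):
# library(dplyr)
# IBM.stock %>%
#   filter(as.Date(Date) > as.Date('2000-01-01')) %>%
#   mutate(Month = substr(Date, 6, 7), Year = substr(Date, 1, 4)) %>%
#   group_by(Year, Month) %>%
#   summarise(Close = mean(Close))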
| 5,394 | mit |
73ff14eebb0796f2b0551c5b691905cca074806c | illiHub/R | Misc/eventDetails.R | ###########################################
# SCRIPT START
###########################################
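# NOTE (assumption): this fragment relies on objects created by an earlier setup
# script -- workdir1, workdir2, subjects, subject_names, intermedia, numintermedia,
# experiment and appending -- none of which are defined here.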
# set working directory
setwd(workdir1)
# import table
#j<-1
for (j in 1:length(subjects)){
file <- subjects[j]
mainmat_events <- read.table(file, header = T, sep = "\t", dec = ",",fill = FALSE, na.strings="NA", blank.lines.skip = FALSE)
mainmat_events<-mainmat_events[,c('RecordingTimestamp','GazePointIndex','DistanceLeft','ValidityLeft','DistanceRight','ValidityRight','GazePointX..ADCSpx.','GazePointY..ADCSpx.','StudioEvent','StudioEventData','MediaName')]
names(mainmat_events)<-c('Timestamp','Number','DistanceLeft','ValidityLeft','DistanceRight','ValidityRight','GazePointX','GazePointY','Event','Descriptor','StimuliName')
#create events matrix (for movie start, end and name)
event<-as.character(mainmat_events$Descriptor[mainmat_events$Event=="MovieStart"|mainmat_events$Event=="ImageStart"])
start<-mainmat_events$Timestamp[mainmat_events$Event=="MovieStart"|mainmat_events$Event=="ImageStart"]
end<-mainmat_events$Timestamp[mainmat_events$Event=="MovieEnd"|mainmat_events$Event=="ImageEnd"]
events<-data.frame(cbind(event,start,end))
events[,1]<-as.character(events[,1])
events[,2:3]<-apply(events[,2:3],2,as.numeric)
###########################################
# DECREASING MAIN MATRIX SIZE
###########################################
# starting an index for deleting irrelevant media
events[,4]<-0
events[events$event=="No media",4]<-1
for (d in 1:numintermedia){
events[events$event==intermedia[d],4]<-1
}
events<-subset(events,subset=V4!=1)
events<-events[,1:3]
# adding stimulus order number (which video comes before which!)
for (e in 1:nrow(events)){
mainmat_events[mainmat_events$Timestamp>=events[e,2] & mainmat_events$Timestamp<=events[e,3],"Number"]<-as.character(e)
}
events[,4]<-subject_names[j]
if (j==1){
eventsmat<-events
} else{
eventsmat<-rbind(eventsmat,events)
}
print(paste(subject_names[j],'-',j))
}
setwd(workdir2)
names(eventsmat)[4]<-"subject"
eventsmat$seg<-0 # remember to check trials that need segmentation
eventsfile<-(paste(experiment,"-event-info.txt", sep=""))
if(appending==0){write.table(eventsmat, eventsfile, quote=FALSE, sep="\t", na="NA",row.names=FALSE,col.names=TRUE)}else{write.table(eventsmat, eventsfile, quote=FALSE, sep="\t", na="NA",row.names=FALSE,col.names=FALSE,append=T)}
print('Getting event Details From Individual Files - Done!')
| 2,425 | mit |
6180ac829ef92cf962600e5271eaff80d35b6562 | personlin/GMPEhaz | R/Crustal_Common_Form.R | #' GMPE function for Crustal Common form 001 (2017)
#'
#' \code{Cru.Com.001} returns the ground-motion prediction with its sigma of Crustal Common form 001 GMPE.
#'
#' Crustal Common form 001
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.001(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.001(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.001 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common001 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common001", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
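# A usage sketch (an editor's illustration, not original package code): Prd is a
# scalar, so a simple response spectrum can be assembled by looping over periods:
# periods <- c(0.01, 0.1, 0.3, 1, 2)
# lnSA <- sapply(periods, function(p) Cru.Com.001(6, 20, 20, 5, 0, 90, 20, 20, 0, p)$lnY)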
#' GMPE function for Crustal Common form 002 (2017)
#'
#' \code{Cru.Com.002} returns the ground-motion prediction with its sigma of Crustal Common form 002 GMPE.
#'
#' Crustal Common form 002
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.002(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.002(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.002 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common002 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common002", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 003 (2017)
#'
#' \code{Cru.Com.003} returns the ground-motion prediction with its sigma of Crustal Common form 003 GMPE.
#'
#' Crustal Common form 003
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.003(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.003(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.003 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common003 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common003", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 004 (2017)
#'
#' \code{Cru.Com.004} returns the ground-motion prediction with its sigma of Crustal Common form 004 GMPE.
#'
#' Crustal Common form 004
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.004(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.004(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.004 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common004 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common004", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 005 (2017)
#'
#' \code{Cru.Com.005} returns the ground-motion prediction with its sigma of Crustal Common form 005 GMPE.
#'
#' Crustal Common form 005
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.005(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.005(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.005 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common005 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common005", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 006 (2017)
#'
#' \code{Cru.Com.006} returns the ground-motion prediction with its sigma of Crustal Common form 006 GMPE.
#'
#' Crustal Common form 006
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.006(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.006(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.006 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common006 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common006", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 007 (2017)
#'
#' \code{Cru.Com.007} returns the ground-motion prediction with its sigma of Crustal Common form 007 GMPE.
#'
#' Crustal Common form 007
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.007(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.007(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.007 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common007 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common007", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 008 (2017)
#'
#' \code{Cru.Com.008} returns the ground-motion prediction with its sigma of Crustal Common form 008 GMPE.
#'
#' Crustal Common form 008
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.008(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.008(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.008 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common008 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common008", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 009 (2017)
#'
#' \code{Cru.Com.009} returns the ground-motion prediction with its sigma of Crustal Common form 009 GMPE.
#'
#' Crustal Common form 009
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.009(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.009(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.009 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common009 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common009", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 010 (2017)
#'
#' \code{Cru.Com.010} returns the ground-motion prediction with its sigma of Crustal Common form 010 GMPE.
#'
#' Crustal Common form 010
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.010(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.010(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.010 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common010 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common010", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 011 (2017)
#'
#' \code{Cru.Com.011} returns the ground-motion prediction with its sigma of Crustal Common form 011 GMPE.
#'
#' Crustal Common form 011
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.011(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.011(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.011 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common011 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common011", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 012 (2017)
#'
#' \code{Cru.Com.012} returns the ground-motion prediction with its sigma of Crustal Common form 012 GMPE.
#'
#' Crustal Common form 012
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.012(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.012(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.012 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common012 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common012", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 013 (2017)
#'
#' \code{Cru.Com.013} returns the ground-motion prediction with its sigma of Crustal Common form 013 GMPE.
#'
#' Crustal Common form 013
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.013(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.013(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.013 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common013 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common013", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 014 (2017)
#'
#' \code{Cru.Com.014} returns the ground-motion prediction with its sigma of Crustal Common form 014 GMPE.
#'
#' Crustal Common form 014
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.014(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.014(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.014 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common014 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common014", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 015 (2017)
#'
#' \code{Cru.Com.015} returns the ground-motion prediction with its sigma of Crustal Common form 015 GMPE.
#'
#' Crustal Common form 015
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.015(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.015(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.015 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common015 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common015", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 016 (2017)
#'
#' \code{Cru.Com.016} returns the ground-motion prediction with its sigma of Crustal Common form 016 GMPE.
#'
#' Crustal Common form 016
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.016(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.016(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.016 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common016 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common016", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
#' GMPE function for Crustal Common form 017 (2017)
#'
#' \code{Cru.Com.017} returns the ground-motion prediction with its sigma of Crustal Common form 017 GMPE.
#'
#' Crustal Common form 017
#'
#' @param Mag Earthquake moment magnitude, Numeric.
#' @param Rrup Rupture distance(km), Numeric.
#' @param Rjb Joyner and Boore distance(km), Numeric.
#' @param Ztor Depth to the top of the finite rupture model (km).
#' @param ftype style of faulting.
#' @param Dip Dip angle of the fault plane.
#' @param rupwidth Down-dip rupture width (km).
#' @param Rx Horizontal distance(km) from top edge of rupture. Measured perpendicular to the fault strike.
#' @param hwflag hanging-wall flag, 1 for hanging-wall.
#' @param Prd Period of spectral acceleration.
#'
#' @return A list will be returned, including mag, Rrup, Rjb, Ztor, ftype, dip, rupwidth, Rx,
#' hwflag, specT, lnY, sigma, iflag.
#'
#' @examples
#' Cru.Com.017(6, 20, 20, 5, 0, 90, 20, 20, 0, 0)
#' Cru.Com.017(7, 20, 20, 2, 0, 90, 20, 20, 0, 0 )
#'
#' @export
Cru.Com.017 <- function(Mag, Rrup, Rjb, Ztor, ftype=0, Dip, rupwidth, Rx, hwflag=0, Prd){
# Subroutine S04_Crustal_Common017 ( m, Rrup, Rjb, ztor, ftype, dip, Width, Rx, HWFlag,
# specT, lnY, sigma, iflag )
if (Prd != 0 & (Prd < 0.01 | Prd > 2)) {
stop("Period out of range! \n\n")
}
retvals <- .Fortran("S04_Crustal_Common017", m=as.single(Mag), Rrup=as.single(Rrup), Rjb=as.single(Rjb),
ztor=as.single(Ztor), ftype=as.single(ftype), dip=as.single(Dip), Width=as.single(rupwidth),
Rx=as.single(Rx), HWFlag=as.integer(hwflag), specT=as.single(Prd),
lnY=as.single(0.1), sigma=as.single(0.1), iflag=as.integer(0))
names(retvals) <- c("mag", "Rrup", "Rjb", "Ztor", "ftype", "dip", "rupwidth", "Rx", "hwflag",
"specT", "lnY", "sigma", "iflag")
return(retvals)
}
| 32,534 | gpl-3.0 |
4c313053b91967aa4b77b73a79afdac976b09a64 | poissonconsulting/tulip | R/directory.R | #' Delete directory
#'
#' Deletes directory specified by dir after possibly checking with user.
#'
#' @param dir a character scalar of the directory to delete
#' @param check a logical scalar indicating whether to check with user
#' @return an invisible logical scalar indicating whether successful.
#' @export
delete_directory <- function (dir, check = TRUE) {
assert_that(is.string(dir))
assert_that(is.flag(check))
if(!file.exists(dir)) {
message("directory '", dir, "' does not exist")
return(invisible(TRUE))
}
if(check) {
if(!yesno(paste0("Are you sure you want to delete the directory '", dir, "'?"))) {
return (invisible(FALSE))
}
}
message("deleting dir '", dir, "' ...")
invisible(unlink(dir, recursive = TRUE) == 0)
}
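# A minimal usage sketch (not part of the package):
# tmp <- file.path(tempdir(), "demo_dir")
# dir.create(tmp)
# delete_directory(tmp, check = FALSE)  # returns invisible(TRUE) on success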
#' Copy directory
#'
#' Copies directory specified by dir into parent_dir, then removes the original (move semantics)
#'
#' @param dir string of the directory to copy
#' @param parent_dir string of the directory to move it to
#' @param copy.date flag of whether to preserve file dates
#' @param check a logical scalar indicating whether to confirm with user
#' @return an invisible logical scalar indicating whether successful.
#' @export
copy_directory <- function (dir, parent_dir = tempdir(), copy.date = TRUE,
check = TRUE) {
assert_that(is.string(dir))
assert_that(is.string(parent_dir))
assert_that(is.flag(copy.date) && noNA(copy.date))
assert_that(is.flag(check) && noNA(check))
if(!file.exists(dir)) {
warning("directory '", dir, "' not found")
    return (invisible(FALSE))
}
if(check) {
if(!yesno(paste0("Are you sure you want to copy the directory '", dir, "' to '", parent_dir, "'?"))) {
return (invisible(FALSE))
}
}
if(!file.exists(parent_dir))
dir.create(parent_dir, recursive = TRUE)
flag <- file.copy(from = dir, to = parent_dir, recursive = TRUE, copy.date = copy.date)
  if(flag) {
    # remove the source after a successful copy (move semantics); unlink() returns 0 on success
    flag <- unlink(dir, recursive = TRUE) == 0
  } else
    warning("unable to copy directory")
  invisible(as.logical(flag))
}
| 2,000 | mit |
8fd195072e11ab254775e00004b28e6e61639938 | sje30/eglen2015 | vignettes/w81s1_sims.R | ## ---- get-rgc-data
##rgc.of = read.table('~/mosaics/data/w81s1of.txt')
##rgc.on = read.table('~/mosaics/data/w81s1on.txt')
rgc.of.file = system.file("extdata/w81s1/w81s1of.txt", package="eglen2015")
rgc.on.file = system.file("extdata/w81s1/w81s1on.txt", package="eglen2015")
rgc.w.file = system.file("extdata/w81s1/w81s1w.txt", package="eglen2015")
rgc.of = read.table(rgc.of.file)
rgc.on = read.table(rgc.on.file)
rgc.w = scan(rgc.w.file)
## ---- show-univ
rgc.soma.rad = 8
par(bty='n', mfrow=c(1,2))
plot(rgc.on, asp=1, type='n', xlab='', ylab='')
symbols(rgc.on[,1], rgc.on[,2],
asp=1, circles=rep(rgc.soma.rad, nrow(rgc.on)),
inches=FALSE, add=TRUE)
rect(rgc.w[1], rgc.w[3], rgc.w[2], rgc.w[4])
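## The layout above reserves two panels (mfrow = c(1, 2)) but only fills the first;
## a sketch of the matching OFF-cell panel, assuming the same soma radius applies:
plot(rgc.of, asp=1, type='n', xlab='', ylab='')
symbols(rgc.of[,1], rgc.of[,2],
        asp=1, circles=rep(rgc.soma.rad, nrow(rgc.of)),
        inches=FALSE, add=TRUE)
rect(rgc.w[1], rgc.w[3], rgc.w[2], rgc.w[4])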
| 717 | mit |
2f9094fe4a55b43a6d315c9a0f11feebacb072a7 | bmatthie/diamonds | ui.R | # ===========================================================================
# File: ui.R
#
# Copyright 2015 Brian Robert Matthiesen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ===========================================================================
library(shiny)
library(ggplot2)
dataset <- diamonds
shinyUI(pageWithSidebar(
headerPanel("Brian's Diamond Explorer"),
sidebarPanel(
sliderInput('sampleSize', 'Sample Size', min=1, max=nrow(dataset),
value=min(1000, nrow(dataset)), step=500, round=0),
selectInput('x', 'X', names(dataset)),
selectInput('y', 'Y', names(dataset), names(dataset)[[2]]),
selectInput('color', 'Color', c('None', names(dataset))),
checkboxInput('jitter', 'Jitter'),
checkboxInput('smooth', 'Smooth'),
selectInput('facet_row', 'Facet Row', c(None='.', names(dataset))),
selectInput('facet_col', 'Facet Column', c(None='.', names(dataset)))
),
mainPanel(
plotOutput('plot')
)
))
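# A minimal companion server.R sketch (an editor's illustration; the actual server
# file is not shown here). It assumes the input ids defined above:
# library(shiny)
# library(ggplot2)
# shinyServer(function(input, output) {
#   output$plot <- renderPlot({
#     d <- diamonds[sample(nrow(diamonds), input$sampleSize), ]
#     p <- ggplot(d, aes_string(x = input$x, y = input$y)) + geom_point()
#     if (input$color != 'None') p <- p + aes_string(colour = input$color)
#     print(p)
#   })
# })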
| 1,486 | apache-2.0 |
f677e3288b3a86caf188aa02fa8c2f78e80d7339 | petertbryant/MidCoastTMDL | 05e_ParameterUncertaintyAnalysis.R | library(SSN)
library(plyr)
library(RODBC)
library(ggplot2)
library(reshape2)
library(MASS)
options(stringsAsFactors = FALSE)
#Get fit object from 06_scenarios.R
fit <- models[[7]]
#Generate confidence intervals using assumptions of independent normality
df_ci <- confint.glmssn(fit, level = .9)
df_ci <- cbind(df_ci, fit$estimates$betahat)
df_ci <- as.data.frame(df_ci)
df_ci$parms <- rownames(df_ci)
df_ci <- plyr::rename(df_ci, c("5 %" = "lci", "95 %" = "uci", "V3" = "est"))
df_ci_sub <- df_ci[!df_ci$parms %in% c('(Intercept)', 'HDWTR1.21463119808789'),]
#Plot each parameter's estimate with its 90% interval, one plot per parameter
for (i in 1:nrow(df_ci)) {
  df_ci_tmp <- df_ci[i,]
  g <- ggplot(data = df_ci_tmp, aes(x = parms, y = est)) + #geom_point(aes(y = est)) +
    geom_pointrange(aes(ymin = lci, ymax = uci)) + #facet_wrap( ~ parms) +
    geom_hline(yintercept = 0, colour = 'red', lwd = 1) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
  print(g)
}
ssn_preds <- predict(fit, interval = "prediction", predpointsID = "preds")
#Brute force monte carlo simulations varying parameter estimates using
#multivariate normal distributions
obs <- getSSNdata.frame(fit)
set.seed(11)
randparm <- mvrnorm(n=500, mu = fit$estimates$betahat, Sigma = fit$estimates$covb)
randparm
bhats <- fit$estimates$betahat
dimnames(bhats)[[1]][7] <- 'HDWTR1'
dimnames(randparm)[[2]][7] <- 'HDWTR1'
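# predict.vary() is assumed to be a project-specific helper defined elsewhere in
# this repository; it is not part of the SSN package itself.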
for (i in 1:nrow(randparm)) {
varied.preds <- predict.vary(betahat = as.data.frame(t(bhats)), ss = obs,
r_vec = randparm[i,])
varied.preds <- plyr::rename(varied.preds, c('BSTI_prd' = paste0("BSTI_prd_", i)))
if (i == 1) {
predtable <- varied.preds
} else {
predtable <- merge(predtable, varied.preds)
}
}
#Check out the results
boxplot(10^t(predtable[1:10,])[2:501,])
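# A follow-up sketch (assuming columns 2 onward of predtable hold the simulated
# BSTI predictions): empirical 90% bounds per site across the 500 draws.
# pred_q <- apply(predtable[, -1], 1, quantile, probs = c(0.05, 0.5, 0.95))
# head(t(pred_q))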
| 1,739 | gpl-3.0 |
71f2f9e5271cabf7a4b433bac531e35f89fd6cc9 | wch/r-source | src/library/tools/R/userdir.R | # File src/library/tools/R/userdir.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 2020 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
R_user_dir <-
function(package, which = c("data", "config", "cache"))
{
stopifnot(is.character(package), length(package) == 1L)
which <- match.arg(which)
home <- normalizePath("~")
path <-
switch(which,
data = {
if(nzchar(p <- Sys.getenv("R_USER_DATA_DIR")))
p
else if(nzchar(p <- Sys.getenv("XDG_DATA_HOME")))
p
else if(.Platform$OS.type == "windows")
file.path(Sys.getenv("APPDATA"), "R", "data")
else if(Sys.info()["sysname"] == "Darwin")
file.path(home, "Library", "Application Support",
"org.R-project.R")
else
file.path(home, ".local", "share")
},
config = {
if(nzchar(p <- Sys.getenv("R_USER_CONFIG_DIR")))
p
else if(nzchar(p <- Sys.getenv("XDG_CONFIG_HOME")))
p
else if(.Platform$OS.type == "windows")
file.path(Sys.getenv("APPDATA"), "R", "config")
else if(Sys.info()["sysname"] == "Darwin")
file.path(home, "Library", "Preferences",
"org.R-project.R")
else
file.path(home, ".config")
},
cache = {
if(nzchar(p <- Sys.getenv("R_USER_CACHE_DIR")))
p
else if(nzchar(p <- Sys.getenv("XDG_CACHE_HOME")))
p
else if(.Platform$OS.type == "windows")
file.path(Sys.getenv("LOCALAPPDATA"), "R", "cache")
else if(Sys.info()["sysname"] == "Darwin")
file.path(home, "Library", "Caches",
"org.R-project.R")
else
file.path(home, ".cache")
})
file.path(path, "R", package)
}
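## Usage sketch (not part of the original sources): e.g. R_user_dir("mypkg", "cache")
## resolves to "~/.cache/R/mypkg" on a default Linux setup with no XDG overrides.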
| 2,861 | gpl-2.0 |
83de570ca3872b4a79f72dbfd9d6ac96e4101dca | XiaoruiZhu/AdpQMLE | AdaptQMLE/R/gen.lin.R | #' Generating function for single linear model y = a + b*x.
#'
#' @title Single linear model y = a + b*x.
#'
#' @param intercept The intercept of single linear model.
#' @param slope The parameter b, which is the slope of y = a + b*x.
#' @param noise.variance The variance of error series.
#' @param x The independent variable (predictor).
#' @param dis.error Distribution of error, Student's t or normal.
#' @param dft The degrees of freedom if error was specified as Student's t.
#'
#' @return A data frame containing x, y and the error series.
#' @export
#'
gen.lin <- function(intercept, slope, noise.variance, x, dis.error = c("rt", "rnorm"), dft) {
  # Make up y by adding Gaussian or Student's t noise to the linear function
  # (noise.variance is only used for the normal case)
dis.error <- match.arg(dis.error)
if (dis.error == "rt"){
e <- rt(length(x), dft)
} else if (dis.error == "rnorm") {
e <- rnorm(length(x), 0, sd=sqrt(noise.variance))
}
y <- rep(intercept,length(x)) + x %*% as.matrix(slope) + e
return(data.frame(x=x, y=y, e = e))
}
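# A minimal usage sketch (hypothetical values, not from the package documentation):
# set.seed(1)
# dat <- gen.lin(intercept = 1, slope = 2, noise.variance = 0.25,
#                x = seq(0, 1, length.out = 100), dis.error = "rnorm")
# coef(lm(y ~ x, data = dat))  # should recover approximately c(1, 2)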
| 998 | mit |
fc70d93d079bfa82849db3d7e073b90fced998f7 | tomoakin/TCC | TCC/tests/DESDES1.R | library(TCC)
set.seed(12734)
tcc <- simulateReadCounts(Ngene = 100, PDEG = 0.2, DEG.assign = c(0.9, 0.1),replicate=c(1,1))
tcc <- calcNormFactors(tcc, norm.method = "deseq", test.method = "deseq",
FDR = 0.1, floorPDEG = 0.05)
cat("tcc$norm.factors: ")
cat(tcc$norm.factors)
cat("\n")
cat("tcc$stat$execution.time: ")
cat(tcc$stat$execution.time)
cat("\n")
| 382 | gpl-2.0 |
c5ae5684555d54c3833ef3bb143e00aec757ed6a | 52North/sensorweb4R | R/class-timeseries.R | #' @include generic-methods.R
#' @include class-phenomenon.R
#' @include class-service.R
#' @include class-feature.R
#' @include class-offering.R
#' @include class-procedure.R
#' @include class-category.R
#' @include class-station.R
#' @include class-tvp.R
NULL
#' Timeseries
#'
#' Represents a timeseries.
#'
#' @family API Resources
#' @author Christian Autermann \email{c.autermann@@52north.org}
#' @rdname Timeseries-class
#' @name Timeseries-class
#' @export
setClass("Timeseries",
contains = "ApiResource",
slots = list(uom = "character",
phenomenon = "Phenomenon",
service = "Service",
feature = "Feature",
offering = "Offering",
procedure = "Procedure",
category = "Category",
station = "Station",
firstValue = "TVP",
lastValue = "TVP",
referenceValues = "list",
statusIntervals = "list"),
validity = function(object) {
errors <- assert.same.length(id = object@id,
uom = object@uom,
phenomenon = object@phenomenon,
service = object@service,
feature = object@feature,
offering = object@offering,
procedure = object@procedure,
category = object@category,
station = object@station,
firstValue = object@firstValue,
lastValue = object@lastValue,
referenceValues = object@referenceValues,
statusIntervals = object@statusIntervals)
if (length(errors) == 0) TRUE else errors
})
#' @export
#' @describeIn Timeseries-class Checks whether \code{x} is a \code{Timeseries}.
is.Timeseries <- function(x) is(x, "Timeseries")
#' @export
#' @describeIn Timeseries-class Coerces \code{x} into a \code{Timeseries}.
as.Timeseries <- function(x) as(x, "Timeseries")
setClassUnion("Timeseries_or_characters",
c("Timeseries", "character"))
setClassUnion("Timeseries_or_NULL",
c("Timeseries", "NULL"))
create.value <- function(len, x) {
if (len == 0)
TVP()
else if (is.null(x) || length(x) == 0)
rep(TVP(NA), len)
else if (length(x) == 1)
rep(as.TVP(x), len)
else as.TVP(x)
}
create.StatusIntervals <- function(len, x) {
if (len == 0)
list()
else if (is.null(x) || length(x) == 0)
rep(list(StatusInterval()), length.out = len)
else if (length(x) == 1) {
if (is.StatusInterval(x))
rep(list(x), length.out = len)
else
rep(x, length.out = len)
}
else x
}
create.ReferenceValues <- function(len, x) {
if (len == 0)
list()
else if (is.null(x) || length(x) == 0)
rep(list(ReferenceValue()), length.out = len)
else if (length(x) == 1) {
if (is.ReferenceValue(x))
rep(list(x), length.out = len)
else
rep(x, length.out = len)
    }
    else as.list(x)
}
#' @export
#' @describeIn Timeseries-class Constructs a new \code{Timeseries}.
Timeseries <- function(id = character(), label = NULL, uom = NULL, endpoint = NULL,
phenomenon = NULL, service = NULL, feature = NULL, offering = NULL,
procedure = NULL, category = NULL, station = NULL, statusIntervals = NULL,
firstValue = NULL, lastValue = NULL, referenceValues = NULL) {
id <- as.character(id)
len <- length(id)
label <- stretch(len, label, as.character(NA), as.character)
endpoint <- stretch(len, endpoint, as.character(NA), as.Endpoint)
service <- stretch(len, service, as.character(NA), as.Service)
uom <- stretch(len, uom, as.character(NA), as.character)
phenomenon <- stretch(len, phenomenon, as.character(NA), as.Phenomenon)
feature <- stretch(len, feature, as.character(NA), as.Feature)
offering <- stretch(len, offering, as.character(NA), as.Offering)
procedure <- stretch(len, procedure, as.character(NA), as.Procedure)
category <- stretch(len, category, as.character(NA), as.Category)
station <- stretch(len, station, as.character(NA), as.Station)
firstValue <- create.value(len, firstValue)
lastValue <- create.value(len, lastValue)
referenceValues <- create.ReferenceValues(len, referenceValues)
statusIntervals <- create.StatusIntervals(len, statusIntervals)
return(new("Timeseries",
endpoint = endpoint,
id = id,
label = label,
uom = uom,
phenomenon = phenomenon,
service = service,
category = category,
feature = feature,
offering = offering,
procedure = procedure,
station = station,
referenceValues = referenceValues,
statusIntervals = statusIntervals,
firstValue = firstValue,
lastValue = lastValue))
}
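# A minimal construction sketch (illustrative ids only, not real API resources):
# ts <- Timeseries(id = c("ts1", "ts2"),
#                  label = c("Water level", "Discharge"),
#                  uom = c("cm", "m3/s"))
# uom(ts)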
#' @rdname accessor-methods
setMethod("referenceValues",
signature(x = "Timeseries"),
function(x) x@referenceValues)
#' @rdname accessor-methods
setMethod("referenceValues<-",
signature(x = "Timeseries",
value = "ReferenceValue_or_NULL"),
function(x, value) {
              x@referenceValues <- create.ReferenceValues(length(x), value)
invisible(x)
})
#' @rdname api-relations
setMethod("service",
signature(x = "Timeseries"),
function(x) x@service)
#' @rdname api-relations
setMethod("service<-",
signature(x = "Timeseries",
value = "Service_or_NULL"),
function(x, value) {
x@service <- stretch(length(x), value, as.character(NA), as.Service)
invisible(x)
})
#' @rdname api-relations
setMethod("feature",
signature(x = "Timeseries"),
function(x) x@feature)
#' @rdname api-relations
setMethod("feature<-",
signature(x = "Timeseries",
value = "Feature_or_NULL"),
function(x, value) {
x@feature <- stretch(length(x), value, as.character(NA), as.Feature)
invisible(x)
})
#' @rdname api-relations
setMethod("offering",
signature(x = "Timeseries"),
function(x) x@offering)
#' @rdname api-relations
setMethod("offering<-",
signature(x = "Timeseries",
value = "Offering_or_NULL"),
function(x, value) {
x@offering <- stretch(length(x), value, as.character(NA), as.Offering)
invisible(x)
})
#' @rdname api-relations
setMethod("category",
signature(x = "Timeseries"),
function(x) x@category)
#' @rdname api-relations
setMethod("category<-",
signature(x = "Timeseries",
value = "Category_or_NULL"),
function(x, value) {
x@category <- stretch(length(x), value, as.character(NA), as.Category)
invisible(x)
})
#' @rdname api-relations
setMethod("procedure",
signature(x = "Timeseries"),
function(x) x@procedure)
#' @rdname api-relations
setMethod("procedure<-",
signature(x = "Timeseries",
value = "Procedure_or_NULL"),
function(x, value) {
x@procedure <- stretch(length(x), value, as.character(NA), as.Procedure)
invisible(x)
})
#' @rdname api-relations
setMethod("station",
signature(x = "Timeseries"),
function(x) x@station)
#' @rdname api-relations
setMethod("station<-",
signature(x = "Timeseries",
value = "Station_or_NULL"),
function(x, value) {
x@station <- stretch(length(x), value, as.character(NA), as.Station)
invisible(x)
})
#' @rdname accessor-methods
setMethod("uom",
signature(x = "Timeseries"),
function(x) x@uom)
#' @rdname accessor-methods
setMethod("uom<-",
signature(x = "Timeseries",
value = "character_or_NULL"),
function(x, value) {
x@uom <- stretch(length(x), value, NA, as.character)
invisible(x)
})
#' @rdname api-relations
setMethod("phenomenon",
signature(x = "Timeseries"),
function(x) x@phenomenon)
#' @rdname api-relations
setMethod("phenomenon<-",
signature(x = "Timeseries",
value = "Phenomenon_or_NULL"),
function(x, value) {
x@phenomenon <- stretch(length(x), value, as.character(NA), as.Phenomenon)
invisible(x)
})
#' @rdname accessor-methods
setMethod("firstValue",
signature(x = "Timeseries"),
function(x) x@firstValue)
#' @rdname accessor-methods
setMethod("firstValue<-",
signature(x = "Timeseries",
value = "TVP_or_NULL"),
function(x, value) {
x@firstValue <- create.value(length(x), value)
x
})
#' @rdname accessor-methods
setMethod("lastValue",
signature(x = "Timeseries"),
function(x) x@lastValue)
#' @rdname accessor-methods
setMethod("lastValue<-",
signature(x = "Timeseries",
value = "TVP_or_NULL"),
function(x, value) {
x@lastValue <- create.value(length(x), value)
x
})
#' @rdname accessor-methods
setMethod("statusIntervals",
signature(x = "Timeseries"),
function(x) x@statusIntervals)
#' @rdname url-methods
setMethod("getDataURL",
signature(x = "Timeseries"),
function(x) subresourceURL(x, "getData"))
setAs("character", "Timeseries", function(from) Timeseries(id = from))
setAs("list", "Timeseries", function(from) concat.list(from))
rbind2.Timeseries <- function(x, y) {
x <- as.Timeseries(x)
y <- as.Timeseries(y)
Timeseries(endpoint = rbind2(endpoint(x), endpoint(y)),
id = c(id(x), id(y)),
label = c(label(x), label(y)),
uom = c(uom(x), uom(y)),
phenomenon = rbind2(phenomenon(x), phenomenon(y)),
service = rbind2(service(x), service(y)),
category = rbind2(category(x), category(y)),
feature = rbind2(feature(x), feature(y)),
offering = rbind2(offering(x), offering(y)),
procedure = rbind2(procedure(x), procedure(y)),
station = rbind2(station(x), station(y)),
referenceValues = c(referenceValues(x), referenceValues(y)),
firstValue = rbind2(firstValue(x), firstValue(y)),
statusIntervals = c(statusIntervals(x), statusIntervals(y)),
lastValue = rbind2(lastValue(x), lastValue(y)))
}
#' @rdname rbind2-methods
setMethod("rbind2", signature("Timeseries", "Timeseries"),
function(x, y) rbind2.Timeseries(x, y))
#' @rdname rbind2-methods
setMethod("rbind2", signature("Timeseries", "ANY"),
function(x, y) rbind2.Timeseries(x, as.Timeseries(y)))
#' @rdname rbind2-methods
setMethod("rbind2", signature("ANY", "Timeseries"),
function(x, y) rbind2.Timeseries(as.Timeseries(x), y))
#' @rdname rep-methods
setMethod("rep", signature(x = "Timeseries"), function(x, ...)
Timeseries(endpoint = rep(endpoint(x), ...),
id = rep(id(x), ...),
label = rep(label(x), ...),
uom = rep(uom(x), ...),
phenomenon = rep(phenomenon(x), ...),
service = rep(service(x), ...),
category = rep(category(x), ...),
feature = rep(feature(x), ...),
offering = rep(offering(x), ...),
procedure = rep(procedure(x), ...),
station = rep(station(x), ...),
referenceValues = rep(referenceValues(x), ...),
statusIntervals = rep(statusIntervals(x), ...),
firstValue = rep(firstValue(x), ...),
lastValue = rep(lastValue(x), ...)))
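# Usage sketch (ts1, ts2 are assumed Timeseries objects from the same API
# endpoint; the names are illustrative, not part of the package):
#   combined <- rbind2(ts1, ts2)  # concatenates every parallel slot
#   doubled  <- rep(combined, 2)  # rep() replicates ids, labels and relations alike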
| 12,619 | apache-2.0 |
e95ff8bfbf65ee55e1ac57b3c30e182f521a227b | cxxr-devel/cxxr | src/extra/testr/filtered-test-suite/max/tc_max_27.R | expected <- 3
test(id=23, code={
argv <- structure(list(2, 3, NA, na.rm = TRUE), .Names = c("", "", "",
"na.rm"))
do.call('max', argv);
}, o = expected);
| 157 | gpl-2.0 |
e95ff8bfbf65ee55e1ac57b3c30e182f521a227b | kmillar/cxxr | src/extra/testr/filtered-test-suite/max/tc_max_27.R | expected <- 3
test(id=23, code={
argv <- structure(list(2, 3, NA, na.rm = TRUE), .Names = c("", "", "",
"na.rm"))
do.call('max', argv);
}, o = expected);
| 157 | gpl-2.0 |
e95ff8bfbf65ee55e1ac57b3c30e182f521a227b | ArunChauhan/cxxr | src/extra/testr/filtered-test-suite/max/tc_max_27.R | expected <- 3
test(id=23, code={
argv <- structure(list(2, 3, NA, na.rm = TRUE), .Names = c("", "", "",
"na.rm"))
do.call('max', argv);
}, o = expected);
| 157 | gpl-2.0 |
bbcd7b37a4df12cd9a36b1905aec0c4cddb1e205 | splaticvoid/datasciencecoursera | Assignment 1/complete.R | library(stringr)  ## needed for str_pad() below
complete <- function(directory, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
  files <- list.files(directory, full.names = TRUE)  ## listed but unused below
  rowsize <- length(id)
ids<-c()
counts<-c()
## Iterate through each element in ID
for (idVal in id) {
## Create the directory+file path
## Open the file
## Load the data into a frame
fileEntry <- paste(directory,"/",str_pad(toString(idVal), 3, pad="0"),".csv",sep="")
fileData <- read.csv(fileEntry,header=TRUE)
## Count the rows
sulfates<-fileData[2]
nitrates<-fileData[3]
validValues <- sum(complete.cases(sulfates,nitrates))
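    ## complete.cases() is TRUE only for rows where both sulfate and nitrate
    ## are non-NA; summing the logical vector counts those fully observed rows.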
## Add id + count to a matrix
ids<-c(ids,idVal)
counts<-c(counts,as.numeric(validValues))
}
result<- matrix(c(ids,counts),nrow=rowsize, ncol=2)
dimnames(result)=list(c(1:rowsize), c("id","nobs"))
data.frame(result)
}
##Unit test
# expected<-matrix(c(30:25,932,711,475,338,586,463), nrow=6, ncol=2)
# dimnames(expected) = list(c(1:6),c("id","nobs"))
#
# actual<- complete("specdata", 30:25)
#
# print(expected)
# print(actual)
#
# if (identical(expected,actual)) {
# print("Pass")
# } else {
# print("Fail")
# } | 1,541 | cc0-1.0 |
e95ff8bfbf65ee55e1ac57b3c30e182f521a227b | kmillar/rho | src/extra/testr/filtered-test-suite/max/tc_max_27.R | expected <- 3
test(id=23, code={
argv <- structure(list(2, 3, NA, na.rm = TRUE), .Names = c("", "", "",
"na.rm"))
do.call('max', argv);
}, o = expected);
| 157 | gpl-2.0 |
e95ff8bfbf65ee55e1ac57b3c30e182f521a227b | rho-devel/rho | src/extra/testr/filtered-test-suite/max/tc_max_27.R | expected <- 3
test(id=23, code={
argv <- structure(list(2, 3, NA, na.rm = TRUE), .Names = c("", "", "",
"na.rm"))
do.call('max', argv);
}, o = expected);
| 157 | gpl-2.0 |
e95ff8bfbf65ee55e1ac57b3c30e182f521a227b | krlmlr/cxxr | src/extra/testr/filtered-test-suite/max/tc_max_27.R | expected <- 3
test(id=23, code={
argv <- structure(list(2, 3, NA, na.rm = TRUE), .Names = c("", "", "",
"na.rm"))
do.call('max', argv);
}, o = expected);
| 157 | gpl-2.0 |
376eec2b46b7f8aca2756688676e56d0a1106212 | mexindian/TileMaker | tests/testthat.R | library(testthat)
library(TileMaker)
test_check("TileMaker")
| 62 | mit |
73b2aae2e90d43d90bf04a6128c96bf80a11fa16 | aappling-usgs/gsplot | R/lines.R | #' gsplot lines
#'
#' Create a line by specifying plot points. See \code{\link[graphics]{lines}} for more details.
#'
#' @param object gsplot object
#' @param \dots Further graphical parameters may also be supplied as arguments. See 'Details'.
#'
#' @details Additional graphical parameter inputs:
#' \itemize{
#' \item{\code{x}} {vector of x-coordinates for points that make up the line}
#' \item{\code{y}} {vector of y-coordinates for points that make up the line}
#' \item{\code{side}} {vector specifying the side(s) to use for axes (1,2,3,4 for sides, or 5,6,7,8 for outward offsets of those)}
#' \item{\code{legend.name}} {name that appears in the legend, see \code{\link{legend}} for more legend parameters}
#' \item{\code{error_bar}} {add error bars to the defined line, see \code{\link{error_bar}}
#' for arguments, which must be supplied as a list}
#' \item{\code{callouts}} {add callouts and text to the defined line, see \code{\link{callouts}}
#' for arguments, which must be supplied as a list}
#' }
#'
#' @examples
#' gsNew <- gsplot()
#' gsNew <- lines(gsNew, c(1,2), y=c(2,5))
#' gsNew <- lines(gsNew, c(3,4,3), c(2,4,6), pch=6)
#' gsNew <- points(gsNew, c(8,4,1.2), c(2,4.7,6), side=c(3,2))
#' gsNew
#'
#' # Same example using the magrittr pipe '%>%' to connect operations within gsplot
#' gsNewpipe <- gsplot() %>%
#' lines(c(1,2), c(2,5)) %>%
#' lines(c(3,4,3), c(2,4,6), pch=6) %>%
#' points(c(8,4,1.2), c(2,4.7,6), side=c(3,2))
#' gsNewpipe
#'
#' gs <- gsplot() %>%
#' lines(x=c(1,2), y=c(4,2), xlim=c(0, 5), ylim=c(0,5),
#' callouts=list(labels=c(NA, "data"), col="blue"))
#' gs
#' @export
#' @rdname lines
lines <- function(object, ...) {
override("graphics", "lines", object, ...)
}
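# `override()` is a gsplot-internal helper; it presumably dispatches to
# lines.gsplot() when `object` is a gsplot and falls back to graphics::lines()
# otherwise, so the same call works in pipelines and on base plots.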
lines.gsplot <- function(object, ..., legend.name=NULL, side=c(1,2)){
fun.name <- 'lines'
object <- gather_function_info(object, fun.name, ..., legend.name=legend.name, side=side)
return(object)
}
| 1,958 | cc0-1.0 |
73b2aae2e90d43d90bf04a6128c96bf80a11fa16 | jiwalker-usgs/gsplot | R/lines.R | #' gsplot lines
#'
#' Create a line by specifying plot points. See \code{\link[graphics]{lines}} for more details.
#'
#' @param object gsplot object
#' @param \dots Further graphical parameters may also be supplied as arguments. See 'Details'.
#'
#' @details Additional graphical parameter inputs:
#' \itemize{
#' \item{\code{x}} {vector of x-coordinates for points that make up the line}
#' \item{\code{y}} {vector of y-coordinates for points that make up the line}
#' \item{\code{side}} {vector specifying the side(s) to use for axes (1,2,3,4 for sides, or 5,6,7,8 for outward offsets of those)}
#' \item{\code{legend.name}} {name that appears in the legend, see \code{\link{legend}} for more legend parameters}
#' \item{\code{error_bar}} {add error bars to the defined line, see \code{\link{error_bar}}
#' for arguments, which must be supplied as a list}
#' \item{\code{callouts}} {add callouts and text to the defined line, see \code{\link{callouts}}
#' for arguments, which must be supplied as a list}
#' }
#'
#' @examples
#' gsNew <- gsplot()
#' gsNew <- lines(gsNew, c(1,2), y=c(2,5))
#' gsNew <- lines(gsNew, c(3,4,3), c(2,4,6), pch=6)
#' gsNew <- points(gsNew, c(8,4,1.2), c(2,4.7,6), side=c(3,2))
#' gsNew
#'
#' # Same example using the magrittr pipe '%>%' to connect operations within gsplot
#' gsNewpipe <- gsplot() %>%
#' lines(c(1,2), c(2,5)) %>%
#' lines(c(3,4,3), c(2,4,6), pch=6) %>%
#' points(c(8,4,1.2), c(2,4.7,6), side=c(3,2))
#' gsNewpipe
#'
#' gs <- gsplot() %>%
#' lines(x=c(1,2), y=c(4,2), xlim=c(0, 5), ylim=c(0,5),
#' callouts=list(labels=c(NA, "data"), col="blue"))
#' gs
#' @export
#' @rdname lines
lines <- function(object, ...) {
override("graphics", "lines", object, ...)
}
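# `override()` is a gsplot-internal helper; it presumably dispatches to
# lines.gsplot() when `object` is a gsplot and falls back to graphics::lines()
# otherwise, so the same call works in pipelines and on base plots.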
lines.gsplot <- function(object, ..., legend.name=NULL, side=c(1,2)){
fun.name <- 'lines'
object <- gather_function_info(object, fun.name, ..., legend.name=legend.name, side=side)
return(object)
}
| 1,958 | cc0-1.0 |
73017f0a824a912163a9f897515f8df287f4918a | ivanhigueram/deforestacion | modeling/reg_dis_het_effects.R | ##############################################################################################
##############################################################################################
### RUN REGRESSION DISCONTINUITY MODELS ###
### THIS CODE WILL READ THE DISTANCE AND DEFORESTATION DATA TO RUN RD MODELS ###
### HERE WE WILL RUN THE MODELS TO CALCULATE HETEROGENEOUS EFFECTS FOR ###
### DEFORESTATION, COCA CROPS, AND ILLEGAL GOLD MINING. THIS ANALYSIS WILL ###
### ONLY USE ALL BORDERS WITHOUT COMPOUND TREATMENT EFFECT ###
##############################################################################################
##############################################################################################
rm(list=ls())
library(plyr)
library(dplyr)
library(data.table)
library(rdrobust)
library(rdd)
library(stringr)
library(stargazer)
library(foreign)
library(ggplot2)
library(magrittr)
library(foreign)
library(stringr)
library(rlang)
library(tidyr)
library(RATest)
# Source tables functions
setwd(Sys.getenv("ROOT_FOLDER"))
source("R/rd_functions.R")
source("modeling/merge_datasets.R")
##############################################################################################
###################################### 1. DEFORESTATION ######################################
##############################################################################################
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
####################### (NIGHT-LIGHT DATA: CLUMPS: {1: CLUMP, 0: NO CLUMP}) ##################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 1))
rd_robust_clump1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 0))
rd_robust_clump0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0.rds"))
saveRDS(rd_robust_clump1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1.rds"))
rd_robust_clump0 <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0.rds"))
rd_robust_clump1 <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (ROADS BUFFER: {1: INSIDE 5KM ROAD BUFFER, 0: OUTSIDE}) ################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 1))
rd_robust_clump1_2 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 0))
rd_robust_clump0_2 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0_2, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads0.rds"))
saveRDS(rd_robust_clump1_2, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
################ (Institutions: {1: Municipality created before 1950}) ######################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea <= 1950))
rd_robust_inst0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea > 1950))
rd_robust_inst1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_inst0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst0.rds"))
saveRDS(rd_robust_inst1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Violence: {1: homicide rate > median pre-2000; 0 ow}) #################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 0))
rd_robust_hom0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 1))
rd_robust_hom1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_hom0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0.rds"))
saveRDS(rd_robust_hom1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Conflict: {1: Presence illegal armed groups pre-2000}) ################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, pres_cerac_1 == 0))
rd_robust_conflict0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, pres_cerac_1 == 1))
rd_robust_conflict1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_conflict0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_conflict0.rds"))
saveRDS(rd_robust_conflict1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_conflict1.rds"))
##############################################################################################
################################### 2. SIMCI DATA: COCA CROPS ################################
##############################################################################################
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
###################### (NIGHT-LIGHT DATA: CLUMPS 5K: {1: CLUMP, 0: NO CLUMP}) ################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 1))
rd_robust_clump1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 0))
rd_robust_clump0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_coca.rds"))
saveRDS(rd_robust_clump1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_coca.rds"))
rd_robust_clump0_coca <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_coca.rds"))
rd_robust_clump1_coca <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_coca.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
################ (Institutions: {1: Municipality created before 1950}) ######################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea <= 1950))
rd_robust_inst0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 8,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea > 1950))
rd_robust_inst1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_inst0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst0_coca.rds"))
saveRDS(rd_robust_inst1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst1_coca.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Violence: {1: homicide rate > median pre-2000; 0 ow}) #################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 0))
rd_robust_hom0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 1))
rd_robust_hom1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_hom0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0_coca.rds"))
saveRDS(rd_robust_hom1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1_coca.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (ROADS BUFFER: {1: INSIDE 5KM ROAD BUFFER, 0: OUTSIDE}) ################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 1))
rd_robust_roads1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 0))
rd_robust_roads0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_roads1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads1_coca.rds"))
saveRDS(rd_robust_roads0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads0_coca.rds"))
##############################################################################################
################################# 2. SIMCI DATA: ILLEGAL MINING ##############################
##############################################################################################
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
###################### (NIGHT-LIGHT DATA: CLUMPS 5K: {1: CLUMP, 0: NO CLUMP}) ################
##############################################################################################
counter <- 0
list_df <- c(defo_dist[2]) %>%
lapply(., function(x) base::subset(x, clumps_5k ==1))
rd_robust_clump1_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 0))
rd_robust_clump0_mining <- lapply(list_df, function(park){
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_mining.rds"))
saveRDS(rd_robust_clump1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_mining.rds"))
rd_robust_clump0_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_mining.rds"))
rd_robust_clump1_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_mining.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
################ (Institutions: {1: Municipality created before 1950}) ######################
##############################################################################################
counter <- 0
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea <= 1950))
rd_robust_inst0_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea > 1950))
rd_robust_inst1_mining <- lapply(list_df, function(park){
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_inst0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst0_mining.rds"))
saveRDS(rd_robust_inst1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst1_mining.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Violence: {1: homicide rate > median pre-2000; 0 ow}) #################
##############################################################################################
counter <- 0
list_df <- c(defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 0))
rd_robust_hom0_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 1))
rd_robust_hom1_mining <- lapply(list_df, function(park){
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_hom0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0_mining.rds"))
saveRDS(rd_robust_hom1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1_mining.rds"))
# rd_robust_hom0_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0_mining.rds"))
# rd_robust_hom1_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1_mining.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (ROADS BUFFER: {1: INSIDE 5KM ROAD BUFFER, 0: OUTSIDE}) ################
##############################################################################################
counter <- 0
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 1))
rd_robust_road1_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
counter <- 0
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 0))
rd_robust_road0_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_road0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads0_mining.rds"))
saveRDS(rd_robust_road1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads1_mining.rds"))
| 23,029 | mit |
dcbfcb072cee134e44e035cf1bb2435dc3d1ec09 | B1aine/kaggle-allstate | doSubmission.R | library(cvTools)
library(gbm)
########################################
############## TRAINING ################
########################################
load('data/train_2014.04.05.RData')
nam = names(data)
omitClass <- c(which(nam=="targetA"),which(nam=="targetB"),which(nam=="targetC"),which(nam=="targetD"),
which(nam=="targetE"),which(nam=="targetF"),which(nam=="targetG"),
which(nam=="customer_ID"),which(nam=="last"),which(nam=="location"))
omitOptions <- c(omitClass,which(nam=="willChange"))
targetOptions = c(which(nam=="targetA"),which(nam=="targetB"),which(nam=="targetC"),which(nam=="targetD"),
which(nam=="targetE"),which(nam=="targetF"),which(nam=="targetG"))
currentOptions <- c(which(nam=="A"),which(nam=="B"),which(nam=="C"),which(nam=="D"),
which(nam=="E"),which(nam=="F"),which(nam=="G"))
set.seed(1234)
folds = 3
classTrees = 4000
optTrees = 1500
class_shrinkage = opt_shrinkage = 0.01
class_depth = opt_depth = 9
class_minobs = 20
opt_minobs = 10
customers = unique(data$customer_ID)
cvF <- cvFolds(length(customers), K = folds, R = 1)
classifiers <- list()
pClass <- list()
models_options <- list()
for(i in 1:7)
{
classifiers[[i]] <- list()
pClass[[i]] <- list()
models_options[[i]] <- list()
}
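# Two-stage scheme, one pair of GBMs per insurance option and CV fold:
# (1) a bernoulli GBM estimates P(final option != currently held option);
# (2) a multinomial GBM, fit on that fold's held-out rows whose estimated
#     change probability is >= 0.2, predicts the new option value.
# Folds are split by customer_ID so a customer's rows never straddle folds.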
for(fold in 1:folds)
{
valCustomers <- sort(cvF$subset[cvF$which==fold])
val = data$customer_ID %in% valCustomers
train = !val
for(option in 1:7)
{
######### CLASSIFIER
cat('Fold ',fold,', option ',option,', building the classifier...')
data$willChange <- as.numeric(data[,targetOptions[option]] != data[,currentOptions[option]])
gc(reset=T)
classifiers[[option]][[fold]] = gbm(willChange~., data[train,-omitClass],
distribution="bernoulli",keep.data=F,verbose=F,n.cores=1,
n.trees=classTrees,shrinkage=class_shrinkage,interaction.depth=class_depth,n.minobsinnode=class_minobs)
pClass[[option]][[fold]] = predict(classifiers[[option]][[fold]],data[val,],type="response",n.trees=classTrees)
cat('done\n')
######### OPTION
cat('Fold ',fold,', option ',option,', building the predictor...')
models_options[[option]][[fold]] = gbm(as.formula(paste(nam[targetOptions[option]],'~.')),
data = data[val,-omitOptions[-option]][pClass[[option]][[fold]]>=0.2,],
distribution="multinomial",keep.data=T,verbose=F,n.cores=1,
n.trees=optTrees,shrinkage=opt_shrinkage,interaction.depth=opt_depth,n.minobsinnode=opt_minobs)
cat('done\n')
cat('saving...')
save(classifiers,pClass,models_options,file='models_ind.RData')
cat('done\n')
gc(reset=T)
}
}
########################################
############## TESTING #################
########################################
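# NOTE: `test` must already hold the prepared test features here; loading it
# is not shown in this script. A placeholder (the filename is an assumption
# that mirrors the train file above):
# load('data/test_2014.04.05.RData')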
testClass = list()
for (option in 1:7)
{
testClass[[option]] = 0
for (fold in 1:folds)
testClass[[option]] = testClass[[option]] +
predict(classifiers[[option]][[fold]],test,type="response",n.trees=classTrees)
testClass[[option]] = testClass[[option]]/folds
}
thr = rep(0.7,7)
thr[7] = 0.45
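# An option is only overwritten when its fold-averaged change probability
# clears the cutoff below; option G uses a looser 0.45 cutoff.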
for (rep in 1:1)
{
testOption = list()
for (option in 1:7)
{
testOption[[option]] = 0
for (fold in 1:folds)
testOption[[option]] = testOption[[option]] +
predict(models_options[[option]][[fold]],test[testClass[[option]]>=thr[option],],type="response",n.trees=optTrees)
testOption[[option]] <- apply(testOption[[option]]/folds,1,which.max)
if (option %in% c(1,2,5,6))
testOption[[option]] = testOption[[option]]-1
}
for(option in 1:7)
{
test[testClass[[option]]>=thr[option],currentOptions[option]] = testOption[[option]]
}
}
whichLast = which(test$last==1)
finalTest = sprintf('%s%s%s%s%s%s%s',
test$A[whichLast],
test$B[whichLast],
test$C[whichLast],
test$D[whichLast],
test$E[whichLast],
test$F[whichLast],
test$G[whichLast])
submission = data.frame(customer_ID=test$customer_ID[whichLast],plan=finalTest)
write.csv(submission,file="submissionTest.csv",quote=F,row.names=F) | 4,343 | mit |
fa7b616742d8eddd1ece1f5a8e85373c81052129 | happynotes/PIHM.AnalysisR | R/datafilter.R | #' Find the cells/segments that match your filters.
#' Developed by Lele Shu( lele.shu at gmail.com lzs157 at psu.edu )
#' Created by Fri Apr 17 14:11:46 EDT 2015
#'
#' @param data time-series (zoo) data, one column per cell/segment.
#' @param filter If filter<0, find the IDs whose minimum is smaller than the filter. If filter>0, find the IDs whose maximum is larger than the filter. A length-2 filter keeps the IDs whose values stay inside the range. Defaults to the soil depth of each cell.
#' @param name Name used for output file names and messages.
#' @param ylab Label for the y axis.
#' @param unit Unit appended to the y-axis label.
#' @param is.riv If TRUE, filter river segments for over-bank stages instead. Default=FALSE.
#' @param if.plot Whether to plot the filtered series. Default=TRUE.
#' @keywords check value
#' @return id of cell/segment which match the filter.
#' @export
#' @examples
#' datafilter(data, filter=5) to find the cells which have values > 5.
#'
#' datafilter(data, filter=-5) to find the cells which have values < -5.
#'
#' datafilter(data, filter=c(-5,5)) to find the cells which have -5 < value < 5.
#'
#'
#'
datafilter <-function(data, filter,name=paste0(substitute(data),collapse=''),
ylab=paste0(substitute(data),collapse='')
, unit='',is.riv=FALSE,if.plot=TRUE){
dataMin=sapply(data,min);
dataMax=sapply(data,max);
if(is.riv){ #filter for riv
key='OverBanks'
riv =readriv();
segshp=riv$River$riv[,7]
shp=riv$Shape$shp;
calib=readcalib(bak=TRUE);
rd <- shp[segshp,2] * calib['RIV_DPTH'];
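        # Over-bank rule: bank depth = river shape depth (column 2 of shp)
        # scaled by the RIV_DPTH calibration factor; a segment is flagged when
        # its series maximum exceeds that depth.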
ids=which(dataMax>rd);
cat("\t",as.character(length(ids)), "item(s) are filtered from ",name," data.", "filter= Over Banks\n\n");
}else{
if(missing(filter)){ #calculate default filter.
sd=soildepth();
filter=sd;
}
if (length(filter)==ncol(data)){ #filter value for each cell.
if(mean(filter)>0){
ids=which(filter<dataMax);
}else{
ids=which(filter<dataMin);
}
key='Vfilter'
cat("\t",as.character(length(ids)), "item(s) are filtered from ",name," data.", 'filter= vector',"\n\n");
}else{
filterMin=min(filter);
filterMax=max(filter);
if (length(filter)>1) {
ids=which(dataMin>filterMin & dataMax< filterMax)
}else{
if (filter >0){
ids=which(dataMax>filterMax);
}else{
ids=which(dataMin<filterMin);
}
}
cat("\t",as.character(length(ids)), "item(s) are filtered from ",name," data.", 'filter=',filter,"\n\n");
key=as.character(mean(filter) );
}
}
plotzoo(data,fn=paste(name,'_all.png',sep=''),ylab=ylab,unit=unit) ;
if(length(ids)==0){
return(0)
}else{
if(if.plot){
plotzoo(data[,ids],fn=paste(name,key,'.png',sep='') ,ylab=ylab,unit=unit,holdon=TRUE);
if (is.riv){
urd=sort(unique(rd))
xx=range(time(data));
for (i in 1:length(urd)){
lines(xx,c(urd[i],urd[i]),lwd=2);
}
}
dev.off();
if(is.riv){ # if ids is for rivers.
PIHM.triplot(rivid=ids,fn=paste(name,'_Overbank.png',sep=''),name=name,title=paste(name) );
}else{ # ids is for Cells.
PIHM.triplot(cellid=ids,fn=paste(name,'+ELV',key,'.png',sep=''),name=name,title=paste(name,'filter=',key),
riveron=RIVERON);
}
}
}
# image.off()
return(ids)
}
datafilter.riv <-function(data, filter,name=paste0(substitute(data),collapse=''),ylab=paste0(substitute(data),collapse=''), unit='',if.plot=TRUE){
dataMin=sapply(data,min);
dataMax=sapply(data,max);
if(missing(filter)){ #default river filter is "OVER BANKS"
key='OverBanks'
riv =readriv();
segshp=riv$River$riv[,7]
shp=riv$Shape$shp;
calib=readcalib(bak=TRUE);
rd <- shp[segshp,2] * calib['RIV_DPTH'];
ids=which(dataMax>rd);
cat("\t",as.character(length(ids)), "river segment(s) are filtered from ",name," data.", "filter= Over Banks\n\n");
}else{
if (length(filter)==ncol(data)){ #filter value for each cell.
if(mean(filter)>0){
ids=which(filter<dataMax);
}else{
            ids=which(dataMin<filter);  # value drops below a negative filter
}
key='Vfilter'
cat("\t",as.character(length(ids)), "river segment(s) are filtered from ",name," data.", 'filter= vector',"\n\n");
}else{
filterMin=min(filter);
filterMax=max(filter);
if (length(filter)>1) {
ids=which(dataMin>filterMin & dataMax< filterMax)
}else{
if (filter >0){
ids=which(dataMax>filterMax);
}else{
ids=which(dataMin<filterMin);
}
}
cat("\t",as.character(length(ids)), "river segment(s) are filtered from ",name," data.", 'filter=',filter,"\n\n");
key=as.character(mean(filter) );
}
}
plotzoo(data,fn=paste(name,'_all.png',sep=''),ylab=ylab,unit=unit) ;
if(length(ids)==0){
return(0)
}else{
if(if.plot){
plotzoo(data[,ids],fn=paste(name,key,'.png',sep='') ,ylab=ylab,unit=unit,holdon=TRUE);
if (missing(filter)){
urd=sort(unique(rd))
xx=range(time(data));
for (i in 1:length(urd)){
lines(xx,c(urd[i],urd[i]),lwd=2);
}
}
dev.off();
PIHM.triplot(rivid=ids,fn=paste(name,'_',key,'.png',sep=''),name=name,title=paste(name) );
}
}
image.off()
return(ids)
}
| 5,597 | gpl-2.0 |
73017f0a824a912163a9f897515f8df287f4918a | banco-republica-research/deforestacion | modeling/reg_dis_het_effects.R | ##############################################################################################
##############################################################################################
### RUN REGRESSION DISCONTINUITY MODELS ###
### THIS CODE WILL READ THE DISTANCE AND DEFORESTATION DATA TO RUN RD MODELS ###
### HERE WE WILL RUN THE MODELS TO CALCULATE HETEROGENEOUS EFFECTS FOR ###
### DEFORESTATION, COCA CROPS, AND ILLEGAL GOLD MINING. THIS ANALYSIS WILL ###
### ONLY USE ALL BORDERS WITHOUT COMPOUND TREATMENT EFFECT ###
##############################################################################################
##############################################################################################
rm(list=ls())
library(plyr)
library(dplyr)
library(data.table)
library(rdrobust)
library(rdd)
library(stringr)
library(stargazer)
library(foreign)
library(ggplot2)
library(magrittr)
library(foreign)
library(stringr)
library(rlang)
library(tidyr)
library(RATest)
# Source tables functions
setwd(Sys.getenv("ROOT_FOLDER"))
source("R/rd_functions.R")
source("modeling/merge_datasets.R")
##############################################################################################
###################################### 1. DEFORESTATION ######################################
##############################################################################################
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
####################### (NIGHT-LIGHT DATA: CLUMPS: {1: CLUMP, 0: NO CLUMP}) ##################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 1))
rd_robust_clump1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 0))
rd_robust_clump0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0.rds"))
saveRDS(rd_robust_clump1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1.rds"))
rd_robust_clump0 <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0.rds"))
rd_robust_clump1 <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (ROADS BUFFER: {1: INSIDE 5KM ROAD BUFFER, 0: OUTSIDE}) ################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 1))
rd_robust_clump1_2 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 0))
rd_robust_clump0_2 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0_2, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads0.rds"))
saveRDS(rd_robust_clump1_2, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
################ (Institutions: {1: Municipality created before 1950}) ######################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea <= 1950))
rd_robust_inst0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea > 1950))
rd_robust_inst1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_inst0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst0.rds"))
saveRDS(rd_robust_inst1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Violence: {1: homicide rate > median pre-2000; 0 ow}) #################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 0))
rd_robust_hom0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 1))
rd_robust_hom1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_hom0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0.rds"))
saveRDS(rd_robust_hom1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Conflict: {1: Presence illegal armed groups pre-2000}) ################
##############################################################################################
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, pres_cerac_1 == 0))
rd_robust_conflict0 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2:3], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, pres_cerac_1 == 1))
rd_robust_conflict1 <- lapply(list_df, function(park){
rdrobust(
y = park$loss_sum,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_conflict0, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_conflict0.rds"))
saveRDS(rd_robust_conflict1, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_conflict1.rds"))
##############################################################################################
################################### 2. SIMCI DATA: COCA CROPS ################################
##############################################################################################
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
###################### (NIGHT-LIGHT DATA: CLUMPS 5K: {1: CLUMP, 0: NO CLUMP}) ################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 1))
rd_robust_clump1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 0))
rd_robust_clump0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_coca.rds"))
saveRDS(rd_robust_clump1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_coca.rds"))
rd_robust_clump0_coca <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_coca.rds"))
rd_robust_clump1_coca <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_coca.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
################ (Institutions: {1: Municipality created before 1950}) ######################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea <= 1950))
rd_robust_inst0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 8,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea > 1950))
rd_robust_inst1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_inst0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst0_coca.rds"))
saveRDS(rd_robust_inst1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst1_coca.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (Violence: {1: homicide rate > median pre-2000; 0 ow}) #################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 0))
rd_robust_hom0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 1))
rd_robust_hom1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_hom0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0_coca.rds"))
saveRDS(rd_robust_hom1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1_coca.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
##################### (ROADS BUFFER: {1: INSIDE 5KM ROAD BUFFER, 0: OUTSIDE}) ################
##############################################################################################
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 1))
rd_robust_roads1_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 0))
rd_robust_roads0_coca <- lapply(list_df, function(park){
rdrobust(
y = park$coca_agg,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_roads1_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads1_coca.rds"))
saveRDS(rd_robust_roads0_coca, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads0_coca.rds"))
##############################################################################################
################################# 2. SIMCI DATA: ILLEGAL MINING ##############################
##############################################################################################
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
###################### (NIGHT-LIGHT DATA: CLUMPS 5K: {1: CLUMP, 0: NO CLUMP}) ################
##############################################################################################
counter <- 0
list_df <- c(defo_dist[2]) %>%
lapply(., function(x) base::subset(x, clumps_5k ==1))
rd_robust_clump1_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, clumps_5k == 0))
rd_robust_clump0_mining <- lapply(list_df, function(park){
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_clump0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_mining.rds"))
saveRDS(rd_robust_clump1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_mining.rds"))
rd_robust_clump0_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump0_mining.rds"))
rd_robust_clump1_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_clump1_mining.rds"))
##############################################################################################
################################### OPTIMAL ROBUST BANDWIDTHS ################################
################ (Institutions: {1: Municipality created before 1950}) ######################
##############################################################################################
counter <- 0
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea <= 1950))
rd_robust_inst0_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, ao_crea > 1950))
rd_robust_inst1_mining <- lapply(list_df, function(park){
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_inst0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst0_mining.rds"))
saveRDS(rd_robust_inst1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_inst1_mining.rds"))
##############################################################################################
#################################### OPTIMAL ROBUST BANDWIDTHS ###############################
############## (Violence: {1: homicide rate > median pre-2000; 0: otherwise}) ###############
##############################################################################################
counter <- 0
list_df <- c(defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 0))
rd_robust_hom0_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, hom_q34 == 1))
rd_robust_hom1_mining <- lapply(list_df, function(park){
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_hom0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0_mining.rds"))
saveRDS(rd_robust_hom1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1_mining.rds"))
# rd_robust_hom0_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom0_mining.rds"))
# rd_robust_hom1_mining <- readRDS(str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_hom1_mining.rds"))
##############################################################################################
#################################### OPTIMAL ROBUST BANDWIDTHS ###############################
##################### (ROADS BUFFER: {1: INSIDE 5KM ROAD BUFFER, 0: OUTSIDE}) ################
##############################################################################################
counter <- 0
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 1))
rd_robust_road1_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
counter <- 0
list_df <- c(defo_dist[2], defo_dist_terr) %>%
lapply(., function(x) base::subset(x, roads == 0))
rd_robust_road0_mining <- lapply(list_df, function(park){
counter <<- counter + 1
print(counter)
rdrobust(
y = park$illegal_mining_EVOA_2014,
x = park$dist_disc,
covs = cbind(park$altura_tile_30arc, park$slope, park$roughness, park$prec,
park$sq_1km.1, park$treecover_agg, as.factor(as.character(park$buffer_id))),
vce = "nn",
nnmatch = 3,
all = T
)
})
saveRDS(rd_robust_road0_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads0_mining.rds"))
saveRDS(rd_robust_road1_mining, str_c(Sys.getenv("OUTPUT_FOLDER"), "/RD/Models/new_results/rd_robust_roads1_mining.rds"))
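# The subset -> lapply -> rdrobust pattern above is repeated verbatim for
# every heterogeneity split. A hedged refactoring sketch follows;
# run_rd_subset is a hypothetical helper, not used by the pipeline, and it
# assumes every split shares the outcome, running variable and covariate set
# used in the calls above.
if (FALSE) {
  run_rd_subset <- function(df_list, subset_expr, outcome) {
    df_list <- lapply(df_list, function(d) {
      keep <- eval(subset_expr, d)      # logical vector; NAs are dropped
      d[which(keep), , drop = FALSE]
    })
    lapply(df_list, function(park) {
      rdrobust(
        y = park[[outcome]],
        x = park$dist_disc,
        covs = cbind(park$altura_tile_30arc, park$slope, park$roughness,
                     park$prec, park$sq_1km.1, park$treecover_agg,
                     as.factor(as.character(park$buffer_id))),
        vce = "nn",
        nnmatch = 3,
        all = T
      )
    })
  }
  # Equivalent to the roads == 1 block above:
  # rd_robust_road1_mining <- run_rd_subset(
  #   c(defo_dist[2], defo_dist_terr), quote(roads == 1),
  #   "illegal_mining_EVOA_2014")
}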
| 23,029 | mit |
5fe0311ef0711532e4176209605ff6e1af56eb66 | thehyve/Rmodules_oncoprint | web-app/Rscripts/Heatmap/HeatmapLoader.R | ###########################################################################
# Copyright 2008-2012 Janssen Research & Development, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
###########################################################################
#Heatmap Loader
###########################################################################
Heatmap.loader <- function(
input.filename,
output.file ="Heatmap",
meltData = TRUE,
imageWidth = 1200,
imageHeight = 800,
pointsize = 15,
maxDrawNumber = Inf,
color.range.clamps = c(-2.5,2.5),
aggregate.probes = FALSE
)
{
print("-------------------")
print("HeatmapLoader.R")
print("CREATING HEATMAP")
library(Cairo)
library(ggplot2)
library(reshape2)
library(gplots)
#Pull the GEX data from the file.
mRNAData <- data.frame(read.delim(input.filename, stringsAsFactors = FALSE))
# The GROUP column needs to have the values from GENE_SYMBOL concatenated as a suffix,
# but only if the latter does not contain a private value (which means that the biomarker was not present in any of the dictionaries)
mRNAData$GROUP <- as.character(mRNAData$GROUP)
rowsToConcatenate <- grep("^PRIVATE", mRNAData$GENE_SYMBOL, invert = TRUE)
mRNAData$GROUP[rowsToConcatenate] <- paste(mRNAData$GROUP[rowsToConcatenate], mRNAData$GENE_SYMBOL[rowsToConcatenate],sep="_")
mRNAData$GROUP <- as.factor(mRNAData$GROUP)
if (aggregate.probes) {
# probe aggregation function adapted from dataBuilder.R to heatmap's specific data-formats
mRNAData <- Heatmap.probe.aggregation(mRNAData, collapseRow.method = "MaxMean", collapseRow.selectFewestMissing = TRUE)
}
#If we have to melt and cast, do it here, otherwise we make the group column the rownames
if(meltData == TRUE)
{
#Trim whitespace from the PATIENT_NUM field.
mRNAData$PATIENT_NUM <- gsub("^\\s+|\\s+$", "",mRNAData$PATIENT_NUM)
#Melt the data, leaving 3 columns as the grouping fields.
meltedData <- melt(mRNAData, id=c("GROUP","PATIENT_NUM","GENE_SYMBOL"))
#Cast the data into a format that puts the ASSAY.ID in a column.
mRNAData <- data.frame(dcast(meltedData, GROUP ~ PATIENT_NUM))
#When we convert to a data frame the numeric columns get an x in front of them. Remove them here.
colnames(mRNAData) <- sub("^X","",colnames(mRNAData))
}
else
{
colnames(mRNAData) <- gsub("^\\s+|\\s+$","",colnames(mRNAData))
#Use only unique row names. This unique should get rid of the case where we have multiple genes per probe. The values for the probes are all the same.
mRNAData <- unique(mRNAData)
}
#Set the name of the rows to be the names of the probes.
rownames(mRNAData) = mRNAData$GROUP
#Convert the data to a numeric matrix.
mRNAData <- data.matrix(subset(mRNAData, select = -c(GROUP)))
#We can't draw a heatmap for a matrix with no rows.
if(nrow(mRNAData)<1) stop("||FRIENDLY||R cannot plot a heatmap with no Gene/Probe selected. Please check your variable selection and run again.")
if(ncol(mRNAData)<2) stop("||FRIENDLY||R cannot plot a heatmap with data from only one patient. Please check your variable selection and run again.")
#We can't draw a heatmap for a matrix with only 1 row (restriction of heatmap.2 function).
#Adding an extra dummy row of NA values does the trick, as NAs seem to be ignored in the plot and the density histogram
if(nrow(mRNAData)==1) {
mRNAData <- rbind(mRNAData, mRNAData[1,])
mRNAData[2,] = NA
}
# by Serge and Wei: filter to a subset and reorder markers
num_markers<-dim(mRNAData)[1] # number of markers in the dataset
if (num_markers > maxDrawNumber) { # more markers than the limit: keep only the most variable ones
sd_rows_mRNA<-apply (mRNAData,1,sd,na.rm=T)
mRNAData<-mRNAData[!is.na(sd_rows_mRNA),] # remove markers where sd is NA
sd_rows_mRNA<-sd_rows_mRNA[!is.na(sd_rows_mRNA)]
indices_to_include <- order(sd_rows_mRNA,decreasing = T)[1:maxDrawNumber] # filter by SD, keep only the top maxDrawNumber
mRNAData <- mRNAData[indices_to_include,]
}
colcolor<-colnames(mRNAData) # assign colors for different subset
colcolor[grep("^S1_|_S1_|_S1$",colnames(mRNAData))]<-"orange"
colcolor[grep("^S2_|_S2_|_S2$",colnames(mRNAData))]<-"yellow"
mean_reorder<-rowMeans(mRNAData[,colcolor=="orange" ], na.rm = T) # reorder the data by rowmean of Subset 1
mRNAData<-mRNAData[order(mean_reorder,decreasing = T),]
rownames(mRNAData)<-gsub("_\\s+$","",rownames(mRNAData), ignore.case = FALSE, perl = T) # remove the _ at the end of the marker label
# end filter subset
# check whether there is enough data to draw heatmap
n_remaining_marker<-nrow(mRNAData)
n_remaining_sample<-ncol(mRNAData)
if (is.null(color.range.clamps)) color.range.clamps = c(min(mRNAData), max(mRNAData))
if (n_remaining_marker>1 & n_remaining_sample >1) {
plotHeatmap(mRNAData, colcolor, color.range.clamps, output.file, extension = "png")
plotHeatmap(mRNAData, colcolor, color.range.clamps, output.file, extension = "svg")
} else {
#Prepare the package to capture the image file.
CairoPNG(file=paste(output.file,".png",sep=""),width=as.numeric(imageWidth),height=as.numeric(imageHeight),pointsize=as.numeric(pointsize))
tmp<-frame()
tmp2<-mtext ("not enough markers/samples to draw heatmap", cex=2)
print (tmp)
print (tmp2)
dev.off()
}
print("-------------------")
}
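# Hedged usage sketch (the file name is hypothetical, not from the pipeline):
# the loader expects a tab-delimited file whose columns include GROUP,
# GENE_SYMBOL and PATIENT_NUM plus, when meltData = TRUE, a single value
# column to melt.
if (FALSE) {
  Heatmap.loader(
    input.filename     = "outputfile.txt", # hypothetical pipeline extract
    output.file        = "Heatmap",
    meltData           = TRUE,
    maxDrawNumber      = 500,
    color.range.clamps = c(-2.5, 2.5),
    aggregate.probes   = FALSE
  )
}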
plotHeatmap <- function(data, colcolors, color.range.clamps, output.file = "Heatmap", extension = "png") {
require(Cairo)
require(gplots)
  pxPerCell <- 15 # pixel size of one heatmap cell; enlarged below for small matrices
hmPars <- list(pointSize = pxPerCell / 1, labelPointSize = pxPerCell / 9)
if (nrow(data) < 30 || ncol(data) < 30) {
pxPerCell <- 40
hmPars <- list(pointSize = pxPerCell / 5, labelPointSize = pxPerCell / 10)
}
  maxResolution <- 30000 # cap the larger image dimension so the rendered file stays manageable
if (nrow(data) > ncol(data) && nrow(data)*pxPerCell > maxResolution) {
pxPerCell <- maxResolution/nrow(data)
hmPars <- list(pointSize = pxPerCell / 1, labelPointSize = pxPerCell / 9)
} else if (ncol(data)*pxPerCell > maxResolution) {
pxPerCell <- maxResolution/ncol(data)
hmPars <- list(pointSize = pxPerCell / 1, labelPointSize = pxPerCell / 9)
}
mainHeight <- nrow(data) * pxPerCell
mainWidth <- ncol(data) * pxPerCell
leftMarginSize <- pxPerCell * 1
rightMarginSize <- pxPerCell * max(10, max(nchar(rownames(data))))
topMarginSize <- pxPerCell * 3
bottomMarginSize <- pxPerCell * max(10, max(nchar(colnames(data))))
topSpectrumHeight <- rightMarginSize
imageWidth <- leftMarginSize + mainWidth + rightMarginSize
imageHeight <- topSpectrumHeight + topMarginSize + mainHeight + bottomMarginSize
hmCanvasDiv <- list(xLeft = leftMarginSize / imageWidth, xMain = mainWidth / imageWidth, xRight = rightMarginSize / imageWidth,
yTopLarge = topSpectrumHeight / imageHeight, yTopSmall = topMarginSize / imageHeight,
yMain = mainHeight / imageHeight, yBottom = bottomMarginSize / imageHeight)
if (extension == "svg") {
CairoSVG(file = paste(output.file,".svg",sep=""), width = imageWidth/200,
height = imageHeight/200, pointsize = hmPars$pointSize*0.35)
} else {
CairoPNG(file = paste(output.file,".png",sep=""), width = imageWidth,
height = imageHeight, pointsize = hmPars$pointSize)
}
par(mar = c(0, 0, 0, 0))
heatmap.2(data,
Rowv=NA,
Colv=NA,
ColSideColors = colcolors,
col = greenred(800),
breaks = seq(color.range.clamps[1], color.range.clamps[2], length.out = 800+1),
sepwidth=c(0,0),
margins=c(0, 0),
cexRow = hmPars$labelPointSize,
cexCol = hmPars$labelPointSize,
scale = "none",
dendrogram = "none",
key = TRUE,
keysize = 0.001,
density.info = "histogram", # density.info=c("histogram","density","none")
trace = "none",
lmat = matrix(ncol = 3, byrow = TRUE, data = c( # 1 is subset color bar, 2 is heatmap, 5 is color histogram
3, 5, 4,
6, 1, 7,
8, 2, 9,
10, 11, 12)),
lwid = c(hmCanvasDiv$xLeft, hmCanvasDiv$xMain, hmCanvasDiv$xRight),
lhei = c(hmCanvasDiv$yTopLarge, hmCanvasDiv$yTopSmall, hmCanvasDiv$yMain, hmCanvasDiv$yBottom))
legend(x = 1 - hmCanvasDiv$xRight*0.93, y = 1,
legend = c("Subset 1","Subset 2"),
fill = c("orange","yellow"),
bg = "white", ncol = 1,
cex = topSpectrumHeight * 0.006,
)
dev.off()
}
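# Minimal self-contained sketch of calling plotHeatmap on synthetic data
# (all names and sizes invented): half the columns are coloured as subset 1
# (orange) and half as subset 2 (yellow), matching the convention above.
if (FALSE) {
  set.seed(42)
  toy <- matrix(rnorm(200), nrow = 20,
                dimnames = list(paste0("gene_", 1:20), paste0("P", 1:10)))
  toy_colors <- rep(c("orange", "yellow"), each = 5)
  plotHeatmap(toy, toy_colors, color.range.clamps = c(-2.5, 2.5),
              output.file = "toy_heatmap", extension = "png")
}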
Heatmap.probe.aggregation <- function(mRNAData, collapseRow.method, collapseRow.selectFewestMissing, output.file = "aggregated_data.txt") {
library(WGCNA)
meltedData <- melt(mRNAData, id=c("GROUP","GENE_SYMBOL","PATIENT_NUM"))
#Cast the data into a format that puts the PATIENT_NUM in a column.
castedData <- data.frame(dcast(meltedData, GROUP + GENE_SYMBOL ~ PATIENT_NUM))
#Create a unique identifier column.
castedData$UNIQUE_ID <- paste(castedData$GENE_SYMBOL,castedData$GROUP,sep="")
#Set the name of the rows to be the unique ID.
rownames(castedData) = castedData$UNIQUE_ID
if (nrow(castedData) <= 1) {
warning("Only one probe.id present in the data. Probe aggregation not possible.")
return (mRNAData)
}
#Run the collapse on a subset of the data by removing some columns.
finalData <- collapseRows(subset(castedData, select = -c(GENE_SYMBOL,GROUP,UNIQUE_ID) ),
rowGroup = castedData$GENE_SYMBOL,
rowID = castedData$UNIQUE_ID,
method = collapseRow.method,
connectivityBasedCollapsing = TRUE,
methodFunction = NULL,
connectivityPower = 1,
selectFewestMissing = collapseRow.selectFewestMissing,
thresholdCombine = NA)
#Coerce the data into a data frame.
finalData=data.frame(finalData$group2row, finalData$datETcollapsed)
#Rename the columns; the selected row ID is the unique ID.
colnames(finalData)[2] <- 'UNIQUE_ID'
#Merge the probe.id back in.
finalData <- merge(finalData,castedData[c('UNIQUE_ID','GROUP')],by=c('UNIQUE_ID'))
#Remove the unique_id and selected row ID column.
finalData <- subset(finalData, select = -c(UNIQUE_ID))
#Melt the data back.
finalData <- melt(finalData)
#Set the column names again.
colnames(finalData) <- c("GENE_SYMBOL","GROUP","PATIENT_NUM","VALUE")
#When we convert to a data frame the numeric columns get an x in front of them. Remove them here.
finalData$PATIENT_NUM <- sub("^X","",finalData$PATIENT_NUM)
#Return relevant columns
finalData <- finalData[,c("PATIENT_NUM","VALUE","GROUP","GENE_SYMBOL")]
write.table(finalData, file = output.file, sep = "\t", row.names = FALSE)
finalData
}
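# Hedged toy example of the aggregation step (all values invented): two
# probes mapping to the same gene collapse to one row per gene via
# WGCNA::collapseRows.
if (FALSE) {
  toy <- data.frame(
    GROUP       = rep(c("probe_a", "probe_b"), each = 2),
    GENE_SYMBOL = "TP53",
    PATIENT_NUM = rep(c("S1_P1", "S2_P2"), times = 2),
    VALUE       = c(1.2, -0.4, 0.9, -0.1),
    stringsAsFactors = FALSE
  )
  Heatmap.probe.aggregation(toy, collapseRow.method = "MaxMean",
                            collapseRow.selectFewestMissing = TRUE,
                            output.file = "toy_aggregated.txt")
}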
| 12,166 | apache-2.0 |