# Code for reproducing simulation graphs and summary statistics for the
# quantitative risk simulation. This code produced the .csv of Nrisky values,
# simulation values, and sensitivity analysis values for each
# disease-scenario combination.
# Meg McEachran
# Updated 3/25/2022

library(tidyverse)
library(ggpubr)
library(EnvStats)
library(purrr)
library(extraDistr)

##### VHSV ----

# Read VHSV simulation output, one file per scenario (VHSV-1..4), plus the
# long-format Nrisky draws. NOTE(review): absolute paths -- adjust to your
# file system.
simdata1<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 VHSV-1_simulation_data.csv")
simdata2<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 VHSV-2_simulation_data.csv")
simdata3<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 VHSV-3_simulation_data.csv")
simdata4<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 VHSV-4_simulation_data.csv")
Nrisky.data.long<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 Nrisky.vhsv.csv")

# Pull the per-scenario Prisky draws together and reshape to long format.
Prisky.data <- data.frame(
  "Prisky.1" = simdata1$Prisky,
  "Prisky.2" = simdata2$Prisky,
  "Prisky.3" = simdata3$Prisky,
  "Prisky.4" = simdata4$Prisky
)
Prisky.data.long <- Prisky.data %>% pivot_longer(cols = c(1:4))

# Prisky summary stats by scenario:
Prisky.data.long %>%
  group_by(name) %>%
  summarize(median.p = median(value), sd.p = sd(value), mean = mean(value), max = max(value))

# Nrisky summary stats by scenario:
Nrisky.data.long %>%
  group_by(name) %>%
  summarize(mean.n = mean(value), median.n = median(value), sd.n = sd(value), max = max(value))

# Per-scenario medians and means, used for the reference lines and labels in
# the faceted histograms below.
med_Nrisky_df <- Nrisky.data.long %>% group_by(name) %>% summarize(median = median(value))
mean_Nrisky_df <- Nrisky.data.long %>% group_by(name) %>% summarize(mean = mean(value))

##### PLOTTING FOR MANUSCRIPT ----

scenario.labs <- c("VHSV-1", "VHSV-2", "VHSV-3", "VHSV-4")
names(scenario.labs) <- c("Nrisky.1", "Nrisky.2", "Nrisky.3", "Nrisky.4")

# Faceted histogram of Nrisky per scenario (iterations truncated at 5M).
facet.nolog.plot.v <- Nrisky.data.long %>%
  filter(value < 5000000) %>%
  ggplot() +
  aes(value, fill = name) +
  stat_bin(bins = 100, position = "identity", alpha = 0.5) +
  theme_bw() +
  theme(axis.text = element_text(size = 12),
        axis.title = element_blank(),  # shared axis titles added via annotate_figure() later
        legend.position = "none",
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 12)) +
  geom_vline(data = med_Nrisky_df, aes(xintercept = median, color = name), linetype = 2) +
  # FIX: size/hjust are fixed settings, not data mappings, so they belong
  # outside aes(); inside aes() they are treated as mapped variables.
  geom_text(data = med_Nrisky_df,
            aes(x = median, y = 2000,
                label = prettyNum(round(median), big.mark = ",", scientific = FALSE)),
            size = 4, hjust = -0.2, show.legend = FALSE) +
  facet_wrap(~name) +  # add labeller = labeller(name = scenario.labs) if strip labels are wanted
  theme(strip.text.x = element_blank(),
        axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  expand_limits(x = 5e+06) +
  scale_x_continuous(breaks = c(500000, 1000000, 2000000, 3000000, 4000000, 5000000),
                     labels = c("500K", "1M", "2M", "3M", "4M", "5M")) +
  scale_fill_manual(name = "Scenario",
                    labels = c("VHSV-1", "VHSV-2", "VHSV-3", "VHSV-4"),
                    values = c("blue", "green", "purple", "red")) +
  scale_color_manual(name = "Scenario",
                     labels = c("VHSV-1", "VHSV-2", "VHSV-3", "VHSV-4"),
                     values = c("blue", "green", "purple", "red"))
facet.nolog.plot.v

# ECDF of Nrisky per scenario (iterations truncated at 5M).
ecdfplot.v <- ggplot(filter(Nrisky.data.long, value < 5000000)) +
  aes(value, color = name) +
  stat_ecdf(geom = "step") +
  theme_bw() +
  theme(axis.text = element_text(size = 12),
        axis.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 12),
        legend.position = c(0.6, 0.4)) +
  scale_y_continuous(position = "right") +
  expand_limits(x = 5e+06) +
  # FIX: last break was 50000000 (50M) but labelled "5M"; corrected to
  # 5000000 to match the label and the companion histogram.
  scale_x_continuous(breaks = c(500000, 1000000, 2000000, 3000000, 4000000, 5000000),
                     labels = c("500K", "1M", "2M", "3M", "4M", "5M")) +
  scale_fill_manual(name = "Scenario",
                    labels = c("VHSV-1", "VHSV-2", "VHSV-3", "VHSV-4"),
                    values = c("blue", "green", "purple", "red")) +
  scale_color_manual(name = "Scenario",
                     labels = c("VHSV-1", "VHSV-2", "VHSV-3", "VHSV-4"),
                     values = c("blue", "green", "purple", "red"))
ecdfplot.v

# NOTE(review): a large commented-out variant of the faceted histogram using
# a broken x-axis (scale_x_break) was removed here; per the original note,
# broken axes did not work on the multipanel graph and were abandoned.

#### VHSV SENSITIVITY (Values for Table 3) ----

# VHSV-1: regress standardized Nrisky on standardized inputs; coefficients
# are standardized sensitivity betas (change in output mean per unit change
# in each standardized input).
beta.sensmod.vhsv1 <- lm(scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) +
                           scale(P.useLSshiners) + scale(P.release) + scale(Prev) +
                           scale(Svhsv) + scale(anglers) + scale(trips),
                         data = simdata1)
summary(beta.sensmod.vhsv1)
beta.sensmod.vhsv1$coefficients
beta.df.vhsv1 <- data.frame(
  coeff = c("P.use", "P.useLSfish", "P.useLSshiners", "P.release", "Prev", "Svhsv", "anglers", "trips"),
  value = beta.sensmod.vhsv1$coefficients[-1]  # drop the intercept
)
standardized.vhsv.1 <- ggplot(beta.df.vhsv1) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + ggtitle("standardized.vhsv.1")

# VHSV-2: statewide scenario (P.usesusc instead of the LS-specific terms).
beta.sensmod.vhsv2 <- lm(scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) +
                           scale(P.release) + scale(Prev) + scale(Svhsv) +
                           scale(anglers) + scale(trips),
                         data = simdata2)
summary(beta.sensmod.vhsv2)
beta.sensmod.vhsv2$coefficients
beta.df.vhsv2 <- data.frame(
  coeff = c("P.use", "P.usesusc", "P.release", "Prev", "Svhsv", "anglers", "trips"),
  value = beta.sensmod.vhsv2$coefficients[-1]  # drop the intercept
)
standardized.vhsv.2 <- ggplot(beta.df.vhsv2) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + ggtitle("standardized.vhsv.2")

# VHSV-3: LS watershed, all susceptible species.
beta.sensmod.vhsv3 <- lm(scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) +
                           scale(P.useLSsusc) + scale(P.release) + scale(Prev) +
                           scale(Svhsv) + scale(anglers) + scale(trips),
                         data = simdata3)
summary(beta.sensmod.vhsv3)
beta.sensmod.vhsv3$coefficients
beta.df.vhsv3 <- data.frame(
  coeff = c("P.use", "P.useLSfish", "P.useLSsusc", "P.release", "Prev", "Svhsv", "anglers", "trips"),
  value = beta.sensmod.vhsv3$coefficients[-1]  # drop the intercept
)
standardized.vhsv.3 <- ggplot(beta.df.vhsv3) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + ggtitle("standardized.vhsv.3")

# VHSV-4: statewide, all susceptible species.
beta.sensmod.vhsv4 <- lm(scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) +
                           scale(P.release) + scale(Prev) + scale(Svhsv) +
                           scale(anglers) + scale(trips),
                         data = simdata4)
summary(beta.sensmod.vhsv4)
beta.sensmod.vhsv4$coefficients
beta.df.vhsv4 <- data.frame(
  coeff = c("P.use", "P.usesusc", "P.release", "Prev", "Svhsv", "anglers", "trips"),
  value = beta.sensmod.vhsv4$coefficients[-1]  # drop the intercept
)
standardized.vhsv.4 <- ggplot(beta.df.vhsv4) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + ggtitle("standardized.vhsv.4")

### What proportion of VHSV iterations are less than 1000 risky trips? (Values for Table 2)
# yX.v = P(Nrisky <= 1000); 100 * (1 - yX.v) = percent of iterations above 1000.
f1.v <- ecdf(simdata1$Nrisky)
y1.v <- f1.v(1000)
y1.v
100 * (1 - y1.v)
f2.v <- ecdf(simdata2$Nrisky)
y2.v <- f2.v(1000)
y2.v
100 * (1 - y2.v)
f3.v <- ecdf(simdata3$Nrisky)
y3.v <- f3.v(1000)
y3.v
100 * (1 - y3.v)
# FIX: VHSV-4 previously recomputed the ECDF from simdata3 (copy-paste
# error); it must use simdata4.
f4.v <- ecdf(simdata4$Nrisky)
y4.v <- f4.v(1000)
y4.v
100 * (1 - y4.v)

# What proportion of VHSV iterations are less than 0.01 Prisky?
# VHSV Prisky tail probabilities: pX.v = P(Prisky <= 0.01) for scenario X;
# 100 * (1 - pX.v) = percent of iterations above 0.01.
prisk1.v <- ecdf(simdata1$Prisky)
p1.v <- prisk1.v(0.01)
p1.v
100 * (1 - p1.v)
prisk2.v <- ecdf(simdata2$Prisky)
p2.v <- prisk2.v(0.01)
p2.v
100 * (1 - p2.v)
prisk3.v <- ecdf(simdata3$Prisky)
p3.v <- prisk3.v(0.01)
p3.v
100 * (1 - p3.v)
prisk4.v <- ecdf(simdata4$Prisky)
p4.v <- prisk4.v(0.01)
p4.v
100 * (1 - p4.v)

###################
##### OVIO ----
#######################

# Read OVIO simulation output (scenarios OVIO-1..3) and long-format Nrisky.
Nrisky.data.long.o<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-12 Nrisky.ovio.csv")
simdata1.o<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-12 OVIO-1_simulation_data.csv")
simdata2.o<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-12 OVIO-2_simulation_data.csv")
simdata3.o<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-12 OVIO-3_simulation_data.csv")

# Nrisky summary stats for OVIO scenarios:
Nrisky.data.long.o %>%
  group_by(name) %>%
  summarize(median = median(value), sd = sd(value), mean = mean(value), max = max(value))

# Put all Prisky draws (summer .s / winter .w, per scenario) together and do
# summary stats.
# FIX: scenario-3 columns previously re-used simdata1.o (copy-paste error);
# they must come from simdata3.o.
Prisky.data.ovio <- data.frame(
  "Prisky.1.s" = simdata1.o$Prisky.s, "Prisky.1.w" = simdata1.o$Prisky.w,
  "Prisky.2.s" = simdata2.o$Prisky.s, "Prisky.2.w" = simdata2.o$Prisky.w,
  "Prisky.3.s" = simdata3.o$Prisky.s, "Prisky.3.w" = simdata3.o$Prisky.w
)
Prisky.ovio.long <- Prisky.data.ovio %>% pivot_longer(cols = c(1:6))
Prisky.ovio.long %>%
  group_by(name) %>%
  summarize(median.p = median(value), sd.p = sd(value), mean.p = mean(value), max.p = max(value))

# Per-scenario medians/means for plot annotations.
med_Nrisky_df.o <- Nrisky.data.long.o %>% group_by(name) %>% summarize(median = median(value))
mean_Nrisky_df.o <- Nrisky.data.long.o %>% group_by(name) %>% summarize(mean = mean(value)) %>% data.frame()

###### PLOTS FOR MANUSCRIPT ----

# Labels for facets:
dose.labs <- c("D0.5", "D1", "D2")
scenario.labs.o <- c("Baseline", "25% Clean Bait", "50% Clean Bait")
names(scenario.labs.o) <- c("Nrisky.baseline", "Nrisky.clean25", "Nrisky.clean50")

##### Create ECDF and graph with iterations truncated at 5 mil
# ECDF of OVIO Nrisky per scenario (iterations truncated at 5M).
ecdfplot.o <- Nrisky.data.long.o %>%
  filter(value < 5000000) %>%
  ggplot() +
  aes(value, color = name) +
  stat_ecdf(geom = "step") +
  theme_bw() +
  theme(axis.text = element_text(size = 12),
        axis.title = element_blank(),  # shared axis titles added via annotate_figure() later
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 12),
        legend.position = c(0.6, 0.4)) +
  scale_y_continuous(position = "right") +
  scale_x_continuous(breaks = c(500000, 1000000, 2000000, 3000000, 4000000, 5000000),
                     labels = c("500K", "1M", "2M", "3M", "4M", "5M")) +
  scale_color_manual(name = "Scenario",
                     labels = c("OVIO-1: Baseline", "OVIO-2: 25% clean bait", "OVIO-3: 50% clean bait"),
                     values = c("red", "green", "blue")) +
  expand_limits(x = 5000000)
ecdfplot.o

# Faceted (non-log) histogram of OVIO Nrisky per scenario.
facet.nolog.plot.o <- Nrisky.data.long.o %>%
  filter(value < 5000000) %>%
  ggplot() +
  aes(value, fill = name) +
  stat_bin(bins = 100, position = "identity", alpha = 0.5) +
  theme_bw() +
  theme(axis.text = element_text(size = 12),
        axis.title = element_blank(),
        legend.position = "none",
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 12)) +
  geom_vline(data = med_Nrisky_df.o, aes(xintercept = median, color = name), linetype = 2) +
  # FIX: size/hjust are fixed settings, not data mappings -- moved outside aes().
  geom_text(data = med_Nrisky_df.o,
            aes(x = median, y = 2000,
                label = prettyNum(round(median), big.mark = ",", scientific = FALSE)),
            size = 4, hjust = -0.2, show.legend = FALSE) +
  facet_wrap(~name) +  # add labeller = labeller(name = scenario.labs.o) if strip labels are wanted
  theme(strip.text.x = element_blank(),
        axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  scale_x_continuous(breaks = c(0.5e+06, 1e+06, 2e+06, 3e+06, 4e+06, 5e+06),
                     labels = c("500K", "1M", "2M", "3M", "4M", "5M")) +
  scale_fill_manual(name = "Scenario",
                    labels = c("OVIO-1: Baseline", "OVIO-2: 25% clean bait", "OVIO-3: 50% clean bait"),
                    values = c("red", "green", "blue")) +
  scale_color_manual(name = "Scenario",
                     labels = c("OVIO-1: Baseline", "OVIO-2: 25% clean bait", "OVIO-3: 50% clean bait"),
                     values = c("red", "green", "blue")) +
  expand_limits(x = 5000000)
facet.nolog.plot.o

# Two-panel OVIO figure (histograms | ECDF).
plot.o <- ggarrange(facet.nolog.plot.o, ecdfplot.o, common.legend = FALSE)
plot.o

#### OVIO SENSITIVITY ANALYSES (Values for Table 3) ----

# OVIO-1: standardized sensitivity regression of total Nrisky on all
# standardized inputs (summer/winter splits included).
betasensmod <- lm(scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
                    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
                    scale(P.release) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
                    scale(L) + scale(Tangyr.w) + scale(Tangyr.s),
                  data = simdata1.o)
summary(betasensmod)
betasensmod$coefficients
betasensmoddf <- data.frame(
  coeff = c("P.use", "P.use.summer", "P.usesusc.summer", "P.use.winter", "P.usesusc.winter",
            "P.release", "Prev.s", "Prev.w", "Sovio", "L", "Tangyr.w", "Tangyr.s"),
  value = betasensmod$coefficients[-1]  # drop the intercept
)
standardized.ovio.1 <- ggplot(betasensmoddf) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + theme_bw() +
  theme(axis.text = element_text(size = 12)) + ggtitle("standardized.ovio.1")

# OVIO-2: same model on scenario-2 data.
betasensmod2 <- lm(scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
                     scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
                     scale(P.release) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
                     scale(L) + scale(Tangyr.w) + scale(Tangyr.s),
                   data = simdata2.o)
summary(betasensmod2)
betasensmod2$coefficients
betasensmoddf2 <- data.frame(
  coeff = c("P.use", "P.use.summer", "P.usesusc.summer", "P.use.winter", "P.usesusc.winter",
            "P.release", "Prev.s", "Prev.w", "Sovio", "L", "Tangyr.w", "Tangyr.s"),
  value = betasensmod2$coefficients[-1]  # drop the intercept
)
# Tornado-style bar chart of the OVIO-2 standardized betas.
standardized.ovio.2 <- ggplot(betasensmoddf2) +
  aes(reorder(coeff, value), value) +
  geom_col() +
  coord_flip() +
  theme_bw() +
  theme(axis.text = element_text(size = 12)) +
  ggtitle("standardized.ovio.2")

# OVIO-3: standardized sensitivity regression on scenario-3 data.
betasensmod3 <- lm(scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
                     scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
                     scale(P.release) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
                     scale(L) + scale(Tangyr.w) + scale(Tangyr.s),
                   data = simdata3.o)
summary(betasensmod3)
betasensmod3$coefficients
betasensmoddf3 <- data.frame(
  coeff = c("P.use", "P.use.summer", "P.usesusc.summer", "P.use.winter", "P.usesusc.winter",
            "P.release", "Prev.s", "Prev.w", "Sovio", "L", "Tangyr.w", "Tangyr.s"),
  value = betasensmod3$coefficients[-1]  # drop the intercept
)
standardized.ovio.3 <- ggplot(betasensmoddf3) +
  aes(reorder(coeff, value), value) +
  geom_col() +
  coord_flip() +
  theme_bw() +
  theme(axis.text = element_text(size = 12)) +
  ggtitle("standardized.ovio.3")

### What proportion of OVIO iterations are less than 1000 risky trips?
# yX.o = P(Nrisky.total <= 1000); 100 * (1 - yX.o) = percent above 1000.
# OVIO-1
f1.o <- ecdf(simdata1.o$Nrisky.total)
y1.o <- f1.o(1000)
y1.o
100 * (1 - y1.o)
# OVIO-2
f2.o <- ecdf(simdata2.o$Nrisky.total)
y2.o <- f2.o(1000)
y2.o
100 * (1 - y2.o)
# OVIO-3
f3.o <- ecdf(simdata3.o$Nrisky.total)
y3.o <- f3.o(1000)
y3.o
100 * (1 - y3.o)

# What proportion of OVIO iterations are less than 0.01 summer Prisky?
# (Values for Table 2)
# OVIO-1
prisks1 <- ecdf(simdata1.o$Prisky.s)
p1s <- prisks1(0.01)
p1s
100 * (1 - p1s)
# OVIO-2
prisks2 <- ecdf(simdata2.o$Prisky.s)
p2s <- prisks2(0.01)
p2s
100 * (1 - p2s)
# OVIO-3
prisks3 <- ecdf(simdata3.o$Prisky.s)
p3s <- prisks3(0.01)
p3s
100 * (1 - p3s)

# What proportion of OVIO iterations are less than 0.01 winter Prisky?
# OVIO winter Prisky tail probabilities: pXw = P(Prisky.w <= 0.01);
# 100 * (1 - pXw) = percent of iterations above 0.01.
# OVIO-1
priskw1 <- ecdf(simdata1.o$Prisky.w)
p1w <- priskw1(0.01)
p1w
100 * (1 - p1w)
# OVIO-2
priskw2 <- ecdf(simdata2.o$Prisky.w)
p2w <- priskw2(0.01)
p2w
100 * (1 - p2w)
# OVIO-3
priskw3 <- ecdf(simdata3.o$Prisky.w)
p3w <- priskw3(0.01)
p3w
100 * (1 - p3w)

##############################
########### ASIAN FISH TAPEWORM (AFT) ----

# Read data (scenario 1 = low prevalence, scenario 2 = high prevalence).
Nrisky.aft.long<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 Nrisky.aft.csv")
simdataa1<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 AFT-1_simulation_data.csv")
simdataa2<-read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-08 AFT-2_simulation_data.csv")

# Per-scenario medians and means for plot annotations.
# NOTE(review): this intentionally reuses (overwrites) the
# med_Nrisky_df/mean_Nrisky_df names from the VHSV section above.
med_Nrisky_df <- Nrisky.aft.long %>% group_by(name) %>% summarize(median = median(value))
mean_Nrisky_df <- Nrisky.aft.long %>% group_by(name) %>% summarize(mean = mean(value)) %>% data.frame()

# Put both Prisky scenarios together and do summary stats.
Prisky.data.aft <- data.frame("Prisky.low" = simdataa1$Prisky, "Prisky.high" = simdataa2$Prisky)
Prisky.aft.long <- Prisky.data.aft %>% pivot_longer(cols = c(1:2))
Prisky.aft.long %>%
  group_by(name) %>%
  summarize(median.n = median(value), sd.n = sd(value), mean = mean(value), max = max(value))

# Nrisky summary stats:
Nrisky.aft.long %>%
  group_by(name) %>%
  summarize(median.n = median(value), sd.n = sd(value), mean = mean(value), max = max(value))

##### AFT PLOTTING FOR MANUSCRIPT ----
scenario.labs <- c("Low (2% max prevalence)", "High (20% max prevalence)")
names(scenario.labs) <- c("Nrisky.low", "Nrisky.high")

# Faceted histogram of AFT Nrisky per scenario (iterations truncated at 5M).
facet.nolog.plot.a <- Nrisky.aft.long %>%
  filter(value < 5000000) %>%
  ggplot() +
  aes(value, fill = name) +
  stat_bin(bins = 100, position = "identity", alpha = 0.5) +
  theme_bw() +
  theme(axis.text = element_text(size = 12),
        axis.title = element_blank(),  # shared axis titles added via annotate_figure() later
        legend.position = "none",
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 12)) +
  geom_vline(data = med_Nrisky_df, aes(xintercept = median, color = name), linetype = 2) +
  # FIX: size/hjust are fixed settings, not data mappings -- moved outside aes().
  geom_text(data = med_Nrisky_df,
            aes(x = median, y = 1000,
                label = prettyNum(round(median), big.mark = ",", scientific = FALSE)),
            size = 4, hjust = -0.2, show.legend = FALSE) +
  facet_wrap(~name) +  # add labeller = labeller(name = scenario.labs) if strip labels are wanted
  theme(strip.text.x = element_blank(),
        axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  # FIX: the last break was 50000000 (50M) but labelled "5M"; corrected to
  # 5000000 to match the label and the 5M truncation above.
  scale_x_continuous(breaks = c(1000000, 2000000, 3000000, 4000000, 5000000),
                     labels = c("1M", "2M", "3M", "4M", "5M")) +
  expand_limits(x = 5e+06) +
  scale_fill_manual(name = "Scenario",
                    labels = c("High AFT prevalence", "Low AFT prevalence"),
                    values = c("red", "blue")) +
  scale_color_manual(name = "Scenario",
                     labels = c("High AFT prevalence", "Low AFT prevalence"),
                     values = c("red", "blue"))
facet.nolog.plot.a

# ECDF of AFT Nrisky per scenario (iterations truncated at 5M).
ecdfplot.a <- ggplot(filter(Nrisky.aft.long, value < 5000000)) +
  aes(value, color = name) +
  stat_ecdf(geom = "step") +
  theme_bw() +
  theme(axis.text = element_text(size = 12),
        axis.title = element_blank(),
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 12),
        legend.position = c(0.6, 0.4)) +
  scale_fill_manual(name = "Scenario",
                    labels = c("High AFT prevalence", "Low AFT prevalence"),
                    values = c("red", "blue")) +
  scale_color_manual(name = "Scenario",
                     labels = c("High AFT prevalence", "Low AFT prevalence"),
                     values = c("red", "blue")) +
  # FIX: same 50000000 -> 5000000 break correction as the histogram above.
  scale_x_continuous(breaks = c(1000000, 2000000, 3000000, 4000000, 5000000),
                     labels = c("1M", "2M", "3M", "4M", "5M")) +
  scale_y_continuous(position = "right") +
  expand_limits(x = 5e+06)

# Two-panel AFT figure (histograms | ECDF).
plot.aft <- ggarrange(facet.nolog.plot.a, ecdfplot.a, common.legend = FALSE)
plot.aft

### Sensitivity analysis AFT (Values for Table 3) ----

# AFT-1 (low prevalence): standardized sensitivity betas.
beta.sensmod.aft1 <- lm(scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) +
                          scale(P.release) + scale(Prev) + scale(Saft) +
                          scale(anglers) + scale(trips),
                        data = simdataa1)
summary(beta.sensmod.aft1)
beta.sensmod.aft1$coefficients
beta.df.aft1 <- data.frame(
  coeff = c("P.use", "P.usesusc", "P.release", "Prev", "Saft", "anglers", "trips"),
  value = beta.sensmod.aft1$coefficients[-1]  # drop the intercept
)
standardized.aft.1 <- ggplot(beta.df.aft1) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + ggtitle("standardized.aft.1")

# AFT-2 (high prevalence): standardized sensitivity betas.
beta.sensmod.aft2 <- lm(scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) +
                          scale(P.release) + scale(Prev) + scale(Saft) +
                          scale(anglers) + scale(trips),
                        data = simdataa2)
summary(beta.sensmod.aft2)
beta.sensmod.aft2$coefficients
beta.df.aft2 <- data.frame(
  coeff = c("P.use", "P.usesusc", "P.release", "Prev", "Saft", "anglers", "trips"),
  value = beta.sensmod.aft2$coefficients[-1]  # drop the intercept
)
standardized.aft.2 <- ggplot(beta.df.aft2) + aes(reorder(coeff, value), value) +
  geom_col() + coord_flip() + ggtitle("standardized.aft.2")

### What proportion of AFT iterations are less than 1000 risky trips?
f1 <- ecdf(simdataa1$Nriskya)
y1 <- f1(1000)
y1
100 * (1 - y1)
f2 <- ecdf(simdataa2$Nriskya)
y2 <- f2(1000)
y2
100 * (1 - y2)

# What proportion of AFT iterations are less than 0.01 Prisky? (Values for Table 2)
prisk1 <- ecdf(simdataa1$Prisky)
p1 <- prisk1(0.01)
p1
100 * (1 - p1)
prisk2 <- ecdf(simdataa2$Prisky)
p2 <- prisk2(0.01)
p2
100 * (1 - p2)

### Code for generating Figure 2 ----
# Stack the three disease panels (VHSV / OVIO / AFT), each histogram + ECDF,
# then add shared axis titles around the whole figure.
top.row <- ggarrange(facet.nolog.plot.v, ecdfplot.v, common.legend = TRUE)
middle.row <- ggarrange(facet.nolog.plot.o, ecdfplot.o, common.legend = TRUE)
bottom.row <- ggarrange(facet.nolog.plot.a, ecdfplot.a, common.legend = TRUE)
plot.all <- ggarrange(top.row, middle.row, bottom.row, ncol = 1, nrow = 3, labels = c("A", "B", "C"))
plot.all.annotated <- annotate_figure(
  plot.all,
  bottom = text_grob("Number of risky trips", size = 16),
  left = text_grob("Number of iterations", rot = 90, vjust = 1, size = 16),
  right = text_grob("Proportion of iterations", rot = -90, vjust = 1, size = 16)
)

################
############ SENSITIVITY ANALYSIS ----
#######################
# This section of the code is provided for recreating the values shown in
# Table 4. Simulation data from manual alteration of the input parameters of
# interest is read in as csvs, which are available as part of the data
# repository.
### VHSV -- P.release down 10% ----
# Read in sensitivity data (note: change to the appropriate path for your
# file system).
simdata1minus<-read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-10 VHSV-1_sens_minus.csv")
simdata2minus<-read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-10 VHSV-2_sens_minus.csv")
simdata3minus<-read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-10 VHSV-3_sens_minus_data.csv")
simdata4minus<-read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-10 VHSV-4_sens_minus_data.csv")

# Nriskys together, long format.
Nrisky.data.minus <- data.frame(
  "Nrisky.minus1" = simdata1minus$Nrisky,
  "Nrisky.minus2" = simdata2minus$Nrisky,
  "Nrisky.minus3" = simdata3minus$Nrisky,
  "Nrisky.minus4" = simdata4minus$Nrisky
)
Nrisky.minus.long <- Nrisky.data.minus %>% pivot_longer(cols = c(1:4))

# Re-run the standardized sensitivity models on data simulated with a 10%
# reduction in P.release and compare betas against the baseline models.

# sens vhsv1
minus.sensmod.vhsv1 <- lm(scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) +
                            scale(P.useLSshiners) + scale(P.release.minus10) + scale(Prev) +
                            scale(Svhsv) + scale(anglers) + scale(trips),
                          data = simdata1minus)
summary(minus.sensmod.vhsv1)
# Compare the P.release betas.
# FIX: the original indexed coefficients[4], which for VHSV-1 is the
# P.useLSshiners beta (intercept + three predictors precede P.release);
# extract the coefficients by name so the comparison targets P.release.
minus.sensmod.vhsv1$coefficients["scale(P.release.minus10)"]
beta.sensmod.vhsv1$coefficients["scale(P.release)"]
# Compare mean Nrisky between baseline and reduced-P.release runs.
difference <- mean(simdata1$Nrisky) - mean(simdata1minus$Nrisky)
percent <- difference / mean(simdata1$Nrisky)

# sens vhsv2
minus.sensmod.vhsv2 <- lm(scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) +
                            scale(P.release.minus10) + scale(Prev) + scale(Svhsv) +
                            scale(anglers) + scale(trips),
                          data = simdata2minus)
summary(minus.sensmod.vhsv2)
minus.sensmod.vhsv2$coefficients
# Compare the P.release betas (by name; for VHSV-2 this matches the original
# coefficients[4], kept name-based for consistency and safety).
minus.sensmod.vhsv2$coefficients["scale(P.release.minus10)"]
# Baseline VHSV-2 standardized beta on the P.release term, for comparison
# with the altered-run coefficient printed above.
beta.sensmod.vhsv2$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata2$Nrisky) - mean(simdata2minus$Nrisky)
percent <- difference / mean(simdata2$Nrisky)

# Sensitivity, VHSV-3:
minus.sensmod.vhsv3 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSsusc) +
    scale(P.release.minus10) + scale(Prev) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata3minus
)
summary(minus.sensmod.vhsv3)
# Intercept + standardized coefficients (change in mean Nrisky per 1-SD
# change in each input):
minus.sensmod.vhsv3$coefficients
# Compare betas (P.release term):
beta.sensmod.vhsv3$coefficients[4]
minus.sensmod.vhsv3$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata3$Nrisky) - mean(simdata3minus$Nrisky)
percent <- difference / mean(simdata3$Nrisky)

# Sensitivity, VHSV-4:
# FIX: the reduced-Prelease run carries the altered column P.release.minus10;
# the original code used scale(P.release) here, unlike every other scenario
# in this section.
minus.sensmod.vhsv4 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.minus10) +
    scale(Prev) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata4minus
)
summary(minus.sensmod.vhsv4)
minus.sensmod.vhsv4$coefficients
# Compare betas (P.release term):
beta.sensmod.vhsv4$coefficients[4]
minus.sensmod.vhsv4$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata4$Nrisky) - mean(simdata4minus$Nrisky)
percent <- difference / mean(simdata4$Nrisky)

##### VHSV - Prelease up 10% ----
simdata1plus <- read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-11 VHSV-1_sens_plus.csv")
simdata2plus <- read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-11 VHSV-2_sens_plus.csv")
simdata3plus <- read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-11 VHSV-3_sens_plus_data.csv")
simdata4plus <- read.csv("C://Users/thom4412/Box/Meg's Code/Quantitative risk assessment/2022-03-11 VHSV-4_sens_plus_data.csv")

# Collect Nrisky draws and reshape to long form:
Nrisky.data.plus <- data.frame(
  "Nrisky.plus1" = simdata1plus$Nrisky,
  "Nrisky.plus2" = simdata2plus$Nrisky,
  "Nrisky.plus3" = simdata3plus$Nrisky,
  "Nrisky.plus4" = simdata4plus$Nrisky
)
Nrisky.plus.long <- Nrisky.data.plus %>% pivot_longer(cols = c(1:4))

# Refit the sensitivity regressions on the 10%-increased Prelease data and
# extract betas for comparison.

# Sensitivity, VHSV-1:
plus.sensmod.vhsv1 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSshiners) +
    scale(P.release.plus10) + scale(Prev) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata1plus
)
summary(plus.sensmod.vhsv1)
# Compare betas (P.release term), altered vs. baseline:
plus.sensmod.vhsv1$coefficients[4]
beta.sensmod.vhsv1$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata1$Nrisky) - mean(simdata1plus$Nrisky)
percent <- difference / mean(simdata1$Nrisky)

# Sensitivity, VHSV-2:
plus.sensmod.vhsv2 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.plus10) +
    scale(Prev) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata2plus
)
summary(plus.sensmod.vhsv2)
plus.sensmod.vhsv2$coefficients
# Compare betas (P.release term):
plus.sensmod.vhsv2$coefficients[4]
beta.sensmod.vhsv2$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata2$Nrisky) - mean(simdata2plus$Nrisky)
percent <- difference / mean(simdata2$Nrisky)

# Sensitivity, VHSV-3:
plus.sensmod.vhsv3 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSsusc) +
    scale(P.release.plus10) + scale(Prev) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata3plus
)
summary(plus.sensmod.vhsv3)
plus.sensmod.vhsv3$coefficients
# Compare betas (P.release term):
beta.sensmod.vhsv3$coefficients[4]
plus.sensmod.vhsv3$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata3$Nrisky) - mean(simdata3plus$Nrisky)
percent <- difference / mean(simdata3$Nrisky)

# Sensitivity, VHSV-4:
plus.sensmod.vhsv4 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.plus10) +
    scale(Prev) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata4plus
)
summary(plus.sensmod.vhsv4)
plus.sensmod.vhsv4$coefficients
# Compare betas (P.release term):
beta.sensmod.vhsv4$coefficients[4]
plus.sensmod.vhsv4$coefficients[4]
# Compare Nrisky means:
difference <- mean(simdata4$Nrisky) - mean(simdata4plus$Nrisky)
percent <- difference / mean(simdata4$Nrisky)

#### VHSV - Prevalence down 10% ----
# FIX: these data frames were originally assigned to sim*.prev.minus but are
# consumed below as simdata*.prev.minus (undefined-variable bug); the names
# are made consistent here.
simdata1.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-1_prevminus_data.csv")
simdata2.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-2_prevminus_data.csv")
simdata3.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-3_prevminus_data.csv")
simdata4.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-4_prevminus_data.csv")

# Sensitivity, VHSV-1, prevalence down 10%:
minus.prev.vhsv1 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSshiners) +
    scale(P.release) + scale(Prev.minus10) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata1.prev.minus
)
summary(minus.prev.vhsv1)
minus.prev.vhsv1$coefficients
# Compare betas (Prev term, position 6 in this model):
beta.sensmod.vhsv1$coefficients[6]
minus.prev.vhsv1$coefficients[6]
# Compare Nrisky means:
# FIX: these two lines conclude the VHSV-1 prevalence-minus comparison but
# were copy-pasted with scenario-3 objects in the original; corrected to
# scenario 1.
difference <- mean(simdata1$Nrisky) - mean(simdata1.prev.minus$Nrisky)
percent <- difference / mean(simdata1$Nrisky)

# Sensitivity, VHSV-2, prevalence down 10%:
minus.prev.vhsv2 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.minus10) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata2.prev.minus
)
summary(minus.prev.vhsv2)
# Intercept + standardized coefficients (change in mean Nrisky per 1-SD
# change in each input):
minus.prev.vhsv2$coefficients
# Compare betas (Prev term, position 5 in this smaller model):
beta.sensmod.vhsv2$coefficients[5]
minus.prev.vhsv2$coefficients[5]
# Compare Nrisky means:
difference <- mean(simdata2$Nrisky) - mean(simdata2.prev.minus$Nrisky)
percent <- difference / mean(simdata2$Nrisky)

# Sensitivity, VHSV-3, prevalence down 10%:
minus.prev.vhsv3 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSsusc) +
    scale(P.release) + scale(Prev.minus10) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata3.prev.minus
)
summary(minus.prev.vhsv3)
minus.prev.vhsv3$coefficients
# Compare betas (Prev term, position 6):
beta.sensmod.vhsv3$coefficients[6]
minus.prev.vhsv3$coefficients[6]
# Compare Nrisky means:
difference <- mean(simdata3$Nrisky) - mean(simdata3.prev.minus$Nrisky)
percent <- difference / mean(simdata3$Nrisky)

# Sensitivity, VHSV-4, prevalence down 10%:
minus.prev.vhsv4 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.minus10) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata4.prev.minus
)
summary(minus.prev.vhsv4)
minus.prev.vhsv4$coefficients
# Compare betas (Prev term, position 5):
beta.sensmod.vhsv4$coefficients[5]
minus.prev.vhsv4$coefficients[5]
# Compare Nrisky means:
difference <- mean(simdata4$Nrisky) - mean(simdata4.prev.minus$Nrisky)
percent <- difference / mean(simdata4$Nrisky)

############## VHSV - Prevalence up 10% ----
# (VHSV - Prevalence up 10%, continued)
# FIX: originally assigned to sim*.prev.plus but consumed below as
# simdata*.prev.plus (undefined-variable bug); names made consistent.
simdata1.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-1_prevplus_data.csv")
simdata2.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-2_prevplus_data.csv")
simdata3.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-3_prevplus_data.csv")
simdata4.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 VHSV-4_prevplus_data.csv")

# Sensitivity, VHSV-1, prevalence up 10%:
plus.prev.vhsv1 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSshiners) +
    scale(P.release) + scale(Prev.plus10) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata1.prev.plus
)
summary(plus.prev.vhsv1)
plus.prev.vhsv1$coefficients
# Compare betas (Prev term, position 6):
beta.sensmod.vhsv1$coefficients[6]
plus.prev.vhsv1$coefficients[6]
# Compare Nrisky means.
# FIX: the original used scenario-3 objects here (copy-paste); corrected to
# scenario 1, matching the wilcox.test below.
difference <- mean(simdata1$Nrisky) - mean(simdata1.prev.plus$Nrisky)
percent <- difference / mean(simdata1$Nrisky)
wilcox.test(simdata1$Nrisky, simdata1.prev.plus$Nrisky)

# Sensitivity, VHSV-2, prevalence up 10%:
plus.prev.vhsv2 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.plus10) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata2.prev.plus
)
summary(plus.prev.vhsv2)
plus.prev.vhsv2$coefficients
# Compare betas (Prev term, position 5):
beta.sensmod.vhsv2$coefficients[5]
plus.prev.vhsv2$coefficients[5]
# Compare Nrisky means:
difference <- mean(simdata2$Nrisky) - mean(simdata2.prev.plus$Nrisky)
percent <- difference / mean(simdata2$Nrisky)
wilcox.test(simdata2$Nrisky, simdata2.prev.plus$Nrisky)

# Sensitivity, VHSV-3, prevalence up 10%:
plus.prev.vhsv3 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.useLSfish) + scale(P.useLSsusc) +
    scale(P.release) + scale(Prev.plus10) + scale(Svhsv) + scale(anglers) +
    scale(trips),
  data = simdata3.prev.plus
)
summary(plus.prev.vhsv3)
plus.prev.vhsv3$coefficients
# Compare betas (Prev term, position 6):
beta.sensmod.vhsv3$coefficients[6]
plus.prev.vhsv3$coefficients[6]
# Compare Nrisky means:
difference <- mean(simdata3$Nrisky) - mean(simdata3.prev.plus$Nrisky)
percent <- difference / mean(simdata3$Nrisky)
wilcox.test(simdata3$Nrisky, simdata3.prev.plus$Nrisky)

# Sensitivity, VHSV-4, prevalence up 10%
# (FIX: original comment said "down"; this uses the Prev.plus10 data):
plus.prev.vhsv4 <- lm(
  scale(Nrisky) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.plus10) + scale(Svhsv) + scale(anglers) + scale(trips),
  data = simdata4.prev.plus
)
summary(plus.prev.vhsv4)
plus.prev.vhsv4$coefficients
# Compare betas (Prev term, position 5):
beta.sensmod.vhsv4$coefficients[5]
plus.prev.vhsv4$coefficients[5]
# Compare Nrisky means:
difference <- mean(simdata4$Nrisky) - mean(simdata4.prev.plus$Nrisky)
percent <- difference / mean(simdata4$Nrisky)
wilcox.test(simdata4$Nrisky, simdata4.prev.plus$Nrisky)

###################################################
##### OVIO
####################################################
# OVIO sensitivity analysis -- probability of release down 10%
ovio1.release.minus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 OVIO-1_releaseminus_data.csv")
ovio2.release.minus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-11 OVIO-2_releaseminus_data.csv")
ovio3.release.minus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-12 OVIO-3_releaseminus_data.csv")

# OVIO-1, release down 10%:
ovio1.releaseminus.mod <- lm(
  data = ovio1.release.minus10,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release.minus10) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
    scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
summary(ovio1.releaseminus.mod)
# Intercept + standardized coefficients (change in mean Nrisky.total per 1-SD
# change in each input):
ovio1.releaseminus.mod$coefficients
# Compare betas (P.release term, position 7; baseline betasensmod* are fit
# elsewhere in this file):
betasensmod$coefficients[7]
ovio1.releaseminus.mod$coefficients[7]
# Compare Nrisky totals:
difference <- mean(simdata1.o$Nrisky.total) - mean(ovio1.release.minus10$Nrisky.total)
percent <- difference / mean(simdata1.o$Nrisky.total)
wilcox.test(simdata1.o$Nrisky.total, ovio1.release.minus10$Nrisky.total)

# OVIO-2, release down 10%:
ovio2.releaseminus.mod <- lm(
  data = ovio2.release.minus10,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release.minus10) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
    scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
summary(ovio2.releaseminus.mod)
ovio2.releaseminus.mod$coefficients
# Compare betas:
betasensmod2$coefficients[7]
ovio2.releaseminus.mod$coefficients[7]
# Compare Nrisky totals:
difference <- mean(simdata2.o$Nrisky.total) - mean(ovio2.release.minus10$Nrisky.total)
percent <- difference / mean(simdata2.o$Nrisky.total)
wilcox.test(simdata2.o$Nrisky.total, ovio2.release.minus10$Nrisky.total)

# OVIO-3, release down 10%:
ovio3.releaseminus.mod <- lm(
  data = ovio3.release.minus10,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release.minus10) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
    scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
summary(ovio3.releaseminus.mod)
# FIX: the original printed ovio2.releaseminus.mod's coefficients here.
ovio3.releaseminus.mod$coefficients
# Compare betas:
betasensmod3$coefficients[7]
ovio3.releaseminus.mod$coefficients[7]
# Compare Nrisky totals:
difference <- mean(simdata3.o$Nrisky.total) - mean(ovio3.release.minus10$Nrisky.total)
percent <- difference / mean(simdata3.o$Nrisky.total)
wilcox.test(simdata3.o$Nrisky.total, ovio3.release.minus10$Nrisky.total)

# OVIO sensitivity analysis -- probability of release UP 10%
ovio1.release.plus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-13 OVIO-1_releaseplus_data.csv")
ovio2.release.plus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-13 OVIO-2_releaseplus_data.csv")
ovio3.release.plus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-13 OVIO-3_releaseplus_data.csv")

# OVIO-1, release up 10% (FIX: original comment said "down"):
ovio1.releaseplus.mod <- lm(
  data = ovio1.release.plus10,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release.plus10) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
    scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
summary(ovio1.releaseplus.mod)
ovio1.releaseplus.mod$coefficients
# Compare betas (P.release term, position 7):
betasensmod$coefficients[7]
ovio1.releaseplus.mod$coefficients[7]
# Compare Nrisky totals:
difference <- mean(simdata1.o$Nrisky.total) - mean(ovio1.release.plus10$Nrisky.total)
percent <- difference / mean(simdata1.o$Nrisky.total)
wilcox.test(simdata1.o$Nrisky.total, ovio1.release.plus10$Nrisky.total)

# OVIO-2, release up 10%:
ovio2.releaseplus.mod <- lm(
  data = ovio2.release.plus10,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release.plus10) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
    scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
summary(ovio2.releaseplus.mod)
# Intercept + standardized coefficients for the OVIO-2 release-up model:
ovio2.releaseplus.mod$coefficients
# Compare betas (P.release term, position 7):
betasensmod2$coefficients[7]
ovio2.releaseplus.mod$coefficients[7]
# Compare Nrisky totals:
difference <- mean(simdata2.o$Nrisky.total) - mean(ovio2.release.plus10$Nrisky.total)
percent <- difference / mean(simdata2.o$Nrisky.total)
wilcox.test(simdata2.o$Nrisky.total, ovio2.release.plus10$Nrisky.total)

# OVIO-3, release up 10% (FIX: original comment said "down"):
ovio3.releaseplus.mod <- lm(
  data = ovio3.release.plus10,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release.plus10) + scale(Prev.s) + scale(Prev.w) + scale(Sovio) +
    scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
summary(ovio3.releaseplus.mod)
# FIX: the original printed ovio2.releaseplus.mod's coefficients here.
ovio3.releaseplus.mod$coefficients
# Compare betas:
betasensmod3$coefficients[7]
ovio3.releaseplus.mod$coefficients[7]
# Compare Nrisky totals:
difference <- mean(simdata3.o$Nrisky.total) - mean(ovio3.release.plus10$Nrisky.total)
percent <- difference / mean(simdata3.o$Nrisky.total)
wilcox.test(simdata3.o$Nrisky.total, ovio3.release.plus10$Nrisky.total)

########
### OVIO prevalence reduction ----
# Reduced-prevalence data:
ovio1.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-13 OVIO-1_prevminus_data.csv")

# OVIO-1, prevalence minus 10%:
ovio1.prevminus.mod <- lm(
  data = ovio1.prev.minus,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release) + scale(Prev.s.minus10) + scale(Prev.w.minus10) +
    scale(Sovio) + scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
ovio2.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-14 OVIO-2_prevminus_data.csv")
ovio3.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-14 OVIO-3_prevminus_data.csv")

# OVIO-1 prevalence-minus comparisons (model fit above).
# Compare betas: summer and winter prevalence terms (positions 8 and 9).
betasensmod$coefficients[c(8, 9)]
ovio1.prevminus.mod$coefficients[c(8, 9)]
# Compare Nrisky totals:
difference <- mean(simdata1.o$Nrisky.total) - mean(ovio1.prev.minus$Nrisky.total)
percent <- difference / mean(simdata1.o$Nrisky.total)
# FIX: the original referenced undefined prevdata1.ovio.minus.
wilcox.test(simdata1.o$Nrisky.total, ovio1.prev.minus$Nrisky.total)

# OVIO-2, prevalence minus 10%:
ovio2.prevminus.mod <- lm(
  data = ovio2.prev.minus,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release) + scale(Prev.s.minus10) + scale(Prev.w.minus10) +
    scale(Sovio) + scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
# Compare betas, summer/winter prevalence:
betasensmod2$coefficients[c(8, 9)]
ovio2.prevminus.mod$coefficients[c(8, 9)]
# Compare Nrisky totals:
difference <- mean(simdata2.o$Nrisky.total) - mean(ovio2.prev.minus$Nrisky.total)
percent <- difference / mean(simdata2.o$Nrisky.total)
# FIX: the original referenced undefined prevdata2.ovio.minus.
wilcox.test(simdata2.o$Nrisky.total, ovio2.prev.minus$Nrisky.total)

# OVIO-3, prevalence minus 10%:
ovio3.prevminus.mod <- lm(
  data = ovio3.prev.minus,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release) + scale(Prev.s.minus10) + scale(Prev.w.minus10) +
    scale(Sovio) + scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
# Compare betas, summer/winter prevalence:
betasensmod3$coefficients[c(8, 9)]
ovio3.prevminus.mod$coefficients[c(8, 9)]
difference <- mean(simdata3.o$Nrisky.total) - mean(ovio3.prev.minus$Nrisky.total)
percent <- difference / mean(simdata3.o$Nrisky.total)
# FIX: the original compared against prevdata2.ovio.minus (undefined, and the
# wrong scenario); corrected to the OVIO-3 altered data.
wilcox.test(simdata3.o$Nrisky.total, ovio3.prev.minus$Nrisky.total)

########
### OVIO prevalence increase 10% ----
# Increased-prevalence data:
ovio1.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 OVIO-1_prevplus_data.csv")
ovio2.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 OVIO-2_prevplus_data.csv")
ovio3.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 OVIO-3_prevplus_data.csv")

# OVIO-1, prevalence plus 10%:
ovio1.prevplus.mod <- lm(
  data = ovio1.prev.plus,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release) + scale(Prev.s.plus10) + scale(Prev.w.plus10) +
    scale(Sovio) + scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
# Compare betas, summer/winter prevalence:
betasensmod$coefficients[c(8, 9)]
ovio1.prevplus.mod$coefficients[c(8, 9)]
# Compare Nrisky totals:
difference <- mean(simdata1.o$Nrisky.total) - mean(ovio1.prev.plus$Nrisky.total)
percent <- difference / mean(simdata1.o$Nrisky.total)
# FIX: the original referenced undefined prevdata1.ovio.plus.
wilcox.test(simdata1.o$Nrisky.total, ovio1.prev.plus$Nrisky.total)

# OVIO-2, prevalence plus 10%:
ovio2.prevplus.mod <- lm(
  data = ovio2.prev.plus,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release) + scale(Prev.s.plus10) + scale(Prev.w.plus10) +
    scale(Sovio) + scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
# Compare betas, summer/winter prevalence:
betasensmod2$coefficients[c(8, 9)]
ovio2.prevplus.mod$coefficients[c(8, 9)]
# Compare Nrisky totals:
difference <- mean(simdata2.o$Nrisky.total) - mean(ovio2.prev.plus$Nrisky.total)
percent <- difference / mean(simdata2.o$Nrisky.total)
# FIX: the original referenced undefined prevdata2.ovio.plus.
wilcox.test(simdata2.o$Nrisky.total, ovio2.prev.plus$Nrisky.total)

# OVIO-3, prevalence plus 10% (FIX: original comment said "minus"):
ovio3.prevplus.mod <- lm(
  data = ovio3.prev.plus,
  scale(Nrisky.total) ~ scale(P.use) + scale(P.use.summer) +
    scale(P.usesusc.summer) + scale(P.use.winter) + scale(P.usesusc.winter) +
    scale(P.release) + scale(Prev.s.plus10) + scale(Prev.w.plus10) +
    scale(Sovio) + scale(L) + scale(Tangyr.w) + scale(Tangyr.s)
)
# Compare betas, summer/winter prevalence:
betasensmod3$coefficients[c(8, 9)]
ovio3.prevplus.mod$coefficients[c(8, 9)]
# FIX: the original compared against ovio3.prev.minus (the reduction run);
# corrected to the increased-prevalence data.
difference <- mean(simdata3.o$Nrisky.total) - mean(ovio3.prev.plus$Nrisky.total)
percent <- difference / mean(simdata3.o$Nrisky.total)
# FIX: the original referenced undefined prevdata3.ovio.plus.
wilcox.test(simdata3.o$Nrisky.total, ovio3.prev.plus$Nrisky.total)

##############
### AFT ----
#############
# AFT sensitivity analysis with P.release DOWN 10%
aft1.release.minus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-13 AFT1_releaseminus_data.csv")
aft2.release.minus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-13 AFT2_releaseminus_data.csv")

# AFT-1 with P.release minus 10%:
aft.releaseminus.mod <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.minus10) +
    scale(Prev) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft1.release.minus10
)
summary(aft.releaseminus.mod)
# Intercept + standardized coefficients (change in mean Nriskya per 1-SD
# change in each input):
aft.releaseminus.mod$coefficients
# Compare betas (P.release term, position 4; baseline beta.sensmod.aft* are
# fit elsewhere in this file):
beta.sensmod.aft1$coefficients[4]
aft.releaseminus.mod$coefficients[4]
# Compare Nriskya means:
difference <- mean(simdataa1$Nriskya) - mean(aft1.release.minus10$Nriskya)
percent <- difference / mean(simdataa1$Nriskya)
wilcox.test(simdataa1$Nriskya, aft1.release.minus10$Nriskya)

# AFT-2 with P.release minus 10%:
aft.releaseminus.mod2 <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.minus10) +
    scale(Prev) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft2.release.minus10
)
summary(aft.releaseminus.mod2)
aft.releaseminus.mod2$coefficients
# Compare betas (P.release term):
beta.sensmod.aft2$coefficients[4]
aft.releaseminus.mod2$coefficients[4]
# Compare Nriskya means:
difference <- mean(simdataa2$Nriskya) - mean(aft2.release.minus10$Nriskya)
percent <- difference / mean(simdataa2$Nriskya)
wilcox.test(simdataa2$Nriskya, aft2.release.minus10$Nriskya)

# AFT sensitivity analysis with P.release UP 10%
# (FIX: this header originally said DOWN; the files read next are the
# _releaseplus_ runs.)
# Increased-Prelease AFT runs:
aft1.release.plus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 AFT1_releaseplus_data.csv")
aft2.release.plus10 <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 AFT2_releaseplus_data.csv")

# AFT-1 with P.release plus 10%:
aft.releaseplus.mod <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.plus10) +
    scale(Prev) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft1.release.plus10
)
summary(aft.releaseplus.mod)
# Intercept + standardized coefficients (change in mean Nriskya per 1-SD
# change in each input):
aft.releaseplus.mod$coefficients
# Compare betas (P.release term, position 4; baseline beta.sensmod.aft* are
# fit elsewhere in this file):
beta.sensmod.aft1$coefficients[4]
aft.releaseplus.mod$coefficients[4]
# Compare Nriskya means, baseline vs. altered run:
difference <- mean(simdataa1$Nriskya) - mean(aft1.release.plus10$Nriskya)
percent <- difference / mean(simdataa1$Nriskya)
wilcox.test(simdataa1$Nriskya, aft1.release.plus10$Nriskya)

# AFT-2 with P.release plus 10%:
aft.releaseplus.mod2 <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release.plus10) +
    scale(Prev) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft2.release.plus10
)
summary(aft.releaseplus.mod2)
aft.releaseplus.mod2$coefficients
# Compare betas (P.release term):
beta.sensmod.aft2$coefficients[4]
aft.releaseplus.mod2$coefficients[4]
# Compare Nriskya means:
difference <- mean(simdataa2$Nriskya) - mean(aft2.release.plus10$Nriskya)
percent <- difference / mean(simdataa2$Nriskya)
wilcox.test(simdataa2$Nriskya, aft2.release.plus10$Nriskya)

# AFT sensitivity analysis on prevalence down 10%
aft1.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 AFT1_prevminus_data.csv")
aft2.prev.minus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-15 AFT2_prevminus_data.csv")

## AFT-1 with prevalence minus 10%:
aft.prevminus.mod1 <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.minus10) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft1.prev.minus
)
summary(aft.prevminus.mod1)
# Intercept + standardized coefficients (change in mean Nriskya per 1-SD
# change in each input):
aft.prevminus.mod1$coefficients
# Compare betas.
# FIX: this section varies prevalence, so the Prev coefficient (position 5:
# intercept, P.use, P.usesusc, P.release, Prev) is the one to compare; the
# original indexed position 4, which is the P.release term. This matches the
# parallel VHSV-2/-4 prevalence sections, which use position 5.
beta.sensmod.aft1$coefficients[5]
aft.prevminus.mod1$coefficients[5]
# Compare Nriskya means:
difference <- mean(simdataa1$Nriskya) - mean(aft1.prev.minus$Nriskya)
percent <- difference / mean(simdataa1$Nriskya)
wilcox.test(simdataa1$Nriskya, aft1.prev.minus$Nriskya)

## AFT-2 with prevalence minus 10%:
aft.prevminus.mod2 <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.minus10) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft2.prev.minus
)
summary(aft.prevminus.mod2)
aft.prevminus.mod2$coefficients
# Compare betas (Prev term, position 5; original used 4 = P.release):
beta.sensmod.aft2$coefficients[5]
aft.prevminus.mod2$coefficients[5]
# Compare Nriskya means:
difference <- mean(simdataa2$Nriskya) - mean(aft2.prev.minus$Nriskya)
percent <- difference / mean(simdataa2$Nriskya)
wilcox.test(simdataa2$Nriskya, aft2.prev.minus$Nriskya)

# AFT sensitivity analysis on prevalence up 10%
aft1.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-16 AFT1_prevplus_data.csv")
aft2.prev.plus <- read.csv("C:/Users/thom4412/Box/Meg's Code/Quantitative Risk Assessment/2022-03-16 AFT2_prevplus_data.csv")

## AFT-1 with prevalence plus 10%:
aft.prevplus.mod1 <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.plus10) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft1.prev.plus
)
summary(aft.prevplus.mod1)
aft.prevplus.mod1$coefficients
# Compare betas (Prev term, position 5; original used 4 = P.release):
beta.sensmod.aft1$coefficients[5]
aft.prevplus.mod1$coefficients[5]
# Compare Nriskya means:
difference <- mean(simdataa1$Nriskya) - mean(aft1.prev.plus$Nriskya)
percent <- difference / mean(simdataa1$Nriskya)
wilcox.test(simdataa1$Nriskya, aft1.prev.plus$Nriskya)

## AFT-2 with prevalence plus 10%:
aft.prevplus.mod2 <- lm(
  scale(Nriskya) ~ scale(P.use) + scale(P.usesusc) + scale(P.release) +
    scale(Prev.plus10) + scale(Saft) + scale(anglers) + scale(trips),
  data = aft2.prev.plus
)
summary(aft.prevplus.mod2)
aft.prevplus.mod2$coefficients
# Compare betas (Prev term, position 5; original used 4 = P.release):
beta.sensmod.aft2$coefficients[5]
aft.prevplus.mod2$coefficients[5]
# Compare Nriskya means:
difference <- mean(simdataa2$Nriskya) - mean(aft2.prev.plus$Nriskya)
percent <- difference / mean(simdataa2$Nriskya)
wilcox.test(simdataa2$Nriskya, aft2.prev.plus$Nriskya)