###                                   Appendix S1                                  ###
###                                                                                ###
### Excerpt of code written in Program R to determine the accuracy and precision   ###
### of statistical population reconstruction based on four different formulations  ### 
### of the reconstruction likelihood and varying amounts of radio-telemetry data.  ###

######################################################################################
################################## Workspace Set-Up ##################################
######################################################################################

# Clear workspace environment and load required packages.
# NOTE(review): rm(list=ls()) wipes whatever session sources this script --
# kept to match the published appendix, but consider removing it.
rm(list=ls())
# Use library() rather than require(): library() stops with an error when a
# package is missing, while require() only warns and returns FALSE, letting
# the script fail much later in a confusing way.
library(stats)    # base statistics (rbinom, rpois, quantile, ...)
library(dfoptim)  # nmkb(), the bounded Nelder-Mead optimizer used below
library(doSNOW)   # SOCK-cluster backend for foreach/%dopar% (attaches foreach)

######################################################################################
################################## Model Definition ##################################
######################################################################################

# Run one complete simulate-and-reconstruct experiment:
#   1. simulate a harvested 14-age-class population (StaPopRec_MonCar),
#   2. reconstruct it 37 times with nmkb() -- one baseline fit without
#      telemetry, then 3 age classes x 4 telemetry durations x 3 likelihood
#      formulations,
#   3. return the simulated data, all fitted parameters, and per-fit bias.
#
# OptVar: c(target population size, recruitment parameter, harvest
#           probability, survival probability, plot flag, years to report)
#         -- meanings inferred from usage below; confirm against the paper.
# Reads globals: HisHun (scaled historic effort series) and package dfoptim.
# Returns one flat numeric vector (see final return()).
StaPopRec_ModOpt<-function(OptVar=c(3000,750,0.25,0.90,F,9)) {
  
  # Re-initialize year parameters: Y = years of harvest data,
  # y = years of radio-telemetry data (NB: y is reused as a loop index below)
  Y=8; y=7
  
  # Define Monte Carlo simulation: project the population forward 50 years
  # under binomial harvest and survival and Poisson recruitment
  StaPopRec_MonCar<-function(var) {
    
    ## Import parameter values
    {
      # Age-constant annual survival probability
      S=rep(var[4],14)
      # Vulnerability: convert harvest probability to an instantaneous rate
      c=-log(1-rep(var[3],14))
      # 50 years of relative effort: 50-Y values resampled with replacement
      # from the historic series, followed by a permutation of all Y values
      f=c(sample(HisHun,50-Y,T),sample(HisHun))
    }
    
    ## Create projection blanks (rows = years 1..51; column 15 = yearly total)
    {
      N=matrix(NA,51,15)    ### Projected population size (14 ages + total)
      E=matrix(NA,51,15)    ### Projected harvest matrix (14 ages + total)
      n=matrix(0,51,3)      ### Projected radio-telemetry population size
      v=matrix(0,51,3)      ### Projected radio-telemetry natural mortality matrix
      u=matrix(0,51,3)      ### Projected radio-telemetry harvest matrix
    }
    
    ## Calculate initial population size: geometric age structure (x0.75 per
    ## age class) normalized so the total is var[1]
    N[1,1:14]=round(var[1]*0.75^c(0:13)/sum(0.75^c(0:13)))
    
    ## Loop projection process through 50 years
    for (i in 1:50) {
      
      ### Harvest: binomial with exploitation rate 1 - exp(-c*f)
      for (j in 1:14) E[i,j]=rbinom(1,N[i,j],(1-exp(-c[j]*f[i])))
      
      ### Survival: non-harvested animals advance one age class
      for (j in 1:13) N[i+1,j+1]=rbinom(1,N[i,j]-E[i,j],S[j])
      
      ### Reproduction: sum of var[2] Poisson(1) draws, i.e. Poisson(var[2])
      ### recruits independent of current abundance
      N[i+1,1]=sum(rpois(var[2],1))
    }
    
    ## Generate nuv matrices: radio-telemetry data for the last 7 years;
    ## n = tagged animals at risk, v = natural deaths, u = harvested among
    ## survivors; 67% of surviving tagged animals carry over to the next year
    {
      # Fixed per-year tag totals, shuffled across years
      n[(50-7+1):50,3]=sample(c(12,29,34,48,42,35,40))
      n[(50-7+1):50,2]=round(n[(50-7+1):50,3]*2/3)
      n[(50-7+1):50,1]=round(n[(50-7+1):50,3]*1/3)
      for (i in (50-7+1):50) {
        for (j in 1:3) {
          v[i,j]=rbinom(1,n[i,j],(1-S[1]))
          u[i,j]=rbinom(1,n[i,j]-v[i,j],(1-exp(-c[1]*f[i])))
          # Row 51 acts as harmless scratch space when i = 50
          n[i+1,j]=n[i+1,j]+rbinom(1,n[i,j]-v[i,j]-u[i,j],0.67)
        }
      }
    }
    
    ## Trim to 50 years and calculate yearly totals into column 15
    N=N[1:50,]; E=E[1:50,]; N[,15]=rowSums(N[,1:14]); E[,15]=rowSums(E[,1:14])
    
    ## Produce graphs as requested (var[5] = plot flag, var[6] = years shown)
    if (var[5]) plot(c((50+1-var[6]):50),N[(50+1-var[6]):50,15],type='l',xlab="Time (years)", ylab="PopSize")
    
    ## Return the last var[6] years of the projection plus the telemetry data
    return(list(N=N[(50+1-var[6]):50,],E=E[(50+1-var[6]):50,],
                f=f[(50+1-var[6]):50],S=S[1:3],c=c[1:3],
                n=n[(50-7+1):50,],u=u[(50-7+1):50,],v=v[(50-7+1):50,]))
  }
  
  # Define three-tier population estimation: rebuild the expected Y x 3
  # abundance matrix from a fitted parameter vector
  # var = c(Y age-1 cohort sizes, initial age-2 and age-3 sizes,
  #         vulnerability x 10000, survival x 10000)
  # Reads N (blank Y x 3 matrix) and f (effort) from the enclosing scope.
  StaPopRec_PopEst<-function(var) {
    
    ## Import initial (diagonal) cohort values
    N[1:Y,1]=var[1:Y]
    N[1,2:3]=var[(Y+1):(Y+2)]
    
    ## Set vulnerability coefficients (optimizer works on a x10000 scale)
    c=rep(var[Y+3],3)/10000
    
    ## Set survival probabilities (x10000 scale)
    S=rep(var[Y+4],3)/10000 
    
    ## Define expected population sizes: survivors of harvest and natural
    ## mortality age forward; age class 3 is a plus group.
    ## NOTE(review): the second term uses c[2] where StaPopRec_JoiLik uses
    ## c[3]; harmless here because all three c entries are equal, but the
    ## inconsistency is worth confirming against the intended model.
    for (i in 1:(Y-1)) {
      N[i+1,2]=N[i,1]*exp(-c[1]*f[i])*S[1]
      N[i+1,3]=N[i,2]*exp(-c[2]*f[i])*S[2]+
               N[i,3]*exp(-c[2]*f[i])*S[3]
    }
    
    # Return population estimate (per-age matrix and yearly totals)
    return(list(N=N,popest=N[,1]+N[,2]+N[,3]))
  }
  
  ## Define three-tier joint likelihood: negative joint log-likelihood
  ## minimized by nmkb(). Reads from the enclosing scope: h (observed
  ## harvest), f (effort), Y, y, RadTel (0 = no telemetry term), blanks
  ## N/E/L_*, and the telemetry vectors n, v, u set in the fitting loop.
  StaPopRec_JoiLik<-function(var) {
    
    ### Set up values for likelihoods
    {
      #### Import initial (diagonal) cohort values
      N[1:Y,1]=var[1:Y]
      N[1,2:3]=var[(Y+1):(Y+2)]
      
      #### Set vulnerability coefficients (x10000 scale)
      c=c(var[Y+3],var[Y+3],var[Y+3])/10000
      
      #### Set survival probabilities (x10000 scale)
      S=c(var[Y+4],var[Y+4],var[Y+4])/10000 
      
      #### Define expected population sizes
      for (i in 1:(Y-1)) {
        N[i+1,2]=N[i,1]*exp(-c[1]*f[i])*S[1]
        N[i+1,3]=N[i,2]*exp(-c[2]*f[i])*S[2]+
                 N[i,3]*exp(-c[3]*f[i])*S[3]
      }
      
      # Perform secondary test of model boundaries: expected abundance must
      # not fall below the observed harvest; return a large penalty if it does
      if (any(N<h)) return (10004)
      
      # Define expected harvest values
      for (i in 1:Y) {
        for (j in 1:3) {
          E[i,j]=N[i,j]*(1-exp(-c[j]*f[i]))
        }
      }
    }  
    
    ### Define (diagonal) cohort-specific likelihoods (age-at-harvest data);
    ### the sum(log(...)) terms are log binomial/multinomial coefficients
    {
      # Full cohorts observed at all three ages.
      # NOTE(review): "1:1:h[i+1,2]" parses as (1:1):h[i+1,2] == 1:h[i+1,2],
      # so the value is unaffected, but it looks like a typo -- confirm.
      for (i in 1:(Y-2)) {
        L_AgeHar[i]=sum(log((h[i,1]+h[i+1,2]+h[i+2,3]+1-1:h[i,1])/1:h[i,1]))+
          sum(log((h[i+1,2]+h[i+2,3]+1-1:h[i+1,2])/1:1:h[i+1,2]))+
          h[i+0,1]*log(E[i+0,1]/(E[i,1]+E[i+1,2]+E[i+2,3]))+
          h[i+1,2]*log(E[i+1,2]/(E[i,1]+E[i+1,2]+E[i+2,3]))+
          h[i+2,3]*log(E[i+2,3]/(E[i,1]+E[i+1,2]+E[i+2,3]))
      }
      
      # Next-to-last cohort: only two harvest observations fall in the data
      L_AgeHar[Y-1]=sum(log((N[Y-1,1]+1-1:h[Y-1,1])/1:h[Y-1,1]))+
        sum(log((N[Y-1,1]-h[Y-1,1]+1-1:h[Y,2])/1:h[Y,2]))+
        h[Y-1,1]*log(1-exp(-c[1]*f[Y-1]))+
        h[Y,2]*log(exp(-c[1]*f[Y-1])*S[1]*(1-exp(-c[2]*f[Y])))+
        (N[Y-1,1]-h[Y-1,1]-h[Y,2])*log(exp(-c[1]*f[Y-1])*(1-S[1]*(1-exp(-c[2]*f[Y]))))
      
      # Last cohort: a single harvest observation
      L_AgeHar[Y]=sum(log((N[Y,1]+1-1:h[Y,1])/1:h[Y,1]))+
        h[Y,1]*log(1-exp(-c[1]*f[Y]))+
        (N[Y,1]-h[Y,1])*log(exp(-c[1]*f[Y]))
      
      # Cohorts already aged 2 or 3 in year 1
      L_AgeHar[Y+1]=sum(log((h[1,2]+h[2,3]+1-1:h[1,2])/1:h[1,2]))+
        h[1,2]*log(E[1,2]/(E[1,2]+E[2,3]))+
        h[2,3]*log(E[2,3]/(E[1,2]+E[2,3]))
      
      L_AgeHar[Y+2]=sum(log((N[1,3]+1-1:h[1,3])/1:h[1,3]))+
        h[1,3]*log(1-exp(-c[3]*f[1]))+
        (N[1,3]-h[1,3])*log(exp(-c[3]*f[1]))
    }
    
    ### Calculate catch-effort likelihood: binomial harvest of age class 1
    ### and of the pooled age classes 2-3 in each year
    for (i in 1:Y) {
      L_CatEff[i]=sum(log((N[i,1]+1-1:h[i,1])/1:h[i,1]))+
        (h[i,1])*log(1-exp(-c[1]*f[i]))+
        (N[i,1]-h[i,1])*log(exp(-c[1]*f[i]))+
        sum(log((sum(N[i,2:3])+1-1:sum(h[i,2:3]))/1:sum(h[i,2:3])))+
        sum(h[i,2:3])*log(1-exp(-c[2]*f[i]))+
        sum(N[i,2:3]-h[i,2:3])*log(exp(-c[2]*f[i]))
    }
    
    ### Calculate radio-telemetry likelihoods; formulation chosen by the
    ### enclosing-scope RadTel: 1 = natural survival only, 2 = harvest only,
    ### 3 = joint survival and harvest (0 keeps the zero blanks)
    if (RadTel==1) L_RadTel=log(choose(n,v))+v*log(1-S[1])+(n-v)*log(S[1])
    if (RadTel==2) L_RadTel=log(choose(n-v,u))+u*log(1-exp(-c[1]*f[(Y-y+1):Y]))+(n-v-u)*log(exp(-c[1]*f[(Y-y+1):Y]))
    if (RadTel==3) L_RadTel=log(choose(n,v)*choose(n-v,u))+v*log(1-S[1])+u*log(S[1]*(1-exp(-c[1]*f[(Y-y+1):Y])))+
      (n-v-u)*log(S[1]*exp(-c[1]*f[(Y-y+1):Y]))
    
    ### Return joint negative log-likelihood, or a large penalty when any
    ### component is non-finite (log of a non-positive argument)
    if (all(is.finite(c(L_AgeHar,L_CatEff,L_RadTel)))) return(-sum(c(L_AgeHar,L_CatEff,L_RadTel))) else return (10001)
  }
  
  ## Generate Monte Carlo harvest statistics; reject and re-simulate until
  ## the reported yearly totals stay within +/- 33% of the target size
  MonCar=StaPopRec_MonCar(c(OptVar))
  while (min(MonCar$N[,15])<0.67*OptVar[1] | max(MonCar$N[,15])>1.33*OptVar[1]) MonCar=StaPopRec_MonCar(c(OptVar))
  
  ## Import and clip harvest statistics, calculate population minimum:
  ## N_min[i] = harvest summed along cohort i's diagonal -- the fewest
  ## animals that cohort must have contained
  {
    f=MonCar$f; h=MonCar$E; Nt=MonCar$N
    N_min=rep(0,Y+13); {
      # Cohorts entering at age 1 in years 1..Y
      for (i in 1:Y) {
        j=i
        k=1
        while (j!=(Y+1) && k!=(14+1)) {
          N_min[i]=N_min[i]+h[j,k]
          j=j+1
          k=k+1
        }
      }
      # Cohorts already at ages 2..14 in year 1
      for (i in 2:14) {
        j=1
        k=i
        while (j!=(Y+1) && k!=(14+1)) {
          N_min[Y+i-1]=N_min[Y+i-1]+h[j,k]
          j=j+1
          k=k+1
        }
      }
    }
    # True (simulated) sizes of the same cohorts
    N_tru=c(MonCar$N[,1],MonCar$N[1,2:14])
    
    # Collapse ages 3-14 into one 3+ class to match the three-tier model
    h[,3]=rowSums(h[,3:14]); h=h[,1:3]
    Nt[,3]=rowSums(Nt[,3:14]); Nt=Nt[,1:3]
    N_min=c(N_min[1:(Y+1)],sum(N_min[(Y+2):(Y+13)]))
    N_tru=c(N_tru[1:(Y+1)],sum(N_tru[(Y+2):(Y+13)]))
  }
  
  ## Set optimization blanks (read via closure by the functions above)
  {
    N=matrix(NA,nrow=Y,ncol=3)
    P=matrix(NA,nrow=Y,ncol=3)
    E=matrix(NA,nrow=Y,ncol=3)
    L_AgeHar=rep(0,Y+2)
    L_CatEff=rep(0,Y)
    L_RadTel=rep(0,y)
  }
  
  ##  Perform model optimizations
  # Initial cohort guesses: the larger of the minimum cohort count and the
  # harvest scaled up by an assumed exploitation rate (c = 0.175)
  n_init=rep(0,Y+2)
  for (i in 1:(Y+2)) {
    n_init[i]=max(N_min[i],(c(h[,1],h[1,2:3])/(1-exp(-0.175*f[c(1:Y,1,1)])))[i])
  }
  # Baseline fit without telemetry data (RadTel = 0); nmkb() is bounded
  # Nelder-Mead from package dfoptim; parameters = Y+2 cohort sizes plus
  # c and S on the x10000 scale
  RadTel=0; mod=nmkb(par=c(n_init,1750,8250),fn=StaPopRec_JoiLik,lower=c(N_min,500,7000),
                     upper=c(rep(15000,Y+2),3500,9500),control=list(maxfeval=10000))
  tmp=c(mod$par,mod$value)
  
  # 36 further fits: telemetry age class a x telemetry duration y x
  # likelihood formulation RadTel. NB: this loop deliberately overwrites
  # the variables y, n, v, u that StaPopRec_JoiLik reads via closure.
  for (a in 1:3) {
    for (y in c(2,4,6,7)) {
      n=MonCar$n[(7-y+1):7,a]; v=MonCar$v[(7-y+1):7,a]; u=MonCar$u[(7-y+1):7,a]
      for (RadTel in 1:3) {
        mod=nmkb(par=c(n_init,1750,8250),fn=StaPopRec_JoiLik,lower=c(N_min,500,7000),
                 upper=c(rep(15000,Y+2),3500,9500),control=list(maxfeval=10000))
        tmp=cbind(tmp,c(mod$par,mod$value))
      }
    }
  }

  ## Calculate bias: mean relative error of each of the 37 reconstructions
  ## against the true (collapsed) abundances; each tmp column holds Y+4
  ## parameters plus the likelihood value, which is skipped by the "-1"
  {
    bias=rep(0,1+3*4*3)
    for (i in 1:length(bias)) {
      bias[i]=1/(3*Y)*sum((Nt-StaPopRec_PopEst(tmp[(1+(i-1)*(Y+4+1)):(i*(Y+4+1)-1)])$N)/Nt)
    }
  }
  
  ## Return results as one flat numeric vector: clipped harvest, effort,
  ## true abundance, telemetry matrices, all 37 parameter sets (with
  ## likelihood values), and the 37 bias values
  return (c(h,f,Nt,MonCar$n,MonCar$u,MonCar$v,c(tmp),bias))
}

######################################################################################
################################# Model Optimization #################################
######################################################################################

# Set simulation set characteristics: pick one of six scenarios defined by
# initial population size (Ni), recruitment (Nr), harvest probability (H),
# and survival probability (S). The three-letter codes give the levels of
# population size / harvest / survival (L = low, H = high).
{
  scen <- 1
  
  if (scen == 1) {        # LLL
    Ni <- 3000;  Nr <- 750;  H <- 0.10; S <- 0.75
  } else if (scen == 2) { # LHL
    Ni <- 4000;  Nr <- 1250; H <- 0.25; S <- 0.75
  } else if (scen == 3) { # LHH
    Ni <- 3000;  Nr <- 750;  H <- 0.25; S <- 0.90
  } else if (scen == 4) { # HLL
    Ni <- 10000; Nr <- 4000; H <- 0.10; S <- 0.75
  } else if (scen == 5) { # HHL
    Ni <- 14000; Nr <- 6000; H <- 0.25; S <- 0.75
  } else if (scen == 6) { # HHH
    Ni <- 10000; Nr <- 4000; H <- 0.25; S <- 0.90
  }
}

# Initialize the run: record the start time and fix the run dimensions
TimIni <- Sys.time()  # wall-clock stamp, used at the end to report run time
iter_max <- 10000     # total number of Monte Carlo iterations
core <- 4             # number of parallel workers
Y <- 8                # years of harvest data
y <- 7                # years of radio-telemetry data

# Define variable distributions and results matrix

## Historic hunting-effort series, clipped to the most recent Y values and
## rescaled to mean 1 (used as relative effort f inside the simulations)
HisHun <- local({
  effort <- c(1839, 1010, 1060, 807, 703, 990, 841, 590, 411)[(9 - Y + 1):9]
  effort / mean(effort)
})

## Blank results matrix: one row per iteration; columns hold the clipped
## data (7*Y), the telemetry matrices (9*y), the 37 parameter sets with
## their likelihood values (37*(Y+4+1)), and the 37 bias values
res <- matrix(NA, nrow = iter_max, ncol = 7 * Y + 9 * y + 37 * (Y + 4 + 1) + 37)

# Initialize multi-core processing
cl <- makeCluster(core, type="SOCK")
registerDoSNOW(cl)

# Loop through 10000 iterations, `core` experiments per pass
for (iter in 1:ceiling(iter_max/core)) {
  
  ## Concurrently run multiple iterations: each worker runs one complete
  ## simulate-and-reconstruct experiment; results are row-bound
  temp=foreach(i=1:core, .combine=rbind, .packages='dfoptim') %dopar% (StaPopRec_ModOpt(c(Ni,Nr,H,S,F,Y)))
  
  ## Add this batch of `core` result rows to the results matrix
  res[(core*(iter-1)+1):(core*iter),]=temp
  
  ## Plot excerpt of bias. With Y = 8 and y = 7 the bias block starts at
  ## column 7*Y+9*y+37*(Y+4+1)+1 = 601 (baseline, no-telemetry fit) and
  ## columns 635-637 are the three likelihood formulations with the full
  ## telemetry data. NOTE(review): these indices are hard-coded and only
  ## valid for Y = 8, y = 7 -- confirm if those settings ever change.
  {
    boxplot(res[,601],res[,635],res[,636],res[,637],outline=F)
  }
  
  ## Display progress
  print(paste(iter/ceiling(iter_max/core)*100,"% at ",Sys.time(),sep=""))
  
  ## Export partial results (checkpoint file, overwritten every pass)
  write.csv(res,file="temp.txt",row.names=FALSE)
}

# Evaluate model speed: elapsed wall-clock time for the whole run.
# The outer parentheses make the assignment visible, so the value is
# auto-printed exactly as the original bare `TimDif` line was.
(TimDif <- Sys.time() - TimIni)

# Terminate multi-core processing and release the workers
stopCluster(cl)

# Export model results to a scenario-specific file; scenario values outside
# 1..6 write nothing, matching the original if-chain. local() keeps the
# lookup table out of the global environment.
local({
  tags <- c("LLL", "LHL", "LHH", "HLL", "HHL", "HHH")
  if (scen %in% 1:6) {
    write.csv(res, file = sprintf("Scen%d_%s.txt", scen, tags[scen]),
              row.names = FALSE)
  }
})

# Check model results: quick interactive sanity checks -- values are only
# printed, not stored
{
  ## Population abundance range.
  ## NOTE(review): columns 4*Y+1 onward hold the flattened Nt matrix
  ## (column-major: Y years of age 1, then age 2, then the 3+ class, i.e.
  ## columns 33..56 for Y = 8), so each consecutive triple below sums three
  ## YEARS of one age class rather than the three age classes of one year,
  ## and the final triple (4*Y+25..27 = 57..59) indexes past Nt into the
  ## telemetry block -- confirm this is the intended check.
  c(min(res[,4*Y+1]+res[,4*Y+2]+res[,4*Y+3],res[,4*Y+4]+res[,4*Y+5]+res[,4*Y+6],
        res[,4*Y+7]+res[,4*Y+8]+res[,4*Y+9],res[,4*Y+10]+res[,4*Y+11]+res[,4*Y+12],
        res[,4*Y+13]+res[,4*Y+14]+res[,4*Y+15],res[,4*Y+16]+res[,4*Y+17]+res[,4*Y+18],
        res[,4*Y+19]+res[,4*Y+20]+res[,4*Y+21],res[,4*Y+22]+res[,4*Y+23]+res[,4*Y+24],
        res[,4*Y+25]+res[,4*Y+26]+res[,4*Y+27]),
    max(res[,4*Y+1]+res[,4*Y+2]+res[,4*Y+3],res[,4*Y+4]+res[,4*Y+5]+res[,4*Y+6],
        res[,4*Y+7]+res[,4*Y+8]+res[,4*Y+9],res[,4*Y+10]+res[,4*Y+11]+res[,4*Y+12],
        res[,4*Y+13]+res[,4*Y+14]+res[,4*Y+15],res[,4*Y+16]+res[,4*Y+17]+res[,4*Y+18],
        res[,4*Y+19]+res[,4*Y+20]+res[,4*Y+21],res[,4*Y+22]+res[,4*Y+23]+res[,4*Y+24],
        res[,4*Y+25]+res[,4*Y+26]+res[,4*Y+27]))
  
  ## Comparison of likelihoods under maximum data conditions.
  ## Column 7*Y+9*y+37*(Y+4+1)+1 is the bias of the no-telemetry baseline;
  ## offsets 24+11..24+13 are fits 35-37 (full telemetry, formulations 1-3).
  ### Mean bias
  mean(res[,(7*Y+9*y+37*(Y+4+1)+1)])
  mean(res[,(7*Y+9*y+37*(Y+4+1)+24+11)])
  mean(res[,(7*Y+9*y+37*(Y+4+1)+24+12)])
  mean(res[,(7*Y+9*y+37*(Y+4+1)+24+13)])
  
  ### 95% CI (empirical percentile interval of the bias across iterations)
  c(quantile(res[,(7*Y+9*y+37*(Y+4+1)+1)],0.025),quantile(res[,(7*Y+9*y+37*(Y+4+1)+1)],0.975))
  c(quantile(res[,(7*Y+9*y+37*(Y+4+1)+24+11)],0.025),quantile(res[,(7*Y+9*y+37*(Y+4+1)+24+11)],0.975))
  c(quantile(res[,(7*Y+9*y+37*(Y+4+1)+24+12)],0.025),quantile(res[,(7*Y+9*y+37*(Y+4+1)+24+12)],0.975))
  c(quantile(res[,(7*Y+9*y+37*(Y+4+1)+24+13)],0.025),quantile(res[,(7*Y+9*y+37*(Y+4+1)+24+13)],0.975))
  
  ### Range
  c(min(res[,(7*Y+9*y+37*(Y+4+1)+1)]),max(res[,(7*Y+9*y+37*(Y+4+1)+1)]))
  c(min(res[,(7*Y+9*y+37*(Y+4+1)+24+11)]),max(res[,(7*Y+9*y+37*(Y+4+1)+24+11)]))
  c(min(res[,(7*Y+9*y+37*(Y+4+1)+24+12)]),max(res[,(7*Y+9*y+37*(Y+4+1)+24+12)]))
  c(min(res[,(7*Y+9*y+37*(Y+4+1)+24+13)]),max(res[,(7*Y+9*y+37*(Y+4+1)+24+13)]))
}