Metadata for "Velocity of climate change grids for North America" The raster data on this page represents multivariate climatic velocity for North America. Velocity was derived from multivariate distance along axes 1 and 2 of a PCA of 11 bioclimatic variables: MAT: mean annual temperature (°C) MWMT: mean temperature of the warmest month (°C) MCMT: mean temperature of the coldest month (°C) TD: difference between MCMT and MWMT, as a measure of continentality (°C) MAP: mean annual precipitation (mm) [as log10(MAP+1)] MSP: mean summer (May to Sep) precipitation (mm) [as log10(MSP+1)] MWP: mean winter (Oct to Apr) precipitation (mm) [as log10(MWP+1)] DD5: degree-days above 5°C (growing degree days) [as sqrt(DD5)] NFFD: the number of frost-free days Eref: Hargreave's reference evaporation CMD: Hargreave's climatic moisture index [as log10(CMD+1)] The PCA was developed on current climate and then predicted to future climate: pcamodel<-princomp(current_climate, cor=T) currentscores<-predict(pcamodel) futurescores<-predict(pcamodel,newdata=future_climate) We used the code in the R script below to model velocity. All calculations were performed using R statistical software (http://www.r-project.org/). Please see http://adaptwest.databasin.org for additional spatial data and documents related to this study. ############################################################################ File Naming convention: [DIRECTION][TYPE}_[GCM]_[SCENARIO]_[YEAR].asc or .tif [DIRECTION] - Type of analysis fw forward (present to future) bw backward (future to present) [TYPE] - Type of data disp proportion of runs with disappearing or no-analog climates vel velocity (km/year) [GCM] - Name of GCM CanESM2 CCSM4 CNRM_CM5 Ensemble GFDL_CM3 HadGEM2_ES INM_CM4 IPSL_CM5A_MR MPI_ESM_LR [SCENARIO] IPCC scenario rcp45 Moderate emissions scenario rcp85 High emissions scenario [YEAR] Period of measurement 2055 Current (1981-2010) to 2041-2070 2085 Current (1981-2010) to 2071-2100 *.asc The file extension .asc denotes ESRI ASCII grid format. *.tif The file extension .tif denotes the GEOTIFF format. ############################################################################ Contents of *.asc files: First six lines: NCOLS = number of columns in grid NROWS = number of rows in grid XLLCORNER = x coordinate of lower left corner (corner, not center of cell) YLLCORNER = y coordinate of lower left corner (corner, not center of cell) CELLSIZE = length of a single side of each cell, in meters NODATA_value = The value that denotes missing data. Remaining lines: Data separated by spaces. A carriage return (\n) denotes the next row of the grid. ############################################################################ Coordinate system and projection parameters Projection: Lambert_Azimuthal_Equal_Area False_Easting: 0.0 False_Northing: 0.0 Central_Meridian: -100.0 Latitude_Of_Origin: 45.0 Linear Unit: Meter (1.0) Geographic Coordinate System: GCS_WGS_1984 Angular Unit: Degree (0.0174532925199433) Prime Meridian: Greenwich (0.0) Datum: D_WGS_1984 Spheroid: WGS_1984 Semimajor Axis: 6378137.0 Semiminor Axis: 6356752.314245179 Inverse Flattening: 298.257223563 Projection CYLINDRICAL Zunits NO Units METERS Spheroid SPHERE Xshift 0.0000000000 Yshift 0.0000000000 Parameters 1 /* Projection type < 1 | 2 | 3 > 0 0 0.000 /* Longitude of central meridian 30 0 0.000 /* Latitude of standard parallel Data citation: AdaptWest Project. 2015. Gridded climate velocity data for North America at 1km resolution. Available at adaptwest.databasin.org. 
Reference:

Hamann, A., Roberts, D.R., Barber, Q.E., Carroll, C., and Nielsen, S.E. 2015.
Velocity of climate change algorithms for guiding conservation and management.
Global Change Biology 21:997-1004. DOI: 10.1111/gcb.12736.

Carroll, C., Lawler, J.J., Roberts, D.R., and Hamann, A. 2015. Biotic and
climatic velocity identify contrasting areas of vulnerability to climate
change. PLoS ONE 10(10): e0140486.

######## R script for calculating velocity ########

library(MASS)
library(SDMTools)
library(raster)
library(spatial.tools)
library(yaImpute)
library(doParallel)

gcm.list<-c("CanESM2","CCSM4","CNRM-CM5","GFDL-CM3","HadGEM2-ES","INM-CM4","IPSL-CM5A-MR","MPI-ESM-LR")
period.list<-c("rcp45_2055","rcp85_2055","rcp45_2085","rcp85_2085")

# PCA scores of the current (1981-2010) climate, axes 1 and 2
cpca1<-asc2dataframe("c:\\workspace\\GCM_ascs_laz\\pca1_current_812010laz.asc")
cpca2<-asc2dataframe("c:\\workspace\\GCM_ascs_laz\\pca2_current_812010laz.asc")
currentscores2<-cbind(cpca1[,3],cpca2[,3])

############ FORWARD VELOCITY ############
for (j in 1:length(gcm.list)){
  for(i in 1:length(period.list)){
    gcmname<-gcm.list[j]
    period<-period.list[i]

    # PCA scores of the projected future climate for this GCM and period
    fpca1<-asc2dataframe(paste("c:\\workspace\\GCM_ascs_laz\\pca1_",gcmname,"_",period,"laz.asc",sep=""))
    fpca2<-asc2dataframe(paste("c:\\workspace\\GCM_ascs_laz\\pca2_",gcmname,"_",period,"laz.asc",sep=""))
    futurescores2<-cbind(fpca1[,3],fpca2[,3])

    ### Forward velocity: use first 2 PCA axes, fixed bin width

    ### Create the bins
    min1<-min(min(currentscores2[,1]),min(futurescores2[,1]))
    min2<-min(min(currentscores2[,2]),min(futurescores2[,2]))
    max1<-max(max(currentscores2[,1]),max(futurescores2[,1]))
    max2<-max(max(currentscores2[,2]),max(futurescores2[,2]))

    ## Make bins and the per-iteration offsets of the bin edges
    ## (bin width 0.25, 40 iterations)
    bin.width<-0.25
    b.adj<-seq(-0.5*bin.width,0.5*bin.width,length.out=40)
    badjlength<-seq(1,length(b.adj),1)
    b1<-seq(min1,max1,by=bin.width)   # breaks for PC1
    b2<-seq(min2,max2,by=bin.width)   # breaks for PC2

    x <- cpca1[,2]  # vector of grid cell x coordinates
    y <- cpca1[,1]  # vector of grid cell y coordinates

    # Calculate velocity in parallel, one bin offset per worker
    n.proc<-20
    cl <- makeCluster(n.proc)
    registerDoParallel(cl)
    cfun <- function(vel,velstack) addLayer(vel,velstack)

    velstack<-foreach(a=badjlength, .combine='cfun', .inorder=F,
                      .packages=c('raster','yaImpute')) %dopar% {

      cclassespc1<- findInterval(currentscores2[,1],b1+b.adj[a])  # convert current PC1 scores to bins via breaks
      cclassespc2<- findInterval(currentscores2[,2],b2+b.adj[a])  # convert current PC2 scores to bins via breaks
      fclassespc1<- findInterval(futurescores2[,1],b1+b.adj[a])
      fclassespc2<- findInterval(futurescores2[,2],b2+b.adj[a])

      cbins<-cbind(x,y,cclassespc1,cclassespc2)
      fbins<-cbind(x,y,fclassespc1,fclassespc2)

      # Assign a single bin ID to each unique combination of PC1/PC2 bins
      # present in the current climate
      uniquec<-unique(cbins[,3:4])
      uniquec<-cbind(seq(1,length(uniquec[,1]),1),uniquec)
      cbins<-merge(cbins,uniquec,by.x=c(3:4),by.y=c(2:3))
      fbins<-merge(fbins,uniquec,by.x=c(3:4),by.y=c(2:3))
      cbins<-cbins[,c(5,3,4)]
      fbins<-fbins[,c(5,3,4)]
      cbins<-cbind(seq(1,length(cbins[,1]),1),cbins)
      fbins<-cbind(seq(1,length(fbins[,1]),1),fbins)
      names(cbins)<-c("ID","bin","x","y")
      names(fbins)<-c("ID","bin","x","y")

      ## kNN (yaImpute) loop over bins
      u <- uniquec[order(uniquec[,1]),1]
      d <- data.frame(matrix(nrow=0,ncol=4))  # empty data frame for distances
      names(d)<-c("ID","x","y","dist")
      for(b in u){
        p.xy <- cbins[which(cbins[,2]==b),c(1,3,4)]
        f.xy <- fbins[which(fbins[,2]==b),c(1,3,4)]
        if(nrow(f.xy)>0){
          # distance from each current cell to its nearest future cell in the same climate bin
          d.ann <- as.data.frame(ann(as.matrix(f.xy[,-1]), as.matrix(p.xy[,-1]),
                                     k=1, verbose=F)$knnIndexDist)
          d1 <- cbind(p.xy, round(sqrt(d.ann[,2])))
        } else {
          d1 <- cbind(p.xy, rep(-9999,nrow(p.xy)))  # no future analog: flag as disappearing climate
        }
        names(d1)<-names(d)
        d<-rbind(d,d1)
      }
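      # Note: per the yaImpute documentation, ann() reports squared Euclidean
      # distances in $knnIndexDist, hence the sqrt() above. At this point d
      # holds, for every current-climate cell, the distance (in metres, the map
      # unit of the LAEA projection) to the nearest future cell in the same
      # two-dimensional climate bin, with -9999 flagging bins that have no
      # future analog.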
      d<-d[order(d[,1]),]
      vel<-rasterFromXYZ(d[,c(2,3,4)])
    }
    stopCluster(cl)

    # Average velocity across the 40 bin-offset runs: recode the -9999 flag to
    # NA, take the mean distance, and convert from metres to km/year
    # (60-year interval for the 2055 period, 90-year interval for 2085)
    velstacktmp<-velstack
    fun <- function(x) { x[x==-9999] <-NA; return(x) }
    velstacktmp<-calc(velstacktmp,fun=fun)
    velstackmean<-calc(velstacktmp,fun=mean,na.rm=TRUE)
    if(i<3){
      velstackmean<-velstackmean/(1000*60)
      writeLines("60")
    } else {
      velstackmean<-velstackmean/(1000*90)
      writeLines("90")
    }

    # Proportion of runs flagged as disappearing climate: recode distances to 0
    # and the -9999 flag to 1, then average across runs
    dispmeantmp<-velstack
    fun <- function(x) { x[x>-9998] <-0; return(x) }
    dispmean<-calc(dispmeantmp,fun=fun)
    fun <- function(x) { x[x==-9999] <-1; return(x) }
    dispmean<-calc(dispmean,fun=fun)
    dispmean<-calc(dispmean,fun=mean,na.rm=TRUE)

    writeRaster(dispmean,file=paste("fwdisp_",gcmname,"_",period,".asc",sep=""),NAflag=-9999)
    writeRaster(velstackmean,file=paste("fwvel_",gcmname,"_",period,".asc",sep=""),NAflag=-9999)

    rm(velstack); rm(velstacktmp); rm(velstackmean); rm(dispmeantmp); rm(dispmean)
    gc()
    removeTmpFiles(h=0)
  }
}

############ BACKWARD VELOCITY ############
for (j in 1:length(gcm.list)){
  for(i in 1:length(period.list)){
    gcmname<-gcm.list[j]
    period<-period.list[i]

    # PCA scores of the projected future climate for this GCM and period
    fpca1<-asc2dataframe(paste("c:\\workspace\\GCM_ascs_laz\\pca1_",gcmname,"_",period,"laz.asc",sep=""))
    fpca2<-asc2dataframe(paste("c:\\workspace\\GCM_ascs_laz\\pca2_",gcmname,"_",period,"laz.asc",sep=""))
    futurescores2<-cbind(fpca1[,3],fpca2[,3])

    ### Backward velocity: use first 2 PCA axes, fixed bin width

    ### Create the bins
    min1<-min(min(currentscores2[,1]),min(futurescores2[,1]))
    min2<-min(min(currentscores2[,2]),min(futurescores2[,2]))
    max1<-max(max(currentscores2[,1]),max(futurescores2[,1]))
    max2<-max(max(currentscores2[,2]),max(futurescores2[,2]))

    ## Make bins and the per-iteration offsets of the bin edges
    ## (bin width 0.25, 40 iterations)
    bin.width<-0.25
    b.adj<-seq(-0.5*bin.width,0.5*bin.width,length.out=40)
    badjlength<-seq(1,length(b.adj),1)
    b1<-seq(min1,max1,by=bin.width)   # breaks for PC1
    b2<-seq(min2,max2,by=bin.width)   # breaks for PC2

    x <- cpca1[,2]  # vector of grid cell x coordinates
    y <- cpca1[,1]  # vector of grid cell y coordinates

    # Calculate velocity in parallel, one bin offset per worker
    n.proc<-20
    cl <- makeCluster(n.proc)
    registerDoParallel(cl)
    cfun <- function(vel,velstack) addLayer(vel,velstack)

    velstack<-foreach(a=badjlength, .combine='cfun', .inorder=F,
                      .packages=c('raster','yaImpute')) %dopar% {

      cclassespc1<- findInterval(currentscores2[,1],b1+b.adj[a])  # convert current PC1 scores to bins via breaks
      cclassespc2<- findInterval(currentscores2[,2],b2+b.adj[a])  # convert current PC2 scores to bins via breaks
      fclassespc1<- findInterval(futurescores2[,1],b1+b.adj[a])
      fclassespc2<- findInterval(futurescores2[,2],b2+b.adj[a])

      cbins<-cbind(x,y,cclassespc1,cclassespc2)
      fbins<-cbind(x,y,fclassespc1,fclassespc2)

      # Assign a single bin ID to each unique combination of PC1/PC2 bins
      # present in the future climate
      uniquef<-unique(fbins[,3:4])
      uniquef<-cbind(seq(1,length(uniquef[,1]),1),uniquef)
      cbins<-merge(cbins,uniquef,by.x=c(3:4),by.y=c(2:3))
      fbins<-merge(fbins,uniquef,by.x=c(3:4),by.y=c(2:3))
      cbins<-cbins[,c(5,3,4)]
      fbins<-fbins[,c(5,3,4)]
      cbins<-cbind(seq(1,length(cbins[,1]),1),cbins)
      fbins<-cbind(seq(1,length(fbins[,1]),1),fbins)
      names(cbins)<-c("ID","bin","x","y")
      names(fbins)<-c("ID","bin","x","y")

      ## kNN (yaImpute) loop over bins
      u <- uniquef[order(uniquef[,1]),1]
      d <- data.frame(matrix(nrow=0,ncol=4))  # empty data frame for distances
      names(d)<-c("ID","x","y","dist")
      for(b in u){
        p.xy <- cbins[which(cbins[,2]==b),c(1,3,4)]
        f.xy <- fbins[which(fbins[,2]==b),c(1,3,4)]
        if(nrow(p.xy)>0){
          # distance from each future cell to its nearest current cell in the same climate bin
          d.ann <- as.data.frame(ann(as.matrix(p.xy[,-1]), as.matrix(f.xy[,-1]),
                                     k=1, verbose=F)$knnIndexDist)
          d1 <- cbind(f.xy, round(sqrt(d.ann[,2])))
        } else {
          d1 <- cbind(f.xy, rep(-9999,nrow(f.xy)))  # no current analog: flag as no-analog climate
        }
        names(d1)<-names(d)
        d<-rbind(d,d1)
      }
      d<-d[order(d[,1]),]
      vel<-rasterFromXYZ(d[,c(2,3,4)])
    }
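    # velstack now holds one backward-velocity layer per bin offset; the
    # post-processing below mirrors the forward section. The divisors 60 and 90
    # used below presumably correspond to the number of years between the
    # midpoint of the 1981-2010 baseline and the midpoints of the 2041-2070
    # and 2071-2100 periods, respectively.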
    stopCluster(cl)

    velstacktmp<-velstack
    fun <- function(x) { x[x==-9999] <-NA; return(x) }
    velstacktmp<-calc(velstacktmp,fun=fun)
    velstackmean<-calc(velstacktmp,fun=mean,na.rm=TRUE)
    if(i<3){
      velstackmean<-velstackmean/(1000*60)
      writeLines("60")
    } else {
      velstackmean<-velstackmean/(1000*90)
      writeLines("90")
    }

    dispmeantmp<-velstack
    fun <- function(x) { x[x>-9998] <-0; return(x) }
    dispmean<-calc(dispmeantmp,fun=fun)
    fun <- function(x) { x[x==-9999] <-1; return(x) }
    dispmean<-calc(dispmean,fun=fun)
    dispmean<-calc(dispmean,fun=mean,na.rm=TRUE)

    writeRaster(dispmean,file=paste("bwdisp_",gcmname,"_",period,".asc",sep=""),NAflag=-9999)
    writeRaster(velstackmean,file=paste("bwvel_",gcmname,"_",period,".asc",sep=""),NAflag=-9999)

    rm(velstack); rm(velstacktmp); rm(velstackmean); rm(dispmeantmp); rm(dispmean)
    gc()
    removeTmpFiles(h=0)
  }
}
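The file naming convention above also lists "Ensemble" grids, whereas the
script writes one velocity grid per GCM. Below is a minimal sketch of how the
per-GCM outputs could be averaged into an ensemble-mean grid; the file names
follow the convention above, but whether the distributed Ensemble grids were
produced exactly this way is an assumption.

## Sketch (assumption, not part of the original workflow): ensemble mean of
## the per-GCM forward velocity grids for one scenario and period, written out
## under the "Ensemble" name used in the file naming convention.
library(raster)
period<-"rcp85_2085"
vel.files<-paste("fwvel_",gcm.list,"_",period,".asc",sep="")  # outputs of the forward loop above
vel.stack<-stack(vel.files)
vel.ens<-calc(vel.stack,fun=mean,na.rm=TRUE)
writeRaster(vel.ens,file=paste("fwvel_Ensemble_",period,".asc",sep=""),NAflag=-9999)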