In Git you can add a submodule to a repository: essentially a sub-repository embedded in your main repository, pinned to a specific commit of its own history. This can be very useful. A couple of use cases for submodules (a minimal example of adding one follows the list):
- Separating a big codebase into multiple repositories.
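For example, adding a dependency as a submodule and initializing it after a fresh clone looks like this; the repository URL and path here are placeholders, not a real project:

```sh
# add a repository as a submodule under libs/ (URL and path are placeholders)
git submodule add https://github.com/example/some-library.git libs/some-library
git commit -m "Add some-library as a submodule"

# after cloning the main repository, fetch the submodule contents
git submodule update --init --recursive
```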
```r
library(LaCroixColoR)
library(sf)
library(fasterize)
library(rayshader)
library(raster)
library(exactextractr)
library(rayrender)

# load raster into R
elevation <- raster("Olympus_Mons_ortho-image.tif")
```
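The snippet ends right after reading the elevation raster. Given the rayshader/rayrender imports, the usual next steps convert the raster to a matrix and shade it; the sketch below is my own guess at the continuation, and the texture choice is a placeholder:

```r
# sketch only: a typical rayshader continuation, not part of the original snippet
elmat <- raster_to_matrix(elevation)   # rayshader works on bare elevation matrices

elmat %>%
  sphere_shade(texture = "desert") %>% # placeholder texture
  plot_map()
```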
```r
library(shiny)

xy <- c(784, 479)  # output of grDevices::dev.size("px")
url <- "https://www.youtube.com/watch?v=Ef2jmf2vy00"  # copy yt link here
url <- gsub("watch\\?v=", "embed/", url)  # rewrite to the embeddable URL

ui <- fluidPage(
  HTML(paste0('<iframe width="', xy[1], '" height="', xy[2],
              '" src="', url, '" frameborder="0"></iframe>'))
)

server <- function(input, output, session) {
}

shinyApp(ui, server)
```
```python
import numpy as np
import pandas as pd

def sklearn_tree_to_ee_string(estimator, feature_names):
    # extract the information needed to build the tree string
    n_nodes = estimator.tree_.node_count
    children_left = estimator.tree_.children_left
    children_right = estimator.tree_.children_right
    feature_idx = estimator.tree_.feature
```
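The conversion function is cut off above; for context, here is a sketch of how such a helper is typically invoked once complete. The data frame, column names, and labels below are all hypothetical:

```python
# hypothetical usage, assuming the rest of the conversion function exists
from sklearn.tree import DecisionTreeClassifier

df = pd.DataFrame(np.random.rand(100, 3), columns=["red", "green", "nir"])
labels = np.random.randint(0, 2, size=100)

est = DecisionTreeClassifier(max_depth=3).fit(df, labels)
tree_string = sklearn_tree_to_ee_string(est, feature_names=list(df.columns))
```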
```r
library(tidyverse)

# full model and two sub-models built from disjoint predictor groups
mod_full <- lm(mpg ~ cyl + disp + hp + drat + wt + qsec, data = mtcars)
mod_g1   <- lm(mpg ~ cyl + disp + hp, data = mtcars)
mod_g2   <- lm(mpg ~ drat + wt + qsec, data = mtcars)

dat <- data.frame(full_r2 = summary(mod_full)$r.squared,
                  g1_r2   = summary(mod_g1)$r.squared,
                  g2_r2   = summary(mod_g2)$r.squared) %>%
  mutate(g1_ind = full_r2 - g2_r2,
         g2_ind = full_r2 - g1_r2)
```
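Each `*_ind` column is the R² a group adds on top of the other group, i.e. its unique contribution. The variance the two groups explain jointly can be recovered with the same arithmetic; this extra line is my own addition, not part of the original snippet:

```r
# shared variance: overlap of the two predictor groups (my addition)
dat <- dat %>% mutate(common = g1_r2 + g2_r2 - full_r2)
```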
```r
library(stringr)
library(spdep)
library(rgdal)
library(magrittr)
library(ggplot2)
library(sf)

#======================================================
```
```xml
<!DOCTYPE qgis_style>
<qgis_style version="0">
  <symbols/>
  <colorramps>
    <colorramp type="gradient" name="Magma">
      <prop k="color1" v="0,0,3,255"/>
      <prop k="color2" v="251,252,191,255"/>
      <prop k="stops" v="0.04;4,4,21,255:0.08;14,10,42,255:0.12;26,16,65,255:0.16;40,17,89,255:0.20;57,15,110,255:0.23;74,16,121,255:0.27;90,21,126,255:0.31;105,28,128,255:0.35;121,34,129,255:0.39;137,40,129,255:0.43;153,45,127,255:0.47;169,50,124,255:0.51;185,55,120,255:0.55;202,62,114,255:0.59;217,70,106,255:0.62;230,81,98,255:0.66;240,96,93,255:0.70;247,113,91,255:0.74;251,132,96,255:0.78;253,151,104,255:0.82;254,170,116,255:0.86;254,188,130,255:0.90;253,207,146,255:0.94;253,225,163,255:0.98;252,243,181,255"/>
    </colorramp>
    <colorramp type="gradient" name="Inferno">
```
```python
'''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
It uses data that can be downloaded at:
https://www.kaggle.com/c/dogs-vs-cats/data
In our setup, we:
- created a data/ folder
- created train/ and validation/ subfolders inside data/
- created cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-999 in data/train/cats
'''
```
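Given that directory layout, the script in the blog post streams images with Keras generators. A minimal sketch, assuming the standard `ImageDataGenerator` API; the target size and batch size are placeholder values, not taken from the original script:

```python
# minimal sketch: stream labeled images from the data/ layout described above
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)  # scale pixel values to [0, 1]
train_generator = datagen.flow_from_directory(
    "data/train",
    target_size=(150, 150),  # placeholder: resize every image to 150x150
    batch_size=32,           # placeholder batch size
    class_mode="binary")     # binary labels: cats vs. dogs
```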
```r
# Test of significance; takes the same arguments as t.test().
signif.test <- function(x, ...) {
  p <- t.test(x, ...)$p.value
  # List of p excuses retrieved from http://mchankins.wordpress.com/2013/04/21/still-not-significant-2/
  p_excuses <- c(
    "(barely) not statistically significant <p>",
    "a barely detectable statistically significant difference <p>",
    "a borderline significant trend <p>",
    "a certain trend toward significance <p>",
```
| Package | Ordinal | Multinomial | Partial proportional odds | Scale Effects | Random Effects |
|---|---|---|---|---|---|
| nnet | no | multinom | N/A | N/A | No |
| MASS | polr | No | No | No | No |
| ordinal | clm/clm2 | all X's in nominal (may not converge) | offending X's in nominal | via scale | clmm/clmm2 |
| VGAM | yes | ? | ? | ? | No |
| MCMCglmm | yes | ? | ? | ? | ? |
| Mixcat | yes | ? | ? | npmlt | ? |
| mlogit | ? | mlogit | N/A | N/A | |
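To make the comparison concrete, here is a plain proportional-odds fit with `MASS::polr`, using the `housing` data that ships with MASS; this example is illustrative and not part of the original table:

```r
# illustrative proportional-odds model (cumulative logit) via MASS::polr
library(MASS)
fit <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)
summary(fit)
```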