```python
import torch, torch.nn as nn, torch.nn.functional as F
import numpy as np
import torch.optim as optim

# tied autoencoder using off-the-shelf nn modules
class TiedAutoEncoderOffTheShelf(nn.Module):
    def __init__(self, inp, out, weight):
        super().__init__()
        self.encoder = nn.Linear(inp, out, bias=False)
        self.decoder = nn.Linear(out, inp, bias=False)
        # tie the weights: the decoder reuses the encoder weight, transposed
        self.encoder.weight.data = weight.clone()
        self.decoder.weight.data = self.encoder.weight.data.transpose(0, 1)

    def forward(self, x):
        encoded = self.encoder(x)
        return encoded, self.decoder(encoded)
```
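A quick usage sketch of the class above; the 100→32 shapes, the random initial weight, and the MSE objective are illustrative choices, not taken from the original snippet:

```python
# hypothetical shapes: 100-dim inputs, 32-dim code
init_weight = torch.randn(32, 100)   # nn.Linear(100, 32) stores its weight as (out_features, in_features)
tied = TiedAutoEncoderOffTheShelf(100, 32, init_weight)
optimizer = optim.SGD(tied.parameters(), lr=1e-2)

x = torch.randn(8, 100)              # a batch of 8 inputs
code, recon = tied(x)
loss = F.mse_loss(recon, x)

optimizer.zero_grad()
loss.backward()
optimizer.step()
```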
```python
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

seqs = ['gigantic_string', 'tiny_str', 'medium_str']

# make <pad> idx 0
vocab = ['<pad>'] + sorted(set(''.join(seqs)))

# make model
```
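The snippet breaks off at the "# make model" comment. Below is a minimal sketch of how a pack/pad demo like this typically continues; the embedding size (4) and LSTM hidden size (5) are illustrative values, not taken from the original:

```python
# map each character to its vocab index
vectorized_seqs = [[vocab.index(tok) for tok in seq] for seq in seqs]

# toy model: embedding dim 4, LSTM hidden size 5 (illustrative sizes)
embed = nn.Embedding(len(vocab), 4)
lstm = nn.LSTM(input_size=4, hidden_size=5, batch_first=True)

# pad everything to the longest sequence, remembering the true lengths
seq_lengths = torch.LongTensor([len(s) for s in vectorized_seqs])
seq_tensor = torch.zeros(len(vectorized_seqs), seq_lengths.max().item(), dtype=torch.long)
for idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)):
    seq_tensor[idx, :seqlen] = torch.LongTensor(seq)

# pack_padded_sequence expects sequences sorted by length (descending)
seq_lengths, perm_idx = seq_lengths.sort(0, descending=True)
seq_tensor = seq_tensor[perm_idx]

packed_input = pack_padded_sequence(embed(seq_tensor), seq_lengths, batch_first=True)
packed_output, (ht, ct) = lstm(packed_input)
output, output_lengths = pad_packed_sequence(packed_output, batch_first=True)
```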
| """ Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """ | |
| import numpy as np | |
| import cPickle as pickle | |
| import gym | |
| # hyperparameters | |
| H = 200 # number of hidden layer neurons | |
| batch_size = 10 # every how many episodes to do a param update? | |
| learning_rate = 1e-4 | |
| gamma = 0.99 # discount factor for reward |
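The gamma defined above feeds the reward-discounting step of the policy-gradient update. A minimal sketch of that helper, written here in the usual Pong/policy-gradient style for illustration rather than quoted from the full script:

```python
def discount_rewards(r, gamma=0.99):
    """Walk backwards through a 1-D array of rewards and compute discounted returns."""
    discounted_r = np.zeros_like(r, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(len(r))):
        if r[t] != 0:
            running_add = 0.0  # Pong-specific: a nonzero reward marks a game boundary, so reset
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r

# example: two points scored/lost in one episode
returns = discount_rewards(np.array([0.0, 0.0, 1.0, 0.0, -1.0]), gamma)
```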
```lua
-- multiple learning rates per network: optimizes two copies of a model network
-- and checks that optimization steps (2) and (3) produce the same weights/parameters.
require 'torch'
require 'nn'
require 'optim'

torch.setdefaulttensortype('torch.FloatTensor')

-- (1) Define a model for this example.
local model = nn.Sequential()
model:add(nn.Linear(10,20))
```
```lua
-- suppose you have a model called model
lrs_model = model:clone()
lrs = lrs_model:getParameters()
lrs:fill(1) -- set every parameter's learning-rate factor to 1
-- now let's set the learning-rate factor of the bias of module 5 to 2
lrs_model:get(5).bias:fill(2)
-- same thing for the weights of module 2, let's set them to 3
lrs_model:get(2).weight:fill(3)

-- the flat vector lrs now holds one factor per parameter; optim.sgd scales the
-- base learningRate element-wise by these factors when passed as learningRates
sgd_config = {
   learningRate = 1e-3,
   learningRates = lrs,
}
```
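For comparison with the Lua/Torch7 trick above, the same per-layer learning-rate idea in PyTorch (used elsewhere in this collection) is usually expressed through optimizer parameter groups. The network, the 2x/3x factors, and the base rate below are illustrative, not taken from the original:

```python
import torch.nn as nn
import torch.optim as optim

net = nn.Sequential(nn.Linear(10, 20), nn.Tanh(), nn.Linear(20, 1))

# one parameter group per distinct learning rate; ungrouped entries use the base lr
optimizer = optim.SGD([
    {"params": [net[0].weight], "lr": 3e-3},   # analogue of "weights of module 2 at 3x"
    {"params": [net[2].bias],   "lr": 2e-3},   # analogue of "bias of module 5 at 2x"
    {"params": [net[0].bias, net[2].weight]},  # these fall back to the base lr
], lr=1e-3)
```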
```python
from __future__ import division
import string
import math

tokenize = lambda doc: doc.lower().split(" ")

document_0 = "China has a strong economy that is growing at a rapid pace. However politically it differs greatly from the US Economy."
document_1 = "At last, China seems serious about confronting an endemic problem: domestic violence and corruption."
document_2 = "Japan's prime minister, Shinzo Abe, is working towards healing the economic turmoil in his own country for his view on the future of his people."
document_3 = "Vladimir Putin is working hard to fix the economy in Russia as the Ruble has tumbled."
```
| """Information Retrieval metrics | |
| Useful Resources: | |
| http://www.cs.utexas.edu/~mooney/ir-course/slides/Evaluation.ppt | |
| http://www.nii.ac.jp/TechReports/05-014E.pdf | |
| http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf | |
| http://hal.archives-ouvertes.fr/docs/00/72/67/60/PDF/07-busa-fekete.pdf | |
| Learning to Rank for Information Retrieval (Tie-Yan Liu) | |
| """ | |
| import numpy as np |
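The module body is not shown here. As an illustration of the kind of functions such a file usually contains, here is a minimal sketch of precision@k and average precision over binary relevance vectors; this is the common textbook formulation, not necessarily the original author's exact code:

```python
def precision_at_k(r, k):
    """Precision@k: fraction of the top-k results that are relevant (r is 0/1 per rank)."""
    r = np.asarray(r)[:k]
    return np.mean(r)

def average_precision(r):
    """Average of precision@k taken at each rank k where a relevant result appears."""
    r = np.asarray(r)
    precisions = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
    return np.mean(precisions) if precisions else 0.0

# example: relevant results at ranks 1 and 3
print(average_precision([1, 0, 1, 0, 0]))  # (1/1 + 2/3) / 2 ≈ 0.83
```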
```bash
#!/bin/bash
# Script for installing tmux on systems where you don't have root access.
# tmux will be installed in $HOME/local/bin.
# It's assumed that wget and a C/C++ compiler are installed.

# exit on error
set -e

TMUX_VERSION=1.8
```