Version: 1.9.8
Platform: x86_64
First, update the system and install the required packages:
sudo apt-get update
sudo apt-get install build-essential chrpath libssl-dev libxft-dev
# 1. Install CUDA
# Preparation
sudo apt-get update
sudo apt-get upgrade
sudo apt-get install tmux build-essential gcc g++ make binutils
sudo apt-get install software-properties-common
# Download the CUDA toolkit (NVIDIA driver included)
cd ~/Downloads
# Download CUDA 8.0 (not 9.0) from https://developer.nvidia.com/cuda-80-ga2-download-archive
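Once the toolkit and driver are in place, a quick way to check that the GPU is actually usable is to run a trivial op with device placement logging. This is a hedged sketch, assuming TensorFlow (the framework used in the later snippets) is already installed; ops reported on `gpu:0` mean CUDA is working.

```python
import tensorflow as tf

a = tf.constant([1.0, 2.0, 3.0], name='a')
b = tf.constant([4.0, 5.0, 6.0], name='b')
c = a * b

# log_device_placement=True prints the device (cpu:0 or gpu:0) chosen for each op.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    print(sess.run(c))
```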
private static class Vertex {
    private int uniqueLabel;
    public Vertex(int uniqueLabel) {
        super();
        this.uniqueLabel = uniqueLabel;
    }
    // Two vertices are equal iff they carry the same label; hashCode must agree with equals.
    @Override
    public boolean equals(Object obj) {
        return (obj instanceof Vertex) && this.uniqueLabel == ((Vertex) obj).uniqueLabel;
    }
    @Override
    public int hashCode() {
        return uniqueLabel;
    }
}
| """Sequence-to-sequence model with an attention mechanism.""" | |
| # see https://www.tensorflow.org/versions/r0.10/tutorials/seq2seq/index.html | |
| # compare https://github.com/tflearn/tflearn/blob/master/examples/nlp/seq2seq_example.py | |
| from __future__ import print_function | |
| import numpy as np | |
| import tensorflow as tf | |
| vocab_size=256 # We are lazy, so we avoid fency mapping and just use one *class* per character/byte | |
| target_vocab_size=vocab_size | |
| learning_rate=0.1 |
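To make the "one class per character/byte" remark concrete, here is a hedged sketch (the helper names `encode` and `decode` are mine, not from the original source): each byte of the input text serves directly as its own class index in 0–255, so no vocabulary lookup table is needed.

```python
import numpy as np

def encode(text):
    # Each byte becomes its own class ID in [0, 255]; no vocabulary needed.
    return np.frombuffer(text.encode('utf-8'), dtype=np.uint8).astype(np.int32)

def decode(ids):
    # Inverse mapping: class IDs back to bytes, then to a string.
    return bytes(bytearray(int(i) for i in ids)).decode('utf-8', errors='replace')

ids = encode("hello")
print(ids)          # [104 101 108 108 111]
print(decode(ids))  # "hello"
```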
| """ | |
| Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy) | |
| BSD License | |
| """ | |
| import numpy as np | |
| # data I/O | |
| data = open('input.txt', 'r').read() # should be simple plain text file | |
| chars = list(set(data)) | |
| data_size, vocab_size = len(data), len(chars) |
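For completeness, a sketch of the lookup tables this script builds next so characters can be fed to the RNN as integer indices (the names `char_to_ix` and `ix_to_char` follow the original min-char-rnn.py; the round-trip example is just an illustration and reuses `data` and `chars` from above).

```python
# Character <-> index lookup tables.
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}

# Example: turn a snippet of the data into class indices and back.
sample = data[:10]
indices = [char_to_ix[ch] for ch in sample]
restored = ''.join(ix_to_char[i] for i in indices)
assert restored == sample
```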
$ sudo apt-get install chromium-chromedriver
$ sudo ln -s /usr/lib/chromium-browser/chromedriver /usr/bin/chromedriver
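With the symlink in place, `chromedriver` is found on the PATH by whatever WebDriver client you use. A minimal sketch with Selenium (an assumption; the notes above don't name a client):

```python
from selenium import webdriver

# chromedriver is resolved via /usr/bin/chromedriver thanks to the symlink above.
driver = webdriver.Chrome()
driver.get("https://example.com")
print(driver.title)
driver.quit()
```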
# A Collection of NLP notes
## N-grams
### Calculating unigram probabilities:
P( wi ) = count( wi ) / count( total number of words )
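As a quick sanity check of this count-based estimate (it is restated in plain English just below), a small Python sketch on a made-up toy corpus:

```python
from collections import Counter

corpus = "the cat sat on the mat".split()
counts = Counter(corpus)
total = len(corpus)

# P(wi) = count(wi) / count(total number of words)
unigram_prob = {w: c / float(total) for w, c in counts.items()}
print(unigram_prob['the'])  # 2 / 6 = 0.333...
```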
In English..