VS Code (https://code.visualstudio.com/)
Note: version 1.21 or later
Node.js & NPM (https://nodejs.org/en/download/)
Note: Node version 8.10.0 or later, NPM version 5.6.0 or later
from django.contrib import admin
from books.models import Agent, PolicyIssue

class PolicyIssueAdmin(admin.ModelAdmin):
    def add_view(self, request, form_url='', extra_context=None):
        # Lock the branch field for employees on the add form;
        # get_profile() is the pre-Django-1.7 user profile API.
        if request.user.get_profile().is_employee:
            self.model.branch.field.editable = False
        else:
            self.model.branch.field.editable = True
        return super(PolicyIssueAdmin, self).add_view(request, form_url, extra_context)

admin.site.register(PolicyIssue, PolicyIssueAdmin)
# Dump the database named $1 (also used as the role) to a timestamped custom-format archive
DATE_WITH_TIME=`date "+%Y%m%d-%H%M%S"`
pg_dump -Fc -U $1 -h localhost -p 5432 $1 --no-owner --no-acl -f "pgdump-$1-$DATE_WITH_TIME.bak"
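Saved as an executable script, it takes the role/database name as its single argument; the custom-format archive can later be restored with pg_restore. The names backup.sh and mydb below are placeholders, not from the original:

chmod +x backup.sh
./backup.sh mydb        # writes pgdump-mydb-<timestamp>.bak ("mydb" is a placeholder)
pg_restore -U mydb -h localhost -d mydb --no-owner pgdump-mydb-<timestamp>.bak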
sudo apt-get update
sudo apt-get install apt-transport-https openjdk-8-jre-headless
# Add the Elastic signing key and the 6.x APT repository
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
echo "deb https://artifacts.elastic.co/packages/6.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-6.x.list
sudo apt-get update && sudo apt-get install elasticsearch
# Register and start Elasticsearch as a systemd service
sudo /bin/systemctl daemon-reload
sudo /bin/systemctl enable elasticsearch.service
sudo systemctl start elasticsearch.service
# Check the logs for startup errors
sudo ls -la /var/log/elasticsearch/
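Once the service is running, Elasticsearch answers on port 9200 by default, so a quick curl confirms the node is up:

curl http://localhost:9200/   # returns a JSON blob with the node name and version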
module.exports = {
  parser: "babel-eslint",
  plugins: ["react", "prettier"],
  rules: {
    // Report Prettier formatting differences as ESLint errors
    "prettier/prettier": "error"
  },
  parserOptions: {
    ecmaFeatures: {
      jsx: true,
      modules: true
    }
  }
};
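The config references a parser and two plugins that have to be present as dev dependencies; assuming a project where none of them are installed yet:

npm install --save-dev eslint babel-eslint eslint-plugin-react eslint-plugin-prettier prettier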
CREATE DATABASE millistream;
\c millistream
CREATE TABLE corporateactions
(
    "id" SERIAL,
    "insref" numeric(20) NOT NULL,
    "type" smallint NOT NULL,
    "subtype" smallint,
    "dividend" double precision
);
sudo apt-get update
sudo apt-get upgrade
# Install packages
sudo apt-get install -y python-pip python-dev build-essential python-software-properties
sudo apt-get install -y git-core htop nginx ntpdate nano ufw curl wget
# Build dependencies for common Python/Ruby packages
sudo apt-get install -y zlib1g-dev libssl-dev libreadline-dev libyaml-dev libxml2-dev libxslt1-dev libcurl4-openssl-dev
sudo apt-get install -y libsqlite3-dev sqlite3
sudo apt-get install -y redis-server
sudo apt-get install -y nodejs npm
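A few version checks confirm the toolchain installed cleanly (note that Ubuntu's apt package installs the binary as nodejs rather than node):

python --version && pip --version
nodejs -v && npm -v
redis-cli ping        # a running Redis server answers PONG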
import re
import HTMLParser  # Python 2; on Python 3 use html.unescape instead

def get_urls_from_text(text):
    # See: http://daringfireball.net/2010/07/improved_regex_for_matching_urls
    # Copied from http://copia.posthaven.com/finding-urls-in-plain-text
    parser = HTMLParser.HTMLParser()
    GRUBER_URLINTEXT_PAT = re.compile(ur'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?\xab\xbb\u201c\u201d\u2018\u2019]))')
    # Remove HTML entities from url
    urls = [parser.unescape(mgroups[0]) for mgroups in GRUBER_URLINTEXT_PAT.findall(text)]
    return urls
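Calling the function on a piece of text returns the matched URLs with HTML entities decoded; the sample string below is made up for illustration:

sample = "Docs at https://example.com/page?a=1&amp;b=2 and www.example.org today."  # illustrative input
print(get_urls_from_text(sample))
# expected: ['https://example.com/page?a=1&b=2', 'www.example.org']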
# s3storages.py
import urlparse  # Python 2; on Python 3 this is urllib.parse

from django.conf import settings
from storages.backends.s3boto import S3BotoStorage

def domain(url):
    # Extract just the hostname from a full URL, e.g. the bucket's CDN domain
    return urlparse.urlparse(url).hostname
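A minimal sketch of how domain() typically gets used with this backend: S3BotoStorage reads a custom_domain attribute when building URLs, so pointing it at the hostname of the configured URL serves files from that domain. The subclass names and the STATIC_URL/MEDIA_URL wiring below are assumptions, not from the original:

# Hypothetical subclasses; names and settings wiring are assumptions
class StaticS3Storage(S3BotoStorage):
    location = 'static'
    custom_domain = domain(settings.STATIC_URL)

class MediaS3Storage(S3BotoStorage):
    location = 'media'
    custom_domain = domain(settings.MEDIA_URL)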