# Debian | Raspbian
All steps on Debian must be run as root. To become root, run:

Debian:

    su

Raspbian:

    sudo su
    #!/bin/bash
    # Sources using OpenStack images
    declare -A sources
    declare -A template_ids
    declare -A result_ids
    template_ids["debian"]=9000
    template_ids["ubuntu"]=9100
    template_ids["almalinux"]=9200
    #!/bin/bash
    set -o errexit
    clear
    printf "\n*** This script will download a cloud image and create a Proxmox VM template from it. ***\n\n"
    ### HOW TO USE
    ### Prerequisites:
    ###  - run on a Proxmox 6 server
    ###  - a DHCP server should be active on vmbr1
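The body of the script is not shown. For orientation, the usual Proxmox workflow for turning a cloud image into a template looks roughly like the sketch below; the VM ID, storage name, and image filename are assumptions, not the original script's values:

```bash
#!/bin/bash
set -o errexit
# Assumed values for illustration only.
VMID=9000
STORAGE=local-lvm
IMAGE=debian-12-genericcloud-amd64.qcow2

qm create "$VMID" --name debian-template --memory 2048 --net0 virtio,bridge=vmbr1
qm importdisk "$VMID" "$IMAGE" "$STORAGE"                  # import the cloud image as a disk
qm set "$VMID" --scsihw virtio-scsi-pci --scsi0 "$STORAGE:vm-$VMID-disk-0"
qm set "$VMID" --ide2 "$STORAGE:cloudinit" --boot c --bootdisk scsi0
qm template "$VMID"                                         # convert the VM into a template
```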
    #########################
    # 5.4.2024: Switched from apis.is to the vedur.is XML service
    #
    # Home Assistant sensor for 3-hourly manned weather observations from the Icelandic Met Office (Veðurstofa Íslands).
    # Manned weather stations publish observations every 3 hours.
    # https://www.vedur.is/media/vedurstofan/XMLthjonusta.pdf
    #
    # Station numbers can be found at: https://www.vedur.is/vedur/stodvar
    # Reykjavík is selected below (station=1)
    #
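The sensor configuration itself is not shown. As a language-neutral illustration, here is a short Python sketch of querying the XML service for one station; the endpoint and parameter names are assumptions drawn from the linked XMLthjonusta PDF, not taken from the original sensor:

```python
# Sketch of fetching one station's observation; endpoint and parameters
# are assumptions based on the XMLthjonusta PDF linked above.
import requests
import xml.etree.ElementTree as ET

URL = "https://xmlweather.vedur.is/"  # assumed endpoint
params = {"op_w": "xml", "type": "obs", "lang": "is", "view": "xml", "ids": "1"}  # ids=1 -> Reykjavík

resp = requests.get(URL, params=params, timeout=10)
resp.raise_for_status()
root = ET.fromstring(resp.content)

for station in root.iter("station"):
    # <T> (temperature) is one of the observation elements documented in the PDF.
    print(station.get("id"), station.findtext("T"))
```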
    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # Written as part of https://www.scrapehero.com/how-to-scrape-amazon-product-reviews-using-python/
    from lxml import html
    import json
    import re
    import requests
    from dateutil import parser as dateparser
    from time import sleep
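The rest of that script is not reproduced here; a minimal sketch of how these imports typically fit together (the URL pattern and XPath selectors are assumptions, not the scrapehero script's actual code):

```python
# Illustrative use of the imports above; URL and selectors are assumptions.
def fetch_review_dates(asin):
    url = f"https://www.amazon.com/product-reviews/{asin}"
    headers = {"User-Agent": "Mozilla/5.0"}          # generic stand-in UA
    page = requests.get(url, headers=headers)
    tree = html.fromstring(page.content)
    dates = []
    for block in tree.xpath('//div[@data-hook="review"]'):   # assumed review container
        text = "".join(block.xpath('.//span[@data-hook="review-date"]//text()'))
        if text:
            dates.append(dateparser.parse(text, fuzzy=True).isoformat())
    sleep(2)  # be polite between requests
    return dates
```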
    {
      "_id": "amazon_pet",
      "startUrl": [
        "https://www.amazon.com/Best-Sellers-Pet-Supplies/zgbs/pet-supplies/ref=zg_bs_nav_0"
      ],
      "selectors": [
        {
          "id": "product",
          "type": "SelectorElement",
          "parentSelectors": [
    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # Written as part of https://www.scrapehero.com/how-to-scrape-amazon-product-reviews-using-python/
    from lxml import html
    from json import dump, loads
    from requests import get
    from re import sub
    from dateutil import parser as dateparser
    from time import sleep
    from lxml import html
    import csv
    import os
    import requests
    from time import sleep
    from random import randint

    def parse(url):
        # Generic User-Agent as a stand-in; the original headers dict was truncated here.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        return html.fromstring(response.content)
    ###
    #
    # This script runs robocopy jobs in parallel by increasing the number of outstanding I/Os to the VPSA. Although
    # you can change the thread count within a single job using the "/MT:#" parameter, backups run faster when two
    # or more jobs are added to the original set.
    #
    # To do this, subdivide the work by directory: each job recurses into its directory until it completes. The
    # ideal case is to have hundreds of directories at the root of the backup. Simply change $src to get the list
    # of folders to back up; that list feeds $ScriptBlock. A sketch of the job-launching pattern follows below.
    #
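The script body is not shown; here is a minimal sketch of the pattern described above, using Start-Job to run one robocopy per top-level directory. $src, $dst, and the robocopy options are assumptions, not the original script's values:

```powershell
# Minimal sketch; $src, $dst and the robocopy flags are assumptions.
$src = 'D:\data'
$dst = '\\vpsa\backup\data'

$ScriptBlock = {
    param($from, $to)
    robocopy $from $to /E /MT:8 /R:1 /W:1
}

# One job per top-level directory, so several robocopy instances run in parallel.
Get-ChildItem -Path $src -Directory | ForEach-Object {
    Start-Job -ScriptBlock $ScriptBlock -ArgumentList $_.FullName, (Join-Path $dst $_.Name)
}

Get-Job | Wait-Job | Receive-Job
```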
    yum update -y
    yum install -y epel-release
    yum clean all
    # Add repos
    rpm -Uvh "https://labs.consol.de/repo/stable/rhel7/i386/labs-consol-stable.rhel7.noarch.rpm"
    rpm -ihv http://opensource.is/repo/ok-release.rpm
    yum update -y ok-release
    #
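After adding the repositories, it can be worth confirming that yum sees them; the grep pattern below is an assumption based on the package names above, not the repos' documented ids:

```bash
# List enabled repositories and look for the two just added.
yum repolist enabled | grep -Ei 'consol|ok'
```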