Adapted from Cook's Illustrated 15-minute walnut fudge.
- 16oz bittersweet chocolate (I use Ghirardelli 60% bittersweet bars)
- 2oz unsweetened chocolate (again, I use Ghirardelli, 100% bars)
- 1 can sweetened condensed milk (whole)
```python
# first you need to make sure you have the libraries installed; I'm using pyquery and requests:
# pip install pyquery requests
import requests
from pyquery import PyQuery as pq
from urlparse import urljoin

URL = "http://www.stat-gabon.org/"
response = requests.get(URL)
```
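The snippet stops right after the request; a minimal continuation, assuming the goal is to pull links out of the page, might parse the response with PyQuery and resolve relative hrefs with urljoin (the `a` selector here is an assumption, not part of the original):

```python
# Assumed continuation: parse the fetched page and collect absolute link URLs.
doc = pq(response.content)            # build a PyQuery document from the response body
links = []
for a in doc("a").items():            # "a" is an assumed selector; adjust for the real target elements
    href = a.attr("href")
    if href:
        links.append(urljoin(URL, href))   # resolve relative hrefs against the base URL
print(links)
```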
```cpp
void setup() {
  pinMode(2, OUTPUT);  // pin 2 will act as Vcc
  pinMode(5, OUTPUT);  // pin 5 will act as GND
  // initialize serial communication:
  Serial.begin(9600);
}

void loop() {
  digitalWrite(2, HIGH);
  digitalWrite(5, LOW);  // assumed continuation: hold pin 5 low so it serves as the GND rail described above
}
```
```javascript
var ocemail = function(url, domain) {
  var capitalize = function(s) { return s.charAt(0).toUpperCase() + s.slice(1).toLowerCase(); };
  // url parser from http://jsperf.com/url-parsing
  var urlParseRE = /^(((([^:\/#\?]+:)?(?:(\/\/)((?:(([^:@\/#\?]+)(?:\:([^:@\/#\?]+))?)@)?(([^:\/#\?\]\[]+|\[[^\/\]@#?]+\])(?:\:([0-9]+))?))?)?)?((\/?(?:[^\/\?#]+\/+)*)([^\?#]*)))?(\?[^#]+)?)(#.*)?/;
  var hostname = urlParseRE.exec(url)[11];  // capture group 11 of the parser regex is the hostname
  if (hostname) {
    // matches hostnames of the form "<office>.house.gov" or "www.<office>.senate.gov";
    // group 1 is the member's office subdomain, group 2 is the chamber
    var domainMatch = /^(?:www[.])?([-a-z0-9]+)[.](house|senate)[.]gov$/;
    var match = domainMatch.exec(hostname.toLowerCase());
```
```rust
pub struct Point { x: i32, y: i32 }
struct Line { p1: Point, p2: Point }

impl Line {
    pub fn length(&self) -> f64 {
        let xdiff = self.p1.x - self.p2.x;
        let ydiff = self.p1.y - self.p2.y;
        // Euclidean distance between the two endpoints
        ((xdiff.pow(2) + ydiff.pow(2)) as f64).sqrt()
    }
}
```
This is kind of inspired by this recipe. Amounts are what I did, but if I do it again I might slightly decrease the nutmeg and maybe slightly increase the sugar.
```python
import csv, re

SEARCH_TERMS = [re.compile(term, re.I) for term in
                [r"(?<!white )house", "HFAC", "Congressman", "Congresswoman",
                 "Congressional", "Senate", "Senator"]]
SEARCH_COLS = ["contact_title", "contact_name", "contact_office", "contact_agency"]

csv_infile = open('contacts.csv', 'rb')
csv_in = csv.DictReader(csv_infile)
csv_outfile = open('contacts_filtered.csv', 'wb')
csv_out = csv.DictWriter(csv_outfile, csv_in.fieldnames)
```
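The filtering loop itself isn't shown above; a sketch of how it presumably proceeds, given the SEARCH_TERMS and SEARCH_COLS defined there (the exact matching and cleanup details are an assumption):

```python
# Assumed continuation: copy over only the rows whose contact fields mention Congress.
csv_out.writeheader()
for row in csv_in:
    # join the columns we care about into one searchable string
    haystack = " ".join(row.get(col) or "" for col in SEARCH_COLS)
    if any(term.search(haystack) for term in SEARCH_TERMS):
        csv_out.writerow(row)

csv_infile.close()
csv_outfile.close()
```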
Note: these are amounts for a nine-inch round pie. For the 13x9 rectangular pie, I used a double batch of filling and a 1.5x batch of crust.
```html
<script>
(function() {
  var insertScript = function(url) {
    var elem = document.createElement('script');
    // Gigya serves its https assets from cdns.gigya.com and plain http from cdn.gigya.com
    elem.src = (document.location.protocol == "https:" ? "https://cdns" : "http://cdn") + ".gigya.com/js/" + url;
    elem.async = true;
    elem.type = "text/javascript";
    $('script').eq(0).before(elem);  // insert before the first script tag on the page (relies on jQuery)
  };
  window.onGigyaServiceReady = function(type) {
```
| """ | |
| This script ingests a CSV of last names and builds JSON output on the | |
| percentage of last names that begin with each letter of the alphabet. | |
| I initially ran it on a CSV-ified version of the US Census's list of all last | |
| names with more than 100 occurrences, and used the frequency field within that | |
| file to weigh the output, but absent that field, it assigns equal weight to | |
| each name, allowing us to also process our TCamp 2012 attendence list. | |
| Using the script on the Census data available in: |
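The body of the script isn't included here; a minimal sketch of the approach the docstring describes, with hypothetical column names (`name`, optional `frequency`) and a hypothetical input path:

```python
# Assumed sketch of the logic the docstring describes; column names and paths are hypothetical.
import csv
import json
import string
from collections import defaultdict

totals = defaultdict(float)
with open('surnames.csv') as infile:          # hypothetical input file
    for row in csv.DictReader(infile):
        name = (row.get('name') or '').strip()
        if not name:
            continue
        letter = name[0].upper()
        if letter not in string.ascii_uppercase:
            continue
        # use the frequency field as a weight when present, otherwise count each name once
        weight = float(row.get('frequency') or 1)
        totals[letter] += weight

grand_total = sum(totals.values())
percentages = {letter: round(100.0 * totals[letter] / grand_total, 2)
               for letter in string.ascii_uppercase}
print(json.dumps(percentages, indent=2))
```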