I hereby claim:
- I am amreshvenugopal on github.
- I am ltbringer (https://keybase.io/ltbringer) on keybase.
- I have a public key ASAYY7A_gJS3mDSJ7qG48aK-_vRooqrCT4V7QDpJ3Punvgo
To claim this, I am signing this object:
```rust
use std::thread;
use std::sync::atomic::{Ordering, AtomicIsize};
use std::sync::Arc;
use std::time::Instant;

fn parallel_search(sstables: &[u8], k: u8, n_threads: usize) -> isize {
    let n_sstables = sstables.len();
    let mut handles = vec![];
    let early_exit = Arc::new(AtomicIsize::new(-1));
```
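The preview cuts off right after the early-exit flag is created. A minimal sketch of how the rest of the function might go, assuming each thread scans an equal chunk and the first match wins; the chunking scheme, the `Relaxed` orderings, and the `found` name are illustrative assumptions, not the gist's actual code:

```rust
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::Arc;
use std::thread;

fn parallel_search(sstables: &[u8], k: u8, n_threads: usize) -> isize {
    let n = n_threads.max(1);
    let chunk_size = ((sstables.len() + n - 1) / n).max(1);
    let early_exit = Arc::new(AtomicIsize::new(-1));
    let mut handles = vec![];
    for (i, chunk) in sstables.chunks(chunk_size).enumerate() {
        let chunk = chunk.to_vec();
        let found = Arc::clone(&early_exit);
        handles.push(thread::spawn(move || {
            for (j, &byte) in chunk.iter().enumerate() {
                // Bail out as soon as any thread has recorded a hit.
                if found.load(Ordering::Relaxed) != -1 {
                    return;
                }
                if byte == k {
                    found.store((i * chunk_size + j) as isize, Ordering::Relaxed);
                    return;
                }
            }
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
    early_exit.load(Ordering::Relaxed) // still -1 if k was never found
}
```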
```python
from typing import Any, Callable

a1 = Callable[[Any], Any]

class Compose:
    def __init__(self, f: a1) -> None:
        self.f = f
```
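The preview stops after `__init__`. One plausible completion, assuming the class is meant to chain single-argument callables; the `|` operator is an assumed choice made here for illustration:

```python
from typing import Any, Callable

a1 = Callable[[Any], Any]

class Compose:
    def __init__(self, f: a1) -> None:
        self.f = f

    def __call__(self, x: Any) -> Any:
        return self.f(x)

    def __or__(self, g: "Compose") -> "Compose":
        # (f | g)(x) == g(f(x)): feed f's output into g (operator is assumed)
        return Compose(lambda x: g(self.f(x)))


add_one = Compose(lambda x: x + 1)
double = Compose(lambda x: x * 2)
assert (add_one | double)(3) == 8  # (3 + 1) * 2
```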
```python
import json
import string
import random
import colorsys
import operator as op

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
```lisp
;; =============================================================
;; A file simulator is used to create an example problem
;; that helps demonstrate the use of macros in Common Lisp.
;; =============================================================
;; Interfaces to file I/O expose operations like
;; `open`, `close`, and `read`.
;; Not closing a file can leak resources (such as file handles)
;; or leave the file in an unreliable state
;; (due to unwanted side effects).
;;
```
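A minimal sketch of the kind of macro this motivates, modeled on the standard `WITH-OPEN-FILE`; `open-file`, `close-file`, and `read-file` are hypothetical simulator functions, not names taken from the gist:

```lisp
;; `open-file' / `close-file' are assumed simulator functions.
(defmacro with-simulated-file ((var name) &body body)
  `(let ((,var (open-file ,name)))
     (unwind-protect
          (progn ,@body)
       ;; UNWIND-PROTECT guarantees the close runs even if the
       ;; body signals an error, so the handle can never leak.
       (close-file ,var))))

;; Usage: the file is closed no matter how the body exits.
;; (with-simulated-file (f "notes.txt")
;;   (read-file f))
```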
```js
'use strict';
/*
 * Given a string of bits such as 100100, calculate the number of steps
 * required to convert the string to 000000 using the following rules:
 *  - a bit may be flipped only if it is followed immediately by a one,
 *    which is in turn followed only by zeroes
 *  - the far-right bit may be toggled freely
 *
 * Examples: 111 -> 110 -> 010 -> 011 -> 001 -> 000 (score 5)
 *           1101 -> 1100 -> 0100 -> 0101 -> 0111 -> 0110 -> 0010 -> 0011 -> 0001 -> 0000 (score 9)
 */
```
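These flip rules are the stepping rules of a reflected binary Gray code, so the score is the value of the input decoded as Gray code; that identity reproduces both example scores (5 and 9). A sketch of a solution on that basis (`score` is a name chosen here, not the gist's):

```js
'use strict';

// Decode the bit string as a reflected binary Gray code:
// b_i = g_i XOR b_(i-1); the decoded value is the step count.
const score = (bits) => {
  let value = 0; // binary value decoded so far
  let prev = 0;  // previously decoded binary bit
  for (const g of bits) {
    prev ^= Number(g);
    value = value * 2 + prev;
  }
  return value;
};

console.log(score('111'));  // 5
console.log(score('1101')); // 9
```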
```python
bot1_sym = 'O'
bot2_sym = 'X'

def optimize_bot(game, bot1, bot2):
    """
    Punish or reward the bots with respect to the agent that wins the game.
    """
    if game.winner == bot1_sym:
        bot1.on_reward(1)  # reward
```
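The preview ends inside the first branch. A plausible completion, assuming `on_reward` takes a positive value for a win and a negative one for a loss; the symmetric penalty and the draw handling are assumptions:

```python
bot1_sym = 'O'
bot2_sym = 'X'

def optimize_bot(game, bot1, bot2):
    """Punish or reward each bot according to who won the game."""
    if game.winner == bot1_sym:
        bot1.on_reward(1)    # reward the winner
        bot2.on_reward(-1)   # punish the loser (assumed symmetric penalty)
    elif game.winner == bot2_sym:
        bot1.on_reward(-1)
        bot2.on_reward(1)
    else:
        bot1.on_reward(0)    # assumed: a draw is neutral for both bots
        bot2.on_reward(0)
```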
```python
class Agent(object):
    def __init__(self, exploration_rate=0.33, learning_rate=0.5, discount_factor=0.01):
        """
        An agent is a problem solver. It should perform actions like:
        - plotting a symbol on the tic-tac-toe board if the cell is vacant
        - remembering which states are more profitable than others
        - exploring better states
        - exploiting for maximum profit
        """
```
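A minimal sketch of how those constructor arguments might drive move selection: a `q_table` of learned state values and epsilon-greedy exploration are assumptions inferred from the parameter names, not the gist's code:

```python
import random

class Agent(object):
    def __init__(self, exploration_rate=0.33, learning_rate=0.5, discount_factor=0.01):
        self.exploration_rate = exploration_rate
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.q_table = {}  # assumed: maps (state, move) -> learned value

    def get_move(self, state, vacant_cells):
        # Explore: with probability exploration_rate, try a random vacant cell.
        if random.random() < self.exploration_rate:
            return random.choice(vacant_cells)
        # Exploit: otherwise pick the vacant cell with the highest learned value.
        return max(vacant_cells, key=lambda c: self.q_table.get((state, c), 0.0))
```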
```python
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns


class Board(object):
    """
    The environment for the reinforcement learning project.
    It should:
```
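The preview cuts off mid-docstring. A bare-bones sketch of such an environment, assuming a 3x3 tic-tac-toe grid and the 'O'/'X' symbols used above; the method names here are ours, not the gist's:

```python
import numpy as np

class Board(object):
    """A minimal tic-tac-toe environment (a sketch, not the gist's class)."""
    def __init__(self):
        self.grid = np.full((3, 3), ' ')
        self.winner = None

    def vacant_cells(self):
        return [(r, c) for r in range(3) for c in range(3) if self.grid[r, c] == ' ']

    def play(self, sym, cell):
        self.grid[cell] = sym
        self._check_winner(sym)

    def _check_winner(self, sym):
        # Rows, columns, and both diagonals are the eight winning lines.
        g = self.grid
        lines = list(g) + list(g.T) + [g.diagonal(), np.fliplr(g).diagonal()]
        if any((line == sym).all() for line in lines):
            self.winner = sym
```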