1. Best Practices https://github.com/chapel-lang/chapel/tree/master/doc/rst/developer/bestPractices
- ContributorInfo.rst - good info for PRs
- GitCheatsheet.rst - Git info
- TestSystem.rst - how to use the start_test system (important!)
```python
import os
from typing import List, Tuple

import openai
from redis import Redis
from redis.commands.search.field import VectorField, TagField
from langchain.document_loaders import ArxivLoader
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings

openai.api_key = os.environ["OPENAI_API_KEY"]

# Function to create a flat (brute-force) search index with Redis/RediSearch
# Could also be an HNSW index
def create_flat_index(redis_conn: Redis, number_of_vectors: int, distance_metric: str = "COSINE"):
    image_field = VectorField("img_vector",
                              "FLAT", {"TYPE": "FLOAT32",
                                       "DIM": 512,
                                       # tail reconstructed with the standard RediSearch vector options
                                       "DISTANCE_METRIC": distance_metric,
                                       "INITIAL_CAP": number_of_vectors})
    redis_conn.ft().create_index([image_field])
```
```text
Query: That is a happy person

That is a very happy person -> similarity score = 0.94291496
That is a happy dog         -> similarity score = 0.69457746
Today is a sunny day        -> similarity score = 0.25687605
```
```python
import numpy as np
from numpy.linalg import norm
from sentence_transformers import SentenceTransformer

# Define the model we want to use (it'll download itself)
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

sentences = [
    "That is a very happy person",
    "That is a happy dog",
    "Today is a sunny day",
]

def cosine_similarity(a, b):
    return np.dot(a, b) / (norm(a) * norm(b))
```
```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# create the vector embedding for the query
query_embedding = model.encode("That is a happy person")
```
```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

sentences = [
    "That is a very happy Person",
    "That is a Happy Dog",
    "Today is a sunny day"
]

embeddings = model.encode(sentences)
```
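Putting the pieces together: a self-contained sketch that scores the query embedding against each sentence embedding with the cosine-similarity helper above, which should reproduce the ranking shown earlier (exact values depend on the model version):

```python
import numpy as np
from numpy.linalg import norm
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

sentences = [
    "That is a very happy person",
    "That is a happy dog",
    "Today is a sunny day",
]

# Encode the query and the candidate sentences
query_embedding = model.encode("That is a happy person")
embeddings = model.encode(sentences)

def cosine_similarity(a, b):
    return np.dot(a, b) / (norm(a) * norm(b))

# Rank the sentences by similarity to the query
scores = [cosine_similarity(query_embedding, emb) for emb in embeddings]
for sentence, score in sorted(zip(sentences, scores), key=lambda p: p[1], reverse=True):
    print(f"{sentence} -> similarity score = {score}")
```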
```text
netcdf ocean_mean_month {
dimensions:
        xh = 44 ;
        yh = 40 ;
        time = UNLIMITED ; // (0 currently)
        nv = 2 ;
        zl = 2 ;
        xq = 44 ;
        yq = 40 ;
variables:
```
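A quick way to inspect the same header from Python, assuming the dump above came from a file named `ocean_mean_month.nc` (the filename is a guess based on the dataset name):

```python
import xarray as xr

# Open the (assumed) file behind the ncdump header above
ds = xr.open_dataset("ocean_mean_month.nc")

print(ds.sizes)            # dimension lengths, e.g. xh=44, yh=40, time=0 (unlimited), ...
print(list(ds.data_vars))  # variable names listed under "variables:"
ds.close()
```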
```text
NOTE from PE 0: MPP_DOMAINS_SET_STACK_SIZE: stack size set to 32768.
&MPP_IO_NML
HEADER_BUFFER_VAL= 16384,
GLOBAL_FIELD_ON_ROOT_PE=T,
IO_CLOCKS_ON=F,
SHUFFLE= 0,
DEFLATE_LEVEL= -1,
CF_COMPLIANCE=F,
/
NOTE from PE 0: MPP_IO_SET_STACK_SIZE: stack size set to 131072.
```
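The `&MPP_IO_NML ... /` block echoed in the log is a standard Fortran namelist, so if you need to check the effective settings programmatically it can be parsed from Python with the f90nml package; a small sketch (the snippet below just re-parses the text from the log):

```python
import f90nml

log_snippet = """
&MPP_IO_NML
HEADER_BUFFER_VAL= 16384,
GLOBAL_FIELD_ON_ROOT_PE=T,
IO_CLOCKS_ON=F,
SHUFFLE= 0,
DEFLATE_LEVEL= -1,
CF_COMPLIANCE=F,
/
"""

# Parse the namelist text (keys come back lowercased)
nml = f90nml.reads(log_snippet)
print(nml["mpp_io_nml"]["deflate_level"])     # -1, i.e. no compression requested
print(nml["mpp_io_nml"]["header_buffer_val"]) # 16384
```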