npx https://github.com/google-gemini/gemini-cli
- Or for a global installation:
npm install -g @google/gemini-cli
from dotenv import load_dotenv
from openai import OpenAI
from traceloop.sdk import Traceloop
from traceloop.sdk.decorators import workflow, task
import os

# Load API keys from a local .env file
load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TRACELOOP_API_KEY = os.getenv("TRACELOOP_API_KEY")
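
To show how these pieces fit together, here is a minimal, hypothetical sketch: it initializes Traceloop (which picks up TRACELOOP_API_KEY from the environment) and traces a single OpenAI call inside a decorated workflow. The app name, model, and prompt are illustrative, not taken from the original code.

Traceloop.init(app_name="observability_demo")  # reads TRACELOOP_API_KEY from the environment
client = OpenAI(api_key=OPENAI_API_KEY)

@workflow(name="joke_generator")
def generate_joke() -> str:
    # The OpenAI call is auto-instrumented; the decorator groups it into a named workflow span.
    completion = client.chat.completions.create(
        model="gpt-4o-mini",  # assumed model name
        messages=[{"role": "user", "content": "Tell me a short joke."}],
    )
    return completion.choices[0].message.content

print(generate_joke())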
import re

from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Qdrant
from qdrant_client import QdrantClient

# --- Step 1: Extract text from the document ---
def extract_text(file_path: str) -> str:
    """
FROM ./Llama-3-ELYZA-JP-8B-q4_k_m.gguf
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ .Response }}<|eot_id|>"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
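
Assuming the Modelfile above has been registered with `ollama create elyza-jp-8b -f Modelfile`, the local model can then be queried through Ollama's OpenAI-compatible endpoint. The model name and prompt below are illustrative assumptions.

from openai import OpenAI

# Ollama serves an OpenAI-compatible API on localhost:11434.
client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")

response = client.chat.completions.create(
    model="elyza-jp-8b",  # assumed name passed to `ollama create`
    messages=[{"role": "user", "content": "Introduce yourself briefly in Japanese."}],
)
print(response.choices[0].message.content)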
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

app = FastAPI()
client = OpenAI()
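
A hypothetical request schema and route to illustrate how the imports above are typically wired together; the path, field names, and model are assumptions rather than the app's actual definitions.

class ChatRequest(BaseModel):
    message: str

@app.post("/chat")
def chat(request: ChatRequest):
    # Forward the user message to the OpenAI API and surface failures as HTTP 500s.
    try:
        completion = client.chat.completions.create(
            model="gpt-4o-mini",  # assumed model name
            messages=[{"role": "user", "content": request.message}],
        )
        return {"response": completion.choices[0].message.content}
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))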
import requests

# Base endpoint of the Jina Reader API
base_url = "https://r.jina.ai/"

# Target URL whose content we want to read
input_url = "https://www.stateof.ai/"

# Full request URL: the target URL appended to the Reader endpoint
full_url = base_url + input_url
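
A short, assumed continuation: issuing a GET request against the combined URL returns the Reader-rendered page content as plain text/markdown in the response body.

# Fetch the Reader-rendered content of the target page.
response = requests.get(full_url)
print(response.text)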
import streamlit as st

# App title
st.title('EcoOptimizer Carbon Footprint Calculator')

# Introduction
st.write('''
This tool calculates the Software Carbon Intensity (SCI) for EcoOptimizer,
an AI-powered energy management system for commercial buildings by EcoTech Solutions.
''')
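
As a rough sketch of what such a calculator collects, the inputs below follow the Green Software Foundation's SCI formula, SCI = ((E * I) + M) / R. The widget labels, defaults, and variable names are illustrative, not the app's exact UI.

# Illustrative inputs for the SCI formula: energy (E), carbon intensity (I),
# embodied emissions (M), and the functional unit (R).
energy_kwh = st.number_input('Energy consumed by the software (kWh)', min_value=0.0, value=0.0)
carbon_intensity = st.number_input('Carbon intensity of the energy used (gCO2e/kWh)', min_value=0.0, value=0.0)
embodied = st.number_input('Embodied emissions of the hardware (gCO2e)', min_value=0.0, value=0.0)
functional_unit = st.number_input('Functional unit, R (e.g. number of users served)', min_value=1.0, value=1.0)

if st.button('Calculate SCI'):
    sci = (energy_kwh * carbon_intensity + embodied) / functional_unit
    st.write(f'Software Carbon Intensity: {sci:.2f} gCO2e per functional unit')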
training_arguments = TrainingArguments(
    output_dir="./results",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=2,
    optim="adamw_8bit",
    logging_steps=50,
    learning_rate=1e-4,
    evaluation_strategy="steps",
    do_eval=True,
)
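
For context, a hypothetical next step passes these arguments to a Trainer. The model, tokenizer, and datasets are assumed to have been prepared in earlier steps and are not defined in this excerpt.

from transformers import Trainer

# Assumed wiring: model, train_ds, and eval_ds come from earlier setup steps.
trainer = Trainer(
    model=model,
    args=training_arguments,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
)
trainer.train()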
## With Evaluation Harness

!pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git
!pip install bitsandbytes
!pip install --upgrade transformers
!pip install auto-gptq optimum autoawq

!lm_eval --model hf --model_args pretrained=google/gemma-7b --tasks winogrande,hellaswag,arc_challenge --device cuda:0 --num_fewshot 1 --batch_size 8 --output_path ./eval_harness/gemma-7b
!lm_eval --model hf --model_args pretrained=google/gemma-7b --tasks winogrande,hellaswag,arc_challenge --device cuda:0 --num_fewshot 5 --batch_size 8 --output_path ./eval_harness/gemma-7b-5shot
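
The same evaluation can also be driven from Python through the harness's simple_evaluate entry point. This is a rough equivalent of the 1-shot CLI run above; keyword names may vary slightly between harness versions.

import lm_eval

# Roughly mirrors the 1-shot CLI invocation above.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=google/gemma-7b",
    tasks=["winogrande", "hellaswag", "arc_challenge"],
    num_fewshot=1,
    batch_size=8,
    device="cuda:0",
)
print(results["results"])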
import os

# Set your RunPod API key as an environment variable
os.environ['RUNPOD_API_KEY'] = "your_runpod_api_key"

import runpod
from IPython.display import display, Markdown

runpod.api_key = os.getenv("RUNPOD_API_KEY", "your_runpod_api_key")

if runpod.api_key == "your_runpod_api_key":