Select Git revision
myfirstrag.py
Code owners
Assign users and groups as approvers for specific file changes. Learn more.
myfirstrag.py 1.60 KiB
import os
from llama_index.core import VectorStoreIndex, ServiceContext, SimpleDirectoryReader
# uncomment for log messages
# import logging
# import sys
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Select your API environment and set the corresponding API key.
#from llama_index.embeddings.openai import OpenAIEmbedding
# for OpenAI:
# os.environ['OPENAI_API_KEY'] = 'sk-...'  # SECURITY: never commit a real key; the one previously here must be revoked
# embed_model = OpenAIEmbedding()
# to use free Huggingface Model
# from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
# os.environ["GROQ_API_KEY"] = "gsk_..."  # SECURITY: never commit a real key; the one previously here must be revoked
# from llama_index.llms.groq import Groq
# Build a vector index over local documents and run an interactive query loop.
#
# NOTE(review): the original script did
#     service_context = ServiceContext.from_defaults(embed_model=embed_model)
# but `embed_model` is only defined in the commented-out sections above, so
# the script crashed with NameError before doing any work -- and the
# resulting service_context was never passed to anything. ServiceContext is
# also deprecated in recent llama-index releases in favour of the global
# Settings object. To use a non-default embedding model, uncomment one of
# the options above and register it like this:
#     from llama_index.core import Settings
#     Settings.embed_model = embed_model

# Load textual data from the directory via the SimpleDirectoryReader
# connector; see https://llamahub.ai/ for more data connectors.
documents = SimpleDirectoryReader("./data/firststeps/").load_data()

# Build an in-memory index. NOTE: this re-embeds every document on each run;
# to save API budget, persist it after the first run with
#     index.storage_context.persist(persist_dir="./storage")
# and reload with load_index_from_storage on subsequent runs.
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

# Interactive prompt loop (the original paste had lost the body's
# indentation, which made this a SyntaxError; restored here).
while True:
    prompt = input("Enter a prompt (or 'exit' to quit): ")
    if prompt == 'exit':
        break
    response = query_engine.query(prompt)
    print(response)