Skip to content
Snippets Groups Projects
Select Git revision
  • 2f1b5dd278cf6a9d97bf744282ecb7044707952d
  • master default protected
  • csv_export
  • ndex
  • v1.1.18-rc2
  • v1.1.17
  • v1.1.16
  • v1.1.16-rc12
  • v1.1.16-rc11
  • v1.1.16-rc10
  • v1.1.16-rc9
  • v1.1.16-rc8
  • v1.1.16-rc7
  • v1.1.16-rc4
  • v1.1.16-rc3
  • v1.1.16-rc1
  • v1.1.6-rc1
  • v1.1.15
  • v1.1.15-rc7
  • v1.1.15-rc6
  • v1.1.15-rc3
  • v1.1.15-rc1
  • v1.1.14
  • v1.1.13
24 results

task-list.component.html

Blame
  • Code owners
    Assign users and groups as approvers for specific file changes. Learn more.
    myfirstrag.py 1.60 KiB
    import os
    from llama_index.core import (
        ServiceContext,
        SimpleDirectoryReader,
        StorageContext,
        VectorStoreIndex,
        load_index_from_storage,
    )
    
    
    
    
    # Uncomment for verbose log messages while debugging:
    # import logging
    # import sys
    # logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    # logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))


    # Select your embedding backend via environment variables.
    #
    # SECURITY NOTE(review): a previous revision of this file hard-coded real
    # OpenAI and Groq API keys in comments here. Never commit secrets —
    # those keys are compromised and must be revoked. Keys are now read
    # from the environment (e.g. `export OPENAI_API_KEY=...`).
    if os.environ.get("OPENAI_API_KEY"):
        # Paid OpenAI embeddings — used when an API key is present.
        from llama_index.embeddings.openai import OpenAIEmbedding

        embed_model = OpenAIEmbedding()
    else:
        # Free local HuggingFace model as the default fallback, so the
        # script no longer crashes with a NameError when no key is set.
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding

        embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

    service_context = ServiceContext.from_defaults(embed_model=embed_model)
    
    # Load textual data from a directory using the SimpleDirectoryReader
    # connector; check https://llamahub.ai/ for more data connectors.
    #
    # The index is persisted to disk after the first run so subsequent runs
    # reload it instead of re-embedding every document (saves API budget).
    PERSIST_DIR = "./storage"

    if os.path.exists(PERSIST_DIR):
        # Reuse the previously built index from disk.
        storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
        index = load_index_from_storage(storage_context)
    else:
        # First run: read the documents, embed/index them, then persist.
        documents = SimpleDirectoryReader("./data/firststeps/").load_data()
        index = VectorStoreIndex.from_documents(documents)
        index.storage_context.persist(persist_dir=PERSIST_DIR)
    
    
    query_engine = index.as_query_engine()

    # Interactive prompt loop: ask questions against the index until the
    # user types 'exit' (or closes stdin).
    while True:
        try:
            prompt = input("Enter a prompt (or 'exit' to quit): ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C previously crashed the script; exit cleanly.
            break

        prompt = prompt.strip()
        if prompt == 'exit':
            break
        if not prompt:
            # Skip blank input instead of sending an empty (billable) query.
            continue

        response = query_engine.query(prompt)
        print(response)