    # basics.py
    ##############################################
    # PREPARE FOR FOLLOWING STEP(S)
    ##############################################
    # import the OpenAI API key from ./secret_key.py
    from secret_key import openapi_key
    import os
    os.environ['OPENAI_API_KEY'] = openapi_key
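    # secret_key.py is not part of the repo; it is expected to define the key, e.g.:
    # openapi_key = 'sk-...'  # placeholder, use your own key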
    
    # define llm
    from langchain_openai import OpenAI
    # temperature controls creativity: 0 is deterministic, higher values are more random (the OpenAI API accepts 0.0 to 2.0)
    llm = OpenAI(temperature=0)
    
    ##############################################
    #  SIMPLE PROMPT TO OPENAI
    ##############################################
    name = llm.invoke("I want to open a restaurant for Indian food. Suggest a fancy name for this.")
    print(name)
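
    # # the OpenAI class used above wraps the completion endpoint and returns a plain
    # # string; a chat model works the same way but returns a message object
    # # (optional sketch; ChatOpenAI ships in the same langchain_openai package):
    # from langchain_openai import ChatOpenAI
    # chat = ChatOpenAI(temperature=0)
    # print(chat.invoke("Suggest a fancy name for an Indian restaurant.").content)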
    
    ##############################################
    # CREATE A PROMPT TEMPLATE
    ##############################################
    # from langchain.prompts import PromptTemplate
    
    # # set the template
    # prompt_template_name = PromptTemplate(
    #   input_variables=['cuisine'],
    #   template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
    # )
    
    # # fill the placeholder with a concrete value
    # prompt = prompt_template_name.format(cuisine="Mexican")
    # print(prompt)
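    # # prints: I want to open a restaurant for Mexican food. Suggest a fancy name for this.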
    
    
    ##############################################
    # CREATE A CHAIN
    ############################################## 
    # from langchain.chains import LLMChain
    # from langchain.prompts import PromptTemplate
    
    # # set the template
    # prompt_template_name = PromptTemplate(
    #   input_variables=['cuisine'],
    #   template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
    # )
    # # create the chain link
    # restaurant_name_chain = LLMChain(llm=llm, prompt=prompt_template_name)
    # answer = restaurant_name_chain.invoke("American")
    # print('answer:',answer)
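    # # LLMChain.invoke returns a dict of the inputs plus the default output key 'text',
    # # roughly: {'cuisine': 'American', 'text': '...suggested name...'}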
    
    ##############################################
    # CREATE A SIMPLE SEQUENTIAL CHAIN
    # runs the links in order: each link's output is piped in as the next link's input
    ############################################## 
    # from langchain.chains import LLMChain
    # from langchain.prompts import PromptTemplate
    
    # # set the template 1
    # prompt_template_name = PromptTemplate(
    #   input_variables=['cuisine'],
    #   template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
    # )
    # # create the chain link 1
    # restaurant_name_link = LLMChain(llm=llm, prompt=prompt_template_name)
    
    # # set the template 2
    # prompt_template_items = PromptTemplate(
    #   input_variables=['restaurant_name'],
    #   template = "Suggest ten menu items for {restaurant_name}. Return it as comma separated list."
    # )
    # # create the chain link 2
    # food_items_link = LLMChain(llm=llm, prompt=prompt_template_items)
    
    # # chaining the links
    # from langchain.chains import SimpleSequentialChain
    # chain = SimpleSequentialChain(chains = [restaurant_name_link, food_items_link])
    # response = chain.invoke("Korean")
    # # but the output only contains the answer to the very last prompt
    # # to also get the intermediate results, use a SequentialChain (not a SIMPLE sequential chain)
    # print(response)
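    # # SimpleSequentialChain returns a dict of the form {'input': 'Korean', 'output': '...'},
    # # so the restaurant name produced by link 1 is lost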
    
    ##############################################
    # CREATE A SEQUENTIAL CHAIN
    # can have multiple inputs and outputs
    ############################################## 
    # from langchain.chains import LLMChain
    # from langchain.prompts import PromptTemplate
    
    # # set the template 1
    # prompt_template_name = PromptTemplate(
    #   input_variables=['cuisine'],
    #   template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
    # )
    # # create the chain link 1
    # restaurant_name_link = LLMChain(llm=llm, prompt=prompt_template_name, output_key='restaurant_name')
    
    # # set the template 2
    # prompt_template_items = PromptTemplate(
    #   input_variables=['restaurant_name'],
    #   template = "Suggest ten menu items for {restaurant_name}. Return it as comma separated list."
    # )
    # # create the chain link 2
    # food_items_link = LLMChain(llm=llm, prompt=prompt_template_items, output_key='menu_items')
    
    # # chaining the links
    # from langchain.chains import SequentialChain
    # chain = SequentialChain(
    #   chains = [restaurant_name_link, food_items_link],
    #   input_variables=['cuisine'],
    #   output_variables=['restaurant_name', 'menu_items'],
    #   )
    # response = chain.invoke({'cuisine': 'Greek'})
    # print(response)
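    # # response now contains every declared variable, roughly:
    # # {'cuisine': 'Greek', 'restaurant_name': '...', 'menu_items': '...'}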
    
    ##############################################
    # AGENTS
    # agents let the LLM call external tools & data sources
    ############################################## 
    # from langchain.agents import AgentType, initialize_agent, load_tools
    # from secret_key import serpapi_api_key
    
    # os.environ['SERPAPI_API_KEY'] = serpapi_api_key
    
    # # load an agent
    # # before using serpapi run 'pip install google-search-results numexpr'
    # tools= load_tools(['serpapi','llm-math'], llm=llm)
    # agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
    # response = agent.invoke("What was the GDP of the US in 2022?")
    # print(response)
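    # # ZERO_SHOT_REACT_DESCRIPTION makes the LLM pick a tool on each step (ReAct style)
    # # based only on the tools' descriptions; verbose=True prints the reasoning steps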
    
    
    ##############################################
    # MEMORY
    # records the whole conversation in a buffer
    ##############################################
    # from langchain.memory import ConversationBufferMemory
    # from langchain.chains import LLMChain
    # from langchain.prompts import PromptTemplate
    
    # # initiate memory instance
    # memory = ConversationBufferMemory()
    # # set the template
    # prompt_template_name = PromptTemplate(
    #   input_variables=['cuisine'],
    #   template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
    # )
    # # create the chain link
    # restaurant_name_chain = LLMChain(llm=llm, prompt=prompt_template_name, memory=memory)
    # answer = restaurant_name_chain.invoke("American")
    # print('answer:',answer)
    
    # # run once more
    # answer = restaurant_name_chain.invoke("German")
    # print('answer:',answer)
    
    # # look at memory now
    # print(restaurant_name_chain.memory.buffer)
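    # # the buffer is plain text, roughly:
    # #   Human: American
    # #   AI: ...
    # #   Human: German
    # #   AI: ...
    # # note: this template has no history placeholder, so the conversation is recorded
    # # but never fed back into the prompt; the ConversationChain below does that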
    
    ##############################################
    # CONVERSATION CHAIN
    # appends the whole history to each request
    ##############################################
    # from langchain.chains import ConversationChain
    # # initiate
    # conv = ConversationChain(llm=llm)
    # # check default template
    # # print(conv.prompt.template)
    
    # answer = conv.invoke("Who won the first cricket world cup?")
    # print(answer)
    # answer = conv.invoke("What's 5+5?")
    # print(answer)
    # answer = conv.invoke("Who was the captain of the winning team?")
    # print(answer)
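    # # each answer is a dict with the keys 'input', 'history' and 'response'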
    # # the whole history is sent with each request. This can cost a lot of tokens (and money)
    
    ##############################################
    # CONVERSATION CHAIN WITH BUFFER WINDOW MEMORY
    # appends a defined amount of history to each request
    ##############################################
    # from langchain.chains import ConversationChain
    # from langchain.memory import ConversationBufferWindowMemory
    # # initiate (k = number of past exchanges to keep and send with each request)
    # memory = ConversationBufferWindowMemory(k=1)
    # conv = ConversationChain(llm=llm, memory=memory)
    
    # answer = conv.invoke("Who won the first cricket world cup?")
    # print(answer)
    # answer = conv.invoke("What's 5+5?")
    # print(answer)
    # answer = conv.invoke("Who was the captain of the winning team?")
    # print(answer)
    # # with k=1 only the most recent exchange is sent, so the model can no longer
    # # answer the captain question, but token usage stays bounded