Skip to content
Snippets Groups Projects
Commit 9b19b7cc authored by Embruch, Gerd's avatar Embruch, Gerd
Browse files

Initial commit

parents
Branches
Tags
No related merge requests found
.venv
__pycache__
secret_key.py
\ No newline at end of file
# Prerequisites
- python3 installed
- [openAI API Key](https://auth.openai.com/)
# Install
```
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
cp ./secret_key.py.template ./secret_key.py
```
# Configure
populate the `secret_key.py` file with proper information
# Start
Each segment of the file `basics.py` represents a step.
Each step is meant to run independently. So you uncomment a single segment, and run `python3 basics.py`
# Sources
- [YT: LangChain Crash Course For Beginners | LangChain Tutorial ](https://www.youtube.com/watch?v=nAmC7SoVLd8)
basics.py 0 → 100644
##############################################
# PREPARE FOR FOLLOWING STEP(S)
##############################################
# Load the OpenAI API key from the (git-ignored) ./secret_key.py and expose it
# through the environment variable that langchain_openai reads.
from secret_key import openapi_key
import os
os.environ['OPENAI_API_KEY'] = openapi_key
# define llm
from langchain_openai import OpenAI
# temperature=0 keeps completions as deterministic as possible; higher values
# (toward 1.0) allow the llm to be more creative.
llm = OpenAI(temperature=0)
##############################################
# SIMPLE PROMPT TO OPENAI
##############################################
# Ask the llm a single free-form question and print its completion.
# NOTE: calling the llm directly as llm("...") is the deprecated __call__ style
# in LangChain 0.1.x; use .invoke() instead, consistent with every other
# segment in this file (the chain/agent/memory steps below all use .invoke()).
name = llm.invoke("I want to open a restaurant for Indian food. Suggest a fancy name for this.")
print(name)
##############################################
# CREATE A PROMPT TEMPLATE
##############################################
# from langchain.prompts import PromptTemplate
# # set the template
# prompt_template_name = PromptTemplate(
# input_variables=['cuisine'],
# template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
# )
# # pipe the value
# prompt = prompt_template_name.format(cuisine="Mexican")
# print(prompt)
##############################################
# CREATE A CHAIN
##############################################
# from langchain.chains import LLMChain
# from langchain.prompts import PromptTemplate
# # set the template
# prompt_template_name = PromptTemplate(
# input_variables=['cuisine'],
# template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
# )
# # create the chain link
# restaurant_name_chain = LLMChain(llm=llm, prompt=prompt_template_name)
# answer = restaurant_name_chain.invoke("American")
# print('answer:',answer)
##############################################
# CREATE A SIMPLE SEQUENTIAL CHAIN
# a chain is aware of the steps/links and uses them to sharpen the output of the following chain links
##############################################
# from langchain.chains import LLMChain
# from langchain.prompts import PromptTemplate
# # set the template 1
# prompt_template_name = PromptTemplate(
# input_variables=['cuisine'],
# template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
# )
# # create the chain link 1
# restaurant_name_link = LLMChain(llm=llm, prompt=prompt_template_name)
# # set the template 2
# prompt_template_items = PromptTemplate(
# input_variables=['restaurant_name'],
# template = "Suggest ten menu items for {restaurant_name}. Return it as comma separated list."
# )
# # create the chain link 2
# food_items_link = LLMChain(llm=llm, prompt=prompt_template_items)
# # chaining the links
# from langchain.chains import SimpleSequentialChain
# chain = SimpleSequentialChain(chains = [restaurant_name_link, food_items_link])
# response = chain.invoke("Korean")
# # but the output only provides the answer to the very last prompt
# # to avoid this, use a sequential chain (not a SIMPLE sequential chain)
# print(response)
##############################################
# CREATE A SEQUENTIAL CHAIN
# can have multiple inputs and outputs
##############################################
# from langchain.chains import LLMChain
# from langchain.prompts import PromptTemplate
# # set the template 1
# prompt_template_name = PromptTemplate(
# input_variables=['cuisine'],
# template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
# )
# # create the chain link 1
# restaurant_name_link = LLMChain(llm=llm, prompt=prompt_template_name, output_key='restaurant_name')
# # set the template 2
# prompt_template_items = PromptTemplate(
# input_variables=['restaurant_name'],
# template = "Suggest ten menu items for {restaurant_name}. Return it as comma separated list."
# )
# # create the chain link 2
# food_items_link = LLMChain(llm=llm, prompt=prompt_template_items, output_key='menu_items')
# # chaining the links
# from langchain.chains import SequentialChain
# chain = SequentialChain(
# chains = [restaurant_name_link, food_items_link],
# input_variables=['cuisine'],
# output_variables=['restaurant_name', 'menu_items'],
# )
# response = chain({'cuisine': 'Greek'})
# print(response)
##############################################
# AGENTS
# agents are a way to use external functions & sources
##############################################
# from langchain.agents import AgentType, initialize_agent, load_tools
# from secret_key import serpapi_api_key
# os.environ['SERPAPI_API_KEY'] = serpapi_api_key
# # load an agent
# # before using serpapi run 'pip install google-search-results numexpr'
# tools= load_tools(['serpapi','llm-math'], llm=llm)
# agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# response = agent.invoke("What was the GDP of the US in 2022?")
# print(response)
##############################################
# MEMORY
# keeps awareness of the whole conversation
##############################################
# from langchain.memory import ConversationBufferMemory
# from langchain.chains import LLMChain
# from langchain.prompts import PromptTemplate
# # initiate memory instance
# memory = ConversationBufferMemory()
# # set the template
# prompt_template_name = PromptTemplate(
# input_variables=['cuisine'],
# template = "I want to open a restaurant for {cuisine} food. Suggest a fancy name for this."
# )
# # create the chain link
# restaurant_name_chain = LLMChain(llm=llm, prompt=prompt_template_name, memory=memory)
# answer = restaurant_name_chain.invoke("American")
# print('answer:',answer)
# # run once more
# answer = restaurant_name_chain.invoke("German")
# print('answer:',answer)
# # look at memory now
# print(restaurant_name_chain.memory.buffer)
##############################################
# CONVERSATION CHAIN
# appends whole history to each request
##############################################
# from langchain.chains import ConversationChain
# # initiate
# conv = ConversationChain(llm=llm)
# # check default template
# # print(conv.prompt.template)
# answer = conv.invoke("Who won the first cricket world cup?")
# print(answer)
# answer = conv.invoke("What's 5+5?")
# print(answer)
# answer = conv.invoke("Who was the captain of the winning team?")
# print(answer)
# # all the history is sent on each request. This can cost a lot of tokens (and money)
##############################################
# CONVERSATION CHAIN WITH BUFFER WINDOW MEMORY
# appends a defined amount of history to each request
##############################################
# from langchain.chains import ConversationChain
# from langchain.memory import ConversationBufferWindowMemory
# # initiate (k=count of how many historic conversations shall be send on request)
# memory = ConversationBufferWindowMemory(k=1)
# conv = ConversationChain(llm=llm, memory=memory)
# answer = conv.invoke("Who won the first cricket world cup?")
# print(answer)
# answer = conv.invoke("What's 5+5?")
# print(answer)
# answer = conv.invoke("Who was the captain of the winning team?")
# print(answer)
# # all the history is sent on each request. This can cost a lot of tokens (and money)
\ No newline at end of file
aiohttp==3.9.4
aiosignal==1.3.1
annotated-types==0.6.0
anyio==4.3.0
attrs==23.2.0
certifi==2024.2.2
charset-normalizer==3.3.2
dataclasses-json==0.6.4
distro==1.9.0
frozenlist==1.4.1
google-search-results==2.4.2
greenlet==3.0.3
h11==0.14.0
httpcore==1.0.5
httpx==0.27.0
idna==3.7
jsonpatch==1.33
jsonpointer==2.4
langchain==0.1.16
langchain-community==0.0.32
langchain-core==0.1.42
langchain-openai==0.1.3
langchain-text-splitters==0.0.1
langsmith==0.1.47
marshmallow==3.21.1
multidict==6.0.5
mypy-extensions==1.0.0
numexpr==2.10.0
numpy==1.26.4
openai==1.17.1
orjson==3.10.0
packaging==23.2
pydantic==2.7.0
pydantic_core==2.18.1
PyYAML==6.0.1
regex==2023.12.25
requests==2.31.0
sniffio==1.3.1
SQLAlchemy==2.0.29
tenacity==8.2.3
tiktoken==0.6.0
tqdm==4.66.2
typing-inspect==0.9.0
typing_extensions==4.11.0
urllib3==2.2.1
yarl==1.9.4
# OpenAI API secret key — this is a placeholder; replace it with your real key
# (see https://auth.openai.com/). The README copies this template to
# secret_key.py, which is listed in .gitignore so the key is never committed.
openapi_key="sk-hj8dfs8f9dsfsdgibberish"
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment