Commit 3e0c3f7a authored by Embruch, Gerd

switched to env file

parent 7b40039c
# API key for OpenAI
OPENAI_API_KEY="sk-gibberish"
# a higher temperature allows the LLM to be more creative (value between 0.0 and 1.0)
OVERAL_TEMPERATURE=0.1
\ No newline at end of file
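These two settings are consumed in `main.py` via python-dotenv. A minimal sketch of the read path, assuming the `.env` above sits in the working directory (note that `os.environ` values are strings, so the temperature needs a cast):

```python
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from ./.env into the process environment
api_key = os.environ['OPENAI_API_KEY']
temperature = float(os.environ['OVERAL_TEMPERATURE'])  # env values are strings
```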
.venv
.env
__pycache__
\ No newline at end of file
secret_key.py
\ No newline at end of file
@@ -14,8 +14,9 @@ cp ./secret_key.py.template ./secret_key.py
populate the `secret_key.py` file with proper information
# Start
Each segment of the file `basics.py` represents a step.
Each segment of the file `main.py` represents a step.
Each step is meant to run independently, so you uncomment a single segment and run `python3 main.py`, as sketched below.
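For orientation, a segment in `main.py` is a banner comment followed by commented-out code that you enable for that step. A hypothetical segment might look like this (the banner is taken from the file; the prompt text is illustrative):

```python
##############################################
# SIMPLE PROMPT TO OPENAI
##############################################
# answer = llm.invoke("Name three use cases for LangChain")
# print(answer)
```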
# Sources
- [YT: LangChain Crash Course For Beginners | LangChain Tutorial](https://www.youtube.com/watch?v=nAmC7SoVLd8)
- [YT: Comparing LLMs with LangChain](https://www.youtube.com/watch?v=rFNG0MIEuW0)
\ No newline at end of file
@@ -2,14 +2,17 @@
# PREPARE FOR FOLLOWING STEP(S)
##############################################
# import the OpenAI API key from ./secret_key.py
from secret_key import openapi_key
import os
os.environ['OPENAI_API_KEY'] = openapi_key
# define llm
# .env parser
from dotenv import load_dotenv
# Load environment variables from .env file
if not os.path.isfile('./.env'):
    raise RuntimeError("Aborting: No .env file found.")
load_dotenv()
# define llm for most steps
from langchain_openai import OpenAI
# a higher temperature allows the LLM to be more creative (value between 0.0 and 1.0)
llm = OpenAI(temperature=0)
llm = OpenAI(temperature=float(os.environ['OVERAL_TEMPERATURE']))  # env values are strings
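# A hypothetical hardening (sketch, not in this file): os.environ raises KeyError
# if the variable is missing, so a fallback default keeps the script usable:
# llm = OpenAI(temperature=float(os.getenv('OVERAL_TEMPERATURE', '0.1')))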
##############################################
# SIMPLE PROMPT TO OPENAI
@@ -181,7 +184,7 @@ print(name)
##############################################
# from langchain.chains import ConversationChain
# from langchain.memory import ConversationBufferWindowMemory
# # initiate (k = count of how many historic conversations shall be sent on request)
# # initiate (k = count of how many historic conversation exchanges (question:answer pairs) shall be sent with each request)
# memory = ConversationBufferWindowMemory(k=1)
# conv = ConversationChain(llm=llm, memory=memory)
@@ -191,4 +194,5 @@ print(name)
# print(answer)
# answer = conv.invoke("Who was the captain of the winning team?")
# print(answer)
# # all the retained history is sent on each request; this can cost a lot of tokens (and money)
\ No newline at end of file
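To make the k=1 window concrete, a small sketch (using the same `langchain.memory` import as the segment above) showing that only the most recent question:answer pair survives:

```python
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "Which team won the 2011 Cricket World Cup?"},
                    {"output": "India."})
memory.save_context({"input": "Who was the captain of the winning team?"},
                    {"output": "MS Dhoni."})
# With k=1 only the latest exchange is kept, so the first one
# is no longer part of the context sent to the LLM
print(memory.load_memory_variables({}))
```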
@@ -7,12 +7,15 @@ certifi==2024.2.2
charset-normalizer==3.3.2
dataclasses-json==0.6.4
distro==1.9.0
filelock==3.13.4
frozenlist==1.4.1
fsspec==2024.3.1
google-search-results==2.4.2
greenlet==3.0.3
h11==0.14.0
httpcore==1.0.5
httpx==0.27.0
huggingface-hub==0.22.2
idna==3.7
jsonpatch==1.33
jsonpointer==2.4
@@ -32,6 +35,7 @@ orjson==3.10.0
packaging==23.2
pydantic==2.7.0
pydantic_core==2.18.1
python-dotenv==1.0.1
PyYAML==6.0.1
regex==2023.12.25
requests==2.31.0
......
openapi_key="sk-hj8dfs8f9dsfsdgibberish"
\ No newline at end of file