diff --git a/.env.template b/.env.template
new file mode 100644
index 0000000000000000000000000000000000000000..96bd1fc3e3433c842873fe7642d35570b50c22b8
--- /dev/null
+++ b/.env.template
@@ -0,0 +1,4 @@
+# API key for OpenAI
+OPENAI_API_KEY="sk-gibberish"
+# a higher temperature allows the LLM to be more creative (value between 0.0 and 1.0)
+OVERAL_TEMPERATURE=0.1
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 9f96e9d3d0660395d3c954718080f11f467c2c05..838e0c2baaec41c42e3170ec1556e086f4128c47 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,3 @@
 .venv
-__pycache__
-secret_key.py
\ No newline at end of file
+.env
+__pycache__
\ No newline at end of file
diff --git a/README.md b/README.md
index 6dfd62c4def62cf09971f3085eb59cb4af358da8..c3bb7f4b90bb724db4bc3e40e2b827e9fe5954c4 100644
--- a/README.md
+++ b/README.md
@@ -14,8 +14,9 @@ cp ./secret_key.py.template ./secret_key.py
-populate the `secret_key.py` file with proper information
+copy `./.env.template` to `./.env` and populate it with proper information
 
 # Start
-Each segment of the file `basics.py` represents a step.
+Each segment of the file `main.py` represents a step.
-Each step is meant to run independently. So you uncomment a single segment, and run `python3 basics.py`
+Each step is meant to run independently, so uncomment a single segment and run `python3 main.py`
 
 # Sources
 - [YT: LangChain Crash Course For Beginners | LangChain Tutorial ](https://www.youtube.com/watch?v=nAmC7SoVLd8)
+- [YT: Comparing LLMs with LangChain](https://www.youtube.com/watch?v=rFNG0MIEuW0)
\ No newline at end of file
diff --git a/basics.py b/main.py
similarity index 94%
rename from basics.py
rename to main.py
index 5fe1e93ebffc073b464b4bcf38ed4765b10f30cc..db4e4fc2ffa39a38a79baa80b9efcaf579645c70 100644
--- a/basics.py
+++ b/main.py
@@ -2,14 +2,17 @@
 # PREPARE FOR FOLLOWING STEP(S)
 ##############################################
-# import openai api key from ./secret_key.py
+# load the OpenAI API key from ./.env
-from secret_key import openapi_key
 import os
-os.environ['OPENAI_API_KEY'] = openapi_key
-
-# define llm
+# .env parser
+from dotenv import load_dotenv
+# Load environment variables from .env file
+if not os.path.isfile('./.env'):
+  raise RuntimeError("Aborting: No .env file found.")
+load_dotenv()
+
+# define llm for most steps
 from langchain_openai import OpenAI
-# higher temperature allows llm to be more creative (value between 0.0 and 1.0)
-llm = OpenAI(temperature=0)
+llm = OpenAI(temperature=float(os.environ['OVERAL_TEMPERATURE']))  # env values are strings, so cast to float
 
 ##############################################
 #  SIMPLE PROMPT TO OPENAI
@@ -181,7 +184,7 @@ print(name)
 ##############################################
 # from langchain.chains import ConversationChain
 # from langchain.memory import ConversationBufferWindowMemory
-# # initiate (k=count of how many historic conversations shall be send on request)
+# # initiate (k=count of how many historic conversations (question/answer pairs) will be sent with each request)
 # memory = ConversationBufferWindowMemory(k=1)
 # conv = ConversationChain(llm=llm, memory=memory)
 
@@ -191,4 +194,5 @@ print(name)
 # print(answer)
 # answer = conv.invoke("Who was the captain of the winning team?")
 # print(answer)
-# # all the history is send on each request. Thi can cost a lot of tokens (and money)
\ No newline at end of file
+# # all the history is sent on each request. This can cost a lot of tokens (and money)
+
diff --git a/requirements.txt b/requirements.txt
index ff1c08fdedc8f3bf1d05bb06fb85a6f44ff4a928..7b75b09ca375cc0556871fc3fdd3355a76a5bb46 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,12 +7,15 @@ certifi==2024.2.2
 charset-normalizer==3.3.2
 dataclasses-json==0.6.4
 distro==1.9.0
+filelock==3.13.4
 frozenlist==1.4.1
+fsspec==2024.3.1
 google-search-results==2.4.2
 greenlet==3.0.3
 h11==0.14.0
 httpcore==1.0.5
 httpx==0.27.0
+huggingface-hub==0.22.2
 idna==3.7
 jsonpatch==1.33
 jsonpointer==2.4
@@ -32,6 +35,7 @@ orjson==3.10.0
 packaging==23.2
 pydantic==2.7.0
 pydantic_core==2.18.1
+python-dotenv==1.0.1
 PyYAML==6.0.1
 regex==2023.12.25
 requests==2.31.0
diff --git a/secret_key.py.template b/secret_key.py.template
deleted file mode 100644
index c473394150eab879bc844980a321069067182386..0000000000000000000000000000000000000000
--- a/secret_key.py.template
+++ /dev/null
@@ -1 +0,0 @@
-openapi_key="sk-hj8dfs8f9dsfsdgibberish"
\ No newline at end of file