Commit 3e2f4135 authored by Embruch, Gerd

switched over to prompting multiple models at once

parent c40c0cab
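
In short, the commit drops the single-model flow (one Ollama instance, one llm.invoke(prompt) call) and instead pulls a comma-separated list of models, wraps each one in an Ollama instance and runs the same prompt through all of them via LangChain's ModelLaboratory. A minimal sketch of that pattern; the model names below are placeholders, not taken from the commit:

from langchain_community.llms import Ollama
from langchain.prompts import PromptTemplate
from langchain.model_laboratory import ModelLaboratory

# placeholder model names; the actual script reads them from the LLM_NAMES env variable
llms = [Ollama(model=name, temperature=0) for name in ["llama2", "mistral"]]
prompt = PromptTemplate(input_variables=["question"], template="Answer briefly: {question}")
lab = ModelLaboratory.from_llms(llms, prompt=prompt)
# prints each model's answer to the same question, one after another
lab.compare("How tall is the tallest pyramid in Egypt?")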
 import os
 import subprocess
 from pathlib import Path
+# enables counting list items
+from operator import length_hint
 # .env parser
 from dotenv import load_dotenv
 # colored print
@@ -9,6 +10,8 @@ from colorist import Color, BrightColor, bright_yellow, magenta, red, green
 # langchain stuff
 from langchain_community.llms import Ollama
 from langchain.prompts import PromptTemplate
+from langchain.model_laboratory import ModelLaboratory
 
 ##################################################
 ### PREPARING & STARTING
@@ -24,34 +27,31 @@ if not os.path.isfile('./.env'):
 load_dotenv()
 ##########
-# INSTALLING LLM
+# INSTALLING LLMS
 ##########
-print(f"installing llm <{os.environ['llmName']}>. This can take a while, depending on size and if already installed.")
-
-# create download folder
-Path(os.environ['OLLAMA_MODELS']).mkdir(parents=True, exist_ok=True)
-
-process = subprocess.run(["ollama","pull" , f"{os.environ['llmName']}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-# on error
-if process.returncode != 0:
-    magenta(process.stderr.decode('utf8'))
-    red(f"ABORTING: Unable to install the requested LLM")
-    os._exit(1)
-print(process.stdout.decode('UTF8'))
-llm = Ollama(
-    model=os.environ['llmName'],
-    temperature=0,
-    # callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
-)
-# write down the question to ask
-question="What tall is the tallest pyramid in egypt and what's it called?"
+# split llm names into array
+llmNames = os.environ['LLM_NAMES'].split(',')
+green(f'Installing {length_hint(llmNames)} LLMs. This can take a while, depending on installation status, size and bandwidth.')
+# loop through llm names
+for idx, llmName in enumerate(llmNames):
+    print(f"{idx+1}/{length_hint(llmNames)}: {llmName}")
+    # install
+    process = subprocess.run(["ollama","pull" , f"{llmName}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    # on error
+    if process.returncode != 0:
+        red(process.stderr.decode('utf8'))
+        # remove name from array
+        llmNames.pop(idx)
+
+# exit if no LLM could be loaded
+if length_hint(llmNames) == 0:
+    red(f"ABORTING: Unable to install even one of the requested LLMs")
+    os._exit(1)
 
 ##################################################
-### ASK THE LLM
+### DEFINING THE PROMPT
 ##################################################
-green('prompting')
+green('preparing the prompt specifications')
 # define template
 prompt_template = PromptTemplate(
     input_variables=['question'],
@@ -63,9 +63,18 @@ prompt_template = PromptTemplate(
     ---------------------
     """
 )
-# create prompt by injecting the question
-prompt = prompt_template.format(question=question)
-# run the query
-response = llm.invoke(prompt)
-# return the response
-print(f"Question: {Color.MAGENTA}{question}{Color.OFF}\nAnswer: {BrightColor.MAGENTA}{response}{Color.OFF}\n")
+
+##################################################
+### RUNNING THE TEST
+##################################################
+# define LLMs to run with their specs
+localLLMs = []
+for idx, llmName in enumerate(llmNames):
+    localLLMs.append(Ollama(
+        model=llmName,
+        temperature=os.environ['OVERAL_TEMPERATURE']
+    ))
+# setup 'laboratory'
+lab = ModelLaboratory.from_llms(localLLMs, prompt=prompt_template)
+# run prompt through all llms
+lab.compare(os.environ['QUESTION'])
\ No newline at end of file
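
The reworked script no longer reads a single llmName; it expects at least LLM_NAMES, OVERAL_TEMPERATURE and QUESTION in the .env file. A possible example, with illustrative values that are not part of the commit:

# comma-separated list of Ollama models to pull and compare
LLM_NAMES=llama2,mistral
# temperature applied to every model (read from os.environ as a string, so keep it a plain number)
OVERAL_TEMPERATURE=0
# the question that is run through all models
QUESTION=How tall is the tallest pyramid in Egypt and what is it called?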