Commit 3e2f4135 authored by Embruch, Gerd

switched over to prompting multiple models at once

parent c40c0cab
import os
import subprocess
from pathlib import Path
# enables counting list items
from operator import length_hint
# .env parser
from dotenv import load_dotenv
# colored print
from colorist import Color, BrightColor, bright_yellow, magenta, red, green
# langchain stuff
from langchain_community.llms import Ollama
from langchain.prompts import PromptTemplate
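# ModelLaboratory runs the same prompt against several LLMs so their outputs can be compared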
from langchain.model_laboratory import ModelLaboratory
##################################################
### PREPARING & STARTING
##################################################

# abort if the .env config file is missing
if not os.path.isfile('./.env'):
    red("ABORTING: no .env file found")
    os._exit(1)
# parse the .env file into environment variables
load_dotenv()
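# the script expects these keys in .env (values below are illustrative assumptions):
#   OLLAMA_MODELS=./models
#   LLM_NAMES=llama2,mistral
#   OVERAL_TEMPERATURE=0
#   QUESTION=How tall is the tallest pyramid in Egypt and what is it called?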
##########
# INSTALLING LLMS
##########
print(f"installing llm <{os.environ['llmName']}>. This can take a while, depending on size and if already installed.")
# create download folder
Path(os.environ['OLLAMA_MODELS']).mkdir(parents=True, exist_ok=True)
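# caveat (assumption about the setup): `ollama pull` is served by the ollama daemon,
# which reads OLLAMA_MODELS at startup; a daemon already running with a different
# models directory will keep using its own location, not this folder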
process = subprocess.run(["ollama","pull" , f"{os.environ['llmName']}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# split llm names into array
llmNames = os.environ['LLM_NAMES'].split(',')
total = length_hint(llmNames)
green(f'Installing {total} LLMs. This can take a while, depending on installation status, size and bandwidth.')
# loop through llm names (over a copy, so failed names can be removed safely)
for idx, llmName in enumerate(list(llmNames)):
    print(f"{idx+1}/{total}: {llmName}")
    # install
    process = subprocess.run(["ollama", "pull", llmName], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # on error: report and remove the name from the array
    if process.returncode != 0:
        red(process.stderr.decode('utf8'))
        llmNames.remove(llmName)
        continue
    print(process.stdout.decode('UTF8'))
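# note: pulling an already-installed model only checks for updates, so re-runs are fast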
# exit if no LLM could be loaded
if length_hint(llmNames) == 0:
    red("ABORTING: Unable to install even one of the requested LLMs")
    os._exit(1)
##################################################
### DEFINING THE PROMPT
##################################################
green('preparing the prompt specifications')
# define template
prompt_template = PromptTemplate(
    input_variables=['question'],
    template="""
    ...
    ---------------------
    """
)
##################################################
### RUNNING THE TEST
##################################################
# define LLMs to run with their specs
localLLMs = []
for llmName in llmNames:
    localLLMs.append(Ollama(
        model=llmName,
        # env values are strings, so cast the temperature to float
        temperature=float(os.environ['OVERAL_TEMPERATURE'])
    ))
# setup 'laboratory'
lab = ModelLaboratory.from_llms(localLLMs, prompt=prompt_template)
# run prompt through all llms
lab.compare(os.environ['QUESTION'])
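# lab.compare() prints the input once, then each model's settings and its answer in a
# distinct color, so the outputs can be compared side by side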