import { Router } from "express";
import { getStatus, getModel, getModels, deleteModel, installModel, checkRequestedModel, getChat, getChats } from "../controllers/AI.js";
import { chat } from "../utils/handleAI.js";
import { chatSchema, deleteModelSchema, getModelSchema, getModelsSchema, installModelSchema } from "../validationSchemes/AI.js";
import { validate } from "../utils/handleValidations.js";
import { gateKeeper } from "../controllers/Auth.js";
import { verifyAccessToken } from "../utils/handleTokens.js";
const router = Router();
/**
* GET STATUS
* get status of LLM backend
*
* @return {bool} wether LLM backend is running or not
*/
router.get('/status', getStatus);
/**
 * GET MODELS
 * get list of installed models
 * filterable by regex
 *
 * @header {authorization} Bearer [required] access token
 * @param {string} filter [required] filter for model names, regex & empty allowed
 *
 * @return {object} list of found models
 */
router.post(
  '/models',
  verifyAccessToken,
  validate(getModelsSchema),
  getModels,
);
/**
 * GET MODEL
 * get details of a specific model
 *
 * @header {authorization} Bearer [required] access token
 * @param {string} model [required] model name
 *
 * @return {object} model details
 */
router.post(
  '/model',
  verifyAccessToken,
  validate(getModelSchema),
  getModel,
);
/**
 * INSTALL MODEL
 * installs the given model - if it is available in the LLM backends catalogue
 *
 * @header {authorization} Bearer [required] access token
 * @param {string} model [required] model name
 *
 * @return {string} installation response
 */
router.put(
  '/models',
  verifyAccessToken,
  gateKeeper,
  validate(installModelSchema),
  installModel,
);
/**
* DELETE MODEL
* deletes the given model - if it is installed
*
* Authorization: Bearer {{token}}
* @header {authorization} Bearer [required] access token
* @param {string} model [required] model name
*
* @return {string} deletion response
*/
router.delete('/models', verifyAccessToken, gateKeeper, validate(deleteModelSchema), deleteModel);
/**
* CHAT
* send a message to the LLM Backend
* embeddings will be used as context
* if there's already a chat history record, it will be injected as context, too
* the first message will be send to a LLM to create a title for the chat history record
* the received answer, along with the users message, will be saved/added as/to chat history record
*
* Authorization: Bearer {{token}}
* @header {authorization} Bearer [required] access token
* @param {string} model [required] model name
* @param {string} text [required] input for AI to handle with
* @param {string} chatId [optional] ID of the chat history record
*
* @return {object} AI response & chat history record
*/
router.post('/chat', verifyAccessToken, validate(chatSchema), checkRequestedModel, getChat, chat);
/**
* GET CONVERSATIONS
*
* @header {authorization} Bearer [required] access token
* @return {object} list of found conversations, ordered by updated
*/
// TODO sort chats by createdAt
router.get('/chats', verifyAccessToken, getChats);
export default router;