diff --git a/controllers/AI.js b/controllers/AI.js
index c4517cddbe0f1a083092aadeec075ea868282213..a683661d839015e7d0ac0d18dcd7314292090eba 100644
--- a/controllers/AI.js
+++ b/controllers/AI.js
@@ -83,14 +83,11 @@ export const deleteModel = async (req, res, next) => {
  */
 export const getChat = async (req, res, next) => {
   // IF NO CHATID GIVEN
-
   if (!req.body.chatId) {
     try {
-      console.info(performance);
       // create chat and remember ID
-      performance.mark('create_chat:start');
       req.body.chatId = await createChat(req.body.model, req.body.input);
-      performance.mark('create_chat:end');
+
       // return
       return next();
     } catch (error) {
@@ -108,7 +105,9 @@ export const getChat = async (req, res, next) => {
     }
     // remember chat history
     // cite: https://js.langchain.com/v0.1/docs/modules/memory/chat_messages/custom/
+    performance.mark('mapStoredMessagesToChatMessages:start');
     req.body.chatHistory = mapStoredMessagesToChatMessages(record.chatHistory);
+    performance.mark('mapStoredMessagesToChatMessages:end');
     // go on
     next();
   } catch (error) {
@@ -138,11 +137,13 @@ export const getChats = async (req, res, next) => {
 */
 export const createChat = async (model, input) => {
   try {
+    performance.mark('createChat:start');
     // create chat title
     const title = await summarizeText(model, input);
     // create record
     const record = await createRecord(Chat, prefillDocumentObject(Chat, { title }));
     // return record id
+    performance.mark('createChat:end');
     return record.id;
   } catch (error) {
     throw error;
@@ -154,6 +155,7 @@ export const createChat = async (model, input) => {
  * CHECK IF REQUESTED MODEL IS AVAILABLE
  */
 export const checkRequestedModel = async (req, res, next) => {
+  performance.mark('checkRequestedModel:start');
   if (!req.body.model) {
     return res.status(500).json({ error: `No chat model requested.` });
   }
@@ -162,6 +164,7 @@ export const checkRequestedModel = async (req, res, next) => {
   if (!models.length) {
     return res.status(500).json({ error: `Chat model ${req.body.model} not found.` });
   }
+  performance.mark('checkRequestedModel:end');
   next();
 };
 
@@ -170,6 +173,7 @@ export const checkRequestedModel = async (req, res, next) => {
  */
 export const aiFilterModelsByName = async (strFilter = '') => {
   try {
+    performance.mark('aiFilterModelsByName:start');
     // fetch all available models
     const avail = await aiGetModels();
 
@@ -178,6 +182,8 @@ export const aiFilterModelsByName = async (strFilter = '') => {
     // set regex query
     const regex = new RegExp(strFilter, 'i');
     // filter models by regex query
-    return avail.models.filter((model) => regex.test(model.name));
+    const filtered = avail.models.filter((model) => regex.test(model.name));
+    performance.mark('aiFilterModelsByName:end');
+    return filtered;
   } catch (error) {
     throw error;
diff --git a/controllers/Embeddings.js b/controllers/Embeddings.js
index 9e058b436c55c6a78978b1d756304971f4bd119d..b7ad482a6b117a36f7adb4e409b77504b3da5006 100644
--- a/controllers/Embeddings.js
+++ b/controllers/Embeddings.js
@@ -290,7 +290,9 @@ export const isVectorDbAvailable = async () => {
   let heartbeat;
   // console.log('Checking VectorDB availability...');
   try {
+    performance.mark('isVectorDbAvailable:start');
     heartbeat = await chroma.heartbeat();
+    performance.mark('isVectorDbAvailable:end');
     return true;
   } catch (error) {
     return false;
@@ -301,6 +303,7 @@ export const isVectorDbAvailable = async () => {
 * CHECK IF VECTOR DB COLLECTION IS AVAILABLE
 */
 export const isCollectionAvailable = async () => {
+  performance.mark('isCollectionAvailable:start');
   // return false if vector db is not available
   if (!await isVectorDbAvailable()) {
     return false;
@@ -310,9 +313,12 @@
   // check if required collection exists
   if (collections.some(collection => collection.name === process.env['VECTOR_COLLECTION_NAME'])) {
     // return collection
-    return await chroma.getCollection({ name: process.env['VECTOR_COLLECTION_NAME'] });
+    const collection = await chroma.getCollection({ name: process.env['VECTOR_COLLECTION_NAME'] });
+    performance.mark('isCollectionAvailable:end');
+    return collection;
   }
   // return false if collection not found
+  performance.mark('isCollectionAvailable:end');
   return false;
 };
 
diff --git a/utils/handleAI.js b/utils/handleAI.js
index d025ea4ea1cccb291e98d2d41623b2fa300e57c5..2cdd639f9e832dff0e802c9cb1e13b04d0705ffc 100644
--- a/utils/handleAI.js
+++ b/utils/handleAI.js
@@ -77,6 +77,7 @@ export const aiDeleteModel = async (model) => {
  */
 export const summarizeText = async (model, input) => {
   try {
+    performance.mark('summarizeText:start');
     // define llm
     const llm = new ChatOllama({
       baseUrl: process.env['AI_API_URL'],
@@ -89,6 +90,7 @@ export const summarizeText = async (model, input) => {
     const chain = promptTemplate.pipe(llm);
     // invoke variable text & run chain
     const summary = await chain.invoke({ text: input });
+    performance.mark('summarizeText:end');
     return summary.content;
   } catch (error) {
     throw error;
@@ -104,6 +106,8 @@ export const chat = async (req, res, next) => {
   // sources: https://js.langchain.com/v0.1/docs/use_cases/question_answering/sources/#adding-sources
   // citations: https://js.langchain.com/v0.1/docs/use_cases/question_answering/citations/
 
+  performance.mark('chat:start');
+
   // test if collection is available
   const collection = await isCollectionAvailable();
   if (!collection) {
@@ -122,6 +126,7 @@ export const chat = async (req, res, next) => {
   // cite: https://js.langchain.com/v0.1/docs/get_started/quickstart/#conversational-retrieval-chain
   //  "[...]take in the most recent input (input) and the conversation history (chat_history) and use an LLM to generate a search query[...]"
   // #################
+  performance.mark('historyAwareRetrieverChain:start');
   // create a prompt that considers the chat history
   const historyAwarePrompt = ChatPromptTemplate.fromMessages([
     new MessagesPlaceholder("chat_history"),
@@ -134,11 +139,14 @@ export const chat = async (req, res, next) => {
     retriever,
     rephrasePrompt: historyAwarePrompt,
   });
+  performance.mark('historyAwareRetrieverChain:end');
 
 
   // #################
   // create the chat chain
   // #################
+  performance.mark('createStuffDocumentsChain:start');
+
   // create a prompt that uses the chat history
   const chatPrompt = ChatPromptTemplate.fromMessages([
     [
@@ -154,20 +162,25 @@ export const chat = async (req, res, next) => {
     prompt: chatPrompt,
     returnMessages: true
   });
+  performance.mark('createStuffDocumentsChain:end');
+
   // #################
   // combine chains to get result
   // #################
   // combine the two chains
+  performance.mark('createConversationalRetrievalChain:start');
   const conversationalRetrievalChain = await createRetrievalChain({
     retriever: historyAwareRetrieverChain,
     combineDocsChain: chatChain,
   });
-
+  performance.mark('createConversationalRetrievalChain:end');
+  performance.mark('invokeConversationalRetrievalChain:start');
   // finally ask the question
   const result = await conversationalRetrievalChain.invoke({
     chat_history: req.body.chatHistory ?? [],
     input: req.body.input,
   });
+  performance.mark('invokeConversationalRetrievalChain:end');
 
   // get source information and prepare to store it in the chat history
   // Answers from DocumentSource are prefixed with '<DS> '
@@ -175,6 +188,7 @@ export const chat = async (req, res, next) => {
   // BUG: prefixes are not always set correctly by the LLM
   // console.log('Answer: ', result.answer.substring(0, 15), '...');
   let sourceLocation;
+  performance.mark('setSourceLocation:start');
   if (result.answer.startsWith('<DS> ')) {
     const file = path.posix.basename(result.context[0].metadata.source);
     const posFrom = result.context[0].metadata.loc.lines.from;
@@ -185,13 +199,16 @@ export const chat = async (req, res, next) => {
   }
 
   result.answer = result.answer.substring(5);
+  performance.mark('setSourceLocation:end');
 
   // store q/a-pair in chat history
   let chat = await extendChat(req.body.chatId, [
     new HumanMessage(req.body.input),
     new AIMessage({ content: result.answer, source: sourceLocation })
   ]);
+  performance.mark('chat:end');
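+  // the paired marks can later be turned into a duration, e.g. performance.measure('chat', 'chat:start', 'chat:end')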
 
   // return the answer
-  return res.json({ answer: result.answer, chat, performance: req.performance });
+  return res.json({ answer: result.answer, chat });
 };
\ No newline at end of file
diff --git a/utils/handleDB.js b/utils/handleDB.js
index 8ac754727c33f0eae8bf8c037101597c557c099d..901421d83cfc961575b02e3621e83d2ae24d488e 100644
--- a/utils/handleDB.js
+++ b/utils/handleDB.js
@@ -153,7 +153,10 @@ export const updateOneRecord = async (newData) => {
  */
 export const findByIdAndUpdate = async (model, id, data) => {
   try {
-    return model.findByIdAndUpdate(id, data);
+    performance.mark('findByIdAndUpdate:start');
+    const result = await model.findByIdAndUpdate(id, data);
+    performance.mark('findByIdAndUpdate:end');
+    return result;
   } catch (error) {
     throw error;
   }
@@ -186,6 +189,7 @@ export const findOneAndUpdate = async (model, searchObject, data) => {
  * @return  {object}            the edited document
  */
 export const extendChat = async (chatId, messages) => {
+  performance.mark('extendChat:start');
   // exit if no chatId
   if (!chatId) {
     console.error('No chatId provided');
@@ -198,6 +202,7 @@ export const extendChat = async (chatId, messages) => {
     // push new message into chat history
     const serializedMessages = mapChatMessagesToStoredMessages(messages);
     record.chatHistory ? record.chatHistory.push(...serializedMessages) : record.chatHistory = serializedMessages;
+    performance.mark('extendChat:end');
     // save & return chat
     return await findByIdAndUpdate(Chat, chatId, { chatHistory: record.chatHistory });
   } catch (error) {
diff --git a/utils/handleSchemes.js b/utils/handleSchemes.js
index e64af30de628d3ebd2ce4be501dc6ffd23487941..bcb360b755456a685cf86c84ddbbd136b5a37b88 100644
--- a/utils/handleSchemes.js
+++ b/utils/handleSchemes.js
@@ -26,13 +26,13 @@ export const getConfidentialFields = (model) => {
  * @return  {object}          cleansed object
  */
 export const hideConfidentialFields = (model, object) => {
-  performance.mark('hideConfidentialFields_start');
+  performance.mark('hideConfidentialFields:start');
   const confidentialFields = getConfidentialFields(model);
   // delete from object
   confidentialFields.forEach(field => {
     delete object[field];
   });
-  performance.mark('hideConfidentialFields_end');
+  performance.mark('hideConfidentialFields:end');
   return object;
 };
 
diff --git a/utils/handleTokens.js b/utils/handleTokens.js
index 3bd5c4e0986336c33913166964eea4bff8b3b109..794b2aebac73516bff5e6ec3818d5e32d03d1060 100644
--- a/utils/handleTokens.js
+++ b/utils/handleTokens.js
@@ -132,6 +132,8 @@ export const deleteRefreshToken = async (refreshToken) => {
  *
  */
 export const verifyAccessToken = async (req, res, next) => {
+  performance.mark('verifyAccessToken:start');
+
   // define header
   const authHeader = req.headers['authorization'];
   // split token from authHeader - if available
@@ -141,12 +143,17 @@ export const verifyAccessToken = async (req, res, next) => {
   if (!token) return res.status(401).json({ message: 'No access token found. Access denied.' });
 
   // verify token
+  performance.mark('jwt.verify:start');
   jwt.verify(token, process.env.JWT_SECRET_KEY, async (error, payload) => {
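+    // mark the end inside the callback so the asynchronous verification itself is measured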
+    performance.mark('jwt.verify:end');
     // if invalid
     if (error) return res.status(403).json({ message: 'Access token is no longer valid. Access denied.' });
     // if valid: remember current user id & role and go on
     global.currentUserId = payload.id;
     global.currentUserRole = payload.role;
+    performance.mark('verifyAccessToken:end');
+
     next();
   });
 };
diff --git a/utils/handleValidations.js b/utils/handleValidations.js
index 2dffe78306e5cf8b5286434d466434bd946cc3f6..d25d4669dcc5451eed0ef6f4f744909af55f2e70 100644
--- a/utils/handleValidations.js
+++ b/utils/handleValidations.js
@@ -2,10 +2,13 @@ import { CustomError } from "./handleErrors.js";
 
 // ### Validation function
 export const validate = (schema) => (req, res, next) => {
+  performance.mark('validateInput:start');
   try {
     // validate provided schema against request body
     schema.parse(req.body);
     // advance
+    performance.mark('validateInput:end');
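+    // note: if schema.parse throws, the ':end' mark is never recorded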
     next();
   } catch (error) {
     // name error