diff --git a/__tests__/ai/__snapshots__/pull.test.js.snap b/__tests__/ai/__snapshots__/pull.test.js.snap
index f90daf34947ac1a9ef79403ab267da7f7ff658da..e4fadc67cb4f153bcc8eadbcf276a448d0567a72 100644
--- a/__tests__/ai/__snapshots__/pull.test.js.snap
+++ b/__tests__/ai/__snapshots__/pull.test.js.snap
@@ -35,6 +35,41 @@ exports[`ai pull model > given required fields are missing > should respond with
 
 exports[`ai pull model > given the inputs are valid > should respond with a proper body 1`] = `
 {
-  "status": "success",
+  "model": [
+    {
+      "details": {
+        "families": [
+          "llama",
+        ],
+        "family": "llama",
+        "format": "gguf",
+        "parameter_size": "8.0B",
+        "parent_model": "",
+        "quantization_level": "Q4_0",
+      },
+      "digest": "62757c860e01d552d4e46b09c6b8d5396ef9015210105427e05a8b27d7727ed2",
+      "model": "llama3.1:latest",
+      "modified_at": "2024-08-05T18:15:28.145879883+02:00",
+      "name": "llama3.1:latest",
+      "size": 4661226402,
+    },
+    {
+      "details": {
+        "families": [
+          "llama",
+        ],
+        "family": "llama",
+        "format": "gguf",
+        "parameter_size": "8.0B",
+        "parent_model": "",
+        "quantization_level": "Q4_0",
+      },
+      "digest": "365c0bd3c000a25d28ddbf732fe1c6add414de7275464c4e4d1c3b5fcb5d8ad1",
+      "model": "llama3:latest",
+      "modified_at": "2024-08-05T12:11:53.562412228+02:00",
+      "name": "llama3:latest",
+      "size": 4661224676,
+    },
+  ],
 }
 `;
diff --git a/controllers/AI.js b/controllers/AI.js
index 0fd16bb144ac0e7f2382c721e56a24198d1195d5..7d57c9f21369dbdb5ec2920683936558969dcd92 100644
--- a/controllers/AI.js
+++ b/controllers/AI.js
@@ -57,7 +57,10 @@ export const getModel = async (req, res, next) => {
 export const installModel = async (req, res, next) => {
   try {
     const response = await aiInstallModel(req.body.model);
-    return res.json(response);
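+    // look up the freshly installed model so its metadata is returned alongside the confirmation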
+    const model = await aiFilterModelsByName(req.body.model);
+
+    return res.json({ message: response.message, model });
   } catch (error) {
     next(error);
   }
diff --git a/controllers/Embeddings.js b/controllers/Embeddings.js
index 8a3769148a2736ff67ccc8b72b205134e100b72e..6231549b8aaa4fe7415fb51bc5fef57faf65e852 100644
--- a/controllers/Embeddings.js
+++ b/controllers/Embeddings.js
@@ -76,7 +76,7 @@ export const removeVectorDb = async (req, res, next) => {
   }
   // exit if the collection doesn't exist
   if (! await isCollectionAvailable()) {
-    return res.status(404).json({ error: `VectorDB collection ${process.env['VECTOR_COLLECTION_NAME']} not found.` });
+    return res.status(404).json({ message: `VectorDB collection ${process.env['VECTOR_COLLECTION_NAME']} not found.` });
   }
 
   // delete collection
@@ -111,40 +111,15 @@ export const getStatus = async (req, res, next) => {
   return res.json({ vectorDBrunning, collection, itemCount });
 };
 
-/** *******************************************************
- * CREATE EMBEDDINGS
- */
-export const createEmbeddings = async (req, res) => {
-  // check if collection is available
-  const collection = await isCollectionAvailable();
-  if (!collection) {
-    return res.status(500).json({ error: `VectorDB collection ${process.env['VECTOR_COLLECTION_NAME']} not found.` });
-  }
-  // test if model is available
-  const models = await aiFilterModelsByName(process.env['RAG_MODEL_NAME']);
-  // install model if missing
-  if (!models.length) {
-    console.info('Embedding Model not found. Installing ...');
-    await ollama.pull({ model: process.env['RAG_MODEL_NAME'] });
-  }
-  // console.log('collection count BEFORE', await collection.count());
-  // load RAG files
-  const docs = await directoryLoader();
-  // embed
-  const loadedDocs = await embedder(docs);
-
-  // console.log('collection count AFTER', await collection.count());
-  return res.json({ 'message': 'Embeddings created.' });
-};
-
 /** *******************************************************
  * UPDATE EMBEDDINGS
  */
 export const updateEmbeddings = async (req, res, next) => {
   // check if collection is available
-  const collection = await isCollectionAvailable();
+  let collection = await isCollectionAvailable();
   if (!collection) {
-    return res.status(500).json({ error: `VectorDB collection ${process.env['VECTOR_COLLECTION_NAME']} not found.` });
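+    // the collection is missing: create it on the fly instead of failing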
+    collection = await createCollection();
   }
 
   // #################
diff --git a/routes/ai.js b/routes/ai.js
index bfbbecdd3f08044977c66acb16a97cdf66387e68..ec38e6150b9d7643a2f030213a2198cef9e9969d 100644
--- a/routes/ai.js
+++ b/routes/ai.js
@@ -82,6 +82,8 @@ router.delete('/models', verifyAccessToken, gateKeeper, validate(deleteModelSche
  * 
  * @return  {object}         AI response & chat history record
  */
+// TODO add cross encoder / reranker
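+//      (e.g. re-score the retrieved chunks with a cross-encoder before they are sent to the model)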
 router.post('/chat', verifyAccessToken, validate(chatSchema), checkRequestedModel, getChat, chat);
 
 
@@ -91,6 +92,7 @@ router.post('/chat', verifyAccessToken, validate(chatSchema), checkRequestedMode
  * @header  {authorization}  Bearer       [required] access token
  * @return  {object}                    list of found conversations, ordered by updated
  */
+// TODO sort chats by createdAt
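+//      (presumably a descending sort on createdAt in the getChats query)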
 router.get('/chats', verifyAccessToken, getChats);
 
 export default router;
\ No newline at end of file
diff --git a/routes/embeddings.js b/routes/embeddings.js
index 37bba1947448a2fa52706f9da5ccef920af63499..0f4a341ccb9715bdb3ed241f7c7965fa76adbc97 100644
--- a/routes/embeddings.js
+++ b/routes/embeddings.js
@@ -13,6 +13,8 @@ const router = Router();
  *
  * @return  {object}                     related message
  */
+// BUG: after deleting and re-creating the collection, it is not found until restart
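+//      (a collection handle cached at startup is a likely cause; re-fetching it after createCollection may help)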
 router.delete('/', verifyAccessToken, gateKeeper, removeVectorDb);
 
 /**
diff --git a/utils/handleAI.js b/utils/handleAI.js
index dde2c62e6c1462c8726edcf4b356944445effc31..f69fd6f0b5418ad033daf7d2d5dbf2c5765be126 100644
--- a/utils/handleAI.js
+++ b/utils/handleAI.js
@@ -53,7 +53,9 @@ export const aiGetModel = async (model) => {
  */
 export const aiInstallModel = async (model) => {
   try {
-    return await ollama.pull({ model, stream: false });
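+    // pull() resolves once the download finishes; return a concise confirmation instead of the raw pull status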
+    await ollama.pull({ model, stream: false });
+    return { message: `model ${model} installed` };
   } catch (error) {
     throw error;
   }