diff --git a/maxwell_integrate_to_h5.py b/maxwell_integrate_to_h5.py
index 1074b7417028de0614afd74421ceace45e72fa1d..77e3b6750190720bc49fa04bc3a28eda9ddbf982 100644
--- a/maxwell_integrate_to_h5.py
+++ b/maxwell_integrate_to_h5.py
@@ -18,6 +18,7 @@ import pandas as pd
 #from silx.io.dictdump import h5todict, dicttoh5
 import h5py
 import re
+from tqdm import tqdm
 
 
 
@@ -146,9 +147,9 @@ def integrate_ims_in_dir(path_im, path_int, dtype_im=".tif", dtype_int=".dat"):
         
         return data
 
-
     # Loop through all subdirectories and integrate images
-    for subdir in set(os.path.dirname(fname) for fname in fnames_ims):
+    subdirs = set(os.path.dirname(fname) for fname in fnames_ims)
+    for subdir in tqdm(subdirs, desc="Processing subdirectories"):
         
         # Get filenames and metadata for the current subdirectory
         subdir_fnames = [fname for fname in fnames_ims if os.path.dirname(fname) == subdir]
@@ -253,6 +254,7 @@ def integrate_ims_in_dir(path_im, path_int, dtype_im=".tif", dtype_int=".dat"):
                 os.remove(output_file)
 
             # Create the HDF5 file with the results
+            reduced_length = 0
             with h5py.File(output_file, "w", libver="latest", track_order=True) as h5:
                 # Create the root group and set its attributes
                 h5.attrs["NX_class"] = "NXroot"
@@ -382,11 +384,12 @@ def integrate_ims_in_dir(path_im, path_int, dtype_im=".tif", dtype_int=".dat"):
                             continue
                             
                     else:
-                        print(f"Failed to create entry group {entry_name}")
+                        reduced_length += 1
+                        print(f"Failed to create entry group {idx:05d}.1")
                         continue
                             

-            print(f"✅ HDF5 file '{output_file}' created with {len(results_data)} spectra.")
+            print(f"✅ HDF5 file '{output_file}' created with {len(results_data) - reduced_length} spectra.")
 
             # Clean the results DataFrame from memory (redundend, but good practice)
             del results_df