diff --git a/maxwell_int_to_h5.sh b/maxwell_int_to_h5.sh
index 63492beec5836c3f20da1f2934e9d8f581e5a991..40c7f0723878986c855da7e4fd155156e19f4027 100644
--- a/maxwell_int_to_h5.sh
+++ b/maxwell_int_to_h5.sh
@@ -18,7 +18,7 @@ plarization=0.99
 number_of_points=4500
 unit="q_A^-1"
 error="azimuthal"
-
+forbidden=("0p2") #forbidden subdirectories, if you want to integrate all subdirs, set this to ()
 
 
 module load maxwell
@@ -26,4 +26,4 @@ module load conda
 module load pyFAI
 activate FlyPDF
 cd  /asap3/petra3/gpfs/p21.1/2025/data/11021216/shared/Integration/
-python3 maxwell_integrate_with_subdirs.py $path_raw_data $path_integrated_data $fpath_poni $fpath_mask $NPROCS $plarization $number_of_points $unit $error $format #Start python script
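+# the forbidden list is passed as the 11th argument and split on ',' by the python script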
+python3 maxwell_integrate_with_subdirs.py $path_raw_data $path_integrated_data $fpath_poni $fpath_mask $NPROCS $plarization $number_of_points $unit $error $format $forbidden #Start python script
diff --git a/maxwell_integrate_to_h5.py b/maxwell_integrate_to_h5.py
index 28e6c75e36baecf9569c9f70ca7d84da083a2ed0..d098587836fa20c25738d62dc6184dde1abfa006 100644
--- a/maxwell_integrate_to_h5.py
+++ b/maxwell_integrate_to_h5.py
@@ -28,19 +28,19 @@ def integrate_ims_in_dir(path_im, path_int, dtype_im=".tif", dtype_int=".dat"):
     :param 'str' dtype_int: data type/filename ending of pattern file
     """
     global NPROC
+    global FORBIDDEN  # path patterns to skip, parsed from the command line in __main__
     fnames_ims = []#= glob(os.path.join(path_im, "*" + dtype_im))
     path_int_list = []
     for path, subdirs, files in os.walk(path_im):
         for name in files:
-            if ("cu" not in name) or ("Cu" not in name) or ("np" not in name):
-                if "sdd500" not in name and "sdd750" not in name and "sdd1000" not in name:
-                    fnames_ims.append(os.path.join(path, name))
-                    if path_im != str(path):
-                        path_new = str(path).replace(path_im,'')      
-                        path_new = path_int + path_new 
-                    else:
-                        path_new = path_int 
-                    path_int_list.append(path_new)
+            # skip files whose path matches any forbidden pattern (e.g. a forbidden subdirectory)
+            if not any(pattern in os.path.join(path, name) for pattern in FORBIDDEN):
+                fnames_ims.append(os.path.join(path, name))
+                if path_im != str(path):
+                    path_new = str(path).replace(path_im,'')      
+                    path_new = path_int + path_new 
+                else:
+                    path_new = path_int 
+                path_int_list.append(path_new)
 
     #fnames_ims.sort(key=str.lower)
 
@@ -96,24 +96,28 @@ def integrate_ims_in_dir(path_im, path_int, dtype_im=".tif", dtype_int=".dat"):
                     results_df = pd.concat([results_df, pd.DataFrame(data)], ignore_index=True)
 
     pool = Pool(int(NPROC))
-    
-    for i,fname_im in enumerate(fnames_ims):
-        pool.apply_async(integration_thread, (fname_im,path_int_list[i]))
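+    # Process one subdirectory at a time: queue all of its images, wait for the pool
+    # to finish, then export that subdirectory's results before moving on.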
+    for subdir in set(os.path.dirname(fname) for fname in fnames_ims):
+        subdir_fnames = [fname for fname in fnames_ims if os.path.dirname(fname) == subdir]
+        subdir_path_int = path_int_list[fnames_ims.index(subdir_fnames[0])]
+
+        for fname_im in subdir_fnames:
+            pool.apply_async(integration_thread, (fname_im, subdir_path_int))
 
-        # Export the DataFrame to a CSV file with the name of the directory
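+        # Block until every integration job of this subdirectory has finished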
+        pool.close()
+        pool.join()
+
+        # Export the DataFrame to a CSV file with the name of the subdirectory
         if 'results_df' in globals():
             results_df = results_df.sort_values(by="filename", key=lambda col: col.str.lower())
-            directory_name = os.path.basename(os.path.normpath(path_int))
-            results_df.to_csv(os.path.join(path_int, f"{directory_name}.csv"), index=False)
-            results_df.to_hdf(os.path.join(path_int, f"{directory_name}.h5"), key='data', mode='w')
+            subdir_name = os.path.basename(os.path.normpath(subdir_path_int))
+            results_df.to_csv(os.path.join(subdir_path_int, f"{subdir_name}.csv"), index=False)
+            results_df.to_hdf(os.path.join(subdir_path_int, f"{subdir_name}.h5"), key='data', mode='w')
             del results_df
         else:
-            print("No images were integrated. No results DataFrame created.")
-
+            print(f"No images were integrated in subdirectory {subdir}. No results DataFrame created.")
 
-        
-    pool.close()
-    pool.join()
+        # Start a fresh pool for the next subdirectory
+        pool = Pool(int(NPROC))
+
+    # The pool created after the last subdirectory never receives jobs; shut it down cleanly
+    pool.close()
+    pool.join()
     
         
 def integrate_on_created(event, path_int, dtype_im=".tif", dtype_int=".dat"):
@@ -231,6 +235,7 @@ if __name__ == '__main__':
     UNIT=str(sys.argv[8])
     ERRORMODE = str(sys.argv[9]).lower()
     DATATYPE = str(sys.argv[10]).lower()
+    # optional 11th argument: comma-separated path patterns to skip; empty entries are ignored
+    FORBIDDEN = [f for f in sys.argv[11].split(',') if f] if len(sys.argv) > 11 else []
 
     if DATATYPE not in {"tif", "tiff", "TIF", "TIFF"}:
         raise ValueError(f"Unsupported data type: {DATATYPE}")