diff --git a/requirements.txt b/requirements.txt
index 2512317732a3b5150bd0ed5723e18571d6125b32..0635e647ca63b65367ce405ba7ab9bce472c75f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,4 +6,4 @@ numba
 numpy
 pandas
 scipy
-argparse
\ No newline at end of file
+h5py
diff --git a/user/sumlab_auto.py b/user/sumlab_auto.py
index 760cab06be3cfeb3376743f9183f8c4924fdc0b6..9919edbf1abf0a223c9a408a0e987f975ee53fcd 100644
--- a/user/sumlab_auto.py
+++ b/user/sumlab_auto.py
@@ -2,10 +2,11 @@ import sys
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from PeakOTron import PeakOTron
-import pandas as pd
-import numpy as np
+import h5py
 import argparse
-
+import numpy as np
+import pandas as pd
 
 def float_or_none(value):
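+    # argparse type: treat the literal string 'none' as None, anything else as a float.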
     return None if value.lower() == 'none' else float(value)
@@ -69,13 +69,23 @@ print("=======================================\033[0m")
 
 # Loop through SORTED files in alphabetical order!
 files_to_fit = sorted(files_to_fit, key=lambda x: x[0])
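+# Accumulate per-file gain results for the HDF5 summary written after the loop.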
+G = []
+d_G = []
+G_prefit = []
+d_G_prefit = []
+V_bias = []
+
 for i, (file, path) in enumerate(files_to_fit):
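+    # Parse the bias voltage (and gate length, if present) from the file name; underscores stand in for decimal points.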
     V = float(file.split('deg')[1].split('V')[0].replace('_', '.'))
     if 'ns' in file:
         t_gate = float(file.split('V')[1].split('ns')[0].replace('_', '.'))
     print(f"Fitting: \033[95m{file}\033[0m (t_gate = {t_gate} ns)")
-
-    #f_tau_hmt = f_tau(V, V_bd_hmt, V_0_hmt)
+    # items = file.split('_')
+    # V = float(items[2].replace('V', '').replace('p', '.'))
+    # f_tau_hmt = f_tau(V, V_bd_hmt, V_0_hmt)
+    V_bias.append(V)
 
     # Load files.
     data = np.loadtxt(path, skiprows=0)
@@ -86,7 +91,7 @@ for i, (file, path) in enumerate(files_to_fit):
                tau=tau,  # SLOW PULSE COMPONENT TIME CONSTANT (ns)
                t_gate=t_gate,  # GATE LENGTH (ns)
                t_0=t_0,  # INTEGRATION TIME BEFORE GATE (ns)
-               tau_R=0.65*tau, #WAS: tau_R=f_tau_hmt*tau,
+               tau_R=0.65*tau,  # WAS: tau_R=f_tau_hmt*tau,
                bin_0=bin_0,
                truncate_nsigma0_up=truncate_nsigma0_up,
                truncate_nsigma0_do=truncate_nsigma0_do,
@@ -95,7 +100,8 @@ for i, (file, path) in enumerate(files_to_fit):
 
     fit_out = {}
     prefit_val, prefit_err = f_data.GetPrefitResults(bin_units=False)
-    print("\033[95m"+rf"Prefit: G = {prefit_val.get('G')} d_G = {prefit_err.get('G')}"+"\033[0m")
+    print("\033[95m" + rf"Prefit: G = {prefit_val.get('G')} "
+          rf"d_G = {prefit_err.get('G')}" + "\033[0m")
     for key, value in prefit_val.items():
         fit_out["prefit_{:s}".format(key)] = value
     for key, value in prefit_err.items():
@@ -103,15 +109,41 @@ for i, (file, path) in enumerate(files_to_fit):
 
     if not prefit_only:
         fit_val, fit_err = f_data.GetFitResults(bin_units=False)
-        print("\033[95m"+rf"Fit: G = {fit_val.get('G')} d_G = {fit_err.get('G')}"+"\033[0m")
+        print("\033[95m" + rf"Fit: G = {fit_val.get('G')} "
+              rf"d_G = {fit_err.get('G')}" + "\033[0m")
         for key, value in fit_val.items():
             fit_out["{:s}".format(key)] = value
         for key, value in fit_err.items():
             fit_out["d_{:s}".format(key)] = value
         f_data.PlotFit(plot_in_bins=True, display=False,
-                save_directory=f"{folder}/{file[:-4]}_fit.png")
+                       save_directory=f"{folder}/{file[:-4]}_fit.png")
 
     df = pd.DataFrame.from_dict([fit_out])
     df.to_csv("{}/fit_results_{:s}.csv".format(folder, file[:-4]))
 
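+    # Record the fitted gain and its error; 0 is a placeholder when only the prefit ran or the fit returned no gain.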
+    if not prefit_only and 'G' in fit_out and 'd_G' in fit_out:
+        G.append(fit_out['G'])
+        d_G.append(fit_out['d_G'])
+    else:
+        G.append(0)
+        d_G.append(0)
+    G_prefit.append(fit_out['prefit_G'])
+    d_G_prefit.append(fit_out['prefit_d_G'])
+
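+# Convert the accumulated lists to arrays for HDF5 storage.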
+G = np.array(G)
+d_G = np.array(d_G)
+G_prefit = np.array(G_prefit)
+d_G_prefit = np.array(d_G_prefit)
+V_bias = np.array(V_bias)
+
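+# Write the gain-vs-bias summary to a single HDF5 file named after the output folder.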
+with h5py.File(f"{folder}/{os.path.basename(folder)}.h5", 'w') as f:
+    f.create_dataset('G', data=G)
+    f.create_dataset('d_G', data=d_G)
+    f.create_dataset('G_prefit', data=G_prefit)
+    f.create_dataset('d_G_prefit', data=d_G_prefit)
+    f.create_dataset('V_bias', data=V_bias)
+
 print("\033[95m=======================================\033[0m")