diff --git a/LossFunctions.py b/LossFunctions.py
index 87eadd3d214da9508dbbe8cc9fce506e377ad07f..9f7fa4548c9c04f566bc07d7cfa8ca58acbc2b5c 100644
--- a/LossFunctions.py
+++ b/LossFunctions.py
@@ -4,9 +4,9 @@ from iminuit.util import describe
 from Model import epsilon
 from HelperFunctions import FakeFuncCode
 import matplotlib.pyplot as plt
- 
-
-           
+
+
+
                                               
 class BinnedLH:
     
@@ -14,6 +14,7 @@ class BinnedLH:
     def __init__(self, f, bin_centres, counts, bw):
         self.f = f
         self.x = bin_centres
+        self.len_x = len(self.x)  # cache the number of bins for __call__
         self.dx = bw
         self.counts = counts
         self.N = np.sum(counts)
@@ -22,6 +23,8 @@ class BinnedLH:
         self.func_code = FakeFuncCode(f, dock=True)
         self.n_calls=0
         self.eps = epsilon()
+        self.mask = (self.counts > 0)  # occupied bins, computed once
+
         
 
     def __call__(self, *arg):
@@ -29,21 +32,23 @@ class BinnedLH:
                 
         
         y_hat = self.f(self.x, *arg)
+
         
-        y_hat = np.nan_to_num(y_hat, nan=self.eps, posinf=self.eps, neginf=self.eps)
-        y_hat = np.where(y_hat<self.eps, self.eps, y_hat)
-        
-        E = y_hat*self.N*self.dx
+        E = np.maximum(y_hat*self.dx, self.eps)  # expected events per bin, clamped so log(E) stays finite
         h = self.counts
-        mask = (h>0)
-        E = E[mask]
-        h = h[mask]
+        mask = self.mask  # precomputed in __init__: counts > 0
         
-        nlogL = -np.sum(h*(np.log(E) - np.log(h)) + (h-E))
 
-        self.n_calls+=1
+        nlogL = np.zeros(self.len_x)  # one log-likelihood term per bin
         
+        nlogL[mask] = h[mask]*(np.log(E[mask]) - np.log(h[mask])) + (h[mask]-E[mask])  # likelihood-ratio terms for occupied bins
+        nlogL[~mask] = -E[~mask]  # empty bins (h == 0) contribute just -E
+
+        nlogL = -np.sum(nlogL)  # flip sign: return the negative log-likelihood for minimisation
+
+        self.n_calls += 1
         
+
         return nlogL
     
     
@@ -102,4 +107,4 @@ class HuberRegression:
         
         loss = np.sum((~cond_flag) * (0.5 * a ** 2) - (cond_flag) * self.delta * (0.5 * self.delta - a), -1)
         
-        return np.sum(loss)
\ No newline at end of file
+        return np.sum(loss)
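
For reference, the masked expression that replaces the old in-place filtering is the zero-safe form of the binned Poisson likelihood-ratio NLL: occupied bins contribute h*(log E - log h) + (h - E), and empty bins reduce to -E. A minimal self-contained sketch of the same arithmetic (function name and toy values are illustrative, not from the repo):

import numpy as np

def binned_nll(y_hat, counts, bw):
    # mirrors the updated BinnedLH.__call__
    E = y_hat * bw                  # expected events per bin
    h = counts
    mask = h > 0
    terms = np.zeros(len(h))
    terms[mask] = h[mask] * (np.log(E[mask]) - np.log(h[mask])) + (h[mask] - E[mask])
    terms[~mask] = -E[~mask]        # empty bins: log-likelihood term is just -E
    return -np.sum(terms)

rng = np.random.default_rng(0)
edges = np.linspace(0.0, 1.0, 11)
counts, _ = np.histogram(rng.uniform(size=100), bins=edges)
y_hat = np.full(10, 100.0)          # flat model: 100 expected events per unit x
print(binned_nll(y_hat, counts, edges[1] - edges[0]))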
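
And since FakeFuncCode(f, dock=True) is presumably there so that iminuit's describe() sees only the model parameters (the leading x docked off), the cost object should plug straight into Minuit. A hedged usage sketch, assuming that introspection works as described and using a placeholder model f whose parameter names are invented for illustration:

import numpy as np
from iminuit import Minuit
from LossFunctions import BinnedLH

def f(x, mu, sigma):
    # placeholder: Gaussian scaled to ~100 expected events in total
    return 100.0 * np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))

rng = np.random.default_rng(1)
edges = np.linspace(-3.0, 3.0, 31)
counts, _ = np.histogram(rng.normal(size=100), bins=edges)
centres = 0.5 * (edges[:-1] + edges[1:])

lh = BinnedLH(f, centres, counts, bw=edges[1] - edges[0])
m = Minuit(lh, mu=0.1, sigma=1.2)   # parameter names come from lh.func_code
m.errordef = Minuit.LIKELIHOOD      # 0.5, appropriate for a negative log-likelihood
m.migrad()
print(m.values["mu"], m.values["sigma"])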