diff --git a/.gitignore b/.gitignore
index df2eaf4579d8730d30a9b69b2e0568794c12e27f..8b23d3cc4a0b27822a26b0a473b2cf04f7213786 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,8 +14,9 @@ Thumbs.db
 
 # Source for the following rules: https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore
 # Byte-compiled / optimized / DLL files
-__pycache__/
+__pycache__/
 *.py[cod]
+*.pyc
 *$py.class
 
 # C extensions
@@ -38,6 +39,7 @@ wheels/
 *.egg-info/
 .installed.cfg
 *.egg
+.start_script.py
 MANIFEST
 
 # PyInstaller
diff --git a/input/get/README.md b/input/get/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..56c610a06043789e1574f0a29fbf3fbb477c5d9f
--- /dev/null
+++ b/input/get/README.md
@@ -0,0 +1,30 @@
+# Journal Fetcher
+This directory contains all the fetchers used to create a publication instance.  
+These Fetchers should inherit from [**JournalFetcher**](journal_fetcher.py).
+
+## Template
+There is a [**template file**](template_.py) with no functionality; it can be used to see what needs to be implemented.  
+
+## Naming Convention
+The filename of the fetcher should be `<[a-z]+>.py` and the classname should be `<Filename>Fetcher`.  
+Example:  
+filename: [acs.py](acs.py)  
+classname: [AcsFetcher](acs.py)
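+
+The interface derives the class name from the filename; a sketch of the lookup used in [interface.py](../interface.py):
+```python
+# hypothetical one-liner: 'acs.py' -> 'AcsFetcher'
+class_name = "{}Fetcher".format(filename[:-3].capitalize())
+```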
+
+## Format
+Fetchers should keep this format:
+- doi_url: *https://doi.org/10.xxxx/...*
+- title: *title*
+- contributors: *[contrib_1, contrib_2, ...]*
+- journal: *full name*
+- publication_date: *dd.mm.yyyy*
+- subjects: *[subject_1, subject_2, ...]*
+- references: *[reference_1, reference_2, ...]*
+- citations: *[citation_1, citation_2, ...]*
+- abstract: *abstract without '\n'*
+
+
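+## Usage
+A minimal sketch of how a finished fetcher is used directly (the url is one of the DOIs from the tests; assumes it still resolves):
+```python
+from input.get.acs import AcsFetcher
+
+url = "https://doi.org/10.1021/acs.jcim.1c00203"
+if AcsFetcher.can_use_url(url):
+    publication = AcsFetcher.get_publication(url)  # full fetch, incl. references and citations
+    print(publication)
+```
+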
+## Tests
+Tests for these modules are in **input/test/**.  
+These tests should inherit from [**FetcherTestCase**](../test/test_input.py) and should have positive and negative test cases; see [**test_acs.py**](../test/test_acs.py) for reference and the sketch below.  
+There are also tests which compare the string representation of the publication with a predefined one ([example](../test/test_txt/acs.txt)).
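+A minimal sketch of such a subclass (hypothetical test class name; `AcsFetcher` and `can_use_url_test` come from the existing modules):
+```python
+from input.get.acs import AcsFetcher
+from input.test.test_input import FetcherTestCase
+
+
+class ExampleTestCase(FetcherTestCase):
+    def test_urls(self):
+        # positive and negative test cases
+        self.can_use_url_test(AcsFetcher, "https://doi.org/10.1021/acs.jcim.1c00203", True)
+        self.can_use_url_test(AcsFetcher, "https://doi.org/10.1038/nchem.1781", False)
+```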
diff --git a/input/get/_springer.py b/input/get/_springer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ed7e8c978a8e3a3539a884b0fb235499dc20554
--- /dev/null
+++ b/input/get/_springer.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+
+"""
+Child class of JournalFetcher
+Usage: Check if Url can be used with 'can_use_url'
+       and then fetch publication with 'get_publication'
+"""
+
+import re
+from input.get.journal_fetcher import JournalFetcher
+from input.publication import Citation, Publication
+
+
+class NatureFetcher(JournalFetcher):
+
+    """
+    scrapes publication metadata from a provided url
+    """
+
+    #   NOTE: nature does not use journal names in doi links, must match by 10.xxxx identifier instead
+    SUPPORTED_JOURNALS = ['1038']
+
+    @staticmethod
+    def can_use_url(url: str) -> bool:
+        """
+        Uses a regex to extract journal-specific substrings from the doi.
+        TODO: Support non doi-urls, maybe parse specific journals from s[5 digit] part of nature doi if necessary
+        """
+
+        matched_url = re.match(r'^(https?://)?(doi.org/)?10.(\d{4})/s(\d{5})-(\d{3})-(\d{5})-(\d{1})', url.strip(". \t\r\n"))
+
+        # Checks if match exists
+        if matched_url is not None:
+            return matched_url[3] in NatureFetcher.SUPPORTED_JOURNALS
+        else:
+            return False
+
+    @staticmethod
+    def get_pub_light(url: str) -> Publication:
+        # TODO: not implemented yet; should return a Publication without references and citations
+        pass
+
+    @staticmethod
+    def get_publication(url: str) -> Publication:
+        """
+        takes a url, scrapes article and citation metadata from corresponding website, and returns
+        that metadata as a Publication instance.
+        """
+
+        # Creation of soup
+        try:
+            soup = JournalFetcher.get_soup(url)
+        except Exception as error:
+            raise error
+        
+        # Raise error if the regex recognizes the pattern but the url isn't correct:
+        #   For other Urls
+        if soup.text.strip(" \t\n")=="Missing resource null":
+            raise ValueError("'{}' is not a valid doi url".format(url))
+
+        #   For dois
+        if soup.title is not None:
+            if soup.title.text == "Error: DOI Not Found":
+                raise ValueError("'{}' matches pattern for 'Nature', but doesn't link to a paper.".format(url))
+
+        # Fetching metadata from soup
+        doi_url = "https://doi.org/" + soup.head.find(attrs={"name": "DOI"}).get("content")
+        title = soup.head.find(attrs={"name": "citation_title"}).get("content")
+        journal = soup.head.find(attrs={"name": "citation_journal_title"}).get("content")
+        published = soup.head.find(attrs={"name": "prism.publicationDate"}).get("content")
+        contributors = []
+        subjects = []
+        references = []
+
+        for creator in soup.head.findAll(attrs={"name": "dc.creator"}):
+            contributors.append(creator.get("content"))
+
+        for subject in soup.head.findAll(attrs={"name": "dc.subject"}):
+            subjects.append(subject.get("content"))
+
+        for reference in soup.head.findAll(attrs={"name": "citation_reference"}):
+
+            if re.search('citation_journal_title=', reference.get("content")):
+                ref_doi = re.search(r'citation_doi=(.+); ', reference.get("content"))[1]
+                ref_title = re.search(r'citation_title=(.+); ', reference.get("content"))[1]
+                ref_journal = re.search(r'citation_journal_title=(.+); ',reference.get("content"))[1]
+                ref_contributors = re.split(r', ', re.search(r'citation_author=(.+); ', reference.get("content"))[1])
+            else:
+                ref_doi = ""
+                ref_title = reference.get("content")
+                ref_journal = ""
+                ref_contributors = []
+
+            references.append(Citation(doi_url=ref_doi, title=ref_title\
+                                    , journal=ref_journal, contributors=ref_contributors\
+                                    , cit_type="Reference" ))
+
+
+        return Publication(doi_url, title, contributors, journal, published, subjects, references)
+
+        # TODO: Exceptions-handling
+        #   raise ValueException("Cant Fetch: '{}'".format(error))
+        # return None
diff --git a/input/get/acs.py b/input/get/acs.py
index 9691845b27ae694a8213a0f0fe5f827c75890eee..c7ea64735bc83502c8df3c5f8dd984ea29f196aa 100755
--- a/input/get/acs.py
+++ b/input/get/acs.py
@@ -12,58 +12,57 @@ from input.get.journal_fetcher import JournalFetcher
 from input.publication import Publication, Citation
 
 
-class Fetcher(JournalFetcher):
+class AcsFetcher(JournalFetcher):
     """
-    Specific Fetcher for the ACS journals.
+    Specific fetcher for the ACS journals.
     """
 
-    # Constant for the abbreviations of the supported Journals
+    # Constant with the journal-specific part of the doi (10.xxxx) for the supported journals
     SUPPORTED_JOURNALS = ['1021']
 
     @staticmethod
     def can_use_url(url: str) -> str:
         """
-        Uses Regex to extract journal specific substrings in Doi.
-        TODO: Support non Doi-urls
+        Uses a regex to extract journal-specific substrings from (DOI) URLs.
         """
-        matched_url = re.match(r'^(https?://)?(doi.org/|pubs.acs.org/doi/)?(10.(\d{4})/\w+.\S+)', url.strip(". \t\r\n"))
+        matched_url = re.match(r'^(https?://)?(doi.org/|pubs.acs.org/doi/)?([a-z]+/)?(10.(\d{4})/\w+.\S+)', url.strip(". \t\r\n"))
         
-        #Checks if match exists
+        # Checks if match exists
         if matched_url is not None:
-            return matched_url[4] in Fetcher.SUPPORTED_JOURNALS
+            return matched_url[5] in AcsFetcher.SUPPORTED_JOURNALS
         else:
             return False
 
-    @staticmethod
-
 
+    @staticmethod
     def get_pub_light(url: str) -> Publication:
         """
         Fetches html and creates Beatifulsoup-instance in parent class.
-        Specific css-searches for ACS-Journals and creates Publication-instance.
+        Runs ACS-specific css-searches and creates a Publication-instance (without references, citations and abstract).
         """
 
-        # Creation of Soup
+        # Create soup
         try:
             soup = JournalFetcher.get_soup(url)
         except Exception as error:
             raise error
         
         # Raise Error if re recognizes Pattern, but url isnt correct:
-        #   For other Urls
+        # - for other URLs
         if soup.text.strip(" \t\n")=="Missing resource null":
             raise ValueError("'{}' matches Pattern for 'ACS', but doesnt link to Paper.".format(url))
 
-        #   For Dois
+        # - for DOIs
         if soup.title is not None:
             if soup.title.text == "Error: DOI Not Found":
                 raise ValueError("'{}' matches Pattern for 'ACS', but doesnt link to Paper.".format(url))
 
-        
+        # Pre-select the article header to get a smaller soup
         soup_header = soup.select('.article_header')[0]
         
-        # Creates Publication
+        # Fetches info for the publication
         doi_url = soup_header.select('a[title="DOI URL"]')[0].string
+
         title = soup_header.select(".hlFld-Title")[0].text
 
         contributors = []
@@ -71,21 +70,30 @@ class Fetcher(JournalFetcher):
             contributors.append(author.text)
 
         journal = soup_header.select(".cit-title")[0].text
-
         # Replaces abbreviation with whole name
         if journal in JournalFetcher.abbrev_dict:
             journal = JournalFetcher.abbrev_dict[journal]
                 
-
+        # Date format on ACS: "month dd, yyyy"
         published = soup_header.select(".pub-date-value")[0].text
-
-        subjects = []
-        subject_soup = soup_header.select('.article_header-taxonomy')[0]
-        for subject in subject_soup.select('a'):
-            subjects.append(subject.text)
-
-        return Publication(doi_url, title, contributors, journal, published, 
-                           subjects)
+        re_date = re.match(r'\s*(\w+) (\d+), (\d+)\s*', published)
+        # Convert to dd.mm.yyyy
+        if re_date is not None:
+            published = (re_date[2].zfill(2) + "."
+                        + JournalFetcher.month_to_num[re_date[1].lower()]
+                        + "." + re_date[3])
+
+        subjects = ["None Found"]
+        subject_soup = soup_header.select('.article_header-taxonomy')
+        # Some Papers have no Subjects
+        if subject_soup != []:
+            subjects = []
+            for subject in subject_soup[0].select('a'):
+                subjects.append(subject.text)
+
+        return Publication(doi_url = doi_url,title = title, contributors = contributors\
+                            , journal = journal, publication_date = published, subjects = subjects\
+                            , references = None, citations = None, abstract = None)
 
     def get_publication(url: str) -> Publication:
         """
@@ -93,30 +101,28 @@ class Fetcher(JournalFetcher):
         Specific css-searches for ACS-Journals and creates Publication-instance.
         """
 
-        # Creation of Soup
+        # Create soup
         try:
             soup = JournalFetcher.get_soup(url)
         except Exception as error:
             raise error
         
         # Raise Error if re recognizes Pattern, but url isnt correct:
-        #   For other Urls
+        # - for other URLs
         if soup.text.strip(" \t\n")=="Missing resource null":
             raise ValueError("'{}' matches Pattern for 'ACS', but doesnt link to Paper.".format(url))
 
-        #   For Dois
+        # - for DOIs
         if soup.title is not None:
             if soup.title.text == "Error: DOI Not Found":
                 raise ValueError("'{}' matches Pattern for 'ACS', but doesnt link to Paper.".format(url))
 
-        
+        # Pre-select the article header to get a smaller soup
         soup_header = soup.select('.article_header')[0]
         
-        #Could be used for more specific search
-        ref_cit_soup = soup
-
-        # Creates Publication
+        # Fetches info for the publication
         doi_url = soup_header.select('a[title="DOI URL"]')[0].string
+
         title = soup_header.select(".hlFld-Title")[0].text
 
         contributors = []
@@ -124,29 +130,45 @@ class Fetcher(JournalFetcher):
             contributors.append(author.text)
 
         journal = soup_header.select(".cit-title")[0].text
-
         # Replaces abbreviation with whole name
         if journal in JournalFetcher.abbrev_dict:
             journal = JournalFetcher.abbrev_dict[journal]
                 
-
+        # Date format on ACS: "month dd, yyyy"
         published = soup_header.select(".pub-date-value")[0].text
-
-        subjects = []
-        subject_soup = soup_header.select('.article_header-taxonomy')[0]
-        for subject in subject_soup.select('a'):
-            subjects.append(subject.text)
+        re_date = re.match(r'\s*(\w+) (\d+), (\d+)\s*', published)
+        # Convert to dd.mm.yyyy
+        if re_date is not None:
+            published = (re_date[2].zfill(2) + "."
+                        + JournalFetcher.month_to_num[re_date[1].lower()]
+                        + "." + re_date[3])
+
+        subjects = ["None Found"]
+        subject_soup = soup_header.select('.article_header-taxonomy')
+        # Some Papers have no Subjects
+        if subject_soup != []:
+            subjects = []
+            for subject in subject_soup[0].select('a'):
+                subjects.append(subject.text)
+
+        abstract_soup = soup.select('.articleBody_abstractText')
+        abstract = "Found Nothing"
+        # Some Papers have no abstract in the html
+        if abstract_soup != []:
+            abstract = abstract_soup[0].text
 
 
         references = []
-        references_soup = ref_cit_soup.select('ol#references')
+        references_soup = soup.select('ol#references')
+        # Some Papers have no References in the html
         if references_soup != []:
             for reference in references_soup[0].select('li'):
                 if reference.select('.refDoi') != []:
                     ref_doi = "https://doi.org/{}".format(reference.select('.refDoi')[0].text.strip()[5:])
                 else: 
-        #           No Doi -> No Paper
+                # Some references aren't papers and have no DOI; we ignore those
                     continue
+
                 ref_title = reference.select('.NLM_article-title')[0].text\
                         if reference.select('.NLM_article-title') != [] else None
                 ref_journal = reference.select('i')[0].text\
@@ -163,14 +185,16 @@ class Fetcher(JournalFetcher):
                 references.append(Citation(ref_doi, ref_title, ref_journal, ref_contributors, cit_type="Reference"))
 
         citations = []
-        citation_soup = ref_cit_soup.select('.cited-content_cbyCitation')
+        citation_soup = soup.select('.cited-content_cbyCitation')
+        # Some Papers have no Citations in the html
         if citation_soup != []:
             for citation in citation_soup[0].select('li'):
                 if citation.select('a[title="DOI URL"]') != []: 
                     cit_doi = citation.select('a[title="DOI URL"]')[0].text
                 else:
-        #           No Doi -> No Paper
+                # Some citations aren't papers and have no DOI; we ignore those
                     continue
+
                 cit_title = citation.select('.cited-content_cbyCitation_article-title')[0].text\
                         if citation.select('.cited-content_cbyCitation_article-title')!= [] else None
                 cit_journal = citation.select('.cited-content_cbyCitation_journal-name')[0].text\
@@ -179,14 +203,17 @@ class Fetcher(JournalFetcher):
                 # Replaces abbreviation with whole name
                 if cit_journal in JournalFetcher.abbrev_dict:
                     cit_journal = JournalFetcher.abbrev_dict[cit_journal]
+                
                 cit_contributors =[]
                 cit_contributors = citation.select('.cited-content_cbyCitation_article-contributors')[0]\
                     .text.replace("\n", " ").replace("\r", "").split(', ')
-        #           clean up of the last Entry
+                # Clean up the last entry, because sometimes there is a trailing ','
                 cit_contributors_last = cit_contributors.pop().strip(". ")
                 if cit_contributors_last != '':
-                    cit_contributors.append(cit_contributors_last)  
+                    cit_contributors.append(cit_contributors_last)
+
                 citations.append(Citation(cit_doi, cit_title, cit_journal, cit_contributors, cit_type = "Citation"))
 
-        return Publication(doi_url, title, contributors, journal, published
-                            , subjects, references, citations)
+        return Publication(doi_url = doi_url,title = title, contributors = contributors\
+                        , journal = journal,publication_date = published,subjects = subjects\
+                        ,references = references,citations = citations, abstract = abstract)
diff --git a/input/get/elsevier.py b/input/get/elsevier.py
new file mode 100755
index 0000000000000000000000000000000000000000..5c947a7c13a3133473cd926d04022f7964728c3c
--- /dev/null
+++ b/input/get/elsevier.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+
+"""
+Child class of JournalFetcher
+Usage: None, this is just a template and should be ignored
+"""
+
+# import re
+from input.get.journal_fetcher import JournalFetcher
+from input.publication import Publication
+
+
+class TemplateFetcher(JournalFetcher):
+
+    """
+    This is only a template and therefore has no functionality
+    """
+
+    # TODO: Naming-Convention:
+    #   Class: '[filename]Fetcher'
+    #   file: [journal-/organisation-name]
+    #       format: "<[a-z]+>.py" allowed
+    #   Not having this convention -> not imported
+    #   TODO: List of Compatable Journals
+    SUPPORTED_JOURNALS = []
+
+    @staticmethod
+    def can_use_url(url: str) -> bool:
+        """
+        Checks if given url links to a supported journal.
+        """
+
+        # TODO: Check the URL for compatability
+        #   Maybe like:
+        #       url_re = re.match(r'(https?://)?(doi.org/)?(10.(\d{4})/\w+.\S+)', url)
+        #       if url_re is not None:
+        #           return   url_re[4] in SUPPORTED_JOURNALS
+        #       else:
+        return False
+
+    @staticmethod
+    def get_publication(url: str) -> Publication:
+        """
+        Creates a Publication-instance.
+        """
+
+        # Create soup
+        try:
+            soup = JournalFetcher.get_soup(url)
+        except Exception as error:
+            raise error
+
+        # TODO: Fetch data from the HTML
+        #   soup = JournalFetcher.get_soup(url)
+        #   Check if soup fetched a Paper
+        #   doi -- https://doi.org/10.xxxx/....
+        #   title
+        #   contributors[]
+        #   journal -- if journal in JournalFetcher.abbrev_dict: journal = JournalFetcher.abbrev_dict[journal]
+        #   publication_date -- dd.mm.yyyy
+        #   subjects[]
+        #   abstract
+        #   references[]
+        #   citations[] 
+        # TODO: Create new Publication-instance
+        #   return Publication(doi_url = doi_url,title = title, contributors = contributors\
+        #              , journal = journal,publication_date = published,subjects = subjects\
+        #              ,references = references,citations = citations, abstract = abstract)
+        return None
+
+    @staticmethod
+    def get_pub_light(url: str) -> Publication:
+        """
+        Creates a Publication-instance without Citations and References.
+        """
+        # Create soup
+        try:
+            soup = JournalFetcher.get_soup(url)
+        except Exception as error:
+            raise error
+
+
+        # TODO: Fetch data from the HTML
+        #   soup = JournalFetcher.get_soup(url)
+        #   Check if soup fetched a Paper
+        #   doi -- https://doi.org/10.xxxx/....
+        #   title
+        #   contributors[]
+        #   journal -- if journal in JournalFetcher.abbrev_dict: journal = JournalFetcher.abbrev_dict[journal]
+        #   publication_date -- dd.mm.yyyy
+        #   subjects[]
+        # TODO: Create new Publication-instance
+        #   return Publication(doi_url = doi_url,title = title, contributors = contributors\
+        #               , journal = journal,publication_date = published, subjects = subjects\
+        #               , references = None, citations = None, abstract = None)
+        return None
diff --git a/input/get/journal_fetcher.py b/input/get/journal_fetcher.py
index 514af1f80f5c7d442b790aebf5fe3954d50f8f5d..03ce6324dc38aae05bbabbb6512183e498f9d7a3 100755
--- a/input/get/journal_fetcher.py
+++ b/input/get/journal_fetcher.py
@@ -12,7 +12,8 @@ from input.publication import Publication
 
 class JournalFetcher(metaclass=ABCMeta):
     """
-    This is a abstract-class for fetcher modules
+    This is an abstract class for fetcher modules.
+    It defines common helpers, shared dictionaries and the abstract functions subclasses must implement.
     """
     
     @staticmethod
@@ -41,7 +42,7 @@ class JournalFetcher(metaclass=ABCMeta):
         Abstract-function to be implemented in subclass.
         Checks if given url links to a supported journal
         """
-        raise AttributeError("JournalFetcher for '{}' hasnt implemented 'can_use_url()'".format(url))
+        raise AttributeError("JournalFetcher for '{}' hasn't implemented 'can_use_url(url)'".format(url))
 
 
     @staticmethod
@@ -51,46 +52,70 @@ class JournalFetcher(metaclass=ABCMeta):
         Abstract-function to be implemented in subclass.
         Creates a Publication-instance.
         """
-        raise AttributeError("JournalFetcher for '{}' hasnt implemented 'get_publication()'".format(url))
+        raise AttributeError("JournalFetcher for '{}' hasn't implemented 'get_publication(url)'".format(url))
 
+    @staticmethod
+    @abstractmethod
+    def get_pub_light(url: str) -> Publication:
+        """
+        Abstract-function to be implemented in subclass.
+        Creates a Publication-instance without references and citations.
+        """
+        raise AttributeError("JournalFetcher for '{}' hasn't implemented 'get_pub_light(url)'".format(url))
+
+    # Dictionary mapping month names to two-digit numbers
+    month_to_num = {
+        "january": "01",
+        "february": "02",
+        "march": "03",
+        "april": "04",
+        "may": "05",
+        "june": "06",
+        "july": "07",
+        "august": "08",
+        "september": "09",
+        "october": "10",
+        "november": "11",
+        "december": "12"
+    }
 
-    # A Dictionary, which connects abbreviation to whole journal-name
+    # A dictionary mapping journal-name abbreviations to full journal names
     abbrev_dict = {
-          "Nat. Protoc.":"Journal of Natural Products"
-        ,"PLoS Comput. Biol.":"PLoS Computational Biology"
-        ,"PLoS One":"PLoS One"
-        ,"Protein Sci.":"Protein Science"
+         "Nat. Protoc.":"Nature Protocols"
+        ,"Nat. Chem.":"Nature Chemistry"
+        ,"Nat. Med.":"Nature Medicine"
+        ,"Nat. Commun.":"Nature Communications"
+        ,"Nat. Cell Biol.":"Nature Cell Biology"
+        ,"Nat. Methods":"Nature Methods"
+        ,"Nat. Chem. Biol.":"Nature Chemical Biology"
         ,"J. Am. Chem. Soc.":"Journal of the American Chemical Society"
         ,"J. Chem. Phys.":"Journal of Chemical Physics"
-        ,"Appl. Sci.":"Applied Science"
-        ,"Comput. Sci. Eng.":"Computing in Science & Engineering"
-        ,"Beilstein J. Org. Chem.":"Beilstein Journal of Organic Chemistry"
-        ,"Biol. Chem.":"Biological Chemistry"
-        ,"Isr. J. Chem.":"Israel Journal of Chemistry"
-        ,"Nat. Methods":"Nature Methods"
-        ,"Proc. Natl. Acad. Sci. U. S. A.":"Proceedings of the National Academy of Sciences of the United States of America"
         ,"J. Phys. Chem. B":"Journal of Physical Chemistry B"
-        ,"Carbohydr. Res.":"Carbohydrate Research"
         ,"J. Chem. Theory Comput.":"Journal of Chemical Theory and Computation"
         ,"J. Mol. Biol.":"Journal of Molecular Biology"
-        ,"Nucleic Acids Res.":"Nucleic Acids Research"
         ,"J. Comput. Chem.":"Journal of Computational Chemistry"
         ,"J. Cheminf.":"Journal of Cheminformatics"
         ,"J. Med. Chem.":"Journal of Medicinal Chemistry"
         ,"J. Comput.-Aided Mol. Des.":"Journal of Computer-Aided Molecular Design"
         ,"J. Chem. Inf. Model.":"Journal of Chemical Information and Modeling"
-        ,"Mol. Cell":"Molecular Cell"
         ,"J. Cell Biolog.":"Journal of Cell Biology"
-        ,"Mol. Cell Biol.":"Molecular and Cellular Biology"
         ,"J. Cell Sci.":"Journal of Cell Science"
-        ,"Nat. Cell Biol.":"Nature Cell Biology"
         ,"J. Aerosol Sci. Technol.":"Aerosol Science and Technology"
+        ,"Mol. Cell":"Molecular Cell"
+        ,"Mol. Cell Biol.":"Molecular and Cellular Biology"
         ,"Mol. Biol. Cell":"Molecular Biology of the Cell"
+        ,"Exp. Cell Res.":"Experimental Cell Research"
+        ,"PLoS Comput. Biol.":"PLoS Computational Biology"
+        ,"PLoS One":"PLoS One"
+        ,"Protein Sci.":"Protein Science"
+        ,"Appl. Sci.":"Applied Science"
+        ,"Comput. Sci. Eng.":"Computing in Science & Engineering"
+        ,"Beilstein J. Org. Chem.":"Beilstein Journal of Organic Chemistry"
+        ,"Biol. Chem.":"Biological Chemistry"
+        ,"Isr. J. Chem.":"Israel Journal of Chemistry"
+        ,"Proc. Natl. Acad. Sci. U. S. A.":"Proceedings of the National Academy of Sciences of the United States of America"
+        ,"Carbohydr. Res.":"Carbohydrate Research"
+        ,"Nucleic Acids Res.":"Nucleic Acids Research"
         ,"Build. Environ.":"Building and Environment"
         ,"Sci. Rep.":"Scientific Reports"
-        ,"Nat. Chem.":"Nature Chemistry"
-        ,"Nat. Med.":"Nature Medicine"
-        ,"Nat. Commun.":"Nature Communications"
-        ,"Exp. Cell Res.":"Experimental Cell Research"
-        ,"Nat. Chem. Biol.":"Nature Chemical Biology"
         }
\ No newline at end of file
diff --git a/input/get/nature.py b/input/get/nature.py
deleted file mode 100644
index c50ea0ef9d1d4a9a386730e31cc72372cbf698c0..0000000000000000000000000000000000000000
--- a/input/get/nature.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Child class of JournalFetcher
-Usage: Check if Url can be used with 'can_use_url'
-       and then fetch publication with 'get_publication'
-"""
-
-# import re
-from input.get.journal_fetcher import JournalFetcher
-from input.publication import Publication
-
-
-class Fetcher(JournalFetcher):
-
-    """
-    scrapes publication metadata from a provided url
-    """
-
-    #   TODO: List of Compatable Journals
-    #   NOTE: nature does not use journal names in doi links, must match by 10.xxxx identifier instead
-    SUPPORTED_JOURNALS = []
-
-    @staticmethod
-    def can_use_url(url: str) -> bool:
-        """
-        Checks if given url links to a supported journal.
-        """
-
-        # TODO: Check the URL for compatability
-        #   re.match in SUPPORTED_JOURNALS
-        return False
-
-    @staticmethod
-    def get_publication(url: str) -> Publication:
-        """
-        Creates a Publication-instance.
-        """
-
-        soup = JournalFetcher.get_soup(url)
-
-        _doi_url = "https://doi.org/" + soup.head.find(attrs={"name": "DOI"}).get("content")
-        _title = soup.head.find(attrs={"name": "citation_title"}).get("content")
-        _journal = soup.head.find(attrs={"name": "citation_journal_title"}).get("content")
-        _published = soup.head.find(attrs={"name": "prism.publicationDate"}).get("content")
-        _contributors = []
-        _subjects = []
-
-        for creator in soup.head.findAll(attrs={"name": "dc.creator"}):
-            _contributors.append(creator.get("content"))
-
-        for subject in soup.head.findAll(attrs={"name": "dc.subject"}):
-            _subjects.append(subject.get("content"))
-
-        return Publication(_doi_url, _title, _contributors, _journal, _published, _subjects)
-
-        # TODO: Exceptions-handling
-        #   raise ValueException("Cant Fetch: '{}'".format(error))
-        # return None
diff --git a/input/get/template_.py b/input/get/template_.py
index 58de0237bd514f7dd1b5b25f251b740d33e3589e..7e4ba32b6bcaffe3cc9c6f4d605b06db8ed6c71c 100755
--- a/input/get/template_.py
+++ b/input/get/template_.py
@@ -10,16 +10,17 @@ from input.get.journal_fetcher import JournalFetcher
 from input.publication import Publication
 
 
-class Fetcher(JournalFetcher):
+class TemplateFetcher(JournalFetcher):
 
     """
     This is only a template and therefore has no functionality
     """
 
     # TODO: Naming-Convention:
-    #   Class: 'Fetcher'
+    #   Class: '[filename]Fetcher'
     #   file: [journal-/organisation-name]
-    #       format = "[a-z]*.py" allowed
+    #       format: "<[a-z]+>.py" allowed
+    #   Not having this convention -> not imported
     #   TODO: List of Compatable Journals
     SUPPORTED_JOURNALS = []
 
@@ -30,10 +31,11 @@ class Fetcher(JournalFetcher):
         """
 
         # TODO: Check the URL for compatability
-        #   url_re = re.match(r'(https?://)?(doi.org/)?(10.(\d{4})/\w+.\S+)', url)
-        #   if url_re is not None:
-        #       return   url_re[4] in SUPPORTED_JOURNALS
-        #   else:
+        #   Maybe like:
+        #       url_re = re.match(r'(https?://)?(doi.org/)?(10.(\d{4})/\w+.\S+)', url)
+        #       if url_re is not None:
+        #           return   url_re[4] in SUPPORTED_JOURNALS
+        #       else:
         return False
 
     @staticmethod
@@ -44,8 +46,39 @@ class Fetcher(JournalFetcher):
 
         # TODO: Fetch data from the HTML
         #   soup = JournalFetcher.get_soup(url)
-        #   doi,title,contributors[],journal,publication_date,subjects[],references[],citations[] 
+        #   Check if soup fetched a Paper
+        #   doi -- https://doi.org/10.xxxx/....
+        #   title
+        #   contributors[]
+        #   journal -- if journal in JournalFetcher.abbrev_dict: journal = JournalFetcher.abbrev_dict[journal]
+        #   publication_date -- dd.mm.yyyy
+        #   subjects[]
+        #   abstract
+        #   references[]
+        #   citations[] 
         # TODO: Create new Publication-instance
-        #   return Publication(doi_url, title, contributors = [], journal
-        #           , publication_date, subjects = [], references = [], citations = [])
+        #   return Publication(doi_url = doi_url,title = title, contributors = contributors\
+        #              , journal = journal,publication_date = published,subjects = subjects\
+        #              ,references = references,citations = citations, abstract = abstract)
+        return None
+
+    @staticmethod
+    def get_pub_light(url: str) -> Publication:
+        """
+        Creates a Publication-instance without Citations and References.
+        """
+
+        # TODO: Fetch data from the HTML
+        #   soup = JournalFetcher.get_soup(url)
+        #   Check if soup fetched a Paper
+        #   doi -- https://doi.org/10.xxxx/....
+        #   title
+        #   contributors[]
+        #   journal -- if journal in JournalFetcher.abbrev_dict: journal = JournalFetcher.abbrev_dict[journal]
+        #   publication_date -- dd.mm.yyyy
+        #   subjects[]
+        # TODO: Create new Publication-instance
+        #   return Publication(doi_url = doi_url,title = title, contributors = contributors\
+        #               , journal = journal,publication_date = published, subjects = subjects\
+        #               , references = None, citations = None, abstract = None)
         return None
\ No newline at end of file
diff --git a/input/interface.py b/input/interface.py
index 59515b3a3a2a5361222b8e55d3a7314ab3907132..f0c9ca39f863fe894769dd6fdbc97a13e6e2caad 100755
--- a/input/interface.py
+++ b/input/interface.py
@@ -2,8 +2,10 @@
 
 """
 Interface for the Input-Package only this should be accessed from outside this Package.
-
+Usage:  Create an InputInterface instance and call 'get_publication(url)' to get
+        a publication instance.
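+
+Example (a minimal sketch; assumes the url is supported by one of the fetchers):
+    interface = InputInterface()
+    publication = interface.get_publication("https://doi.org/10.1021/acs.jcim.1c00203")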
 """
+
 from os import walk
 import importlib
 import pathlib
@@ -19,44 +21,51 @@ class InputInterface:
     get_path = None
     fetcher_classes=[]
 
-    # '__new__' is called before '__init__' and gives us an instance
+
     def __new__(cls, *args, **kwargs):
-        
-        # checks if an instance exists and if it doesnt creates one
+        """
+        Creates and/or returns the one class instance (Singleton pattern).
+        This method is automatically called before '__init__'.
+        """
+        # Checks if an instance exists and creates one if it doesn't
         if cls.instance == None:
+            # standard '__new__' 
             cls.instance = super(InputInterface, cls).__new__(cls,*args, **kwargs)
         
         return cls.instance
 
+
     def __init__(self):
-        # imports all modules
+        """
+        Initializes the Singleton and imports all modules in <path>/input/get/.
+        Fetchers in that directory are auto-imported.
+        """
 
         if self.fetcher_classes ==[]:
             self.import_fetcher_classes()
             if self.fetcher_classes ==[]:
-                raise AttributeError("No specific Fetchers where found at: '{}'"
+                raise ImportError("No specific Fetchers were found at: '{}'"
                                     .format(self.get_path))
         
 
     def get_publication(self, url: str) -> Publication:
         """
-        The interface-method to get a Publication-instance
-        (including it's citations and references)
+        The interface-method to get a publication-instance
+        (including its citations and references)
 
         Parameters
         ----------
-        :param url: url to a Publication
+        :param url: url to a publication
         :type url: str
-        :return: Publication instance or None if not supported
+        :return: Publication instance; raises ValueError if the url is not supported
         """
         
-        # Checks if module supports the 'url' and 
-        # returns a Publication if it does.
+        # Asks every fetcher whether it supports the url and fetches with the first that does
         for fetcher_class in InputInterface.fetcher_classes:
             if fetcher_class.can_use_url(url):
                 return fetcher_class.get_publication(url)
             
-        # No Module for given url was found
+        # No fetcher for given url was found
         raise ValueError("'{}' is not supported".format(url))
         
     def get_pub_light(self, url: str) -> Publication:
@@ -66,18 +75,17 @@ class InputInterface:
 
         Parameters
         ----------
-        :param url: url to a Publication
+        :param url: url to a publication
         :type url: str
-        :return: Publication instance or None if not supported
+        :return: Publication instance; raises ValueError if the url is not supported
         """
         
-        # Checks if module supports the 'url' and 
-        # returns a Publication if it does.
+        # Asks every fetcher whether it supports the url and fetches with the first that does
         for fetcher_class in InputInterface.fetcher_classes:
             if fetcher_class.can_use_url(url):
                 return fetcher_class.get_pub_light(url)
             
-        # No Module for given url was found
+        # No fetcher for given url was found
         raise ValueError("'{}' is not supported".format(url))
     
     def get_supported_fetchers(self):
@@ -87,9 +95,9 @@ class InputInterface:
 
     def import_fetcher_classes(self):
         """
-        Searches in 'get', if there are [a-z]*.py modules (specific Fetchers)
+        Searches '<path>/input/get' for [a-z]+.py modules (specific Fetchers)
         and tries to import them.
-        Saves found modules in 'fetcher_files'.
+        Saves found modules in 'fetcher_classes'.
         """
 
         # Path to 'get'-package
@@ -101,13 +109,22 @@ class InputInterface:
             if re.match(r'[a-z]+.py', file) is not None:
                 fetcher_file_names.append(file)
 
-        # Tries to import those modules and saves their 'Fetcher'-class
+        # Tries to import those modules and saves their class
         for file in fetcher_file_names:
             try:
                 fetcher_class = importlib.import_module("input.get.{}".format(file[:-3]))
-                try:
-                    self.fetcher_classes.append(fetcher_class.__getattribute__('Fetcher'))
-                except Exception as error:
-                    ImportError("Module '{}' does not have a 'Fetcher'-class".format(file[:-3]))
             except Exception:
+                # Module is broken or can not be loaded
+                raise ImportError("Module '{}' can not be imported".format(file[:-3]))
+            try:
+                # Looks up the class following the naming convention and registers it
+                # (file: <[a-z]+>.py like 'acs.py' -> class: 'AcsFetcher')
+                self.fetcher_classes.append(
+                    getattr(fetcher_class, '{}Fetcher'.format(file[:-3].capitalize())))
+            except AttributeError:
+                # Module has no '<Filename>Fetcher'-class (e.g. a template); skip it
+                continue
diff --git a/input/publication.py b/input/publication.py
index fc512e7173a84695ea566706784c565a7b5ebb8f..5fa8ef0f8d62d73465db8219de35cc286717a044 100755
--- a/input/publication.py
+++ b/input/publication.py
@@ -1,17 +1,18 @@
 #!/usr/bin/env python3
 
-# this is needed for typing pre python 3.9, this maybe as an large Overhead
+# this is needed for typing pre python 3.9; it may add a large overhead
 from typing import Any, List
 
 
 class Publication:
     """
-        Represents a Publications
+    Represents a publication
     """
     def __init__(self, doi_url: str, title: str \
                  , contributors: List[str], journal: str \
                  , publication_date: str, subjects: List[str]\
-                 , references: List[Any] = None, citations: List[Any] = None ):
+                 , references: List[Any] = None, citations: List[Any] = None\
+                 , abstract: str = None):
         """
         Parameters
         ----------
@@ -21,14 +22,18 @@ class Publication:
         :type title: str
         :param contributors:list of all contributors
         :type contributors: list[]
-        :param published: date of release
-        :type published: str
+        :param journal: the journal of the publication
+        :type journal: str
+        :param publication_date: date of release
+        :type publication_date: str (format dd.mm.yyyy)
         :param subjects: the subject of the Publication
         :type subjects: List[str]
-        :param references: the Citation which is been referenced by this Publication 
+        :param references: the Citations which are referenced by this publication
         :type references: List[Any]
-        :param citations: the Citation which references this Publication
+        :param citations: the Citations which reference this publication
         :type citations: List[Any]
+        :param abstract: the abstract of this publication
+        :type abstract: str
         :return: None
         """
         self.doi_url = doi_url
@@ -45,27 +50,37 @@ class Publication:
             self.citations = []
         else: 
             self.citations = citations
+        self.abstract = abstract
         
         # For the 'Verarbeitungsgruppe'
         self.group = None
 
+
     def __str__(self) -> str:
-        return ("Title:        {}\n"
-                "Doi-url:      {}\n"
-                "Authors:      {}\n"
-                "Journal:      {}\n"
-                "Published on: {}\n"
-                "Subjects:     {}\n"
-                "References:   \n{}\n"
-                "Citations:    \n{}")\
+        """
+        Default string-converter for this class
+        """
+        return ("Title:\t\t\t{}\n"
+                "Doi-url:\t\t{}\n"
+                "Authors:\t\t{}\n"
+                "Journal:\t\t{}\n"
+                "Published on:\t{}\n"
+                "Subjects:\t\t{}\n"
+                "Abstract:\t\t{}\n"
+                "References:\n{}\n\n"
+                "Citations:\n{}")\
                 .format(self.title, self.doi_url, ", ".join(self.contributors)
                         , self.journal, self.publication_date
-                        , ", ".join(self.subjects)
+                        , ", ".join(self.subjects), self.abstract
                         , "\n".join(self.get_citation_string(self.references))
                         , "\n".join(self.get_citation_string(self.citations)))
 
+
     @staticmethod
     def get_citation_string(citations):
+        """
+        Helper-method for __str__
+        """
         if citations == []:
             return ["None"]
         else:
@@ -74,38 +89,16 @@ class Publication:
                 citation_string.append(citation.__str__())
         return citation_string
 
-    def add_citations(self, citation) -> None:
-        """
-        Appends a list of Citations or Citation to self.citations.
-
-        Parameter
-        ---------
-        :param citation: Citation or Reference of the Publication
-        :type citation: Citation or list[Citation]
-        :return: self.citations
-        """
-        if type(citation) is Citation:
-            self.citations.append(citation)
-
-        # Checks if 'citation' is a list of Citations
-        elif type(citation) is list:
-            for _cit in citation:
-                if type(_cit) is Citation:
-                    self.citations.append(_cit)
-                else:
-                    raise TypeError("_set_citation expects Citations or List of Citations, not: '{}'"
-                                    .format(type(_cit)))
-        else:
-            raise TypeError("_set_citation expects Citations or List of Citations, not: '{}'"
-                            .format(type(citation)))
-
-        return self.citations
 
     def __eq__(self, other) -> bool:
-        """ Compares the unique doi_url of two Publications"""
-        if type(self)==type(other):
+        """
+        Compares the unique doi_url of this publication to a Publication, Citation or string.
+        Overrides the "==" operator.
+        """
+        if type(other) == Publication or type(other) == Citation:
             return self.doi_url == other.doi_url
-        return False
+        else:
+            return self.doi_url == other
 
 
 class Citation:
@@ -115,13 +108,15 @@ class Citation:
         """
         Parameters
         ----------
-        :param doi_url: doi_url of the publication
+        :param doi_url: doi_url of the citation
         :type doi_url: str
-        :param title: title of the publication
+        :param title: title of the citation
         :type title: str
+        :param journal: the journal of the citation
+        :type journal: str
         :param contributors: list of all contributors
         :type contributors: List[str]
-        :param cit_type: Specifies if Reference or Citation
+        :param cit_type: Specifies if it's a Reference or Citation
         :type cit_type: str
         :return: None
         """
@@ -132,12 +127,28 @@ class Citation:
         self.contributors = contributors
         self.cit_type = cit_type
 
+
     def __str__(self) -> str:
-        return ("\t{}-Title:        {}\n"
-                "\t{}-Doi:          {}\n"
-                "\t{}-Journal:      {}\n"
-                "\t{}-Contributors: {}\n")\
+        """
+        Default string-converter for this class
+        """
+        return ("\n"
+                "\t{}-Title:\t\t\t{}\n"
+                "\t{}-Doi:\t\t\t{}\n"
+                "\t{}-Journal:\t\t{}\n"
+                "\t{}-Contributors:\t\t{}")\
                 .format(self.cit_type, self.title
                       , self.cit_type, self.doi_url
                       , self.cit_type, self.journal
                       , self.cit_type, ", ".join(self.contributors))
+
+
+    def __eq__(self, other) -> bool:
+        """
+        Compares the unique doi_url of this citation to a Publication, Citation or string.
+        Overrides the "==" operator.
+        """
+        if type(other) == Publication or type(other) == Citation:
+            return self.doi_url == other.doi_url
+        else:
+            return self.doi_url == other
diff --git a/input/test/test_acs.py b/input/test/test_acs.py
index e3dfe84a09d3599de32efbab0dd60655b5414152..601cf02122cd2c8404ad600335c4a983b5f73ddb 100644
--- a/input/test/test_acs.py
+++ b/input/test/test_acs.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-from input.get.acs import Fetcher as Acs
+from input.get.acs import AcsFetcher as Acs
 from input.publication import Publication, Citation
 from input.test.test_input import FetcherTestCase
 
@@ -11,6 +11,9 @@ class AcsTestCase(FetcherTestCase):
     """
 
     def test_acs_url(self):
+        """
+        Tests whether AcsFetcher recognizes its URLs.
+        """
         # Positive Testing
         self.can_use_url_test(Acs, "https://doi.org/10.1021/acs.jcim.1c00203"           , True)
         self.can_use_url_test(Acs, "doi.org/10.1021/acs.jcim.1c00203"                   , True)
@@ -29,6 +32,9 @@ class AcsTestCase(FetcherTestCase):
 
 
     def test_acs_publication(self):
+        """
+        Compares fetched Publication with a static one.
+        """
         url = "https://doi.org/10.1021/acs.jcim.1c00203"
         self.get_publication_test(Acs, url, self.expectedPubs[url])
 
@@ -44,8 +50,9 @@ class AcsTestCase(FetcherTestCase):
            title = "AutoDock Vina 1.2.0: New Docking Methods, Expanded Force Field, and Python Bindings",
            contributors = ["Jerome Eberhardt", "Diogo Santos-Martins", "Andreas F. Tillack", "Stefano Forli"],
            journal="Journal of Chemical Information and Modeling",
-           publication_date = "July 19, 2021",
+           publication_date = "19.07.2021",
            subjects = ["Algorithms","Ligands","Molecules","Receptors","Macrocycles"],
+           abstract = "AutoDock Vina is arguably one of the fastest and most widely used open-source programs for molecular docking. However, compared to other programs in the AutoDock Suite, it lacks support for modeling specific features such as macrocycles or explicit water molecules. Here, we describe the implementation of this functionality in AutoDock Vina 1.2.0. Additionally, AutoDock Vina 1.2.0 supports the AutoDock4.2 scoring function, simultaneous docking of multiple ligands, and a batch mode for docking a large number of ligands. Furthermore, we implemented Python bindings to facilitate scripting and the development of docking workflows. This work is an effort toward the unification of the features of the AutoDock4 and AutoDock Vina programs. The source code is available at https://github.com/ccsb-scripps/AutoDock-Vina.",
            references = [
             Citation(doi_url = "https://doi.org/10.1002/jcc.21334"
                 , title ="AutoDock Vina: improving the speed and accuracy of docking with a new scoring function, efficient optimization, and multithreading"
diff --git a/input/test/test_input.py b/input/test/test_input.py
index b2ca55f961565fd1192b72ce992c9ff95bd23020..33bccbeecdb753c4339b008a2bc199abed54aa07 100755
--- a/input/test/test_input.py
+++ b/input/test/test_input.py
@@ -5,43 +5,74 @@ from input.publication import Publication
 
 """
 Testing the Publication fetcher
-
-Publication 1: 'https://doi.org/10.1021/acs.jcim.1c00203'
-Publication 2: 'doi.org/10.1021/acs.jcim.1c00917'
-Publication 3: '10.1038/nchem.1781'
-Publication 4: '11.12/jaj'
-Publication 5: '11.12/'
-Publication 6: 'https://doi.org/10.1021/acs.jmedchem.0c01332' # Paper is a PDF
 """
-# TODO: Testcases for:
-#       - Specific Journals: Inherit from FetcherTestCase
-#       - interface module-importer (test case)
-#       - Error detection
-#           - wrong/no Journal_fetchers
-#           - wrong urls
-#           - correct Types in publication
-#       - Edgecases (i.e. paper as pdf, no connection, etc)
+
+
+txt_files = ["acs.txt"]
+supported_fetchers = ["AcsFetcher", "NatureFetcher"]
 
 
 class InterfaceTestCase(unittest.TestCase):
+    """
+    Testcase for the InputInterface-class
+    """
+
     def setUp(self):
-        self.assertEqual(InputInterface.instance, None)
+        """
+        Runs before every test in this TestCase.
+        """
+
         self.interface = InputInterface()
 
+
     def test_singleton(self):
-        # interface should already be made in setUp()
+        """
+        Checks if InputInterface is a Singleton.
+        """
+        # Interface should already be made in setUp()
         self.assertNotEqual(self.interface.instance, None)
+        # Checks if there is only one instance
         new_interface = InputInterface()
         self.assertEqual(self.interface, new_interface)
     
-    # def test_imported_modules(self):
-    #    fetchers = self.interface.get_supported_fetchers
 
-class FetcherTestCase(unittest.TestCase):
+    def test_imported_modules(self):
+        """
+        Checks if every Fetcher has been automatically imported.
+        """
+        for fetcher in self.interface.get_supported_fetchers():
+            self.assertIn(fetcher, supported_fetchers)
+
+
+    def test_fetcher_selection(self):
+        """
+        Fetches every txt-file in 'txt_files', extracts the doi-url of the publication
+        and then compares the txt-string with the newly fetched one.
+        """
 
+        for txt in txt_files:
+            with open("input/test/test_txt/{}".format(txt), 'r') as f:
+                txt_list = f.readlines()
+            # Extracts the doi-url
+            url = txt_list[1][8:].strip()
+            # Creates a new publication from the doi-url
+            pub = self.interface.get_publication(url)
+            self.maxDiff = None
+            # Important: the txt-file must not end with the extra '\n' that print() appends
+            self.assertEqual("".join(txt_list), pub.__str__())
+
+
+class FetcherTestCase(unittest.TestCase):
+    """
+    Parent class for the journal-specific fetcher tests.
+    """
 
     def can_use_url_test(self, fetcher : JournalFetcher, test_url: str, expected_res: bool):
-        # Tests the 'can_use_url'-method
+        """
+        Tests the 'can_use_url'-method
+        """ 
         self.assertEqual(fetcher.can_use_url(test_url), expected_res)
 
 
@@ -56,6 +87,7 @@ class FetcherTestCase(unittest.TestCase):
         self.assertEqual(actual_res.journal, expected_res.journal)
         self.assertEqual(actual_res.publication_date, expected_res.publication_date)
         self.assertEqual(actual_res.subjects, expected_res.subjects)
+        self.assertEqual(actual_res.abstract, expected_res.abstract)
 
         # Checking for all references
         self.assertEqual(len(actual_res.references), len(expected_res.references))
@@ -77,6 +109,8 @@ class FetcherTestCase(unittest.TestCase):
 
 
     def get_publication_exception_test(self, fetcher: JournalFetcher, test_url: str):
-        # Ckecks 
+        """
+        Checks if fetcher gives expected ValueError.
+        """
         with self.assertRaises(ValueError):
             fetcher.get_publication(test_url)
\ No newline at end of file
diff --git a/input/test/test_txt/acs.txt b/input/test/test_txt/acs.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c21c3ace6c41d23b73147719ba6c34a712dbec08
--- /dev/null
+++ b/input/test/test_txt/acs.txt
@@ -0,0 +1,166 @@
+Title:			Feasibility of Active Machine Learning for Multiclass Compound Classification
+Doi-url:		https://doi.org/10.1021/acs.jcim.5b00332
+Authors:		Tobias Lang, Florian Flachsenberg, Ulrike von Luxburg, Matthias Rarey
+Journal:		Journal of Chemical Information and Modeling
+Published on:	07.01.2016
+Subjects:		Algorithms, Molecules, Drug discovery, Screening assays, Receptors
+Abstract:		A common task in the hit-to-lead process is classifying sets of compounds into multiple, usually structural classes, which build the groundwork for subsequent SAR studies. Machine learning techniques can be used to automate this process by learning classification models from training compounds of each class. Gathering class information for compounds can be cost-intensive as the required data needs to be provided by human experts or experiments. This paper studies whether active machine learning can be used to reduce the required number of training compounds. Active learning is a machine learning method which processes class label data in an iterative fashion. It has gained much attention in a broad range of application areas. In this paper, an active learning method for multiclass compound classification is proposed. This method selects informative training compounds so as to optimally support the learning progress. The combination with human feedback leads to a semiautomated interactive multiclass classification procedure. This method was investigated empirically on 15 compound classification tasks containing 86–2870 compounds in 3–38 classes. The empirical results show that active learning can solve these classification tasks using 10–80% of the data which would be necessary for standard learning techniques.
+References:
+None
+
+Citations:
+
+	Citation-Title:			Active Learning for Drug Design: A Case Study on the Plasma Exposure of Orally Administered Drugs. 
+	Citation-Doi:			https://doi.org/10.1021/acs.jmedchem.1c01683
+	Citation-Journal:		Journal of Medicinal Chemistry
+	Citation-Contributors:		Xiaoyu Ding, Rongrong Cui, Jie Yu, Tiantian Liu, Tingfei Zhu, Dingyan Wang, Jie Chang, Zisheng Fan, Xiaomeng Liu, Kaixian Chen, Hualiang Jiang, Xutong Li, Xiaomin Luo, Mingyue Zheng
+
+	Citation-Title:			Concepts of Artificial Intelligence for Computer-Assisted Drug Discovery. 
+	Citation-Doi:			https://doi.org/10.1021/acs.chemrev.8b00728
+	Citation-Journal:		Chemical Reviews
+	Citation-Contributors:		Xin Yang, Yifei Wang, Ryan Byrne, Gisbert Schneider, Shengyong Yang
+
+	Citation-Title:			De Novo Molecule Design by Translating from Reduced Graphs to SMILES. 
+	Citation-Doi:			https://doi.org/10.1021/acs.jcim.8b00626
+	Citation-Journal:		Journal of Chemical Information and Modeling
+	Citation-Contributors:		Peter Pogány, Navot Arad, Sam Genway, Stephen D. Pickett
+
+	Citation-Title:			Designing Algorithms To Aid Discovery by Chemical Robots. 
+	Citation-Doi:			https://doi.org/10.1021/acscentsci.8b00176
+	Citation-Journal:		ACS Central Science
+	Citation-Contributors:		Alon B. Henson, Piotr S. Gromski, Leroy Cronin
+
+	Citation-Title:			Modeling Kinase Inhibition Using Highly Confident Data Sets. 
+	Citation-Doi:			https://doi.org/10.1021/acs.jcim.7b00729
+	Citation-Journal:		Journal of Chemical Information and Modeling
+	Citation-Contributors:		Sorin Avram, Alina Bora, Liliana Halip, Ramona Curpăn
+
+	Citation-Title:			Predictive Models for Fast and Effective Profiling of Kinase Inhibitors. 
+	Citation-Doi:			https://doi.org/10.1021/acs.jcim.5b00646
+	Citation-Journal:		Journal of Chemical Information and Modeling
+	Citation-Contributors:		Alina  Bora, Sorin  Avram, Ionel  Ciucanu, Marius  Raica, and Stefana  Avram
+
+	Citation-Title:			Evaluation of categorical matrix completion algorithms: toward improved active learning for drug discovery. 
+	Citation-Doi:			https://doi.org/10.1093/bioinformatics/btab322
+	Citation-Journal:		Bioinformatics
+	Citation-Contributors:		Huangqingbo  Sun, Robert F  Murphy
+
+	Citation-Title:			An Artificial Intelligence Approach Based on Hybrid CNN-XGB Model to Achieve High Prediction Accuracy through Feature Extraction, Classification and Regression for Enhancing Drug Discovery in Biomedicine. 
+	Citation-Doi:			https://doi.org/10.46300/91011.2021.15.22
+	Citation-Journal:		International Journal of Biology and Biomedical Engineering
+	Citation-Contributors:		Mukesh  Madanan, Biju T.  Sayed, Nurul Akhmal  Mohd Zulkefli, Nitha C.  Velayudhan
+
+	Citation-Title:			Artificial Intelligence in Medicinal Chemistry. 
+	Citation-Doi:			https://doi.org/10.1002/0471266949.bmc267
+	Citation-Journal:		
+	Citation-Contributors:		Edward  Griffen, Alexander  Dossetter, Andrew  Leach, Shane  Montague
+
+	Citation-Title:			Practical Chemogenomic Modeling and Molecule Discovery Strategies Unveiled by Active Learning. 
+	Citation-Doi:			https://doi.org/10.1016/B978-0-12-801238-3.11533-8
+	Citation-Journal:		
+	Citation-Contributors:		J.B.  Brown
+
+	Citation-Title:			Machine learning phases and criticalities without using real data for training. 
+	Citation-Doi:			https://doi.org/10.1103/PhysRevB.102.224434
+	Citation-Journal:		Physical Review B
+	Citation-Contributors:		D.-R.  Tan, F.-J.  Jiang
+
+	Citation-Title:			Active learning effectively identifies a minimal set of maximally informative and asymptotically performant cytotoxic structure–activity patterns in NCI-60 cell lines. 
+	Citation-Doi:			https://doi.org/10.1039/D0MD00110D
+	Citation-Journal:		RSC Medicinal Chemistry
+	Citation-Contributors:		Takumi  Nakano, Shunichi  Takeda, J.B.  Brown
+
+	Citation-Title:			Active learning efficiently converges on rational limits of toxicity prediction and identifies patterns for molecule design. 
+	Citation-Doi:			https://doi.org/10.1016/j.comtox.2020.100129
+	Citation-Journal:		Computational Toxicology
+	Citation-Contributors:		Ahsan  Habib Polash, Takumi  Nakano, Christin  Rakers, Shunichi  Takeda, J.B.  Brown
+
+	Citation-Title:			Practical considerations for active machine learning in drug discovery. 
+	Citation-Doi:			https://doi.org/10.1016/j.ddtec.2020.06.001
+	Citation-Journal:		Drug Discovery Today: Technologies
+	Citation-Contributors:		Daniel  Reker
+
+	Citation-Title:			Designing compact training sets for data-driven molecular property prediction through optimal exploitation and exploration. 
+	Citation-Doi:			https://doi.org/10.1039/C9ME00078J
+	Citation-Journal:		Molecular Systems Design & Engineering
+	Citation-Contributors:		Bowen  Li, Srinivas  Rangarajan
+
+	Citation-Title:			Applicability Domain of Active Learning in Chemical Probe Identification: Convergence in Learning from Non-Specific Compounds and Decision Rule Clarification. 
+	Citation-Doi:			https://doi.org/10.3390/molecules24152716
+	Citation-Journal:		Molecules
+	Citation-Contributors:		Ahsan Habib  Polash, Takumi  Nakano, Shunichi  Takeda, J.B.  Brown
+
+	Citation-Title:			Capturing and applying knowledge to guide compound optimisation. 
+	Citation-Doi:			https://doi.org/10.1016/j.drudis.2019.02.004
+	Citation-Journal:		Drug Discovery Today
+	Citation-Contributors:		Matthew  Segall, Tamsin  Mansley, Peter  Hunt, Edmund  Champness
+
+	Citation-Title:			A novel graph kernel on chemical compound classification. 
+	Citation-Doi:			https://doi.org/10.1142/S0219720018500269
+	Citation-Journal:		Journal of Bioinformatics and Computational Biology
+	Citation-Contributors:		Qiangrong  Jiang, Jiajia  Ma
+
+	Citation-Title:			Accelerating Drug Discovery Using Convolution Neural Network Based Active Learning. 
+	Citation-Doi:			https://doi.org/10.1109/TENCON.2018.8650298
+	Citation-Journal:		
+	Citation-Contributors:		Pengfei  Liu, Kwong-Sak  Leung
+
+	Citation-Title:			An Adaptive Lightweight Security Framework Suited for IoT. 
+	Citation-Doi:			https://doi.org/10.5772/intechopen.73712
+	Citation-Journal:		
+	Citation-Contributors:		Menachem  Domb
+
+	Citation-Title:			Adaptive mining and model building of medicinal chemistry data with a multi-metric perspective. 
+	Citation-Doi:			https://doi.org/10.4155/fmc-2018-0188
+	Citation-Journal:		Future Medicinal Chemistry
+	Citation-Contributors:		JB  Brown
+
+	Citation-Title:			Chemogenomic Active Learning's Domain of Applicability on Small, Sparse qHTS Matrices: A Study Using Cytochrome P450 and Nuclear Hormone Receptor Families. 
+	Citation-Doi:			https://doi.org/10.1002/cmdc.201700677
+	Citation-Journal:		ChemMedChem
+	Citation-Contributors:		Christin  Rakers, Rifat Ara  Najnin, Ahsan Habib  Polash, Shunichi  Takeda, J.B.  Brown
+
+	Citation-Title:			Automating drug discovery. 
+	Citation-Doi:			https://doi.org/10.1038/nrd.2017.232
+	Citation-Journal:		Nature Reviews Drug Discovery
+	Citation-Contributors:		Gisbert  Schneider
+
+	Citation-Title:			Classifiers and their Metrics Quantified. 
+	Citation-Doi:			https://doi.org/10.1002/minf.201700127
+	Citation-Journal:		Molecular Informatics
+	Citation-Contributors:		J. B.  Brown
+
+	Citation-Title:			Active Search for Computer-aided Drug Design. 
+	Citation-Doi:			https://doi.org/10.1002/minf.201700130
+	Citation-Journal:		Molecular Informatics
+	Citation-Contributors:		Dino  Oglic, Steven A.  Oatley, Simon J. F.  Macdonald, Thomas  Mcinally, Roman  Garnett, Jonathan D.  Hirst, Thomas  Gärtner
+
+	Citation-Title:			Selection of Informative Examples in Chemogenomic Datasets. 
+	Citation-Doi:			https://doi.org/10.1007/978-1-4939-8639-2_13
+	Citation-Journal:		
+	Citation-Contributors:		Daniel  Reker, J. B.  Brown
+
+	Citation-Title:			The value of prior knowledge in machine learning of complex network systems. 
+	Citation-Doi:			https://doi.org/10.1093/bioinformatics/btx438
+	Citation-Journal:		Bioinformatics
+	Citation-Contributors:		Dana  Ferranti, David  Krane, David  Craft
+
+	Citation-Title:			Lightweight adaptive Random-Forest for IoT rule generation and execution. 
+	Citation-Doi:			https://doi.org/10.1016/j.jisa.2017.03.001
+	Citation-Journal:		Journal of Information Security and Applications
+	Citation-Contributors:		Menachem  Domb, Elisheva  Bonchek-Dokow, Guy  Leshem
+
+	Citation-Title:			Active learning for computational chemogenomics. 
+	Citation-Doi:			https://doi.org/10.4155/fmc-2016-0197
+	Citation-Journal:		Future Medicinal Chemistry
+	Citation-Contributors:		Daniel  Reker, Petra  Schneider, Gisbert  Schneider, JB  Brown
+
+	Citation-Title:			Small Random Forest Models for Effective Chemogenomic Active Learning. 
+	Citation-Doi:			https://doi.org/10.2751/jcac.18.124
+	Citation-Journal:		Journal of Computer Aided Chemistry
+	Citation-Contributors:		Christin  Rakers, Daniel  Reker, J.B.  Brown
+
+	Citation-Title:			Large-Scale Off-Target Identification Using Fast and Accurate Dual Regularized One-Class Collaborative Filtering and Its Application to Drug Repurposing. 
+	Citation-Doi:			https://doi.org/10.1371/journal.pcbi.1005135
+	Citation-Journal:		PLOS Computational Biology
+	Citation-Contributors:		Hansaim  Lim, Aleksandar  Poleksic, Yuan  Yao, Hanghang  Tong, Di  He, Luke  Zhuang, Patrick  Meng, Lei  Xie
\ No newline at end of file
diff --git a/input/test/unittest b/input/test/unittest
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/verarbeitung/construct_new_graph/add_citations_rec.py b/verarbeitung/construct_new_graph/add_citations_rec.py
index 95afa3553e9b0927196bcb94d792b5b5be82e83d..dee665cbbdfb139e0ddc44654784cf1236a82be9 100644
--- a/verarbeitung/construct_new_graph/add_citations_rec.py
+++ b/verarbeitung/construct_new_graph/add_citations_rec.py
@@ -46,7 +46,7 @@ def get_cit_type_list(pub, cit_type):
         :param cit_type:            variable to differentiate citation and reference call
         :type cit_type:             String
 
-        function to create nodes and edges and call create_graph_structure_citations
+        function to return the citation or reference list for the given publication
     '''
     if cit_type == "Citation":
         return(pub.citations)
@@ -164,12 +164,16 @@ def process_citations_rec(citations_pub_obj_list, search_depth, search_depth_max
     '''
 
     # adds next level to nodes/edges
+
+    new_citation_pub_obj_save_list = []
     for pub in citations_pub_obj_list:
-        new_citation_pub_obj_list = create_graph_structure_citations(pub, search_depth, search_depth_max, cit_type, test_var)   
+        new_citation_pub_obj_list = create_graph_structure_citations(pub, search_depth, search_depth_max, cit_type, test_var)
+        if len(new_citation_pub_obj_list) > 0:
+            new_citation_pub_obj_save_list += new_citation_pub_obj_list
 
-        # If the maximum depth has not yet been reached, calls function recursivly with increased depth 
+    # if the maximum depth has not yet been reached, call the function recursively with increased depth
-        if (search_depth < search_depth_max):
-            process_citations_rec(new_citation_pub_obj_list, search_depth+1, search_depth_max, cit_type, test_var)
+    if (search_depth < search_depth_max):
+        process_citations_rec(new_citation_pub_obj_save_list, search_depth+1, search_depth_max, cit_type, test_var)
 
 
 def add_citations(input_nodes, input_edges, citations_pub_obj_list, search_depth, search_depth_max, cit_type, test_var):
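The reworked loop above replaces per-publication recursion with level-wise recursion: the citations found for all publications of the current level are pooled in new_citation_pub_obj_save_list, and a single recursive call then processes the whole next level. A minimal, self-contained sketch of the pattern (function and parameter names are hypothetical, not taken from the codebase):

def process_level(pubs, depth, depth_max, fetch_children):
    # collect the children of every publication on the current level first
    next_level = []
    for pub in pubs:
        children = fetch_children(pub)  # stands in for create_graph_structure_citations
        if len(children) > 0:
            next_level += children
    # then descend exactly once for the pooled next level
    if depth < depth_max and next_level:
        process_level(next_level, depth + 1, depth_max, fetch_children)

One consequence of pooling: the recursion depth is bounded by depth_max instead of growing with every branch of the citation tree.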
diff --git a/verarbeitung/construct_new_graph/export_to_json.py b/verarbeitung/construct_new_graph/export_to_json.py
index fd21dc1728fa4f5af19573cebe2a160c1a69c30b..0aaf996b8cf0aace22b0d4c20de65148ccacbaa4 100644
--- a/verarbeitung/construct_new_graph/export_to_json.py
+++ b/verarbeitung/construct_new_graph/export_to_json.py
@@ -31,6 +31,7 @@ def format_nodes(nodes):
         new_dict["author"] = node.contributors
         new_dict["year"] = node.publication_date
         new_dict["journal"] = node.journal
+        new_dict["abstract"] = node.abstract
         if (node.group == 0):
             new_dict["group"] = "Input"
         elif (node.group > 0):
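For orientation, a minimal sketch of the per-node dictionary that format_nodes now emits; doi_url and title are assumed attribute names (doi_url is used elsewhere in this diff, title is inferred from the "name" field), and the group/depth/citations fields handled by the surrounding code are omitted for brevity:

def format_node_sketch(node):
    return {
        "doi": node.doi_url,        # assumed attribute name
        "name": node.title,         # assumed attribute name
        "author": node.contributors,
        "year": node.publication_date,
        "journal": node.journal,
        "abstract": node.abstract,  # newly exported; None serializes to JSON null
    }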
diff --git a/verarbeitung/new_height.json b/verarbeitung/new_height.json
index f96362a05cea7ad954fa28bfc22074e15e9fa1cd..6d4d75d2064919529bc5ea85e2e9b9d5609ed1b4 100644
--- a/verarbeitung/new_height.json
+++ b/verarbeitung/new_height.json
@@ -1 +1 @@
-{"nodes": [{"doi": "doi_lg_1_i", "name": "title_lg_1_i", "author": ["contributor_lg_1_i"], "year": "date_lg_1_i", "journal": "journal_lg_1_i", "group": "Input", "depth": 0, "citations": 2}, {"doi": "doi_lg_1_d11", "name": "title_lg_1_d11", "author": ["contributor_lg_1_d11"], "year": "date_lg_1_d11", "journal": "journal_lg_1_d11", "group": "Reference", "depth": -1, "citations": 1}, {"doi": "doi_lg_1_d12", "name": "title_lg_1_d12", "author": ["contributor_lg_1_d12"], "year": "date_lg_1_d12", "journal": "journal_lg_1_d12", "group": "Reference", "depth": -1, "citations": 2}, {"doi": "doi_lg_1_h11", "name": "title_lg_1_h11", "author": ["contributor_lg_1_h11"], "year": "date_lg_1_h11", "journal": "journal_lg_1_h11", "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h12", "name": "title_lg_1_h12", "author": ["contributor_lg_1_h12"], "year": "date_lg_1_h12", "journal": "journal_lg_1_h12", "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h21", "name": "title_lg_1_h21", "author": ["contributor_lg_1_h21"], "year": "date_lg_1_h21", "journal": "journal_lg_1_h21", "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h22", "name": "title_lg_1_h22", "author": ["contributor_lg_1_h22"], "year": "date_lg_1_h22", "journal": "journal_lg_1_h22", "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h23", "name": "title_lg_1_h23", "author": ["contributor_lg_1_h23"], "year": "date_lg_1_h23", "journal": "journal_lg_1_h23", "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_d21", "name": "title_lg_1_d21", "author": ["contributor_lg_1_d21"], "year": "date_lg_1_d21", "journal": "journal_lg_1_d21", "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d22", "name": "title_lg_1_d22", "author": ["contributor_lg_1_d22"], "year": "date_lg_1_d22", "journal": "journal_lg_1_d22", "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d23", "name": "title_lg_1_d23", "author": ["contributor_lg_1_d23"], "year": "date_lg_1_d23", "journal": "journal_lg_1_d23", "group": "Reference", "depth": -2, "citations": 2}], "links": [{"source": "doi_lg_1_i", "target": "doi_lg_1_d11"}, {"source": "doi_lg_1_i", "target": "doi_lg_1_d12"}, {"source": "doi_lg_1_h11", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h21", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_h23", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d21", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d22", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_d12", "target": "doi_lg_1_d23"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_d12"}]}
\ No newline at end of file
+{"nodes": [{"doi": "doi_lg_1_i", "name": "title_lg_1_i", "author": ["contributor_lg_1_i"], "year": "date_lg_1_i", "journal": "journal_lg_1_i", "abstract": null, "group": "Input", "depth": 0, "citations": 2}, {"doi": "doi_lg_1_d11", "name": "title_lg_1_d11", "author": ["contributor_lg_1_d11"], "year": "date_lg_1_d11", "journal": "journal_lg_1_d11", "abstract": null, "group": "Reference", "depth": -1, "citations": 1}, {"doi": "doi_lg_1_d12", "name": "title_lg_1_d12", "author": ["contributor_lg_1_d12"], "year": "date_lg_1_d12", "journal": "journal_lg_1_d12", "abstract": null, "group": "Reference", "depth": -1, "citations": 2}, {"doi": "doi_lg_1_h11", "name": "title_lg_1_h11", "author": ["contributor_lg_1_h11"], "year": "date_lg_1_h11", "journal": "journal_lg_1_h11", "abstract": null, "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h12", "name": "title_lg_1_h12", "author": ["contributor_lg_1_h12"], "year": "date_lg_1_h12", "journal": "journal_lg_1_h12", "abstract": null, "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h21", "name": "title_lg_1_h21", "author": ["contributor_lg_1_h21"], "year": "date_lg_1_h21", "journal": "journal_lg_1_h21", "abstract": null, "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h22", "name": "title_lg_1_h22", "author": ["contributor_lg_1_h22"], "year": "date_lg_1_h22", "journal": "journal_lg_1_h22", "abstract": null, "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h23", "name": "title_lg_1_h23", "author": ["contributor_lg_1_h23"], "year": "date_lg_1_h23", "journal": "journal_lg_1_h23", "abstract": null, "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_d21", "name": "title_lg_1_d21", "author": ["contributor_lg_1_d21"], "year": "date_lg_1_d21", "journal": "journal_lg_1_d21", "abstract": null, "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d22", "name": "title_lg_1_d22", "author": ["contributor_lg_1_d22"], "year": "date_lg_1_d22", "journal": "journal_lg_1_d22", "abstract": null, "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d23", "name": "title_lg_1_d23", "author": ["contributor_lg_1_d23"], "year": "date_lg_1_d23", "journal": "journal_lg_1_d23", "abstract": null, "group": "Reference", "depth": -2, "citations": 2}], "links": [{"source": "doi_lg_1_i", "target": "doi_lg_1_d11"}, {"source": "doi_lg_1_i", "target": "doi_lg_1_d12"}, {"source": "doi_lg_1_h11", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h21", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_h23", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d12", "target": "doi_lg_1_d23"}, {"source": "doi_lg_1_d21", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d22", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_d12"}]}
\ No newline at end of file
diff --git a/verarbeitung/start_script.py b/verarbeitung/start_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..43e9bcc1310e4060f1b2ecba979bb419fdda18ab
--- /dev/null
+++ b/verarbeitung/start_script.py
@@ -0,0 +1,23 @@
+import gc
+from verarbeitung.process_main import Processing
+#from verarbeitung.dev_files.print_graph_test import try_known_publications, try_delete_nodes
+
+
+doi_list = []
+#doi_list.append('https://pubs.acs.org/doi/10.1021/acs.jcim.9b00249')
+#doi_list.append('https://doi.org/10.1021/acs.jcim.9b00249')
+#doi_list.append('https://pubs.acs.org/doi/10.1021/acs.jcim.1c00203')
+#doi_list.append('https://doi.org/10.1021/acs.jmedchem.0c01332')
+#doi_list.append('https://pubs.acs.org/doi/10.1021/acs.jcim.6b00709')
+#doi_list.append('https://doi.org/10.1021/acs.chemrev.8b00728')
+#doi_list.append('https://pubs.acs.org/doi/10.1021/acs.chemrestox.0c00006')
+doi_list.append('https://doi.org/10.1021/acs.chemrev.8b00728')
+doi_list.append('https://doi.org/10.1021/acs.jpclett.1c03335')
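+# assumption: the positional arguments of Processing are search depth, search height and the output file name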
+error_list = Processing(doi_list, 2, 2, 'test728.json')
+print(error_list)
+
+del doi_list
+del error_list
+gc.collect()
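A usage assumption, since the invocation is not documented in this diff: because the script imports from the verarbeitung package, it presumably has to be started from the repository root, for example with python -m verarbeitung.start_script, after which the generated graph is written to test728.json.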
diff --git a/verarbeitung/test/construct_graph_unittest.py b/verarbeitung/test/construct_graph_unittest.py
index b73dc4cabf196068ed2f35a69ecb437512e97da7..d1887277748aa93de0ce878c6d512ea9ce5f4cd4 100644
--- a/verarbeitung/test/construct_graph_unittest.py
+++ b/verarbeitung/test/construct_graph_unittest.py
@@ -24,11 +24,6 @@ class ConstructionTest(unittest.TestCase):
          self.assertCountEqual(doi_nodes, ['doiz1', 'doiz2'])
          self.assertCountEqual(edges, [['doiz2', 'doiz1'], ['doiz1', 'doiz2']])
 
-    #def testBigCycle(self):
-
-    #def testEmptyHeight(self):
-
-    #def testEmptyDepth(self):
 
      def testEmptyDepthHeight(self):
          nodes, edges, err_list = init_graph_construction(['doi1'],0,0,True,False)
@@ -85,6 +80,25 @@ class ConstructionTest(unittest.TestCase):
           self.assertCountEqual(doi_nodes,['doi_d02','doi_d1','doi_d2'])
           self.assertCountEqual(edges, [['doi_d02','doi_d1'], ['doi_d1','doi_d2']])
 
+     def test_incorrect_input_dois(self):
+          nodes, edges, err_list = init_graph_construction(['doi1ic', 'doi2ic'],1,1, True, False)
+          doi_nodes = keep_only_dois(nodes)
+          self.assertCountEqual(doi_nodes, [])
+          self.assertCountEqual(edges, [])
+          self.assertCountEqual(err_list, ['doi1ic', 'doi2ic'])
+
+          nodes, edges, err_list = init_graph_construction(['doi1ic', 'doi2ic'],2,2, True, False)
+          doi_nodes = keep_only_dois(nodes)
+          self.assertCountEqual(doi_nodes, [])
+          self.assertCountEqual(edges, [])
+          self.assertCountEqual(err_list, ['doi1ic', 'doi2ic'])
+
+          nodes, edges, err_list = init_graph_construction(['doi1', 'doi2ic'],1,1, True, False)
+          doi_nodes = keep_only_dois(nodes)
+          self.assertCountEqual(doi_nodes, ['doi1', 'doi2', 'doi3'])
+          self.assertCountEqual(edges, [['doi1', 'doi2'], ['doi3', 'doi1']])
+          self.assertCountEqual(err_list, ['doi2ic'])
+
 
      ## From here on, the tests for the individual functions ##
 
@@ -195,9 +209,9 @@ class ConstructionTest(unittest.TestCase):
           pub_lg_1_d_11.group = -1
 
           return_list_of_node_dicts = format_nodes([pub_lg_1_i, pub_lg_1_h_11, pub_lg_1_d_11])
-          check_list_of_node_dicts = [  {"doi": 'doi_lg_1_i', "name": 'title_lg_1_i', "author": ['contributor_lg_1_i'], "year": 'date_lg_1_i', "journal": 'journal_lg_1_i', "group": 'Input', "depth": 0, "citations": 2},
-                                        {"doi": 'doi_lg_1_h11', "name": 'title_lg_1_h11', "author": ['contributor_lg_1_h11'], "year": 'date_lg_1_h11', "journal": 'journal_lg_1_h11', "group": 'Citedby', "depth": 1, "citations": 2},
-                                        {"doi": 'doi_lg_1_d11', "name": 'title_lg_1_d11', "author": ['contributor_lg_1_d11'], "year": 'date_lg_1_d11', "journal": 'journal_lg_1_d11', "group": 'Reference', "depth": -1, "citations": 1}]
+          check_list_of_node_dicts = [  {"doi": 'doi_lg_1_i', "name": 'title_lg_1_i', "author": ['contributor_lg_1_i'], "year": 'date_lg_1_i', "journal": 'journal_lg_1_i', "abstract": None, "group": 'Input', "depth": 0, "citations": 2},
+                                        {"doi": 'doi_lg_1_h11', "name": 'title_lg_1_h11', "author": ['contributor_lg_1_h11'], "year": 'date_lg_1_h11', "journal": 'journal_lg_1_h11', "abstract": None, "group": 'Citedby', "depth": 1, "citations": 2},
+                                        {"doi": 'doi_lg_1_d11', "name": 'title_lg_1_d11', "author": ['contributor_lg_1_d11'], "year": 'date_lg_1_d11', "journal": 'journal_lg_1_d11', "abstract": None, "group": 'Reference', "depth": -1, "citations": 1}]
           
           self.assertCountEqual(return_list_of_node_dicts, check_list_of_node_dicts)
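These assertions rely on unittest's assertCountEqual, which treats the expected and returned lists as multisets: the same elements with the same multiplicities pass in any order. A minimal standalone illustration:

import unittest

class OrderInsensitiveExample(unittest.TestCase):
    # assertCountEqual ignores ordering but not element counts
    def test_order_does_not_matter(self):
        self.assertCountEqual([1, 2, 2, 3], [3, 2, 1, 2])

if __name__ == "__main__":
    unittest.main()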
 
diff --git a/verarbeitung/test_output.json b/verarbeitung/test_output.json
index f96362a05cea7ad954fa28bfc22074e15e9fa1cd..6d4d75d2064919529bc5ea85e2e9b9d5609ed1b4 100644
--- a/verarbeitung/test_output.json
+++ b/verarbeitung/test_output.json
@@ -1 +1 @@
-{"nodes": [{"doi": "doi_lg_1_i", "name": "title_lg_1_i", "author": ["contributor_lg_1_i"], "year": "date_lg_1_i", "journal": "journal_lg_1_i", "group": "Input", "depth": 0, "citations": 2}, {"doi": "doi_lg_1_d11", "name": "title_lg_1_d11", "author": ["contributor_lg_1_d11"], "year": "date_lg_1_d11", "journal": "journal_lg_1_d11", "group": "Reference", "depth": -1, "citations": 1}, {"doi": "doi_lg_1_d12", "name": "title_lg_1_d12", "author": ["contributor_lg_1_d12"], "year": "date_lg_1_d12", "journal": "journal_lg_1_d12", "group": "Reference", "depth": -1, "citations": 2}, {"doi": "doi_lg_1_h11", "name": "title_lg_1_h11", "author": ["contributor_lg_1_h11"], "year": "date_lg_1_h11", "journal": "journal_lg_1_h11", "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h12", "name": "title_lg_1_h12", "author": ["contributor_lg_1_h12"], "year": "date_lg_1_h12", "journal": "journal_lg_1_h12", "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h21", "name": "title_lg_1_h21", "author": ["contributor_lg_1_h21"], "year": "date_lg_1_h21", "journal": "journal_lg_1_h21", "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h22", "name": "title_lg_1_h22", "author": ["contributor_lg_1_h22"], "year": "date_lg_1_h22", "journal": "journal_lg_1_h22", "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h23", "name": "title_lg_1_h23", "author": ["contributor_lg_1_h23"], "year": "date_lg_1_h23", "journal": "journal_lg_1_h23", "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_d21", "name": "title_lg_1_d21", "author": ["contributor_lg_1_d21"], "year": "date_lg_1_d21", "journal": "journal_lg_1_d21", "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d22", "name": "title_lg_1_d22", "author": ["contributor_lg_1_d22"], "year": "date_lg_1_d22", "journal": "journal_lg_1_d22", "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d23", "name": "title_lg_1_d23", "author": ["contributor_lg_1_d23"], "year": "date_lg_1_d23", "journal": "journal_lg_1_d23", "group": "Reference", "depth": -2, "citations": 2}], "links": [{"source": "doi_lg_1_i", "target": "doi_lg_1_d11"}, {"source": "doi_lg_1_i", "target": "doi_lg_1_d12"}, {"source": "doi_lg_1_h11", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h21", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_h23", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d21", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d22", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_d12", "target": "doi_lg_1_d23"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_d12"}]}
\ No newline at end of file
+{"nodes": [{"doi": "doi_lg_1_i", "name": "title_lg_1_i", "author": ["contributor_lg_1_i"], "year": "date_lg_1_i", "journal": "journal_lg_1_i", "abstract": null, "group": "Input", "depth": 0, "citations": 2}, {"doi": "doi_lg_1_d11", "name": "title_lg_1_d11", "author": ["contributor_lg_1_d11"], "year": "date_lg_1_d11", "journal": "journal_lg_1_d11", "abstract": null, "group": "Reference", "depth": -1, "citations": 1}, {"doi": "doi_lg_1_d12", "name": "title_lg_1_d12", "author": ["contributor_lg_1_d12"], "year": "date_lg_1_d12", "journal": "journal_lg_1_d12", "abstract": null, "group": "Reference", "depth": -1, "citations": 2}, {"doi": "doi_lg_1_h11", "name": "title_lg_1_h11", "author": ["contributor_lg_1_h11"], "year": "date_lg_1_h11", "journal": "journal_lg_1_h11", "abstract": null, "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h12", "name": "title_lg_1_h12", "author": ["contributor_lg_1_h12"], "year": "date_lg_1_h12", "journal": "journal_lg_1_h12", "abstract": null, "group": "Citedby", "depth": 1, "citations": 2}, {"doi": "doi_lg_1_h21", "name": "title_lg_1_h21", "author": ["contributor_lg_1_h21"], "year": "date_lg_1_h21", "journal": "journal_lg_1_h21", "abstract": null, "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h22", "name": "title_lg_1_h22", "author": ["contributor_lg_1_h22"], "year": "date_lg_1_h22", "journal": "journal_lg_1_h22", "abstract": null, "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_h23", "name": "title_lg_1_h23", "author": ["contributor_lg_1_h23"], "year": "date_lg_1_h23", "journal": "journal_lg_1_h23", "abstract": null, "group": "Citedby", "depth": 2, "citations": 0}, {"doi": "doi_lg_1_d21", "name": "title_lg_1_d21", "author": ["contributor_lg_1_d21"], "year": "date_lg_1_d21", "journal": "journal_lg_1_d21", "abstract": null, "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d22", "name": "title_lg_1_d22", "author": ["contributor_lg_1_d22"], "year": "date_lg_1_d22", "journal": "journal_lg_1_d22", "abstract": null, "group": "Reference", "depth": -2, "citations": 2}, {"doi": "doi_lg_1_d23", "name": "title_lg_1_d23", "author": ["contributor_lg_1_d23"], "year": "date_lg_1_d23", "journal": "journal_lg_1_d23", "abstract": null, "group": "Reference", "depth": -2, "citations": 2}], "links": [{"source": "doi_lg_1_i", "target": "doi_lg_1_d11"}, {"source": "doi_lg_1_i", "target": "doi_lg_1_d12"}, {"source": "doi_lg_1_h11", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_i"}, {"source": "doi_lg_1_h21", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h11"}, {"source": "doi_lg_1_h22", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_h23", "target": "doi_lg_1_h12"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_d11", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d12", "target": "doi_lg_1_d23"}, {"source": "doi_lg_1_d21", "target": "doi_lg_1_d22"}, {"source": "doi_lg_1_d22", "target": "doi_lg_1_d21"}, {"source": "doi_lg_1_h12", "target": "doi_lg_1_d12"}]}
\ No newline at end of file
diff --git a/verarbeitung/update_graph/connect_new_input.py b/verarbeitung/update_graph/connect_new_input.py
index af7363a29fa1bac6bf2fba3dbf3de0d50e64196d..f4d2ef93ac7ec3c74b079dc415b67c158cde5c57 100644
--- a/verarbeitung/update_graph/connect_new_input.py
+++ b/verarbeitung/update_graph/connect_new_input.py
@@ -19,16 +19,19 @@ from os import error
 
 sys.path.append("../")
 
+from input.publication import Publication
+from verarbeitung.get_pub_from_input import get_pub
 from verarbeitung.construct_new_graph.initialize_graph import init_graph_construction
 from verarbeitung.construct_new_graph.add_citations_rec import add_citations, get_cit_type_list, create_global_lists_cit
 
 
-def find_furthermost_citations_test(test_nodes, test_edges, changed_node, old_search_depth, cit_type):
+
+def find_furthermost_citations_test(test_nodes, test_edges, changed_node, old_search_depth, new_search_depth, cit_type):
     global nodes, edges
     nodes = test_nodes
     edges = test_edges
 
-    return(find_furthermost_citations(nodes, edges, changed_node, old_search_depth, cit_type))
+    return(find_furthermost_citations(nodes, edges, changed_node, old_search_depth, new_search_depth, cit_type))
 
 def complete_changed_group_nodes_test(test_nodes, test_edges, inserted_test_nodes, old_search_depth, old_search_height, new_search_depth, new_search_height):
     global nodes, edges
@@ -40,7 +43,7 @@ def complete_changed_group_nodes_test(test_nodes, test_edges, inserted_test_node
 
 
 
-def find_furthermost_citations(new_nodes, new_edges, node, old_search_depth, cit_type):
+def find_furthermost_citations(new_nodes, new_edges, node, old_search_depth, new_search_depth, cit_type):
     '''
         :param new_nodes:           list of nodes which are generated separately from main node list to avoid recursive problems
         :type new_nodes             List[Publication]
@@ -64,7 +67,7 @@ def find_furthermost_citations(new_nodes, new_edges, node, old_search_depth, cit
     citations_saved = [node]
 
     # group of node and old search depth/height determines how often the loop needs to be repeated
-    for depth in range(old_search_depth - abs(node.group)):
+    for depth in range(min(old_search_depth - abs(node.group), new_search_depth)):
         new_citations = []
         for citation in citations_saved:
             for cit_node in nodes:
@@ -147,38 +150,49 @@ def complete_changed_group_nodes(inserted_nodes, old_search_depth, old_search_he
         
         # moves known reference node to input and completes citations and references for this node
         if (node.group < 0) and (node.doi_url in inserted_nodes):
-            node.group = 0
-            new_max_citations = find_furthermost_citations(new_nodes, new_edges, node, old_search_height + abs(node.group), "Citation")
-            add_citations(new_nodes, new_edges, new_max_citations, old_search_height, new_search_height, "Citation", test_var)
-            
-            new_nodes, new_edges, error_doi_list_ref = init_graph_construction([node.doi_url], new_search_height, 0, test_var, True, new_nodes, new_edges)
-                
-            for err_node in error_doi_list_ref:
-                if err_node not in error_doi_list:
-                    error_doi_list.append(err_node)
 
+            # get pub from input
+            pub = get_pub(node.doi_url, test_var)
+            if not isinstance(pub, Publication):
+                error_doi_list.append(node.doi_url)
+                continue
+
+            # find the publications at the old maximum depth and complete the tree to the new maximum depth
+            pub.group = node.group
+            old_max_references = find_furthermost_citations(new_nodes, new_edges, pub, old_search_depth, new_search_depth, "Reference")
+            add_citations(new_nodes, new_edges, old_max_references, min(old_search_depth - abs(node.group), new_search_depth), new_search_depth, "Reference", test_var)
+
+            # add tree for citations
+            add_citations(new_nodes, new_edges, [pub], 0, new_search_height, "Citation", test_var)
             
-            old_max_references = find_furthermost_citations(new_nodes, new_edges, node, old_search_depth, "Reference")
-            add_citations(new_nodes, new_edges, old_max_references, old_search_depth, new_search_depth, "Reference", test_var)
+            pub.group = 0
+            new_nodes.append(pub)
             handled_inserted_nodes.append(node)
             
         # moves known citation node to input and completes citations and references for this node
         elif (node.group > 0) and (node.doi_url in inserted_nodes):
-            node.group = 0
-            new_max_references = find_furthermost_citations(new_nodes, new_edges, node, old_search_depth + abs(node.group), "Reference")
-            add_citations(new_nodes, new_edges, new_max_references, old_search_depth, new_search_depth, "Reference", test_var)
-            #new_nodes.append(new_max_references)
-            
-            new_nodes, new_edges, error_doi_list_ref = init_graph_construction([node.doi_url], new_search_depth, 0, test_var, True, new_nodes, new_edges)
-            for err_node in error_doi_list_ref:
-                if err_node not in error_doi_list:
-                    error_doi_list.append(err_node)
 
+            # get pub from input
+            pub = get_pub(node.doi_url, test_var)
+            if not isinstance(pub, Publication):
+                error_doi_list.append(node.doi_url)
+                continue
+
+            # find the publications at the old maximum height and complete the tree to the new maximum height
+            pub.group = node.group
+            old_max_citations = find_furthermost_citations(new_nodes, new_edges, pub, old_search_height, new_search_height, "Citation")
+            add_citations(new_nodes, new_edges, old_max_citations, min(old_search_height - abs(node.group), new_search_height), new_search_height, "Citation", test_var)
+
+            # add tree for references
+            add_citations(new_nodes, new_edges, [pub], 0, new_search_depth, "Reference", test_var)
             
-            old_max_citations = find_furthermost_citations(new_nodes, new_edges, node, old_search_height, "Citation")
-            add_citations(new_nodes, new_edges, old_max_citations, old_search_height, new_search_height, "Citation", test_var)
+            pub.group = 0
+            new_nodes.append(pub)
             handled_inserted_nodes.append(node)
 
+    # ensure that input publications are declared as group 0
     for new_node in new_nodes:
         for inserted_node in inserted_nodes:
             if new_node.doi_url == inserted_node:
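The min() cap introduced above keeps the backwards walk over already-known citations from overshooting the new search limits. A small numeric illustration with hypothetical values:

# a promoted node one level below the input, previously expanded to
# reference depth 3, now updated to a requested depth of 1
old_search_depth = 3
node_group = -1          # abs(group) = level below the input
new_search_depth = 1

# walk back at most (old depth - own level) steps, but never more than
# the new depth allows
steps = min(old_search_depth - abs(node_group), new_search_depth)
assert steps == 1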
diff --git a/verarbeitung/update_graph/update_graph.py b/verarbeitung/update_graph/update_graph.py
index 416be51f890443d9bf9bdb58bf3ccf63645ba37b..1e74773c68d5cd0af8127579a4d11362ef02841e 100644
--- a/verarbeitung/update_graph/update_graph.py
+++ b/verarbeitung/update_graph/update_graph.py
@@ -101,15 +101,18 @@ def update_graph(new_doi_input_list, json_file, search_depth, search_height, tes
     # retrieve which publications are already known, removed, inserted
     common_nodes, inserted_nodes, deleted_nodes = compare_old_and_new_node_lists(old_doi_input_list, new_doi_input_list)
 
-    old_search_depth, old_search_height = update_depth(processed_list, valid_edges, search_depth, search_height, test_var)
 
     processed_list_copy = processed_list.copy()
     valid_edges_copy = valid_edges.copy()
 
+    old_search_depth, old_search_height = update_depth(processed_list, valid_edges, search_depth, search_height, test_var)
+
     # deletes publications and edges from node_list if publications can no longer be reached
     if (len(deleted_nodes) > 0):
         processed_list, valid_edges = delete_nodes_and_edges(processed_list, common_nodes, valid_edges)
     
     if (len(inserted_nodes) > 0):      
         inserted_pub_nodes, inserted_edges, error_doi_list_new = connect_old_and_new_input(processed_list_copy, valid_edges_copy, inserted_nodes, old_search_depth, old_search_height, search_depth, search_height, test_var)
         for err_node in error_doi_list_new: