diff --git a/verarbeitung/Processing.py b/verarbeitung/Processing.py
index 5cfeba29303b9e618f1c49124bec8edcaac59278..89f1ef2707d456c565adca8988d1ebe27b6501e3 100644
--- a/verarbeitung/Processing.py
+++ b/verarbeitung/Processing.py
@@ -18,6 +18,7 @@ import requests as req
 import sys  
 from pathlib import Path
 from input_fj import input
+from input_test import input_test_func
 from json_demo import output_to_json
 
 # adds every publication from input list to graph structure
@@ -26,7 +27,7 @@ from json_demo import output_to_json
 # TO-DO: check list elements for correctness
 def initialize_nodes_list(doi_input_list):
     for pub_doi in doi_input_list:
-        pub = input(pub_doi)
+        pub = input_test_func(pub_doi)
         not_in_nodes = True
         for node in nodes:                                              # checks if a pub is already in nodes
             if (pub.doi_url == node.doi_url):
@@ -34,6 +35,7 @@ def initialize_nodes_list(doi_input_list):
                 break
         if (not_in_nodes):
             nodes.append(pub)
+            pub.group = "input"
         else:
             doi_input_list.remove(pub_doi)
 
@@ -42,7 +44,7 @@ def initialize_nodes_list(doi_input_list):
 # adds a node for every unknown publication
 # adds edges for citations between publications     
 def create_graph_structure_citations(pub, search_height, search_height_max):
-    for citation in pub._citations:
+    for citation in pub.citations:
         not_in_nodes = True
         for node in nodes:
             # checks every citation for duplication 
@@ -51,7 +53,7 @@ def create_graph_structure_citations(pub, search_height, search_height_max):
                 break
         if (not_in_nodes):
             if (search_height <= search_height_max):
-                #citation.group = "citation"
+                citation.group = "height"
                 nodes.append(citation)
                 edges.append([pub.doi_url,citation.doi_url])
 
@@ -64,7 +66,7 @@ def create_graph_structure_citations(pub, search_height, search_height_max):
 # adds a node for every unknown publication
 # adds edges for references between publications     
 def create_graph_structure_references(pub, search_depth, search_depth_max):
-    for reference in pub._references:
+    for reference in pub.references:
         not_in_nodes = True
         for node in nodes:
             # checks every reference for duplication 
@@ -73,7 +75,7 @@ def create_graph_structure_references(pub, search_depth, search_depth_max):
                 break
         if (not_in_nodes):
             if (search_depth <= search_depth_max):
-                #reference.group = "reference"
+                reference.group = "depth"
                 nodes.append(reference)
                 edges.append([reference.doi_url,pub.doi_url])
 
@@ -93,13 +95,13 @@ def process_citations_rec(doi_citations, search_height, search_height_max):
 
     # create class object for every citation from list
     for pub_doi in doi_citations:
-        pub = input(pub_doi)
+        pub = input_test_func(pub_doi)
         create_graph_structure_citations(pub, search_height, search_height_max)    
         # If the maximum height has not yet been reached, all citations from the publication
         # are written to an array and the function is called again with this array.       
         if (search_height < search_height_max):
             citations_list = []
-            for citation in pub._citations:
+            for citation in pub.citations:
 
                 # currently only the citations with acs in the URL are stored, because we can't
                 # extract the info from other sources.
@@ -121,13 +123,13 @@ def process_references_rec(doi_references, search_depth, search_depth_max):
 
 # create class object for every reference from list
     for pub_doi in doi_references:
-        pub = input(pub_doi)
+        pub = input_test_func(pub_doi)
         create_graph_structure_references(pub, search_depth, search_depth_max)    
         # If the maximum depth has not yet been reached, all references from the publication 
         # are written to an array and the function is called again with this array.       
         if (search_depth < search_depth_max):
             references_list = []
-            for reference in pub._references:
+            for reference in pub.references:
 
                 # currently only the references with acs in the URL are stored, because we can't
                 # extract the info from other sources. 
@@ -173,9 +175,9 @@ def process_main(doi_input_list, search_height, search_depth):
 # test of the program, since there is no connection to the input yet.
 def test_print():
     arr = []
-    arr.append('https://pubs.acs.org/doi/10.1021/acs.jcim.9b00249')
-    arr.append('https://pubs.acs.org/doi/10.1021/acs.jcim.9b00249')
-    arr.append('https://doi.org/10.1021/acs.jmedchem.0c01332')
+    #arr.append('https://pubs.acs.org/doi/10.1021/acs.jcim.9b00249')
+    #arr.append('https://pubs.acs.org/doi/10.1021/acs.jcim.9b00249')
+    #arr.append('https://doi.org/10.1021/acs.jmedchem.0c01332')
     #arr.append('https://doi.org/10.1021/acs.jcim.0c00741')
 
     #arr.append('https://doi.org/10.1021/ci700007b')
@@ -183,6 +185,11 @@ def test_print():
     #url = sys.argv[1]
     #arr.append[url]
 
+    arr.append('doi1')
+    #arr.append('doi2')
+    #arr.append('doi3')
+
+
     nodes,edges = process_main(arr,1,1)
 
     print("Knoten:\n")
@@ -192,5 +199,5 @@ def test_print():
     for edge in edges:
         print(edge,"\n")
 
-#test_print()
+test_print()
         
\ No newline at end of file
diff --git a/verarbeitung/__pycache__/input_fj.cpython-36.pyc b/verarbeitung/__pycache__/input_fj.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04312c91f0a7675651e99a2a6c10a2c9da146758
Binary files /dev/null and b/verarbeitung/__pycache__/input_fj.cpython-36.pyc differ
diff --git a/verarbeitung/__pycache__/input_test.cpython-36.pyc b/verarbeitung/__pycache__/input_test.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9dd609c5b2fe3d7530f4120f8993c04d58f4600c
Binary files /dev/null and b/verarbeitung/__pycache__/input_test.cpython-36.pyc differ
diff --git a/verarbeitung/__pycache__/json_demo.cpython-36.pyc b/verarbeitung/__pycache__/json_demo.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04acef5f40630ee2c7b6e887e33dc740b5e16a74
Binary files /dev/null and b/verarbeitung/__pycache__/json_demo.cpython-36.pyc differ
diff --git a/verarbeitung/input_test.py b/verarbeitung/input_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..79b00b1ce11e63f73e11251352745a673b8face9
--- /dev/null
+++ b/verarbeitung/input_test.py
@@ -0,0 +1,71 @@
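+# Test replacement for input_fj: provides hard-coded example publications so the
+# processing code can be run without fetching real data.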
+class Publication:
+    def __init__(self, doi_url, title, contributors, journal, publication_date, references, citations, group):
+        self.doi_url = doi_url
+        self.title = title
+        self.contributors = contributors
+        self.journal = journal
+        self.publication_date = publication_date
+        if references is None:
+            self.references = []
+        else:
+            self.references = ref(references)
+        if citations is None:
+            self.citations = []
+        else: 
+            self.citations = cit(citations)
+        self.group = group
+
+
+class Citation:
+    def __init__(self,doi_url, title, contributors, journal, publication_date):
+        self.doi_url = doi_url
+        self.title = title
+        self.contributors = contributors
+        self.journal = journal
+        self.publication_date = publication_date
+
+class Reference:
+    def __init__(self,doi_url, title, contributors, journal, publication_date):
+        self.doi_url = doi_url
+        self.title = title
+        self.contributors = contributors
+        self.journal = journal
+        self.publication_date = publication_date
+
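+# stand-in for input_fj.input: looks up pub_doi in the hard-coded test data and
+# returns the matching Publication (None if the DOI is unknown)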
+def input_test_func(pub_doi):
+    for array in list_of_arrays:
+        if pub_doi == array[0]:
+            pub = Publication(array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7])
+            return pub
+
+
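+# resolves a list of citation DOIs to Citation objects from the test data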
+def cit(list_doi):
+    cits = []
+    for doi_url in list_doi:
+        for array in list_of_arrays:
+            if doi_url == array[0]:
+                cits.append(Citation(array[0], array[1], array[2], array[3], array[4]))
+    return cits
+
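+# resolves a list of reference DOIs to Reference objects from the test data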
+def ref(list_doi):
+    refs = []
+    for doi_url in list_doi:
+        for array in list_of_arrays:
+            if doi_url == array[0]:
+                refs.append(Reference(array[0], array[1], array[2], array[3], array[4]))
+    return refs
+
+
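+# hard-coded example publications; field order matches the Publication constructor:
+# [doi_url, title, contributors, journal, publication_date, references, citations, group]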
+beispiel1 = ['doi1', 'title1', ['contributor1'], 'journal1', 'date1', ['doi2'], ['doi3'], '']
+beispiel2 = ['doi2', 'title2', ['contributor2'], 'journal2', 'date2', [], ['doi1'], '']
+beispiel3 = ['doi3', 'title3', ['contributor3'], 'journal3', 'date3', ['doi1'], [], '']
+
+list_of_arrays = [beispiel1, beispiel2, beispiel3]
diff --git a/verarbeitung/json_demo.py b/verarbeitung/json_demo.py
index 362d064463f48e266248fd52506e521a58b84afd..734156020bb499b68be222ee9a4c1eec57ffd132 100644
--- a/verarbeitung/json_demo.py
+++ b/verarbeitung/json_demo.py
@@ -10,10 +10,10 @@ def output_to_json(V,E):
         new_dict = dict()
         new_dict["name"] = node.title
         new_dict["author"] = node.contributors
-        #new_dict["year"] = node.publication_date
-        #new_dict["journal"] = node.journal
+        new_dict["year"] = node.publication_date
+        new_dict["journal"] = node.journal
         new_dict["doi"] = node.doi_url
-        #new_dict["group"] = node.group
+        new_dict["group"] = node.group
         list_of_node_dicts.append(new_dict)
     for edge in E:
         new_dict_2 = dict()
diff --git a/verarbeitung/json_text.json b/verarbeitung/json_text.json
index 2e6c8fa59091aa94ac7c0f6ec5abf1e19d329b31..683e49f3e3175e12dcbe10df1e48c51dd27a8b77 100644
--- a/verarbeitung/json_text.json
+++ b/verarbeitung/json_text.json
@@ -1 +1 @@
-{"nodes": [{"name": "Comparing Molecular Patterns Using the Example of SMARTS: Applications and Filter Collection Analysis", "author": ["Emanuel S. R. Ehmki", "Robert Schmidt", "Farina Ohm", "Matthias Rarey"], "doi": "https://doi.org/10.1021/acs.jcim.9b00249"}, {"name": "Combining Machine Learning and Computational Chemistry for Predictive Insights Into Chemical Systems ", "author": "John A. Keith, Valentin Vassilev-Galindo, Bingqing Cheng, Stefan Chmiela, Michael Gastegger, Klaus-Robert M\u00fcller, Alexandre Tkatchenko. ", "doi": "https://doi.org/10.1021/acs.chemrev.1c00107"}, {"name": "Disconnected Maximum Common Substructures under Constraints ", "author": "Robert Schmidt, Florian Krull, Anna Lina Heinzke, Matthias Rarey. ", "doi": "https://doi.org/10.1021/acs.jcim.0c00741"}, {"name": "Evolution of Novartis\u2019 Small Molecule Screening Deck Design ", "author": "Ansgar Schuffenhauer, Nadine Schneider, Samuel Hintermann, Douglas Auld, Jutta Blank, Simona Cotesta, Caroline Engeloch, Nikolas Fechner, Christoph Gaul, Jerome Giovannoni, Johanna Jansen, John Joslin, Philipp Krastel, Eugen Lounkine, John Manchester, Lauren G. Monovich, Anna Paola Pelliccioli, Manuel Schwarze, Michael D. Shultz, Nikolaus Stiefl, Daniel K. Baeschlin. ", "doi": "https://doi.org/10.1021/acs.jmedchem.0c01332"}, {"name": "Comparing Molecular Patterns Using the Example of SMARTS: Theory and Algorithms ", "author": "Robert Schmidt, Emanuel S. R. Ehmki, Farina Ohm, Hans-Christian Ehrlich, Andriy Mashychev, Matthias Rarey. ", "doi": "https://doi.org/10.1021/acs.jcim.9b00250"}, {"name": "Machine learning accelerates quantum mechanics predictions of molecular crystals ", "author": "Yanqiang  Han, Imran  Ali, Zhilong  Wang, Junfei  Cai, Sicheng  Wu, Jiequn  Tang, Lin  Zhang, Jiahao  Ren, Rui  Xiao, Qianqian  Lu, Lei  Hang, Hongyuan  Luo, Jinjin  Li. ", "doi": "https://doi.org/10.1016/j.physrep.2021.08.002"}, {"name": "The Growing Importance of Chirality in 3D Chemical Space Exploration and Modern Drug Discovery Approaches for Hit-ID ", "author": "Ilaria Proietti Silvestri, Paul J. J. Colbon. ", "doi": "https://doi.org/10.1021/acsmedchemlett.1c00251"}, {"name": "Target-Based Evaluation of \u201cDrug-Like\u201d Properties and Ligand Efficiencies ", "author": "Paul D. Leeson, A. Patricia Bento, Anna Gaulton, Anne Hersey, Emma J. Manners, Chris J. Radoux, Andrew R. Leach. ", "doi": "https://doi.org/10.1021/acs.jmedchem.1c00416"}, {"name": "Fostering Research Synergies between Chemists in Swiss Academia and at Novartis ", "author": "Arndt  Meyer, Daniel  Baeschlin, Cara E.  Brocklehurst, Myriam  Duckely, Fabrice  Gallou, Lucie E.  Lovelle, Michael  Parmentier, Thierry  Schlama, Radka  Snajdrova, Yves P.  Auberson. ", "doi": "https://doi.org/10.2533/chimia.2021.936"}, {"name": "BonMOLi\u00e8re: Small-Sized Libraries of Readily Purchasable Compounds, Optimized to Produce Genuine Hits in Biological Screens across the Protein Space ", "author": "Neann  Mathai, Conrad  Stork, Johannes  Kirchmair. ", "doi": "https://doi.org/10.3390/ijms22157773"}, {"name": "Accelerating high-throughput virtual screening through molecular pool-based active learning ", "author": "David E.  Graff, Eugene I.  Shakhnovich, Connor W.  Coley. ", "doi": "https://doi.org/10.1039/D0SC06805E"}, {"name": "Compound Screening ", "author": "Shin  Numao, Gianluca  Etienne, Goran  Malojcic, Enrico  Schmidt, Christoph E.  Dumelin. 
", "doi": "https://doi.org/10.1016/B978-0-12-820472-6.00078-5"}], "links": [{"source": "https://doi.org/10.1021/acs.jcim.9b00249", "target": "https://doi.org/10.1021/acs.chemrev.1c00107"}, {"source": "https://doi.org/10.1021/acs.jcim.9b00249", "target": "https://doi.org/10.1021/acs.jcim.0c00741"}, {"source": "https://doi.org/10.1021/acs.jcim.9b00249", "target": "https://doi.org/10.1021/acs.jmedchem.0c01332"}, {"source": "https://doi.org/10.1021/acs.jcim.9b00249", "target": "https://doi.org/10.1021/acs.jcim.9b00250"}, {"source": "https://doi.org/10.1021/acs.jcim.9b00249", "target": "https://doi.org/10.1016/j.physrep.2021.08.002"}, {"source": "https://doi.org/10.1021/acs.jmedchem.0c01332", "target": "https://doi.org/10.1021/acsmedchemlett.1c00251"}, {"source": "https://doi.org/10.1021/acs.jmedchem.0c01332", "target": "https://doi.org/10.1021/acs.jmedchem.1c00416"}, {"source": "https://doi.org/10.1021/acs.jmedchem.0c01332", "target": "https://doi.org/10.2533/chimia.2021.936"}, {"source": "https://doi.org/10.1021/acs.jmedchem.0c01332", "target": "https://doi.org/10.3390/ijms22157773"}, {"source": "https://doi.org/10.1021/acs.jmedchem.0c01332", "target": "https://doi.org/10.1039/D0SC06805E"}, {"source": "https://doi.org/10.1021/acs.jmedchem.0c01332", "target": "https://doi.org/10.1016/B978-0-12-820472-6.00078-5"}]}
\ No newline at end of file
+{"nodes": [{"name": "title1", "author": ["contributor1"], "year": "date1", "journal": "journal1", "doi": "doi1", "group": "input"}, {"name": "title3", "author": ["contributor3"], "year": "date3", "journal": "journal3", "doi": "doi3", "group": "height"}, {"name": "title2", "author": ["contributor2"], "year": "date2", "journal": "journal2", "doi": "doi2", "group": "depth"}], "links": [{"source": "doi1", "target": "doi3"}, {"source": "doi2", "target": "doi1"}]}
\ No newline at end of file