diff --git a/app.py b/app.py
index ea4f78ad775b68a317a04f721b42dda5f8fd67a7..1f12af34b1ad35de36cf72c147e2d0954e47c548 100644
--- a/app.py
+++ b/app.py
@@ -333,283 +333,287 @@ In this project a more inclusive conception of digital methods is assumed: the u
                      description="""Refers to the complete process of 'knowledge mining from data'.<<Han_etal2012>> Can be applied on various data types and consists of different steps and paradigms. For an application in the context of text mining in the social science see the concept "blended-reading" (<<Stulpe_etal2016>>).""",
                      parent=method)
     db.session.add(method1)
-
     method2 = Method(id=3,
-                     name="automated data collection",
-                     description="""In principal there are multiple possible data sources in a data mining process. A basic distinction in relevance to automated data collection can be drawn between connected devices(internet, intranets) or unconnected devices(sensors, etc.). +
-Furthermore the server-client-model is the established communication paradigms for connected devices. In order to obtain data either from server or client there exists three different interfaces: log files, apis and user interfaces which constitute the available procedures <<Jünger2018>>.""",
-                     parent=method1)
-    db.session.add(method2)
-    method3 = Method(id=4,
-                     name="collect log-data",
-                     description="Collect log data which occur during providing the (web-)service or the information processing.",
-                     parent=method2)
-    db.session.add(method3)
-    method3 = Method(id=5,
-                     name="parsing from api",
-                     description="Parse structured data from via a documented REST-API.",
-                     parent=method2)
-    db.session.add(method3)
-    method3 = Method(id=6,
-                     name="scraping",
-                     description="Automatically parse unstructured or semi-structured data from a normal website (⇒ web-scraping) or service.",
-                     parent=method2)
-    db.session.add(method3)
-    method4 = Method(id=7,
-                     name="scraping (static content)",
-                     description="Automatically parse data from static HTML websites.",
-                     parent=method3)
-    db.session.add(method4)
-    method4 = Method(id=8,
-                     name="scraping (dynamic content)",
-                     description="Automatically parse dynamic content (HTML5/Javascript,) ⇒ sometimes requires mimicking user-interaction.",
-                     parent=method3)
-    db.session.add(method4)
-    method3 = Method(id=9,
-                     name="crawling",
-                     description="Collect websites with an initial set of webpages by following contained links <<Ignatow_etal2017>>.",
-                     parent=method2)
-    db.session.add(method3)
-    method2 = Method(id=10,
                      name="data wrangling",
                      description="Translate data into suited formats for automatic analysis. Examples: PDFs ⇒ Text . For a practical framework refer also <<Wickham_etal2017>>.",
                      parent=method1)
     db.session.add(method2)
-    method3 = Method(id=11,
+    method3 = Method(id=4,
                      name="regular expressions",
                      description="Complex string manipulations by searching and replacing specific patterns.",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=12,
+    method3 = Method(id=5,
                      name="data-format conversions",
                      description="Transfer between different formats in order to unify and handle vacancies.",
                      parent=method2)
     db.session.add(method3)
-    m_text_preprocessing = Method(id=13,
+    m_text_preprocessing = Method(id=6,
                      name="text preprocessing",
                      description="Some text preprocessing tasks in natuaral language processing.",
                      parent=method1)
     db.session.add(m_text_preprocessing)
-    m_tokenization = Method(id=14,
+    m_tokenization = Method(id=7,
                      name="tokenization",
                      description="Identify words in character input sequence.",
                      parent=m_text_preprocessing)
     db.session.add(m_tokenization)
-    m_stopword_removal = Method(id=15,
+    m_stopword_removal = Method(id=8,
                      name="stop-word removal",
                      description="Removing high-frequency words like pronoums, determiners or prepositions.",
                      parent=m_text_preprocessing)
     db.session.add(m_stopword_removal)
-    m_stemming = Method(id=16,
+    m_stemming = Method(id=9,
                      name="stemming",
                      description="Identify common stems on a syntactical level.",
                      parent=m_text_preprocessing)
     db.session.add(m_stemming)
-    m_word_sentence_segmentation = Method(id=17,
+    m_word_sentence_segmentation = Method(id=10,
                      name="word/sentence segmentation",
                      description="Separate a chunk of continuous text into separate words/sentences.",
                      parent=m_text_preprocessing)
     db.session.add(m_word_sentence_segmentation)
-    m_pos_tagging = Method(id=18,
+    m_pos_tagging = Method(id=11,
                      name="part-of-speech(POS)-tagging",
                      description="Identify the part of speech for words.",
                      parent=m_text_preprocessing)
     db.session.add(m_pos_tagging)
-    m_dependency_parsing = Method(id=19,
+    m_dependency_parsing = Method(id=12,
                      name="dependency parsing",
                      description="Create corresponding syntactic, semantic or morphologic trees from input text.",
                      parent=m_text_preprocessing)
     db.session.add(m_dependency_parsing)
-    m_syntactic_parsing = Method(id=20,
+    m_syntactic_parsing = Method(id=13,
                      name="syntactic parsing",
                      description="Create syntactic trees from input text using mostly unsupervised learning on manually annotated treebanks (<<Ignatow_etal2017>>,61).",
                      parent=m_dependency_parsing)
     db.session.add(m_syntactic_parsing)
-    m_word_sense_disambiguation = Method(id=21,
+    m_word_sense_disambiguation = Method(id=14,
                      name="word-sense disambiguation",
                      description="Recognizing context-sensetive meaning of words.",
                      parent=m_text_preprocessing)
     db.session.add(m_word_sense_disambiguation)
-    method2 = Method(id=22, name="information extraction",
+    method2 = Method(id=15, name="information extraction",
                      description="Extract factual information(e.g. people, places or situations) in free text.",
                      parent=method1)
     db.session.add(method3)
-    method3 = Method(id=23,
+    method3 = Method(id=16,
                      name="(named-)entity-recognition/resolution/extraction/tagging",
                      description="Identify instances of specific (pre-)defined types(e.g place, name or color) in text.",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=24,
+    method3 = Method(id=17,
                      name="relation extraction",
                      description="Extract relationships between entities.",
                      parent=method3)
     db.session.add(method3)
-    method2 = Method(id=25,name="information retrieval",
+    method2 = Method(id=18,name="information retrieval",
                      description="Retrieve relevant informations in response to the information requests.",
                      parent=method1)
     db.session.add(method2)
-    method3 = Method(id=26,name="indexing",
+    m_indexing = Method(id=19,name="indexing",
                      description="'organize data in such a way that it can be easily retrieved later on'(<<Ignatow_etal2017>>,137)",
                      parent=method2)
-    db.session.add(method3)
-    method3 = Method(id=27,name="searching/querying",
+    db.session.add(m_indexing)
+    m_search_query = Method(id=20,name="searching/querying",
                      description="'take information requests in the form of queries and return relevant documents'(<<Ignatow_etal2017>>,137). There are different models in order to estimate the similarity between records and the search queries (e.g. boolean, vector space or a probabilistic model)(ibid).",
                      parent=method2)
-    db.session.add(method3)
-    method2 = Method(id=28,name="statistical analysis",
+    db.session.add(m_search_query)
+    method2 = Method(id=21,name="statistical analysis",
                      description="",
                      parent=method1)
     db.session.add(method2)
-    method3 = Method(id=29,name="frequency analysis",
+    m_frequency_analysis = Method(id=22,name="frequency analysis",
                      description="Descriptiv statistical analysis by using specific text abundances.",
                      parent=method2)
-    db.session.add(method3)
-    m_w_dict_frequencies = Method(id=30,name="word frequencies/dictionary analysis",
+    db.session.add(m_frequency_analysis)
+    m_w_dict_frequencies = Method(id=23,name="word frequencies/dictionary analysis",
                      description="Analyse statistical significant occurence of words/word-groups. Can also be combined with meta-data (e.g. creation time of document).",
-                     parent=method3)
+                     parent=m_frequency_analysis)
     db.session.add(m_w_dict_frequencies)
-    m_co_occurence = Method(id=31,name="co-occurence analysis",
+    m_co_occurence = Method(id=24,name="co-occurence analysis",
                      description="Analyse statistical significant co-occurence of words in different contextual units.",
-                     parent=method3)
+                     parent=m_frequency_analysis)
     db.session.add(m_co_occurence)
-    m_context_volatility = Method(id=32,name="context volatility",
+    m_context_volatility = Method(id=25,name="context volatility",
                      description="'Analyse contextual change for certain words over a period of time.'(<<Niekler_etal2018>>,1316)",
-                     parent=method3)
+                     parent=m_frequency_analysis)
     db.session.add(m_context_volatility)
-    method3 = Method(id=33,name="classification/machine learning",
+    method3 = Method(id=26,name="classification/machine learning",
                      description="Various techniques to (semi-)automatically identify specific classes. ",
                      parent=method2)
     db.session.add(method3)
-    m_supervised_classification = Method(id=34,name="supervised classification",
+    m_supervised_classification = Method(id=27,name="supervised classification",
                      description="Use given training examples in order to classify certain entities.",
                      parent=method3)
     db.session.add(m_supervised_classification)
-    method4 = Method(id=35,name="latent semantic analysis",
+    method4 = Method(id=28,name="latent semantic analysis",
                      description="'The basic idea of latent semantic analysis (LSA) is, that text do have a higher order (=latent semantic) structure which, however, is obscured by word usage (e.g. through the use of synonyms or polysemy). By using conceptual indices that are derived statistically via a truncated singular value decomposition (a two-mode factor analysis) over a given document-term matrix, this variability problem can be overcome.'(link:https://cran.r-project.org/web/packages/lsa/lsa.pdf[CRAN-R])",
                      parent=method3)
     db.session.add(method4)
-    method4 = Method(id=36,name="topic modeling",
+    method4 = Method(id=29,name="topic modeling",
                      description="Probabilistic models to infer semantic clusters. See especially <<Papilloud_etal2018>>.",
                      parent=method3)
     db.session.add(method4)
-    lda = Method(id=37,name="latent dirichlet allocation",
+    lda = Method(id=30,name="latent dirichlet allocation",
                      description="""'The application of LDA is based on three nested concepts: the text collection to be modelled is referred to as the corpus; one item within the corpus is a document, with words within a document called terms.(...) +
 The aim of the LDA algorithm is to model a comprehensive representation of the corpus by inferring latent content variables, called topics. Regarding the level of analysis, topics are heuristically located on an intermediate level between the corpus and the documents and can be imagined as content-related categories, or clusters. (...) Since topics are hidden in the first place, no information about them is directly observable in the data. The LDA algorithm solves this problem by inferring topics from recurring patterns of word occurrence in documents.'(<<Maier_etal2018>>,94)""",
                      parent=method4)
     db.session.add(lda)
-    nmf = Method(id=38,name="non-negative-matrix-factorization",
+    nmf = Method(id=31,name="non-negative-matrix-factorization",
                      description="Inclusion of non-negative constraint.",
                      parent=method4)
     db.session.add(nmf)
-    stm = Method(id=39,name="structural topic modeling",
+    stm = Method(id=32,name="structural topic modeling",
                      description="Inclusion of meta-data. Refer especially to <<roberts2013>>.",
                      parent=method4)
     db.session.add(stm)
-    sa = Method(id=40,name="sentiment analysis",
+    sa = Method(id=33,name="sentiment analysis",
                      description="'Subjectivity and sentiment analysis focuses on the automatic identification of private states, such as opinions, emotions, sentiments, evaluations, beliefs, and speculations in natural language. While subjectivity classification labels text as either subjective or objective, sentiment classification adds an additional level of granularity, by further classifying subjective text as either positive, negative, or neutral.' (<<Ignatow_etal2017>> pp. 148)",
                      parent=method3)
     db.session.add(sa)
-    method4 = Method(id=41,name="automated narrative, argumentative structures, irony, metaphor detection/extraction",
+    method4 = Method(id=34,name="automated narrative, argumentative structures, irony, metaphor detection/extraction",
                      description="For automated narrative methapor analysis see (<<Ignatow_etal2017>>, 89-106. For argumentative structures(Task: Retrieving sentential arguments for any given controversial topic) <<Stab_etal2018>> .Refer for a current overview <<Cabrio2018>>.",
                      parent=method3)
     db.session.add(method4)
-    method3 = Method(id=42,name="network analysis/modeling",
+    method3 = Method(id=35,name="network analysis/modeling",
                      description="Generate networks out of text/relationships between text.",
                      parent=method2)
     db.session.add(method3)
-    method4 = Method(id=43, name="knowledge graph construction",
+    method4 = Method(id=36, name="knowledge graph construction",
                      description="Modelling entities and their relationships.",
                      parent=method3)
     db.session.add(method4)
-    method2 = Method(id=44,name="data visualization",
+    method2 = Method(id=37,name="data visualization",
                      description="Visualize the mined informations.",
                      parent=method1)
     db.session.add(method2)
-    method3 = Method(id=45,name="word relationships",
+    method3 = Method(id=38,name="word relationships",
                      description="",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=46,name="networks",
+    method3 = Method(id=39,name="networks",
                      description="",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=47,name="geo-referenced",
+    method3 = Method(id=40,name="geo-referenced",
                      description="",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=48,name="dynamic visualizations",
+    method3 = Method(id=41,name="dynamic visualizations",
                      description="Visualizations with user interaction or animations.",
                      parent=method2)
     db.session.add(method3)
-    method1 = Method(id=49,name="research practice",
+    method1 = Method(id=42,name="research practice",
                      description="",
                      parent=method)
     db.session.add(method1)
+    m_automated_data_collection = Method(id=43,
+                     name="automated data collection",
+                     description="""In principle there are multiple possible data sources in a data mining process. A basic distinction in relevance to automated data collection can be drawn between connected devices (internet, intranets) or unconnected devices (sensors, etc.). +
+    Furthermore the server-client model is the established communication paradigm for connected devices. In order to obtain data either from server or client there exist three different interfaces: log files, APIs and user interfaces, which constitute the available procedures <<Jünger2018>>.""",
+                     parent=method1)
+    db.session.add(m_automated_data_collection)
+    m_collect_log = Method(id=44,
+                     name="collect log-data",
+                     description="Collect log data which occur during providing the (web-)service or the information processing.",
+                     parent=m_automated_data_collection)
+    db.session.add(m_collect_log)
+    m_parsing = Method(id=45,
+                     name="parsing from api",
+                     description="Parse structured data via a documented REST-API.",
+                     parent=m_automated_data_collection)
+    db.session.add(m_parsing)
+    m_scraping = Method(id=46,
+                     name="scraping",
+                     description="Automatically parse unstructured or semi-structured data from a normal website (⇒ web-scraping) or service.",
+                     parent=m_automated_data_collection)
+    db.session.add(m_scraping)
+    m_scraping_stat = Method(id=47,
+                     name="scraping (static content)",
+                     description="Automatically parse data from static HTML websites.",
+                     parent=m_scraping)
+    db.session.add(m_scraping_stat)
+    m_scraping_dyn = Method(id=48,
+                     name="scraping (dynamic content)",
+                     description="Automatically parse dynamic content (HTML5/Javascript) ⇒ sometimes requires mimicking user-interaction.",
+                     parent=m_scraping)
+    db.session.add(m_scraping_dyn)
+    m_crawling = Method(id=49,
+                     name="crawling",
+                     description="Collect websites with an initial set of webpages by following contained links <<Ignatow_etal2017>>.",
+                     parent=m_automated_data_collection)
+    db.session.add(m_crawling)
+
     method2 = Method(id=50,name="digital research design",
                      description="New possibilities in surveys or data aquisition techniques.",
                      parent=method1)
     db.session.add(method2)
-    method3 = Method(id=51,name="ecological momentary assessments (EMA)/Experience Sampling Method (ESM)",
+    m_esm_ema = Method(id=51,name="ecological momentary assessments (EMA)/Experience Sampling Method (ESM)",
                      description="Mostly equivalent. EMA focusses on medical questions or measurements in a natural environment; ESM more on subjective Questions in the real life. Four characteristics: 1) data collection in natural environments 2) Focussing on near events/impressions/actions 3) questions triggered randomly or event-based 4) multiple questions over a certain period of time [Citation after Stone and Shiffmann 1994] (<<Salganik2018>>,109)",
                      parent=method2)
-    db.session.add(method3)
+    db.session.add(m_esm_ema)
     method3 = Method(id=52,name="wiki surveys",
                      description="Guide open-answer questions with user feedback. Refer also (<<Salganik2018>>,111)",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=53,name="survey data linked to big data sources",
+    m_online_experiment = Method(id=53,name="Online experiments",
+                     description="Synchronous or asynchronous online experiments.",
+                     parent=method2)
+    db.session.add(m_online_experiment)
+    method3 = Method(id=54,name="survey data linked to big data sources",
                      description="",
                      parent=method2)
     db.session.add(method3)
-    method4 = Method(id=54,name="enriched asking",
+    method4 = Method(id=55,name="enriched asking",
                      description="'In enriched asking, survey data build context around a big data source that contains some important measurements but lacks others.'(<<Salganik2018>>,118)",
                      parent=method3)
     db.session.add(method4)
-    method4 = Method(id=55,name="amplified asking",
+    method4 = Method(id=56,name="amplified asking",
                      description="'Amplified asking using a predictive model to combine survey data from few people with a big data source from many people.'(<<Salganik2018>>,122)",
                      parent=method3)
     db.session.add(method4)
-    method2 = Method(id=56,name="collaborative work",
+    method2 = Method(id=57,name="collaborative work",
                      description="",
                      parent=method1)
     db.session.add(method2)
-    method3 = Method(id=57,name="open call projects",
+    method3 = Method(id=58,name="open call projects",
                      description="(e.g. annotation).",
                      parent=method2)
     db.session.add(method3)
-    method3 = Method(id=58,name="distributed data collection",
+    method3 = Method(id=59,name="distributed data collection",
                      description="",
                      parent=method2)
     db.session.add(method3)
-    method2 = Method(id=59,name="digital communication",
+    method2 = Method(id=60,name="digital communication",
                      description="",
                      parent=method1)
     db.session.add(method2)
-    method2 = Method(id=60,name="digital data/phenomena as reasearch-objective",
+    method2 = Method(id=61,name="digital data/phenomena as reasearch-objective",
                      description="",
                      parent=method1)
     db.session.add(method2)
-    m_statistical_modeling = Method(id=61,name="statistical modeling",
+    m_statistical_modeling = Method(id=62,name="statistical modeling",
                      description="",
                      parent=method1)
     db.session.add(m_statistical_modeling)
-    m_regression_analysis = Method(id=62,name="regression analysis",
+    m_regression_analysis = Method(id=63,name="regression analysis",
                      description="",
                      parent=m_statistical_modeling)
     db.session.add(m_regression_analysis)
-    m_time_series_analysis = Method(id=63,name="time-series analysis",
+    m_time_series_analysis = Method(id=64,name="time-series analysis",
                      description="",
                      parent=m_statistical_modeling)
     db.session.add(m_time_series_analysis)
-    m_agent_based_modeling = Method(id=64,name="agent-based modeling",
+    m_agent_based_modeling = Method(id=65,name="agent-based modeling",
                      description="",
                      parent=m_statistical_modeling)
     db.session.add(m_agent_based_modeling)
-    m_social_complexity = Method(id=65,name="social complexity modeling/ social simulation",
+    m_social_complexity = Method(id=66,name="social complexity modeling/ social simulation",
                      description="",
                      parent=method1)
     db.session.add(m_social_complexity)
-    m_nowcasting = Method(id=66,name="nowcasting",
+    m_nowcasting = Method(id=67,name="nowcasting",
                      description="Using methods to predict the future for estimation of current values. (Example: predict influenza epidemiology combining CDC Data and Google Trends(<<Salganik2018>>,46–50)).",
                      parent=m_social_complexity)
     db.session.add(m_nowcasting)
@@ -675,6 +679,10 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
     os_win = Operatingsystem(name="windows")
     os_browser = Operatingsystem(name="browser")
     os_docker = Operatingsystem(name="docker")
+    os_ix = Operatingsystem(name="linux")
+    os_osx = Operatingsystem(name="osx")
+    os_ios = Operatingsystem(name="ios")
+    os_droid = Operatingsystem(name="android")
 
     prol_r = Programminglanguage(name="R")
     prol_py = Programminglanguage(name="Python")
@@ -686,6 +694,7 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
     prol_js = Programminglanguage(name="Javascript")
     prol_c = Programminglanguage(name="C")
     prol_ruby = Programminglanguage(name="Ruby")
+    prol_perl = Programminglanguage(name="Perl")
 
     cat_tracking = SoftwareCategory(name="user-consented tracking", short_description="Collection of sensor data on (mobile) devices in accordance with data protection laws.")
     cat_scraping = SoftwareCategory(name="scraping", short_description="Tools in the area of web-scraping")
@@ -750,6 +759,11 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
                     architecture="framework",
                     license=lic_apache2,
                     programminglanguages=[prol_py,prol_java],
+                    languages=[lang_en],
+                    operatingsystems=[os_ix,os_droid,os_ios],
+                    currentversion="",
+                    lastchanged=datetime.datetime.strptime('26022019', '%d%m%Y').date(),
+                    methods=[m_esm_ema],
                     price="0")
     db.session.add(tool)
     db.session.add(Link(software=tool, type="website", url="https://passivedatakit.org/", comment=""))
@@ -757,7 +771,7 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
     db.session.add(Link(software=tool, type="repository", url="https://github.com/audaciouscode/PassiveDataKit-Android", comment="android"))
     db.session.add(Link(software=tool, type="repository", url="https://github.com/audaciouscode/PassiveDataKit-iOS", comment="iOS"))
 
-    tool = Software(name="Web Historian - Community Edition",
+    tool = Software(name="Web Historian(CE)",
                     short_description="Chrome browser extension designed to integrate web browsing history data collection into research projects collecting other types of data from participants (e.g. surveys, in-depth interviews, experiments). It uses client-side D3 visualizations to inform participants about the data being collected during the informed consent process. It allows participants to delete specific browsing data or opt-out of browsing data collection. It directs participants to an online survey once they have reviewed their data and made a choice of whether to participate. It has been used with Qualtrics surveys, but any survey that accepts data from a URL will work. It works with the open source Passive Data Kit (PDK) as the backend for data collection. To successfully upload, you need to fill in the address of your PDK server in the js/app/config.js file.",
                     developer="Ericka Menchen-Trevino and Chris Karr",
                     maintainer="Ericka Menchen-Trevino and Chris Karr",
@@ -765,7 +779,12 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
                     architecture="plugin",
                     license=lic_gpl3,
                     programminglanguages=[prol_js],
-                    price="free",
+                    languages=[lang_en],
+                    operatingsystems=[os_browser],
+                    currentversion="e06b3e174f9668f5c62f30a9bedde223023e0bca",
+                    lastchanged=datetime.datetime.strptime('18022019', '%d%m%Y').date(),
+                    methods=[m_esm_ema],
+                    price="0",
                     recommandedcitation="Menchen-Trevino, E., & Karr, C. (2018). Web Historian - Community Edition. Zenodo. https://doi.org/10.5281/zenodo.1322782")
     db.session.add(tool)
     db.session.add(Link(software=tool, type="website", url="https://doi.org/10.5281/zenodo.1322782", comment="doi"))
@@ -870,7 +889,12 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
                     softwarecategory=cat_int,
                     architecture="framework",
                     license=lic_gpl3,
-                    programminglanguages=[prol_c],
+                    programminglanguages=[prol_c,prol_perl],
+                    languages=[lang_en],
+                    operatingsystems=[os_win,os_ix],
+                    currentversion="3.4.15",
+                    lastchanged=datetime.datetime.strptime('01032019', '%d%m%Y').date(),
+                    methods=[m_search_query,m_indexing,m_frequency_analysis],
                     price="0")
     db.session.add(tool)
     db.session.add(Link(software=tool, type="website", url="http://cwb.sourceforge.net/index.php", comment=""))
@@ -902,7 +926,7 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
                     languages=[lang_de],
                     operatingsystems=[os_browser,os_docker],
                     currentversion="0.96",
-                    lastchanged=datetime.datetime.strptime('05032018', '%d%m%Y').date(),
+                    lastchanged=datetime.datetime.strptime('05032019', '%d%m%Y').date(),
                     methods=[m_text_preprocessing, m_pos_tagging, m_syntactic_parsing, m_context_volatility, m_co_occurence, m_w_dict_frequencies, lda],
                     price="0",
                     recommandedcitation="Niekler, A., Bleier, A., Kahmann, C., Posch, L., Wiedemann, G., Erdogan, K., Heyer, G., & Strohmaier, M. (2018). iLCM - A Virtual Research Infrastructure for Large-Scale Qualitative Data. CoRR, abs/1805.11404.")
@@ -1534,7 +1558,13 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c
                     architecture="package",
                     license=lic_mit,
                     programminglanguages=[prol_js],
-                    price="0")
+                    languages=[lang_en],
+                    operatingsystems=[os_browser],
+                    currentversion="4.2.1",
+                    lastchanged=datetime.datetime.strptime('10122018', '%d%m%Y').date(),
+                    methods=[m_online_experiment],
+                    price="0",
+                    recommandedcitation='Balietti (2017) "nodeGame: Real-Time, Synchronous, Online Experiments in the Browser" Behavior Research Methods Volume 49, Issue 5, pp. 1696-1715.')
     db.session.add(tool)
     db.session.add(Link(software=tool, type="website", url="https://nodegame.org/", comment=""))
     db.session.add(Link(software=tool, type="repository", url="https://github.com/nodeGame", comment=""))