diff --git a/app.py b/app.py index 0f96f7728a58edb9f3bf830c99c44d10bcdea65f..ea4f78ad775b68a317a04f721b42dda5f8fd67a7 100644 --- a/app.py +++ b/app.py @@ -1,5 +1,6 @@ import os,sys import os.path as op +import datetime import zipfile import io import pathlib @@ -55,7 +56,15 @@ software_languages_table = db.Table('software_languages', db.Model.metadata, # Create N2one table software_programminglanguages_table = db.Table('software_programminglanguages', db.Model.metadata, db.Column('software_id', db.Integer, db.ForeignKey('software.id')), - db.Column('programming_id', db.Integer, db.ForeignKey('programminglanguage.id'))) + db.Column('programminglanguage_id', db.Integer, db.ForeignKey('programminglanguage.id'))) +# Create N2one table +software_operatingsystems_table = db.Table('software_operatingsystems', db.Model.metadata, + db.Column('software_id', db.Integer, db.ForeignKey('software.id')), + db.Column('operatingsystem_id', db.Integer, db.ForeignKey('operatingsystem.id'))) +# Create M2M table +software_methods_table = db.Table('software_methods', db.Model.metadata, + db.Column('software_id', db.Integer, db.ForeignKey('software.id')), + db.Column('method_id', db.Integer, db.ForeignKey('method.id'))) class License(db.Model): id = db.Column(db.Integer, primary_key=True) @@ -79,7 +88,14 @@ class Feature(db.Model): class Language(db.Model): id = db.Column(db.Integer, primary_key=True) - name = db.Column(db.Unicode(64)) + name = db.Column(db.Enum('german', 'english', 'french', 'spanish', 'chinese', 'russian', 'other', name='languages')) + + def __str__(self): + return "{}".format(self.name) + +class Operatingsystem(db.Model): + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.Enum('windows', 'linux', 'osx', 'ios', 'android', 'browser', 'docker', name='operatingsystems'), unique=True) def __str__(self): return "{}".format(self.name) @@ -105,6 +121,14 @@ class Method(db.Model): def __str__(self): return "{}".format(self.name) +class Othercollections(db.Model): + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.String(120)) + url = db.Column(db.Text) + + def __str__(self): + return "{}".format(self.name) + class SoftwareCategory(db.Model): id = db.Column(db.Integer, primary_key=True) @@ -122,6 +146,7 @@ class Software(db.Model): developer = db.Column(db.String(120)) maintainer = db.Column(db.String(120)) lastchanged = db.Column(db.Date) + currentversion = db.Column(db.String(64)) price = db.Column(db.Integer()) modelprice = type = db.Column(db.Enum('yearly', 'once', name='modelprice')) @@ -140,6 +165,10 @@ class Software(db.Model): architecture = db.Column(db.Enum('standalone', 'package', 'framework', 'app', 'SaaS', 'plugin', 'other', name='software_types')) + operatingsystems = db.relationship('Operatingsystem', secondary=software_operatingsystems_table) + + methods = db.relationship('Method', secondary=software_methods_table) + recommandedcitation = db.Column(db.String(120)) def __str__(self): @@ -266,13 +295,11 @@ admin = admin.Admin(app, name='digital methods:software-tools', template_mode='b # Add views admin.add_view(AdvancedSoftwareView(Software, db.session, name="software-tools")) admin.add_view(sqla.ModelView(Method, db.session, name="methods")) +admin.add_view(sqla.ModelView(Othercollections, db.session, name="other collections")) admin.add_view(sqla.ModelView(Feature, db.session, name="software-features", category="miscellaneous")) admin.add_view(sqla.ModelView(License, db.session, name="licenses", category="miscellaneous")) admin.add_view(sqla.ModelView(Link,
db.session, name="links", category="miscellaneous")) admin.add_view(sqla.ModelView(SoftwareCategory, db.session, name="software categories", category="miscellaneous")) -admin.add_sub_category(name="other collections", parent_name="miscellaneous") -admin.add_link(MenuLink(name="CRAN-R", url='https://cran.r-project.org/web/views/', category='other collections', target="_blank")) -admin.add_link(MenuLink(name="ROpenSci", url='https://ropensci.org/packages/', category='other collections', target="_blank")) admin.add_link(MenuLink(name="wiki-export", url='/wiki-export', category="miscellaneous")) @@ -285,6 +312,16 @@ def build_sample_db(): db.drop_all() db.create_all() + collection = Othercollections(name="CRAN-R", + url="https://cran.r-project.org/web/views/") + db.session.add(collection) + collection = Othercollections(name="ROpenSci", + url="https://ropensci.org/packages/") + db.session.add(collection) + collection = Othercollections(name="Text Visualization Browser", + url="http://textvis.lnu.se/") + db.session.add(collection) + method = Method(id=1, name="digital methods", description="""<<Rogers2013>> distinguishes between digitalized/virtual and digital methods. The former methods import standard methods from the social sciences and humanities into the emerging medium. The latter are completly new methods which emerge following the new structures and their properties. + @@ -348,57 +385,57 @@ Furthermore the server-client-model is the established communication paradigms f description="Transfer between different formats in order to unify and handle vacancies.", parent=method2) db.session.add(method3) - method2 = Method(id=13, + m_text_preprocessing = Method(id=13, name="text preprocessing", description="Some text preprocessing tasks in natuaral language processing.", parent=method1) - db.session.add(method2) - method3 = Method(id=14, + db.session.add(m_text_preprocessing) + m_tokenization = Method(id=14, name="tokenization", description="Identify words in character input sequence.", - parent=method2) - db.session.add(method3) - method3 = Method(id=15, + parent=m_text_preprocessing) + db.session.add(m_tokenization) + m_stopword_removal = Method(id=15, name="stop-word removal", description="Removing high-frequency words like pronoums, determiners or prepositions.", - parent=method2) - db.session.add(method3) - method3 = Method(id=16, + parent=m_text_preprocessing) + db.session.add(m_stopword_removal) + m_stemming = Method(id=16, name="stemming", description="Identify common stems on a syntactical level.", - parent=method2) - db.session.add(method3) - method3 = Method(id=17, + parent=m_text_preprocessing) + db.session.add(m_stemming) + m_word_sentence_segmentation = Method(id=17, name="word/sentence segmentation", description="Separate a chunk of continuous text into separate words/sentences.", - parent=method2) - db.session.add(method3) - method3 = Method(id=18, + parent=m_text_preprocessing) + db.session.add(m_word_sentence_segmentation) + m_pos_tagging = Method(id=18, name="part-of-speech(POS)-tagging", description="Identify the part of speech for words.", - parent=method2) - db.session.add(method3) - method3 = Method(id=19, + parent=m_text_preprocessing) + db.session.add(m_pos_tagging) + m_dependency_parsing = Method(id=19, name="dependency parsing", description="Create corresponding syntactic, semantic or morphologic trees from input text.", - parent=method2) - db.session.add(method3) - method4 = Method(id=20, + parent=m_text_preprocessing) + db.session.add(m_dependency_parsing) + 
m_syntactic_parsing = Method(id=20, name="syntactic parsing", description="Create syntactic trees from input text using mostly unsupervised learning on manually annotated treebanks (<<Ignatow_etal2017>>,61).", - parent=method3) - db.session.add(method4) - method3 = Method(id=21, + parent=m_dependency_parsing) + db.session.add(m_syntactic_parsing) + m_word_sense_disambiguation = Method(id=21, name="word-sense disambiguation", description="Recognizing context-sensetive meaning of words.", - parent=method2) - db.session.add(method3) + parent=m_text_preprocessing) + db.session.add(m_word_sense_disambiguation) method2 = Method(id=22, name="information extraction", description="Extract factual information(e.g. people, places or situations) in free text.", parent=method1) db.session.add(method3) method3 = Method(id=23, - name="(named-)entity-recognition/resolution/extraction", + name="(named-)entity-recognition/resolution/extraction/tagging", description="Identify instances of specific (pre-)defined types(e.g place, name or color) in text.", parent=method2) db.session.add(method3) @@ -427,26 +464,26 @@ Furthermore the server-client-model is the established communication paradigms f description="Descriptiv statistical analysis by using specific text abundances.", parent=method2) db.session.add(method3) - method4 = Method(id=30,name="word frequencies/dictionary analysis", + m_w_dict_frequencies = Method(id=30,name="word frequencies/dictionary analysis", description="Analyse statistical significant occurence of words/word-groups. Can also be combined with meta-data (e.g. creation time of document).", parent=method3) - db.session.add(method4) - method4 = Method(id=31,name="co-occurence analysis", + db.session.add(m_w_dict_frequencies) + m_co_occurence = Method(id=31,name="co-occurence analysis", description="Analyse statistical significant co-occurence of words in different contextual units.", parent=method3) - db.session.add(method4) - method4 = Method(id=32,name="context volatility", + db.session.add(m_co_occurence) + m_context_volatility = Method(id=32,name="context volatility", description="'Analyse contextual change for certain words over a period of time.'(<<Niekler_etal2018>>,1316)", parent=method3) - db.session.add(method4) + db.session.add(m_context_volatility) method3 = Method(id=33,name="classification/machine learning", description="Various techniques to (semi-)automatically identify specific classes. ", parent=method2) db.session.add(method3) - method4 = Method(id=34,name="supervised classification", + m_supervised_classification = Method(id=34,name="supervised classification", description="Use given training examples in order to classify certain entities.", parent=method3) - db.session.add(method4) + db.session.add(m_supervised_classification) method4 = Method(id=35,name="latent semantic analysis", description="'The basic idea of latent semantic analysis (LSA) is, that text do have a higher order (=latent semantic) structure which, however, is obscured by word usage (e.g. through the use of synonyms or polysemy). By using conceptual indices that are derived statistically via a truncated singular value decomposition (a two-mode factor analysis) over a given document-term matrix, this variability problem can be overcome.'(link:https://cran.r-project.org/web/packages/lsa/lsa.pdf[CRAN-R])", parent=method3) @@ -455,23 +492,23 @@ Furthermore the server-client-model is the established communication paradigms f description="Probabilistic models to infer semantic clusters. 
See especially <<Papilloud_etal2018>>.", parent=method3) db.session.add(method4) - method5 = Method(id=37,name="latent dirichlet allocation", + lda = Method(id=37,name="latent dirichlet allocation", description="""'The application of LDA is based on three nested concepts: the text collection to be modelled is referred to as the corpus; one item within the corpus is a document, with words within a document called terms.(...) + The aim of the LDA algorithm is to model a comprehensive representation of the corpus by inferring latent content variables, called topics. Regarding the level of analysis, topics are heuristically located on an intermediate level between the corpus and the documents and can be imagined as content-related categories, or clusters. (...) Since topics are hidden in the first place, no information about them is directly observable in the data. The LDA algorithm solves this problem by inferring topics from recurring patterns of word occurrence in documents.'(<<Maier_etal2018>>,94)""", parent=method4) - db.session.add(method5) - method5 = Method(id=38,name="non-negative-matrix-factorization", + db.session.add(lda) + nmf = Method(id=38,name="non-negative-matrix-factorization", description="Inclusion of non-negative constraint.", parent=method4) - db.session.add(method5) - method5 = Method(id=39,name="structural topic modeling", + db.session.add(nmf) + stm = Method(id=39,name="structural topic modeling", description="Inclusion of meta-data. Refer especially to <<roberts2013>>.", parent=method4) - db.session.add(method5) - method4 = Method(id=40,name="sentiment analysis", + db.session.add(stm) + sa = Method(id=40,name="sentiment analysis", description="'Subjectivity and sentiment analysis focuses on the automatic identification of private states, such as opinions, emotions, sentiments, evaluations, beliefs, and speculations in natural language. While subjectivity classification labels text as either subjective or objective, sentiment classification adds an additional level of granularity, by further classifying subjective text as either positive, negative, or neutral.' (<<Ignatow_etal2017>> pp. 148)", parent=method3) - db.session.add(method4) + db.session.add(sa) method4 = Method(id=41,name="automated narrative, argumentative structures, irony, metaphor detection/extraction", description="For automated narrative methapor analysis see (<<Ignatow_etal2017>>, 89-106. 
For argumentative structures(Task: Retrieving sentential arguments for any given controversial topic) <<Stab_etal2018>> .Refer for a current overview <<Cabrio2018>>.", parent=method3) @@ -552,30 +589,30 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c description="", parent=method1) db.session.add(method2) - method1 = Method(id=61,name="statistical modeling", - description="", - parent=method) - db.session.add(method1) - method2 = Method(id=62,name="regression analysis", + m_statistical_modeling = Method(id=61,name="statistical modeling", description="", parent=method1) - db.session.add(method2) - method2 = Method(id=63,name="time-series analysis", + db.session.add(m_statistical_modeling) + m_regression_analysis = Method(id=62,name="regression analysis", description="", - parent=method1) - db.session.add(method2) - method2 = Method(id=64,name="agent-based modeling", + parent=m_statistical_modeling) + db.session.add(m_regression_analysis) + m_time_series_analysis = Method(id=63,name="time-series analysis", description="", - parent=method1) - db.session.add(method2) - method1 = Method(id=65,name="social complexity modeling/ social simulation", + parent=m_statistical_modeling) + db.session.add(m_time_series_analysis) + m_agent_based_modeling = Method(id=64,name="agent-based modeling", + description="", + parent=m_statistical_modeling) + db.session.add(m_agent_based_modeling) + m_social_complexity = Method(id=65,name="social complexity modeling/ social simulation", description="", - parent=method) - db.session.add(method1) - method2 = Method(id=66,name="nowcasting", - description="Using methods to predict the future for estimation of current values. (Example: predict influenza epidemiology combining CDC Data and Google Trends(<<Salganik2018>>,46–50)).", parent=method1) - db.session.add(method2) + db.session.add(m_social_complexity) + m_nowcasting = Method(id=66,name="nowcasting", + description="Using methods to predict the future for estimation of current values. (Example: predict influenza epidemiology combining CDC Data and Google Trends(<<Salganik2018>>,46–50)).", + parent=m_social_complexity) + db.session.add(m_nowcasting) reference = Reference(name="Rogers2013", cited="Rogers, R. (2013). Digital methods. 
Cambridge, Massachusetts, London, England: The MIT Press.") @@ -632,6 +669,13 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c lic_ccdl = License(name="CCDL", version="1.0") lic_prop = License(name="Proprietary") + lang_de = Language(name="german") + lang_en = Language(name="english") + + os_win = Operatingsystem(name="windows") + os_browser = Operatingsystem(name="browser") + os_docker = Operatingsystem(name="docker") + prol_r = Programminglanguage(name="R") prol_py = Programminglanguage(name="Python") prol_cy = Programminglanguage(name="Cython") @@ -645,9 +689,9 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c cat_tracking = SoftwareCategory(name="user-consented tracking", short_description="Collection of sensor data on (mobile) devices in accordance with data protection laws.") cat_scraping = SoftwareCategory(name="scraping", short_description="Tools in the area of web-scraping") - cat_int = SoftwareCategory(name="tools for corpus linguistics", short_description="Integrated platforms for corpus analysis and processing.") + cat_int = SoftwareCategory(name="tools for corpus linguistics/text mining/(semi-)automated text analysis", short_description="Integrated platforms for corpus analysis and processing.") cat_qda = SoftwareCategory(name="computer assisted/aided qualitative data analysis software (CAQDAS)", short_description="assist with qualitative research such as transcription analysis, coding and text interpretation, recursive abstraction, content analysis, discourse analysis, grounded theory methodology, etc.") - cat_tm = SoftwareCategory(name="text mining/natuaral language processing(NLP)", short_description="") + cat_tm = SoftwareCategory(name="natural language processing (NLP)", short_description="") cat_senti = SoftwareCategory(name="sentiment analysis", short_description="") cat_topic = SoftwareCategory(name="topic-models", short_description="") cat_visu = SoftwareCategory(name="visualization", short_description="") @@ -662,8 +706,9 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c cat_search = SoftwareCategory(name="search", short_description="information retrieval in large datasets.") cat_ocr = SoftwareCategory(name="optical character recognition (OCR)",short_description="OCR is the mechanical or electronic conversion of images of typed, handwritten or printed text into machine-encoded text.") cat_oe = SoftwareCategory(name="online experiments", short_description="") - cat_agent = SoftwareCategory(name="Agent-based modeling", short_description="") - cat_jour = SoftwareCategory(name="Investigative Journalism", short_description="") + cat_agent = SoftwareCategory(name="agent-based modeling", short_description="") + cat_jour = SoftwareCategory(name="investigative journalism", short_description="") + cat_eye = SoftwareCategory(name="(remote) eye tracking", short_description="") cat_misc = SoftwareCategory(name="miscellaneous", short_description="") tool = Software(name="AWARE", @@ -720,14 +765,42 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c architecture="plugin", license=lic_gpl3, programminglanguages=[prol_js], - price="0", + price="free", recommandedcitation="Menchen-Trevino, E., & Karr, C. (2018). Web Historian - Community Edition. Zenodo.
https://doi.org/10.5281/zenodo.1322782") db.session.add(tool) db.session.add(Link(software=tool, type="website", url="https://doi.org/10.5281/zenodo.1322782", comment="doi")) db.session.add(Link(software=tool, type="website", url="http://www.webhistorian.org", comment="")) db.session.add(Link(software=tool, type="repository", url="https://github.com/WebHistorian/community", comment="")) + tool = Software(name="TWINT", + short_description="TWINT (Twitter Intelligence Tool) 'Formerly known as Tweep, Twint is an advanced Twitter scraping tool written in Python that allows for scraping Tweets from Twitter profiles without using Twitter's API.' link:https://github.com/twintproject/twint[Retrieved 07.03.2019]", + developer="Cody Zacharias", + maintainer="Cody Zacharias", + softwarecategory=cat_scraping, + architecture="package", + license=lic_mit, + programminglanguages=[prol_py], + price="0") + db.session.add(tool) + db.session.add(Link(software=tool, type="website", url="https://twint.io/", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://github.com/twintproject/twint", comment="")) + + tool = Software(name="YouTubeComments", + short_description="'This repository contains an R script as well as an interactive Jupyter notebook to demonstrate how to automatically collect, format, and explore YouTube comments, including the emojis they contain. The script and notebook showcase the following steps: Getting access to the YouTube API Extracting comments for a video Formatting the comments & extracting emojis Basic sentiment analysis for text & emojis' link:https://github.com/JuKo007/YouTubeComments[Retrieved 07.03.2019]", + developer="Kohne, J., Breuer, J., & Mohseni, M. R.", + maintainer="Kohne, J., Breuer, J., & Mohseni, M. R.", + softwarecategory=cat_scraping, + architecture="package", + license=lic_unknown, + programminglanguages=[prol_r], + price="0", + recommandedcitation="Kohne, J., Breuer, J., & Mohseni, M. R. (2018). Automatic Sampling and Analysis of YouTube Comments. 
doi:10.17605/OSF.IO/HQSXE") + db.session.add(tool) + db.session.add(Link(software=tool, type="website", url="https://osf.io/hqsxe/", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://github.com/JuKo007/YouTubeComments", comment="")) + tool = Software(name="facepager", + short_description="", developer="Jakob Jünger and Till Keyling", maintainer="Jakob Jünger", softwarecategory=cat_scraping, @@ -740,6 +813,7 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c db.session.add(Link(software=tool, type="repository", url="https://github.com/strohne/Facepager", comment="")) tool = Software(name="Scrapy", + short_description="", developer="", maintainer="", softwarecategory=cat_scraping, @@ -752,6 +826,7 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c db.session.add(Link(software=tool, type="repository", url="https://github.com/scrapy/scrapy", comment="")) tool = Software(name="RSelenium", + short_description="", developer="John Harrison", maintainer="Ju Yeong Kim", softwarecategory=cat_scraping, @@ -805,26 +880,35 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c tool = Software(name="LCM", short_description="Leipzig Corpus Miner a decentralized SaaS application for the analysis of very large amounts of news texts ", developer="Gregor Wiedeman, Andreas Niekler", - maintainer="", + maintainer="Gregor Wiedeman, Andreas Niekler", softwarecategory=cat_int, architecture="framework", license=lic_lgpl, programminglanguages=[prol_java,prol_r], price="0") + + db.session.add(tool) db.session.add(Link(software=tool, type="website", url="http://lcm.informatik.uni-leipzig.de/generic.html", comment="")) tool = Software(name="iLCM", - short_description="The iLCM(LCM=Leipzig Corpus Miner) project pursues the development of an integrated research environment for the analysis of structured and unstructured data in a ‘Software as a Service’ architecture (SaaS). The research environment addresses requirements for the quantitative evaluation of large amounts of qualitative data using text mining methods and requirements for the reproducibility of data-driven research designs in the social sciences.", - developer="Gregor Wiedeman, Andreas Niekler", - maintainer="", + short_description="'The iLCM[LCM=Leipzig Corpus Miner] project pursues the development of an integrated research environment for the analysis of structured and unstructured data in a ‘Software as a Service’ architecture (SaaS). The research environment addresses requirements for the quantitative evaluation of large amounts of qualitative data using text mining methods and requirements for the reproducibility of data-driven research designs in the social sciences.' 
link:http://ilcm.informatik.uni-leipzig.de/ilcm/ilcm/[source, retrieved 08.03.2019]", + developer=" Andreas Niekler, Arnim Bleier, Christian Kahmann, Lisa Posch, Gregor Wiedemann, Kenan Erdogan, Gerhard Heyer and Markus Strohmaier", + maintainer="Andreas Niekler, Arnim Bleier, Christian Kahmann, Lisa Posch, Gregor Wiedemann, Kenan Erdogan, Gerhard Heyer and Markus Strohmaier", architecture="SaaS", softwarecategory=cat_int, license=lic_lgpl, programminglanguages=[prol_java,prol_py,prol_r], - price="0") + languages=[lang_de], + operatingsystems=[os_browser,os_docker], + currentversion="0.96", + lastchanged=datetime.datetime.strptime('05032018', '%d%m%Y').date(), + methods=[m_text_preprocessing, m_pos_tagging, m_syntactic_parsing, m_context_volatility, m_co_occurence, m_w_dict_frequencies, lda], + price="0", + recommandedcitation="Niekler, A., Bleier, A., Kahmann, C., Posch, L., Wiedemann, G., Erdogan, K., Heyer, G., & Strohmaier, M. (2018). iLCM - A Virtual Research Infrastructure for Large-Scale Qualitative Data. CoRR, abs/1805.11404.") db.session.add(tool) db.session.add(Link(software=tool, type="website", url="https://ilcm.informatik.uni-leipzig.de/", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://hub.docker.com/r/ckahmann/ilcm_r/tags", comment="docker")) tool = Software(name="ATLAS.ti", short_description="", @@ -1430,6 +1514,18 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c db.session.add(tool) db.session.add(Link(software=tool, type="repository", url="https://github.com/tesseract-ocr/tesseract", comment="")) + tool = Software(name="LIONESS", + short_description="'LIONESS Lab is a free web-based platform for online interactive experiments. It allows you to develop, test and conduct decision-making experiments with live feedback between participants. LIONESS experiments include a standardized set of methods to deal with the set of challenges arising when conducting interactive experiments online. These methods reflect current ‘best practices’ for, e.g., preventing participants to enter a session more than once, facilitating on-the-fly formation of interaction groups, reducing waiting times for participants, driving down attrition by retaining attention of online participants and, importantly, adequate handling of cases in which participants drop out.With LIONESS Lab you can readily develop and test your experiments online in a user-friendly environment. You can develop experiments from scratch in a point-and-click fashion or start from an existent design from our growing repository and adjust it according your own requirements.' link:https://lioness-lab.org/faq/[Retrieved 07.03.2019]", + developer="", + maintainer="", + softwarecategory=cat_oe, + architecture="package", + license=lic_prop, + programminglanguages=[prol_js], + price="0") + db.session.add(tool) + db.session.add(Link(software=tool, type="website", url="https://lioness-lab.org/", comment="")) + tool = Software(name="nodeGame", short_description="'NodeGame is a free, open source JavaScript/HTML5 framework for conducting synchronous experiments online and in the lab directly in the browser window. 
It is specifically designed to support behavioral research along three dimensions: larger group sizes, real-time (but also discrete time) experiments, batches of simultaneous experiments.'", developer="Stefan Balietti", @@ -1437,22 +1533,53 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c softwarecategory=cat_oe, architecture="package", license=lic_mit, - programminglanguages=[prol_js]) + programminglanguages=[prol_js], + price="0") db.session.add(tool) db.session.add(Link(software=tool, type="website", url="https://nodegame.org/", comment="")) db.session.add(Link(software=tool, type="repository", url="https://github.com/nodeGame", comment="")) - tool = Software(name="scikit-learn", - short_description="'Scikit-learn is a free software machine learning library for the Python programming language. It features various classification, regression and clustering algorithms including support vector machines, random forests, gradient boosting, k-means and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries NumPy and SciPy.'", - developer="Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.", - maintainer="Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.", - softwarecategory=cat_misc, + tool = Software(name="Breadboard", + short_description="'Breadboard is a software platform for developing and conducting human interaction experiments on networks. It allows researchers to rapidly design experiments using a flexible domain-specific language and provides researchers with immediate access to a diverse pool of online participants.' link:http://breadboard.yale.edu/[Retrieved: 07.03.2019]", + developer="McKnight, Mark E., and Nicholas A. Christakis", + maintainer="McKnight, Mark E., and Nicholas A. Christakis", + softwarecategory=cat_oe, architecture="package", - license=lic_bsd, - programminglanguages=[prol_py]) + license=lic_unknown, + programminglanguages=[prol_js], + price="0", + recommandedcitation="McKnight, Mark E., and Nicholas A. Christakis. Breadboard. Computer software. Breadboard: Software for Online Social Experiments. Vers. 2. Yale University, 1 May 2016. Web.") db.session.add(tool) - db.session.add(Link(software=tool, type="website", url="https://scikit-learn.org/stable/index.html", comment="")) - db.session.add(Link(software=tool, type="repository", url="https://github.com/scikit-learn/scikit-learn", comment="")) + db.session.add(Link(software=tool, type="website", url="http://breadboard.yale.edu/", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://github.com/human-nature-lab/breadboard", comment="")) + + tool = Software(name="Empirica(beta)", + short_description="'Open source project to tackle the problem of long development cycles required to produce software to conduct multi-participant and real-time human experiments online.' 
link:https://github.com/empiricaly/meteor-empirica-core[Retrieved: 07.03.2019]", + developer="Nicolas Paton, & Abdullah Almaatouq", + maintainer="Nicolas Paton, & Abdullah Almaatouq", + softwarecategory=cat_oe, + architecture="package", + license=lic_mit, + programminglanguages=[prol_js], + price="0", + recommandedcitation="Nicolas Paton, & Abdullah Almaatouq. (2018, November 15). Empirica: Open-Source, Real-Time, Synchronous, Virtual Lab Framework (Version v0.0.5). Zenodo. http://doi.org/10.5281/zenodo.1488413") + db.session.add(tool) + db.session.add(Link(software=tool, type="website", url="https://empirica.ly/", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://github.com/empiricaly/meteor-empirica-core", comment="")) + + tool = Software(name="SearchGazer", + short_description="SearchGazer: Webcam Eye Tracking for Remote Studies of Web Search", + developer="Alexandra Papoutsaki and James Laskey and Jeff Huang", + maintainer="Alexandra Papoutsaki and James Laskey and Jeff Huang", + softwarecategory=cat_eye, + architecture="package", + license=lic_mit, + programminglanguages=[prol_js], + price="0", + recommandedcitation="@inproceedings{papoutsaki2017searchgazer, author = {Alexandra Papoutsaki and James Laskey and Jeff Huang}, title = {SearchGazer: Webcam Eye Tracking for Remote Studies of Web Search}, booktitle = {Proceedings of the ACM SIGIR Conference on Human Information Interaction \& Retrieval (CHIIR)}, year = {2017}, organization={ACM}} ") + db.session.add(tool) + db.session.add(Link(software=tool, type="website", url="https://nodegame.org/", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://github.com/nodeGame", comment="")) tool = Software(name="NetLogo", short_description="'NetLogo is a multi-agent programmable modeling environment. It is used by many tens of thousands of students, teachers and researchers worldwide. It also powers HubNet participatory simulations.'", @@ -1502,6 +1629,17 @@ The aim of the LDA algorithm is to model a comprehensive representation of the c db.session.add(Link(software=tool, type="website", url="http://spades.predictiveecology.org/", comment="")) db.session.add(Link(software=tool, type="repository", url="", comment="")) + tool = Software(name="scikit-learn", + short_description="'Scikit-learn is a free software machine learning library for the Python programming language. It features various classification, regression and clustering algorithms including support vector machines, random forests, gradient boosting, k-means and DBSCAN, and is designed to interoperate with the Python numerical and scientific libraries NumPy and SciPy.'", + developer="Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.", + maintainer="Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.", + softwarecategory=cat_misc, + architecture="package", + license=lic_bsd, + programminglanguages=[prol_py]) + db.session.add(tool) + db.session.add(Link(software=tool, type="website", url="https://scikit-learn.org/stable/index.html", comment="")) + db.session.add(Link(software=tool, type="repository", url="https://github.com/scikit-learn/scikit-learn", comment="")) ''' tool = Software(name="", diff --git a/templates/export/software.jinja2 b/templates/export/software.jinja2 index e5c84a2c0016550ef9a01973dcf6aa733cc7dec9..a7b7fee64aa5f69113473a03f5c0ebfb7ee73b71 100644 --- a/templates/export/software.jinja2 +++ b/templates/export/software.jinja2 @@ -1,15 +1,23 @@ -.Basisdaten +.general data |=== -| Name | {{ software.name }} -| Kurzbeschreibung | {{software.short_description}} -| Entwickler | {{ software.developer }} -| Maintainer | {{ software.maintainer }} -| Aktuelle Version | #version -| Letzte Änderung | #lastrelease -| Programmiersprachen | {% for prol in software.programminglanguages %}{{prol.name}} {% endfor %} -| Betriebssystem | #os -| Lizenz | {{ software.license }} -| Sprache | #language -| Architektur | {{software.architecture}} -| Links | {% for link in software.links %}link:{{link.url}}[{{link.type}}{{'-'+link.comment if link.comment else '' }}] {% endfor %} +| name | {{ software.name }} +| short description | {{ software.short_description }} +| software category | {{ software.softwarecategory }} +| developer | {{ software.developer }} +| maintainer | {{ software.maintainer }} +| current version | {{ software.currentversion }} +| last changed | {{ software.lastchanged}} +| programming lanuage(s) | {{ software.programminglanguages |map(attribute='name')|join(', ') }} +| operating system(s)| {{ software.operatingsystems |map(attribute='name')|join(', ') }} +| license | {{ software.license }} +| costs | {{ software.price }} +| language | {{ software.languages |map(attribute='name')|join(', ') }} +| architecture | {{software.architecture}} +| web-links | {% for link in software.links %}link:{{link.url}}[{{link.type}}{{'-'+link.comment if link.comment else '' }}], {% endfor %} +|=== + +.features +|=== +| supported methods | {% for method in software.methods %}link:MethodsList#{{ method.name.replace(' ', '-').replace('(', '-').replace(')', '-').replace('/', '-').replace('--', '-').strip('-') }}[<{{method.name}}>] {% endfor %} +| additional features | {{ software.features |map(attribute='name')|join(', ') }} |=== \ No newline at end of file diff --git a/templates/export/softwares.jinja2 b/templates/export/softwares.jinja2 index 540a9183e397b174d37c38538f2c60e376d99726..74d5f01d44118a5eae3bc64cf6d3e01086ff169e 100644 --- a/templates/export/softwares.jinja2 +++ b/templates/export/softwares.jinja2 @@ -8,6 +8,6 @@ == {{ softwarecategory[0].name }} _{{softwarecategory[0].short_description}}_ {% for software in softwarecategory[1] %} -link:Tool_{{ software.name.replace(' ', '').replace('/', '') }}[{{ software.name }}] ({% for link in software.links %}link:{{link.url}}[{{link.type}}{{'-'+link.comment if link.comment else '' }}] {% endfor %}):: {{software.short_description}} < {{software.license}} | {{software.architecture}} | {% for prol in software.programminglanguages %}{{prol.name}} {% endfor %} > +link:Tool_{{ software.name.replace(' ', '').replace('/', '') }}[{{ software.name }}] ({% for link in software.links %}link:{{link.url}}[{{link.type}}{{'-'+link.comment if link.comment else '' }}] {% endfor %}):: {{software.short_description}} < {% if software.currentversion 
%}{{software.currentversion}}{% endif %} | {{software.license}} | {{software.architecture}} | {{ software.programminglanguages |map(attribute='name')|join(', ') }} | {{ software.languages |map(attribute='name')|join(', ') }}> {% endfor %} {% endfor %} \ No newline at end of file