From 7fec024bbaf88aa561a0f1b072f27f29a9e7c12a Mon Sep 17 00:00:00 2001
From: "Hartung, Michael" <michael.hartung@uni-hamburg.de>
Date: Fri, 21 Apr 2023 23:01:45 +0200
Subject: [PATCH] linting files

Former-commit-id: 37a4fed0b3d91b9f88c014791e05310352601f86 [formerly ef0ba07e927cf0f5487045b978fce35561aec74f]
Former-commit-id: ec1771c6308bcfe26bf2020bc2f9f80440ba5ac6
---
 .gitignore                                    |   2 +
 drugstone/management/includes/DataLoader.py   | 210 +----
 .../management/includes/DataPopulator.py      | 130 +--
 .../management/includes/DatasetLoader.py      | 197 ++---
 drugstone/models.py                           | 200 +++--
 drugstone/serializers.py                      | 106 ++-
 drugstone/settings/settings.py                | 142 ++--
 drugstone/urls.py                             |  74 +-
 drugstone/views.py                            | 760 +++++++++++-------
 setup.cfg                                     |   2 +
 10 files changed, 938 insertions(+), 885 deletions(-)

diff --git a/.gitignore b/.gitignore
index af2b847..9b7a0cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ celerybeat-schedule.dir
 docker-django.env.prodnetworks.zip
 data/Networks/
 docker-django.env
+.prettierrc
+.vscode/
diff --git a/drugstone/management/includes/DataLoader.py b/drugstone/management/includes/DataLoader.py
index 958d45a..cd1fbb5 100755
--- a/drugstone/management/includes/DataLoader.py
+++ b/drugstone/management/includes/DataLoader.py
@@ -3,41 +3,41 @@ import json
 
 
 class DataLoader:
-    PATH_PROTEINS = 'data/Proteins/'
+    PATH_PROTEINS = "data/Proteins/"
     # PATH_DRUGS = 'data/Drugs/'
-    PATH_EXPR = 'data/Expression/'
+    PATH_EXPR = "data/Expression/"
     # PATH_DISORDERS = 'data/Disorders/'
-    PATH_PDI = 'data/PDI/'
-    PATH_PPI = 'data/PPI/'
+    PATH_PDI = "data/PDI/"
+    PATH_PPI = "data/PPI/"
     # PATH_PDi = 'data/PDi/'
-    PATH_DDi = 'data/DrDi/'
+    PATH_DDi = "data/DrDi/"
 
     # Proteins
     # PROTEINS_COVEX = 'protein_list.csv'
-    ENTREZ_TO_ENSG = 'entrez_to_ensg.json'
+    ENTREZ_TO_ENSG = "entrez_to_ensg.json"
 
     # Disorders
     # DISORDERS_MONDO = 'disorders.tsv'
-    #Drugs
+    # Drugs
     # DRUG_FILE = 'drug-file.txt'
 
-    #Expressions
-    EXPR_FILE = 'gene_tissue_expression.gct'
+    # Expressions
+    EXPR_FILE = "gene_tissue_expression.gct"
 
     # Protein-Protein-Interactions
-    PPI_APID = 'apid_9606_Q2.txt'
+    PPI_APID = "apid_9606_Q2.txt"
     # PPI_BIOGRID = 'BIOGRID-ORGANISM-Homo_sapiens-3.5.187.mitab.txt'
-    PPI_STRING = 'string_interactions.csv'
+    PPI_STRING = "string_interactions.csv"
     # Protein-Drug-Interactions
-    PDI_CHEMBL = 'chembl_drug_gene_interactions_uniq.csv'
-    PDI_DGIDB = 'DGIdb_drug_gene_interactions.csv'
+    PDI_CHEMBL = "chembl_drug_gene_interactions_uniq.csv"
+    PDI_DGIDB = "DGIdb_drug_gene_interactions.csv"
     # PDI_DRUGBANK = 'drugbank_drug_gene_interactions_uniq.csv'
 
     # Protein-Disorder-Interaction
     # PDi_DISGENET = 'disgenet-protein_disorder_association.tsv'
 
     # Drug-Disorder-Indictation
-    DDi_DRUGBANK = 'drugbank-drug_disorder_indication.tsv'
+    DDi_DRUGBANK = "drugbank-drug_disorder_indication.tsv"
 
     @staticmethod
     def _clean_entrez(x):
@@ -47,46 +47,9 @@ class DataLoader:
         except Exception:
             return x
 
-    # @staticmethod
-    # def _clean_mondo(x):
-    #     # convert to string if not empty
-    #     try:
-    #         return str(int(x))
-    #     except Exception:
-    #         return x
-
-    # @staticmethod
-    # def load_proteins() -> pd.DataFrame:
-    #     """Loads the list of proteins used in CoVex
-    #
-    #     Returns:
-    #         pd.DataFrame: columns 'protein_ac', 'gene_name', 'protein_name', 'entrez_id'
-    #     """
-    #
-    #     df = pd.read_csv(f'{DataLoader.PATH_PROTEINS}{DataLoader.PROTEINS_COVEX}')
-    #     df['entrez_id'] = df['entrez_id'].map(DataLoader._clean_entrez)
-    #     return df
-
-    # @staticmethod
-    # def load_drugs()-> pd.DataFrame:
-    #     return pd.read_csv(f'{DataLoader.PATH_DRUGS}{DataLoader.DRUG_FILE}', sep='\t')
-
     @staticmethod
     def load_expressions() -> pd.DataFrame:
-        return pd.read_csv(f'{DataLoader.PATH_EXPR}{DataLoader.EXPR_FILE}', sep='\t')
-
-
-    # @staticmethod
-    # def load_disorders() -> pd.DataFrame:
-    #     """Loads the list of disorders used in Nedrex
-    #
-    #     Returns:
-    #         pd.DataFrame: columns 'mondo_id', 'disorder_name', 'icd_10'
-    #     """
-    #
-    #     df = pd.read_csv(f'{DataLoader.PATH_DISORDERS}{DataLoader.DISORDERS_MONDO}', sep='\t')
-    #     df['mondo_id'] = df['mondo_id'].map(DataLoader._clean_mondo)
-    #     return df
+        return pd.read_csv(f"{DataLoader.PATH_EXPR}{DataLoader.EXPR_FILE}", sep="\t")
 
     @staticmethod
     def load_ensg() -> dict:
@@ -96,7 +59,9 @@ class DataLoader:
             dict with {entrez1: [ensg1, ensg2], ...}
         """
 
-        f = open(f'{DataLoader.PATH_PROTEINS}{DataLoader.ENTREZ_TO_ENSG}', )
+        f = open(
+            f"{DataLoader.PATH_PROTEINS}{DataLoader.ENTREZ_TO_ENSG}",
+        )
         data = json.load(f)
         return data
 
@@ -107,100 +72,12 @@ class DataLoader:
         Returns:
             pd.DataFrame: columns 'entrez_a', 'entrez_b'
         """
-        df = pd.read_csv(f'{DataLoader.PATH_PPI}{DataLoader.PPI_STRING}', index_col=0)
-        df['entrez_a'] = df['entrez_a'].map(DataLoader._clean_entrez)
-        df['entrez_b'] = df['entrez_b'].map(DataLoader._clean_entrez)
+        df = pd.read_csv(f"{DataLoader.PATH_PPI}{DataLoader.PPI_STRING}", index_col=0)
+        df["entrez_a"] = df["entrez_a"].map(DataLoader._clean_entrez)
+        df["entrez_b"] = df["entrez_b"].map(DataLoader._clean_entrez)
         df = df.drop_duplicates()
         return df
 
-    # @staticmethod
-    # def load_ppi_biogrid() -> pd.DataFrame:
-    #     """Loads the Biogrid PPI interactions with Entex IDs
-    #
-    #     Returns:
-    #         pd.DataFrame: columns 'entrez_a', 'entrez_b'
-    #     """
-    #     df = pd.read_csv(f'{DataLoader.PATH_PPI}{DataLoader.PPI_BIOGRID}', sep='\t')[
-    #         ['#ID Interactor A', 'ID Interactor B', 'Interaction Detection Method', 'Publication Identifiers',
-    #          'Interaction Types', 'Confidence Values']]
-    #
-    #     def parse_interactor(x):
-    #         # format entrez protein/locuslink:6416
-    #         # wanted: 6416
-    #         return x.split(':')[-1]
-    #
-    #     def parse_publication_identifiers(x):
-    #         # format: pubmed:9006895
-    #         # wanted: 9006895
-    #         return x.split(':')[-1]
-    #
-    #     def parse_interaction_detection_method_get_id(x):
-    #         # format: psi-mi:"MI:0018"(two hybrid)
-    #         # wanted: MI:0018
-    #         return x.split('"')[1]
-    #
-    #     def parse_interaction_detection_method_get_name(x):
-    #         # format: psi-mi:"MI:0018"(two hybrid)
-    #         # wanted: two hybrid
-    #         return x.split('(')[1][:-1]
-    #
-    #     def parse_interaction_types_get_id(x):
-    #         # format: psi-mi:"MI:0407"(direct interaction)
-    #         # wanted: MI:0407
-    #         return x.split('"')[1]
-    #
-    #     def parse_interaction_types_get_name(x):
-    #         # format: psi-mi:"MI:0407"(direct interaction)
-    #         # wanted: direct interaction
-    #         return x.split('(')[1][:-1]
-    #
-    #     def parse_confidence_value(x):
-    #         # format: score:7.732982515 or '-'
-    #         # wanted: 7.732982515 or '-'
-    #         if x == '-':
-    #             return '-'
-    #         else:
-    #             return x.split(':')[1]
-    #
-    #     df['#ID Interactor A'] = df['#ID Interactor A'].map(parse_interactor)
-    #     df['ID Interactor B'] = df['ID Interactor B'].map(parse_interactor)
-    #
-    #     df['Interaction Detection Method ID'] = df['Interaction Detection Method'].map(
-    #         parse_interaction_detection_method_get_id)
-    #     df['Interaction Detection Method Name'] = df['Interaction Detection Method'].map(
-    #         parse_interaction_detection_method_get_name)
-    #     df = df.drop('Interaction Detection Method', axis=1)
-    #
-    #     df['Publication Identifiers'] = df['Publication Identifiers'].map(parse_publication_identifiers)
-    #
-    #     df['Interaction Types ID'] = df['Interaction Types'].map(parse_interaction_types_get_id)
-    #     df['Interaction Types Name'] = df['Interaction Types'].map(parse_interaction_types_get_name)
-    #     df = df.drop('Interaction Types', axis=1)
-    #
-    #     df['Confidence Values'] = df['Confidence Values'].map(parse_confidence_value)
-    #
-    #     # remove dirty data (entrez id is sometimes protein name
-    #     to_remove = ['P0DTC1', 'P0DTD2', 'Q7TLC7']
-    #     df = df[~df['#ID Interactor A'].isin(to_remove)]
-    #
-    #     df = df.rename(
-    #         columns={
-    #             '#ID Interactor A': 'entrez_a',
-    #             'ID Interactor B': 'entrez_b',
-    #             'Publication Identifiers': 'pubmed_id',
-    #             'Confidence Values': 'confidence_value',
-    #             'Interaction Detection Method ID': 'detection_method_psi_mi',
-    #             'Interaction Detection Method Name': 'detection_method_name',
-    #             'Interaction Types ID': 'type_psi_mi',
-    #             'Interaction Types Name': 'type_name'
-    #         }
-    #     )
-    #
-    #     df['entrez_a'] = df['entrez_a'].map(DataLoader._clean_entrez)
-    #     df['entrez_b'] = df['entrez_b'].map(DataLoader._clean_entrez)
-    #
-    #     return df
-
     @staticmethod
     def load_ppi_apid() -> pd.DataFrame:
         """Loads the APID PPI interactions with Uniprot ACs
@@ -208,12 +85,13 @@ class DataLoader:
         Returns:
             pd.DataFrame: columns 'from_protein_ac', 'to_protein_ac'
         """
-        df = pd.read_csv(f'{DataLoader.PATH_PPI}{DataLoader.PPI_APID}', index_col=0, sep='\t')
-        df = df.rename(columns={
-            'UniprotID_A': 'from_protein_ac',
-            'UniprotID_B': 'to_protein_ac'
-        })
-        return df[['from_protein_ac', 'to_protein_ac']]
+        df = pd.read_csv(
+            f"{DataLoader.PATH_PPI}{DataLoader.PPI_APID}", index_col=0, sep="\t"
+        )
+        df = df.rename(
+            columns={"UniprotID_A": "from_protein_ac", "UniprotID_B": "to_protein_ac"}
+        )
+        return df[["from_protein_ac", "to_protein_ac"]]
 
     @staticmethod
     def load_pdi_chembl() -> pd.DataFrame:
@@ -222,16 +100,7 @@ class DataLoader:
         Returns:
             pd.DataFrame: columns "drug_id" and "protein_ac"
         """
-        return pd.read_csv(f'{DataLoader.PATH_PDI}{DataLoader.PDI_CHEMBL}')
-
-    # @staticmethod
-    # def load_pdis_disgenet() -> pd.DataFrame:
-    #     """Loads the DisGeNET PDis associations with Uniprot Numbers and Mondo IDs
-    #
-    #     Returns:
-    #         pd.DataFrame: columns "protein_name", "disorder_name" and "score"
-    #     """
-    #     return pd.read_csv(f'{DataLoader.PATH_PDi}{DataLoader.PDi_DISGENET}', sep='\t', dtype={'disorder_name':str, 'protein_name':str, 'score':float})
+        return pd.read_csv(f"{DataLoader.PATH_PDI}{DataLoader.PDI_CHEMBL}")
 
     @staticmethod
     def load_drdis_drugbank() -> pd.DataFrame:
@@ -240,7 +109,11 @@ class DataLoader:
         Returns:
             pd.DataFrame: columns "drugbank_id" and "mondo_id"
         """
-        return pd.read_csv(f'{DataLoader.PATH_DDi}{DataLoader.DDi_DRUGBANK}', sep='\t', dtype={'drugbank_id':str, 'mondo_id':str})
+        return pd.read_csv(
+            f"{DataLoader.PATH_DDi}{DataLoader.DDi_DRUGBANK}",
+            sep="\t",
+            dtype={"drugbank_id": str, "mondo_id": str},
+        )
 
     @staticmethod
     def load_pdi_dgidb() -> pd.DataFrame:
@@ -249,17 +122,6 @@ class DataLoader:
         Returns:
             pd.DataFrame: columns "drug_id" and "entrez_id"
         """
-        df = pd.read_csv(f'{DataLoader.PATH_PDI}{DataLoader.PDI_DGIDB}', index_col=0)
-        df['entrez_id'] = df['entrez_id'].map(DataLoader._clean_entrez)
+        df = pd.read_csv(f"{DataLoader.PATH_PDI}{DataLoader.PDI_DGIDB}", index_col=0)
+        df["entrez_id"] = df["entrez_id"].map(DataLoader._clean_entrez)
         return df
-
-    # @staticmethod
-    # def load_pdi_drugbank() -> pd.DataFrame:
-    #     """Loads the drugbank PDI interactions with DrugBank drug IDs and Entrez Gene IDs
-    #
-    #     Returns:
-    #         pd.DataFrame: columns "drug_id" and "entrez_id"
-    #     """
-    #     df = pd.read_csv(f'{DataLoader.PATH_PDI}{DataLoader.PDI_DRUGBANK}').dropna()
-    #     df['entrez_id'] = df['entrez_id'].map(DataLoader._clean_entrez)
-    #     return df
diff --git a/drugstone/management/includes/DataPopulator.py b/drugstone/management/includes/DataPopulator.py
index 4a4a570..988bd65 100755
--- a/drugstone/management/includes/DataPopulator.py
+++ b/drugstone/management/includes/DataPopulator.py
@@ -4,18 +4,16 @@ from drugstone.management.includes.NodeCache import NodeCache
 
 
 class DataPopulator:
-
     def __init__(self, cache: NodeCache):
         self.cache = cache
 
     def populate_expressions(self, update):
-
         self.cache.init_proteins()
         df = DataLoader.load_expressions()
 
         tissues_models = dict()
         for tissue_name in df.columns.values[2:]:
-            tissue,_ = models.Tissue.objects.get_or_create(name=tissue_name)
+            tissue, _ = models.Tissue.objects.get_or_create(name=tissue_name)
             tissues_models[tissue_name] = tissue
 
         proteins_linked = 0
@@ -27,15 +25,17 @@ class DataPopulator:
 
         size = 0
         for _, row in df.iterrows():
-            gene_name = row['Description']
+            gene_name = row["Description"]
 
             for protein_model in self.cache.get_proteins_by_gene(gene_name):
                 proteins_linked += 1
                 if not update or self.cache.is_new_protein(protein_model):
                     for tissue_name, tissue_model in tissues_models.items():
-                        expr = models.ExpressionLevel(protein=protein_model,
-                                                      tissue=tissue_model,
-                                                      expression_level=row[tissue_name])
+                        expr = models.ExpressionLevel(
+                            protein=protein_model,
+                            tissue=tissue_model,
+                            expression_level=row[tissue_name],
+                        )
                         id = hash(expr)
                         if id in uniq:
                             continue
@@ -49,8 +49,8 @@ class DataPopulator:
         models.ExpressionLevel.objects.bulk_create(bulk)
         return size + len(bulk)
 
-    def populate_ensg(self,update) -> int:
-        """ Populates the Ensembl-Gene table in the django database.
+    def populate_ensg(self, update) -> int:
+        """Populates the Ensembl-Gene table in the django database.
         Also maps the added ensg entries to the corresponding proteins.
         Handles loading the data and passing it to the django database
 
@@ -71,7 +71,7 @@ class DataPopulator:
         return len(bulk)
 
     def populate_ppi_string(self, dataset, update) -> int:
-        """ Populates the Protein-Protein-Interactions from STRINGdb
+        """Populates the Protein-Protein-Interactions from STRINGdb
         Handles loading the data and passing it to the django database
 
         Returns:
@@ -84,23 +84,28 @@ class DataPopulator:
         for _, row in df.iterrows():
             try:
                 # try fetching proteins
-                proteins_a = self.cache.get_proteins_by_entrez(row['entrez_a'])
-                proteins_b = self.cache.get_proteins_by_entrez(row['entrez_b'])
+                proteins_a = self.cache.get_proteins_by_entrez(row["entrez_a"])
+                proteins_b = self.cache.get_proteins_by_entrez(row["entrez_b"])
             except KeyError:
                 continue
             for protein_a in proteins_a:
                 for protein_b in proteins_b:
-                    if not update or (self.cache.is_new_protein(protein_a) or self.cache.is_new_protein(protein_b)):
-                        bulk.append(models.ProteinProteinInteraction(
-                            ppi_dataset=dataset,
-                            from_protein=protein_a,
-                            to_protein=protein_b
-                        ))
+                    if not update or (
+                        self.cache.is_new_protein(protein_a)
+                        or self.cache.is_new_protein(protein_b)
+                    ):
+                        bulk.append(
+                            models.ProteinProteinInteraction(
+                                ppi_dataset=dataset,
+                                from_protein=protein_a,
+                                to_protein=protein_b,
+                            )
+                        )
         models.ProteinProteinInteraction.objects.bulk_create(bulk)
         return len(bulk)
 
     def populate_ppi_apid(self, dataset, update) -> int:
-        """ Populates the Protein-Protein-Interactions from Apid
+        """Populates the Protein-Protein-Interactions from Apid
         Handles loading the data and passing it to the django database
 
         Returns:
@@ -113,17 +118,22 @@ class DataPopulator:
         for _, row in df.iterrows():
             try:
                 # try fetching proteins
-                protein_a = self.cache.get_protein_by_uniprot(row['from_protein_ac'])
-                protein_b = self.cache.get_protein_by_uniprot(row['to_protein_ac'])
+                protein_a = self.cache.get_protein_by_uniprot(row["from_protein_ac"])
+                protein_b = self.cache.get_protein_by_uniprot(row["to_protein_ac"])
             except KeyError:
                 # continue if not found
                 continue
-            if not update or (self.cache.is_new_protein(protein_a) or self.cache.is_new_protein(protein_b)):
-                bulk.add(models.ProteinProteinInteraction(
-                    ppi_dataset=dataset,
-                    from_protein=protein_a,
-                    to_protein=protein_b
-                ))
+            if not update or (
+                self.cache.is_new_protein(protein_a)
+                or self.cache.is_new_protein(protein_b)
+            ):
+                bulk.add(
+                    models.ProteinProteinInteraction(
+                        ppi_dataset=dataset,
+                        from_protein=protein_a,
+                        to_protein=protein_b,
+                    )
+                )
         models.ProteinProteinInteraction.objects.bulk_create(bulk)
         return len(bulk)
 
@@ -158,8 +168,8 @@ class DataPopulator:
     #     models.ProteinProteinInteraction.objects.bulk_create(bulk)
     #     return len(bulk)
 
-    def populate_pdi_chembl(self,dataset, update) -> int:
-        """ Populates the Protein-Drug-Interactions from Chembl
+    def populate_pdi_chembl(self, dataset, update) -> int:
+        """Populates the Protein-Drug-Interactions from Chembl
         Handles Loading the data and passing it to the django database
 
         Returns:
@@ -172,22 +182,24 @@ class DataPopulator:
         bulk = set()
         for _, row in df.iterrows():
             try:
-                protein = self.cache.get_protein_by_uniprot(row['protein_ac'])
+                protein = self.cache.get_protein_by_uniprot(row["protein_ac"])
             except KeyError:
                 # continue if not found
                 continue
             try:
                 # try fetching drug
-                drug = self.cache.get_drug_by_drugbank(row['drug_id'])
+                drug = self.cache.get_drug_by_drugbank(row["drug_id"])
             except KeyError:
                 # continue if not found
                 continue
-            if not update or (self.cache.is_new_protein(protein) or self.cache.is_new_drug(drug)):
-                bulk.add(models.ProteinDrugInteraction(
-                    pdi_dataset=dataset,
-                    protein=protein,
-                    drug=drug
-                ))
+            if not update or (
+                self.cache.is_new_protein(protein) or self.cache.is_new_drug(drug)
+            ):
+                bulk.add(
+                    models.ProteinDrugInteraction(
+                        pdi_dataset=dataset, protein=protein, drug=drug
+                    )
+                )
         models.ProteinDrugInteraction.objects.bulk_create(bulk)
         return len(bulk)
 
@@ -227,7 +239,7 @@ class DataPopulator:
     #     return len(bulk)
 
     def populate_drdis_drugbank(self, dataset, update) -> int:
-        """ Populates the Drug-Disorder-Indications from DrugBank
+        """Populates the Drug-Disorder-Indications from DrugBank
         Handles Loading the data and passing it to the django database
 
         Returns:
@@ -241,29 +253,33 @@ class DataPopulator:
         for _, row in df.iterrows():
             try:
                 # try fetching protein
-                drug = self.cache.get_drug_by_drugbank(row['drugbank_id'])
+                drug = self.cache.get_drug_by_drugbank(row["drugbank_id"])
             except KeyError:
                 print(f"Did not find drug: {row['drugbank_id']}")
                 # continue if not found
                 continue
             try:
                 # try fetching drug
-                disorder = self.cache.get_disorder_by_mondo(row['mondo_id'])
+                disorder = self.cache.get_disorder_by_mondo(row["mondo_id"])
             except KeyError:
                 print(f"Did not find drug: {row['mondo_id']}")
                 # continue if not found
                 continue
-            if not update or (self.cache.is_new_drug(drug) or self.cache.is_new_disease(disorder)):
-                bulk.add(models.DrugDisorderIndication(
-                    drdi_dataset=dataset,
-                    drug=drug,
-                    disorder=disorder,
-                ))
+            if not update or (
+                self.cache.is_new_drug(drug) or self.cache.is_new_disease(disorder)
+            ):
+                bulk.add(
+                    models.DrugDisorderIndication(
+                        drdi_dataset=dataset,
+                        drug=drug,
+                        disorder=disorder,
+                    )
+                )
         models.DrugDisorderIndication.objects.bulk_create(bulk)
         return len(bulk)
 
-    def populate_pdi_dgidb(self,dataset, update) -> int:
-        """ Populates the Protein-Drug-Interactions from DGIdb
+    def populate_pdi_dgidb(self, dataset, update) -> int:
+        """Populates the Protein-Drug-Interactions from DGIdb
         Handles Loading the data and passing it to the django database
 
         Returns:
@@ -276,20 +292,22 @@ class DataPopulator:
         bulk = set()
         for _, row in df.iterrows():
             try:
-                proteins = self.cache.get_proteins_by_entrez(row['entrez_id'])
+                proteins = self.cache.get_proteins_by_entrez(row["entrez_id"])
             except KeyError:
                 continue
             try:
-                drug = self.cache.get_drug_by_drugbank(row['drug_id'])
+                drug = self.cache.get_drug_by_drugbank(row["drug_id"])
             except KeyError:
                 continue
             for protein in proteins:
-                if not update or (self.cache.is_new_protein(protein) or self.cache.is_new_drug(drug)):
-                    bulk.add(models.ProteinDrugInteraction(
-                        pdi_dataset=dataset,
-                        protein=protein,
-                        drug=drug
-                    ))
+                if not update or (
+                    self.cache.is_new_protein(protein) or self.cache.is_new_drug(drug)
+                ):
+                    bulk.add(
+                        models.ProteinDrugInteraction(
+                            pdi_dataset=dataset, protein=protein, drug=drug
+                        )
+                    )
         models.ProteinDrugInteraction.objects.bulk_create(bulk)
         return len(bulk)
 
diff --git a/drugstone/management/includes/DatasetLoader.py b/drugstone/management/includes/DatasetLoader.py
index 062b7d9..7d11d56 100644
--- a/drugstone/management/includes/DatasetLoader.py
+++ b/drugstone/management/includes/DatasetLoader.py
@@ -8,30 +8,24 @@ LICENSE_FILE = "./data/license.txt"
 
 def get_ppi_string():
     dataset, _ = models.PPIDataset.objects.get_or_create(
-        name='STRING',
-        link='https://string-db.org/',
-        version='11.0',
-        licenced=False
+        name="STRING", link="https://string-db.org/", version="11.0", licenced=False
     )
     return dataset
 
 
 def get_ppi_apid():
     dataset, _ = models.PPIDataset.objects.get_or_create(
-        name='APID',
-        link='http://cicblade.dep.usal.es:8080/APID/',
-        version='January 2019',
-        licenced=False
+        name="APID",
+        link="http://cicblade.dep.usal.es:8080/APID/",
+        version="January 2019",
+        licenced=False,
     )
     return dataset
 
 
 def get_ppi_biogrid():
     dataset, _ = models.PPIDataset.objects.get_or_create(
-        name='BioGRID',
-        link='https://thebiogrid.org/',
-        version='4.0',
-        licenced=False
+        name="BioGRID", link="https://thebiogrid.org/", version="4.0", licenced=False
     )
     return dataset
 
@@ -39,7 +33,7 @@ def get_ppi_biogrid():
 def get_nedrex_version():
     version = get_today_version()
     try:
-        real_version = get_metadata()['version']
+        real_version = get_metadata()["version"]
         if real_version != "0.0.0":
             version = real_version
     except RetryError:
@@ -48,56 +42,44 @@ def get_nedrex_version():
 
 
 def get_nedrex_source_version(source):
-    metadata = get_metadata()['source_databases']
+    metadata = get_metadata()["source_databases"]
     # TODO remove once fixed in nedrex db
-    if 'drug_central' in metadata:
-        metadata['drugcentral'] = metadata['drug_central']
+    if "drug_central" in metadata:
+        metadata["drugcentral"] = metadata["drug_central"]
 
-    return metadata[source]['date']
+    return metadata[source]["date"]
 
 
 def get_drug_target_nedrex(url, licenced):
     dataset, _ = models.PDIDataset.objects.get_or_create(
-        name='NeDRex',
-        link=url,
-        version=get_nedrex_version(),
-        licenced=licenced
+        name="NeDRex", link=url, version=get_nedrex_version(), licenced=licenced
     )
     return dataset
 
 
 def get_ppi_nedrex(url, licenced):
     dataset, _ = models.PPIDataset.objects.get_or_create(
-        name='NeDRex',
-        link=url,
-        version=get_nedrex_version(),
-        licenced=licenced
+        name="NeDRex", link=url, version=get_nedrex_version(), licenced=licenced
     )
     return dataset
 
 
 def get_protein_disorder_nedrex(url, licenced):
     dataset, _ = models.PDisDataset.objects.get_or_create(
-        name='NeDRex',
-        link=url,
-        version=get_nedrex_version(),
-        licenced=licenced
+        name="NeDRex", link=url, version=get_nedrex_version(), licenced=licenced
     )
     return dataset
 
 
 def get_drug_disorder_nedrex(url, licenced):
     dataset, _ = models.DrDiDataset.objects.get_or_create(
-        name='NeDRex',
-        link=url,
-        version=get_nedrex_version(),
-        licenced=licenced
+        name="NeDRex", link=url, version=get_nedrex_version(), licenced=licenced
     )
     return dataset
 
 
 def write_license(text):
-    with open(LICENSE_FILE, 'w') as fh:
+    with open(LICENSE_FILE, "w") as fh:
         fh.write(text)
 
 
@@ -107,76 +89,71 @@ def update_license():
         write_license(license)
         return license
     except RetryError:
-        print(f'License could not be retreived.')
+        print("License could not be retreived.")
         return ""
 
 
 def import_license():
     try:
         license = ""
-        with open(LICENSE_FILE, 'r') as fh:
+        with open(LICENSE_FILE, "r") as fh:
             for line in fh:
                 license += line
         return license
     except FileNotFoundError:
-        print(f'No license doc there yet! Make sure to run an update first!')
+        print("No license doc there yet! Make sure to run an update first!")
     return ""
 
 
 def get_drug_target_chembl():
     dataset, _ = models.PDIDataset.objects.get_or_create(
-        name='ChEMBL',
-        link='https://www.ebi.ac.uk/chembl/',
-        version='27',
-        licenced=False
+        name="ChEMBL",
+        link="https://www.ebi.ac.uk/chembl/",
+        version="27",
+        licenced=False,
     )
     return dataset
 
 
 def get_drug_target_dgidb():
     dataset, _ = models.PDIDataset.objects.get_or_create(
-        name='DGIdb',
-        link='https://www.dgidb.org/',
-        version='4.2.0',
-        licenced=False
+        name="DGIdb", link="https://www.dgidb.org/", version="4.2.0", licenced=False
     )
     return dataset
 
 
 def get_drug_target_drugbank():
     dataset, _ = models.PDIDataset.objects.get_or_create(
-        name='DrugBank',
-        link='https://go.drugbank.com/',
-        version='5.1.7',
-        licenced=True
+        name="DrugBank", link="https://go.drugbank.com/", version="5.1.7", licenced=True
     )
     return dataset
 
 
 def get_disorder_protein_disgenet():
     dataset, _ = models.PDisDataset.objects.get_or_create(
-        name='DisGeNET',
-        link='https://www.disgenet.org/home/',
-        version='6.0',
-        licenced=False
+        name="DisGeNET",
+        link="https://www.disgenet.org/home/",
+        version="6.0",
+        licenced=False,
     )
     return dataset
 
 
 def get_drug_disorder_drugbank():
     dataset, _ = models.DrDiDataset.objects.get_or_create(
-        name='DrugBank',
-        link='https://go.drugbank.com/',
-        version='5.1.8',
-        licenced=False
+        name="DrugBank",
+        link="https://go.drugbank.com/",
+        version="5.1.8",
+        licenced=False,
     )
     return dataset
 
 
 def get_today_version():
     import datetime
+
     now = datetime.date.today()
-    version = f'{now.year}-{now.month}-{now.day}_temp'
+    version = f"{now.year}-{now.month}-{now.day}_temp"
     return version
 
 
@@ -188,10 +165,7 @@ def get_ppi_nedrex_dataset(url, licenced, source):
         pass
 
     dataset, _ = models.PPIDataset.objects.get_or_create(
-        name=source,
-        link=url,
-        version=version,
-        licenced=licenced
+        name=source, link=url, version=version, licenced=licenced
     )
     return dataset
 
@@ -204,10 +178,7 @@ def get_pdi_nedrex_dataset(url, licenced, source):
         pass
 
     dataset, _ = models.PDIDataset.objects.get_or_create(
-        name=source,
-        link=url,
-        version=version,
-        licenced=licenced
+        name=source, link=url, version=version, licenced=licenced
     )
     return dataset
 
@@ -220,10 +191,7 @@ def get_pdis_nedrex_dataset(url, licenced, source):
         pass
 
     dataset, _ = models.PDisDataset.objects.get_or_create(
-        name=source,
-        link=url,
-        version=version,
-        licenced=licenced
+        name=source, link=url, version=version, licenced=licenced
     )
     return dataset
 
@@ -236,10 +204,7 @@ def get_drdi_nedrex_dataset(url, licenced, source):
         pass
 
     dataset, _ = models.DrDiDataset.objects.get_or_create(
-        name=source,
-        link=url,
-        version=version,
-        licenced=licenced
+        name=source, link=url, version=version, licenced=licenced
     )
     return dataset
 
@@ -253,7 +218,7 @@ def is_licenced_ppi_source(source):
 
     try:
         models.PPIDataset.objects.get(name=source, version=version, licenced=False).link
-    except:
+    except Exception:
         return True
     return False
 
@@ -267,7 +232,7 @@ def is_licenced_pdi_source(source):
 
     try:
         models.PDIDataset.objects.get(name=source, version=version, licenced=False).link
-    except:
+    except Exception:
         return True
     return False
 
@@ -280,8 +245,10 @@ def is_licenced_pdis_source(source):
         pass
 
     try:
-        models.PDisDataset.objects.get(name=source, version=version, licenced=False).link
-    except:
+        models.PDisDataset.objects.get(
+            name=source, version=version, licenced=False
+        ).link
+    except Exception:
         return True
     return False
 
@@ -294,75 +261,9 @@ def is_licenced_drdi_source(source):
         pass
 
     try:
-        models.DrDiDataset.objects.get(name=source, version=version, licenced=False).link
-    except:
+        models.DrDiDataset.objects.get(
+            name=source, version=version, licenced=False
+        ).link
+    except Exception:
         return True
     return False
-
-
-# def remove_old_pdi_data(new_datasets, licenced):
-#     if new_datasets is None:
-#         return
-#     for dataset in new_datasets:
-#         print("Deleting all except "+str(dataset))
-#         try:
-#             for d in models.PDIDataset.objects.filter(name=dataset.name, licenced=licenced):
-#                 print("Testing: "+str(d))
-#                 if d != dataset:
-#                     print("Deleting: "+str(d))
-#                     d.delete()
-#         except Exception as e:
-#             print("Error when trying to delete old datasets")
-#             print(e)
-#             continue
-
-
-# def remove_old_ppi_data(new_datasets, licenced):
-#     if new_datasets is None:
-#         return
-#     for dataset in new_datasets:
-#         print("Deleting all except " + str(dataset))
-#         try:
-#             for d in models.PPIDataset.objects.filter(name=dataset.name, licenced=licenced):
-#                 print("Testing: " + str(d))
-#                 if d != dataset:
-#                     print("Deleting: " + str(d))
-#                     d.delete()
-#         except Exception as e:
-#             print("Error when trying to delete old datasets")
-#             print(e)
-#             continue
-
-
-# def remove_old_pdis_data(new_datasets, licenced):
-#     if new_datasets is None:
-#         return
-#     for dataset in new_datasets:
-#         print("Deleting all except " + str(dataset))
-#         try:
-#             for d in models.PDisDataset.objects.filter(name=dataset.name, licenced=licenced):
-#                 print("Testing: " + str(d))
-#                 if d != dataset:
-#                     print("Deleting: " + str(d))
-#                     d.delete()
-#         except Exception as e:
-#             print("Error when trying to delete old datasets")
-#             print(e)
-#             continue
-
-
-# def remove_old_drdi_data(new_datasets, licenced):
-#     if new_datasets is None:
-#         return
-#     for dataset in new_datasets:
-#         print("Deleting all except " + str(dataset))
-#         try:
-#             for d in models.DrDiDataset.objects.filter(name=dataset.name, licenced=licenced):
-#                 print("Testing: " + str(d))
-#                 if d != dataset:
-#                     print("Deleting: " + str(d))
-#                     d.delete()
-#         except Exception as e:
-#             print("Error when trying to delete old datasets")
-#             print(e)
-#             continue
diff --git a/drugstone/models.py b/drugstone/models.py
index 3c66432..ea9e124 100755
--- a/drugstone/models.py
+++ b/drugstone/models.py
@@ -7,64 +7,66 @@ from django.db import models
 
 class PPIDataset(models.Model):
     id = models.AutoField(primary_key=True)
-    name = models.CharField(max_length=128, default='', unique=False)
-    link = models.CharField(max_length=128, default='', unique=False)
-    version = models.CharField(max_length=128, default='', unique=False)
+    name = models.CharField(max_length=128, default="", unique=False)
+    link = models.CharField(max_length=128, default="", unique=False)
+    version = models.CharField(max_length=128, default="", unique=False)
     licenced = models.BooleanField(default=True)
 
     def __str__(self):
         return f'{self.name}-{self.version}_{"licenced" if self.licenced else "unlicenced"}'
 
     class Meta:
-        unique_together = ('name', 'version', 'licenced')
+        unique_together = ("name", "version", "licenced")
 
 
 class PDIDataset(models.Model):
     id = models.AutoField(primary_key=True)
-    name = models.CharField(max_length=128, default='', unique=False)
-    link = models.CharField(max_length=128, default='', unique=False)
-    version = models.CharField(max_length=128, default='', unique=False)
+    name = models.CharField(max_length=128, default="", unique=False)
+    link = models.CharField(max_length=128, default="", unique=False)
+    version = models.CharField(max_length=128, default="", unique=False)
     licenced = models.BooleanField(default=True)
 
     def __str__(self):
         return f'{self.name}-{self.version}_{"licenced" if self.licenced else "unlicenced"}'
 
     class Meta:
-        unique_together = ('name', 'version','licenced')
+        unique_together = ("name", "version", "licenced")
 
 
 class PDisDataset(models.Model):
     id = models.AutoField(primary_key=True)
-    name = models.CharField(max_length=128, default='', unique=False)
-    link = models.CharField(max_length=128, default='', unique=False)
-    version = models.CharField(max_length=128, default='', unique=False)
+    name = models.CharField(max_length=128, default="", unique=False)
+    link = models.CharField(max_length=128, default="", unique=False)
+    version = models.CharField(max_length=128, default="", unique=False)
     licenced = models.BooleanField(default=True)
 
     def __str__(self):
         return f'{self.name}-{self.version}_{"licenced" if self.licenced else "unlicenced"}'
 
     class Meta:
-        unique_together = ('name', 'version', 'licenced')
+        unique_together = ("name", "version", "licenced")
 
 
 class DrDiDataset(models.Model):
     id = models.AutoField(primary_key=True)
-    name = models.CharField(max_length=128, default='', unique=False)
-    link = models.CharField(max_length=128, default='', unique=False)
-    version = models.CharField(max_length=128, default='', unique=False)
+    name = models.CharField(max_length=128, default="", unique=False)
+    link = models.CharField(max_length=128, default="", unique=False)
+    version = models.CharField(max_length=128, default="", unique=False)
     licenced = models.BooleanField(default=True)
 
     def __str__(self):
         return f'{self.name}-{self.version}_{"licenced" if self.licenced else "unlicenced"}'
 
     class Meta:
-        unique_together = ('name', 'version', 'licenced')
+        unique_together = ("name", "version", "licenced")
 
 
 class EnsemblGene(models.Model):
     id = models.AutoField(primary_key=True)
     name = models.CharField(max_length=15)  # starts with ENSG...
-    protein = models.ForeignKey('Protein', on_delete=models.CASCADE, related_name='ensg')
+    protein = models.ForeignKey(
+        "Protein", on_delete=models.CASCADE, related_name="ensg"
+    )
 
 
 class Protein(models.Model):
@@ -72,22 +74,29 @@ class Protein(models.Model):
     # are either 6 or 10 characters long
     id = models.AutoField(primary_key=True)
     uniprot_code = models.CharField(max_length=10)
-    gene = models.CharField(max_length=127, default='')  # symbol
-    protein_name = models.CharField(max_length=255, default='')
-    entrez = models.CharField(max_length=15, default='')
-    drugs = models.ManyToManyField('Drug', through='ProteinDrugInteraction',
-                                   related_name='interacting_drugs')
-    tissue_expression = models.ManyToManyField('Tissue', through='ExpressionLevel',
-                                               related_name='interacting_drugs')
+    gene = models.CharField(max_length=127, default="")  # symbol
+    protein_name = models.CharField(max_length=255, default="")
+    entrez = models.CharField(max_length=15, default="")
+    drugs = models.ManyToManyField(
+        "Drug", through="ProteinDrugInteraction", related_name="interacting_drugs"
+    )
+    tissue_expression = models.ManyToManyField(
+        "Tissue", through="ExpressionLevel", related_name="interacting_drugs"
+    )
 
     class Meta:
-        unique_together = ('uniprot_code', 'gene', 'entrez')
+        unique_together = ("uniprot_code", "gene", "entrez")
 
     def __str__(self):
         return self.gene
 
     def __eq__(self, other):
-        return self.uniprot_code == other.uniprot_code and self.gene == other.gene and self.protein_name == other.protein_name and self.entrez == other.entrez
+        return (
+            self.uniprot_code == other.uniprot_code
+            and self.gene == other.gene
+            and self.protein_name == other.protein_name
+            and self.entrez == other.entrez
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -104,20 +113,20 @@ class Protein(models.Model):
 
 class ExpressionLevel(models.Model):
     id = models.AutoField(primary_key=True)
-    tissue = models.ForeignKey('Tissue', on_delete=models.CASCADE)
-    protein = models.ForeignKey('Protein', on_delete=models.CASCADE)
+    tissue = models.ForeignKey("Tissue", on_delete=models.CASCADE)
+    protein = models.ForeignKey("Protein", on_delete=models.CASCADE)
     expression_level = models.FloatField()
 
     class Meta:
-        unique_together = ('tissue', 'protein')
+        unique_together = ("tissue", "protein")
 
     def __hash__(self):
-        return hash(f'{self.tissue_id}_{self.protein_id}')
+        return hash(f"{self.tissue_id}_{self.protein_id}")
 
 
 class Tissue(models.Model):
     id = models.AutoField(primary_key=True)
-    name = models.CharField(max_length=128, default='', unique=True)
+    name = models.CharField(max_length=128, default="", unique=True)
 
     def __str__(self):
         return self.name
@@ -126,19 +135,26 @@ class Tissue(models.Model):
 class Disorder(models.Model):
     id = models.AutoField(primary_key=True)
     mondo_id = models.CharField(max_length=7)
-    label = models.CharField(max_length=256, default='')  # symbol
-    icd10 = models.CharField(max_length=512, default='')
+    label = models.CharField(max_length=256, default="")  # symbol
+    icd10 = models.CharField(max_length=512, default="")
     proteins = models.ManyToManyField(
-        'Protein', through='ProteinDisorderAssociation', related_name='associated_proteins')
+        "Protein",
+        through="ProteinDisorderAssociation",
+        related_name="associated_proteins",
+    )
 
     class Meta:
-        unique_together = ('mondo_id', 'label', 'icd10')
+        unique_together = ("mondo_id", "label", "icd10")
 
     def __str__(self):
         return self.label
 
     def __eq__(self, other):
-        return self.mondo_id == other.mondo_id and self.label == other.label and self.icd10 == other.icd10
+        return (
+            self.mondo_id == other.mondo_id
+            and self.label == other.label
+            and self.icd10 == other.icd10
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -155,17 +171,21 @@ class Disorder(models.Model):
 class Drug(models.Model):
     id = models.AutoField(primary_key=True)
     drug_id = models.CharField(max_length=10, unique=True)
-    name = models.CharField(max_length=256, default='')
-    status = models.CharField(max_length=128, default='')
+    name = models.CharField(max_length=256, default="")
+    status = models.CharField(max_length=128, default="")
     # in_trial = models.BooleanField(default=False)
     # in_literature = models.BooleanField(default=False)
-    links = models.CharField(max_length=16 * 1024, default='')
+    links = models.CharField(max_length=16 * 1024, default="")
 
     def __str__(self):
         return self.drug_id
 
     def __eq__(self, other):
-        return self.drug_id == other.drug_id and self.name == other.name and self.status == other.status
+        return (
+            self.drug_id == other.drug_id
+            and self.name == other.name
+            and self.status == other.status
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -183,19 +203,27 @@ class Drug(models.Model):
 class ProteinDisorderAssociation(models.Model):
     id = models.BigAutoField(primary_key=True)
     pdis_dataset = models.ForeignKey(
-        'PDisDataset', null=True, on_delete=models.CASCADE, related_name='pdis_dataset_relation')
-    protein = models.ForeignKey('Protein', on_delete=models.CASCADE)
-    disorder = models.ForeignKey('Disorder', on_delete=models.CASCADE)
+        "PDisDataset",
+        null=True,
+        on_delete=models.CASCADE,
+        related_name="pdis_dataset_relation",
+    )
+    protein = models.ForeignKey("Protein", on_delete=models.CASCADE)
+    disorder = models.ForeignKey("Disorder", on_delete=models.CASCADE)
     score = models.FloatField()
 
     class Meta:
-        unique_together = ('pdis_dataset', 'protein', 'disorder')
+        unique_together = ("pdis_dataset", "protein", "disorder")
 
     def __str__(self):
-        return f'{self.pdis_dataset}-{self.protein}-{self.disorder}'
+        return f"{self.pdis_dataset}-{self.protein}-{self.disorder}"
 
     def __eq__(self, other):
-        return self.pdis_dataset_id == other.pdis_dataset_id and self.protein_id == other.protein_id and self.disorder_id == other.disorder_id
+        return (
+            self.pdis_dataset_id == other.pdis_dataset_id
+            and self.protein_id == other.protein_id
+            and self.disorder_id == other.disorder_id
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -207,18 +235,26 @@ class ProteinDisorderAssociation(models.Model):
 class DrugDisorderIndication(models.Model):
     id = models.AutoField(primary_key=True)
     drdi_dataset = models.ForeignKey(
-        'DrDiDataset', null=True, on_delete=models.CASCADE, related_name='drdi_dataset_relation')
-    drug = models.ForeignKey('Drug', on_delete=models.CASCADE)
-    disorder = models.ForeignKey('Disorder', on_delete=models.CASCADE)
+        "DrDiDataset",
+        null=True,
+        on_delete=models.CASCADE,
+        related_name="drdi_dataset_relation",
+    )
+    drug = models.ForeignKey("Drug", on_delete=models.CASCADE)
+    disorder = models.ForeignKey("Disorder", on_delete=models.CASCADE)
 
     class Meta:
-        unique_together = ('drdi_dataset', 'drug', 'disorder')
+        unique_together = ("drdi_dataset", "drug", "disorder")
 
     def __str__(self):
-        return f'{self.drdi_dataset}-{self.drug}-{self.disorder}'
+        return f"{self.drdi_dataset}-{self.drug}-{self.disorder}"
 
     def __eq__(self, other):
-        return self.drdi_dataset_id == other.drdi_dataset_id and self.drug_id == other.drug_id and self.disorder_id == other.disorder_id
+        return (
+            self.drdi_dataset_id == other.drdi_dataset_id
+            and self.drug_id == other.drug_id
+            and self.disorder_id == other.disorder_id
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -230,34 +266,46 @@ class DrugDisorderIndication(models.Model):
 class ProteinProteinInteraction(models.Model):
     id = models.BigAutoField(primary_key=True)
     ppi_dataset = models.ForeignKey(
-        'PPIDataset', null=True, on_delete=models.CASCADE, related_name='ppi_dataset_relation')
-    from_protein = models.ForeignKey('Protein', on_delete=models.CASCADE, related_name='interacting_proteins_out')
-    to_protein = models.ForeignKey('Protein', on_delete=models.CASCADE, related_name='interacting_proteins_in')
+        "PPIDataset",
+        null=True,
+        on_delete=models.CASCADE,
+        related_name="ppi_dataset_relation",
+    )
+    from_protein = models.ForeignKey(
+        "Protein", on_delete=models.CASCADE, related_name="interacting_proteins_out"
+    )
+    to_protein = models.ForeignKey(
+        "Protein", on_delete=models.CASCADE, related_name="interacting_proteins_in"
+    )
 
     def validate_unique(self, exclude=None):
         p1p2_q = ProteinProteinInteraction.objects.filter(
             from_protein=self.from_protein,
             to_protein=self.to_protein,
-            ppi_dataset=self.ppi_dataset
+            ppi_dataset=self.ppi_dataset,
         )
         p2p1_q = ProteinProteinInteraction.objects.filter(
             from_protein=self.to_protein,
             to_protein=self.from_protein,
-            ppi_dataset=self.ppi_dataset
+            ppi_dataset=self.ppi_dataset,
         )
 
         if p1p2_q.exists() or p2p1_q.exists():
-            raise ValidationError('Protein-Protein interaction must be unique!')
+            raise ValidationError("Protein-Protein interaction must be unique!")
 
     def save(self, *args, **kwargs):
         self.validate_unique()
         super(ProteinProteinInteraction, self).save(*args, **kwargs)
 
     def __str__(self):
-        return f'{self.ppi_dataset}-{self.from_protein}-{self.to_protein}'
+        return f"{self.ppi_dataset}-{self.from_protein}-{self.to_protein}"
 
     def __eq__(self, other):
-        return self.ppi_dataset_id == other.ppi_dataset_id and self.from_protein_id == other.from_protein_id and self.to_protein_id == other.to_protein_id
+        return (
+            self.ppi_dataset_id == other.ppi_dataset_id
+            and self.from_protein_id == other.from_protein_id
+            and self.to_protein_id == other.to_protein_id
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -269,18 +317,26 @@ class ProteinProteinInteraction(models.Model):
 class ProteinDrugInteraction(models.Model):
     id = models.BigAutoField(primary_key=True)
     pdi_dataset = models.ForeignKey(
-        PDIDataset, null=True, on_delete=models.CASCADE, related_name='pdi_dataset_relation')
-    protein = models.ForeignKey('Protein', on_delete=models.CASCADE)
-    drug = models.ForeignKey('Drug', on_delete=models.CASCADE)
+        PDIDataset,
+        null=True,
+        on_delete=models.CASCADE,
+        related_name="pdi_dataset_relation",
+    )
+    protein = models.ForeignKey("Protein", on_delete=models.CASCADE)
+    drug = models.ForeignKey("Drug", on_delete=models.CASCADE)
 
     class Meta:
-        unique_together = ('pdi_dataset', 'protein', 'drug')
+        unique_together = ("pdi_dataset", "protein", "drug")
 
     def __str__(self):
-        return f'{self.pdi_dataset}-{self.protein}-{self.drug}'
+        return f"{self.pdi_dataset}-{self.protein}-{self.drug}"
 
     def __eq__(self, other):
-        return self.pdi_dataset_id == other.pdi_dataset_id and self.protein_id == other.protein_id and self.drug_id == other.drug_id
+        return (
+            self.pdi_dataset_id == other.pdi_dataset_id
+            and self.protein_id == other.protein_id
+            and self.drug_id == other.drug_id
+        )
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -292,7 +348,9 @@ class ProteinDrugInteraction(models.Model):
 class Task(models.Model):
     token = models.CharField(max_length=32, unique=True, primary_key=True)
     created_at = models.DateTimeField(auto_now_add=True)
-    target = models.CharField(max_length=32, choices=[('drug', 'Drug'), ('drug-target', 'Drug Target')])
+    target = models.CharField(
+        max_length=32, choices=[("drug", "Drug"), ("drug-target", "Drug Target")]
+    )
 
     algorithm = models.CharField(max_length=128)
     parameters = models.TextField()
@@ -312,7 +370,7 @@ class Task(models.Model):
 class Network(models.Model):
     id = models.CharField(primary_key=True, max_length=32, unique=True)
     created_at = models.DateTimeField(auto_now_add=True)
-    nodes = models.TextField(null=True, default='')
-    edges = models.TextField(null=True, default='')
-    config = models.TextField(null=True, default='')
-    groups = models.TextField(null=True, default='')
+    nodes = models.TextField(null=True, default="")
+    edges = models.TextField(null=True, default="")
+    config = models.TextField(null=True, default="")
+    groups = models.TextField(null=True, default="")
diff --git a/drugstone/serializers.py b/drugstone/serializers.py
index 3805f42..1603305 100755
--- a/drugstone/serializers.py
+++ b/drugstone/serializers.py
@@ -2,31 +2,43 @@
 import json
 from rest_framework import serializers
 from drugstone import models
-from drugstone.models import Protein, Task, Drug, ProteinDrugInteraction, \
-    Tissue, ProteinProteinInteraction, Network, ProteinDisorderAssociation, Disorder, DrugDisorderIndication
+from drugstone.models import (
+    Protein,
+    Task,
+    Drug,
+    ProteinDrugInteraction,
+    Tissue,
+    ProteinProteinInteraction,
+    Network,
+    ProteinDisorderAssociation,
+    Disorder,
+    DrugDisorderIndication,
+)
 
 
 class PDIDatasetSerializer(serializers.ModelSerializer):
     class Meta:
         model = models.PDIDataset
-        fields = '__all__'
+        fields = "__all__"
 
 
 class PPIDatasetSerializer(serializers.ModelSerializer):
     class Meta:
         model = models.PPIDataset
-        fields = '__all__'
+        fields = "__all__"
+
 
 class PDisDatasetSerializer(serializers.ModelSerializer):
     class Meta:
         model = models.PDisDataset
-        fields = '__all__'
+        fields = "__all__"
 
 
 class DrDisDatasetSerializer(serializers.ModelSerializer):
     class Meta:
         model = models.DrDiDataset
-        fields = '__all__'
+        fields = "__all__"
+
 
 class ProteinNodeSerializer(serializers.ModelSerializer):
     drugstone_id = serializers.SerializerMethodField()
@@ -36,7 +48,7 @@ class ProteinNodeSerializer(serializers.ModelSerializer):
     entrez = serializers.SerializerMethodField()
 
     def get_drugstone_id(self, obj):
-        return [f'p{obj.id}']
+        return [f"p{obj.id}"]
 
     def get_uniprot(self, obj):
         return [obj.uniprot_code]
@@ -44,7 +56,7 @@ class ProteinNodeSerializer(serializers.ModelSerializer):
     def get_symbol(self, obj):
         return [obj.gene]
 
-    def get_entrez(self,obj):
+    def get_entrez(self, obj):
         return [obj.entrez]
 
     def get_ensg(self, obj) -> str:
@@ -61,7 +73,7 @@ class ProteinNodeSerializer(serializers.ModelSerializer):
 
     class Meta:
         model = Protein
-        fields = ['drugstone_id', 'uniprot', 'symbol', 'protein_name', 'entrez', 'ensg']
+        fields = ["drugstone_id", "uniprot", "symbol", "protein_name", "entrez", "ensg"]
 
 
 class ProteinSerializer(serializers.ModelSerializer):
@@ -71,7 +83,7 @@ class ProteinSerializer(serializers.ModelSerializer):
     ensg = serializers.SerializerMethodField()
 
     def get_drugstone_id(self, obj):
-        return f'p{obj.id}'
+        return f"p{obj.id}"
 
     def get_uniprot(self, obj):
         return obj.uniprot_code
@@ -93,7 +105,7 @@ class ProteinSerializer(serializers.ModelSerializer):
 
     class Meta:
         model = Protein
-        fields = ['drugstone_id', 'uniprot', 'symbol', 'protein_name', 'entrez', 'ensg']
+        fields = ["drugstone_id", "uniprot", "symbol", "protein_name", "entrez", "ensg"]
 
 
 class DrugSerializer(serializers.ModelSerializer):
@@ -102,17 +114,17 @@ class DrugSerializer(serializers.ModelSerializer):
     label = serializers.SerializerMethodField()
 
     def get_drugstone_id(self, obj):
-        return f'dr{obj.id}'
+        return f"dr{obj.id}"
 
     def get_trial_links(self, obj):
-        return [] if obj.links == '' else obj.links.split(';')
+        return [] if obj.links == "" else obj.links.split(";")
 
     def get_label(self, obj):
         return obj.name
 
     class Meta:
         model = Drug
-        fields = ['drugstone_id', 'drug_id', 'label', 'status', 'trial_links']
+        fields = ["drugstone_id", "drug_id", "label", "status", "trial_links"]
 
 
 class DisorderSerializer(serializers.ModelSerializer):
@@ -121,17 +133,17 @@ class DisorderSerializer(serializers.ModelSerializer):
     disorder_id = serializers.SerializerMethodField()
 
     def get_drugstone_id(self, obj):
-        return f'di{obj.id}'
+        return f"di{obj.id}"
 
     def get_icd_10(self, obj):
-        return obj.icd10[1:len(obj.icd10)-1].split(',')
+        return obj.icd10[1 : len(obj.icd10) - 1].split(",")
 
     def get_disorder_id(self, obj):
         return obj.mondo_id
 
     class Meta:
         model = Disorder
-        fields = ['drugstone_id', 'label', 'icd_10', 'disorder_id']
+        fields = ["drugstone_id", "label", "icd_10", "disorder_id"]
 
 
 class ProteinProteinInteractionSerializer(serializers.ModelSerializer):
@@ -143,14 +155,14 @@ class ProteinProteinInteractionSerializer(serializers.ModelSerializer):
         return obj.ppi_dataset.name
 
     def get_protein_a(self, obj):
-        return f'p{obj.from_protein.id}'
+        return f"p{obj.from_protein.id}"
 
     def get_protein_b(self, obj):
-        return f'p{obj.to_protein.id}'
+        return f"p{obj.to_protein.id}"
 
     class Meta:
         model = ProteinProteinInteraction
-        fields = ['dataset', 'protein_a', 'protein_b']
+        fields = ["dataset", "protein_a", "protein_b"]
 
 
 class ProteinDrugInteractionSerializer(serializers.ModelSerializer):
@@ -162,14 +174,14 @@ class ProteinDrugInteractionSerializer(serializers.ModelSerializer):
         return obj.pdi_dataset.name
 
     def get_protein(self, obj):
-        return f'p{obj.protein.id}'
+        return f"p{obj.protein.id}"
 
     def get_drug(self, obj):
-        return f'dr{obj.drug.id}'
+        return f"dr{obj.drug.id}"
 
     class Meta:
         model = ProteinDrugInteraction
-        fields = ['dataset', 'protein', 'drug']
+        fields = ["dataset", "protein", "drug"]
 
 
 class ProteinDisorderAssociationSerializer(serializers.ModelSerializer):
@@ -182,17 +194,17 @@ class ProteinDisorderAssociationSerializer(serializers.ModelSerializer):
         return obj.pdis_dataset.name
 
     def get_protein(self, obj):
-        return f'p{obj.protein.id}'
+        return f"p{obj.protein.id}"
 
     def get_disorder(self, obj):
-        return f'di{obj.disorder.id}'
+        return f"di{obj.disorder.id}"
 
     def get_score(self, obj):
         return float(obj.score)
 
     class Meta:
         model = ProteinDisorderAssociation
-        fields = ['dataset', 'protein', 'disorder', 'score']
+        fields = ["dataset", "protein", "disorder", "score"]
 
 
 class DrugDisorderIndicationSerializer(serializers.ModelSerializer):
@@ -204,14 +216,14 @@ class DrugDisorderIndicationSerializer(serializers.ModelSerializer):
         return obj.drdi_dataset.name
 
     def get_drug(self, obj):
-        return f'dr{obj.drug.id}'
+        return f"dr{obj.drug.id}"
 
     def get_disorder(self, obj):
-        return f'di{obj.disorder.id}'
+        return f"di{obj.disorder.id}"
 
     class Meta:
         model = DrugDisorderIndication
-        fields = ['dataset', 'drug', 'disorder']
+        fields = ["dataset", "drug", "disorder"]
 
 
 class TaskSerializer(serializers.ModelSerializer):
@@ -222,8 +234,20 @@ class TaskSerializer(serializers.ModelSerializer):
 
     class Meta:
         model = Task
-        fields = ['algorithm', 'target', 'parameters', 'job_id', 'worker_id', 'progress', 'status', 'created_at',
-                  'started_at', 'finished_at', 'done', 'failed']
+        fields = [
+            "algorithm",
+            "target",
+            "parameters",
+            "job_id",
+            "worker_id",
+            "progress",
+            "status",
+            "created_at",
+            "started_at",
+            "finished_at",
+            "done",
+            "failed",
+        ]
 
 
 class NetworkSerializer(serializers.ModelSerializer):
@@ -233,7 +257,8 @@ class NetworkSerializer(serializers.ModelSerializer):
 
     class Meta:
         model = Network
-        fields = '__all__'
+        fields = "__all__"
+
 
 #    def get_nodes(self,obj):
 #        return json.loads(obj.nodes)
@@ -248,16 +273,25 @@ class NetworkSerializer(serializers.ModelSerializer):
 class TaskStatusSerializer(serializers.ModelSerializer):
     class Meta:
         model = Task
-        fields = ['algorithm', 'target', 'progress', 'status', 'created_at', 'started_at', 'finished_at', 'done',
-                  'failed']
+        fields = [
+            "algorithm",
+            "target",
+            "progress",
+            "status",
+            "created_at",
+            "started_at",
+            "finished_at",
+            "done",
+            "failed",
+        ]
 
 
 class TissueSerializer(serializers.ModelSerializer):
     drugstone_id = serializers.SerializerMethodField()
 
     def get_drugstone_id(self, obj):
-        return f'{obj.id}'
+        return f"{obj.id}"
 
     class Meta:
         model = Tissue
-        fields = ['drugstone_id', 'name']
+        fields = ["drugstone_id", "name"]
diff --git a/drugstone/settings/settings.py b/drugstone/settings/settings.py
index 7ac2439..460dba9 100755
--- a/drugstone/settings/settings.py
+++ b/drugstone/settings/settings.py
@@ -21,77 +21,77 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
 
 # SECURITY WARNING: keep the secret key used in production secret!
-SECRET_KEY = os.environ.get('SECRET_KEY')
+SECRET_KEY = os.environ.get("SECRET_KEY")
 
 # SECURITY WARNING: don't run with debug turned on in production!
-DEBUG = os.environ.get('DEBUG') == '1'
+DEBUG = os.environ.get("DEBUG") == "1"
 
 ALLOWED_HOSTS = [
-    'localhost',
-    '127.0.0.1',
-    'drugstone-backend.zbh.uni-hamburg.de',
-    'drugst.one'
+    "localhost",
+    "127.0.0.1",
+    "drugstone-backend.zbh.uni-hamburg.de",
+    "drugst.one",
 ]
 
 
 # Application definition
 
 INSTALLED_APPS = [
-    'django.contrib.admin',
-    'django.contrib.auth',
-    'django.contrib.contenttypes',
-    'django.contrib.sessions',
-    'django.contrib.messages',
-    'django.contrib.staticfiles',
-    'corsheaders',
-    'drugstone',
-    'rest_framework',
+    "django.contrib.admin",
+    "django.contrib.auth",
+    "django.contrib.contenttypes",
+    "django.contrib.sessions",
+    "django.contrib.messages",
+    "django.contrib.staticfiles",
+    "corsheaders",
+    "drugstone",
+    "rest_framework",
 ]
 
 MIDDLEWARE = [
-    'corsheaders.middleware.CorsMiddleware',
-    'django.middleware.security.SecurityMiddleware',
-    'django.contrib.sessions.middleware.SessionMiddleware',
-    'django.middleware.common.BrokenLinkEmailsMiddleware',
-    'django.middleware.common.CommonMiddleware',
-    'django.middleware.csrf.CsrfViewMiddleware',
-    'django.contrib.auth.middleware.AuthenticationMiddleware',
-    'django.contrib.messages.middleware.MessageMiddleware',
-    'django.middleware.clickjacking.XFrameOptionsMiddleware',
+    "corsheaders.middleware.CorsMiddleware",
+    "django.middleware.security.SecurityMiddleware",
+    "django.contrib.sessions.middleware.SessionMiddleware",
+    "django.middleware.common.BrokenLinkEmailsMiddleware",
+    "django.middleware.common.CommonMiddleware",
+    "django.middleware.csrf.CsrfViewMiddleware",
+    "django.contrib.auth.middleware.AuthenticationMiddleware",
+    "django.contrib.messages.middleware.MessageMiddleware",
+    "django.middleware.clickjacking.XFrameOptionsMiddleware",
 ]
 
-ROOT_URLCONF = 'drugstone.urls'
+ROOT_URLCONF = "drugstone.urls"
 
 TEMPLATES = [
     {
-        'BACKEND': 'django.template.backends.django.DjangoTemplates',
-        'DIRS': [],
-        'APP_DIRS': True,
-        'OPTIONS': {
-            'context_processors': [
-                'django.template.context_processors.debug',
-                'django.template.context_processors.request',
-                'django.contrib.auth.context_processors.auth',
-                'django.contrib.messages.context_processors.messages',
+        "BACKEND": "django.template.backends.django.DjangoTemplates",
+        "DIRS": [],
+        "APP_DIRS": True,
+        "OPTIONS": {
+            "context_processors": [
+                "django.template.context_processors.debug",
+                "django.template.context_processors.request",
+                "django.contrib.auth.context_processors.auth",
+                "django.contrib.messages.context_processors.messages",
             ],
         },
     },
 ]
 
-WSGI_APPLICATION = 'drugstone.wsgi.application'
+WSGI_APPLICATION = "drugstone.wsgi.application"
 
 
 # Database
 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
 
 DATABASES = {
-    'default': {
-        'ENGINE': os.environ.get('SQL_ENGINE', 'django.db.backends.postgresql'),
-        'NAME': os.environ.get('SQL_DATABASE'),
-        'USER': os.environ.get('SQL_USER'),
-        'PASSWORD': os.environ.get('SQL_PASSWORD'),
-        'HOST': os.environ.get('SQL_HOST'),
-        'PORT': os.environ.get('SQL_PORT'),
+    "default": {
+        "ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.postgresql"),
+        "NAME": os.environ.get("SQL_DATABASE"),
+        "USER": os.environ.get("SQL_USER"),
+        "PASSWORD": os.environ.get("SQL_PASSWORD"),
+        "HOST": os.environ.get("SQL_HOST"),
+        "PORT": os.environ.get("SQL_PORT"),
     }
 }
 
@@ -101,16 +101,16 @@ DATABASES = {
 
 AUTH_PASSWORD_VALIDATORS = [
     {
-        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
     },
     {
-        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
     },
     {
-        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
     },
     {
-        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
     },
 ]
 
@@ -118,9 +118,9 @@ AUTH_PASSWORD_VALIDATORS = [
 # Internationalization
 # https://docs.djangoproject.com/en/3.0/topics/i18n/
 
-LANGUAGE_CODE = 'en-us'
+LANGUAGE_CODE = "en-us"
 
-TIME_ZONE = 'UTC'
+TIME_ZONE = "UTC"
 
 USE_I18N = True
 
@@ -133,50 +133,46 @@ CORS_ORIGIN_ALLOW_ALL = True
 # Static files (CSS, JavaScript, Images)
 # https://docs.djangoproject.com/en/3.0/howto/static-files/
 
-STATIC_URL = '/static/'
+STATIC_URL = "/static/"
 
 
 REST_FRAMEWORK = {
-
-    'DEFAULT_RENDERER_CLASSES': (
-        'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
-        'djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer',
+    "DEFAULT_RENDERER_CLASSES": (
+        "djangorestframework_camel_case.render.CamelCaseJSONRenderer",
+        "djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer",
     ),
-
-    'DEFAULT_PARSER_CLASSES': (
-        'djangorestframework_camel_case.parser.CamelCaseJSONParser',
+    "DEFAULT_PARSER_CLASSES": (
+        "djangorestframework_camel_case.parser.CamelCaseJSONParser",
     ),
-
-    'DEFAULT_PERMISSION_CLASSES': [
-       'rest_framework.permissions.AllowAny',
-    ]
-
+    "DEFAULT_PERMISSION_CLASSES": [
+        "rest_framework.permissions.AllowAny",
+    ],
 }
 
-SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+SESSION_ENGINE = "django.contrib.sessions.backends.cache"
 
 CACHES = {
-    'default': {
-        'BACKEND': 'django_redis.cache.RedisCache',
-        'LOCATION': f'redis://{os.environ.get("REDIS_HOST")}: \
+    "default": {
+        "BACKEND": "django_redis.cache.RedisCache",
+        "LOCATION": f'redis://{os.environ.get("REDIS_HOST")}: \
             {os.environ.get("REDIS_PORT")}/1',
-        'OPTIONS': {
-            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
-        }
+        "OPTIONS": {
+            "CLIENT_CLASS": "django_redis.client.DefaultClient",
+        },
     }
 }
 
 EMAIL_HOST = "exchange.uni-hamburg.de"
 EMAIL_PORT = 587
-EMAIL_HOST_USER="taa0368"
+EMAIL_HOST_USER = "taa0368"
 EMAIL_ADDRESS = "tools-cosybio.zbh@uni-hamburg.de"
-EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
+EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")
 EMAIL_USE_SSL = False
 EMAIL_USE_TLS = True
 
-CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
+CELERY_BROKER_URL = os.environ.get("CELERY_BROKER_URL")
 # timezones: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
-CELERY_TIMEZONE = 'Europe/Berlin'
+CELERY_TIMEZONE = "Europe/Berlin"
 
 
-DEFAULTS = { 'ppi': 'NeDRex', 'pdi': 'NeDRex', 'pdis': 'NeDRex', 'drdi': 'NeDRex'}
+DEFAULTS = {"ppi": "NeDRex", "pdi": "NeDRex", "pdis": "NeDRex", "drdi": "NeDRex"}
diff --git a/drugstone/urls.py b/drugstone/urls.py
index eb547c7..fff8b1a 100755
--- a/drugstone/urls.py
+++ b/drugstone/urls.py
@@ -16,34 +16,54 @@ Including another URLconf
 from django.contrib import admin
 from django.urls import path
 
-from drugstone.views import map_nodes, tasks_view, result_view, \
-    graph_export, TissueView, TissueExpressionView, query_tissue_proteins, TaskView, \
-    adjacent_drugs, adjacent_disorders, fetch_edges, create_network, load_network, get_license, get_datasets, \
-    get_max_tissue_expression, convert_compact_ids, get_default_params, send_bugreport, save_selection, get_view, get_view_infos
+from drugstone.views import (
+    map_nodes,
+    tasks_view,
+    result_view,
+    graph_export,
+    TissueView,
+    TissueExpressionView,
+    query_tissue_proteins,
+    TaskView,
+    adjacent_drugs,
+    adjacent_disorders,
+    fetch_edges,
+    create_network,
+    load_network,
+    get_license,
+    get_datasets,
+    get_max_tissue_expression,
+    convert_compact_ids,
+    get_default_params,
+    send_bugreport,
+    save_selection,
+    get_view,
+    get_view_infos,
+)
 
 # cache time is 6 hours
 urlpatterns = [
-    path('get_datasets/', get_datasets),
-    path('map_nodes/', map_nodes),
-    path('convert_compact_node_list/', convert_compact_ids),
-    path('fetch_edges/', fetch_edges),
-    path('task/', TaskView.as_view()),
-    path('tasks/', tasks_view),
-    path('task_result/', result_view),
-    path('graph_export/', graph_export),
-    path('query_tissue_proteins/', query_tissue_proteins),
-    path('adjacent_drugs/', adjacent_drugs),
-    path('adjacent_disorders/', adjacent_disorders),
-    path('tissue_expression/', TissueExpressionView.as_view()),
-    path('tissue_max_expression/', get_max_tissue_expression),
-    path('tissues/', TissueView.as_view()),
-    path('admin/', admin.site.urls),
-    path('create_network', create_network),
-    path('load_network', load_network),
-    path('get_default_params', get_default_params),
-    path('get_license', get_license),
-    path('send_bugreport/', send_bugreport),
-    path('save_selection', save_selection),
-    path('view/', get_view),
-    path('view_infos', get_view_infos)
+    path("get_datasets/", get_datasets),
+    path("map_nodes/", map_nodes),
+    path("convert_compact_node_list/", convert_compact_ids),
+    path("fetch_edges/", fetch_edges),
+    path("task/", TaskView.as_view()),
+    path("tasks/", tasks_view),
+    path("task_result/", result_view),
+    path("graph_export/", graph_export),
+    path("query_tissue_proteins/", query_tissue_proteins),
+    path("adjacent_drugs/", adjacent_drugs),
+    path("adjacent_disorders/", adjacent_disorders),
+    path("tissue_expression/", TissueExpressionView.as_view()),
+    path("tissue_max_expression/", get_max_tissue_expression),
+    path("tissues/", TissueView.as_view()),
+    path("admin/", admin.site.urls),
+    path("create_network", create_network),
+    path("load_network", load_network),
+    path("get_default_params", get_default_params),
+    path("get_license", get_license),
+    path("send_bugreport/", send_bugreport),
+    path("save_selection", save_selection),
+    path("view/", get_view),
+    path("view_infos", get_view_infos),
 ]
diff --git a/drugstone/views.py b/drugstone/views.py
index 366af9f..7e9f32f 100755
--- a/drugstone/views.py
+++ b/drugstone/views.py
@@ -4,25 +4,30 @@ import string
 import time
 import uuid
 from collections import defaultdict
-from functools import reduce
-
 import pandas as pd
-
 import networkx as nx
 from django.http import HttpResponse
 from django.db.models import Q, Max
 from django.db import IntegrityError
-from rest_framework import status
 from rest_framework.decorators import api_view
 from rest_framework.response import Response
 from rest_framework.views import APIView
 
 from drugstone.util.mailer import bugreport
-from drugstone.util.query_db import query_proteins_by_identifier, clean_proteins_from_compact_notation
+from drugstone.util.query_db import (
+    query_proteins_by_identifier,
+    clean_proteins_from_compact_notation,
+)
 
 from drugstone.models import *
 from drugstone.serializers import *
-from drugstone.backend_tasks import start_task, refresh_from_redis, task_stats, task_result, task_parameters
+from drugstone.backend_tasks import (
+    start_task,
+    refresh_from_redis,
+    task_stats,
+    task_result,
+    task_parameters,
+)
 
 from drugstone.settings import DEFAULTS
 
@@ -42,87 +47,109 @@ def get_pdi_ds(source, licenced):
 
 
 def get_pdis_ds(source, licenced):
-    ds = models.PDisDataset.objects.filter(name__iexact=source, licenced=licenced).last()
+    ds = models.PDisDataset.objects.filter(
+        name__iexact=source, licenced=licenced
+    ).last()
     if ds is None and licenced:
         return get_pdis_ds(source, False)
     return ds
 
 
 def get_drdis_ds(source, licenced):
-    ds = models.DrDiDataset.objects.filter(name__iexact=source, licenced=licenced).last()
+    ds = models.DrDiDataset.objects.filter(
+        name__iexact=source, licenced=licenced
+    ).last()
     if ds is None and licenced:
         return get_drdis_ds(source, False)
     return ds
 
 
 class TaskView(APIView):
-
     def post(self, request) -> Response:
         chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
-        token_str = ''.join(random.choice(chars) for _ in range(32))
-        parameters = request.data['parameters']
-        licenced = parameters.get('licenced', False)
-        algorithm = request.data['algorithm']
+        token_str = "".join(random.choice(chars) for _ in range(32))
+        parameters = request.data["parameters"]
+        licenced = parameters.get("licenced", False)
+        algorithm = request.data["algorithm"]
 
         # find databases based on parameter strings
-        parameters['ppi_dataset'] = PPIDatasetSerializer().to_representation(
-            get_ppi_ds(parameters.get('ppi_dataset', DEFAULTS['ppi']), licenced))
+        parameters["ppi_dataset"] = PPIDatasetSerializer().to_representation(
+            get_ppi_ds(parameters.get("ppi_dataset", DEFAULTS["ppi"]), licenced)
+        )
 
-        parameters['pdi_dataset'] = PDIDatasetSerializer().to_representation(
-            get_pdi_ds(parameters.get('pdi_dataset', DEFAULTS['pdi']), licenced))
+        parameters["pdi_dataset"] = PDIDatasetSerializer().to_representation(
+            get_pdi_ds(parameters.get("pdi_dataset", DEFAULTS["pdi"]), licenced)
+        )
 
         # if algorithm in ['connect', 'connectSelected', 'quick', 'super']:
         #     parameters["num_trees"] = 5
         #     parameters["tolerance"] = 5
         #     parameters["hub_penalty"] = 0.5
 
-        task = Task.objects.create(token=token_str,
-                                   target=request.data['target'],
-                                   algorithm=algorithm,
-                                   parameters=json.dumps(parameters))
+        task = Task.objects.create(
+            token=token_str,
+            target=request.data["target"],
+            algorithm=algorithm,
+            parameters=json.dumps(parameters),
+        )
         start_task(task)
         task.save()
 
-        return Response({
-            'token': token_str,
-        })
+        return Response(
+            {
+                "token": token_str,
+            }
+        )
 
     def get(self, request) -> Response:
-        token_str = request.query_params['token']
+        token_str = request.query_params["token"]
         task = Task.objects.get(token=token_str)
 
         if not task.done and not task.failed:
             refresh_from_redis(task)
             task.save()
 
-        return Response({
-            'token': task.token,
-            'info': TaskSerializer().to_representation(task),
-            'stats': task_stats(task),
-        })
+        return Response(
+            {
+                "token": task.token,
+                "info": TaskSerializer().to_representation(task),
+                "stats": task_stats(task),
+            }
+        )
 
 
-@api_view(['GET'])
+@api_view(["GET"])
 def get_license(request) -> Response:
     from drugstone.management.includes.DatasetLoader import import_license
-    return Response({'license': import_license()})
 
+    return Response({"license": import_license()})
 
-@api_view(['GET'])
+
+@api_view(["GET"])
 def get_default_params(request) -> Response:
-    algorithm = request.GET.get('algorithm')
-    connect = {'algorithm': 'multisteiner', 'numTrees': 5, 'tolerance': 5, 'hubPenalty': 0.5}
-    quick = {'algorithm': 'closeness', 'result_size': 50, 'hub_penalty': 0, 'include_non_approved_drugs': False,
-             'include_indirect_drugs': False}
+    algorithm = request.GET.get("algorithm")
+    connect = {
+        "algorithm": "multisteiner",
+        "numTrees": 5,
+        "tolerance": 5,
+        "hubPenalty": 0.5,
+    }
+    quick = {
+        "algorithm": "closeness",
+        "result_size": 50,
+        "hub_penalty": 0,
+        "include_non_approved_drugs": False,
+        "include_indirect_drugs": False,
+    }
     resp = {}
-    if algorithm in ['quick', 'super', 'connect', 'connectSelected']:
-        resp['protein'] = connect
-    if algorithm in ['quick', 'super']:
-        resp['drug'] = quick
+    if algorithm in ["quick", "super", "connect", "connectSelected"]:
+        resp["protein"] = connect
+    if algorithm in ["quick", "super"]:
+        resp["drug"] = quick
     return Response(resp)
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def fetch_edges(request) -> Response:
     """Retrieves interactions between nodes given as a list of drugstone IDs.
 
@@ -133,32 +160,39 @@ def fetch_edges(request) -> Response:
     Returns:
         Response: List of edges which are objects with 'from' and 'to' attributes
     """
-    dataset = request.data.get('dataset', DEFAULTS['ppi'])
+    dataset = request.data.get("dataset", DEFAULTS["ppi"])
     drugstone_ids = set()
-    for node in request.data.get('nodes', '[]'):
-        if 'drugstone_id' in node:
-            if isinstance(node['drugstone_id'], list):
-                for id in node['drugstone_id']:
+    for node in request.data.get("nodes", "[]"):
+        if "drugstone_id" in node:
+            if isinstance(node["drugstone_id"], list):
+                for id in node["drugstone_id"]:
                     drugstone_ids.add(id[1:])
             else:
-                drugstone_ids.add(node['drugstone_id'])
-    licenced = request.data.get('licenced', False)
+                drugstone_ids.add(node["drugstone_id"])
+    licenced = request.data.get("licenced", False)
     dataset_object = get_ppi_ds(dataset, licenced)
     interaction_objects = models.ProteinProteinInteraction.objects.filter(
-        Q(ppi_dataset=dataset_object) & Q(from_protein__in=drugstone_ids) & Q(to_protein__in=drugstone_ids))
+        Q(ppi_dataset=dataset_object)
+        & Q(from_protein__in=drugstone_ids)
+        & Q(to_protein__in=drugstone_ids)
+    )
 
-    return Response(ProteinProteinInteractionSerializer(many=True).to_representation(interaction_objects))
+    return Response(
+        ProteinProteinInteractionSerializer(many=True).to_representation(
+            interaction_objects
+        )
+    )
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def convert_compact_ids(request) -> Response:
-    nodes = request.data.get('nodes', '[]')
-    identifier = request.data.get('identifier', '')
+    nodes = request.data.get("nodes", "[]")
+    identifier = request.data.get("identifier", "")
     cleaned = clean_proteins_from_compact_notation(nodes, identifier)
     return Response(cleaned)
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def map_nodes(request) -> Response:
     """Maps user given input nodes to Proteins in the django database.
     Further updates the node list given by the user by extending the matching proteins with information
@@ -174,23 +208,23 @@ def map_nodes(request) -> Response:
         Response: Updates node list.
     """
     # load data from request
-    nodes = request.data.get('nodes', '[]')
+    nodes = request.data.get("nodes", "[]")
 
     id_map = {}
     nodes_clean = []
     for node in nodes:
-        if not node['id']:
+        if not node["id"]:
             # skip empty node id ''
             continue
-        upper = node['id'].upper()
-        id_map[upper] = node['id']
-        node['id'] = upper
+        upper = node["id"].upper()
+        id_map[upper] = node["id"]
+        node["id"] = upper
         nodes_clean.append(node)
     nodes = nodes_clean
 
-    identifier = request.data.get('identifier', '')
+    identifier = request.data.get("identifier", "")
     # extract ids for filtering
-    node_ids = set([node['id'] for node in nodes])
+    node_ids = set([node["id"] for node in nodes])
 
     # query protein table
     nodes_mapped, id_key = query_proteins_by_identifier(node_ids, identifier)
@@ -207,55 +241,61 @@ def map_nodes(request) -> Response:
 
     # merge fetched data with given data to avoid data loss
     for node in nodes:
-        node['drugstoneType'] = 'other'
-        if node['id'] in nodes_mapped_dict:
-            node.update(nodes_mapped_dict[node['id']])
-            node['drugstoneType'] = 'protein'
-        node['id'] = id_map[node['id']]
+        node["drugstoneType"] = "other"
+        if node["id"] in nodes_mapped_dict:
+            node.update(nodes_mapped_dict[node["id"]])
+            node["drugstoneType"] = "protein"
+        node["id"] = id_map[node["id"]]
 
     # set label to node identifier if label is unset, otherwise
     # return list of nodes updated nodes
     return Response(nodes)
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def tasks_view(request) -> Response:
-    tokens = json.loads(request.data.get('tokens', '[]'))
-    tasks = Task.objects.filter(token__in=tokens).order_by('-created_at').all()
+    tokens = json.loads(request.data.get("tokens", "[]"))
+    tasks = Task.objects.filter(token__in=tokens).order_by("-created_at").all()
     tasks_info = []
     for task in tasks:
         if not task.done and not task.failed:
             refresh_from_redis(task)
             task.save()
 
-        tasks_info.append({
-            'token': task.token,
-            'info': TaskStatusSerializer().to_representation(task),
-            'stats': task_stats(task),
-        })
+        tasks_info.append(
+            {
+                "token": task.token,
+                "info": TaskStatusSerializer().to_representation(task),
+                "stats": task_stats(task),
+            }
+        )
     return Response(tasks_info)
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def create_network(request) -> Response:
-    if 'network' not in request.data:
+    if "network" not in request.data:
         return Response(None)
     else:
-        if 'nodes' not in request.data['network']:
-            request.data['network']['nodes'] = []
-        if 'edges' not in request.data['network']:
-            request.data['network']['edges'] = []
-    if 'config' not in request.data:
-        request.data['config'] = {}
-    if 'groups' not in request.data:
-        request.data['groups'] = {}
+        if "nodes" not in request.data["network"]:
+            request.data["network"]["nodes"] = []
+        if "edges" not in request.data["network"]:
+            request.data["network"]["edges"] = []
+    if "config" not in request.data:
+        request.data["config"] = {}
+    if "groups" not in request.data:
+        request.data["groups"] = {}
 
     id = uuid.uuid4().hex
     while True:
         try:
-            Network.objects.create(id=id, nodes=request.data['network']['nodes'],
-                                   edges=request.data['network']['edges'], config=request.data['config'],
-                                   groups=request.data['groups'])
+            Network.objects.create(
+                id=id,
+                nodes=request.data["network"]["nodes"],
+                edges=request.data["network"]["edges"],
+                config=request.data["config"],
+                groups=request.data["groups"],
+            )
             break
         except IntegrityError:
             id = uuid.uuid4().hex
@@ -274,75 +314,93 @@ def latest_datasets(ds):
     return dataset_dict.values()
 
 
-@api_view(['GET'])
+@api_view(["GET"])
 def get_datasets(request) -> Response:
     datasets = {}
-    datasets['protein-protein'] = PPIDatasetSerializer(many=True).to_representation(
-        latest_datasets(PPIDataset.objects.all()))
-    datasets['protein-drug'] = PDIDatasetSerializer(many=True).to_representation(
-        latest_datasets(PDIDataset.objects.all()))
-    datasets['protein-disorder'] = PDisDatasetSerializer(many=True).to_representation(
-        latest_datasets(PDisDataset.objects.all()))
-    datasets['drug-disorder'] = DrDisDatasetSerializer(many=True).to_representation(
-        latest_datasets(DrDiDataset.objects.all()))
+    datasets["protein-protein"] = PPIDatasetSerializer(many=True).to_representation(
+        latest_datasets(PPIDataset.objects.all())
+    )
+    datasets["protein-drug"] = PDIDatasetSerializer(many=True).to_representation(
+        latest_datasets(PDIDataset.objects.all())
+    )
+    datasets["protein-disorder"] = PDisDatasetSerializer(many=True).to_representation(
+        latest_datasets(PDisDataset.objects.all())
+    )
+    datasets["drug-disorder"] = DrDisDatasetSerializer(many=True).to_representation(
+        latest_datasets(DrDiDataset.objects.all())
+    )
     return Response(datasets)
 
 
-@api_view(['GET'])
+@api_view(["GET"])
 def load_network(request) -> Response:
-    network = NetworkSerializer().to_representation(Network.objects.get(id=request.query_params.get('id')))
-    result = {'network': {'nodes': json.loads(network['nodes'].replace("'", '"')),
-                          'edges': json.loads(network['edges'].replace("'", '"'))},
-              'config': json.loads(
-                  network['config'].replace("'", '"').replace('True', 'true').replace('False', 'false')),
-              'groups': json.loads(
-                  network['groups'].replace("'", '"').replace('True', 'true').replace('False', 'false'))}
+    network = NetworkSerializer().to_representation(
+        Network.objects.get(id=request.query_params.get("id"))
+    )
+    result = {
+        "network": {
+            "nodes": json.loads(network["nodes"].replace("'", '"')),
+            "edges": json.loads(network["edges"].replace("'", '"')),
+        },
+        "config": json.loads(
+            network["config"]
+            .replace("'", '"')
+            .replace("True", "true")
+            .replace("False", "false")
+        ),
+        "groups": json.loads(
+            network["groups"]
+            .replace("'", '"')
+            .replace("True", "true")
+            .replace("False", "false")
+        ),
+    }
     return Response(result)
 
 
 @api_view()
 def result_view(request) -> Response:
-    node_name_attribute = 'drugstone_id'
+    node_name_attribute = "drugstone_id"
 
-    view = request.query_params.get('view')
-    fmt = request.query_params.get('fmt')
-    token_str = request.query_params['token']
+    view = request.query_params.get("view")
+    fmt = request.query_params.get("fmt")
+    token_str = request.query_params["token"]
     task = Task.objects.get(token=token_str)
     result = task_result(task)
-    node_attributes = result.get('node_attributes')
+    node_attributes = result.get("node_attributes")
     if not node_attributes:
         node_attributes = {}
-        result['node_attributes'] = node_attributes
+        result["node_attributes"] = node_attributes
 
     proteins = []
     drugs = []
 
-    network = result['network']
-    node_types = node_attributes.get('node_types')
+    network = result["network"]
+    node_types = node_attributes.get("node_types")
     if not node_types:
         node_types = {}
-        node_attributes['node_types'] = node_types
+        node_attributes["node_types"] = node_types
 
-    is_seed = node_attributes.get('is_seed')
+    is_seed = node_attributes.get("is_seed")
     if not is_seed:
         is_seed = {}
-        node_attributes['is_seed'] = is_seed
-    scores = node_attributes.get('scores', {})
+        node_attributes["is_seed"] = is_seed
+    scores = node_attributes.get("scores", {})
     node_details = {}
     protein_id_map = defaultdict(set)
-    node_attributes['details'] = node_details
+    node_attributes["details"] = node_details
     parameters = json.loads(task.parameters)
-    seeds = parameters['seeds']
-    nodes = network['nodes']
+    seeds = parameters["seeds"]
+    nodes = network["nodes"]
 
     parameters = task_parameters(task)
     # attach input parameters to output
-    result['parameters'] = parameters
+    result["parameters"] = parameters
     identifier_nodes = set()
-    identifier = parameters['config']['identifier']
+    identifier = parameters["config"]["identifier"]
 
     # merge input network with result network
-    for node in parameters['input_network']['nodes']:
+    for node in parameters["input_network"]["nodes"]:
         # if node was already mapped, add user defined values to result of analysis
         if identifier in node:
             node_name = node[identifier][0]
@@ -356,30 +414,32 @@ def result_view(request) -> Response:
                 # append mapped input node to analysis result
                 nodes.append(node_name)
                 # manually add node to node types
-                result['node_attributes']['node_types'][node_name] = 'protein'
+                result["node_attributes"]["node_types"][node_name] = "protein"
         else:
             # node is custom node from user, not mapped to drugstone but will be displayed with all custom attributes
-            node_id = node['id']
+            node_id = node["id"]
             identifier_nodes.add(node_id)
             node_details[node_id] = node
             is_seed[node_id] = False
             # append custom node to analysis result later on
             # manually add node to node types
-            result['node_attributes']['node_types'][node_id] = 'custom'
+            result["node_attributes"]["node_types"][node_id] = "custom"
     # extend the analysis network by the input network nodes
     # map edge endpoints to database proteins if possible and add edges to analysis network
     protein_nodes = set()
     # mapping all new protein and drug nodes by drugstoneIDs + adding scores
     for node_id in nodes:
-        if node_id[:2] == 'dr':
-            node_data = DrugSerializer().to_representation(Drug.objects.get(id=int(node_id[2:])))
-            node_data['drugstoneType'] = 'drug'
+        if node_id[:2] == "dr":
+            node_data = DrugSerializer().to_representation(
+                Drug.objects.get(id=int(node_id[2:]))
+            )
+            node_data["drugstoneType"] = "drug"
             drugs.append(node_data)
             if node_id in scores:
-                node_data['score'] = scores.get(node_id, None)
-            node_types[node_id] = 'drug'
+                node_data["score"] = scores.get(node_id, None)
+            node_types[node_id] = "drug"
             node_details[node_id] = node_data
-        elif node_id[:2] != 'di':
+        elif node_id[:2] != "di":
             protein_nodes.add(node_id)
         else:
             continue
@@ -393,109 +453,143 @@ def result_view(request) -> Response:
         if node_id in nodes_mapped_dict:
             # node.update(nodes_mapped_dict[node['id']])
             node_data = nodes_mapped_dict[node_id]
-            node_data['drugstoneType'] = 'protein'
+            node_data["drugstoneType"] = "protein"
             # proteins.append(node_data)
             node_ident = node_data[identifier][0]
             # node_data[identifier] = [node_ident]
             protein_id_map[node_ident].add(node_id)
             identifier_nodes.add(node_ident)
-            is_seed[node_ident] = node_id in seeds or (is_seed[node_ident] if node_ident in is_seed else False)
-            node_types[node_ident] = 'protein'
+            is_seed[node_ident] = node_id in seeds or (
+                is_seed[node_ident] if node_ident in is_seed else False
+            )
+            node_types[node_ident] = "protein"
             score = scores.get(node_id, None)
             if node_ident in node_details:
                 data = node_details[node_ident]
-                data['score'] = [score] if score else None
+                data["score"] = [score] if score else None
             else:
-                node_data['score'] = score if score else None
-                node_data['drugstoneType'] = 'protein'
-                node_data['id'] = node_ident
-                node_data['label'] = node_ident
+                node_data["score"] = score if score else None
+                node_data["drugstoneType"] = "protein"
+                node_data["id"] = node_ident
+                node_data["label"] = node_ident
                 node_details[node_ident] = node_data
 
     for node_id, detail in node_details.items():
-        if 'drugstoneType' in detail and detail['drugstoneType'] == 'protein':
-            detail['symbol'] = list(set(detail['symbol'])) if 'symbol' in detail else []
-            detail['entrez'] = list(set(detail['entrez'])) if 'entrez' in detail else []
-            detail['uniprot'] = list(set(detail['uniprot'])) if 'uniprot' in detail else []
-            detail['ensg'] = list(set(detail['ensg'])) if 'ensg' in detail else []
+        if "drugstoneType" in detail and detail["drugstoneType"] == "protein":
+            detail["symbol"] = list(set(detail["symbol"])) if "symbol" in detail else []
+            detail["entrez"] = list(set(detail["entrez"])) if "entrez" in detail else []
+            detail["uniprot"] = (
+                list(set(detail["uniprot"])) if "uniprot" in detail else []
+            )
+            detail["ensg"] = list(set(detail["ensg"])) if "ensg" in detail else []
 
-    edges = parameters['input_network']['edges']
+    edges = parameters["input_network"]["edges"]
 
     edge_endpoint_ids = set()
 
     # TODO check for custom edges when working again with ensemble gene ids
     for edge in edges:
-        edge_endpoint_ids.add(edge['from'])
-        edge_endpoint_ids.add(edge['to'])
+        edge_endpoint_ids.add(edge["from"])
+        edge_endpoint_ids.add(edge["to"])
 
     nodes_mapped, id_key = query_proteins_by_identifier(edge_endpoint_ids, identifier)
 
-    if 'autofill_edges' in parameters['config'] and parameters['config']['autofill_edges']:
-        prots = list(filter(lambda n: n['drugstone_type'] == 'protein',
-                            filter(lambda n: 'drugstone_type' in n and node_name_attribute in n,
-                                   parameters['input_network']['nodes'])))
-
-        proteins = {node_name[1:] for node in prots for node_name in node[node_name_attribute]}
-        dataset = DEFAULTS['ppi'] if 'interaction_protein_protein' not in parameters['config'] else \
-            parameters['config'][
-                'interaction_protein_protein']
+    if (
+        "autofill_edges" in parameters["config"]
+        and parameters["config"]["autofill_edges"]
+    ):
+        prots = list(
+            filter(
+                lambda n: n["drugstone_type"] == "protein",
+                filter(
+                    lambda n: "drugstone_type" in n and node_name_attribute in n,
+                    parameters["input_network"]["nodes"],
+                ),
+            )
+        )
+
+        proteins = {
+            node_name[1:] for node in prots for node_name in node[node_name_attribute]
+        }
+        dataset = (
+            DEFAULTS["ppi"]
+            if "interaction_protein_protein" not in parameters["config"]
+            else parameters["config"]["interaction_protein_protein"]
+        )
         dataset_object = models.PPIDataset.objects.filter(name__iexact=dataset).last()
         interaction_objects = models.ProteinProteinInteraction.objects.filter(
-            Q(ppi_dataset=dataset_object) & Q(from_protein__in=proteins) & Q(to_protein__in=proteins))
-        auto_edges = list(map(lambda n: {"from": f'p{n.from_protein_id}', "to": f'p{n.to_protein_id}'},
-                              interaction_objects))
+            Q(ppi_dataset=dataset_object)
+            & Q(from_protein__in=proteins)
+            & Q(to_protein__in=proteins)
+        )
+        auto_edges = list(
+            map(
+                lambda n: {
+                    "from": f"p{n.from_protein_id}",
+                    "to": f"p{n.to_protein_id}",
+                },
+                interaction_objects,
+            )
+        )
         edges.extend(auto_edges)
 
-    result['network']['edges'].extend(edges)
+    result["network"]["edges"].extend(edges)
     uniq_edges = dict()
-    for edge in result['network']['edges']:
-        hash = edge['from'] + edge['to']
+    for edge in result["network"]["edges"]:
+        hash = edge["from"] + edge["to"]
         uniq_edges[hash] = edge
-    result['network']['edges'] = list(uniq_edges.values())
+    result["network"]["edges"] = list(uniq_edges.values())
 
-    if 'scores' in result['node_attributes']:
-        del result['node_attributes']['scores']
+    if "scores" in result["node_attributes"]:
+        del result["node_attributes"]["scores"]
 
     if not view:
         return Response(result)
     else:
-        if view == 'proteins':
+        if view == "proteins":
             proteins = list(
-                filter(lambda n: 'drugstone_type' in n and n['drugstone_type'] == 'protein', node_details.values()))
-            if fmt == 'csv':
+                filter(
+                    lambda n: "drugstone_type" in n
+                    and n["drugstone_type"] == "protein",
+                    node_details.values(),
+                )
+            )
+            if fmt == "csv":
                 items = []
                 for i in proteins:
                     new_i = {
-                        'id': i['id'],
-                        'uniprot': i['uniprot'] if 'uniprot' in i else [],
-                        'gene': i['symbol'] if 'symbol' in i else [],
-                        'name': i['protein_name'] if 'protein_name' in i else [],
-                        'ensembl': i['ensg'] if 'ensg' in i else [],
-                        'entrez': i['entrez'] if 'entrez' in i else [],
-                        'seed': is_seed[i['id']],
+                        "id": i["id"],
+                        "uniprot": i["uniprot"] if "uniprot" in i else [],
+                        "gene": i["symbol"] if "symbol" in i else [],
+                        "name": i["protein_name"] if "protein_name" in i else [],
+                        "ensembl": i["ensg"] if "ensg" in i else [],
+                        "entrez": i["entrez"] if "entrez" in i else [],
+                        "seed": is_seed[i["id"]],
                     }
-                    if 'score' in i:
-                        new_i['score'] = i['score']
+                    if "score" in i:
+                        new_i["score"] = i["score"]
                     items.append(new_i)
             else:
                 items = proteins
-        elif view == 'drugs':
-            if fmt == 'csv':
+        elif view == "drugs":
+            if fmt == "csv":
                 items = [i for i in drugs]
             else:
                 items = drugs
         else:
             return Response({})
 
-        if not fmt or fmt == 'json':
+        if not fmt or fmt == "json":
             return Response(items)
-        elif fmt == 'csv':
+        elif fmt == "csv":
             if len(items) != 0:
                 keys = items[0].keys()
             else:
                 keys = []
-            response = HttpResponse(content_type='text/csv')
-            response['Content-Disposition'] = f'attachment; filename="{task.token}_{view}.csv"'
+            response = HttpResponse(content_type="text/csv")
+            response[
+                "Content-Disposition"
+            ] = f'attachment; filename="{task.token}_{view}.csv"'
             dict_writer = csv.DictWriter(response, keys)
             dict_writer.writeheader()
             dict_writer.writerows(items)
@@ -504,20 +598,32 @@ def result_view(request) -> Response:
             return Response({})
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def graph_export(request) -> Response:
     """
     Recieve whole graph data and write it to graphml file. Return the
     file ready to download.
     """
-    remove_node_properties = ['color', 'shape', 'border_width', 'group', 'border_width_selected', 'shadow',
-                              'group_id', 'drugstone_type', 'font', 'x', 'y', '_group']
-    rename_node_properties = {'group_name': 'group'}
-    remove_edge_properties = ['group', 'color', 'dashes', 'shadow', 'id']
-    rename_edge_properties = {'group_name': 'group'}
-    nodes = request.data.get('nodes', [])
-    edges = request.data.get('edges', [])
-    fmt = request.data.get('fmt', 'graphml')
+    remove_node_properties = [
+        "color",
+        "shape",
+        "border_width",
+        "group",
+        "border_width_selected",
+        "shadow",
+        "group_id",
+        "drugstone_type",
+        "font",
+        "x",
+        "y",
+        "_group",
+    ]
+    rename_node_properties = {"group_name": "group"}
+    remove_edge_properties = ["group", "color", "dashes", "shadow", "id"]
+    rename_edge_properties = {"group_name": "group"}
+    nodes = request.data.get("nodes", [])
+    edges = request.data.get("edges", [])
+    fmt = request.data.get("fmt", "graphml")
     G = nx.Graph()
     node_map = dict()
     for node in nodes:
@@ -534,15 +640,15 @@ def graph_export(request) -> Response:
                 node[key] = json.dumps(node[key])
             elif node[key] is None:
                 # networkx has difficulties with None when writing graphml
-                node[key] = ''
+                node[key] = ""
         try:
-            node_name = node['label']
-            if 'drugstone_id' in node:
-                node_map[node['drugstone_id']] = node['label']
-            elif 'id' in node:
-                node_map[node['id']] = node['label']
+            node_name = node["label"]
+            if "drugstone_id" in node:
+                node_map[node["drugstone_id"]] = node["label"]
+            elif "id" in node:
+                node_map[node["id"]] = node["label"]
         except KeyError:
-            node_name = node['drugstone_id']
+            node_name = node["drugstone_id"]
         G.add_node(node_name, **node)
 
     for e in edges:
@@ -558,20 +664,20 @@ def graph_export(request) -> Response:
             if isinstance(e[key], list) or isinstance(e[key], dict):
                 e[key] = json.dumps(e[key])
             elif e[key] is None:
-                e[key] = ''
-        u_of_edge = e.pop('from')
+                e[key] = ""
+        u_of_edge = e.pop("from")
         u_of_edge = u_of_edge if u_of_edge not in node_map else node_map[u_of_edge]
-        v_of_edge = e.pop('to')
+        v_of_edge = e.pop("to")
         v_of_edge = node_map[v_of_edge] if v_of_edge in node_map else v_of_edge
         G.add_edge(u_of_edge, v_of_edge, **e)
 
-    if fmt == 'graphml':
+    if fmt == "graphml":
         data = nx.generate_graphml(G)
-        response = HttpResponse(data, content_type='application/xml')
-    elif fmt == 'json':
+        response = HttpResponse(data, content_type="application/xml")
+    elif fmt == "json":
         data = nx.readwrite.json_graph.node_link_data(G)
-        del data['graph']
-        del data['multigraph']
+        del data["graph"]
+        del data["multigraph"]
 
         # for node in data['nodes']:
         # for prop in remove_node_properties:
@@ -583,56 +689,78 @@ def graph_export(request) -> Response:
         #         del edge[prop]
         data["edges"] = data.pop("links")
         data = json.dumps(data)
-        data = data.replace('"{', '{').replace('}"', '}').replace('"[', '[').replace(']"', ']').replace('\\"', '"')
-        response = HttpResponse(data, content_type='application/json')
-    elif fmt == 'csv':
-        data = pd.DataFrame(nx.to_numpy_array(G), columns=G.nodes(), index=G.nodes(), dtype=int)
-        response = HttpResponse(data.to_csv(), content_type='text/csv')
-
-    response['content-disposition'] = f'attachment; filename="{int(time.time())}_network.{fmt}"'
+        data = (
+            data.replace('"{', "{")
+            .replace('}"', "}")
+            .replace('"[', "[")
+            .replace(']"', "]")
+            .replace('\\"', '"')
+        )
+        response = HttpResponse(data, content_type="application/json")
+    elif fmt == "csv":
+        data = pd.DataFrame(
+            nx.to_numpy_array(G), columns=G.nodes(), index=G.nodes(), dtype=int
+        )
+        response = HttpResponse(data.to_csv(), content_type="text/csv")
+
+    response[
+        "content-disposition"
+    ] = f'attachment; filename="{int(time.time())}_network.{fmt}"'
     return response
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def adjacent_disorders(request) -> Response:
     """Find all adjacent disorders to a list of proteins.
 
-       Args:
-           request (django.request): Request object with keys "proteins" and "pdi_dataset"
+    Args:
+        request (django.request): Request object with keys "proteins" (or "drugs") and "dataset"
 
-       Returns:
-           Response: With lists "pdis" (protein-drug-intersions) and "disorders"
-       """
+    Returns:
+        Response: With lists "edges" (protein-disorder associations) and "disorders"
+    """
     data = request.data
-    if 'proteins' in data:
-        drugstone_ids = data.get('proteins', [])
-        pdis_dataset = get_pdis_ds(data.get('dataset', DEFAULTS['pdis']), data.get('licenced', False))
+    if "proteins" in data:
+        drugstone_ids = data.get("proteins", [])
+        pdis_dataset = get_pdis_ds(
+            data.get("dataset", DEFAULTS["pdis"]), data.get("licenced", False)
+        )
         # find adjacent drugs by looking at drug-protein edges
-        pdis_objects = ProteinDisorderAssociation.objects.filter(protein__id__in=drugstone_ids,
-                                                                 pdis_dataset_id=pdis_dataset.id)
+        pdis_objects = ProteinDisorderAssociation.objects.filter(
+            protein__id__in=drugstone_ids, pdis_dataset_id=pdis_dataset.id
+        )
         disorders = {e.disorder for e in pdis_objects}
         # serialize
-        edges = ProteinDisorderAssociationSerializer(many=True).to_representation(pdis_objects)
+        edges = ProteinDisorderAssociationSerializer(many=True).to_representation(
+            pdis_objects
+        )
         disorders = DisorderSerializer(many=True).to_representation(disorders)
-    elif 'drugs' in data:
-        drugstone_ids = data.get('drugs', [])
-        drdi_dataset = get_drdis_ds(data.get('dataset', DEFAULTS['drdi']), data.get('licenced', False))
+    elif "drugs" in data:
+        drugstone_ids = data.get("drugs", [])
+        drdi_dataset = get_drdis_ds(
+            data.get("dataset", DEFAULTS["drdi"]), data.get("licenced", False)
+        )
         # find adjacent drugs by looking at drug-protein edges
-        drdi_objects = DrugDisorderIndication.objects.filter(drug__id__in=drugstone_ids,
-                                                             drdi_dataset_id=drdi_dataset.id)
+        drdi_objects = DrugDisorderIndication.objects.filter(
+            drug__id__in=drugstone_ids, drdi_dataset_id=drdi_dataset.id
+        )
         disorders = {e.disorder for e in drdi_objects}
         # serialize
-        edges = DrugDisorderIndicationSerializer(many=True).to_representation(drdi_objects)
+        edges = DrugDisorderIndicationSerializer(many=True).to_representation(
+            drdi_objects
+        )
         disorders = DisorderSerializer(many=True).to_representation(disorders)
     for d in disorders:
-        d['drugstone_type'] = 'disorder'
-    return Response({
-        'edges': edges,
-        'disorders': disorders,
-    })
+        d["drugstone_type"] = "disorder"
+    return Response(
+        {
+            "edges": edges,
+            "disorders": disorders,
+        }
+    )
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def adjacent_drugs(request) -> Response:
     """Find all adjacent drugs to a list of proteins.
 
@@ -643,24 +771,30 @@ def adjacent_drugs(request) -> Response:
         Response: With lists "pdis" (protein-drug-intersions) and "drugs"
     """
     data = request.data
-    drugstone_ids = data.get('proteins', [])
-    pdi_dataset = get_pdi_ds(data.get('pdi_dataset', DEFAULTS['pdi']), data.get('licenced', False))
+    drugstone_ids = data.get("proteins", [])
+    pdi_dataset = get_pdi_ds(
+        data.get("pdi_dataset", DEFAULTS["pdi"]), data.get("licenced", False)
+    )
     # find adjacent drugs by looking at drug-protein edges
-    pdi_objects = ProteinDrugInteraction.objects.filter(protein__id__in=drugstone_ids, pdi_dataset_id=pdi_dataset.id)
+    pdi_objects = ProteinDrugInteraction.objects.filter(
+        protein__id__in=drugstone_ids, pdi_dataset_id=pdi_dataset.id
+    )
     drugs = {e.drug for e in pdi_objects}
     # serialize
     pdis = ProteinDrugInteractionSerializer(many=True).to_representation(pdi_objects)
     drugs = DrugSerializer(many=True).to_representation(drugs)
     for drug in drugs:
-        drug['drugstone_type'] = 'drug'
+        drug["drugstone_type"] = "drug"
 
-    return Response({
-        'pdis': pdis,
-        'drugs': drugs,
-    })
+    return Response(
+        {
+            "pdis": pdis,
+            "drugs": drugs,
+        }
+    )
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def query_proteins(request) -> Response:
     proteins = request.data
 
@@ -682,13 +816,15 @@ def query_proteins(request) -> Response:
 
         not_found.append(p)
 
-    return Response({
-        'details': details,
-        'notFound': not_found,
-    })
+    return Response(
+        {
+            "details": details,
+            "notFound": not_found,
+        }
+    )
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def send_bugreport(request) -> Response:
     data = request.data
     title = data.get("title")
@@ -703,54 +839,74 @@ def send_bugreport(request) -> Response:
     return Response({"status": 200})
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def save_selection(request) -> Response:
     chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
-    token_str = ''.join(random.choice(chars) for _ in range(32))
+    token_str = "".join(random.choice(chars) for _ in range(32))
 
     config = request.data.get("config")
     network = request.data.get("network")
 
-    Network.objects.create(id=token_str, config=json.dumps(config), nodes=json.dumps(network["nodes"]), edges=json.dumps(network["edges"]))
-    return Response({
-        'token': token_str,
-    })
+    Network.objects.create(
+        id=token_str,
+        config=json.dumps(config),
+        nodes=json.dumps(network["nodes"]),
+        edges=json.dumps(network["edges"]),
+    )
+    return Response(
+        {
+            "token": token_str,
+        }
+    )
+
 
-@api_view(['GET'])
+@api_view(["GET"])
 def get_view(request) -> Response:
-    token = request.query_params.get('token')
+    token = request.query_params.get("token")
     network = Network.objects.get(id=token)
-    return Response({
-        'config': json.loads(network.config),
-        'created_at': network.created_at,
-        'network': {
-            'nodes': json.loads(network.nodes),
-            'edges': json.loads(network.edges),
+    return Response(
+        {
+            "config": json.loads(network.config),
+            "created_at": network.created_at,
+            "network": {
+                "nodes": json.loads(network.nodes),
+                "edges": json.loads(network.edges),
+            },
         }
-    })
+    )
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def get_view_infos(request) -> Response:
-    tokens = request.data.get('tokens')
-    networks = Network.objects.filter(id__in = tokens)
-    return Response([{
-        'token': n.id,
-        'created_at': n.created_at,
-    } for n in networks])
-
-
-@api_view(['GET'])
+    tokens = request.data.get("tokens")
+    networks = Network.objects.filter(id__in=tokens)
+    return Response(
+        [
+            {
+                "token": n.id,
+                "created_at": n.created_at,
+            }
+            for n in networks
+        ]
+    )
+
+
+@api_view(["GET"])
 def get_max_tissue_expression(request) -> Response:
-    tissue = Tissue.objects.get(id=request.query_params.get('tissue'))
-    return Response({'max': ExpressionLevel.objects.filter(tissue=tissue).aggregate(Max('expression_level'))[
-        'expression_level__max']})
+    tissue = Tissue.objects.get(id=request.query_params.get("tissue"))
+    return Response(
+        {
+            "max": ExpressionLevel.objects.filter(tissue=tissue).aggregate(
+                Max("expression_level")
+            )["expression_level__max"]
+        }
+    )
 
 
-@api_view(['POST'])
+@api_view(["POST"])
 def query_tissue_proteins(request) -> Response:
-    threshold = request.data['threshold']
-    tissue_id = request.data['tissue_id']
+    threshold = request.data["threshold"]
+    tissue_id = request.data["tissue_id"]
     tissue = Tissue.objects.get(id=tissue_id)
 
     proteins = []
@@ -761,7 +917,6 @@ def query_tissue_proteins(request) -> Response:
 
 
 class TissueView(APIView):
-
     def get(self, request) -> Response:
         tissues = Tissue.objects.all()
         return Response(TissueSerializer(many=True).to_representation(tissues))
@@ -773,15 +928,15 @@ class TissueExpressionView(APIView):
     """
 
     def get(self, request) -> Response:
-        tissue = Tissue.objects.get(id=request.query_params.get('tissue'))
-        proteins = request.query_params.get('proteins')
-        token = request.query_params.get('token')
+        tissue = Tissue.objects.get(id=request.query_params.get("tissue"))
+        proteins = request.query_params.get("proteins")
+        token = request.query_params.get("token")
         return self.get_tissue_expression(tissue, proteins, token)
 
     def post(self, request) -> Response:
-        tissue = Tissue.objects.get(id=request.data.get('tissue'))
-        proteins = request.data.get('proteins')
-        token = request.data.get('token')
+        tissue = Tissue.objects.get(id=request.data.get("tissue"))
+        proteins = request.data.get("proteins")
+        token = request.data.get("token")
         return self.get_tissue_expression(tissue, proteins, token)
 
     def get_tissue_expression(self, tissue, proteins, token):
@@ -792,20 +947,20 @@ class TissueExpressionView(APIView):
             proteins = []
             task = Task.objects.get(token=token)
             result = task_result(task)
-            network = result['network']
-            node_attributes = result.get('node_attributes')
+            network = result["network"]
+            node_attributes = result.get("node_attributes")
             if not node_attributes:
                 node_attributes = {}
-            node_types = node_attributes.get('node_types')
+            node_types = node_attributes.get("node_types")
             if not node_types:
                 node_types = {}
             parameters = json.loads(task.parameters)
-            seeds = parameters['seeds']
-            nodes = network['nodes']
+            seeds = parameters["seeds"]
+            nodes = network["nodes"]
             for node in nodes + seeds:
                 node_type = node_types.get(node)
                 details = None
-                if node_type == 'protein':
+                if node_type == "protein":
                     if details:
                         proteins.append(details)
                     else:
@@ -820,10 +975,15 @@ class TissueExpressionView(APIView):
 
         for protein in proteins:
             try:
-                expression_level = ExpressionLevel.objects.get(protein=protein, tissue=tissue)
+                expression_level = ExpressionLevel.objects.get(
+                    protein=protein, tissue=tissue
+                )
                 pt_expressions[
-                    ProteinSerializer().to_representation(protein)['drugstone_id']] = expression_level.expression_level
+                    ProteinSerializer().to_representation(protein)["drugstone_id"]
+                ] = expression_level.expression_level
             except ExpressionLevel.DoesNotExist:
-                pt_expressions[ProteinSerializer().to_representation(protein)['drugstone_id']] = None
+                pt_expressions[
+                    ProteinSerializer().to_representation(protein)["drugstone_id"]
+                ] = None
 
         return Response(pt_expressions)
diff --git a/setup.cfg b/setup.cfg
index 9f22bc3..901b495 100755
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,8 +1,10 @@
 [pycodestyle]
 exclude = */migrations/*,.env,venv,test_*,manage.py,tasks/*,*/commands/*
 max-line-length = 120
+ignore = E203,W503
 
 [flake8]
 exclude = */migrations/*,.env,venv,test_*,manage.py,tasks/*,*/commands/*,*/__init__.py,*/settings.py
 max-line-length = 120
+ignore = E203,W503,F405,F403
 inline-quotes = '
-- 
GitLab