In [1]:
# Filepaths and Hard-coded Defaults

run = 1
timestamp = "2022-02-16_03-46-35"
cell_type = "K562/ENCSR261KBX"
bias_model = False

# Model input/output window sizes (bp)
in_window = 2114
out_window = 1000
input_length, profile_length = in_window, out_window
# SHAP attributions are sliced to this many bp around the input center before
# TF-MoDISco; must match the slice size used when the scores were generated.
shap_score_center_size = 500
profile_display_center_size = 400

proj_root = "/users/kcochran/projects/procap_models/"
sequence_path = proj_root + "genomes/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta"
chrom_sizes = proj_root + "genomes/hg38.chrom.sizes.withrRNA"
# NOTE: previously this was `proj_root + "/data/..."`, producing a doubled
# slash ("...procap_models//data/..."); harmless on POSIX but inconsistent
# with every other path built in this cell.
data_dir = proj_root + "data/procap/processed/" + cell_type + "/"

plus_bw_path = data_dir + "final.5prime.pos.bigWig"
minus_bw_path = data_dir + "final.5prime.neg.bigWig"

# Bias models were trained on the validation peak set only; the full models
# use the combined train + validation peak set.
if bias_model:
    save_dir = proj_root + "model_out/procap_bias/bpnetlite_basic/" + cell_type + "/"
    val_save_path = save_dir + timestamp + "_run" + str(run) + "_val"
    val_peak_path = data_dir + "peaks_uni_and_bi_val.bed.gz"
else:
    save_dir = proj_root + "model_out/procap/bpnetlite_basic/" + cell_type + "/"
    val_save_path = save_dir + timestamp + "_run" + str(run) + "_train_and_val"
    val_peak_path = data_dir + "peaks_uni_and_bi_train_and_val.bed.gz"


attr_save_path = save_dir + timestamp + "_run" + str(run) + "_deepshap"
modisco_out_path = attr_save_path.replace("deepshap", "modisco") + "/"
In [2]:
# Imports, Plotting Defaults

import os, sys
import numpy as np
import h5py
import pandas as pd
import gzip
import sklearn.cluster
import scipy.cluster.hierarchy

import modisco
import viz_sequence

import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager

# Global matplotlib defaults for every figure in this notebook
plot_params = {
    "figure.titlesize": 22,
    "axes.titlesize": 22,
    "axes.labelsize": 20,
    "legend.fontsize": 18,
    "xtick.labelsize": 16,
    "ytick.labelsize": 16,
    "font.weight": "bold"
}
plt.rcParams.update(plot_params)

import io
import base64
import urllib

import vdom.helpers as vdomh
from IPython.display import display
# `tqdm.tqdm_notebook` is deprecated and will be removed in tqdm 5.0 (see the
# TqdmDeprecationWarning this cell used to emit); use `tqdm.notebook.tqdm`
# as the warning advises.
import tqdm.notebook
tqdm.notebook.tqdm()
/users/kcochran/anaconda3/envs/pytorch/lib/python3.7/site-packages/ipykernel_launcher.py:35: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0
Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`
Out[2]:
<tqdm.notebook.tqdm_notebook at 0x7f7cea5afe50>
In [3]:
# Modisco Task-Specific Filepaths

scoring_type = "profile"


assert scoring_type in ["profile", "counts"], scoring_type

# The two scoring types share the same file layout; only the tag differs.
_score_tag = "prof" if scoring_type == "profile" else "count"
scores_path = attr_save_path + "_" + _score_tag + ".npy"
onehot_scores_path = attr_save_path + "_" + _score_tag + "_onehot.npy"
save_path = modisco_out_path + "results_allChroms_" + _score_tag + "_slice500.hdf5"
seqlet_path = modisco_out_path + "seqlets_" + _score_tag + ".txt"


assert(os.path.exists(scores_path)), scores_path
assert(os.path.exists(onehot_scores_path)), onehot_scores_path
In [25]:
# Load in True Profiles and Sequences

sys.path.append('../1_train_models')

from data_loading import extract_peaks

one_hot_seqs, true_profs = extract_peaks(
    sequence_path, plus_bw_path, minus_bw_path, val_peak_path,
    in_window, out_window, max_jitter=0, verbose=True)

# Reorder to (N, length, 4), then keep only the centered slice matching the
# window over which SHAP scores were computed.
one_hot_seqs = one_hot_seqs.swapaxes(1, 2)
one_hot_seqs = one_hot_seqs[
    :,
    (in_window - shap_score_center_size) // 2 : (in_window + shap_score_center_size) // 2,
    :,
]
Reading FASTA:   0%|          | 0/24 [00:00<?, ?it/s]
Reading FASTA:   4%|▍         | 1/24 [00:00<00:17,  1.33it/s]
Reading FASTA:   8%|▊         | 2/24 [00:01<00:15,  1.45it/s]
Reading FASTA:  12%|█▎        | 3/24 [00:01<00:12,  1.62it/s]
Reading FASTA:  17%|█▋        | 4/24 [00:02<00:11,  1.75it/s]
Reading FASTA:  21%|██        | 5/24 [00:02<00:10,  1.84it/s]
Reading FASTA:  25%|██▌       | 6/24 [00:03<00:09,  1.96it/s]
Reading FASTA:  29%|██▉       | 7/24 [00:03<00:08,  2.08it/s]
Reading FASTA:  33%|███▎      | 8/24 [00:04<00:07,  2.22it/s]
Reading FASTA:  38%|███▊      | 9/24 [00:04<00:06,  2.33it/s]
Reading FASTA:  42%|████▏     | 10/24 [00:04<00:05,  2.47it/s]
Reading FASTA:  46%|████▌     | 11/24 [00:05<00:05,  2.56it/s]
Reading FASTA:  50%|█████     | 12/24 [00:05<00:04,  2.65it/s]
Reading FASTA:  54%|█████▍    | 13/24 [00:05<00:03,  2.77it/s]
Reading FASTA:  58%|█████▊    | 14/24 [00:06<00:03,  2.96it/s]
Reading FASTA:  62%|██████▎   | 15/24 [00:06<00:02,  3.16it/s]
Reading FASTA:  67%|██████▋   | 16/24 [00:06<00:02,  3.40it/s]
Reading FASTA:  71%|███████   | 17/24 [00:06<00:01,  3.69it/s]
Reading FASTA:  75%|███████▌  | 18/24 [00:07<00:01,  3.95it/s]
Reading FASTA:  79%|███████▉  | 19/24 [00:07<00:01,  4.48it/s]
Reading FASTA:  83%|████████▎ | 20/24 [00:07<00:00,  4.81it/s]
Reading FASTA:  88%|████████▊ | 21/24 [00:07<00:00,  5.60it/s]
Reading FASTA:  92%|█████████▏| 22/24 [00:07<00:00,  6.02it/s]
Reading FASTA:  96%|█████████▌| 23/24 [00:08<00:00,  4.16it/s]
Reading FASTA: 100%|██████████| 24/24 [00:08<00:00,  2.89it/s]

Loading Peaks: 0it [00:00, ?it/s]
Loading Peaks: 76it [00:00, 756.47it/s]
Loading Peaks: 173it [00:00, 881.67it/s]
Loading Peaks: 282it [00:00, 972.62it/s]
Loading Peaks: 388it [00:00, 1005.35it/s]
Loading Peaks: 493it [00:00, 1018.58it/s]
Loading Peaks: 601it [00:00, 1036.56it/s]
Loading Peaks: 706it [00:00, 1039.86it/s]
Loading Peaks: 810it [00:00, 1026.57it/s]
Loading Peaks: 913it [00:00, 1017.10it/s]
Loading Peaks: 1022it [00:01, 1037.79it/s]
Loading Peaks: 1131it [00:01, 1050.81it/s]
Loading Peaks: 1239it [00:01, 1057.43it/s]
Loading Peaks: 1348it [00:01, 1064.54it/s]
Loading Peaks: 1456it [00:01, 1067.79it/s]
Loading Peaks: 1563it [00:01, 1061.22it/s]
Loading Peaks: 1672it [00:01, 1067.57it/s]
Loading Peaks: 1779it [00:01, 1063.51it/s]
Loading Peaks: 1886it [00:01, 1061.94it/s]
Loading Peaks: 1994it [00:01, 1066.52it/s]
Loading Peaks: 2101it [00:02, 1057.32it/s]
Loading Peaks: 2207it [00:02, 1051.68it/s]
Loading Peaks: 2315it [00:02, 1058.27it/s]
Loading Peaks: 2421it [00:02, 1043.22it/s]
Loading Peaks: 2526it [00:02, 1034.55it/s]
Loading Peaks: 2633it [00:02, 1044.31it/s]
Loading Peaks: 2740it [00:02, 1051.84it/s]
Loading Peaks: 2849it [00:02, 1060.78it/s]
Loading Peaks: 2956it [00:02, 1047.87it/s]
Loading Peaks: 3063it [00:02, 1052.75it/s]
Loading Peaks: 3170it [00:03, 1057.26it/s]
Loading Peaks: 3278it [00:03, 1061.20it/s]
Loading Peaks: 3385it [00:03, 1063.80it/s]
Loading Peaks: 3492it [00:03, 1046.58it/s]
Loading Peaks: 3599it [00:03, 1052.75it/s]
Loading Peaks: 3705it [00:03, 1052.09it/s]
Loading Peaks: 3813it [00:03, 1057.16it/s]
Loading Peaks: 3919it [00:03, 1046.73it/s]
Loading Peaks: 4024it [00:03, 1042.22it/s]
Loading Peaks: 4129it [00:03, 1036.14it/s]
Loading Peaks: 4236it [00:04, 1043.73it/s]
Loading Peaks: 4341it [00:04, 1043.06it/s]
Loading Peaks: 4448it [00:04, 1049.85it/s]
Loading Peaks: 4554it [00:04, 1050.74it/s]
Loading Peaks: 4660it [00:04, 1049.39it/s]
Loading Peaks: 4765it [00:04, 1033.03it/s]
Loading Peaks: 4871it [00:04, 1040.13it/s]
Loading Peaks: 4976it [00:04, 1034.81it/s]
Loading Peaks: 5082it [00:04, 1040.14it/s]
Loading Peaks: 5189it [00:04, 1047.92it/s]
Loading Peaks: 5294it [00:05, 1032.08it/s]
Loading Peaks: 5399it [00:05, 1035.95it/s]
Loading Peaks: 5503it [00:05, 1036.42it/s]
Loading Peaks: 5609it [00:05, 1042.70it/s]
Loading Peaks: 5714it [00:05, 1018.52it/s]
Loading Peaks: 5817it [00:05, 1019.87it/s]
Loading Peaks: 5923it [00:05, 1031.09it/s]
Loading Peaks: 6027it [00:05, 1028.26it/s]
Loading Peaks: 6135it [00:05, 1041.63it/s]
Loading Peaks: 6240it [00:05, 1036.30it/s]
Loading Peaks: 6346it [00:06, 1041.53it/s]
Loading Peaks: 6451it [00:06, 1042.81it/s]
Loading Peaks: 6556it [00:06, 1040.38it/s]
Loading Peaks: 6661it [00:06, 1032.77it/s]
Loading Peaks: 6765it [00:06, 1032.02it/s]
Loading Peaks: 6870it [00:06, 1036.52it/s]
Loading Peaks: 6975it [00:06, 1039.80it/s]
Loading Peaks: 7082it [00:06, 1046.42it/s]
Loading Peaks: 7188it [00:06, 1047.21it/s]
Loading Peaks: 7293it [00:07, 1041.48it/s]
Loading Peaks: 7398it [00:07, 1030.42it/s]
Loading Peaks: 7503it [00:07, 1034.43it/s]
Loading Peaks: 7611it [00:07, 1045.52it/s]
Loading Peaks: 7719it [00:07, 1053.57it/s]
Loading Peaks: 7827it [00:07, 1058.93it/s]
Loading Peaks: 7935it [00:07, 1064.06it/s]
Loading Peaks: 8042it [00:07, 1064.84it/s]
Loading Peaks: 8149it [00:07, 1065.31it/s]
Loading Peaks: 8257it [00:07, 1068.82it/s]
Loading Peaks: 8364it [00:08, 1066.32it/s]
Loading Peaks: 8472it [00:08, 1068.96it/s]
Loading Peaks: 8579it [00:08, 1069.14it/s]
Loading Peaks: 8687it [00:08, 1070.39it/s]
Loading Peaks: 8795it [00:08, 1067.51it/s]
Loading Peaks: 8902it [00:08, 1064.41it/s]
Loading Peaks: 9009it [00:08, 1047.28it/s]
Loading Peaks: 9114it [00:08, 1045.39it/s]
Loading Peaks: 9221it [00:08, 1050.62it/s]
Loading Peaks: 9327it [00:08, 1052.82it/s]
Loading Peaks: 9433it [00:09, 1047.53it/s]
Loading Peaks: 9538it [00:09, 1045.64it/s]
Loading Peaks: 9643it [00:09, 935.46it/s] 
Loading Peaks: 9739it [00:09, 937.15it/s]
Loading Peaks: 9844it [00:09, 968.15it/s]
Loading Peaks: 9950it [00:09, 992.27it/s]
Loading Peaks: 10057it [00:09, 1012.44it/s]
Loading Peaks: 10162it [00:09, 1022.73it/s]
Loading Peaks: 10270it [00:09, 1036.83it/s]
Loading Peaks: 10377it [00:09, 1044.57it/s]
Loading Peaks: 10484it [00:10, 1050.81it/s]
Loading Peaks: 10590it [00:10, 1037.88it/s]
Loading Peaks: 10694it [00:10, 1033.88it/s]
Loading Peaks: 10799it [00:10, 1037.95it/s]
Loading Peaks: 10907it [00:10, 1048.81it/s]
Loading Peaks: 11012it [00:10, 1049.11it/s]
Loading Peaks: 11120it [00:10, 1055.61it/s]
Loading Peaks: 11226it [00:10, 1056.78it/s]
Loading Peaks: 11332it [00:10, 1053.91it/s]
Loading Peaks: 11438it [00:10, 1055.30it/s]
Loading Peaks: 11546it [00:11, 1060.41it/s]
Loading Peaks: 11654it [00:11, 1064.47it/s]
Loading Peaks: 11761it [00:11, 1065.42it/s]
Loading Peaks: 11868it [00:11, 1059.90it/s]
Loading Peaks: 11975it [00:11, 1062.47it/s]
Loading Peaks: 12082it [00:11, 1059.07it/s]
Loading Peaks: 12188it [00:11, 1050.79it/s]
Loading Peaks: 12294it [00:11, 1033.13it/s]
Loading Peaks: 12399it [00:11, 1035.81it/s]
Loading Peaks: 12505it [00:12, 1041.38it/s]
Loading Peaks: 12613it [00:12, 1050.04it/s]
Loading Peaks: 12719it [00:12, 1052.64it/s]
Loading Peaks: 12825it [00:12, 1051.94it/s]
Loading Peaks: 12931it [00:12, 1053.87it/s]
Loading Peaks: 13037it [00:12, 1055.28it/s]
Loading Peaks: 13143it [00:12, 1050.46it/s]
Loading Peaks: 13250it [00:12, 1053.12it/s]
Loading Peaks: 13356it [00:12, 1053.81it/s]
Loading Peaks: 13462it [00:12, 1054.28it/s]
Loading Peaks: 13569it [00:13, 1057.19it/s]
Loading Peaks: 13675it [00:13, 1049.24it/s]
Loading Peaks: 13780it [00:13, 1040.88it/s]
Loading Peaks: 13885it [00:13, 1030.98it/s]
Loading Peaks: 13991it [00:13, 1037.74it/s]
Loading Peaks: 14095it [00:13, 1024.01it/s]
Loading Peaks: 14201it [00:13, 1033.75it/s]
Loading Peaks: 14306it [00:13, 1036.88it/s]
Loading Peaks: 14412it [00:13, 1043.15it/s]
Loading Peaks: 14519it [00:13, 1048.56it/s]
Loading Peaks: 14626it [00:14, 1053.57it/s]
Loading Peaks: 14732it [00:14, 1052.69it/s]
Loading Peaks: 14839it [00:14, 1056.89it/s]
Loading Peaks: 14945it [00:14, 1057.80it/s]
Loading Peaks: 15051it [00:14, 1054.06it/s]
Loading Peaks: 15157it [00:14, 1055.32it/s]
Loading Peaks: 15264it [00:14, 1057.69it/s]
Loading Peaks: 15370it [00:14, 1053.76it/s]
Loading Peaks: 15477it [00:14, 1055.42it/s]
Loading Peaks: 15583it [00:14, 1034.51it/s]
Loading Peaks: 15688it [00:15, 1038.09it/s]
Loading Peaks: 15793it [00:15, 1041.40it/s]
Loading Peaks: 15898it [00:15, 1033.03it/s]
Loading Peaks: 16003it [00:15, 1035.56it/s]
Loading Peaks: 16107it [00:15, 943.24it/s] 
Loading Peaks: 16203it [00:15, 909.53it/s]
Loading Peaks: 16309it [00:15, 948.87it/s]
Loading Peaks: 16416it [00:15, 982.12it/s]
Loading Peaks: 16523it [00:15, 1004.93it/s]
Loading Peaks: 16630it [00:15, 1023.56it/s]
Loading Peaks: 16734it [00:16, 1026.96it/s]
Loading Peaks: 16841it [00:16, 1036.70it/s]
Loading Peaks: 16947it [00:16, 1041.25it/s]
Loading Peaks: 17054it [00:16, 1047.33it/s]
Loading Peaks: 17159it [00:16, 1032.66it/s]
Loading Peaks: 17266it [00:16, 1042.50it/s]
Loading Peaks: 17371it [00:16, 1041.52it/s]
Loading Peaks: 17477it [00:16, 1045.93it/s]
Loading Peaks: 17582it [00:16, 1036.95it/s]
Loading Peaks: 17688it [00:17, 1042.72it/s]
Loading Peaks: 17793it [00:17, 1043.07it/s]
Loading Peaks: 17900it [00:17, 1050.88it/s]
Loading Peaks: 18007it [00:17, 1054.93it/s]
Loading Peaks: 18113it [00:17, 1055.94it/s]
Loading Peaks: 18220it [00:17, 1058.71it/s]
Loading Peaks: 18327it [00:17, 1061.84it/s]
Loading Peaks: 18434it [00:17, 1048.68it/s]
Loading Peaks: 18539it [00:17, 1047.80it/s]
Loading Peaks: 18646it [00:17, 1052.21it/s]
Loading Peaks: 18752it [00:18, 1047.54it/s]
Loading Peaks: 18857it [00:18, 1046.80it/s]
Loading Peaks: 18964it [00:18, 1051.05it/s]
Loading Peaks: 19070it [00:18, 1051.17it/s]
Loading Peaks: 19176it [00:18, 1048.83it/s]
Loading Peaks: 19282it [00:18, 1049.84it/s]
Loading Peaks: 19387it [00:18, 1044.31it/s]
Loading Peaks: 19494it [00:18, 1050.27it/s]
Loading Peaks: 19600it [00:18, 1047.61it/s]
Loading Peaks: 19706it [00:18, 1049.25it/s]
Loading Peaks: 19811it [00:19, 1049.42it/s]
Loading Peaks: 19916it [00:19, 1047.14it/s]
Loading Peaks: 20022it [00:19, 1050.90it/s]
Loading Peaks: 20128it [00:19, 1052.16it/s]
Loading Peaks: 20234it [00:19, 1051.65it/s]
Loading Peaks: 20340it [00:19, 1052.74it/s]
Loading Peaks: 20446it [00:19, 1037.32it/s]
Loading Peaks: 20550it [00:19, 1036.24it/s]
Loading Peaks: 20655it [00:19, 1039.66it/s]
Loading Peaks: 20761it [00:19, 1044.82it/s]
Loading Peaks: 20867it [00:20, 1048.06it/s]
Loading Peaks: 20972it [00:20, 1048.20it/s]
Loading Peaks: 21077it [00:20, 1044.06it/s]
Loading Peaks: 21184it [00:20, 1050.15it/s]
Loading Peaks: 21291it [00:20, 1054.49it/s]
Loading Peaks: 21398it [00:20, 1056.44it/s]
Loading Peaks: 21506it [00:20, 1061.81it/s]
Loading Peaks: 21613it [00:20, 1064.02it/s]
Loading Peaks: 21720it [00:20, 1064.65it/s]
Loading Peaks: 21828it [00:20, 1067.53it/s]
Loading Peaks: 21935it [00:21, 1057.08it/s]
Loading Peaks: 22041it [00:21, 1053.07it/s]
Loading Peaks: 22147it [00:21, 1036.34it/s]
Loading Peaks: 22251it [00:21, 1037.05it/s]
Loading Peaks: 22358it [00:21, 1046.43it/s]
Loading Peaks: 22465it [00:21, 1052.73it/s]
Loading Peaks: 22571it [00:21, 1054.16it/s]
Loading Peaks: 22678it [00:21, 1058.69it/s]
Loading Peaks: 22784it [00:21, 1056.95it/s]
Loading Peaks: 22890it [00:21, 1055.57it/s]
Loading Peaks: 22996it [00:22, 1054.86it/s]
Loading Peaks: 23102it [00:22, 1055.34it/s]
Loading Peaks: 23209it [00:22, 1057.50it/s]
Loading Peaks: 23315it [00:22, 1058.18it/s]
Loading Peaks: 23421it [00:22, 1057.58it/s]
Loading Peaks: 23527it [00:22, 1056.23it/s]
Loading Peaks: 23633it [00:22, 1041.42it/s]
Loading Peaks: 23738it [00:22, 1024.61it/s]
Loading Peaks: 23844it [00:22, 1033.32it/s]
Loading Peaks: 23951it [00:22, 1040.82it/s]
Loading Peaks: 24058it [00:23, 1046.70it/s]
Loading Peaks: 24163it [00:23, 1046.01it/s]
Loading Peaks: 24268it [00:23, 1044.48it/s]
Loading Peaks: 24375it [00:23, 1050.66it/s]
Loading Peaks: 24482it [00:23, 1053.84it/s]
Loading Peaks: 24589it [00:23, 1058.13it/s]
Loading Peaks: 24696it [00:23, 1059.08it/s]
Loading Peaks: 24804it [00:23, 1063.10it/s]
Loading Peaks: 24912it [00:23, 1065.60it/s]
Loading Peaks: 25020it [00:23, 1067.49it/s]
Loading Peaks: 25127it [00:24, 1067.88it/s]
Loading Peaks: 25234it [00:24, 1059.19it/s]
Loading Peaks: 25340it [00:24, 1023.41it/s]
Loading Peaks: 25443it [00:24, 1015.87it/s]
Loading Peaks: 25549it [00:24, 1027.76it/s]
Loading Peaks: 25652it [00:24, 1027.37it/s]
Loading Peaks: 25760it [00:24, 1040.94it/s]
Loading Peaks: 25866it [00:24, 1045.91it/s]
Loading Peaks: 25974it [00:24, 1053.28it/s]
Loading Peaks: 26080it [00:24, 1054.49it/s]
Loading Peaks: 26188it [00:25, 1061.13it/s]
Loading Peaks: 26295it [00:25, 1053.07it/s]
Loading Peaks: 26401it [00:25, 1054.03it/s]
Loading Peaks: 26508it [00:25, 1057.20it/s]
Loading Peaks: 26616it [00:25, 1061.53it/s]
Loading Peaks: 26723it [00:25, 1061.57it/s]
Loading Peaks: 26830it [00:25, 1054.24it/s]
Loading Peaks: 27000it [00:25, 1043.57it/s]
In [5]:
# Load in Coordinates of Examples

def load_coords(peak_bed, window=None):
    """
    Reads a BED file of peaks (gzip-compressed if the path ends in ".gz") and
    returns a list of (chrom, start, end) tuples, where each interval is a
    fixed-size window centered on the midpoint of the original peak.
    Arguments:
        `peak_bed`: path to a BED file; first three columns are used
        `window`: size (bp) of the window placed around each peak midpoint;
            defaults to the notebook-level `in_window` (model input length)
    """
    if window is None:
        window = in_window  # fall back to the notebook's configured input length

    if peak_bed.endswith(".gz"):
        with gzip.open(peak_bed) as f:
            lines = [line.decode().split() for line in f]
    else:
        with open(peak_bed) as f:
            lines = [line.split() for line in f]

    coords = []
    for line in lines:
        chrom, peak_start, peak_end = line[0], int(line[1]), int(line[2])
        mid = (peak_start + peak_end) // 2
        window_start = mid - window // 2
        window_end = mid + window // 2
        coords.append((chrom, window_start, window_end))
    return coords
    
coords = load_coords(val_peak_path)
In [7]:
# Import SHAP scores, predicted profiles

# Reorder SHAP scores to (N, length, 4) and slice to the centered window used
# for attribution analysis.
hyp_scores = np.load(scores_path).swapaxes(1, 2)
hyp_scores = hyp_scores[
    :,
    (in_window - shap_score_center_size) // 2 : (in_window + shap_score_center_size) // 2,
    :,
]
pred_profs = np.exp(np.load(val_save_path + ".profs.npy"))
#pred_counts = np.load(val_save_path + ".counts.npy")
In [26]:
# Load modisco results object

def import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs):
    """
    Loads a saved TF-MoDISco results object from disk.
    Arguments:
        `tfm_results_path`: path to HDF5 containing TF-MoDISco results
        `hyp_scores`: hypothetical importance scores used for this run
        `one_hot_seqs`: input sequences used for this run
    """
    # Everything should already be cut to `input_center_cut_size`.
    # Actual contributions = hypothetical scores masked by the observed bases.
    actual_scores = one_hot_seqs * hyp_scores

    track_set = modisco.tfmodisco_workflow.workflow.prep_track_set(
        task_names=["task0"],
        contrib_scores={"task0": actual_scores},
        hypothetical_contribs={"task0": hyp_scores},
        one_hot=one_hot_seqs,
    )

    with h5py.File(tfm_results_path, "r") as f:
        return modisco.tfmodisco_workflow.workflow.TfModiscoResults.from_hdf5(
            f, track_set=track_set
        )
    
tfm_obj = import_tfmodisco_results(save_path, hyp_scores, one_hot_seqs)
In [16]:
def extract_profiles_and_coords(
    seqlets_arr, one_hot_seqs, hyp_scores, true_profs, pred_profs, pred_coords,
    input_length, profile_length, input_center_cut_size, profile_center_cut_size,
    task_index=None
):
    """
    From the seqlets object of a TF-MoDISco pattern's seqlets and alignments,
    extracts the predicted and observed profiles of the model, as well as the
    set of coordinates for the seqlets.
    Arguments:
        `seqlets_arr`: a TF-MoDISco pattern's seqlets object array (N-array)
        `one_hot_seqs`: an N x R x 4 array of input sequences, where R is
            the cut centered size
        `hyp_scores`: an N x R x 4 array of hypothetical importance scores
        `true_profs`: an N x T x O x 2 array of true profile counts
        `pred_profs`: an N x T x O x 2 array of predicted profile probabilities
        `pred_coords`: an N x 3 object array of coordinates for the input sequence
            underlying the predictions
        `input_length`: length of original input sequences, I
        `profile_length`: length of profile predictions, O
        `input_center_cut_size`: centered cut size of SHAP scores used
        `profile_center_cut_size`: size to cut profiles to when returning them, P
        `task_index`: index of task to focus on for profiles; if None, returns
            profiles for all tasks
    Returns an N x (T or 1) x P x 2 array of true profile counts, an
    N x (T or 1) x P x 2 array of predicted profile probabilities, an N x Q x 4
    array of one-hot seqlet sequences, an N x Q x 4 array of hypothetical seqlet
    importance scores, an N x 3 object array of seqlet coordinates, and an
    N-array of booleans indicating whether each seqlet was reverse-complemented
    by TF-MoDISco, where P is the profile cut size and Q is the seqlet length.
    Returned profiles are centered at the same center as the seqlets.
    Note that it is important that the seqlet indices match exactly with the indices
    out of the N. This should be the exact sequences in the original SHAP scores.
    """
    true_seqlet_profs, pred_seqlet_profs, seqlet_seqs, seqlet_hyps, seqlet_coords, rcs = [], [], [], [], [], []
    
    # Seqlet coordinates are relative to the centered SHAP-score slice (size
    # input_center_cut_size); these helpers shift them into the profile-output
    # frame and the full-input frame, respectively.
    def seqlet_coord_to_profile_coord(seqlet_coord):
        return seqlet_coord + ((input_length - input_center_cut_size) // 2) - ((input_length - profile_length) // 2)
    
    def seqlet_coord_to_input_coord(seqlet_coord):
        return seqlet_coord + ((input_length - input_center_cut_size) // 2)
        
    # For each seqlet, fetch the true/predicted profiles
    for seqlet in seqlets_arr:
        coord_index = seqlet.coor.example_idx
        seqlet_start = seqlet.coor.start
        seqlet_end = seqlet.coor.end
        seqlet_rc = seqlet.coor.is_revcomp
        rcs.append(seqlet_rc)
        
        # Get indices of profile to cut out
        seqlet_center = (seqlet_start + seqlet_end) // 2
        prof_center = seqlet_coord_to_profile_coord(seqlet_center)
        prof_start = prof_center - (profile_center_cut_size // 2)
        prof_end = prof_start + profile_center_cut_size
        
        if task_index is None or true_profs.shape[1] == 1:
            # Use all tasks if the predictions only have 1 task to begin with
            task_start, task_end = None, None
        else:
            task_start, task_end = task_index, task_index + 1
            
        true_prof = true_profs[coord_index, task_start:task_end, prof_start:prof_end]  # (T or 1) x P x 2
        pred_prof = pred_profs[coord_index, task_start:task_end, prof_start:prof_end]  # (T or 1) x P x 2
        
        true_seqlet_profs.append(true_prof)
        pred_seqlet_profs.append(pred_prof)
        
        # The one-hot-sequences and hypothetical scores are assumed to already by cut/centered,
        # so the indices match the seqlet indices
        if seqlet_rc:
            # Reverse-complement: flip both the position axis and the base axis
            seqlet_seqs.append(np.flip(one_hot_seqs[coord_index, seqlet_start:seqlet_end], axis=(0, 1)))
            seqlet_hyps.append(np.flip(hyp_scores[coord_index, seqlet_start:seqlet_end], axis=(0, 1)))
        else:
            seqlet_seqs.append(one_hot_seqs[coord_index, seqlet_start:seqlet_end])
            seqlet_hyps.append(hyp_scores[coord_index, seqlet_start:seqlet_end])
            
        # Get the coordinates of the seqlet based on the input coordinates
        inp_start = seqlet_coord_to_input_coord(seqlet_start)
        inp_end = seqlet_coord_to_input_coord(seqlet_end)
        chrom, start, _ = pred_coords[coord_index]
        seqlet_coords.append([chrom, start + inp_start, start + inp_end])
    
    return np.stack(true_seqlet_profs), np.stack(pred_seqlet_profs), np.stack(seqlet_seqs), np.stack(seqlet_hyps), np.array(seqlet_coords, dtype=object), np.array(rcs)


def plot_profiles(seqlet_true_profs, seqlet_pred_profs, kmeans_clusters=5, save_path=None):
    """
    Plots the given profiles with a heatmap.
    Arguments:
        `seqlet_true_profs`: an N x O x 2 NumPy array of true profiles, either as raw
            counts or probabilities (they will be normalized)
        `seqlet_pred_profs`: an N x O x 2 NumPy array of predicted profiles, either as
            raw counts or probabilities (they will be normalized)
        `kmeans_clusters`: minimum number of clusters for the profile heatmaps;
            the actual number scales up with N (roughly one cluster per 50
            profiles)
        `save_path`: if provided, save the profile matrices here as a
            compressed .npz
    Returns the figure.
    """
    assert len(seqlet_true_profs.shape) == 3
    assert seqlet_true_profs.shape == seqlet_pred_profs.shape
    num_profs, width, _ = seqlet_true_profs.shape

    # First, normalize the profiles along the output profile dimension
    def normalize(arr, axis=0):
        arr_sum = np.sum(arr, axis=axis, keepdims=True)
        arr_sum[arr_sum == 0] = 1  # If 0, keep 0 as the quotient instead of dividing by 0
        return arr / arr_sum
    true_profs_norm = normalize(seqlet_true_profs, axis=1)
    pred_profs_norm = normalize(seqlet_pred_profs, axis=1)

    # Compute the mean profiles across all examples
    true_profs_mean = np.mean(true_profs_norm, axis=0)
    pred_profs_mean = np.mean(pred_profs_norm, axis=0)

    # Perform k-means clustering on the predicted profiles, with the strands pooled.
    # Scale cluster count with the number of profiles, using the `kmeans_clusters`
    # argument as the minimum. (Previously the argument was unconditionally
    # overwritten by max(5, ...) and had no effect.)
    kmeans_clusters = max(kmeans_clusters, num_profs // 50)
    kmeans = sklearn.cluster.KMeans(n_clusters=kmeans_clusters)
    cluster_assignments = kmeans.fit_predict(
        np.reshape(pred_profs_norm, (pred_profs_norm.shape[0], -1))
    )

    # Perform hierarchical clustering on the cluster centers to determine optimal ordering
    kmeans_centers = kmeans.cluster_centers_
    cluster_order = scipy.cluster.hierarchy.leaves_list(
        scipy.cluster.hierarchy.optimal_leaf_ordering(
            scipy.cluster.hierarchy.linkage(kmeans_centers, method="centroid"), kmeans_centers
        )
    )

    # Order the profiles so that the cluster assignments follow the optimal ordering
    cluster_inds = []
    for cluster_id in cluster_order:
        cluster_inds.append(np.where(cluster_assignments == cluster_id)[0])
    cluster_inds = np.concatenate(cluster_inds)

    # Compute a matrix of profiles, normalized to the maximum height, ordered by clusters
    def make_profile_matrix(flat_profs, order_inds):
        matrix = flat_profs[order_inds]
        maxes = np.max(matrix, axis=1, keepdims=True)
        maxes[maxes == 0] = 1  # If 0, keep 0 as the quotient instead of dividing by 0
        return matrix / maxes
    true_matrix = make_profile_matrix(true_profs_norm, cluster_inds)
    pred_matrix = make_profile_matrix(pred_profs_norm, cluster_inds)
    
    if save_path:
        # Bug fix: np.savez_compressed requires the destination file as its
        # first argument; previously it was omitted, raising a TypeError
        # whenever `save_path` was provided.
        np.savez_compressed(
            save_path,
            true_profs_mean=true_profs_mean, pred_profs_mean=pred_profs_mean,
            true_matrix=true_matrix, pred_matrix=pred_matrix
        )

    # Create a figure with the right dimensions
    mean_height = 4
    heatmap_height = min(num_profs * 0.004, 8)
    fig_height = mean_height + (2 * heatmap_height)
    fig, ax = plt.subplots(
        3, 2, figsize=(16, fig_height), sharex=True,
        gridspec_kw={
            "width_ratios": [1, 1],
            "height_ratios": [mean_height / fig_height, heatmap_height / fig_height, heatmap_height / fig_height]
        }
    )

    # Plot the average predictions (plus strand up, minus strand mirrored down)
    ax[0, 0].plot(true_profs_mean[:, 0], color="darkslateblue")
    ax[0, 0].plot(-true_profs_mean[:, 1], color="darkorange")
    ax[0, 1].plot(pred_profs_mean[:, 0], color="darkslateblue")
    ax[0, 1].plot(-pred_profs_mean[:, 1], color="darkorange")

    # Set axes on average predictions
    max_mean_val = max(np.max(true_profs_mean), np.max(pred_profs_mean))
    mean_ylim = max_mean_val * 1.05  # Make 5% higher
    ax[0, 0].set_title("True profiles")
    ax[0, 0].set_ylabel("Average probability")
    ax[0, 1].set_title("Predicted profiles")
    for j in (0, 1):
        ax[0, j].set_ylim(-mean_ylim, mean_ylim)
        ax[0, j].label_outer()

    # Plot the heatmaps (rows = seqlets ordered by cluster, columns = position)
    ax[1, 0].imshow(true_matrix[:, :, 0], interpolation="nearest", aspect="auto", cmap="Blues")
    ax[1, 1].imshow(pred_matrix[:, :, 0], interpolation="nearest", aspect="auto", cmap="Blues")
    ax[2, 0].imshow(true_matrix[:, :, 1], interpolation="nearest", aspect="auto", cmap="Oranges")
    ax[2, 1].imshow(pred_matrix[:, :, 1], interpolation="nearest", aspect="auto", cmap="Oranges")

    # Set axes on heatmaps
    for i in (1, 2):
        for j in (0, 1):
            ax[i, j].set_yticks([])
            ax[i, j].set_yticklabels([])
            ax[i, j].label_outer()
    # X ticks labeled as signed distance from the seqlet center
    width = true_matrix.shape[1]
    delta = 100
    num_deltas = (width // 2) // delta
    labels = list(range(max(-width // 2, -num_deltas * delta), min(width // 2, num_deltas * delta) + 1, delta))
    tick_locs = [label + max(width // 2, num_deltas * delta) for label in labels]
    for j in (0, 1):
        ax[2, j].set_xticks(tick_locs)
        ax[2, j].set_xticklabels(labels)
        ax[2, j].set_xlabel("Distance from seqlet center (bp)")

    fig.tight_layout()
    plt.show()
    
    
    # Second figure: zoomed-in view of the central 60 bp of the mean profiles
    fig2, ax = plt.subplots(
        1, 2, figsize=(16, mean_height), sharex=True,
        gridspec_kw={"width_ratios": [1, 1]}
    )

    # Plot the average predictions
    mid = true_profs_mean.shape[0] // 2
    zoom_width = 60
    start = mid - zoom_width // 2
    end = mid + zoom_width // 2
    ax[0].plot(true_profs_mean[start:end, 0], color="darkslateblue")
    ax[0].plot(-true_profs_mean[start:end, 1], color="darkorange")
    ax[1].plot(pred_profs_mean[start:end, 0], color="darkslateblue")
    ax[1].plot(-pred_profs_mean[start:end, 1], color="darkorange")

    # Set axes on average predictions
    max_mean_val = max(np.max(true_profs_mean[start:end]), np.max(pred_profs_mean[start:end]))
    mean_ylim = max_mean_val * 1.05  # Make 5% higher
    ax[0].set_title("True profiles")
    ax[0].set_ylabel("Average probability")
    ax[1].set_title("Predicted profiles")
    
    delta = 10
    num_deltas = (zoom_width // 2) // delta
    labels = list(range(max(-zoom_width // 2, -num_deltas * delta), min(zoom_width // 2, num_deltas * delta) + 1, delta))
    tick_locs = [label + max(zoom_width // 2, num_deltas * delta) for label in labels]
    
    for j in (0, 1):
        ax[j].set_ylim(-mean_ylim, mean_ylim)
        ax[j].label_outer()
        ax[j].set_xticks(tick_locs)
        ax[j].set_xticklabels(labels)
        ax[j].set_xlabel("Distance from seqlet center (bp)")

    fig2.tight_layout(w_pad=4, rect=(0.1, 0, 0.95, 1))
    plt.show()
    
    # Only the first (full-width) figure is returned, matching original behavior
    return fig

def get_summit_distances(coords, peak_coords):
    """
    Given a set of coordinates, computes the signed distance of the center of
    each coordinate to the nearest peak summit (summit = midpoint of the peak
    interval).
    Arguments:
        `coords`: an N x 3 object array of (chrom, start, end) coordinates
        `peak_coords`: an iterable of (chrom, start, end) peak coordinates
    Returns an N-array of integers, which is the signed distance of each
    coordinate midpoint to the nearest peak summit on the same chromosome.
    Raises a ValueError (via `np.argmin` on an empty array) if a coordinate's
    chromosome has no peaks, matching the original behavior.
    """
    # Precompute summits per chromosome once, instead of filtering a DataFrame
    # for every coordinate (was O(N * M); now one pass over the peaks).
    summits_by_chrom = {}
    for chrom, peak_start, peak_end in peak_coords:
        summits_by_chrom.setdefault(chrom, []).append((peak_start + peak_end) // 2)
    summits_by_chrom = {c: np.array(s) for c, s in summits_by_chrom.items()}

    no_summits = np.array([], dtype=int)  # for chromosomes with no peaks
    dists = []
    for chrom, start, end in coords:
        midpoint = (start + end) // 2
        dist_arr = midpoint - summits_by_chrom.get(chrom, no_summits)
        # Keep the signed distance with the smallest magnitude
        dists.append(dist_arr[np.argmin(np.abs(dist_arr))])
    return np.array(dists)

def plot_summit_dists(summit_dists):
    """
    Plots a histogram of seqlet-center distances to peak summits.
    Arguments:
        `summit_dists`: the array of signed distances as returned by
            `get_summit_distances`
    Returns the figure.
    """
    fig = plt.figure(figsize=(8, 6))
    # Scale bin count with the sample size, with a floor of 20 bins
    bin_count = max(len(summit_dists) // 30, 20)
    plt.hist(summit_dists, bins=bin_count, color="purple")
    plt.title("Histogram of distance of seqlets to peak summits")
    plt.xlabel("Signed distance from seqlet center to nearest peak summit (bp)")
    plt.show()
    return fig
In [10]:
# Uniform background base frequencies (A, C, G, T)
BACKGROUND_FREQS = np.array([0.25, 0.25, 0.25, 0.25])

def pfm_info_content(track, pseudocount=0.001):
    """
    Given an L x 4 track, computes the information content (in bits, relative
    to `BACKGROUND_FREQS`) of each position and returns it as an L-array.
    A small `pseudocount` is added per base before normalizing, to avoid
    log(0) at positions with zero counts for some base.
    """
    n_bases = track.shape[1]
    # Normalize each position to a probability distribution over bases
    denom = np.sum(track, axis=1, keepdims=True) + (n_bases * pseudocount)
    probs = (track + pseudocount) / denom
    # Per-base contribution: p * log2(p / background)
    contrib = probs * np.log2(probs / BACKGROUND_FREQS[None, :])
    return np.sum(contrib, axis=1)


def pfm_to_pwm(pfm):
    """
    Converts an L x 4 PFM into a PWM-style track by scaling each position's
    base frequencies by that position's information content (as computed by
    `pfm_info_content`). Returns an L x 4 array.
    """
    ic = pfm_info_content(pfm)
    return pfm * np.expand_dims(ic, axis=1)


def trim_motif_by_ic(pfm, motif, min_ic=0.2, pad=0):
    """
    Given the PFM and motif (both L x 4 arrays) (the motif could be the
    PFM itself), trims `motif` by cutting off flanks of low information
    content in `pfm`. `min_ic` is the minimum required information
    content. If specified, the trimmed motif will be extended on either
    side by `pad` bases.
    If no base passes the `min_ic` threshold, then no trimming is done.
    """
    ic = pfm_info_content(pfm)
    # Indices of positions that carry enough information to keep
    passing = np.where(ic >= min_ic)[0]

    if passing.size == 0:
        # Nothing passes the threshold; return the motif untouched
        return motif

    # Widen the kept span by `pad` on each side, clipped to the motif bounds
    left = max(0, np.min(passing) - pad)
    right = min(len(pfm), np.max(passing) + pad + 1)
    return motif[left:right]


def figure_to_vdom_image(figure):
    """
    Converts a Matplotlib figure into an inline-displayable VDOM image by
    embedding the figure's PNG bytes as a base64 data URI.
    """
    buffer = io.BytesIO()
    figure.savefig(buffer, format='png')
    buffer.seek(0)
    encoded = base64.b64encode(buffer.read())

    # Percent-encode the base64 payload so it is safe inside the URI
    data_uri = 'data:image/png;base64,' + urllib.parse.quote(encoded)
    return vdomh.div(
        vdomh.img(src=data_uri),
        style={"display": "inline-block"}
    )
In [27]:
# Walk the TF-MoDISco results object (`tfm_obj`, loaded in an earlier cell),
# collecting trimmed PFMs/hCWMs/CWMs and seqlets for every pattern, and
# display each pattern's logos plus profile and summit-distance plots.
motif_pfms, motif_hcwms, motif_cwms = [], [], []  # Save the trimmed PFMs, hCWMs, and CWMs
motif_pfms_short = []  # PFMs that are even more trimmed (for TOMTOM)
num_seqlets = []  # Number of seqlets for each motif
motif_seqlets = []  # Save seqlets of each motif

    
metaclusters = tfm_obj.metacluster_idx_to_submetacluster_results
num_metaclusters = len(metaclusters.keys())
for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
    metacluster = metaclusters[metacluster_key]
    display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
    patterns = metacluster.seqlets_to_patterns_result.patterns
    if not patterns:
        # NOTE(review): `break` aborts ALL remaining metaclusters as soon as
        # one has no patterns; `continue` looks like what was intended —
        # confirm before relying on downstream per-metacluster lists
        break
    # Start a new sublist for this metacluster in each accumulator
    motif_pfms.append([])
    motif_hcwms.append([])
    motif_cwms.append([])
    motif_pfms_short.append([])
    num_seqlets.append([])
    motif_seqlets.append([])
    num_patterns = len(patterns)
    for pattern_i, pattern in enumerate(patterns):
        seqlets = pattern.seqlets
        display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
        display(vdomh.p("%d seqlets" % len(seqlets)))
        
        # Forward-strand tracks stored on the pattern: base frequencies,
        # hypothetical contributions, and actual contribution scores
        pfm = pattern["sequence"].fwd
        hcwm = pattern["task0_hypothetical_contribs"].fwd
        cwm = pattern["task0_contrib_scores"].fwd
        
        # Render each track as a sequence logo
        pfm_fig = viz_sequence.plot_weights(pfm, subticks_frequency=10, return_fig=True)
        hcwm_fig = viz_sequence.plot_weights(hcwm, subticks_frequency=10, return_fig=True)
        cwm_fig = viz_sequence.plot_weights(cwm, subticks_frequency=10, return_fig=True)
        pfm_fig.tight_layout()
        hcwm_fig.tight_layout()
        cwm_fig.tight_layout()
        
        # Show the three logos side by side in an HTML table
        motif_table = vdomh.table(
            vdomh.tr(
                vdomh.td("Sequence (PFM)"),
                vdomh.td(figure_to_vdom_image(pfm_fig))
            ),
            vdomh.tr(
                vdomh.td("Hypothetical contributions (hCWM)"),
                vdomh.td(figure_to_vdom_image(hcwm_fig))
            ),
            vdomh.tr(
                vdomh.td("Actual contributions (CWM)"),
                vdomh.td(figure_to_vdom_image(cwm_fig))
            )
        )
        display(motif_table)
        plt.close("all")  # Remove all standing figures
        
        # Trim motif based on information content
        short_trimmed_pfm = trim_motif_by_ic(pfm, pfm)
        motif_pfms_short[-1].append(short_trimmed_pfm)
        
        # Expand trimming to +/- 4bp on either side
        trimmed_pfm = trim_motif_by_ic(pfm, pfm, pad=4)
        trimmed_hcwm = trim_motif_by_ic(pfm, hcwm, pad=4)
        trimmed_cwm = trim_motif_by_ic(pfm, cwm, pad=4)
        
        motif_pfms[-1].append(trimmed_pfm)
        motif_hcwms[-1].append(trimmed_hcwm)
        motif_cwms[-1].append(trimmed_cwm)
        
        num_seqlets[-1].append(len(seqlets))

        # Pull out per-seqlet profiles, sequences, scores, and coordinates.
        # NOTE(review): `extract_profiles_and_coords` and the `one_hot_seqs`/
        # `hyp_scores`/`true_profs`/`pred_profs`/`coords` inputs are defined
        # in cells outside this one; `rcs` presumably flags reverse-complement
        # seqlets — confirm against that function's definition.
        seqlet_true_profs, seqlet_pred_profs, seqlet_seqs, seqlet_hyps, seqlet_coords, rcs = extract_profiles_and_coords(
            seqlets, one_hot_seqs, hyp_scores, true_profs, pred_profs, coords,
            input_length, profile_length, shap_score_center_size,
            profile_display_center_size, task_index=None
        )
        
        motif_seqlets[-1].append((seqlet_seqs, seqlet_hyps))
        
        seqlet_true_profs = seqlet_true_profs.swapaxes(1,2)
        seqlet_pred_profs = seqlet_pred_profs.swapaxes(1,2)
        
        # Flip reverse-complement seqlets so all profiles share an orientation
        for i in range(len(rcs)):
            if rcs[i]:
                seqlet_true_profs[i, :, :] = seqlet_true_profs[i, ::-1, ::-1]
                seqlet_pred_profs[i, :, :] = seqlet_pred_profs[i, ::-1, ::-1]

        #assert np.allclose(np.sum(seqlet_seqs, axis=0) / len(seqlet_seqs), pattern["sequence"].fwd)
        # ^Sanity check: PFM derived from seqlets match the PFM stored in the pattern
        prof_fig = plot_profiles(seqlet_true_profs, seqlet_pred_profs)
        
        summit_dists = get_summit_distances(seqlet_coords, coords)
        dist_fig = plot_summit_dists(summit_dists)

Metacluster 1/2

Pattern 1/43

7317 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 2/43

6683 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 3/43

3648 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 4/43

2784 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 5/43

2654 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 6/43

1641 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 7/43

1007 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 8/43

924 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 9/43

894 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 10/43

817 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 11/43

810 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 12/43

784 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 13/43

782 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 14/43

618 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 15/43

583 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 16/43

577 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 17/43

501 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 18/43

377 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 19/43

331 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 20/43

326 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 21/43

287 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 22/43

211 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 23/43

201 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 24/43

193 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 25/43

166 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 26/43

132 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 27/43

129 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 28/43

128 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 29/43

91 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 30/43

83 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 31/43

79 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 32/43

74 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 33/43

60 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 34/43

58 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 35/43

54 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 36/43

54 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 37/43

40 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 38/43

39 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 39/43

39 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 40/43

37 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 41/43

32 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 42/43

31 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 43/43

24 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Metacluster 2/2

Pattern 1/3

42 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 2/3

30 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)

Pattern 3/3

21 seqlets

Sequence (PFM)
Hypothetical contributions (hCWM)
Actual contributions (CWM)
In [53]:
import os
import subprocess
import numpy as np
import pandas as pd
import tempfile

# Uniform background base frequencies (A, C, G, T) used when exporting
# motifs to MEME format
BACKGROUND_FREQS = np.array([0.25, 0.25, 0.25, 0.25])
# JASPAR 2022 CORE motif database in MEME format, used as the TOMTOM target
DATABASE_PATH = "/users/kcochran/projects/procap_models/annotations/JASPAR2022_CORE_pfms.meme"

def import_database_pfms(database_path):
    """
    Imports the database of PFMs by reading through the entire database and
    constructing a dictionary mapping motif IDs to NumPy arrays of PFMs.
    Arguments:
        `database_path`: path to a MEME-format motif database
    Returns a dictionary mapping each motif ID to a W x 4 NumPy array, where
    W is that motif's width.
    """
    motif_dict = {}
    with open(database_path, "r") as f:
        try:
            while True:
                line = next(f)
                if not line.startswith("MOTIF"):
                    continue
                # "MOTIF <id> ..." — the ID is the second token
                motif_id = line.strip().split()[1]
                # Header looks like
                # "letter-probability matrix: alength= 4 w= <W> ...";
                # token index 5 is the motif width
                header_fields = next(f).split()
                width = int(header_fields[5])
                rows = [
                    [float(x) for x in next(f).strip().split()]
                    for _ in range(width)
                ]
                motif_dict[motif_id] = np.array(rows).reshape(width, 4)
        except StopIteration:
            # Reached end of file
            pass
    return motif_dict


def export_pfms_to_meme_format(
    pfms, outfile, background_freqs=None, names=None
):
    """
    Exports a set of PFMs to MEME motif format. Includes the background
    frequencies `BACKGROUND_FREQS`.
    Arguments:
        `pfms`: a list of L x 4 PFMs (where L can be different for each PFM)
        `outfile`: path to file to output the MEME-format PFMs
        `background_freqs`: background frequencies of A, C, G, T as a length-4
            NumPy array; defaults to `BACKGROUND_FREQS`
        `names`: if specified, a list of unique names to give to each PFM, must
            be parallel to `pfms`
    Raises an AssertionError if `names` is given but is not parallel to `pfms`
    or contains duplicates.
    """
    if names is None:
        names = [str(i) for i in range(len(pfms))]
    else:
        # Bug fix: the original compared len(names) to the `pfms` list itself
        # (always False), so any explicit `names` argument raised
        assert len(names) == len(pfms)
        assert len(names) == len(np.unique(names))
    if background_freqs is None:
        background_freqs = BACKGROUND_FREQS

    # Only create parent directories if the path actually has one; makedirs("")
    # would raise FileNotFoundError for a bare filename
    parent_dir = os.path.dirname(outfile)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(outfile, "w") as f:
        f.write("MEME version 5\n\n")
        f.write("ALPHABET= ACGT\n\n")
        f.write("Background letter frequencies\n")
        f.write("A %f C %f G %f T %f\n\n" % tuple(background_freqs))
        for i in range(len(pfms)):
            pfm, name = pfms[i], names[i]
            f.write("MOTIF %s\n" % name)
            f.write("letter-probability matrix:\n")
            for row in pfm:
                f.write(" ".join([str(freq) for freq in row]) + "\n")
            f.write("\n")


def run_tomtom(
    query_motif_file, target_motif_file, outdir, show_output=True
):
    """
    Runs TOMTOM given the target and query motif files. The default threshold
    of q < 0.5 is used to filter for matches.
    Arguments:
        `query_motif_file`: file containing motifs in MEME format, which will
            be the query motifs for which matches are found
        `target_motif_file`: file containing motifs in MEME format, which will
            be used to search for matches
        `outdir`: path to directory to store results
        `show_output`: whether or not to show TOMTOM output
    """
    command = [
        "tomtom",
        query_motif_file, target_motif_file,
        "-oc", outdir,
        "-no-ssc",
        "-dist", "pearson",
        "-min-overlap", "5",
        "-text",
    ]
    # Capture (and thereby suppress) TOMTOM's stdout/stderr unless the
    # caller asked to see the output
    subprocess.run(command, capture_output=(not show_output))


def import_tomtom_results(tomtom_dir):
    """
    Imports the TOMTOM output directory as a Pandas DataFrame.
    Arguments:
        `tomtom_dir`: TOMTOM output directory, which contains the output file
            "tomtom.txt"
    Returns a Pandas DataFrame with standardized column names.
    """
    result_path = os.path.join(tomtom_dir, "tomtom.txt")
    table = pd.read_csv(
        result_path, sep="\t", header=0, index_col=False, comment="#"
    )
    # Normalize column names regardless of what the header row contained
    table.columns = [
        "Query_ID", "Target_ID", "Optimal_offset", "p-value", "E-value",
        "q-value", "Overlap", "Query_consensus", "Target_consensus",
        "Orientation"
    ]
    return table


def match_motifs_to_targets(
    query_pfms, target_pfms, temp_dir=None, show_tomtom_output=True
):
    """
    For each motif in the query PFMs, finds the best match to the target PFMs,
    based on TOMTOM q-value.
    Arguments:
        `query_pfms`: list of L x 4 PFMs to look for matches for
        `target_pfms`: list of L x 4 PFMs to match to
        `temp_dir`: a temporary directory to store intermediates; defaults to
            a randomly created directory
        `show_tomtom_output`: whether to show TOMTOM output when running
    Returns an array of indices parallel to `query_pfms`, where each index
    denotes the best PFM within `target_pfms` that matches the query PFM. If
    a good match is not found (i.e. based on TOMTOM's threshold), the index
    will be -1.
    """
    temp_dir_obj = None
    if temp_dir is None:
        temp_dir_obj = tempfile.TemporaryDirectory()
        temp_dir = temp_dir_obj.name

    # Write both motif sets out in MEME format for TOMTOM
    query_motif_file = os.path.join(temp_dir, "query_motifs.txt")
    target_motif_file = os.path.join(temp_dir, "target_motifs.txt")
    export_pfms_to_meme_format(query_pfms, query_motif_file)
    export_pfms_to_meme_format(target_pfms, target_motif_file)

    # Run TOMTOM on the two motif files
    tomtom_dir = os.path.join(temp_dir, "tomtom")
    run_tomtom(
        query_motif_file, target_motif_file, tomtom_dir,
        show_output=show_tomtom_output
    )

    # The exported motif names are their list indices, so the table's
    # Query_ID/Target_ID columns map directly to indices
    tomtom_table = import_tomtom_results(tomtom_dir)
    match_inds = []
    for query_i in range(len(query_pfms)):
        hits = tomtom_table[tomtom_table["Query_ID"] == query_i]
        if hits.empty:
            match_inds.append(-1)
        else:
            # Keep the target with the smallest q-value
            best_hit = hits.loc[hits["q-value"].idxmin()]
            match_inds.append(best_hit["Target_ID"])

    if temp_dir_obj is not None:
        temp_dir_obj.cleanup()

    return np.array(match_inds)
        

def match_motifs_to_database(
    query_pfms, top_k=5, temp_dir=None, database_path=DATABASE_PATH,
    show_tomtom_output=True
):
    """
    For each motif in the query PFMs, finds the best matches to the TOMTOM
    database, ranked by TOMTOM q-value.
    Arguments:
        `query_pfms`: list of L x 4 PFMs to look for matches for
        `top_k`: the number of motifs to return based on q-value
        `temp_dir`: a temporary directory to store intermediates; defaults to
            a randomly created directory
        `database_path`: the path to a TOMTOM motif database; defaults to
            DATABASE_PATH
        `show_tomtom_output`: whether to show TOMTOM output when running
    Returns a list of lists of (motif name, motif PFM, q-value) tuples
    parallel to `query_pfms`, where each sublist of tuples is the set of motif
    names, motif PFMs (as NumPy arrays), and q-values for the corresponding
    query motif. Each sublist is sorted in ascending order by q-value. If
    fewer than `top_k` matches are found (based on TOMTOM's threshold), the
    returned sublist will be shorter (and may even be empty).
    """
    # Load the database PFMs so matches can be returned as arrays
    database_pfms = import_database_pfms(database_path)

    temp_dir_obj = None
    if temp_dir is None:
        temp_dir_obj = tempfile.TemporaryDirectory()
        temp_dir = temp_dir_obj.name

    # Write the query motifs in MEME format for TOMTOM
    query_motif_file = os.path.join(temp_dir, "query_motifs.txt")
    export_pfms_to_meme_format(query_pfms, query_motif_file)

    # Run TOMTOM against the motif database
    tomtom_dir = os.path.join(temp_dir, "tomtom")
    run_tomtom(
        query_motif_file, database_path, tomtom_dir,
        show_output=show_tomtom_output
    )

    # The exported query names are their list indices, so the table's
    # Query_ID column maps directly to indices into `query_pfms`
    tomtom_table = import_tomtom_results(tomtom_dir)
    matches = []
    for query_i in range(len(query_pfms)):
        hits = tomtom_table[tomtom_table["Query_ID"] == query_i]
        if hits.empty:
            matches.append([])
            continue
        # Keep the top_k hits with the smallest q-values
        best_hits = hits.sort_values("q-value").head(top_k)
        matches.append([
            (target_id, database_pfms[target_id], qval)
            for target_id, qval in zip(
                best_hits["Target_ID"], best_hits["q-value"]
            )
        ])

    if temp_dir_obj is not None:
        temp_dir_obj.cleanup()

    return matches
In [54]:
#from tomtom import match_motifs_to_database

# For each discovered motif, query the JASPAR database via TOMTOM and display
# the top matches (motif ID, q-value, PWM logo) beneath the motif's hCWM.
# NOTE(review): relies on `motif_pfms`, `motif_pfms_short`, `motif_hcwms`,
# `num_metaclusters`, and `modisco_out_path` defined in earlier cells.
num_matches_to_keep = 5   # number of TOMTOM hits to retrieve per motif
num_matches_to_show = 5   # number of hits to render with a PWM image

# Column headers for the per-motif match table
header = vdomh.thead(
    vdomh.tr(
        vdomh.th("Motif ID", style={"text-align": "center"}),
        vdomh.th("q-val", style={"text-align": "center"}),
        vdomh.th("PWM", style={"text-align": "center"})
    )
)

for i in range(len(motif_pfms)):
    display(vdomh.h3("Metacluster %d/%d" % (i + 1, num_metaclusters)))
    
    # Compute TOMTOM matches for all motifs in the metacluster at once
    tomtom_matches = match_motifs_to_database(motif_pfms_short[i],
                                              top_k=num_matches_to_keep,
                                              temp_dir=modisco_out_path)
    
    for j in range(len(motif_pfms[i])):
        display(vdomh.h4("Motif %d/%d" % (j + 1, len(motif_pfms[i]))))
        # Show the motif's own hypothetical-contribution logo for reference
        viz_sequence.plot_weights(motif_hcwms[i][j])
    
        body = []
        for k, (match_name, match_pfm, match_qval) in enumerate(tomtom_matches[j]):
            # Render the matched database motif as an IC-scaled PWM logo
            fig = viz_sequence.plot_weights(pfm_to_pwm(match_pfm), return_fig=True)
            fig.tight_layout()
            if k < num_matches_to_show:
                body.append(
                    vdomh.tr(
                        vdomh.td(match_name),
                        vdomh.td(str(match_qval)),
                        vdomh.td(figure_to_vdom_image(fig))
                    )
                )
            else:
                # Beyond the display cutoff, list the hit without an image
                body.append(
                    vdomh.tr(
                        vdomh.td(match_name),
                        vdomh.td(str(match_qval)),
                        vdomh.td("Not shown")
                    )
                )
        if not body:
            display(vdomh.p("No TOMTOM matches passing threshold"))
        else:
            display(vdomh.table(header, vdomh.tbody(*body)))
        plt.close("all")

Metacluster 1/2

Motif 1/43

No TOMTOM matches passing threshold

Motif 2/43

Motif IDq-valPWM
MA1892.10.000106926
MA0685.20.000106926
MA0742.20.000106926
MA1511.20.000106926
MA0516.30.00013497

Motif 3/43

Motif IDq-valPWM
MA0076.26.83165e-08
MA0750.21.42243e-06
MA0764.35.14295e-05
MA0759.25.78582e-05
MA0156.36.17155e-05

Motif 4/43

Motif IDq-valPWM
MA0314.21.10289e-05
MA0060.31.10289e-05
MA1644.11.98287e-05
MA0502.20.0126678
MA0316.10.0126678

Motif 5/43

Motif IDq-valPWM
MA0506.29.35119e-07
MA1412.10.136911
MA1560.10.217149
MA1826.10.336469
MA0103.30.397279

Motif 6/43

Motif IDq-valPWM
MA1899.10.0127756
MA0967.10.0478106
MA0609.20.0490066
MA1348.10.0490066
MA0605.20.0490066

Motif 7/43

Motif IDq-valPWM
MA1833.14.93464e-09
MA2022.14.93464e-09
MA1817.12.53634e-07
MA1819.14.02546e-07
MA1239.18.08029e-07

Motif 8/43

Motif IDq-valPWM
MA1257.12.16047e-09
MA1833.14.42229e-09
MA1262.11.48394e-08
MA1239.11.48394e-08
MA1832.12.77183e-08

Motif 9/43

Motif IDq-valPWM
MA0975.13.02409e-05
MA0748.26.78398e-05
MA0998.10.0004982
MA0997.10.00138409
MA1004.10.0032287

Motif 10/43

Motif IDq-valPWM
MA1821.10.0236972
MA1832.10.0236972
MA1819.10.0328354
MA0290.10.0328354
MA1049.10.0377078

Motif 11/43

Motif IDq-valPWM
MA0591.10.000577044
MA0501.10.000577044
MA0150.20.000672513
MA0089.20.000870298
MA1448.10.000870298

Motif 12/43

No TOMTOM matches passing threshold

Motif 13/43

No TOMTOM matches passing threshold

Motif 14/43

Motif IDq-valPWM
MA1053.10.00136248
MA1713.10.00136248
MA0997.10.00218146
MA1051.10.00218146
MA0567.10.0026741

Motif 15/43

Motif IDq-valPWM
MA1573.25.52997e-08
MA0088.20.0113958
MA1716.10.0175344
MA1625.10.0883468
MA0519.10.154637

Motif 16/43

Motif IDq-valPWM
MA0139.13.51532e-06
MA1102.21.90261e-05
MA1929.12.40469e-05
MA1930.17.53824e-05
MA0531.10.000265096

Motif 17/43

Motif IDq-valPWM
MA1833.11.66274e-05
MA1817.10.000144295
MA1820.10.000144295
MA1819.10.000144295
MA1880.10.000144295

Motif 18/43

Motif IDq-valPWM
MA0527.14.6615e-05

Motif 19/43

Motif IDq-valPWM
MA1279.10.00952036
MA1823.10.00952036
MA1268.10.00952036
MA1277.10.0106661
MA1278.10.0153707

Motif 20/43

Motif IDq-valPWM
MA1820.10.00183514
MA1819.10.00183514
MA1817.10.00183514
MA0975.10.00183514
MA1818.10.00472587

Motif 21/43

Motif IDq-valPWM
MA1819.10.000751368
MA1820.10.00103176
MA1833.10.00116874
MA0975.10.00116874
MA1818.10.00116874

Motif 22/43

Motif IDq-valPWM
MA1596.10.0116798
MA1513.10.213066
MA1712.10.213066
MA0587.10.213066
MA1961.10.213066

Motif 23/43

Motif IDq-valPWM
MA2022.11.75954e-08
MA1833.11.75954e-08
MA1819.11.75954e-08
MA1257.11.75954e-08
MA1817.11.41391e-07

Motif 24/43

No TOMTOM matches passing threshold

Motif 25/43

Motif IDq-valPWM
MA1927.10.0011929
MA1818.10.0645888
MA1262.10.0645888
MA0361.10.0645888
MA0299.10.0645888

Motif 26/43

No TOMTOM matches passing threshold

Motif 27/43

Motif IDq-valPWM
MA1650.10.0453795
MA0597.20.0453795
MA1976.10.0453795
MA1615.10.0453795
MA1833.10.0453795

Motif 28/43

Motif IDq-valPWM
MA1596.10.000329752
MA1587.10.414871

Motif 29/43

No TOMTOM matches passing threshold

Motif 30/43

Motif IDq-valPWM
MA1573.22.49516e-05
MA1687.10.298448

Motif 31/43

No TOMTOM matches passing threshold

Motif 32/43

Motif IDq-valPWM
MA0527.10.179869

Motif 33/43

No TOMTOM matches passing threshold

Motif 34/43

Motif IDq-valPWM
MA0443.10.132946
MA0342.10.132946
MA0341.10.132946
MA0366.10.132946
MA0452.20.132946

Motif 35/43

Motif IDq-valPWM
MA0750.20.000363736
MA1893.10.0039404
MA1892.10.0039404
MA0076.20.00680219
MA1880.10.00790561

Motif 36/43

Motif IDq-valPWM
MA0506.20.00540085
MA1834.10.0840535
MA1099.20.0840535
MA1816.10.0840535
MA1412.10.0840535

Motif 37/43

Motif IDq-valPWM
MA1712.10.0273475
MA1961.10.0273475
MA0146.20.0296815
MA1513.10.0398165
MA1833.10.0460215

Motif 38/43

No TOMTOM matches passing threshold

Motif 39/43

Motif IDq-valPWM
MA0916.10.0677908
MA0026.10.0677908
MA1708.10.0677908
MA0076.20.186317
MA0645.10.186317

Motif 40/43

Motif IDq-valPWM
MA0997.10.0857825
MA0976.20.0857825
MA1820.10.0857825
MA1102.20.0857825
MA0123.10.0857825

Motif 41/43

Motif IDq-valPWM
MA1050.10.124205
MA1066.10.124205
MA1097.10.290662
MA1095.10.290662
MA1098.10.290662

Motif 42/43

No TOMTOM matches passing threshold

Motif 43/43

Motif IDq-valPWM
MA1929.10.314707

Metacluster 2/2

Motif 1/3

No TOMTOM matches passing threshold

Motif 2/3

Motif IDq-valPWM
MA1892.10.000106926
MA0685.20.000106926
MA0742.20.000106926
MA1511.20.000106926
MA0516.30.00013497

Motif 3/3

Motif IDq-valPWM
MA0076.26.83165e-08
MA0750.21.42243e-06
MA0764.35.14295e-05
MA0759.25.78582e-05
MA0156.36.17155e-05
In [ ]: