# Filepaths and Hard-coded Defaults
run = 1
timestamp = "2022-02-16_03-46-35"
cell_type = "K562/ENCSR261KBX"
bias_model = False
in_window = 2114
out_window = 1000
input_length, profile_length = in_window, out_window
shap_score_center_size = 500  # width (bp) of the centered window the SHAP scores cover
profile_display_center_size = 400
proj_root = "/users/kcochran/projects/procap_models/"
sequence_path = proj_root + "genomes/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta"
chrom_sizes = proj_root + "genomes/hg38.chrom.sizes.withrRNA"
data_dir = proj_root + "data/procap/processed/" + cell_type + "/"
plus_bw_path = data_dir + "final.5prime.pos.bigWig"
minus_bw_path = data_dir + "final.5prime.neg.bigWig"
if bias_model:
    save_dir = proj_root + "model_out/procap_bias/bpnetlite_basic/" + cell_type + "/"
    val_save_path = save_dir + timestamp + "_run" + str(run) + "_val"
    val_peak_path = data_dir + "peaks_uni_and_bi_val.bed.gz"
else:
    save_dir = proj_root + "model_out/procap/bpnetlite_basic/" + cell_type + "/"
    val_save_path = save_dir + timestamp + "_run" + str(run) + "_train_and_val"
    val_peak_path = data_dir + "peaks_uni_and_bi_train_and_val.bed.gz"
attr_save_path = save_dir + timestamp + "_run" + str(run) + "_deepshap"
modisco_out_path = attr_save_path.replace("deepshap", "modisco") + "/"
# Imports, Plotting Defaults
import os, sys
import numpy as np
import h5py
import pandas as pd
import gzip
import sklearn.cluster
import scipy.cluster.hierarchy
import modisco
import viz_sequence
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
plot_params = {
    "figure.titlesize": 22,
    "axes.titlesize": 22,
    "axes.labelsize": 20,
    "legend.fontsize": 18,
    "xtick.labelsize": 16,
    "ytick.labelsize": 16,
    "font.weight": "bold"
}
plt.rcParams.update(plot_params)
import io
import base64
import urllib
import vdom.helpers as vdomh
from IPython.display import display
from tqdm.notebook import tqdm  # tqdm.tqdm_notebook is deprecated as of tqdm 5.0
# Modisco Task-Specific Filepaths
scoring_type = "counts"
assert scoring_type in ["profile", "counts"], scoring_type
if scoring_type == "profile":
    scores_path = attr_save_path + "_prof.npy"
    onehot_scores_path = attr_save_path + "_prof_onehot.npy"
    save_path = modisco_out_path + "results_allChroms_prof_slice500.hdf5"
    seqlet_path = modisco_out_path + "seqlets_prof.txt"
else:
    scores_path = attr_save_path + "_count.npy"
    onehot_scores_path = attr_save_path + "_count_onehot.npy"
    save_path = modisco_out_path + "results_allChroms_count_slice500.hdf5"
    seqlet_path = modisco_out_path + "seqlets_count.txt"

assert os.path.exists(scores_path), scores_path
assert os.path.exists(onehot_scores_path), onehot_scores_path
# Load in True Profiles and Sequences
sys.path.append('../1_train_models')
from data_loading import extract_peaks
one_hot_seqs, true_profs = extract_peaks(
    sequence_path, plus_bw_path, minus_bw_path, val_peak_path,
    in_window, out_window, max_jitter=0, verbose=True
)
one_hot_seqs = one_hot_seqs.swapaxes(1, 2)
# Slice sequences down to the centered window that the SHAP scores cover
center_start = in_window // 2 - shap_score_center_size // 2
center_end = in_window // 2 + shap_score_center_size // 2
one_hot_seqs = one_hot_seqs[:, center_start:center_end, :]
Reading FASTA: 100%|██████████| 24/24 [00:08<00:00, 2.96it/s]
Loading Peaks: 27000it [00:25, 1043.28it/s]
# Load in Coordinates of Examples
def load_coords(peak_bed):
    if peak_bed.endswith(".gz"):
        with gzip.open(peak_bed) as f:
            lines = [line.decode().split() for line in f]
    else:
        with open(peak_bed) as f:
            lines = [line.split() for line in f]
    coords = []
    for line in lines:
        chrom, peak_start, peak_end = line[0], int(line[1]), int(line[2])
        mid = (peak_start + peak_end) // 2
        window_start = mid - in_window // 2
        window_end = mid + in_window // 2
        coords.append((chrom, window_start, window_end))
    return coords
coords = load_coords(val_peak_path)
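As a quick sanity check on the windowing arithmetic above (a minimal sketch using toy peak coordinates, not real peaks): the extracted window is always exactly `in_window` bp wide, regardless of the peak's own width.

# Sanity check (toy coordinates): windows are always exactly in_window bp wide
for peak_start, peak_end in [(1000, 1350), (1000, 1351)]:
    mid = (peak_start + peak_end) // 2
    assert (mid + in_window // 2) - (mid - in_window // 2) == in_window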
# Import SHAP scores, predicted profiles
hyp_scores = np.load(scores_path).swapaxes(1, 2)
hyp_scores = hyp_scores[:, center_start:center_end, :]
pred_profs = np.exp(np.load(val_save_path + ".profs.npy"))
#pred_counts = np.load(val_save_path + ".counts.npy")
# Load modisco results object
def import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs):
    """
    Imports the TF-MoDISco results object.
    Arguments:
        `tfm_results_path`: path to HDF5 containing TF-MoDISco results
        `hyp_scores`: hypothetical importance scores used for this run
        `one_hot_seqs`: input sequences used for this run
    """
    # Both inputs should already be cut down to `shap_score_center_size`
    act_scores = hyp_scores * one_hot_seqs
    track_set = modisco.tfmodisco_workflow.workflow.prep_track_set(
        task_names=["task0"],
        contrib_scores={"task0": act_scores},
        hypothetical_contribs={"task0": hyp_scores},
        one_hot=one_hot_seqs
    )
    with h5py.File(tfm_results_path, "r") as f:
        return modisco.tfmodisco_workflow.workflow.TfModiscoResults.from_hdf5(
            f, track_set=track_set
        )

tfm_obj = import_tfmodisco_results(save_path, hyp_scores, one_hot_seqs)
def extract_profiles_and_coords(
    seqlets_arr, one_hot_seqs, hyp_scores, true_profs, pred_profs, pred_coords,
    input_length, profile_length, input_center_cut_size, profile_center_cut_size,
    task_index=None
):
    """
    From a TF-MoDISco pattern's seqlets and alignments, extracts the true and
    predicted profiles of the model, as well as the set of coordinates for the
    seqlets.
    Arguments:
        `seqlets_arr`: a TF-MoDISco pattern's seqlets object array (N-array)
        `one_hot_seqs`: an N x R x 4 array of input sequences, where R is the
            centered cut size
        `hyp_scores`: an N x R x 4 array of hypothetical importance scores
        `true_profs`: an N x T x O x 2 array of true profile counts
        `pred_profs`: an N x T x O x 2 array of predicted profile probabilities
        `pred_coords`: an N x 3 object array of coordinates for the input
            sequences underlying the predictions
        `input_length`: length of original input sequences, I
        `profile_length`: length of profile predictions, O
        `input_center_cut_size`: centered cut size of SHAP scores used, R
        `profile_center_cut_size`: size to cut profiles to when returning them, P
        `task_index`: index of task to focus on for profiles; if None, returns
            profiles for all tasks
    Returns an N x (T or 1) x P x 2 array of true profile counts, an
    N x (T or 1) x P x 2 array of predicted profile probabilities, an N x Q x 4
    array of one-hot seqlet sequences, an N x Q x 4 array of hypothetical seqlet
    importance scores, and an N x 3 object array of seqlet coordinates, where P
    is the profile cut size and Q is the seqlet length. Returned profiles are
    centered at the same center as the seqlets.
    Note that the seqlet example indices must match the indices into the N
    examples exactly; these must be the exact sequences used to compute the
    original SHAP scores.
    """
    true_seqlet_profs, pred_seqlet_profs = [], []
    seqlet_seqs, seqlet_hyps, seqlet_coords, rcs = [], [], [], []

    def seqlet_coord_to_profile_coord(seqlet_coord):
        return seqlet_coord + ((input_length - input_center_cut_size) // 2) - \
            ((input_length - profile_length) // 2)

    def seqlet_coord_to_input_coord(seqlet_coord):
        return seqlet_coord + ((input_length - input_center_cut_size) // 2)

    # For each seqlet, fetch the true/predicted profiles
    for seqlet in seqlets_arr:
        coord_index = seqlet.coor.example_idx
        seqlet_start = seqlet.coor.start
        seqlet_end = seqlet.coor.end
        seqlet_rc = seqlet.coor.is_revcomp
        rcs.append(seqlet_rc)
        # Get indices of profile to cut out
        seqlet_center = (seqlet_start + seqlet_end) // 2
        prof_center = seqlet_coord_to_profile_coord(seqlet_center)
        prof_start = prof_center - (profile_center_cut_size // 2)
        prof_end = prof_start + profile_center_cut_size
        if task_index is None or true_profs.shape[1] == 1:
            # Use all tasks if the predictions only have 1 task to begin with
            task_start, task_end = None, None
        else:
            task_start, task_end = task_index, task_index + 1
        true_prof = true_profs[coord_index, task_start:task_end, prof_start:prof_end]  # (T or 1) x P x 2
        pred_prof = pred_profs[coord_index, task_start:task_end, prof_start:prof_end]  # (T or 1) x P x 2
        true_seqlet_profs.append(true_prof)
        pred_seqlet_profs.append(pred_prof)
        # The one-hot sequences and hypothetical scores are assumed to already
        # be cut/centered, so their indices match the seqlet indices directly
        if seqlet_rc:
            seqlet_seqs.append(np.flip(one_hot_seqs[coord_index, seqlet_start:seqlet_end], axis=(0, 1)))
            seqlet_hyps.append(np.flip(hyp_scores[coord_index, seqlet_start:seqlet_end], axis=(0, 1)))
        else:
            seqlet_seqs.append(one_hot_seqs[coord_index, seqlet_start:seqlet_end])
            seqlet_hyps.append(hyp_scores[coord_index, seqlet_start:seqlet_end])
        # Get the coordinates of the seqlet based on the input coordinates
        inp_start = seqlet_coord_to_input_coord(seqlet_start)
        inp_end = seqlet_coord_to_input_coord(seqlet_end)
        chrom, start, _ = pred_coords[coord_index]
        seqlet_coords.append([chrom, start + inp_start, start + inp_end])

    return np.stack(true_seqlet_profs), np.stack(pred_seqlet_profs), \
        np.stack(seqlet_seqs), np.stack(seqlet_hyps), \
        np.array(seqlet_coords, dtype=object), np.array(rcs)
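To make the two coordinate transforms above concrete, here is a minimal worked check using this notebook's defaults (I = 2114, R = 500, O = 1000); the numbers are illustrative only.

# Worked check of the coordinate transforms with this notebook's defaults:
# I = in_window = 2114, R = shap_score_center_size = 500, O = out_window = 1000
I, R, O = in_window, shap_score_center_size, out_window
seqlet_center = R // 2                       # center of the 500-bp SHAP window
input_center = seqlet_center + (I - R) // 2  # 250 + 807 = 1057, the input center
prof_center = input_center - (I - O) // 2    # 1057 - 557 = 500, the profile center
assert input_center == I // 2 and prof_center == O // 2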
def plot_profiles(seqlet_true_profs, seqlet_pred_profs, kmeans_clusters=5, save_path=None):
    """
    Plots the given profiles with a heatmap.
    Arguments:
        `seqlet_true_profs`: an N x O x 2 NumPy array of true profiles, either
            as raw counts or probabilities (they will be normalized)
        `seqlet_pred_profs`: an N x O x 2 NumPy array of predicted profiles,
            either as raw counts or probabilities (they will be normalized)
        `kmeans_clusters`: minimum number of clusters for the profile heatmaps;
            the actual number is scaled up with the number of profiles
        `save_path`: if provided, save the profile matrices here
    Returns the full-window figure.
    """
    assert len(seqlet_true_profs.shape) == 3
    assert seqlet_true_profs.shape == seqlet_pred_profs.shape
    num_profs, width, _ = seqlet_true_profs.shape

    # First, normalize the profiles along the output profile dimension
    def normalize(arr, axis=0):
        arr_sum = np.sum(arr, axis=axis, keepdims=True)
        arr_sum[arr_sum == 0] = 1  # If 0, keep 0 as the quotient instead of dividing by 0
        return arr / arr_sum
    true_profs_norm = normalize(seqlet_true_profs, axis=1)
    pred_profs_norm = normalize(seqlet_pred_profs, axis=1)

    # Compute the mean profiles across all examples
    true_profs_mean = np.mean(true_profs_norm, axis=0)
    pred_profs_mean = np.mean(pred_profs_norm, axis=0)

    # Perform k-means clustering on the predicted profiles, with the strands
    # pooled; the number of clusters scales with the number of profiles, with
    # `kmeans_clusters` as the minimum
    kmeans_clusters = max(kmeans_clusters, num_profs // 50)
    kmeans = sklearn.cluster.KMeans(n_clusters=kmeans_clusters)
    cluster_assignments = kmeans.fit_predict(
        np.reshape(pred_profs_norm, (pred_profs_norm.shape[0], -1))
    )

    # Perform hierarchical clustering on the cluster centers to determine optimal ordering
    kmeans_centers = kmeans.cluster_centers_
    cluster_order = scipy.cluster.hierarchy.leaves_list(
        scipy.cluster.hierarchy.optimal_leaf_ordering(
            scipy.cluster.hierarchy.linkage(kmeans_centers, method="centroid"),
            kmeans_centers
        )
    )

    # Order the profiles so that the cluster assignments follow the optimal ordering
    cluster_inds = []
    for cluster_id in cluster_order:
        cluster_inds.append(np.where(cluster_assignments == cluster_id)[0])
    cluster_inds = np.concatenate(cluster_inds)

    # Compute a matrix of profiles, normalized to the maximum height, ordered by clusters
    def make_profile_matrix(flat_profs, order_inds):
        matrix = flat_profs[order_inds]
        maxes = np.max(matrix, axis=1, keepdims=True)
        maxes[maxes == 0] = 1  # If 0, keep 0 as the quotient instead of dividing by 0
        return matrix / maxes
    true_matrix = make_profile_matrix(true_profs_norm, cluster_inds)
    pred_matrix = make_profile_matrix(pred_profs_norm, cluster_inds)

    if save_path:
        np.savez_compressed(
            save_path,
            true_profs_mean=true_profs_mean, pred_profs_mean=pred_profs_mean,
            true_matrix=true_matrix, pred_matrix=pred_matrix
        )

    # Create a figure with the right dimensions
    mean_height = 4
    heatmap_height = min(num_profs * 0.004, 8)
    fig_height = mean_height + (2 * heatmap_height)
    fig, ax = plt.subplots(
        3, 2, figsize=(16, fig_height), sharex=True,
        gridspec_kw={
            "width_ratios": [1, 1],
            "height_ratios": [
                mean_height / fig_height,
                heatmap_height / fig_height,
                heatmap_height / fig_height
            ]
        }
    )

    # Plot the average predictions
    ax[0, 0].plot(true_profs_mean[:, 0], color="darkslateblue")
    ax[0, 0].plot(-true_profs_mean[:, 1], color="darkorange")
    ax[0, 1].plot(pred_profs_mean[:, 0], color="darkslateblue")
    ax[0, 1].plot(-pred_profs_mean[:, 1], color="darkorange")

    # Set axes on average predictions
    max_mean_val = max(np.max(true_profs_mean), np.max(pred_profs_mean))
    mean_ylim = max_mean_val * 1.05  # Leave 5% headroom
    ax[0, 0].set_title("True profiles")
    ax[0, 0].set_ylabel("Average probability")
    ax[0, 1].set_title("Predicted profiles")
    for j in (0, 1):
        ax[0, j].set_ylim(-mean_ylim, mean_ylim)
        ax[0, j].label_outer()

    # Plot the heatmaps
    ax[1, 0].imshow(true_matrix[:, :, 0], interpolation="nearest", aspect="auto", cmap="Blues")
    ax[1, 1].imshow(pred_matrix[:, :, 0], interpolation="nearest", aspect="auto", cmap="Blues")
    ax[2, 0].imshow(true_matrix[:, :, 1], interpolation="nearest", aspect="auto", cmap="Oranges")
    ax[2, 1].imshow(pred_matrix[:, :, 1], interpolation="nearest", aspect="auto", cmap="Oranges")

    # Set axes on heatmaps
    for i in (1, 2):
        for j in (0, 1):
            ax[i, j].set_yticks([])
            ax[i, j].set_yticklabels([])
            ax[i, j].label_outer()
    width = true_matrix.shape[1]
    delta = 100
    num_deltas = (width // 2) // delta
    labels = list(range(
        max(-width // 2, -num_deltas * delta),
        min(width // 2, num_deltas * delta) + 1, delta
    ))
    tick_locs = [label + max(width // 2, num_deltas * delta) for label in labels]
    for j in (0, 1):
        ax[2, j].set_xticks(tick_locs)
        ax[2, j].set_xticklabels(labels)
        ax[2, j].set_xlabel("Distance from seqlet center (bp)")
    fig.tight_layout()
    plt.show()

    # Create a second, zoomed-in figure of just the mean profiles
    fig2, ax = plt.subplots(
        1, 2, figsize=(16, mean_height), sharex=True,
        gridspec_kw={"width_ratios": [1, 1]}
    )

    # Plot the average predictions around the seqlet center
    mid = true_profs_mean.shape[0] // 2
    zoom_width = 60
    start = mid - zoom_width // 2
    end = mid + zoom_width // 2
    ax[0].plot(true_profs_mean[start:end, 0], color="darkslateblue")
    ax[0].plot(-true_profs_mean[start:end, 1], color="darkorange")
    ax[1].plot(pred_profs_mean[start:end, 0], color="darkslateblue")
    ax[1].plot(-pred_profs_mean[start:end, 1], color="darkorange")

    # Set axes on average predictions
    max_mean_val = max(np.max(true_profs_mean[start:end]), np.max(pred_profs_mean[start:end]))
    mean_ylim = max_mean_val * 1.05  # Leave 5% headroom
    ax[0].set_title("True profiles")
    ax[0].set_ylabel("Average probability")
    ax[1].set_title("Predicted profiles")
    delta = 10
    num_deltas = (zoom_width // 2) // delta
    labels = list(range(
        max(-zoom_width // 2, -num_deltas * delta),
        min(zoom_width // 2, num_deltas * delta) + 1, delta
    ))
    tick_locs = [label + max(zoom_width // 2, num_deltas * delta) for label in labels]
    for j in (0, 1):
        ax[j].set_ylim(-mean_ylim, mean_ylim)
        ax[j].label_outer()
        ax[j].set_xticks(tick_locs)
        ax[j].set_xticklabels(labels)
        ax[j].set_xlabel("Distance from seqlet center (bp)")
    fig2.tight_layout(w_pad=4, rect=(0.1, 0, 0.95, 1))
    plt.show()

    return fig
def get_summit_distances(coords, peak_coords):
    """
    Given a set of coordinates, computes the signed distance of the center of
    each coordinate to the nearest peak summit.
    Arguments:
        `coords`: an N x 3 object array of coordinates
        `peak_coords`: a list of (chrom, start, end) peak coordinates; each
            peak's summit is taken to be its midpoint
    Returns an N-array of integers: the signed distance of each coordinate
    midpoint to the nearest peak summit.
    """
    peak_chroms = [coord[0] for coord in peak_coords]
    peak_summits = [(coord[1] + coord[2]) // 2 for coord in peak_coords]
    peak_table = pd.DataFrame({"chrom": peak_chroms, "summit": peak_summits})
    chroms = coords[:, 0]
    midpoints = (coords[:, 1] + coords[:, 2]) // 2
    dists = []
    for i in range(len(coords)):
        chrom = chroms[i]
        midpoint = midpoints[i]
        rows = peak_table[peak_table["chrom"] == chrom]
        dist_arr = (midpoint - rows["summit"]).values
        min_dist = dist_arr[np.argmin(np.abs(dist_arr))]
        dists.append(min_dist)
    return np.array(dists)
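A minimal toy example (hypothetical coordinates) of the signed-distance convention: the nearest summit is chosen by absolute distance, but the sign is kept.

# Toy example (hypothetical coordinates): seqlet midpoint 1,000,150 vs. summits
# at 1,000,100 and 2,000,000 -> nearest summit is the first, signed distance +50
toy_seqlets = np.array([["chr1", 1000100, 1000200]], dtype=object)
toy_peaks = [("chr1", 1000000, 1000200), ("chr1", 1999900, 2000100)]
assert get_summit_distances(toy_seqlets, toy_peaks)[0] == 50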
def plot_summit_dists(summit_dists):
    """
    Plots the distribution of seqlet distances to summits.
    Arguments:
        `summit_dists`: the array of distances as returned by
            `get_summit_distances`
    Returns the figure.
    """
    fig = plt.figure(figsize=(8, 6))
    num_bins = max(len(summit_dists) // 30, 20)
    plt.hist(summit_dists, bins=num_bins, color="purple")
    plt.title("Histogram of distance of seqlets to peak summits")
    plt.xlabel("Signed distance from seqlet center to nearest peak summit (bp)")
    plt.show()
    return fig
BACKGROUND_FREQS = np.array([0.25, 0.25, 0.25, 0.25])

def pfm_info_content(track, pseudocount=0.001):
    """
    Given an L x 4 track, computes the information content (in bits, relative
    to `BACKGROUND_FREQS`) for each base and returns it as an L-array.
    """
    num_bases = track.shape[1]
    # Normalize track to probabilities along the base axis, with a pseudocount
    track_norm = (track + pseudocount) / (np.sum(track, axis=1, keepdims=True) + (num_bases * pseudocount))
    ic = track_norm * np.log2(track_norm / np.expand_dims(BACKGROUND_FREQS, axis=0))
    return np.sum(ic, axis=1)

def pfm_to_pwm(pfm):
    """
    Scales an L x 4 PFM by its per-position information content, for plotting.
    """
    ic = pfm_info_content(pfm)
    return pfm * np.expand_dims(ic, axis=1)
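A quick sanity check of the information content calculation against the uniform background: a uniform column carries 0 bits, while a fully determined base approaches 2 bits (slightly less, due to the pseudocount).

# Sanity check: uniform column -> 0 bits; deterministic column -> just under 2 bits
toy_pfm = np.array([[0.25, 0.25, 0.25, 0.25],
                    [1.00, 0.00, 0.00, 0.00]])
ic = pfm_info_content(toy_pfm)
assert np.isclose(ic[0], 0.0) and 1.9 < ic[1] < 2.0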
def trim_motif_by_ic(pfm, motif, min_ic=0.2, pad=0):
    """
    Given the PFM and motif (both L x 4 arrays; the motif may be the PFM
    itself), trims `motif` by cutting off flanks of low information content in
    `pfm`. `min_ic` is the minimum required information content. If specified,
    the trimmed motif will be extended on either side by `pad` bases.
    If no base passes the `min_ic` threshold, no trimming is done.
    """
    # Trim motif based on information content
    ic = pfm_info_content(pfm)
    pass_inds = np.where(ic >= min_ic)[0]  # Cut off flanks with less than min_ic IC
    if not pass_inds.size:
        return motif
    # Expand trimming to +/- pad bp on either side
    start, end = max(0, np.min(pass_inds) - pad), min(len(pfm), np.max(pass_inds) + pad + 1)
    return motif[start:end]
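And a small sketch of the trimming behavior on a toy PFM: uniform (zero-IC) flanks are cut away, and `pad` re-expands the trimmed window symmetrically.

# Toy trimming example: 3 uniform flanking bases on each side of a 3-bp
# deterministic core; trimming keeps only the core, and pad=2 re-expands it
flank = np.full((3, 4), 0.25)
core = np.eye(4)[[0, 2, 3]]  # deterministic A, G, T positions
toy_pfm = np.concatenate([flank, core, flank])
assert len(trim_motif_by_ic(toy_pfm, toy_pfm)) == 3
assert len(trim_motif_by_ic(toy_pfm, toy_pfm, pad=2)) == 7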
def figure_to_vdom_image(figure):
    buf = io.BytesIO()
    figure.savefig(buf, format="png")
    buf.seek(0)
    string = base64.b64encode(buf.read())
    return vdomh.div(
        vdomh.img(src="data:image/png;base64," + urllib.parse.quote(string)),
        style={"display": "inline-block"}
    )
motif_pfms, motif_hcwms, motif_cwms = [], [], []  # Save the trimmed PFMs, hCWMs, and CWMs
motif_pfms_short = []  # PFMs that are trimmed even further (for TOMTOM)
num_seqlets = []  # Number of seqlets for each motif
motif_seqlets = []  # Save seqlets of each motif

metaclusters = tfm_obj.metacluster_idx_to_submetacluster_results
num_metaclusters = len(metaclusters.keys())
for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
    metacluster = metaclusters[metacluster_key]
    display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
    patterns = metacluster.seqlets_to_patterns_result.patterns
    if not patterns:
        break
    motif_pfms.append([])
    motif_hcwms.append([])
    motif_cwms.append([])
    motif_pfms_short.append([])
    num_seqlets.append([])
    motif_seqlets.append([])
    num_patterns = len(patterns)
    for pattern_i, pattern in enumerate(patterns):
        seqlets = pattern.seqlets
        display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
        display(vdomh.p("%d seqlets" % len(seqlets)))
        pfm = pattern["sequence"].fwd
        hcwm = pattern["task0_hypothetical_contribs"].fwd
        cwm = pattern["task0_contrib_scores"].fwd
        pfm_fig = viz_sequence.plot_weights(pfm, subticks_frequency=10, return_fig=True)
        hcwm_fig = viz_sequence.plot_weights(hcwm, subticks_frequency=10, return_fig=True)
        cwm_fig = viz_sequence.plot_weights(cwm, subticks_frequency=10, return_fig=True)
        pfm_fig.tight_layout()
        hcwm_fig.tight_layout()
        cwm_fig.tight_layout()
        motif_table = vdomh.table(
            vdomh.tr(
                vdomh.td("Sequence (PFM)"),
                vdomh.td(figure_to_vdom_image(pfm_fig))
            ),
            vdomh.tr(
                vdomh.td("Hypothetical contributions (hCWM)"),
                vdomh.td(figure_to_vdom_image(hcwm_fig))
            ),
            vdomh.tr(
                vdomh.td("Actual contributions (CWM)"),
                vdomh.td(figure_to_vdom_image(cwm_fig))
            )
        )
        display(motif_table)
        plt.close("all")  # Remove all standing figures
        # Trim motif based on information content
        short_trimmed_pfm = trim_motif_by_ic(pfm, pfm)
        motif_pfms_short[-1].append(short_trimmed_pfm)
        # Expand trimming to +/- 4 bp on either side
        trimmed_pfm = trim_motif_by_ic(pfm, pfm, pad=4)
        trimmed_hcwm = trim_motif_by_ic(pfm, hcwm, pad=4)
        trimmed_cwm = trim_motif_by_ic(pfm, cwm, pad=4)
        motif_pfms[-1].append(trimmed_pfm)
        motif_hcwms[-1].append(trimmed_hcwm)
        motif_cwms[-1].append(trimmed_cwm)
        num_seqlets[-1].append(len(seqlets))
        seqlet_true_profs, seqlet_pred_profs, seqlet_seqs, seqlet_hyps, seqlet_coords, rcs = \
            extract_profiles_and_coords(
                seqlets, one_hot_seqs, hyp_scores, true_profs, pred_profs, coords,
                input_length, profile_length, shap_score_center_size,
                profile_display_center_size, task_index=None
            )
        motif_seqlets[-1].append((seqlet_seqs, seqlet_hyps))
        seqlet_true_profs = seqlet_true_profs.swapaxes(1, 2)
        seqlet_pred_profs = seqlet_pred_profs.swapaxes(1, 2)
        # Flip profiles of reverse-complement seqlets along position and strand
        for i in range(len(rcs)):
            if rcs[i]:
                seqlet_true_profs[i, :, :] = seqlet_true_profs[i, ::-1, ::-1]
                seqlet_pred_profs[i, :, :] = seqlet_pred_profs[i, ::-1, ::-1]
        # Sanity check (disabled): the PFM derived from the seqlets should match
        # the PFM stored in the pattern
        # assert np.allclose(np.sum(seqlet_seqs, axis=0) / len(seqlet_seqs), pattern["sequence"].fwd)
        prof_fig = plot_profiles(seqlet_true_profs, seqlet_pred_profs)
        summit_dists = get_summit_distances(seqlet_coords, coords)
        dist_fig = plot_summit_dists(summit_dists)
[Output: for each pattern, the seqlet count followed by tables of the PFM, hCWM,
and CWM logos (logo images not preserved here). Seqlet counts per pattern, in
display order: 5940, 4352, 3419, 2967, 2116, 925, 697, 657, 632, 590, 580, 192,
182, 143, 92, 87, and 56; then 469, 240, 206, 198, 185, 180, 179, 174, 171,
171, 166, 155, 144, 120, 114, 114, and 102.]
# TOMTOM Motif Matching
import os
import subprocess
import numpy as np
import pandas as pd
import tempfile

BACKGROUND_FREQS = np.array([0.25, 0.25, 0.25, 0.25])
DATABASE_PATH = "/users/kcochran/projects/procap_models/annotations/JASPAR2022_CORE_pfms.meme"
def import_database_pfms(database_path):
    """
    Imports the database of PFMs by reading through the entire database and
    constructing a dictionary mapping motif IDs to NumPy arrays of PFMs.
    """
    motif_dict = {}
    with open(database_path, "r") as f:
        try:
            while True:
                line = next(f)
                if line.startswith("MOTIF"):
                    key = line.strip().split()[1]
                    header = next(f)
                    motif_width = int(header.split()[5])  # the w= field
                    motif = np.empty((motif_width, 4))
                    for i in range(motif_width):
                        motif[i] = np.array([
                            float(x) for x in next(f).strip().split()
                        ])
                    motif_dict[key] = motif
        except StopIteration:
            pass
    return motif_dict
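For reference, this parser assumes each database entry follows the standard MEME motif layout, with the motif width appearing as the sixth whitespace-delimited token (the `w=` value) of the header line immediately after the `MOTIF` line. A hypothetical entry:

# Hypothetical MEME entry illustrating the layout the parser expects:
#   MOTIF MA0139.1 CTCF
#   letter-probability matrix: alength= 4 w= 2 nsites= 100 E= 0
#   0.1 0.3 0.2 0.4
#   0.25 0.25 0.25 0.25
demo_header = "letter-probability matrix: alength= 4 w= 2 nsites= 100 E= 0"
assert int(demo_header.split()[5]) == 2  # header.split()[5] is the w= value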
def export_pfms_to_meme_format(
    pfms, outfile, background_freqs=None, names=None
):
    """
    Exports a set of PFMs to MEME motif format, including the background
    frequencies (defaulting to `BACKGROUND_FREQS`).
    Arguments:
        `pfms`: a list of L x 4 PFMs (where L can be different for each PFM)
        `outfile`: path to file to output the MEME-format PFMs
        `background_freqs`: background frequencies of A, C, G, T as a length-4
            NumPy array; defaults to `BACKGROUND_FREQS`
        `names`: if specified, a list of unique names to give to each PFM, must
            be parallel to `pfms`
    """
    if names is None:
        names = [str(i) for i in range(len(pfms))]
    else:
        assert len(names) == len(pfms)
        assert len(names) == len(np.unique(names))
    if background_freqs is None:
        background_freqs = BACKGROUND_FREQS
    os.makedirs(os.path.dirname(outfile), exist_ok=True)
    with open(outfile, "w") as f:
        f.write("MEME version 5\n\n")
        f.write("ALPHABET= ACGT\n\n")
        f.write("Background letter frequencies\n")
        f.write("A %f C %f G %f T %f\n\n" % tuple(background_freqs))
        for i in range(len(pfms)):
            pfm, name = pfms[i], names[i]
            f.write("MOTIF %s\n" % name)
            # Include the alength/w fields so the header is standard MEME format
            f.write("letter-probability matrix: alength= 4 w= %d\n" % len(pfm))
            for row in pfm:
                f.write(" ".join([str(freq) for freq in row]) + "\n")
            f.write("\n")
def run_tomtom(
    query_motif_file, target_motif_file, outdir, show_output=True
):
    """
    Runs TOMTOM given the target and query motif files. The default threshold
    of q < 0.5 is used to filter for matches.
    Arguments:
        `query_motif_file`: file containing motifs in MEME format, which will
            be the query motifs for which matches are found
        `target_motif_file`: file containing motifs in MEME format, which will
            be searched for matches
        `outdir`: path to directory to store results
        `show_output`: whether or not to show TOMTOM output
    """
    comm = ["tomtom"]
    comm += [query_motif_file, target_motif_file]
    comm += ["-oc", outdir]
    comm += ["-no-ssc"]
    comm += ["-dist", "pearson"]
    comm += ["-min-overlap", "5"]
    comm += ["-text"]
    subprocess.run(comm, capture_output=(not show_output))
def import_tomtom_results(tomtom_dir):
    """
    Imports the TOMTOM output directory as a Pandas DataFrame.
    Arguments:
        `tomtom_dir`: TOMTOM output directory, which contains the output file
            "tomtom.txt"
    Returns a Pandas DataFrame.
    """
    df = pd.read_csv(
        os.path.join(tomtom_dir, "tomtom.txt"), sep="\t", header=0,
        index_col=False, comment="#"
    )
    df.columns = [
        "Query_ID", "Target_ID", "Optimal_offset", "p-value", "E-value",
        "q-value", "Overlap", "Query_consensus", "Target_consensus",
        "Orientation"
    ]
    return df
def match_motifs_to_targets(
    query_pfms, target_pfms, temp_dir=None, show_tomtom_output=True
):
    """
    For each motif in the query PFMs, finds the best match among the target
    PFMs, based on TOMTOM q-value.
    Arguments:
        `query_pfms`: list of L x 4 PFMs to look for matches for
        `target_pfms`: list of L x 4 PFMs to match to
        `temp_dir`: a temporary directory to store intermediates; defaults to
            a randomly created directory
        `show_tomtom_output`: whether to show TOMTOM output when running
    Returns an array of indices parallel to `query_pfms`, where each index
    denotes the PFM within `target_pfms` that best matches the query PFM. If
    no good match is found (i.e. based on TOMTOM's threshold), the index will
    be -1.
    """
    if temp_dir is None:
        temp_dir_obj = tempfile.TemporaryDirectory()
        temp_dir = temp_dir_obj.name
    else:
        temp_dir_obj = None
    # Convert motifs to MEME format
    query_motif_file = os.path.join(temp_dir, "query_motifs.txt")
    target_motif_file = os.path.join(temp_dir, "target_motifs.txt")
    export_pfms_to_meme_format(query_pfms, query_motif_file)
    export_pfms_to_meme_format(target_pfms, target_motif_file)
    # Run TOMTOM
    tomtom_dir = os.path.join(temp_dir, "tomtom")
    run_tomtom(
        query_motif_file, target_motif_file, tomtom_dir,
        show_output=show_tomtom_output
    )
    # Find results, mapping each query motif to a target index; the
    # query/target IDs are the indices
    tomtom_table = import_tomtom_results(tomtom_dir)
    match_inds = []
    for i in range(len(query_pfms)):
        rows = tomtom_table[tomtom_table["Query_ID"] == i]
        if rows.empty:
            match_inds.append(-1)
            continue
        target_id = rows.loc[rows["q-value"].idxmin()]["Target_ID"]
        match_inds.append(target_id)
    if temp_dir_obj is not None:
        temp_dir_obj.cleanup()
    return np.array(match_inds)
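A usage sketch, left commented since it shells out to the `tomtom` binary; `reference_pfms` is a hypothetical list of target motifs:

# best_inds = match_motifs_to_targets(motif_pfms_short[0], reference_pfms,
#                                     show_tomtom_output=False)
# best_inds[i] is the index into reference_pfms of query i's best match, or -1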
def match_motifs_to_database(
    query_pfms, top_k=5, temp_dir=None, database_path=DATABASE_PATH,
    show_tomtom_output=True
):
    """
    For each motif in the query PFMs, finds the best matches in the TOMTOM
    database, ranked by TOMTOM q-value.
    Arguments:
        `query_pfms`: list of L x 4 PFMs to look for matches for
        `top_k`: the number of motifs to return based on q-value
        `temp_dir`: a temporary directory to store intermediates; defaults to
            a randomly created directory
        `database_path`: the path to a TOMTOM motif database; defaults to
            `DATABASE_PATH`
        `show_tomtom_output`: whether to show TOMTOM output when running
    Returns a list of lists of (motif name, motif PFM, q-value) tuples parallel
    to `query_pfms`, where each sublist of tuples is the set of motif names,
    motif PFMs (as NumPy arrays), and q-values for the corresponding query
    motif. Each sublist is sorted in ascending order by q-value. If fewer than
    `top_k` matches are found (based on TOMTOM's threshold), the returned
    sublist will be shorter (and may even be empty).
    """
    # First, import the database PFMs
    database_pfms = import_database_pfms(database_path)
    if temp_dir is None:
        temp_dir_obj = tempfile.TemporaryDirectory()
        temp_dir = temp_dir_obj.name
    else:
        temp_dir_obj = None
    # Convert motifs to MEME format
    query_motif_file = os.path.join(temp_dir, "query_motifs.txt")
    export_pfms_to_meme_format(query_pfms, query_motif_file)
    # Run TOMTOM
    tomtom_dir = os.path.join(temp_dir, "tomtom")
    run_tomtom(
        query_motif_file, database_path, tomtom_dir,
        show_output=show_tomtom_output
    )
    # Find results, mapping each query motif to its target matches; the query
    # IDs are the indices
    tomtom_table = import_tomtom_results(tomtom_dir)
    matches = []
    for i in range(len(query_pfms)):
        rows = tomtom_table[tomtom_table["Query_ID"] == i]
        if rows.empty:
            matches.append([])
            continue
        rows = rows.sort_values("q-value").head(top_k)
        tups = list(zip(rows["Target_ID"], rows["q-value"]))
        tups = [
            (tup[0], database_pfms[tup[0]], tup[1]) for tup in tups
        ]
        matches.append(tups)
    if temp_dir_obj is not None:
        temp_dir_obj.cleanup()
    return matches
# from tomtom import match_motifs_to_database

num_matches_to_keep = 5
num_matches_to_show = 5

header = vdomh.thead(
    vdomh.tr(
        vdomh.th("Motif ID", style={"text-align": "center"}),
        vdomh.th("q-val", style={"text-align": "center"}),
        vdomh.th("PWM", style={"text-align": "center"})
    )
)
for i in range(len(motif_pfms)):
    display(vdomh.h3("Metacluster %d/%d" % (i + 1, num_metaclusters)))
    # Compute TOMTOM matches for all motifs in the metacluster at once
    tomtom_matches = match_motifs_to_database(
        motif_pfms_short[i], top_k=num_matches_to_keep,
        temp_dir=modisco_out_path
    )
    for j in range(len(motif_pfms[i])):
        display(vdomh.h4("Motif %d/%d" % (j + 1, len(motif_pfms[i]))))
        viz_sequence.plot_weights(motif_hcwms[i][j])
        body = []
        for k, (match_name, match_pfm, match_qval) in enumerate(tomtom_matches[j]):
            fig = viz_sequence.plot_weights(pfm_to_pwm(match_pfm), return_fig=True)
            fig.tight_layout()
            if k < num_matches_to_show:
                body.append(
                    vdomh.tr(
                        vdomh.td(match_name),
                        vdomh.td(str(match_qval)),
                        vdomh.td(figure_to_vdom_image(fig))
                    )
                )
            else:
                body.append(
                    vdomh.tr(
                        vdomh.td(match_name),
                        vdomh.td(str(match_qval)),
                        vdomh.td("Not shown")
                    )
                )
        if not body:
            display(vdomh.p("No TOMTOM matches passing threshold"))
        else:
            display(vdomh.table(header, vdomh.tbody(*body)))
        plt.close("all")
[Output: per-motif hCWM logo followed by its top TOMTOM matches; the PWM logo
images are not preserved here, so the tables below show motif IDs and q-values
only.]

No TOMTOM matches passing threshold

Motif ID | q-val
---|---
MA1892.1 | 0.000106926
MA0685.2 | 0.000106926
MA0742.2 | 0.000106926
MA1511.2 | 0.000106926
MA0516.3 | 0.00013497

Motif ID | q-val
---|---
MA0076.2 | 6.83165e-08
MA0750.2 | 1.42243e-06
MA0764.3 | 5.14295e-05
MA0759.2 | 5.78582e-05
MA0156.3 | 6.17155e-05

Motif ID | q-val
---|---
MA0314.2 | 1.10289e-05
MA0060.3 | 1.10289e-05
MA1644.1 | 1.98287e-05
MA0502.2 | 0.0126678
MA0316.1 | 0.0126678

Motif ID | q-val
---|---
MA0506.2 | 9.35119e-07
MA1412.1 | 0.136911
MA1560.1 | 0.217149
MA1826.1 | 0.336469
MA0103.3 | 0.397279

Motif ID | q-val
---|---
MA1899.1 | 0.0127756
MA0967.1 | 0.0478106
MA0609.2 | 0.0490066
MA1348.1 | 0.0490066
MA0605.2 | 0.0490066

Motif ID | q-val
---|---
MA1833.1 | 4.93464e-09
MA2022.1 | 4.93464e-09
MA1817.1 | 2.53634e-07
MA1819.1 | 4.02546e-07
MA1239.1 | 8.08029e-07

Motif ID | q-val
---|---
MA1257.1 | 2.16047e-09
MA1833.1 | 4.42229e-09
MA1262.1 | 1.48394e-08
MA1239.1 | 1.48394e-08
MA1832.1 | 2.77183e-08

Motif ID | q-val
---|---
MA0975.1 | 3.02409e-05
MA0748.2 | 6.78398e-05
MA0998.1 | 0.0004982
MA0997.1 | 0.00138409
MA1004.1 | 0.0032287

Motif ID | q-val
---|---
MA1821.1 | 0.0236972
MA1832.1 | 0.0236972
MA1819.1 | 0.0328354
MA0290.1 | 0.0328354
MA1049.1 | 0.0377078

Motif ID | q-val
---|---
MA0591.1 | 0.000577044
MA0501.1 | 0.000577044
MA0150.2 | 0.000672513
MA0089.2 | 0.000870298
MA1448.1 | 0.000870298

No TOMTOM matches passing threshold

No TOMTOM matches passing threshold

Motif ID | q-val
---|---
MA1053.1 | 0.00136248
MA1713.1 | 0.00136248
MA0997.1 | 0.00218146
MA1051.1 | 0.00218146
MA0567.1 | 0.0026741

Motif ID | q-val
---|---
MA1573.2 | 5.52997e-08
MA0088.2 | 0.0113958
MA1716.1 | 0.0175344
MA1625.1 | 0.0883468
MA0519.1 | 0.154637

Motif ID | q-val
---|---
MA0139.1 | 3.51532e-06
MA1102.2 | 1.90261e-05
MA1929.1 | 2.40469e-05
MA1930.1 | 7.53824e-05
MA0531.1 | 0.000265096

Motif ID | q-val
---|---
MA1833.1 | 1.66274e-05
MA1817.1 | 0.000144295
MA1820.1 | 0.000144295
MA1819.1 | 0.000144295
MA1880.1 | 0.000144295

No TOMTOM matches passing threshold