import os
import sys
sys.path.append(os.path.abspath("/users/amtseng/tfmodisco/src/"))
from tfmodisco.run_tfmodisco import import_shap_scores, import_tfmodisco_results
from motif.read_motifs import pfm_info_content, pfm_to_pwm, trim_motif_by_ic
from motif.match_motifs import match_motifs_to_database
from util import figure_to_vdom_image, import_peak_table
import plot.viz_sequence as viz_sequence
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import vdom.helpers as vdomh
from IPython.display import display
import tqdm
# `tqdm.tqdm_notebook` is deprecated and slated for removal in tqdm 5.0;
# the supported equivalent is the `tqdm.notebook` submodule's `tqdm` class.
# Instantiating it here (as the original did) initializes the notebook-style
# progress-bar machinery.
import tqdm.notebook
tqdm.notebook.tqdm()
/users/amtseng/miniconda3/envs/tfmodisco-mini/lib/python3.7/site-packages/ipykernel_launcher.py:16: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` app.launch_new_instance()
<tqdm.notebook.tqdm_notebook at 0x7fcb2021d8d0>
# Plotting defaults
# Register the custom fonts with Matplotlib. `font_manager.createFontList`
# was deprecated in Matplotlib 3.2 (and later removed); the documented
# replacement is `FontManager.addfont`, which registers each font file so it
# can be selected by family name below.
for _font_path in font_manager.findSystemFonts(fontpaths="/users/amtseng/modules/fonts"):
    font_manager.fontManager.addfont(_font_path)
plot_params = {
    "figure.titlesize": 22,
    "axes.titlesize": 22,
    "axes.labelsize": 20,
    "legend.fontsize": 18,
    "xtick.labelsize": 16,
    "ytick.labelsize": 16,
    "font.family": "Roboto",
    "font.weight": "bold"
}
plt.rcParams.update(plot_params)
/users/amtseng/miniconda3/envs/tfmodisco-mini/lib/python3.7/site-packages/ipykernel_launcher.py:4: MatplotlibDeprecationWarning: The createFontList function was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use FontManager.addfont instead. after removing the cwd from sys.path.
# Define parameters/fetch arguments
# Required settings (raise KeyError if unset):
tf_name = os.environ["TFM_TF_NAME"]
shap_scores_path = os.environ["TFM_SHAP_PATH"]
tfm_results_path = os.environ["TFM_TFM_PATH"]
hyp_score_key = os.environ["TFM_HYP_SCORE_KEY"]
# Optional settings: fall back to None when the variable is unset
_task_index_env = os.environ.get("TFM_TASK_INDEX")
task_index = None if _task_index_env is None else int(_task_index_env)
tfm_motifs_cache_dir = os.environ.get("TFM_MOTIF_CACHE")
# Echo the configuration so the rendered report records what was run
print("TF name: %s" % tf_name)
print("DeepSHAP scores path: %s" % shap_scores_path)
print("TF-MoDISco results path: %s" % tfm_results_path)
print("Task index: %s" % task_index)
print("Importance score key: %s" % hyp_score_key)
print("Saved TF-MoDISco-derived motifs cache: %s" % tfm_motifs_cache_dir)
TF name: SPI1 DeepSHAP scores path: /users/amtseng/tfmodisco/results/importance_scores/multitask_countreg/SPI1_fold8/SPI1_multitask_countreg_task3_fold8_shap_scores.h5 TF-MoDISco results path: /users/amtseng/tfmodisco/results/tfmodisco/multitask_countreg/SPI1_fold8/SPI1_multitask_countreg_task3_fold8_tfm.h5 Task index: 3 Importance score key: hyp_scores Saved TF-MoDISco-derived motifs cache: /users/amtseng/tfmodisco/results/reports/tfmodisco_results//cache/multitask_countreg/SPI1_multitask_countreg_fold8/SPI1_multitask_countreg_task3_fold8
# Define paths and constants
input_length = 1000  # length (bp) of the model's full input sequences
shap_score_center_size = 400  # centered window (bp) the SHAP scores were cut to
base_path = "/users/amtseng/tfmodisco/"
data_path = os.path.join(base_path, "data/processed/ENCODE/")
labels_path = os.path.join(data_path, "labels/%s" % tf_name)
# Paths to original called peaks
all_peak_beds = sorted([item for item in os.listdir(labels_path) if item.endswith(".bed.gz")])
if task_index is None:
    # No task specified: use the peak files for all tasks
    peak_bed_paths = [os.path.join(labels_path, item) for item in all_peak_beds]
else:
    # Single-task mode: task_index selects into the sorted list of BEDs
    peak_bed_paths = [os.path.join(labels_path, all_peak_beds[task_index])]
if tfm_motifs_cache_dir:
    os.makedirs(tfm_motifs_cache_dir, exist_ok=True)
def extract_coords(
    seqlets_arr, one_hot_seqs, hyp_scores, pred_coords, input_length,
    input_center_cut_size
):
    """
    Extracts the one-hot sequences, hypothetical importance scores, and
    genomic coordinates for the seqlets of a TF-MoDISco pattern.
    Arguments:
        `seqlets_arr`: a TF-MoDISco pattern's seqlets object array (N-array)
        `one_hot_seqs`: an N x R x 4 array of input sequences, where R is
            the cut centered size
        `hyp_scores`: an N x R x 4 array of hypothetical importance scores
        `pred_coords`: an N x 3 object array of coordinates for the input
            sequences
        `input_length`: length of original input sequences, I
        `input_center_cut_size`: centered cut size of SHAP scores used
    Returns an N x Q x 4 array of one-hot seqlet sequences, an N x Q x 4
    array of hypothetical seqlet importance scores, and an N x 3 object array
    of seqlet coordinates, where Q is the seqlet length.
    The seqlet example indices are assumed to index directly into
    `one_hot_seqs`/`hyp_scores` (i.e. the exact sequences behind the original
    SHAP scores, already cut/centered).
    """
    # Seqlet indices are relative to the centered window; adding this offset
    # maps them back to positions in the full-length input sequence
    offset = (input_length - input_center_cut_size) // 2
    seq_list, hyp_list, coord_list = [], [], []
    for seqlet in seqlets_arr:
        ex_idx = seqlet.coor.example_idx
        start, end = seqlet.coor.start, seqlet.coor.end
        seq = one_hot_seqs[ex_idx, start:end]
        hyp = hyp_scores[ex_idx, start:end]
        if seqlet.coor.is_revcomp:
            # Reverse complement: flip both the position axis and the base axis
            seq = np.flip(seq, axis=(0, 1))
            hyp = np.flip(hyp, axis=(0, 1))
        seq_list.append(seq)
        hyp_list.append(hyp)
        # Absolute genomic coordinates of the seqlet, based on the input's start
        chrom, region_start, _ = pred_coords[ex_idx]
        coord_list.append(
            [chrom, region_start + start + offset, region_start + end + offset]
        )
    return (
        np.stack(seq_list), np.stack(hyp_list),
        np.array(coord_list, dtype=object)
    )
def get_summit_distances(coords, peak_table):
    """
    Given a set of coordinates, computes the signed distance of the center of
    each coordinate to the nearest peak summit on the same chromosome.
    Arguments:
        `coords`: an N x 3 object array of coordinates
        `peak_table`: a 10-column table of peak data, as imported by
            `import_peak_table`; must contain "chrom" and "summit" columns
    Returns an N-array of integers, which is the signed distance of each
    coordinate midpoint to the nearest summit (midpoint minus summit, so
    positive means the midpoint is downstream of the summit).
    """
    chroms = coords[:, 0]
    midpoints = (coords[:, 1] + coords[:, 2]) // 2
    # Precompute the summit positions per chromosome once, instead of
    # re-filtering the whole peak table for every coordinate (was O(N * P))
    chrom_to_summits = {
        chrom: group["summit"].values
        for chrom, group in peak_table.groupby("chrom")
    }
    dists = []
    for chrom, midpoint in zip(chroms, midpoints):
        dist_arr = midpoint - chrom_to_summits[chrom]
        # Keep the signed value of the closest (in absolute terms) distance
        dists.append(dist_arr[np.argmin(np.abs(dist_arr))])
    return np.array(dists)
def plot_summit_dists(summit_dists):
    """
    Plots the distribution of seqlet distances to peak summits.
    Arguments:
        `summit_dists`: the array of signed distances as returned by
            `get_summit_distances`
    Returns the figure.
    """
    # Aim for roughly 30 distances per bin, but never fewer than 20 bins
    num_bins = max(len(summit_dists) // 30, 20)
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.hist(summit_dists, bins=num_bins, color="purple")
    ax.set_title("Histogram of distance of seqlets to peak summits")
    ax.set_xlabel("Signed distance from seqlet center to nearest peak summit (bp)")
    plt.show()
    return fig
# Import SHAP coordinates and one-hot sequences
# `hyp_scores` and `one_hot_seqs` are cut to the centered
# `shap_score_center_size` window; `shap_coords` stay full-length
hyp_scores, _, one_hot_seqs, shap_coords = import_shap_scores(shap_scores_path, hyp_score_key, center_cut_size=shap_score_center_size)
# This cuts the sequences/scores off just as how TF-MoDISco saw them, but the coordinates are uncut
Importing SHAP scores: 0%| | 0/64 [00:00<?, ?it/s] Importing SHAP scores: 2%|▏ | 1/64 [00:02<02:39, 2.53s/it] Importing SHAP scores: 3%|▎ | 2/64 [00:05<02:36, 2.52s/it] Importing SHAP scores: 5%|▍ | 3/64 [00:06<01:57, 1.92s/it] Importing SHAP scores: 6%|▋ | 4/64 [00:07<01:39, 1.65s/it] Importing SHAP scores: 8%|▊ | 5/64 [00:08<01:32, 1.57s/it] Importing SHAP scores: 9%|▉ | 6/64 [00:12<02:02, 2.12s/it] Importing SHAP scores: 11%|█ | 7/64 [00:14<02:02, 2.14s/it] Importing SHAP scores: 12%|█▎ | 8/64 [00:15<01:44, 1.87s/it] Importing SHAP scores: 14%|█▍ | 9/64 [00:17<01:46, 1.94s/it] Importing SHAP scores: 16%|█▌ | 10/64 [00:20<01:59, 2.22s/it] Importing SHAP scores: 17%|█▋ | 11/64 [00:22<01:57, 2.23s/it] Importing SHAP scores: 19%|█▉ | 12/64 [00:24<01:50, 2.13s/it] Importing SHAP scores: 20%|██ | 13/64 [00:26<01:48, 2.12s/it] Importing SHAP scores: 22%|██▏ | 14/64 [00:28<01:34, 1.88s/it] Importing SHAP scores: 23%|██▎ | 15/64 [00:30<01:38, 2.01s/it] Importing SHAP scores: 25%|██▌ | 16/64 [00:32<01:40, 2.09s/it] Importing SHAP scores: 27%|██▋ | 17/64 [00:34<01:28, 1.89s/it] Importing SHAP scores: 28%|██▊ | 18/64 [00:36<01:39, 2.16s/it] Importing SHAP scores: 30%|██▉ | 19/64 [00:39<01:43, 2.29s/it] Importing SHAP scores: 31%|███▏ | 20/64 [00:41<01:42, 2.34s/it] Importing SHAP scores: 33%|███▎ | 21/64 [00:43<01:30, 2.10s/it] Importing SHAP scores: 34%|███▍ | 22/64 [00:46<01:34, 2.24s/it] Importing SHAP scores: 36%|███▌ | 23/64 [00:48<01:32, 2.26s/it] Importing SHAP scores: 38%|███▊ | 24/64 [00:51<01:37, 2.43s/it] Importing SHAP scores: 39%|███▉ | 25/64 [00:53<01:31, 2.34s/it] Importing SHAP scores: 41%|████ | 26/64 [00:55<01:30, 2.39s/it] Importing SHAP scores: 42%|████▏ | 27/64 [00:57<01:23, 2.27s/it] Importing SHAP scores: 44%|████▍ | 28/64 [00:59<01:13, 2.05s/it] Importing SHAP scores: 45%|████▌ | 29/64 [01:01<01:13, 2.11s/it] Importing SHAP scores: 47%|████▋ | 30/64 [01:03<01:12, 2.15s/it] Importing SHAP scores: 48%|████▊ | 31/64 [01:06<01:16, 2.33s/it] Importing 
SHAP scores: 50%|█████ | 32/64 [01:08<01:13, 2.30s/it] Importing SHAP scores: 52%|█████▏ | 33/64 [01:11<01:14, 2.40s/it] Importing SHAP scores: 53%|█████▎ | 34/64 [01:13<01:08, 2.29s/it] Importing SHAP scores: 55%|█████▍ | 35/64 [01:15<01:05, 2.26s/it] Importing SHAP scores: 56%|█████▋ | 36/64 [01:19<01:16, 2.74s/it] Importing SHAP scores: 58%|█████▊ | 37/64 [01:20<01:00, 2.24s/it] Importing SHAP scores: 59%|█████▉ | 38/64 [01:24<01:09, 2.69s/it] Importing SHAP scores: 61%|██████ | 39/64 [01:26<01:04, 2.58s/it] Importing SHAP scores: 62%|██████▎ | 40/64 [01:28<00:53, 2.24s/it] Importing SHAP scores: 64%|██████▍ | 41/64 [01:30<00:50, 2.20s/it] Importing SHAP scores: 66%|██████▌ | 42/64 [01:32<00:49, 2.23s/it] Importing SHAP scores: 67%|██████▋ | 43/64 [01:34<00:44, 2.10s/it] Importing SHAP scores: 69%|██████▉ | 44/64 [01:36<00:40, 2.02s/it] Importing SHAP scores: 70%|███████ | 45/64 [01:38<00:40, 2.16s/it] Importing SHAP scores: 72%|███████▏ | 46/64 [01:39<00:34, 1.89s/it] Importing SHAP scores: 73%|███████▎ | 47/64 [01:40<00:26, 1.55s/it] Importing SHAP scores: 75%|███████▌ | 48/64 [01:44<00:33, 2.12s/it] Importing SHAP scores: 77%|███████▋ | 49/64 [01:46<00:31, 2.12s/it] Importing SHAP scores: 78%|███████▊ | 50/64 [01:47<00:25, 1.82s/it] Importing SHAP scores: 80%|███████▉ | 51/64 [01:48<00:22, 1.70s/it] Importing SHAP scores: 81%|████████▏ | 52/64 [01:51<00:23, 1.99s/it] Importing SHAP scores: 83%|████████▎ | 53/64 [01:54<00:24, 2.25s/it] Importing SHAP scores: 84%|████████▍ | 54/64 [01:56<00:23, 2.37s/it] Importing SHAP scores: 86%|████████▌ | 55/64 [01:58<00:19, 2.19s/it] Importing SHAP scores: 88%|████████▊ | 56/64 [02:01<00:18, 2.32s/it] Importing SHAP scores: 89%|████████▉ | 57/64 [02:05<00:19, 2.76s/it] Importing SHAP scores: 91%|█████████ | 58/64 [02:08<00:18, 3.01s/it] Importing SHAP scores: 92%|█████████▏| 59/64 [02:11<00:14, 2.98s/it] Importing SHAP scores: 94%|█████████▍| 60/64 [02:13<00:10, 2.74s/it] Importing SHAP scores: 95%|█████████▌| 61/64 
[02:17<00:09, 3.11s/it] Importing SHAP scores: 97%|█████████▋| 62/64 [02:20<00:06, 3.00s/it] Importing SHAP scores: 98%|█████████▊| 63/64 [02:23<00:03, 3.01s/it] Importing SHAP scores: 100%|██████████| 64/64 [02:24<00:00, 2.26s/it]
# Import the set of peaks
peak_table = import_peak_table(peak_bed_paths)
# Import the TF-MoDISco results object
# The scores/sequences are passed in so the result object's seqlets line up
# with the same cut/centered arrays imported above
tfm_obj = import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs, shap_score_center_size)
Plot the central region of some randomly selected actual importance scores.
# Show the central half of the cut window for a few randomly chosen examples
plot_slice = slice(int(shap_score_center_size / 4), int(3 * shap_score_center_size / 4))
sample_indices = np.random.choice(hyp_scores.shape[0], size=5, replace=False)
for sample_index in sample_indices:
    # Actual importance = hypothetical scores masked by the one-hot sequence
    actual_scores = hyp_scores[sample_index] * one_hot_seqs[sample_index]
    viz_sequence.plot_weights(actual_scores[plot_slice], subticks_frequency=100)
# Walk every pattern of every metacluster: display its PFM/hCWM/CWM, trim it
# by information content, recover its seqlets, and optionally cache everything
motif_pfms, motif_hcwms, motif_cwms = [], [], []  # Save the trimmed PFMs, hCWMs, and CWMs
motif_pfms_short = []  # PFMs that are even more trimmed (for TOMTOM)
num_seqlets = []  # Number of seqlets for each motif
motif_seqlets = []  # Save seqlets of each motif
metaclusters = tfm_obj.metacluster_idx_to_submetacluster_results
num_metaclusters = len(metaclusters.keys())
if tfm_motifs_cache_dir:
    # Single HDF5 file holding the full/trimmed matrices of all motifs
    motif_hdf5 = h5py.File(os.path.join(tfm_motifs_cache_dir, "all_motifs.h5"), "w")
for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
    metacluster = metaclusters[metacluster_key]
    display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
    patterns = metacluster.seqlets_to_patterns_result.patterns
    if not patterns:
        # NOTE(review): `break` abandons all remaining metaclusters at the
        # first one without patterns — confirm `continue` was not intended
        break
    # One sub-list per metacluster; the pattern loop below appends into the
    # last (current) sub-list via [-1]
    motif_pfms.append([])
    motif_hcwms.append([])
    motif_cwms.append([])
    motif_pfms_short.append([])
    num_seqlets.append([])
    motif_seqlets.append([])
    num_patterns = len(patterns)
    for pattern_i, pattern in enumerate(patterns):
        seqlets = pattern.seqlets
        display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
        display(vdomh.p("%d seqlets" % len(seqlets)))
        # Forward-strand matrices stored on the pattern
        pfm = pattern["sequence"].fwd
        hcwm = pattern["task0_hypothetical_contribs"].fwd
        cwm = pattern["task0_contrib_scores"].fwd
        pfm_fig = viz_sequence.plot_weights(pfm, subticks_frequency=10, return_fig=True)
        hcwm_fig = viz_sequence.plot_weights(hcwm, subticks_frequency=10, return_fig=True)
        cwm_fig = viz_sequence.plot_weights(cwm, subticks_frequency=10, return_fig=True)
        pfm_fig.tight_layout()
        hcwm_fig.tight_layout()
        cwm_fig.tight_layout()
        # Show the three representations of the motif in one HTML table
        motif_table = vdomh.table(
            vdomh.tr(
                vdomh.td("Sequence (PFM)"),
                vdomh.td(figure_to_vdom_image(pfm_fig))
            ),
            vdomh.tr(
                vdomh.td("Hypothetical contributions (hCWM)"),
                vdomh.td(figure_to_vdom_image(hcwm_fig))
            ),
            vdomh.tr(
                vdomh.td("Actual contributions (CWM)"),
                vdomh.td(figure_to_vdom_image(cwm_fig))
            )
        )
        display(motif_table)
        plt.close("all")  # Remove all standing figures
        # Trim motif based on information content
        short_trimmed_pfm = trim_motif_by_ic(pfm, pfm)
        motif_pfms_short[-1].append(short_trimmed_pfm)
        # Expand trimming to +/- 4bp on either side
        trimmed_pfm = trim_motif_by_ic(pfm, pfm, pad=4)
        trimmed_hcwm = trim_motif_by_ic(pfm, hcwm, pad=4)
        trimmed_cwm = trim_motif_by_ic(pfm, cwm, pad=4)
        motif_pfms[-1].append(trimmed_pfm)
        motif_hcwms[-1].append(trimmed_hcwm)
        motif_cwms[-1].append(trimmed_cwm)
        num_seqlets[-1].append(len(seqlets))
        # Recover the sequences, hypothetical scores, and genomic coordinates
        # of this pattern's supporting seqlets
        seqlet_seqs, seqlet_hyps, seqlet_coords = extract_coords(
            seqlets, one_hot_seqs, hyp_scores, shap_coords, input_length,
            shap_score_center_size
        )
        motif_seqlets[-1].append((seqlet_seqs, seqlet_hyps))
        assert np.allclose(np.sum(seqlet_seqs, axis=0) / len(seqlet_seqs), pattern["sequence"].fwd)
        # ^Sanity check: PFM derived from seqlets match the PFM stored in the pattern
        summit_dists = get_summit_distances(seqlet_coords, peak_table)
        dist_fig = plot_summit_dists(summit_dists)
        if tfm_motifs_cache_dir:
            # Save results and figures
            motif_id = "%d_%d" % (metacluster_i, pattern_i)
            np.savez_compressed(
                os.path.join(tfm_motifs_cache_dir, motif_id + "_seqlets"),
                seqlet_seqs=seqlet_seqs, seqlet_hyps=seqlet_hyps, seqlet_coords=seqlet_coords,
                summit_dists=summit_dists
            )
            dist_fig.savefig(os.path.join(tfm_motifs_cache_dir, motif_id + "_summitdists.png"))
            pfm_fig.savefig(os.path.join(tfm_motifs_cache_dir, motif_id + "_pfm_full.png"))
            hcwm_fig.savefig(os.path.join(tfm_motifs_cache_dir, motif_id + "_hcwm_full.png"))
            cwm_fig.savefig(os.path.join(tfm_motifs_cache_dir, motif_id + "_cwm_full.png"))
            motif_dset = motif_hdf5.create_group(motif_id)
            motif_dset.create_dataset("pfm_full", data=pfm, compression="gzip")
            motif_dset.create_dataset("hcwm_full", data=hcwm, compression="gzip")
            motif_dset.create_dataset("cwm_full", data=cwm, compression="gzip")
            motif_dset.create_dataset("pfm_trimmed", data=trimmed_pfm, compression="gzip")
            motif_dset.create_dataset("hcwm_trimmed", data=trimmed_hcwm, compression="gzip")
            motif_dset.create_dataset("cwm_trimmed", data=trimmed_cwm, compression="gzip")
            motif_dset.create_dataset("pfm_short_trimmed", data=short_trimmed_pfm, compression="gzip")
if tfm_motifs_cache_dir:
    motif_hdf5.close()
16853 seqlets
Sequence (PFM) | |
Hypothetical contributions (hCWM) | |
Actual contributions (CWM) |
32 seqlets
Sequence (PFM) | |
Hypothetical contributions (hCWM) | |
Actual contributions (CWM) |
Motifs are trimmed based on information content, and presented in descending order by number of supporting seqlets. The motifs are separated by metacluster. The motifs are presented as hCWMs. The forward orientation is defined as the orientation that is richer in purines.
# Summary table per metacluster: each motif's hCWM in forward and
# reverse-complement orientation, alongside its seqlet count
colgroup = vdomh.colgroup(
    vdomh.col(style={"width": "5%"}),
    vdomh.col(style={"width": "5%"}),
    vdomh.col(style={"width": "45%"}),
    vdomh.col(style={"width": "45%"})
)
header = vdomh.thead(
    vdomh.tr(
        vdomh.th("#", style={"text-align": "center"}),
        vdomh.th("Seqlets", style={"text-align": "center"}),
        vdomh.th("Forward", style={"text-align": "center"}),
        vdomh.th("Reverse", style={"text-align": "center"})
    )
)
for i in range(len(motif_hcwms)):
    display(vdomh.h3("Metacluster %d/%d" % (i + 1, num_metaclusters)))
    body = []
    for j in range(len(motif_hcwms[i])):
        motif = motif_hcwms[i][j]
        # "Forward" is defined as the purine-rich orientation; columns 0 and
        # 2 are taken to be A and G (assumes ACGT column order — the standard
        # one-hot convention here)
        if np.sum(motif[:, [0, 2]]) > 0.5 * np.sum(motif):
            # Forward is purine-rich, reverse-complement is pyrimidine-rich
            f, rc = motif, np.flip(motif, axis=(0, 1))
        else:
            f, rc = np.flip(motif, axis=(0, 1)), motif
        f_fig = viz_sequence.plot_weights(f, figsize=(20, 4), return_fig=True)
        f_fig.tight_layout()
        rc_fig = viz_sequence.plot_weights(rc, figsize=(20, 4), return_fig=True)
        rc_fig.tight_layout()
        if tfm_motifs_cache_dir:
            # Save results and figures
            motif_id = "%d_%d" % (i, j)
            f_fig.savefig(os.path.join(tfm_motifs_cache_dir, motif_id + "_hcwm_trimmed_fwd.png"))
            rc_fig.savefig(os.path.join(tfm_motifs_cache_dir, motif_id + "_hcwm_trimmed_rev.png"))
        body.append(
            vdomh.tr(
                vdomh.td(str(j + 1)),
                vdomh.td(str(num_seqlets[i][j])),
                vdomh.td(figure_to_vdom_image(f_fig)),
                vdomh.td(figure_to_vdom_image(rc_fig))
            )
        )
    display(vdomh.table(colgroup, header, vdomh.tbody(*body)))
    plt.close("all")
# | Seqlets | Forward | Reverse |
---|---|---|---|
1 | 16853 | ||
2 | 32 |
Here, the TF-MoDISco motifs are plotted as hCWMs, but the TOMTOM matches are shown as PWMs.
# TOMTOM match display settings
num_matches_to_keep = 10  # matches retained per motif
num_matches_to_show = 5  # matches rendered with a PWM figure; the rest are listed only
header = vdomh.thead(
    vdomh.tr(
        vdomh.th("Motif ID", style={"text-align": "center"}),
        vdomh.th("q-val", style={"text-align": "center"}),
        vdomh.th("PWM", style={"text-align": "center"})
    )
)
# For each metacluster, match the short-trimmed PFMs against the motif
# database with TOMTOM and display the top matches per motif
for i in range(len(motif_pfms)):
    display(vdomh.h3("Metacluster %d/%d" % (i + 1, num_metaclusters)))
    # Compute TOMTOM matches for all motifs in the metacluster at once
    out_dir = os.path.join(tfm_motifs_cache_dir, "tomtom", "metacluster_%d" % i) if tfm_motifs_cache_dir else None
    tomtom_matches = match_motifs_to_database(motif_pfms_short[i], top_k=num_matches_to_keep, temp_dir=out_dir)
    for j in range(len(motif_pfms[i])):
        display(vdomh.h4("Motif %d/%d" % (j + 1, len(motif_pfms[i]))))
        # Show the query motif itself (as an hCWM) above its matches
        viz_sequence.plot_weights(motif_hcwms[i][j])
        body = []
        for k, (match_name, match_pfm, match_qval) in enumerate(tomtom_matches[j]):
            # Database matches are converted from PFM to PWM for display
            fig = viz_sequence.plot_weights(pfm_to_pwm(match_pfm), return_fig=True)
            fig.tight_layout()
            if k < num_matches_to_show:
                body.append(
                    vdomh.tr(
                        vdomh.td(match_name),
                        vdomh.td(str(match_qval)),
                        vdomh.td(figure_to_vdom_image(fig))
                    )
                )
                if tfm_motifs_cache_dir:
                    # Save results and figures
                    motif_id = "%d_%d" % (i, j)
                    fig.savefig(os.path.join(out_dir, motif_id + ("_hit-%d.png" % (k + 1))))
            else:
                # Past the display limit: list the match without its figure
                body.append(
                    vdomh.tr(
                        vdomh.td(match_name),
                        vdomh.td(str(match_qval)),
                        vdomh.td("Not shown")
                    )
                )
        if not body:
            display(vdomh.p("No TOMTOM matches passing threshold"))
        else:
            display(vdomh.table(header, vdomh.tbody(*body)))
        plt.close("all")
Motif ID | q-val | PWM |
---|---|---|
SPIB_HUMAN.H11MO.0.A | 6.01569e-15 | |
SPI1_HUMAN.H11MO.0.A | 7.36985e-14 | |
MA0081.2_SPIB | 3.2287e-11 | |
MA0080.5_SPI1 | 8.97424e-10 | |
BC11A_HUMAN.H11MO.0.A | 1.0032799999999999e-08 | |
IRF8_HUMAN.H11MO.0.B | 1.12521e-07 | Not shown |
IRF4_HUMAN.H11MO.0.A | 1.26844e-07 | Not shown |
MA0598.3_EHF | 0.000195265 | Not shown |
MA0687.1_SPIC | 0.000229349 | Not shown |
MA0473.3_ELF1 | 0.00268555 | Not shown |
Motif ID | q-val | PWM |
---|---|---|
ZN467_HUMAN.H11MO.0.C | 0.011014 | |
ETS2_HUMAN.H11MO.0.B | 0.011014 | |
ZN263_HUMAN.H11MO.0.A | 0.011014 | |
MAZ_HUMAN.H11MO.0.A | 0.011014 | |
PATZ1_HUMAN.H11MO.0.C | 0.011014 | |
ZBT17_HUMAN.H11MO.0.A | 0.0130828 | Not shown |
SPI1_HUMAN.H11MO.0.A | 0.0130828 | Not shown |
VEZF1_HUMAN.H11MO.0.C | 0.0130828 | Not shown |
IRF8_HUMAN.H11MO.0.B | 0.015718799999999998 | Not shown |
SPIB_HUMAN.H11MO.0.A | 0.015718799999999998 | Not shown |
Here, the motifs are presented as hCWMs, along with the hypothetical importance scores of a random sample of seqlets that support the motif.
# For each motif, show its hCWM next to the actual importance scores of a
# random sample of its supporting seqlets
num_seqlets_to_show = 10  # sampled seqlets displayed per motif
colgroup = vdomh.colgroup(
    vdomh.col(style={"width": "50%"}),
    vdomh.col(style={"width": "50%"})
)
header = vdomh.thead(
    vdomh.tr(
        vdomh.th("Motif hCWM", style={"text-align": "center"}),
        vdomh.th("Seqlets", style={"text-align": "center"})
    )
)
for i in range(len(motif_hcwms)):
    display(vdomh.h3("Metacluster %d/%d" % (i + 1, num_metaclusters)))
    for j in range(len(motif_hcwms[i])):
        display(vdomh.h4("Motif %d/%d" % (j + 1, len(motif_hcwms[i]))))
        motif_fig = viz_sequence.plot_weights(motif_hcwms[i][j], figsize=(20, 4), return_fig=True)
        motif_fig.tight_layout()
        seqlet_seqs, seqlet_hyps = motif_seqlets[i][j]
        # Sample without replacement, capped at the number of seqlets available
        sample_size = min(num_seqlets_to_show, len(seqlet_seqs))
        sample_inds = np.random.choice(len(seqlet_seqs), size=sample_size, replace=False)
        sample = []
        for k in sample_inds:
            # Actual importance = hypothetical scores masked by the one-hot sequence
            fig = viz_sequence.plot_weights(seqlet_hyps[k] * seqlet_seqs[k], subticks_frequency=10, return_fig=True)
            fig.tight_layout()
            sample.append(figure_to_vdom_image(fig))
        body = vdomh.tbody(vdomh.tr(vdomh.td(figure_to_vdom_image(motif_fig)), vdomh.td(*sample)))
        display(vdomh.table(colgroup, header, body))
        plt.close("all")
Motif hCWM | Seqlets |
---|---|
Motif hCWM | Seqlets |
---|---|