import os
import h5py
import util
import moods
import viz_sequence
import numpy as np
import pandas as pd
import modisco
import sklearn.decomposition
import umap
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import vdom.helpers as vdomh
from IPython.display import display
import tqdm
# tqdm.tqdm_notebook is deprecated and will be removed in tqdm 5.0; the
# deprecation warning recommends tqdm.notebook.tqdm instead.
import tqdm.notebook
tqdm.notebook.tqdm()
/users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/ipykernel_launcher.py:16: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` app.launch_new_instance()
<tqdm.notebook.tqdm_notebook at 0x7f48d38a8350>
# Plotting defaults
# font_manager.createFontList was deprecated in Matplotlib 3.2 (the captured
# warning recommends FontManager.addfont); register each font file directly.
for _font_path in font_manager.findSystemFonts(fontpaths="/users/amtseng/modules/fonts"):
    font_manager.fontManager.addfont(_font_path)
plot_params = {
    "figure.titlesize": 22,
    "axes.titlesize": 22,
    "axes.labelsize": 20,
    "legend.fontsize": 18,
    "xtick.labelsize": 16,
    "ytick.labelsize": 16,
    "font.family": "Roboto",
    "font.weight": "bold"
}
plt.rcParams.update(plot_params)
/users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/ipykernel_launcher.py:4: MatplotlibDeprecationWarning: The createFontList function was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use FontManager.addfont instead. after removing the cwd from sys.path.
# Define parameters/fetch arguments
# All input/output locations come from environment variables (KeyError if unset)
env = os.environ
shap_scores_path = env["TFM_SHAP_PATH"]
tfm_results_path = env["TFM_TFM_PATH"]
moods_dir = env["TFM_MOODS_DIR"]
embeddings_path = env["TFM_EMB_PATH"]
print("DeepSHAP scores path: %s" % shap_scores_path)
print("TF-MoDISco results path: %s" % tfm_results_path)
print("Embeddings path: %s" % embeddings_path)
print("MOODS directory: %s" % moods_dir)
DeepSHAP scores path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//shap/ENCSR000EEC/profile_scores_alex_format.h5 TF-MoDISco results path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//modisco/ENCSR000EEC/profile/modisco_results.hd5 Embeddings path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//embeddings/ENCSR000EEC/embeddings.npz MOODS directory: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021/reports/tfmodisco/notebooks/ENCSR000EEC/moods/profile
# Define constants
# Width (bp) of the centered window cut from the SHAP scores (matches what
# TF-MoDISco was run on; see the import_shap_scores call below)
shap_score_center_size = 400
# Name of the dataset in the SHAP-scores HDF5 holding hypothetical scores
hyp_score_key = "hyp_scores"
# NOTE(review): not used anywhere in this chunk — presumably None means a
# single-task model; confirm against downstream utilities
task_index = None
Helper functions for plotting and organizing motif results
def compute_tfmodisco_motif_subclusters(tfm_results):
    """
    From an imported TF-MoDISco results object, computes the subclustering
    of heterogeneity within each motif/pattern, in place.

    Arguments:
        `tfm_results`: TF-MoDISco results object; each pattern gets its
            subclusters and 2D embedding computed via
            `compute_subclusters_and_embedding`
    """
    metaclusters = tfm_results.metacluster_idx_to_submetacluster_results
    for metacluster_key in metaclusters.keys():
        metacluster = metaclusters[metacluster_key]
        patterns = metacluster.seqlets_to_patterns_result.patterns
        if not patterns:
            # NOTE(review): `break` abandons all remaining metaclusters once
            # one has no patterns — confirm `continue` isn't intended here
            break
        for pattern in patterns:
            # Compute subclustering for each pattern (motif)
            pattern.compute_subclusters_and_embedding(
                pattern_comparison_settings=modisco.affinitymat.core.PatternComparisonSettings(
                    track_names=["task0_hypothetical_contribs", "task0_contrib_scores"],
                    track_transformer=modisco.affinitymat.L1Normalizer(),
                    min_overlap=None  # This argument is irrelevant here
                ),
                perplexity=30, n_jobs=4, verbose=True
            )
def trim_hcwm(pfm, hcwm):
    """
    Trims the hCWM flanks based on the information content of the PFM:
    positions with IC below 0.2 are cut off both flanks, then the kept
    window is expanded by 4 bp on each side (clipped to motif boundaries).

    Arguments:
        `pfm`: L x 4 position frequency matrix
        `hcwm`: L x 4 hypothetical contribution weight matrix

    Returns the trimmed hCWM (the full hCWM if no position passes the IC
    threshold).
    """
    ic = util.info_content(pfm)
    pass_inds = np.where(ic >= 0.2)[0]  # Cut off flanks with less than 0.2 IC
    if not len(pass_inds):
        # Guard: np.min/np.max on an empty array raises; keep motif untrimmed
        return hcwm
    # Expand trimming to +/- 4bp on either side
    start = max(0, np.min(pass_inds) - 4)
    end = min(len(pfm), np.max(pass_inds) + 4 + 1)
    return hcwm[start:end]
def plot_motif_heterogeneity(tfm_results):
    """
    For each pattern (motif) in a TF-MoDISco results object, displays an
    HTML table showing the aggregate motif and each of its subpatterns:
    the seqlet count, the 2D seqlet embedding (colored by subcluster or
    subcluster membership), and the trimmed hCWM.

    Arguments:
        `tfm_results`: TF-MoDISco results object on which subcluster/
            embedding computation has already been run (see
            `compute_tfmodisco_motif_subclusters`)
    """
    colgroup = vdomh.colgroup(
        vdomh.col(style={"width": "5%"}),
        vdomh.col(style={"width": "5%"}),
        vdomh.col(style={"width": "50%"}),
        vdomh.col(style={"width": "40%"})
    )
    header = vdomh.thead(
        vdomh.tr(
            vdomh.th("Subpattern", style={"text-align": "center"}),
            vdomh.th("Seqlets", style={"text-align": "center"}),
            vdomh.th("Embeddings", style={"text-align": "center"}),
            vdomh.th("hCWM", style={"text-align": "center"})
        )
    )
    metaclusters = tfm_results.metacluster_idx_to_submetacluster_results
    num_metaclusters = len(metaclusters.keys())
    for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
        metacluster = metaclusters[metacluster_key]
        display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
        patterns = metacluster.seqlets_to_patterns_result.patterns
        if not patterns:
            # NOTE(review): `break` abandons all remaining metaclusters once
            # one has no patterns — confirm `continue` isn't intended here
            break
        num_patterns = len(patterns)
        for pattern_i, pattern in enumerate(patterns):
            display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
            embedding = pattern.twod_embedding
            subpattern_clusters = pattern.subclusters
            # Aggregate motif row: embedding colored by subcluster assignment
            pfm = pattern["sequence"].fwd
            hcwm = pattern["task0_hypothetical_contribs"].fwd
            trimmed_hcwm = trim_hcwm(pfm, hcwm)
            hcwm_fig = viz_sequence.plot_weights(
                trimmed_hcwm, subticks_frequency=(len(trimmed_hcwm) + 1), return_fig=True
            )
            emb_fig, ax = plt.subplots()
            ax.scatter(
                embedding[:, 0], embedding[:, 1], c=subpattern_clusters, cmap="tab20", alpha=0.3
            )
            table_rows = [vdomh.tr(
                vdomh.td("Agg."),
                vdomh.td(str(len(pattern.seqlets))),
                vdomh.td(util.figure_to_vdom_image(emb_fig)),
                vdomh.td(util.figure_to_vdom_image(hcwm_fig))
            )]
            # One row per subpattern: embedding highlights that subcluster
            for subpattern_key, subpattern in pattern.subcluster_to_subpattern.items():
                pfm = subpattern["sequence"].fwd
                hcwm = subpattern["task0_hypothetical_contribs"].fwd
                trimmed_hcwm = trim_hcwm(pfm, hcwm)
                hcwm_fig = viz_sequence.plot_weights(
                    trimmed_hcwm, subticks_frequency=(len(trimmed_hcwm) + 1), return_fig=True
                )
                emb_fig, ax = plt.subplots()
                ax.scatter(
                    embedding[:, 0], embedding[:, 1], c=(subpattern_clusters == subpattern_key), alpha=0.3
                )
                table_rows.append(vdomh.tr(
                    vdomh.td(str(subpattern_key)),
                    vdomh.td(str(len(subpattern.seqlets))),
                    vdomh.td(util.figure_to_vdom_image(emb_fig)),
                    vdomh.td(util.figure_to_vdom_image(hcwm_fig))
                ))
            # Attach the colgroup so the column widths actually apply
            # (previously it was built but never added to the table)
            table = vdomh.table(colgroup, header, vdomh.tbody(*table_rows))
            display(table)
    plt.close("all")  # Remove all standing figures
def import_tfmodisco_motifs(tfm_results_path, trim=True, only_pos=True):
    """
    Imports hCWMs into a dictionary, mapping the string key "x_y" to the
    hCWM, where `x` is the metacluster index and `y` is the pattern index.

    Arguments:
        `tfm_results_path`: path to HDF5 containing TF-MoDISco results
        `trim`: if True, trim the motif flanks based on the information
            content of the PFM (see `trim_hcwm`)
        `only_pos`: if True, only return motifs whose total contribution
            scores are positive

    Returns the dictionary of hCWMs.
    """
    hcwms = {}
    with h5py.File(tfm_results_path, "r") as f:
        metaclusters = f["metacluster_idx_to_submetacluster_results"]
        for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
            metacluster = metaclusters[metacluster_key]
            # Metaclusters without any discovered patterns have no "patterns"
            # group; skip them
            if "patterns" not in metacluster["seqlets_to_patterns_result"]:
                continue
            patterns = metacluster["seqlets_to_patterns_result"]["patterns"]
            for pattern_i, pattern_name in enumerate(patterns["all_pattern_names"][:]):
                pattern = patterns[pattern_name.decode()]
                pfm = pattern["sequence"]["fwd"][:]
                hcwm = pattern["task0_hypothetical_contribs"]["fwd"][:]
                cwm = pattern["task0_contrib_scores"]["fwd"][:]
                # Check that the contribution scores are overall positive
                if only_pos and np.sum(cwm) < 0:
                    continue
                if trim:
                    hcwm = trim_hcwm(pfm, hcwm)
                hcwms["%d_%d" % (metacluster_i, pattern_i)] = hcwm
    return hcwms
def get_hit_peak_indices(hit_table, motif_keys):
    """
    Returns a dictionary of NumPy arrays, mapping each motif key to
    the set of peak indices that contain that motif.
    """
    # For each motif key, select the rows whose "key" column matches and
    # collect their peak indices
    return {
        key: hit_table.loc[hit_table["key"] == key, "peak_index"].values
        for key in motif_keys
    }
def plot_peak_clustering(embeddings, motif_keys, hcwms, hit_peak_indices):
    """
    Computes a UMAP projection of the peak embeddings (after PCA reduction
    to 20 components) and, for each motif, displays a table row with the
    motif key, the embedding scatter highlighting peaks containing that
    motif, and the motif hCWM.

    Arguments:
        `embeddings`: N x D array of peak embeddings
        `motif_keys`: list of motif keys to plot
        `hcwms`: dictionary mapping motif key to hCWM (see
            `import_tfmodisco_motifs`)
        `hit_peak_indices`: dictionary mapping motif key to array of peak
            indices containing that motif (see `get_hit_peak_indices`)
    """
    # First reduce using PCA
    centered = embeddings - np.mean(embeddings, axis=0, keepdims=True)
    pca = sklearn.decomposition.PCA(n_components=20)
    reduced = pca.fit_transform(centered)
    # Run UMAP on the PCA-reduced data; previously UMAP was fed `centered`,
    # leaving `reduced` unused and contradicting the comment above
    um = umap.UMAP(verbose=False)
    trans = um.fit_transform(reduced)
    colgroup = vdomh.colgroup(
        vdomh.col(style={"width": "5%"}),
        vdomh.col(style={"width": "55%"}),  # was "55": missing the % unit
        vdomh.col(style={"width": "40%"})
    )
    header = vdomh.thead(
        vdomh.tr(
            vdomh.th("Motif key", style={"text-align": "center"}),
            vdomh.th("Embeddings", style={"text-align": "center"}),
            vdomh.th("hCWM", style={"text-align": "center"})
        )
    )
    table_rows = []
    for motif_key in motif_keys:
        hcwm = hcwms[motif_key]
        hcwm_fig = viz_sequence.plot_weights(
            hcwm, subticks_frequency=(len(hcwm) + 1), return_fig=True
        )
        emb_fig, ax = plt.subplots()
        # Binary mask: 1 for peaks containing this motif, 0 otherwise
        subset = np.zeros(len(embeddings), dtype=int)
        subset[hit_peak_indices[motif_key]] = 1
        ax.scatter(
            trans[:, 0], trans[:, 1], c=subset, alpha=0.3
        )
        table_rows.append(vdomh.tr(
            vdomh.td(motif_key),
            vdomh.td(util.figure_to_vdom_image(emb_fig)),
            vdomh.td(util.figure_to_vdom_image(hcwm_fig))
        ))
    # Attach the colgroup so the column widths actually apply
    # (previously it was built but never added to the table)
    table = vdomh.table(colgroup, header, vdomh.tbody(*table_rows))
    display(table)
    plt.close("all")  # Remove all standing figures
Run motif subclustering
# Import SHAP coordinates and one-hot sequences
hyp_scores, _, one_hot_seqs, shap_coords = util.import_shap_scores(
    shap_scores_path,
    hyp_score_key,
    center_cut_size=shap_score_center_size,
    remove_non_acgt=False
)
# This cuts the sequences/scores off just as how TF-MoDISco saw them, but
# the coordinates are uncut
Importing SHAP scores: 0%| | 0/49 [00:00<?, ?it/s] Importing SHAP scores: 2%|▏ | 1/49 [00:00<00:13, 3.60it/s] Importing SHAP scores: 4%|▍ | 2/49 [00:00<00:10, 4.46it/s] Importing SHAP scores: 6%|▌ | 3/49 [00:00<00:10, 4.51it/s] Importing SHAP scores: 8%|▊ | 4/49 [00:00<00:11, 3.94it/s] Importing SHAP scores: 10%|█ | 5/49 [00:01<00:09, 4.41it/s] Importing SHAP scores: 12%|█▏ | 6/49 [00:01<00:09, 4.70it/s] Importing SHAP scores: 14%|█▍ | 7/49 [00:01<00:10, 4.17it/s] Importing SHAP scores: 16%|█▋ | 8/49 [00:01<00:09, 4.53it/s] Importing SHAP scores: 18%|█▊ | 9/49 [00:02<00:08, 4.82it/s] Importing SHAP scores: 20%|██ | 10/49 [00:02<00:08, 4.61it/s] Importing SHAP scores: 22%|██▏ | 11/49 [00:02<00:08, 4.70it/s] Importing SHAP scores: 24%|██▍ | 12/49 [00:02<00:07, 4.86it/s] Importing SHAP scores: 27%|██▋ | 13/49 [00:02<00:08, 4.34it/s] Importing SHAP scores: 29%|██▊ | 14/49 [00:03<00:07, 4.71it/s] Importing SHAP scores: 31%|███ | 15/49 [00:03<00:07, 4.72it/s] Importing SHAP scores: 33%|███▎ | 16/49 [00:03<00:06, 4.84it/s] Importing SHAP scores: 35%|███▍ | 17/49 [00:03<00:06, 4.66it/s] Importing SHAP scores: 37%|███▋ | 18/49 [00:03<00:06, 5.00it/s] Importing SHAP scores: 39%|███▉ | 19/49 [00:04<00:06, 4.66it/s] Importing SHAP scores: 41%|████ | 20/49 [00:04<00:06, 4.30it/s] Importing SHAP scores: 43%|████▎ | 21/49 [00:04<00:06, 4.64it/s] Importing SHAP scores: 45%|████▍ | 22/49 [00:04<00:05, 5.02it/s] Importing SHAP scores: 47%|████▋ | 23/49 [00:05<00:05, 4.45it/s] Importing SHAP scores: 49%|████▉ | 24/49 [00:05<00:05, 4.77it/s] Importing SHAP scores: 51%|█████ | 25/49 [00:05<00:04, 5.13it/s] Importing SHAP scores: 53%|█████▎ | 26/49 [00:05<00:05, 4.60it/s] Importing SHAP scores: 55%|█████▌ | 27/49 [00:05<00:04, 4.73it/s] Importing SHAP scores: 57%|█████▋ | 28/49 [00:06<00:04, 4.77it/s] Importing SHAP scores: 59%|█████▉ | 29/49 [00:06<00:04, 4.47it/s] Importing SHAP scores: 61%|██████ | 30/49 [00:06<00:04, 4.54it/s] Importing SHAP scores: 63%|██████▎ | 31/49 [00:06<00:03, 
4.89it/s] Importing SHAP scores: 65%|██████▌ | 32/49 [00:06<00:03, 5.17it/s] Importing SHAP scores: 67%|██████▋ | 33/49 [00:07<00:03, 4.85it/s] Importing SHAP scores: 69%|██████▉ | 34/49 [00:07<00:02, 5.05it/s] Importing SHAP scores: 71%|███████▏ | 35/49 [00:07<00:02, 5.35it/s] Importing SHAP scores: 73%|███████▎ | 36/49 [00:07<00:02, 4.94it/s] Importing SHAP scores: 76%|███████▌ | 37/49 [00:07<00:02, 5.06it/s] Importing SHAP scores: 78%|███████▊ | 38/49 [00:08<00:02, 5.24it/s] Importing SHAP scores: 80%|███████▉ | 39/49 [00:08<00:02, 4.93it/s] Importing SHAP scores: 82%|████████▏ | 40/49 [00:08<00:01, 5.23it/s] Importing SHAP scores: 84%|████████▎ | 41/49 [00:08<00:01, 4.94it/s] Importing SHAP scores: 86%|████████▌ | 42/49 [00:08<00:01, 4.73it/s] Importing SHAP scores: 88%|████████▊ | 43/49 [00:09<00:01, 4.73it/s] Importing SHAP scores: 90%|████████▉ | 44/49 [00:09<00:00, 5.03it/s] Importing SHAP scores: 92%|█████████▏| 45/49 [00:09<00:00, 4.55it/s] Importing SHAP scores: 94%|█████████▍| 46/49 [00:09<00:00, 4.76it/s] Importing SHAP scores: 96%|█████████▌| 47/49 [00:09<00:00, 4.98it/s] Importing SHAP scores: 98%|█████████▊| 48/49 [00:10<00:00, 5.22it/s] Importing SHAP scores: 100%|██████████| 49/49 [00:10<00:00, 4.78it/s]
# Import the TF-MoDISco results object, rebuilt from the saved HDF5 together
# with the SHAP scores/sequences imported above
tfm_obj = util.import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs, shap_score_center_size)
# Compute subclusters (needed for older versions of TF-MoDISco); this takes awhile!
compute_tfmodisco_motif_subclusters(tfm_obj)
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 5.7s [Parallel(n_jobs=4)]: Done 192 tasks | elapsed: 12.8s [Parallel(n_jobs=4)]: Done 442 tasks | elapsed: 24.7s [Parallel(n_jobs=4)]: Done 792 tasks | elapsed: 41.0s [Parallel(n_jobs=4)]: Done 1242 tasks | elapsed: 1.0min [Parallel(n_jobs=4)]: Done 1792 tasks | elapsed: 1.4min [Parallel(n_jobs=4)]: Done 2442 tasks | elapsed: 1.9min [Parallel(n_jobs=4)]: Done 3192 tasks | elapsed: 2.5min [Parallel(n_jobs=4)]: Done 4042 tasks | elapsed: 3.1min [Parallel(n_jobs=4)]: Done 4992 tasks | elapsed: 3.9min [Parallel(n_jobs=4)]: Done 6042 tasks | elapsed: 4.7min [Parallel(n_jobs=4)]: Done 7192 tasks | elapsed: 5.6min [Parallel(n_jobs=4)]: Done 8442 tasks | elapsed: 6.6min [Parallel(n_jobs=4)]: Done 9792 tasks | elapsed: 7.6min [Parallel(n_jobs=4)]: Done 11242 tasks | elapsed: 8.7min [Parallel(n_jobs=4)]: Done 12792 tasks | elapsed: 10.0min [Parallel(n_jobs=4)]: Done 13721 out of 13721 | elapsed: 10.7min finished /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py:699: FutureWarning: 'square_distances' has been introduced in 0.24 to help phase out legacy squaring behavior. The 'legacy' setting will be removed in 1.1 (renaming of 0.26), and the default setting will be changed to True. In 1.3, 'square_distances' will be removed altogether, and distances will be squared by default. Set 'square_distances'=True to silence this warning. FutureWarning /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/sklearn/neighbors/_base.py:176: EfficiencyWarning: Precomputed sparse input was not sorted by data. EfficiencyWarning)
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 13721 samples in 0.106s... [t-SNE] Computed neighbors for 13721 samples in 0.006s... [t-SNE] Computed conditional probabilities for sample 1000 / 13721 [t-SNE] Computed conditional probabilities for sample 2000 / 13721 [t-SNE] Computed conditional probabilities for sample 3000 / 13721 [t-SNE] Computed conditional probabilities for sample 4000 / 13721 [t-SNE] Computed conditional probabilities for sample 5000 / 13721 [t-SNE] Computed conditional probabilities for sample 6000 / 13721 [t-SNE] Computed conditional probabilities for sample 7000 / 13721 [t-SNE] Computed conditional probabilities for sample 8000 / 13721 [t-SNE] Computed conditional probabilities for sample 9000 / 13721 [t-SNE] Computed conditional probabilities for sample 10000 / 13721 [t-SNE] Computed conditional probabilities for sample 11000 / 13721 [t-SNE] Computed conditional probabilities for sample 12000 / 13721 [t-SNE] Computed conditional probabilities for sample 13000 / 13721 [t-SNE] Computed conditional probabilities for sample 13721 / 13721 [t-SNE] Mean sigma: 0.191822 [t-SNE] Computed conditional probabilities in 0.902s [t-SNE] Iteration 50: error = 98.8877792, gradient norm = 0.0000144 (50 iterations in 13.910s) [t-SNE] Iteration 100: error = 97.8826904, gradient norm = 0.0019212 (50 iterations in 14.261s) [t-SNE] Iteration 150: error = 96.2591324, gradient norm = 0.0005127 (50 iterations in 13.488s) [t-SNE] Iteration 200: error = 96.1788940, gradient norm = 0.0000348 (50 iterations in 12.876s) [t-SNE] Iteration 250: error = 96.1777802, gradient norm = 0.0000412 (50 iterations in 13.488s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 96.177780 [t-SNE] Iteration 300: error = 3.8984511, gradient norm = 0.0011407 (50 iterations in 14.484s) [t-SNE] Iteration 350: error = 3.5256460, gradient norm = 0.0004893 (50 iterations in 14.672s) [t-SNE] Iteration 400: error = 3.3577683, gradient norm = 0.0002899 (50 iterations 
in 13.232s) [t-SNE] Iteration 450: error = 3.2564201, gradient norm = 0.0002018 (50 iterations in 13.224s) [t-SNE] Iteration 500: error = 3.1862645, gradient norm = 0.0001533 (50 iterations in 14.720s) [t-SNE] Iteration 550: error = 3.1340325, gradient norm = 0.0001218 (50 iterations in 14.885s) [t-SNE] Iteration 600: error = 3.0937319, gradient norm = 0.0001006 (50 iterations in 16.043s) [t-SNE] Iteration 650: error = 3.0613813, gradient norm = 0.0000854 (50 iterations in 14.829s) [t-SNE] Iteration 700: error = 3.0349061, gradient norm = 0.0000755 (50 iterations in 15.196s) [t-SNE] Iteration 750: error = 3.0130131, gradient norm = 0.0000653 (50 iterations in 15.476s) [t-SNE] Iteration 800: error = 2.9945922, gradient norm = 0.0000596 (50 iterations in 15.275s) [t-SNE] Iteration 850: error = 2.9788859, gradient norm = 0.0000535 (50 iterations in 15.165s) [t-SNE] Iteration 900: error = 2.9654026, gradient norm = 0.0000508 (50 iterations in 15.252s) [t-SNE] Iteration 950: error = 2.9539495, gradient norm = 0.0000474 (50 iterations in 15.380s) [t-SNE] Iteration 1000: error = 2.9440763, gradient norm = 0.0000439 (50 iterations in 10.796s) [t-SNE] KL divergence after 1000 iterations: 2.944076 [t-SNE] Computed conditional probabilities for sample 1000 / 13721 [t-SNE] Computed conditional probabilities for sample 2000 / 13721 [t-SNE] Computed conditional probabilities for sample 3000 / 13721 [t-SNE] Computed conditional probabilities for sample 4000 / 13721 [t-SNE] Computed conditional probabilities for sample 5000 / 13721 [t-SNE] Computed conditional probabilities for sample 6000 / 13721 [t-SNE] Computed conditional probabilities for sample 7000 / 13721 [t-SNE] Computed conditional probabilities for sample 8000 / 13721 [t-SNE] Computed conditional probabilities for sample 9000 / 13721 [t-SNE] Computed conditional probabilities for sample 10000 / 13721 [t-SNE] Computed conditional probabilities for sample 11000 / 13721 [t-SNE] Computed conditional probabilities for sample 
12000 / 13721 [t-SNE] Computed conditional probabilities for sample 13000 / 13721 [t-SNE] Computed conditional probabilities for sample 13721 / 13721 [t-SNE] Mean sigma: 0.191822 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 2.9min
Quality: 0.6833917034228073 Quality: 0.6838488232071932
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 3.5min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 200 tasks | elapsed: 2.6s [Parallel(n_jobs=4)]: Done 627 out of 627 | elapsed: 4.7s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 627 samples in 0.005s... [t-SNE] Computed neighbors for 627 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 627 / 627 [t-SNE] Mean sigma: 0.323984 [t-SNE] Computed conditional probabilities in 0.079s [t-SNE] Iteration 50: error = 69.5869446, gradient norm = 0.4563997 (50 iterations in 14.519s) [t-SNE] Iteration 100: error = 72.0226898, gradient norm = 0.4255284 (50 iterations in 13.876s) [t-SNE] Iteration 150: error = 70.1863861, gradient norm = 0.4590021 (50 iterations in 14.108s) [t-SNE] Iteration 200: error = 71.4655457, gradient norm = 0.4369505 (50 iterations in 14.280s) [t-SNE] Iteration 250: error = 71.8358154, gradient norm = 0.4292149 (50 iterations in 14.280s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 71.835815 [t-SNE] Iteration 300: error = 1.0404609, gradient norm = 0.0035189 (50 iterations in 14.168s) [t-SNE] Iteration 350: error = 0.9812051, gradient norm = 0.0005192 (50 iterations in 14.130s) [t-SNE] Iteration 400: error = 0.9658819, gradient norm = 0.0002868 (50 iterations in 14.118s) [t-SNE] Iteration 450: error = 0.9560483, gradient norm = 0.0003043 (50 iterations in 14.712s) [t-SNE] Iteration 500: error = 0.9503652, gradient norm = 0.0001862 (50 iterations in 14.212s) [t-SNE] Iteration 550: error = 0.9472948, gradient norm = 0.0001782 (50 iterations in 13.912s) [t-SNE] Iteration 600: error = 0.9458899, gradient norm = 0.0001290 (50 iterations in 14.080s) [t-SNE] Iteration 650: error = 0.9442515, gradient norm = 0.0001153 (50 iterations in 14.228s) [t-SNE] Iteration 700: error = 0.9434564, gradient norm = 0.0001247 (50 iterations in 14.128s) [t-SNE] Iteration 750: error = 0.9429828, gradient norm = 0.0000880 (50 iterations in 14.148s) [t-SNE] Iteration 800: error = 0.9422053, gradient norm = 0.0000816 (50 iterations in 14.564s) [t-SNE] Iteration 850: error = 0.9416287, gradient norm = 0.0001263 (50 iterations in 9.824s) [t-SNE] Iteration 
900: error = 0.9406828, gradient norm = 0.0000990 (50 iterations in 9.636s) [t-SNE] Iteration 950: error = 0.9397326, gradient norm = 0.0001229 (50 iterations in 9.252s) [t-SNE] Iteration 1000: error = 0.9385031, gradient norm = 0.0001753 (50 iterations in 9.568s) [t-SNE] KL divergence after 1000 iterations: 0.938503 [t-SNE] Computed conditional probabilities for sample 627 / 627 [t-SNE] Mean sigma: 0.323984 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 34.5s
Quality: 0.6260720620728253 Quality: 0.6262189121938811
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 41.8s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 175 tasks | elapsed: 0.6s [Parallel(n_jobs=4)]: Done 211 out of 211 | elapsed: 0.8s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 211 samples in 0.003s... [t-SNE] Computed neighbors for 211 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 211 / 211 [t-SNE] Mean sigma: 0.330055 [t-SNE] Computed conditional probabilities in 0.029s [t-SNE] Iteration 50: error = 67.1440353, gradient norm = 0.4737504 (50 iterations in 14.537s) [t-SNE] Iteration 100: error = 67.7779694, gradient norm = 0.4956588 (50 iterations in 14.271s) [t-SNE] Iteration 150: error = 66.5320969, gradient norm = 0.4914231 (50 iterations in 14.461s) [t-SNE] Iteration 200: error = 70.1314163, gradient norm = 0.4308272 (50 iterations in 14.500s) [t-SNE] Iteration 250: error = 66.4692001, gradient norm = 0.5280363 (50 iterations in 14.384s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 66.469200 [t-SNE] Iteration 300: error = 1.2999778, gradient norm = 0.0057169 (50 iterations in 14.192s) [t-SNE] Iteration 350: error = 1.0529135, gradient norm = 0.0040747 (50 iterations in 14.112s) [t-SNE] Iteration 400: error = 0.8796563, gradient norm = 0.0050108 (50 iterations in 14.076s) [t-SNE] Iteration 450: error = 0.7843000, gradient norm = 0.0016665 (50 iterations in 14.264s) [t-SNE] Iteration 500: error = 0.7590276, gradient norm = 0.0019381 (50 iterations in 13.968s) [t-SNE] Iteration 550: error = 0.7342200, gradient norm = 0.0012498 (50 iterations in 14.301s) [t-SNE] Iteration 600: error = 0.7251683, gradient norm = 0.0009059 (50 iterations in 14.887s) [t-SNE] Iteration 650: error = 0.7198693, gradient norm = 0.0003032 (50 iterations in 16.044s) [t-SNE] Iteration 700: error = 0.7148410, gradient norm = 0.0007594 (50 iterations in 16.003s) [t-SNE] Iteration 750: error = 0.7065721, gradient norm = 0.0003604 (50 iterations in 15.413s) [t-SNE] Iteration 800: error = 0.7046598, gradient norm = 0.0001792 (50 iterations in 15.484s) [t-SNE] Iteration 850: error = 0.7040578, gradient norm = 0.0003570 (50 iterations in 14.272s) [t-SNE] Iteration 
900: error = 0.7040629, gradient norm = 0.0001486 (50 iterations in 13.908s) [t-SNE] Iteration 950: error = 0.7038955, gradient norm = 0.0001638 (50 iterations in 13.832s) [t-SNE] Iteration 1000: error = 0.7042001, gradient norm = 0.0001613 (50 iterations in 14.280s) [t-SNE] KL divergence after 1000 iterations: 0.704200 [t-SNE] Computed conditional probabilities for sample 211 / 211 [t-SNE] Mean sigma: 0.330055 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 40.1s
Quality: 0.42996533488639155 Quality: 0.4305191671055225 Quality: 0.4305851080129033 Quality: 0.43081992078317277 Quality: 0.4310326085952449 Quality: 0.43118453281188074
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 46.5s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 130 tasks | elapsed: 0.3s [Parallel(n_jobs=4)]: Done 177 out of 177 | elapsed: 0.6s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 177 samples in 0.002s... [t-SNE] Computed neighbors for 177 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 177 / 177 [t-SNE] Mean sigma: 0.208925 [t-SNE] Computed conditional probabilities in 0.029s [t-SNE] Iteration 50: error = 69.0079269, gradient norm = 0.4788089 (50 iterations in 15.237s) [t-SNE] Iteration 100: error = 70.1651840, gradient norm = 0.4720786 (50 iterations in 14.782s) [t-SNE] Iteration 150: error = 70.1608810, gradient norm = 0.5047005 (50 iterations in 15.402s) [t-SNE] Iteration 200: error = 73.1866455, gradient norm = 0.4836816 (50 iterations in 14.968s) [t-SNE] Iteration 250: error = 69.0125885, gradient norm = 0.5192096 (50 iterations in 14.936s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 69.012589 [t-SNE] Iteration 300: error = 1.3351090, gradient norm = 0.0073878 (50 iterations in 15.383s) [t-SNE] Iteration 350: error = 1.1178845, gradient norm = 0.0044517 (50 iterations in 15.140s) [t-SNE] Iteration 400: error = 0.9716842, gradient norm = 0.0030198 (50 iterations in 14.992s) [t-SNE] Iteration 450: error = 0.9107239, gradient norm = 0.0035285 (50 iterations in 15.504s) [t-SNE] Iteration 500: error = 0.8709940, gradient norm = 0.0015707 (50 iterations in 14.896s) [t-SNE] Iteration 550: error = 0.8680761, gradient norm = 0.0008218 (50 iterations in 14.992s) [t-SNE] Iteration 600: error = 0.8433237, gradient norm = 0.0012949 (50 iterations in 14.240s) [t-SNE] Iteration 650: error = 0.8242183, gradient norm = 0.0020110 (50 iterations in 14.160s) [t-SNE] Iteration 700: error = 0.8227801, gradient norm = 0.0004378 (50 iterations in 14.328s) [t-SNE] Iteration 750: error = 0.8218392, gradient norm = 0.0002787 (50 iterations in 14.356s) [t-SNE] Iteration 800: error = 0.8222740, gradient norm = 0.0001620 (50 iterations in 14.308s) [t-SNE] Iteration 850: error = 0.8222404, gradient norm = 0.0001052 (50 iterations in 11.528s) [t-SNE] Iteration 
900: error = 0.8222609, gradient norm = 0.0000962 (50 iterations in 9.312s) [t-SNE] Iteration 950: error = 0.8222552, gradient norm = 0.0001118 (50 iterations in 9.324s) [t-SNE] Iteration 1000: error = 0.8222178, gradient norm = 0.0001113 (50 iterations in 9.320s) [t-SNE] KL divergence after 1000 iterations: 0.822218 [t-SNE] Computed conditional probabilities for sample 177 / 177 [t-SNE] Mean sigma: 0.208925 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 34.8s
Quality: 0.36025891998818543 Quality: 0.3631546178022487
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 41.5s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 80 out of 80 | elapsed: 0.3s finished
[t-SNE] Computing 79 nearest neighbors... [t-SNE] Indexed 80 samples in 0.002s... [t-SNE] Computed neighbors for 80 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 80 / 80 [t-SNE] Mean sigma: 0.361888 [t-SNE] Computed conditional probabilities in 0.025s [t-SNE] Iteration 50: error = 55.6263046, gradient norm = 0.5418246 (50 iterations in 14.633s) [t-SNE] Iteration 100: error = 56.8736305, gradient norm = 0.5067564 (50 iterations in 16.009s) [t-SNE] Iteration 150: error = 58.9666710, gradient norm = 0.4781292 (50 iterations in 15.652s) [t-SNE] Iteration 200: error = 56.3681793, gradient norm = 0.6354036 (50 iterations in 15.804s) [t-SNE] Iteration 250: error = 56.0231171, gradient norm = 0.5518212 (50 iterations in 16.004s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 56.023117 [t-SNE] Iteration 300: error = 0.8885117, gradient norm = 0.0045687 (50 iterations in 15.619s) [t-SNE] Iteration 350: error = 0.8146965, gradient norm = 0.0607318 (50 iterations in 16.136s) [t-SNE] Iteration 400: error = 0.6948527, gradient norm = 0.0019491 (50 iterations in 15.296s) [t-SNE] Iteration 450: error = 0.7141933, gradient norm = 0.0005713 (50 iterations in 15.117s) [t-SNE] Iteration 500: error = 0.6873496, gradient norm = 0.0002564 (50 iterations in 15.971s) [t-SNE] Iteration 550: error = 0.6563169, gradient norm = 0.0003641 (50 iterations in 15.968s) [t-SNE] Iteration 600: error = 0.6083952, gradient norm = 0.0003627 (50 iterations in 15.348s) [t-SNE] Iteration 650: error = 0.6363466, gradient norm = 0.0099425 (50 iterations in 15.740s) [t-SNE] Iteration 700: error = 0.6743546, gradient norm = 0.0004730 (50 iterations in 15.776s) [t-SNE] Iteration 750: error = 0.6325415, gradient norm = 0.0002915 (50 iterations in 15.359s) [t-SNE] Iteration 800: error = 0.6125697, gradient norm = 0.0002424 (50 iterations in 14.318s) [t-SNE] Iteration 850: error = 0.5761612, gradient norm = 0.0001926 (50 iterations in 14.176s) [t-SNE] Iteration 
900: error = 0.5650089, gradient norm = 0.0002264 (50 iterations in 13.872s) [t-SNE] Iteration 950: error = 0.6267213, gradient norm = 0.0002364 (50 iterations in 14.584s) [t-SNE] Iteration 1000: error = 0.6221863, gradient norm = 0.0001299 (50 iterations in 15.688s) [t-SNE] KL divergence after 1000 iterations: 0.622186 [t-SNE] Computed conditional probabilities for sample 80 / 80 [t-SNE] Mean sigma: 0.361888 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 46.4s
Quality: 0.24507493609643793 Quality: 0.24604053250136276 Quality: 0.24676902266062542
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 53.2s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 211 tasks | elapsed: 1.1s [Parallel(n_jobs=4)]: Done 232 out of 232 | elapsed: 1.3s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 232 samples in 0.002s... [t-SNE] Computed neighbors for 232 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 232 / 232 [t-SNE] Mean sigma: 0.342071 [t-SNE] Computed conditional probabilities in 0.040s [t-SNE] Iteration 50: error = 66.3946152, gradient norm = 0.4941365 (50 iterations in 15.823s) [t-SNE] Iteration 100: error = 69.2134933, gradient norm = 0.4852921 (50 iterations in 15.108s) [t-SNE] Iteration 150: error = 67.7871399, gradient norm = 0.4791799 (50 iterations in 14.988s) [t-SNE] Iteration 200: error = 65.6713943, gradient norm = 0.4879585 (50 iterations in 15.220s) [t-SNE] Iteration 250: error = 70.7853088, gradient norm = 0.4664988 (50 iterations in 15.003s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 70.785309 [t-SNE] Iteration 300: error = 1.0036762, gradient norm = 0.0096923 (50 iterations in 15.103s) [t-SNE] Iteration 350: error = 0.8064265, gradient norm = 0.0026958 (50 iterations in 15.109s) [t-SNE] Iteration 400: error = 0.7298294, gradient norm = 0.0023204 (50 iterations in 15.356s) [t-SNE] Iteration 450: error = 0.7003893, gradient norm = 0.0014619 (50 iterations in 14.724s) [t-SNE] Iteration 500: error = 0.6735385, gradient norm = 0.0006908 (50 iterations in 14.920s) [t-SNE] Iteration 550: error = 0.6698445, gradient norm = 0.0010592 (50 iterations in 15.124s) [t-SNE] Iteration 600: error = 0.6689910, gradient norm = 0.0001527 (50 iterations in 14.920s) [t-SNE] Iteration 650: error = 0.6652773, gradient norm = 0.0010876 (50 iterations in 15.156s) [t-SNE] Iteration 700: error = 0.6605956, gradient norm = 0.0004304 (50 iterations in 14.772s) [t-SNE] Iteration 750: error = 0.6560189, gradient norm = 0.0001858 (50 iterations in 14.440s) [t-SNE] Iteration 800: error = 0.6561432, gradient norm = 0.0001393 (50 iterations in 14.556s) [t-SNE] Iteration 850: error = 0.6561543, gradient norm = 0.0001220 (50 iterations in 14.548s) [t-SNE] Iteration 
900: error = 0.6561859, gradient norm = 0.0000454 (50 iterations in 15.136s) [t-SNE] Iteration 950: error = 0.6561298, gradient norm = 0.0000597 (50 iterations in 14.584s) [t-SNE] Iteration 1000: error = 0.6560916, gradient norm = 0.0001520 (50 iterations in 14.476s) [t-SNE] KL divergence after 1000 iterations: 0.656092 [t-SNE] Computed conditional probabilities for sample 232 / 232 [t-SNE] Mean sigma: 0.342071 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 42.1s
Quality: 0.5370651468355077
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 51.5s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 122 out of 122 | elapsed: 0.3s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 122 samples in 0.002s... [t-SNE] Computed neighbors for 122 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 122 / 122 [t-SNE] Mean sigma: 0.344239 [t-SNE] Computed conditional probabilities in 0.008s [t-SNE] Iteration 50: error = 60.3058586, gradient norm = 0.5300133 (50 iterations in 15.149s) [t-SNE] Iteration 100: error = 58.7652664, gradient norm = 0.5047957 (50 iterations in 15.912s) [t-SNE] Iteration 150: error = 61.8696480, gradient norm = 0.4875160 (50 iterations in 18.360s) [t-SNE] Iteration 200: error = 61.9140167, gradient norm = 0.4666477 (50 iterations in 19.304s) [t-SNE] Iteration 250: error = 62.1068077, gradient norm = 0.5639783 (50 iterations in 19.154s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 62.106808 [t-SNE] Iteration 300: error = 1.4155877, gradient norm = 0.0072292 (50 iterations in 20.703s) [t-SNE] Iteration 350: error = 1.2158779, gradient norm = 0.0037199 (50 iterations in 22.836s) [t-SNE] Iteration 400: error = 1.0249339, gradient norm = 0.0032959 (50 iterations in 23.028s) [t-SNE] Iteration 450: error = 0.8834728, gradient norm = 0.0055406 (50 iterations in 22.960s) [t-SNE] Iteration 500: error = 0.8625416, gradient norm = 0.0067632 (50 iterations in 22.068s) [t-SNE] Iteration 550: error = 0.7694434, gradient norm = 0.0011070 (50 iterations in 22.024s) [t-SNE] Iteration 600: error = 0.7115557, gradient norm = 0.0027955 (50 iterations in 19.196s) [t-SNE] Iteration 650: error = 0.6651524, gradient norm = 0.0117778 (50 iterations in 19.464s) [t-SNE] Iteration 700: error = 0.6459988, gradient norm = 0.0020666 (50 iterations in 19.868s) [t-SNE] Iteration 750: error = 0.6392302, gradient norm = 0.0016245 (50 iterations in 21.280s) [t-SNE] Iteration 800: error = 0.6300267, gradient norm = 0.0017018 (50 iterations in 22.292s) [t-SNE] Iteration 850: error = 0.6256139, gradient norm = 0.0031537 (50 iterations in 22.524s) [t-SNE] Iteration 
900: error = 0.6063113, gradient norm = 0.0009640 (50 iterations in 22.944s) [t-SNE] Iteration 950: error = 0.6057842, gradient norm = 0.0003714 (50 iterations in 23.116s) [t-SNE] Iteration 1000: error = 0.6045096, gradient norm = 0.0003181 (50 iterations in 22.564s) [t-SNE] KL divergence after 1000 iterations: 0.604510 [t-SNE] Computed conditional probabilities for sample 122 / 122 [t-SNE] Mean sigma: 0.344239 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 1.2min
Quality: 0.35064825116207987 Quality: 0.3519919146131988 Quality: 0.3519952965959341 Quality: 0.3521363951041258
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.4min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 96 out of 96 | elapsed: 0.3s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 96 samples in 0.002s... [t-SNE] Computed neighbors for 96 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 96 / 96 [t-SNE] Mean sigma: 0.552420 [t-SNE] Computed conditional probabilities in 0.007s [t-SNE] Iteration 50: error = 56.3807526, gradient norm = 0.5629714 (50 iterations in 19.421s) [t-SNE] Iteration 100: error = 55.0418968, gradient norm = 0.5378823 (50 iterations in 20.540s) [t-SNE] Iteration 150: error = 56.8342209, gradient norm = 0.4917724 (50 iterations in 22.348s) [t-SNE] Iteration 200: error = 58.2003899, gradient norm = 0.4757186 (50 iterations in 17.034s) [t-SNE] Iteration 250: error = 56.4644661, gradient norm = 0.5245247 (50 iterations in 15.538s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 56.464466 [t-SNE] Iteration 300: error = 0.7450489, gradient norm = 0.0352057 (50 iterations in 17.064s) [t-SNE] Iteration 350: error = 0.5613391, gradient norm = 0.0055142 (50 iterations in 17.176s) [t-SNE] Iteration 400: error = 0.4094673, gradient norm = 0.0058210 (50 iterations in 18.904s) [t-SNE] Iteration 450: error = 0.3309927, gradient norm = 0.0024221 (50 iterations in 16.232s) [t-SNE] Iteration 500: error = 0.3241532, gradient norm = 0.0003124 (50 iterations in 15.296s) [t-SNE] Iteration 550: error = 0.3208709, gradient norm = 0.0003083 (50 iterations in 14.620s) [t-SNE] Iteration 600: error = 0.3188967, gradient norm = 0.0003976 (50 iterations in 14.872s) [t-SNE] Iteration 650: error = 0.3082704, gradient norm = 0.0021708 (50 iterations in 15.752s) [t-SNE] Iteration 700: error = 0.2829228, gradient norm = 0.0029633 (50 iterations in 16.209s) [t-SNE] Iteration 750: error = 0.2493552, gradient norm = 0.0045182 (50 iterations in 15.211s) [t-SNE] Iteration 800: error = 0.2368684, gradient norm = 0.0033126 (50 iterations in 15.716s) [t-SNE] Iteration 850: error = 0.2325243, gradient norm = 0.0004402 (50 iterations in 14.978s) [t-SNE] Iteration 
900: error = 0.2326207, gradient norm = 0.0003488 (50 iterations in 15.078s) [t-SNE] Iteration 950: error = 0.2325152, gradient norm = 0.0003710 (50 iterations in 14.952s) [t-SNE] Iteration 1000: error = 0.2324666, gradient norm = 0.0003278 (50 iterations in 15.260s) [t-SNE] KL divergence after 1000 iterations: 0.232467 [t-SNE] Computed conditional probabilities for sample 96 / 96 [t-SNE] Mean sigma: 0.552420 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 56.7s
Quality: 0.4502980917063113
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.1min finished
For each motif, determine the peaks that contain it
# Import the hCWMs (hypothetical contribution-weight matrices) discovered by
# TF-MoDISco; `import_tfmodisco_motifs` is defined earlier in this notebook --
# presumably returns an ordered mapping of motif key -> hCWM array (verify)
hcwms = import_tfmodisco_motifs(tfm_results_path)
motif_keys = list(hcwms.keys())
# Import the MOODS motif-hit calls (filtered/collapsed BED) and map each hit
# back to the index of the peak that contains it, per motif key;
# `get_hit_peak_indices` is defined elsewhere in this notebook
hit_table = moods.import_moods_hits(os.path.join(moods_dir, "moods_filtered_collapsed.bed"))
hit_peak_indices = get_hit_peak_indices(hit_table, motif_keys)
# Import per-peak model embeddings from the .npz archive (this can take
# awhile); assumed shape (num_peaks, seq_len, emb_dim) -- TODO confirm
embeddings = np.load(embeddings_path)["embeddings"]
# Sum up over the sequence axis (axis=1) to remove position dependencies,
# yielding one fixed-length embedding vector per peak
summed_embeddings = np.sum(embeddings, axis=1)
For each motif, show the subclusters that exist within the TF-MoDISco-identified subpatterns
plot_motif_heterogeneity(tfm_obj)
/mnt/lab_data2/vir/tf_chr_atlas/02-24-2021/TF-Atlas/3M/reports/viz_sequence.py:152: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`). fig = plt.figure(figsize=figsize)
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 13721 | ||
| 0 | 2166 | ||
| 1 | 1583 | ||
| 2 | 1550 | ||
| 3 | 1530 | ||
| 4 | 1216 | ||
| 5 | 1215 | ||
| 6 | 1175 | ||
| 7 | 766 | ||
| 8 | 693 | ||
| 9 | 662 | ||
| 10 | 644 | ||
| 11 | 323 | ||
| 12 | 198 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 627 | ||
| 0 | 140 | ||
| 1 | 116 | ||
| 2 | 91 | ||
| 3 | 74 | ||
| 4 | 62 | ||
| 5 | 54 | ||
| 6 | 52 | ||
| 7 | 34 | ||
| 8 | 4 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 211 | ||
| 0 | 59 | ||
| 1 | 51 | ||
| 2 | 44 | ||
| 3 | 36 | ||
| 4 | 16 | ||
| 5 | 5 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 177 | ||
| 0 | 51 | ||
| 1 | 44 | ||
| 2 | 42 | ||
| 3 | 25 | ||
| 4 | 13 | ||
| 5 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 80 | ||
| 0 | 23 | ||
| 1 | 22 | ||
| 2 | 18 | ||
| 3 | 14 | ||
| 4 | 3 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 232 | ||
| 0 | 72 | ||
| 1 | 63 | ||
| 2 | 38 | ||
| 3 | 32 | ||
| 4 | 21 | ||
| 5 | 6 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 122 | ||
| 0 | 38 | ||
| 1 | 34 | ||
| 2 | 19 | ||
| 3 | 18 | ||
| 4 | 11 | ||
| 5 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 96 | ||
| 0 | 28 | ||
| 1 | 22 | ||
| 2 | 20 | ||
| 3 | 17 | ||
| 4 | 9 |
Cluster the peaks by their embeddings to highlight the structure of different peaks and the motifs they contain
plot_peak_clustering(summed_embeddings, motif_keys, hcwms, hit_peak_indices)
| Motif key | Embeddings | hCWM |
|---|---|---|
| 0_0 | ||
| 0_1 | ||
| 0_2 | ||
| 0_3 | ||
| 0_4 | ||
| 1_0 | ||
| 1_2 |