import os
import h5py
import util
import moods
import viz_sequence
import numpy as np
import pandas as pd
import modisco
import sklearn.decomposition
import umap
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import vdom.helpers as vdomh
from IPython.display import display
import tqdm
import tqdm.notebook

# `tqdm.tqdm_notebook` is deprecated and will be removed in tqdm 5.0;
# instantiate the notebook-flavored bar from `tqdm.notebook` instead
tqdm.notebook.tqdm()
/users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/ipykernel_launcher.py:16: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` app.launch_new_instance()
<tqdm.notebook.tqdm_notebook at 0x7f8746d8ce10>
# Plotting defaults
# `font_manager.createFontList` was deprecated in Matplotlib 3.2 (to be
# removed two minor releases later); register each font file individually
# with `FontManager.addfont`, as the deprecation warning recommends
for font_path in font_manager.findSystemFonts(fontpaths="/users/amtseng/modules/fonts"):
    font_manager.fontManager.addfont(font_path)

plot_params = {
    "figure.titlesize": 22,
    "axes.titlesize": 22,
    "axes.labelsize": 20,
    "legend.fontsize": 18,
    "xtick.labelsize": 16,
    "ytick.labelsize": 16,
    "font.family": "Roboto",
    "font.weight": "bold"
}
plt.rcParams.update(plot_params)
/users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/ipykernel_launcher.py:4: MatplotlibDeprecationWarning: The createFontList function was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use FontManager.addfont instead. after removing the cwd from sys.path.
# Define parameters/fetch arguments
# All input paths come in via environment variables, set by whatever
# launches this notebook; a KeyError here means one was not provided
shap_scores_path = os.environ["TFM_SHAP_PATH"]
tfm_results_path = os.environ["TFM_TFM_PATH"]
moods_dir = os.environ["TFM_MOODS_DIR"]
embeddings_path = os.environ["TFM_EMB_PATH"]

# Echo the resolved paths for the notebook record
print(f"DeepSHAP scores path: {shap_scores_path}")
print(f"TF-MoDISco results path: {tfm_results_path}")
print(f"Embeddings path: {embeddings_path}")
print(f"MOODS directory: {moods_dir}")
DeepSHAP scores path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//shap/ENCSR000EDZ/profile_scores_alex_format.h5 TF-MoDISco results path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//modisco/ENCSR000EDZ/profile/modisco_results.hd5 Embeddings path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//embeddings/ENCSR000EDZ/embeddings.npz MOODS directory: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021/reports/tfmodisco/notebooks/ENCSR000EDZ/moods/profile
# Define constants
# Window size (bp) to cut SHAP scores down to around the center — presumably
# the same window TF-MoDISco was run on (see import call below); confirm
shap_score_center_size = 400
# Name of the HDF5 dataset holding the hypothetical importance scores
hyp_score_key = "hyp_scores"
# NOTE(review): task_index is defined but not used in this chunk; None
# presumably means single-task/aggregate — confirm against downstream cells
task_index = None
Helper functions for plotting and organizing the results
def compute_tfmodisco_motif_subclusters(tfm_results):
    """
    From an imported TF-MoDISco results object, computes the subclustering
    of heterogeneity within each motif/pattern, in place.

    Arguments:
        `tfm_results`: an imported TF-MoDISco results object

    After this runs, each pattern has its subcluster assignments and 2D
    embedding populated (used by `plot_motif_heterogeneity`).
    """
    metaclusters = tfm_results.metacluster_idx_to_submetacluster_results
    for metacluster_key in metaclusters.keys():
        metacluster = metaclusters[metacluster_key]
        patterns = metacluster.seqlets_to_patterns_result.patterns
        if not patterns:
            # Skip metaclusters without patterns; use `continue` (not
            # `break`) so later metaclusters are still processed, matching
            # the empty-pattern handling in `import_tfmodisco_motifs`
            continue
        for pattern in patterns:
            # Compute subclustering for each pattern (motif)
            pattern.compute_subclusters_and_embedding(
                pattern_comparison_settings=modisco.affinitymat.core.PatternComparisonSettings(
                    track_names=["task0_hypothetical_contribs", "task0_contrib_scores"],
                    track_transformer=modisco.affinitymat.L1Normalizer(),
                    min_overlap=None  # This argument is irrelevant here
                ),
                perplexity=30, n_jobs=4, verbose=True
            )
def trim_hcwm(pfm, hcwm):
    """
    Trims the flanks of an hCWM based on the information content of the
    corresponding PFM.

    Arguments:
        `pfm`: an L x 4 position frequency matrix
        `hcwm`: an L x 4 hypothetical contribution weight matrix

    Positions with information content below 0.2 are cut off the flanks,
    and the retained interval is then expanded by 4 bp on either side
    (clipped to the motif boundaries). Returns the trimmed hCWM; if no
    position passes the threshold, returns the hCWM untrimmed.
    """
    ic = util.info_content(pfm)
    pass_inds = np.where(ic >= 0.2)[0]  # Cut off flanks with less than 0.2 IC
    if not len(pass_inds):
        # Degenerate motif with no informative positions: there is nothing
        # to anchor the trim on, so return as-is rather than crash on
        # min/max of an empty array
        return hcwm
    # Expand trimming to +/- 4bp on either side
    start = max(0, np.min(pass_inds) - 4)
    end = min(len(pfm), np.max(pass_inds) + 4 + 1)
    return hcwm[start:end]
def plot_motif_heterogeneity(tfm_results):
    """
    For each motif/pattern in the TF-MoDISco results object, displays an
    HTML table with one row for the aggregate motif and one per subpattern,
    showing: the subpattern key, the number of seqlets, a scatter of the
    pattern's 2D embedding (aggregate row colored by subcluster; subpattern
    rows highlighting that subcluster's members), and the trimmed hCWM.

    Requires `compute_tfmodisco_motif_subclusters` to have been run first
    so that `twod_embedding`/`subclusters` are populated on each pattern.
    """
    # NOTE(review): `colgroup` is constructed but never passed to
    # `vdomh.table` below, so these column widths are not actually applied
    colgroup = vdomh.colgroup(
        vdomh.col(style={"width": "5%"}),
        vdomh.col(style={"width": "5%"}),
        vdomh.col(style={"width": "50%"}),
        vdomh.col(style={"width": "40%"})
    )
    header = vdomh.thead(
        vdomh.tr(
            vdomh.th("Subpattern", style={"text-align": "center"}),
            vdomh.th("Seqlets", style={"text-align": "center"}),
            vdomh.th("Embeddings", style={"text-align": "center"}),
            vdomh.th("hCWM", style={"text-align": "center"})
        )
    )
    metaclusters = tfm_results.metacluster_idx_to_submetacluster_results
    num_metaclusters = len(metaclusters.keys())
    for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
        metacluster = metaclusters[metacluster_key]
        display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
        patterns = metacluster.seqlets_to_patterns_result.patterns
        if not patterns:
            # NOTE(review): `break` stops at the first metacluster with no
            # patterns, so any later metaclusters are never shown — confirm
            # this is intended (the analogous check in
            # `import_tfmodisco_motifs` uses `continue`)
            break
        num_patterns = len(patterns)
        for pattern_i, pattern in enumerate(patterns):
            display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
            embedding = pattern.twod_embedding
            subpattern_clusters = pattern.subclusters
            # Aggregate motif
            pfm = pattern["sequence"].fwd
            hcwm = pattern["task0_hypothetical_contribs"].fwd
            trimmed_hcwm = trim_hcwm(pfm, hcwm)
            hcwm_fig = viz_sequence.plot_weights(
                trimmed_hcwm, subticks_frequency=(len(trimmed_hcwm) + 1), return_fig=True
            )
            # Scatter of the whole embedding, colored by subcluster label
            emb_fig, ax = plt.subplots()
            ax.scatter(
                embedding[:,0], embedding[:,1], c=subpattern_clusters, cmap="tab20", alpha=0.3
            )
            table_rows = [vdomh.tr(
                vdomh.td("Agg."),
                vdomh.td(str(len(pattern.seqlets))),
                vdomh.td(util.figure_to_vdom_image(emb_fig)),
                vdomh.td(util.figure_to_vdom_image(hcwm_fig))
            )]
            # One row per subpattern (subcluster)
            for subpattern_key, subpattern in pattern.subcluster_to_subpattern.items():
                pfm = subpattern["sequence"].fwd
                hcwm = subpattern["task0_hypothetical_contribs"].fwd
                trimmed_hcwm = trim_hcwm(pfm, hcwm)
                hcwm_fig = viz_sequence.plot_weights(
                    trimmed_hcwm, subticks_frequency=(len(trimmed_hcwm) + 1), return_fig=True
                )
                emb_fig, ax = plt.subplots()
                # Boolean mask colors this subcluster's points vs the rest
                ax.scatter(
                    embedding[:,0], embedding[:,1], c=(subpattern_clusters == subpattern_key), alpha=0.3
                )
                table_rows.append(vdomh.tr(
                    vdomh.td(str(subpattern_key)),
                    vdomh.td(str(len(subpattern.seqlets))),
                    vdomh.td(util.figure_to_vdom_image(emb_fig)),
                    vdomh.td(util.figure_to_vdom_image(hcwm_fig))
                ))
            table = vdomh.table(header, vdomh.tbody(*table_rows))
            display(table)
            plt.close("all")  # Remove all standing figures
def import_tfmodisco_motifs(tfm_results_path, trim=True, only_pos=True):
    """
    Imports hCWMs into a dictionary, mapping the key `"x_y"` to the hCWM,
    where `x` is the metacluster index and `y` is the pattern index.

    Arguments:
        `tfm_results_path`: path to HDF5 containing TF-MoDISco results
        `trim`: if True, trim the motif flanks based on information content
        `only_pos`: if True, only return motifs with positive contributions

    Returns the dictionary of hCWMs.
    """
    hcwms = {}
    with h5py.File(tfm_results_path, "r") as f:
        metaclusters = f["metacluster_idx_to_submetacluster_results"]
        for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
            metacluster = metaclusters[metacluster_key]
            if "patterns" not in metacluster["seqlets_to_patterns_result"]:
                # No motifs discovered for this metacluster
                continue
            patterns = metacluster["seqlets_to_patterns_result"]["patterns"]
            # Read the name list from HDF5 once, not once per use
            pattern_names = patterns["all_pattern_names"][:]
            for pattern_i, pattern_name in enumerate(pattern_names):
                pattern = patterns[pattern_name.decode()]
                pfm = pattern["sequence"]["fwd"][:]
                hcwm = pattern["task0_hypothetical_contribs"]["fwd"][:]
                cwm = pattern["task0_contrib_scores"]["fwd"][:]
                # Check that the contribution scores are overall positive
                if only_pos and np.sum(cwm) < 0:
                    continue
                if trim:
                    hcwm = trim_hcwm(pfm, hcwm)
                hcwms["%d_%d" % (metacluster_i, pattern_i)] = hcwm
    return hcwms
def get_hit_peak_indices(hit_table, motif_keys):
    """
    Returns a dictionary of NumPy arrays, mapping each motif key to the
    set of peak indices that contain that motif.

    Arguments:
        `hit_table`: a DataFrame with (at least) "key" and "peak_index"
            columns, one row per motif hit
        `motif_keys`: iterable of motif keys to look up
    """
    return {
        motif_key: hit_table.loc[hit_table["key"] == motif_key, "peak_index"].values
        for motif_key in motif_keys
    }
def plot_peak_clustering(embeddings, motif_keys, hcwms, hit_peak_indices):
    """
    Plots a UMAP of the peak embeddings, highlighting for each motif the
    peaks that contain a hit of that motif.

    Arguments:
        `embeddings`: an N x D array of peak embeddings
        `motif_keys`: motif keys to display (e.g. from
            `import_tfmodisco_motifs`)
        `hcwms`: dictionary mapping motif key to hCWM
        `hit_peak_indices`: dictionary mapping motif key to the array of
            peak indices containing that motif

    Displays an HTML table with one row per motif: the key, the UMAP
    scatter colored by motif membership, and the motif's hCWM.
    """
    # First reduce using PCA
    centered = embeddings - np.mean(embeddings, axis=0, keepdims=True)
    pca = sklearn.decomposition.PCA(n_components=20)
    reduced = pca.fit_transform(centered)
    # Run UMAP on the PCA-reduced data; the original fit UMAP on `centered`,
    # which left the PCA result entirely unused — fixed to use `reduced`
    um = umap.UMAP(verbose=False)
    trans = um.fit_transform(reduced)
    # NOTE(review): `colgroup` is constructed but never passed to
    # `vdomh.table` below, so these column widths are not actually applied
    colgroup = vdomh.colgroup(
        vdomh.col(style={"width": "5%"}),
        vdomh.col(style={"width": "55%"}),  # was "55": missing the "%" unit
        vdomh.col(style={"width": "40%"})
    )
    header = vdomh.thead(
        vdomh.tr(
            vdomh.th("Motif key", style={"text-align": "center"}),
            vdomh.th("Embeddings", style={"text-align": "center"}),
            vdomh.th("hCWM", style={"text-align": "center"})
        )
    )
    table_rows = []
    for motif_key in motif_keys:
        hcwm = hcwms[motif_key]
        hcwm_fig = viz_sequence.plot_weights(
            hcwm, subticks_frequency=(len(hcwm) + 1), return_fig=True
        )
        emb_fig, ax = plt.subplots()
        # Binary indicator: 1 for peaks containing this motif, 0 otherwise
        subset = np.zeros(len(embeddings), dtype=int)
        subset[hit_peak_indices[motif_key]] = 1
        ax.scatter(
            trans[:,0], trans[:,1], c=subset, alpha=0.3
        )
        table_rows.append(vdomh.tr(
            vdomh.td(motif_key),
            vdomh.td(util.figure_to_vdom_image(emb_fig)),
            vdomh.td(util.figure_to_vdom_image(hcwm_fig))
        ))
    table = vdomh.table(header, vdomh.tbody(*table_rows))
    display(table)
    plt.close("all")  # Remove all standing figures
Run motif subclustering
# Import SHAP coordinates and one-hot sequences.
# This cuts the sequences/scores off just as how TF-MoDISco saw them, but
# the coordinates are uncut
hyp_scores, _, one_hot_seqs, shap_coords = util.import_shap_scores(
    shap_scores_path, hyp_score_key,
    center_cut_size=shap_score_center_size, remove_non_acgt=False
)
Importing SHAP scores: 0%| | 0/47 [00:00<?, ?it/s] Importing SHAP scores: 2%|▏ | 1/47 [00:00<00:14, 3.10it/s] Importing SHAP scores: 4%|▍ | 2/47 [00:00<00:10, 4.28it/s] Importing SHAP scores: 6%|▋ | 3/47 [00:00<00:10, 4.32it/s] Importing SHAP scores: 9%|▊ | 4/47 [00:00<00:08, 4.86it/s] Importing SHAP scores: 11%|█ | 5/47 [00:01<00:10, 3.98it/s] Importing SHAP scores: 13%|█▎ | 6/47 [00:01<00:14, 2.92it/s] Importing SHAP scores: 15%|█▍ | 7/47 [00:02<00:14, 2.83it/s] Importing SHAP scores: 17%|█▋ | 8/47 [00:02<00:15, 2.47it/s] Importing SHAP scores: 19%|█▉ | 9/47 [00:03<00:16, 2.31it/s] Importing SHAP scores: 21%|██▏ | 10/47 [00:03<00:16, 2.28it/s] Importing SHAP scores: 23%|██▎ | 11/47 [00:04<00:15, 2.28it/s] Importing SHAP scores: 26%|██▌ | 12/47 [00:04<00:14, 2.43it/s] Importing SHAP scores: 28%|██▊ | 13/47 [00:04<00:13, 2.55it/s] Importing SHAP scores: 30%|██▉ | 14/47 [00:05<00:13, 2.45it/s] Importing SHAP scores: 32%|███▏ | 15/47 [00:05<00:13, 2.41it/s] Importing SHAP scores: 34%|███▍ | 16/47 [00:06<00:13, 2.31it/s] Importing SHAP scores: 36%|███▌ | 17/47 [00:06<00:10, 2.83it/s] Importing SHAP scores: 38%|███▊ | 18/47 [00:06<00:08, 3.33it/s] Importing SHAP scores: 40%|████ | 19/47 [00:06<00:07, 3.57it/s] Importing SHAP scores: 43%|████▎ | 20/47 [00:06<00:06, 3.97it/s] Importing SHAP scores: 45%|████▍ | 21/47 [00:07<00:06, 4.05it/s] Importing SHAP scores: 47%|████▋ | 22/47 [00:07<00:06, 3.91it/s] Importing SHAP scores: 49%|████▉ | 23/47 [00:07<00:06, 3.56it/s] Importing SHAP scores: 51%|█████ | 24/47 [00:08<00:07, 3.00it/s] Importing SHAP scores: 53%|█████▎ | 25/47 [00:08<00:07, 2.95it/s] Importing SHAP scores: 55%|█████▌ | 26/47 [00:08<00:07, 2.91it/s] Importing SHAP scores: 57%|█████▋ | 27/47 [00:09<00:07, 2.65it/s] Importing SHAP scores: 60%|█████▉ | 28/47 [00:09<00:07, 2.43it/s] Importing SHAP scores: 62%|██████▏ | 29/47 [00:10<00:07, 2.32it/s] Importing SHAP scores: 64%|██████▍ | 30/47 [00:10<00:06, 2.48it/s] Importing SHAP scores: 66%|██████▌ | 31/47 
[00:10<00:06, 2.61it/s] Importing SHAP scores: 68%|██████▊ | 32/47 [00:11<00:06, 2.46it/s] Importing SHAP scores: 70%|███████ | 33/47 [00:11<00:05, 2.60it/s] Importing SHAP scores: 72%|███████▏ | 34/47 [00:11<00:04, 3.06it/s] Importing SHAP scores: 74%|███████▍ | 35/47 [00:12<00:03, 3.34it/s] Importing SHAP scores: 77%|███████▋ | 36/47 [00:12<00:02, 3.82it/s] Importing SHAP scores: 79%|███████▊ | 37/47 [00:12<00:02, 3.95it/s] Importing SHAP scores: 81%|████████ | 38/47 [00:12<00:02, 4.35it/s] Importing SHAP scores: 83%|████████▎ | 39/47 [00:13<00:01, 4.15it/s] Importing SHAP scores: 85%|████████▌ | 40/47 [00:13<00:02, 3.21it/s] Importing SHAP scores: 87%|████████▋ | 41/47 [00:13<00:01, 3.14it/s] Importing SHAP scores: 89%|████████▉ | 42/47 [00:14<00:01, 2.75it/s] Importing SHAP scores: 91%|█████████▏| 43/47 [00:14<00:01, 2.81it/s] Importing SHAP scores: 94%|█████████▎| 44/47 [00:14<00:01, 2.86it/s] Importing SHAP scores: 96%|█████████▌| 45/47 [00:15<00:00, 2.61it/s] Importing SHAP scores: 98%|█████████▊| 46/47 [00:15<00:00, 2.72it/s] Importing SHAP scores: 100%|██████████| 47/47 [00:15<00:00, 2.95it/s]
# Import the TF-MoDISco results object, re-attaching the hypothetical
# scores and one-hot sequences imported above
tfm_obj = util.import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs, shap_score_center_size)
# Compute subclusters (needed for older versions of TF-MoDISco); this takes awhile!
compute_tfmodisco_motif_subclusters(tfm_obj)
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 9.6s [Parallel(n_jobs=4)]: Done 192 tasks | elapsed: 21.9s [Parallel(n_jobs=4)]: Done 442 tasks | elapsed: 42.4s [Parallel(n_jobs=4)]: Done 792 tasks | elapsed: 1.2min [Parallel(n_jobs=4)]: Done 1242 tasks | elapsed: 1.8min [Parallel(n_jobs=4)]: Done 1792 tasks | elapsed: 2.5min [Parallel(n_jobs=4)]: Done 2442 tasks | elapsed: 3.0min [Parallel(n_jobs=4)]: Done 3192 tasks | elapsed: 3.5min [Parallel(n_jobs=4)]: Done 4042 tasks | elapsed: 4.1min [Parallel(n_jobs=4)]: Done 4992 tasks | elapsed: 4.8min [Parallel(n_jobs=4)]: Done 6042 tasks | elapsed: 5.6min [Parallel(n_jobs=4)]: Done 7192 tasks | elapsed: 6.5min [Parallel(n_jobs=4)]: Done 8442 tasks | elapsed: 8.3min [Parallel(n_jobs=4)]: Done 9792 tasks | elapsed: 10.6min [Parallel(n_jobs=4)]: Done 11242 tasks | elapsed: 12.8min [Parallel(n_jobs=4)]: Done 12084 out of 12084 | elapsed: 14.3min finished /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py:699: FutureWarning: 'square_distances' has been introduced in 0.24 to help phase out legacy squaring behavior. The 'legacy' setting will be removed in 1.1 (renaming of 0.26), and the default setting will be changed to True. In 1.3, 'square_distances' will be removed altogether, and distances will be squared by default. Set 'square_distances'=True to silence this warning. FutureWarning /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/sklearn/neighbors/_base.py:176: EfficiencyWarning: Precomputed sparse input was not sorted by data. EfficiencyWarning)
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 12084 samples in 0.199s... [t-SNE] Computed neighbors for 12084 samples in 0.007s... [t-SNE] Computed conditional probabilities for sample 1000 / 12084 [t-SNE] Computed conditional probabilities for sample 2000 / 12084 [t-SNE] Computed conditional probabilities for sample 3000 / 12084 [t-SNE] Computed conditional probabilities for sample 4000 / 12084 [t-SNE] Computed conditional probabilities for sample 5000 / 12084 [t-SNE] Computed conditional probabilities for sample 6000 / 12084 [t-SNE] Computed conditional probabilities for sample 7000 / 12084 [t-SNE] Computed conditional probabilities for sample 8000 / 12084 [t-SNE] Computed conditional probabilities for sample 9000 / 12084 [t-SNE] Computed conditional probabilities for sample 10000 / 12084 [t-SNE] Computed conditional probabilities for sample 11000 / 12084 [t-SNE] Computed conditional probabilities for sample 12000 / 12084 [t-SNE] Computed conditional probabilities for sample 12084 / 12084 [t-SNE] Mean sigma: 0.192738 [t-SNE] Computed conditional probabilities in 1.666s [t-SNE] Iteration 50: error = 97.8401718, gradient norm = 0.0000187 (50 iterations in 18.613s) [t-SNE] Iteration 100: error = 96.7346344, gradient norm = 0.0039168 (50 iterations in 18.308s) [t-SNE] Iteration 150: error = 96.2593842, gradient norm = 0.0000144 (50 iterations in 18.628s) [t-SNE] Iteration 200: error = 96.2578201, gradient norm = 0.0000150 (50 iterations in 17.744s) [t-SNE] Iteration 250: error = 96.2542038, gradient norm = 0.0000253 (50 iterations in 16.043s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 96.254204 [t-SNE] Iteration 300: error = 3.9181707, gradient norm = 0.0012867 (50 iterations in 15.910s) [t-SNE] Iteration 350: error = 3.5421965, gradient norm = 0.0005286 (50 iterations in 15.544s) [t-SNE] Iteration 400: error = 3.3699074, gradient norm = 0.0003206 (50 iterations in 15.300s) [t-SNE] Iteration 450: error = 3.2646182, gradient norm = 
0.0002190 (50 iterations in 15.688s) [t-SNE] Iteration 500: error = 3.1918874, gradient norm = 0.0001632 (50 iterations in 15.652s) [t-SNE] Iteration 550: error = 3.1380603, gradient norm = 0.0001291 (50 iterations in 16.212s) [t-SNE] Iteration 600: error = 3.0962236, gradient norm = 0.0001071 (50 iterations in 16.776s) [t-SNE] Iteration 650: error = 3.0631409, gradient norm = 0.0000898 (50 iterations in 16.916s) [t-SNE] Iteration 700: error = 3.0361915, gradient norm = 0.0000809 (50 iterations in 16.848s) [t-SNE] Iteration 750: error = 3.0143361, gradient norm = 0.0000697 (50 iterations in 16.980s) [t-SNE] Iteration 800: error = 2.9960706, gradient norm = 0.0000632 (50 iterations in 17.396s) [t-SNE] Iteration 850: error = 2.9807963, gradient norm = 0.0000588 (50 iterations in 16.800s) [t-SNE] Iteration 900: error = 2.9680598, gradient norm = 0.0000549 (50 iterations in 17.672s) [t-SNE] Iteration 950: error = 2.9576623, gradient norm = 0.0000530 (50 iterations in 17.003s) [t-SNE] Iteration 1000: error = 2.9490950, gradient norm = 0.0000502 (50 iterations in 17.733s) [t-SNE] KL divergence after 1000 iterations: 2.949095 [t-SNE] Computed conditional probabilities for sample 1000 / 12084 [t-SNE] Computed conditional probabilities for sample 2000 / 12084 [t-SNE] Computed conditional probabilities for sample 3000 / 12084 [t-SNE] Computed conditional probabilities for sample 4000 / 12084 [t-SNE] Computed conditional probabilities for sample 5000 / 12084 [t-SNE] Computed conditional probabilities for sample 6000 / 12084 [t-SNE] Computed conditional probabilities for sample 7000 / 12084 [t-SNE] Computed conditional probabilities for sample 8000 / 12084 [t-SNE] Computed conditional probabilities for sample 9000 / 12084 [t-SNE] Computed conditional probabilities for sample 10000 / 12084 [t-SNE] Computed conditional probabilities for sample 11000 / 12084 [t-SNE] Computed conditional probabilities for sample 12000 / 12084 [t-SNE] Computed conditional probabilities for sample 
12084 / 12084 [t-SNE] Mean sigma: 0.192738 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 6.2min
Quality: 0.67448011935245 Quality: 0.6745915514253953 Quality: 0.6746565929314844 Quality: 0.6746685082487618 Quality: 0.6746700333286481
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 7.2min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 76 tasks | elapsed: 1.5s [Parallel(n_jobs=4)]: Done 376 tasks | elapsed: 6.6s [Parallel(n_jobs=4)]: Done 876 tasks | elapsed: 14.7s [Parallel(n_jobs=4)]: Done 1576 tasks | elapsed: 26.6s [Parallel(n_jobs=4)]: Done 1842 out of 1842 | elapsed: 31.1s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 1842 samples in 0.011s... [t-SNE] Computed neighbors for 1842 samples in 0.002s... [t-SNE] Computed conditional probabilities for sample 1000 / 1842 [t-SNE] Computed conditional probabilities for sample 1842 / 1842 [t-SNE] Mean sigma: 0.200372 [t-SNE] Computed conditional probabilities in 0.229s [t-SNE] Iteration 50: error = 75.6942291, gradient norm = 0.1799940 (50 iterations in 16.584s) [t-SNE] Iteration 100: error = 76.4984360, gradient norm = 0.1421612 (50 iterations in 15.292s) [t-SNE] Iteration 150: error = 76.6769104, gradient norm = 0.1553825 (50 iterations in 14.920s) [t-SNE] Iteration 200: error = 77.1579514, gradient norm = 0.1531068 (50 iterations in 15.080s) [t-SNE] Iteration 250: error = 75.8800812, gradient norm = 0.1428561 (50 iterations in 15.448s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 75.880081 [t-SNE] Iteration 300: error = 2.2154641, gradient norm = 0.0013897 (50 iterations in 15.032s) [t-SNE] Iteration 350: error = 2.0880747, gradient norm = 0.0004976 (50 iterations in 14.540s) [t-SNE] Iteration 400: error = 2.0339913, gradient norm = 0.0003338 (50 iterations in 15.376s) [t-SNE] Iteration 450: error = 2.0065196, gradient norm = 0.0001842 (50 iterations in 14.820s) [t-SNE] Iteration 500: error = 1.9925749, gradient norm = 0.0001531 (50 iterations in 14.832s) [t-SNE] Iteration 550: error = 1.9845226, gradient norm = 0.0001409 (50 iterations in 15.240s) [t-SNE] Iteration 600: error = 1.9799073, gradient norm = 0.0001129 (50 iterations in 14.880s) [t-SNE] Iteration 650: error = 1.9760517, gradient norm = 0.0001098 (50 iterations in 14.989s) [t-SNE] Iteration 700: error = 1.9723423, gradient norm = 0.0001220 (50 iterations in 14.975s) [t-SNE] Iteration 750: error = 1.9694014, gradient norm = 0.0000905 (50 iterations in 15.160s) [t-SNE] Iteration 800: error = 1.9673088, gradient norm = 0.0000801 (50 iterations in 14.340s) [t-SNE] Iteration 850: error = 1.9652938, 
gradient norm = 0.0000907 (50 iterations in 14.262s) [t-SNE] Iteration 900: error = 1.9634950, gradient norm = 0.0001058 (50 iterations in 14.174s) [t-SNE] Iteration 950: error = 1.9622608, gradient norm = 0.0001244 (50 iterations in 15.256s) [t-SNE] Iteration 1000: error = 1.9607730, gradient norm = 0.0000821 (50 iterations in 14.608s) [t-SNE] KL divergence after 1000 iterations: 1.960773 [t-SNE] Computed conditional probabilities for sample 1000 / 1842 [t-SNE] Computed conditional probabilities for sample 1842 / 1842 [t-SNE] Mean sigma: 0.200372 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 1.2min
Quality: 0.5580442292938426 Quality: 0.5585456910699081 Quality: 0.559103097847472 Quality: 0.5591752107186767 Quality: 0.5593485500413204 Quality: 0.5593986604280651 Quality: 0.5594160909280034 Quality: 0.5595395546755745
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.4min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 196 out of 196 | elapsed: 0.7s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 196 samples in 0.002s... [t-SNE] Computed neighbors for 196 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 196 / 196 [t-SNE] Mean sigma: 0.207277 [t-SNE] Computed conditional probabilities in 0.027s [t-SNE] Iteration 50: error = 72.1344299, gradient norm = 0.4721882 (50 iterations in 18.394s) [t-SNE] Iteration 100: error = 73.5215759, gradient norm = 0.4626701 (50 iterations in 18.636s) [t-SNE] Iteration 150: error = 70.6959991, gradient norm = 0.4551903 (50 iterations in 19.396s) [t-SNE] Iteration 200: error = 73.5112228, gradient norm = 0.4604131 (50 iterations in 19.476s) [t-SNE] Iteration 250: error = 72.8069534, gradient norm = 0.4379607 (50 iterations in 22.856s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 72.806953 [t-SNE] Iteration 300: error = 1.1081481, gradient norm = 0.0079488 (50 iterations in 23.092s) [t-SNE] Iteration 350: error = 0.9004459, gradient norm = 0.0046531 (50 iterations in 22.672s) [t-SNE] Iteration 400: error = 0.8038333, gradient norm = 0.0026035 (50 iterations in 22.124s) [t-SNE] Iteration 450: error = 0.7655749, gradient norm = 0.0030100 (50 iterations in 22.388s) [t-SNE] Iteration 500: error = 0.7240446, gradient norm = 0.0015711 (50 iterations in 19.148s) [t-SNE] Iteration 550: error = 0.7130186, gradient norm = 0.0004434 (50 iterations in 19.508s) [t-SNE] Iteration 600: error = 0.7126379, gradient norm = 0.0000832 (50 iterations in 18.668s) [t-SNE] Iteration 650: error = 0.7118836, gradient norm = 0.0007275 (50 iterations in 19.588s) [t-SNE] Iteration 700: error = 0.7099325, gradient norm = 0.0001687 (50 iterations in 21.460s) [t-SNE] Iteration 750: error = 0.7100989, gradient norm = 0.0001159 (50 iterations in 21.428s) [t-SNE] Iteration 800: error = 0.7100409, gradient norm = 0.0001285 (50 iterations in 21.896s) [t-SNE] Iteration 850: error = 0.7100967, gradient norm = 0.0000572 (50 iterations in 22.156s) [t-SNE] Iteration 
900: error = 0.7100786, gradient norm = 0.0000804 (50 iterations in 22.848s) [t-SNE] Iteration 950: error = 0.7101216, gradient norm = 0.0000639 (50 iterations in 22.824s) [t-SNE] Iteration 1000: error = 0.7101310, gradient norm = 0.0000411 (50 iterations in 19.036s) [t-SNE] KL divergence after 1000 iterations: 0.710131 [t-SNE] Computed conditional probabilities for sample 196 / 196 [t-SNE] Mean sigma: 0.207277 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 1.2min
Quality: 0.42506843797906535 Quality: 0.4252363878371819
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.3min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 130 tasks | elapsed: 0.4s [Parallel(n_jobs=4)]: Done 156 out of 156 | elapsed: 0.6s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 156 samples in 0.002s... [t-SNE] Computed neighbors for 156 samples in 0.006s... [t-SNE] Computed conditional probabilities for sample 156 / 156 [t-SNE] Mean sigma: 0.384562 [t-SNE] Computed conditional probabilities in 0.041s [t-SNE] Iteration 50: error = 64.9674683, gradient norm = 0.4692222 (50 iterations in 23.262s) [t-SNE] Iteration 100: error = 63.7538872, gradient norm = 0.4569393 (50 iterations in 19.396s) [t-SNE] Iteration 150: error = 65.2411270, gradient norm = 0.4699982 (50 iterations in 16.944s) [t-SNE] Iteration 200: error = 62.6192017, gradient norm = 0.4567654 (50 iterations in 17.712s) [t-SNE] Iteration 250: error = 68.3434677, gradient norm = 0.4265467 (50 iterations in 16.796s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 68.343468 [t-SNE] Iteration 300: error = 1.1422937, gradient norm = 0.0090401 (50 iterations in 17.843s) [t-SNE] Iteration 350: error = 0.8752730, gradient norm = 0.0084328 (50 iterations in 17.412s) [t-SNE] Iteration 400: error = 0.6130094, gradient norm = 0.0084622 (50 iterations in 15.988s) [t-SNE] Iteration 450: error = 0.5593429, gradient norm = 0.0021597 (50 iterations in 14.748s) [t-SNE] Iteration 500: error = 0.5280651, gradient norm = 0.0019917 (50 iterations in 15.324s) [t-SNE] Iteration 550: error = 0.5208037, gradient norm = 0.0010704 (50 iterations in 15.488s) [t-SNE] Iteration 600: error = 0.5191317, gradient norm = 0.0002433 (50 iterations in 17.024s) [t-SNE] Iteration 650: error = 0.5191595, gradient norm = 0.0001860 (50 iterations in 15.352s) [t-SNE] Iteration 700: error = 0.5188586, gradient norm = 0.0001666 (50 iterations in 15.336s) [t-SNE] Iteration 750: error = 0.5190752, gradient norm = 0.0002557 (50 iterations in 14.652s) [t-SNE] Iteration 800: error = 0.5189483, gradient norm = 0.0004177 (50 iterations in 15.180s) [t-SNE] Iteration 850: error = 0.5188360, gradient norm = 0.0001614 (50 iterations in 14.516s) [t-SNE] Iteration 
900: error = 0.5187735, gradient norm = 0.0001238 (50 iterations in 14.864s) [t-SNE] Iteration 950: error = 0.5188518, gradient norm = 0.0001389 (50 iterations in 14.552s) [t-SNE] Iteration 1000: error = 0.5188066, gradient norm = 0.0001373 (50 iterations in 15.238s) [t-SNE] KL divergence after 1000 iterations: 0.518807 [t-SNE] Computed conditional probabilities for sample 156 / 156 [t-SNE] Mean sigma: 0.384562 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 56.4s
Quality: 0.4558663484917155
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.1min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 130 tasks | elapsed: 0.4s [Parallel(n_jobs=4)]: Done 149 out of 149 | elapsed: 0.5s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 149 samples in 0.002s... [t-SNE] Computed neighbors for 149 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 149 / 149 [t-SNE] Mean sigma: 0.396545 [t-SNE] Computed conditional probabilities in 0.029s [t-SNE] Iteration 50: error = 68.0247040, gradient norm = 0.4602324 (50 iterations in 15.008s) [t-SNE] Iteration 100: error = 66.4100876, gradient norm = 0.5035163 (50 iterations in 15.056s) [t-SNE] Iteration 150: error = 63.9127464, gradient norm = 0.5402898 (50 iterations in 14.664s) [t-SNE] Iteration 200: error = 67.0245514, gradient norm = 0.4827542 (50 iterations in 14.852s) [t-SNE] Iteration 250: error = 68.2790146, gradient norm = 0.4678780 (50 iterations in 14.820s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 68.279015 [t-SNE] Iteration 300: error = 1.1021550, gradient norm = 0.0117363 (50 iterations in 14.850s) [t-SNE] Iteration 350: error = 0.8579618, gradient norm = 0.0030881 (50 iterations in 14.014s) [t-SNE] Iteration 400: error = 0.7802648, gradient norm = 0.0020708 (50 iterations in 15.182s) [t-SNE] Iteration 450: error = 0.7243990, gradient norm = 0.0060081 (50 iterations in 14.768s) [t-SNE] Iteration 500: error = 0.5337957, gradient norm = 0.0052582 (50 iterations in 14.980s) [t-SNE] Iteration 550: error = 0.5248150, gradient norm = 0.0004934 (50 iterations in 15.092s) [t-SNE] Iteration 600: error = 0.5240846, gradient norm = 0.0005987 (50 iterations in 15.012s) [t-SNE] Iteration 650: error = 0.5247484, gradient norm = 0.0004529 (50 iterations in 14.588s) [t-SNE] Iteration 700: error = 0.5219350, gradient norm = 0.0004794 (50 iterations in 14.976s) [t-SNE] Iteration 750: error = 0.5217716, gradient norm = 0.0005294 (50 iterations in 13.652s) [t-SNE] Iteration 800: error = 0.5052130, gradient norm = 0.0035209 (50 iterations in 13.504s) [t-SNE] Iteration 850: error = 0.4905711, gradient norm = 0.0007645 (50 iterations in 13.080s) [t-SNE] Iteration 
900: error = 0.4852873, gradient norm = 0.0003895 (50 iterations in 15.028s) [t-SNE] Iteration 950: error = 0.4845555, gradient norm = 0.0000750 (50 iterations in 17.708s) [t-SNE] Iteration 1000: error = 0.4846405, gradient norm = 0.0001438 (50 iterations in 18.112s) [t-SNE] KL divergence after 1000 iterations: 0.484641 [t-SNE] Computed conditional probabilities for sample 149 / 149 [t-SNE] Mean sigma: 0.396545 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 43.7s
Quality: 0.4419918152182888 Quality: 0.44398383278292464 Quality: 0.4476469584874645
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 51.0s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 74 tasks | elapsed: 0.1s [Parallel(n_jobs=4)]: Done 83 out of 83 | elapsed: 0.2s finished
[t-SNE] Computing 82 nearest neighbors... [t-SNE] Indexed 83 samples in 0.001s... [t-SNE] Computed neighbors for 83 samples in 0.000s... [t-SNE] Computed conditional probabilities for sample 83 / 83 [t-SNE] Mean sigma: 0.406513 [t-SNE] Computed conditional probabilities in 0.013s [t-SNE] Iteration 50: error = 51.6939240, gradient norm = 0.6116346 (50 iterations in 18.120s) [t-SNE] Iteration 100: error = 57.5304108, gradient norm = 0.5078361 (50 iterations in 17.664s) [t-SNE] Iteration 150: error = 54.0790291, gradient norm = 0.5142255 (50 iterations in 17.352s) [t-SNE] Iteration 200: error = 55.9411736, gradient norm = 0.4531455 (50 iterations in 15.556s) [t-SNE] Iteration 250: error = 59.5245552, gradient norm = 0.4744658 (50 iterations in 14.684s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 59.524555 [t-SNE] Iteration 300: error = 1.3364123, gradient norm = 0.0028797 (50 iterations in 14.611s) [t-SNE] Iteration 350: error = 0.9602560, gradient norm = 0.0027238 (50 iterations in 17.200s) [t-SNE] Iteration 400: error = 0.7888786, gradient norm = 0.0024685 (50 iterations in 17.756s) [t-SNE] Iteration 450: error = 0.6385206, gradient norm = 0.0011201 (50 iterations in 18.084s) [t-SNE] Iteration 500: error = 0.5878088, gradient norm = 0.0012613 (50 iterations in 18.300s) [t-SNE] Iteration 550: error = 0.5230569, gradient norm = 0.0005008 (50 iterations in 17.976s) [t-SNE] Iteration 600: error = 0.5182199, gradient norm = 0.0008701 (50 iterations in 18.324s) [t-SNE] Iteration 650: error = 0.5377570, gradient norm = 0.0032129 (50 iterations in 14.612s) [t-SNE] Iteration 700: error = 0.5008842, gradient norm = 0.0005768 (50 iterations in 14.008s) [t-SNE] Iteration 750: error = 0.4448480, gradient norm = 0.0007345 (50 iterations in 13.725s) [t-SNE] Iteration 800: error = 0.3993179, gradient norm = 0.0007026 (50 iterations in 14.059s) [t-SNE] Iteration 850: error = 0.3562574, gradient norm = 0.0001074 (50 iterations in 14.978s) [t-SNE] Iteration 
900: error = 0.3558875, gradient norm = 0.0000369 (50 iterations in 15.134s) [t-SNE] Iteration 950: error = 0.3559234, gradient norm = 0.0000389 (50 iterations in 18.000s) [t-SNE] Iteration 1000: error = 0.3558098, gradient norm = 0.0000814 (50 iterations in 18.670s) [t-SNE] KL divergence after 1000 iterations: 0.355810 [t-SNE] Computed conditional probabilities for sample 83 / 83 [t-SNE] Mean sigma: 0.406513 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 51.3s
Quality: 0.36271441442074853
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 59.3s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 60 out of 60 | elapsed: 0.1s finished
[t-SNE] Computing 59 nearest neighbors... [t-SNE] Indexed 60 samples in 0.002s... [t-SNE] Computed neighbors for 60 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 60 / 60 [t-SNE] Mean sigma: 0.580420 [t-SNE] Computed conditional probabilities in 0.003s [t-SNE] Iteration 50: error = 53.3773270, gradient norm = 0.5005381 (50 iterations in 19.052s) [t-SNE] Iteration 100: error = 52.5201874, gradient norm = 0.5305525 (50 iterations in 18.462s) [t-SNE] Iteration 150: error = 52.2106171, gradient norm = 0.4931392 (50 iterations in 21.173s) [t-SNE] Iteration 200: error = 50.5820923, gradient norm = 0.5070693 (50 iterations in 19.415s) [t-SNE] Iteration 250: error = 51.5667152, gradient norm = 0.5618857 (50 iterations in 15.020s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 51.566715 [t-SNE] Iteration 300: error = 1.0886347, gradient norm = 0.0023708 (50 iterations in 16.836s) [t-SNE] Iteration 350: error = 0.8796514, gradient norm = 0.0013350 (50 iterations in 17.329s) [t-SNE] Iteration 400: error = 0.6531913, gradient norm = 0.0012935 (50 iterations in 18.599s) [t-SNE] Iteration 450: error = 0.5264741, gradient norm = 0.0006944 (50 iterations in 17.956s) [t-SNE] Iteration 500: error = 0.4924654, gradient norm = 0.0003001 (50 iterations in 16.300s) [t-SNE] Iteration 550: error = 0.4353119, gradient norm = 0.0005124 (50 iterations in 15.197s) [t-SNE] Iteration 600: error = 0.4194471, gradient norm = 0.0002283 (50 iterations in 16.507s) [t-SNE] Iteration 650: error = 0.4035104, gradient norm = 0.0003204 (50 iterations in 14.380s) [t-SNE] Iteration 700: error = 0.3535504, gradient norm = 0.0006141 (50 iterations in 15.328s) [t-SNE] Iteration 750: error = 0.2498419, gradient norm = 0.0006526 (50 iterations in 15.000s) [t-SNE] Iteration 800: error = 0.2336070, gradient norm = 0.0001694 (50 iterations in 15.100s) [t-SNE] Iteration 850: error = 0.2268221, gradient norm = 0.0001008 (50 iterations in 15.468s) [t-SNE] Iteration 
900: error = 0.2229057, gradient norm = 0.0000546 (50 iterations in 15.976s) [t-SNE] Iteration 950: error = 0.2209592, gradient norm = 0.0000293 (50 iterations in 20.816s) [t-SNE] Iteration 1000: error = 0.2204329, gradient norm = 0.0000454 (50 iterations in 21.508s) [t-SNE] KL divergence after 1000 iterations: 0.220433 [t-SNE] Computed conditional probabilities for sample 60 / 60 [t-SNE] Mean sigma: 0.580420 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 50.9s
Quality: 0.3863359999102472
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 57.4s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 96 out of 96 | elapsed: 0.2s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 96 samples in 0.002s... [t-SNE] Computed neighbors for 96 samples in 0.000s... [t-SNE] Computed conditional probabilities for sample 96 / 96 [t-SNE] Mean sigma: 0.325955 [t-SNE] Computed conditional probabilities in 0.015s [t-SNE] Iteration 50: error = 62.1041985, gradient norm = 0.4566090 (50 iterations in 17.742s) [t-SNE] Iteration 100: error = 59.8655205, gradient norm = 0.4979038 (50 iterations in 14.836s) [t-SNE] Iteration 150: error = 59.7741623, gradient norm = 0.5725798 (50 iterations in 14.636s) [t-SNE] Iteration 200: error = 61.2473221, gradient norm = 0.5240078 (50 iterations in 14.600s) [t-SNE] Iteration 250: error = 62.0466576, gradient norm = 0.4516634 (50 iterations in 14.120s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 62.046658 [t-SNE] Iteration 300: error = 0.9169676, gradient norm = 0.0051332 (50 iterations in 16.087s) [t-SNE] Iteration 350: error = 0.7910029, gradient norm = 0.0023046 (50 iterations in 16.316s) [t-SNE] Iteration 400: error = 0.7060533, gradient norm = 0.0011155 (50 iterations in 13.980s) [t-SNE] Iteration 450: error = 0.6779107, gradient norm = 0.0004081 (50 iterations in 14.572s) [t-SNE] Iteration 500: error = 0.6588613, gradient norm = 0.0005648 (50 iterations in 14.364s) [t-SNE] Iteration 550: error = 0.6434733, gradient norm = 0.0012797 (50 iterations in 15.052s) [t-SNE] Iteration 600: error = 0.5526755, gradient norm = 0.0148448 (50 iterations in 14.592s) [t-SNE] Iteration 650: error = 0.5177051, gradient norm = 0.0013229 (50 iterations in 15.456s) [t-SNE] Iteration 700: error = 0.5173403, gradient norm = 0.0005617 (50 iterations in 14.768s) [t-SNE] Iteration 750: error = 0.5175282, gradient norm = 0.0004204 (50 iterations in 14.840s) [t-SNE] Iteration 800: error = 0.5174741, gradient norm = 0.0004361 (50 iterations in 14.856s) [t-SNE] Iteration 850: error = 0.5174901, gradient norm = 0.0005014 (50 iterations in 14.712s) [t-SNE] Iteration 
900: error = 0.5175128, gradient norm = 0.0006729 (50 iterations in 14.780s) [t-SNE] Iteration 950: error = 0.5174552, gradient norm = 0.0005154 (50 iterations in 15.620s) [t-SNE] Iteration 1000: error = 0.5174558, gradient norm = 0.0004435 (50 iterations in 14.996s) [t-SNE] KL divergence after 1000 iterations: 0.517456 [t-SNE] Computed conditional probabilities for sample 96 / 96 [t-SNE] Mean sigma: 0.325955 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/joblib/externals/loky/process_executor.py:691: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak. "timeout or by a memory leak.", UserWarning [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 49.0s
Quality: 0.29155644602055764 Quality: 0.29573605856506513
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 55.4s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 54 out of 54 | elapsed: 0.1s finished
[t-SNE] Computing 53 nearest neighbors... [t-SNE] Indexed 54 samples in 0.001s... [t-SNE] Computed neighbors for 54 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 54 / 54 [t-SNE] Mean sigma: 0.419653 [t-SNE] Computed conditional probabilities in 0.003s [t-SNE] Iteration 50: error = 52.8022308, gradient norm = 0.5094065 (50 iterations in 14.888s) [t-SNE] Iteration 100: error = 52.5144844, gradient norm = 0.5857486 (50 iterations in 14.108s) [t-SNE] Iteration 150: error = 51.9706764, gradient norm = 0.5624253 (50 iterations in 14.412s) [t-SNE] Iteration 200: error = 56.8292007, gradient norm = 0.4400248 (50 iterations in 14.127s) [t-SNE] Iteration 250: error = 52.0993423, gradient norm = 0.4573041 (50 iterations in 15.337s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 52.099342 [t-SNE] Iteration 300: error = 0.9983720, gradient norm = 0.0017527 (50 iterations in 17.619s) [t-SNE] Iteration 350: error = 0.8101432, gradient norm = 0.0013014 (50 iterations in 15.320s) [t-SNE] Iteration 400: error = 0.7228248, gradient norm = 0.0008105 (50 iterations in 15.448s) [t-SNE] Iteration 450: error = 0.6824510, gradient norm = 0.0003395 (50 iterations in 16.940s) [t-SNE] Iteration 500: error = 0.6492205, gradient norm = 0.0006875 (50 iterations in 13.788s) [t-SNE] Iteration 550: error = 0.6024780, gradient norm = 0.0004490 (50 iterations in 14.280s) [t-SNE] Iteration 600: error = 0.5717476, gradient norm = 0.0004297 (50 iterations in 14.372s) [t-SNE] Iteration 650: error = 0.5492414, gradient norm = 0.0005100 (50 iterations in 15.008s) [t-SNE] Iteration 700: error = 0.5020887, gradient norm = 0.0006679 (50 iterations in 14.872s) [t-SNE] Iteration 750: error = 0.4731865, gradient norm = 0.0004408 (50 iterations in 15.322s) [t-SNE] Iteration 800: error = 0.4565465, gradient norm = 0.0004393 (50 iterations in 12.487s) [t-SNE] Iteration 850: error = 0.4453829, gradient norm = 0.0002646 (50 iterations in 12.812s) [t-SNE] Iteration 
900: error = 0.4380736, gradient norm = 0.0001214 (50 iterations in 12.672s) [t-SNE] Iteration 950: error = 0.4379223, gradient norm = 0.0000557 (50 iterations in 13.980s) [t-SNE] Iteration 1000: error = 0.4363164, gradient norm = 0.0001236 (50 iterations in 14.840s) [t-SNE] KL divergence after 1000 iterations: 0.436316 [t-SNE] Computed conditional probabilities for sample 54 / 54 [t-SNE] Mean sigma: 0.419653 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 34.7s
Quality: 0.20344972761170332 Quality: 0.20648092690249686
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 40.3s finished
For each motif, determine the peaks that contain it
# Load the hypothetical CWMs discovered by TF-MoDISco and keep the motif keys
hcwms = import_tfmodisco_motifs(tfm_results_path)
motif_keys = list(hcwms.keys())
# Load the filtered/collapsed MOODS motif hits and map each motif to the
# indices of the peaks containing it
hit_table = moods.import_moods_hits(
    os.path.join(moods_dir, "moods_filtered_collapsed.bed")
)
hit_peak_indices = get_hit_peak_indices(hit_table, motif_keys)
# Load the per-peak embeddings (large array; this can take a while)
embeddings = np.load(embeddings_path)["embeddings"]
# Collapse the sequence axis so the embeddings are position-independent
summed_embeddings = embeddings.sum(axis=1)
For each motif, show the subclusters that exist within the TF-MoDISco-identified subpatterns
plot_motif_heterogeneity(tfm_obj)
/mnt/lab_data2/vir/tf_chr_atlas/02-24-2021/TF-Atlas/3M/reports/viz_sequence.py:152: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`). fig = plt.figure(figsize=figsize)
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 12084 | ||
| 0 | 1794 | ||
| 1 | 1705 | ||
| 2 | 1559 | ||
| 3 | 1492 | ||
| 4 | 1385 | ||
| 5 | 1214 | ||
| 6 | 783 | ||
| 7 | 728 | ||
| 8 | 609 | ||
| 9 | 606 | ||
| 10 | 209 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 1842 | ||
| 0 | 376 | ||
| 1 | 332 | ||
| 2 | 281 | ||
| 3 | 228 | ||
| 4 | 227 | ||
| 5 | 198 | ||
| 6 | 115 | ||
| 7 | 52 | ||
| 8 | 33 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 196 | ||
| 0 | 76 | ||
| 1 | 48 | ||
| 2 | 39 | ||
| 3 | 30 | ||
| 4 | 3 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 156 | ||
| 0 | 51 | ||
| 1 | 34 | ||
| 2 | 22 | ||
| 3 | 20 | ||
| 4 | 12 | ||
| 5 | 8 | ||
| 6 | 7 | ||
| 7 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 149 | ||
| 0 | 50 | ||
| 1 | 30 | ||
| 2 | 30 | ||
| 3 | 15 | ||
| 4 | 15 | ||
| 5 | 7 | ||
| 6 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 83 | ||
| 0 | 25 | ||
| 1 | 21 | ||
| 2 | 21 | ||
| 3 | 12 | ||
| 4 | 4 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 60 | ||
| 0 | 19 | ||
| 1 | 16 | ||
| 2 | 12 | ||
| 3 | 7 | ||
| 4 | 6 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 96 | ||
| 0 | 29 | ||
| 1 | 20 | ||
| 2 | 19 | ||
| 3 | 15 | ||
| 4 | 13 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 54 | ||
| 0 | 17 | ||
| 1 | 14 | ||
| 2 | 13 | ||
| 3 | 10 |
Cluster the peaks by their embeddings to highlight the structure of different peaks and different motifs
plot_peak_clustering(summed_embeddings, motif_keys, hcwms, hit_peak_indices)
| Motif key | Embeddings | hCWM |
|---|---|---|
| 0_0 | ||
| 0_1 | ||
| 0_2 | ||
| 0_3 | ||
| 0_4 | ||
| 0_5 | ||
| 0_6 |