import os
import h5py
import util
import moods
import viz_sequence
import numpy as np
import pandas as pd
import modisco
import sklearn.decomposition
import umap
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import vdom.helpers as vdomh
from IPython.display import display
import tqdm
# `tqdm.tqdm_notebook` is deprecated (removed in tqdm 5.0); use the
# `tqdm.notebook` submodule as the deprecation warning advises
from tqdm.notebook import tqdm as notebook_tqdm
notebook_tqdm()
/users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/ipykernel_launcher.py:16: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` app.launch_new_instance()
<tqdm.notebook.tqdm_notebook at 0x7f9377f0c950>
# Plotting defaults
# Register custom fonts with Matplotlib one at a time; `createFontList` was
# deprecated in Matplotlib 3.2 (see the captured warning) in favor of
# `FontManager.addfont`
for font_path in font_manager.findSystemFonts(fontpaths="/users/amtseng/modules/fonts"):
    font_manager.fontManager.addfont(font_path)
# Global figure/axis/font sizes for all plots in this notebook
plot_params = {
    "figure.titlesize": 22,
    "axes.titlesize": 22,
    "axes.labelsize": 20,
    "legend.fontsize": 18,
    "xtick.labelsize": 16,
    "ytick.labelsize": 16,
    "font.family": "Roboto",
    "font.weight": "bold"
}
plt.rcParams.update(plot_params)
/users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/ipykernel_launcher.py:4: MatplotlibDeprecationWarning: The createFontList function was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use FontManager.addfont instead. after removing the cwd from sys.path.
# Define parameters/fetch arguments
# All input paths come from environment variables; a missing variable raises
# KeyError immediately rather than failing later with a vague error
shap_scores_path = os.environ["TFM_SHAP_PATH"]  # DeepSHAP scores HDF5
tfm_results_path = os.environ["TFM_TFM_PATH"]  # TF-MoDISco results HDF5
moods_dir = os.environ["TFM_MOODS_DIR"]  # Directory of MOODS outputs
embeddings_path = os.environ["TFM_EMB_PATH"]  # Peak embeddings NPZ
print("DeepSHAP scores path: %s" % shap_scores_path)
print("TF-MoDISco results path: %s" % tfm_results_path)
print("Embeddings path: %s" % embeddings_path)
print("MOODS directory: %s" % moods_dir)
DeepSHAP scores path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//shap/ENCSR000EEB/counts_scores_alex_format.h5 TF-MoDISco results path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//modisco/ENCSR000EEB/counts/modisco_results.hd5 Embeddings path: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021//embeddings/ENCSR000EEB/embeddings.npz MOODS directory: /mnt/lab_data2/vir/tf_chr_atlas/02-24-2021/reports/tfmodisco/notebooks/ENCSR000EEB/moods/counts
# Define constants
shap_score_center_size = 400  # Length (bp) of the center cut taken from each SHAP score track
hyp_score_key = "hyp_scores"  # HDF5 dataset key for the hypothetical importance scores
task_index = None  # presumably restricts analysis to one task when set; not used in this view — TODO confirm
Helper functions for plotting and organizing the results
def compute_tfmodisco_motif_subclusters(tfm_results):
    """
    From an imported TF-MoDISco results object, computes the subclustering
    of heterogeneity within each motif/pattern, in place.

    Arguments:
        `tfm_results`: TF-MoDISco results object (e.g. as returned by
            `util.import_tfmodisco_results`)
    """
    metaclusters = tfm_results.metacluster_idx_to_submetacluster_results
    for metacluster in metaclusters.values():
        patterns = metacluster.seqlets_to_patterns_result.patterns
        if not patterns:
            # Skip metaclusters with no patterns but keep processing the
            # remaining ones; the previous `break` silently dropped all
            # subsequent metaclusters (the HDF5 importer below uses
            # `continue` for the same condition)
            continue
        for pattern in patterns:
            # Compute subclustering for each pattern (motif)
            pattern.compute_subclusters_and_embedding(
                pattern_comparison_settings=modisco.affinitymat.core.PatternComparisonSettings(
                    track_names=["task0_hypothetical_contribs", "task0_contrib_scores"],
                    track_transformer=modisco.affinitymat.L1Normalizer(),
                    min_overlap=None  # This argument is irrelevant here
                ),
                perplexity=30, n_jobs=4, verbose=True
            )
def trim_hcwm(pfm, hcwm):
    """
    Trims the flanks of a motif's hCWM using the information content of its
    PFM: flank positions with information content below 0.2 are dropped,
    then the retained window is widened by 4 bp on each side (clipped to
    the motif boundaries).
    """
    info = util.info_content(pfm)
    keep = np.where(info >= 0.2)[0]  # Positions with at least 0.2 IC
    left = max(0, np.min(keep) - 4)
    right = min(len(pfm), np.max(keep) + 4 + 1)  # +1: exclusive slice end
    return hcwm[left:right]
def plot_motif_heterogeneity(tfm_results):
    """
    For each motif/pattern in the TF-MoDISco results object, displays a
    table with the aggregate motif (seqlet count, 2D seqlet-embedding
    scatter colored by subcluster, and trimmed hCWM), followed by one row
    per subcluster/subpattern in which the embedding scatter highlights the
    seqlets assigned to that subcluster. Requires that subclusters and
    embeddings have been computed (e.g. via
    `compute_tfmodisco_motif_subclusters`).
    """
    header = vdomh.thead(
        vdomh.tr(
            vdomh.th("Subpattern", style={"text-align": "center"}),
            vdomh.th("Seqlets", style={"text-align": "center"}),
            vdomh.th("Embeddings", style={"text-align": "center"}),
            vdomh.th("hCWM", style={"text-align": "center"})
        )
    )
    metaclusters = tfm_results.metacluster_idx_to_submetacluster_results
    num_metaclusters = len(metaclusters.keys())
    for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
        metacluster = metaclusters[metacluster_key]
        display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
        patterns = metacluster.seqlets_to_patterns_result.patterns
        if not patterns:
            # Skip empty metaclusters; the previous `break` silently
            # dropped all subsequent metaclusters
            continue
        num_patterns = len(patterns)
        for pattern_i, pattern in enumerate(patterns):
            display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
            embedding = pattern.twod_embedding
            subpattern_clusters = pattern.subclusters
            # Aggregate motif row: full embedding colored by subcluster
            # assignment, next to the trimmed hCWM
            pfm = pattern["sequence"].fwd
            hcwm = pattern["task0_hypothetical_contribs"].fwd
            trimmed_hcwm = trim_hcwm(pfm, hcwm)
            hcwm_fig = viz_sequence.plot_weights(
                trimmed_hcwm, subticks_frequency=(len(trimmed_hcwm) + 1), return_fig=True
            )
            emb_fig, ax = plt.subplots()
            ax.scatter(
                embedding[:,0], embedding[:,1], c=subpattern_clusters, cmap="tab20", alpha=0.3
            )
            table_rows = [vdomh.tr(
                vdomh.td("Agg."),
                vdomh.td(str(len(pattern.seqlets))),
                vdomh.td(util.figure_to_vdom_image(emb_fig)),
                vdomh.td(util.figure_to_vdom_image(hcwm_fig))
            )]
            # One row per subcluster: same embedding, with this subcluster's
            # seqlets highlighted via a boolean color mask
            for subpattern_key, subpattern in pattern.subcluster_to_subpattern.items():
                pfm = subpattern["sequence"].fwd
                hcwm = subpattern["task0_hypothetical_contribs"].fwd
                trimmed_hcwm = trim_hcwm(pfm, hcwm)
                hcwm_fig = viz_sequence.plot_weights(
                    trimmed_hcwm, subticks_frequency=(len(trimmed_hcwm) + 1), return_fig=True
                )
                emb_fig, ax = plt.subplots()
                ax.scatter(
                    embedding[:,0], embedding[:,1], c=(subpattern_clusters == subpattern_key), alpha=0.3
                )
                table_rows.append(vdomh.tr(
                    vdomh.td(str(subpattern_key)),
                    vdomh.td(str(len(subpattern.seqlets))),
                    vdomh.td(util.figure_to_vdom_image(emb_fig)),
                    vdomh.td(util.figure_to_vdom_image(hcwm_fig))
                ))
            table = vdomh.table(header, vdomh.tbody(*table_rows))
            display(table)
            plt.close("all")  # Remove all standing figures
def import_tfmodisco_motifs(tfm_results_path, trim=True, only_pos=True):
    """
    Imports hCWMs into a dictionary, mapping the string key "x_y" to the
    hCWM, where `x` is the metacluster index and `y` is the pattern index.
    Arguments:
        `tfm_results_path`: path to HDF5 containing TF-MoDISco results
        `trim`: if True, trim the motif flanks based on information content
            (via `trim_hcwm`)
        `only_pos`: if True, only return motifs whose contribution scores
            (CWM) have a nonnegative sum
    Returns the dictionary of hCWMs (as NumPy arrays).
    """
    hcwms = {}
    with h5py.File(tfm_results_path, "r") as f:
        metaclusters = f["metacluster_idx_to_submetacluster_results"]
        num_metaclusters = len(metaclusters.keys())
        for metacluster_i, metacluster_key in enumerate(metaclusters.keys()):
            metacluster = metaclusters[metacluster_key]
            # Metaclusters without any patterns are simply skipped
            if "patterns" not in metacluster["seqlets_to_patterns_result"]:
                continue
            patterns = metacluster["seqlets_to_patterns_result"]["patterns"]
            num_patterns = len(patterns["all_pattern_names"][:])
            for pattern_i, pattern_name in enumerate(patterns["all_pattern_names"][:]):
                # Pattern names are stored as bytes in the HDF5
                pattern_name = pattern_name.decode()
                pattern = patterns[pattern_name]
                pfm = pattern["sequence"]["fwd"][:]
                hcwm = pattern["task0_hypothetical_contribs"]["fwd"][:]
                cwm = pattern["task0_contrib_scores"]["fwd"][:]
                # Check that the contribution scores are overall positive
                if only_pos and np.sum(cwm) < 0:
                    continue
                if trim:
                    hcwm = trim_hcwm(pfm, hcwm)
                hcwms["%d_%d" % (metacluster_i,pattern_i)] = hcwm
    return hcwms
def get_hit_peak_indices(hit_table, motif_keys):
    """
    Returns a dictionary of NumPy arrays, mapping each motif key to the
    set of peak indices that contain that motif.

    Arguments:
        `hit_table`: pandas DataFrame with (at least) "key" and
            "peak_index" columns
        `motif_keys`: iterable of motif keys to look up
    Returns a dictionary mapping each motif key to a NumPy array of peak
    indices, in original row order; keys with no hits map to an empty
    array of the column's dtype.
    """
    # Group once instead of rescanning the whole table for every key
    # (the previous implementation filtered the full table per key)
    grouped = hit_table.groupby("key")["peak_index"]
    present = grouped.groups
    peak_dtype = hit_table["peak_index"].dtype
    hit_peak_indices = {}
    for motif_key in motif_keys:
        if motif_key in present:
            hit_peak_indices[motif_key] = grouped.get_group(motif_key).values
        else:
            hit_peak_indices[motif_key] = np.empty(0, dtype=peak_dtype)
    return hit_peak_indices
def plot_peak_clustering(embeddings, motif_keys, hcwms, hit_peak_indices):
    """
    Displays, for each motif, its hCWM alongside a UMAP scatter of the peak
    embeddings in which the peaks containing a hit for that motif are
    highlighted.

    Arguments:
        `embeddings`: N x D array of peak embeddings (one row per peak)
        `motif_keys`: motif keys to show; must be keys of both `hcwms` and
            `hit_peak_indices`
        `hcwms`: dictionary mapping motif key to hCWM
        `hit_peak_indices`: dictionary mapping motif key to an array of
            peak indices (into `embeddings`) that contain the motif
    """
    # First reduce using PCA
    centered = embeddings - np.mean(embeddings, axis=0, keepdims=True)
    pca = sklearn.decomposition.PCA(n_components=20)
    reduced = pca.fit_transform(centered)
    # Run UMAP on the PCA-reduced data; previously UMAP was fit on
    # `centered`, leaving the PCA result unused despite the comment above
    um = umap.UMAP(verbose=False)
    trans = um.fit_transform(reduced)
    header = vdomh.thead(
        vdomh.tr(
            vdomh.th("Motif key", style={"text-align": "center"}),
            vdomh.th("Embeddings", style={"text-align": "center"}),
            vdomh.th("hCWM", style={"text-align": "center"})
        )
    )
    table_rows = []
    for motif_key in motif_keys:
        hcwm = hcwms[motif_key]
        hcwm_fig = viz_sequence.plot_weights(
            hcwm, subticks_frequency=(len(hcwm) + 1), return_fig=True
        )
        emb_fig, ax = plt.subplots()
        # Binary mask: 1 for peaks containing this motif, 0 otherwise
        subset = np.zeros(len(embeddings), dtype=int)
        subset[hit_peak_indices[motif_key]] = 1
        ax.scatter(
            trans[:,0], trans[:,1], c=subset, alpha=0.3
        )
        table_rows.append(vdomh.tr(
            vdomh.td(motif_key),
            vdomh.td(util.figure_to_vdom_image(emb_fig)),
            vdomh.td(util.figure_to_vdom_image(hcwm_fig))
        ))
    table = vdomh.table(header, vdomh.tbody(*table_rows))
    display(table)
    plt.close("all")  # Remove all standing figures
Run motif subclustering
# Import SHAP coordinates and one-hot sequences
# (scores, one-hot sequences, and peak coordinates, all from the DeepSHAP HDF5)
hyp_scores, _, one_hot_seqs, shap_coords = util.import_shap_scores(shap_scores_path, hyp_score_key, center_cut_size=shap_score_center_size, remove_non_acgt=False)
# This cuts the sequences/scores off just as how TF-MoDISco saw them, but the coordinates are uncut
Importing SHAP scores: 0%| | 0/73 [00:00<?, ?it/s] Importing SHAP scores: 1%|▏ | 1/73 [00:00<00:28, 2.51it/s] Importing SHAP scores: 3%|▎ | 2/73 [00:00<00:30, 2.35it/s] Importing SHAP scores: 4%|▍ | 3/73 [00:01<00:29, 2.38it/s] Importing SHAP scores: 5%|▌ | 4/73 [00:01<00:29, 2.37it/s] Importing SHAP scores: 7%|▋ | 5/73 [00:02<00:28, 2.35it/s] Importing SHAP scores: 8%|▊ | 6/73 [00:02<00:28, 2.33it/s] Importing SHAP scores: 10%|▉ | 7/73 [00:02<00:28, 2.29it/s] Importing SHAP scores: 11%|█ | 8/73 [00:03<00:32, 1.99it/s] Importing SHAP scores: 12%|█▏ | 9/73 [00:03<00:27, 2.31it/s] Importing SHAP scores: 14%|█▎ | 10/73 [00:04<00:27, 2.29it/s] Importing SHAP scores: 15%|█▌ | 11/73 [00:04<00:27, 2.30it/s] Importing SHAP scores: 16%|█▋ | 12/73 [00:05<00:27, 2.25it/s] Importing SHAP scores: 18%|█▊ | 13/73 [00:05<00:26, 2.24it/s] Importing SHAP scores: 19%|█▉ | 14/73 [00:06<00:25, 2.27it/s] Importing SHAP scores: 21%|██ | 15/73 [00:06<00:25, 2.30it/s] Importing SHAP scores: 22%|██▏ | 16/73 [00:07<00:27, 2.05it/s] Importing SHAP scores: 23%|██▎ | 17/73 [00:07<00:28, 1.95it/s] Importing SHAP scores: 25%|██▍ | 18/73 [00:08<00:24, 2.29it/s] Importing SHAP scores: 26%|██▌ | 19/73 [00:08<00:24, 2.23it/s] Importing SHAP scores: 27%|██▋ | 20/73 [00:08<00:24, 2.20it/s] Importing SHAP scores: 29%|██▉ | 21/73 [00:09<00:23, 2.23it/s] Importing SHAP scores: 30%|███ | 22/73 [00:09<00:23, 2.19it/s] Importing SHAP scores: 32%|███▏ | 23/73 [00:10<00:22, 2.23it/s] Importing SHAP scores: 33%|███▎ | 24/73 [00:10<00:22, 2.20it/s] Importing SHAP scores: 34%|███▍ | 25/73 [00:11<00:21, 2.20it/s] Importing SHAP scores: 36%|███▌ | 26/73 [00:11<00:19, 2.36it/s] Importing SHAP scores: 37%|███▋ | 27/73 [00:12<00:22, 2.00it/s] Importing SHAP scores: 38%|███▊ | 28/73 [00:12<00:25, 1.77it/s] Importing SHAP scores: 40%|███▉ | 29/73 [00:13<00:20, 2.10it/s] Importing SHAP scores: 41%|████ | 30/73 [00:13<00:18, 2.35it/s] Importing SHAP scores: 42%|████▏ | 31/73 [00:14<00:18, 2.27it/s] Importing SHAP scores: 
44%|████▍ | 32/73 [00:14<00:18, 2.22it/s] Importing SHAP scores: 45%|████▌ | 33/73 [00:15<00:20, 2.00it/s] Importing SHAP scores: 47%|████▋ | 34/73 [00:15<00:21, 1.80it/s] Importing SHAP scores: 48%|████▊ | 35/73 [00:16<00:19, 1.95it/s] Importing SHAP scores: 49%|████▉ | 36/73 [00:16<00:19, 1.93it/s] Importing SHAP scores: 51%|█████ | 37/73 [00:17<00:18, 1.94it/s] Importing SHAP scores: 52%|█████▏ | 38/73 [00:17<00:17, 1.98it/s] Importing SHAP scores: 53%|█████▎ | 39/73 [00:18<00:16, 2.05it/s] Importing SHAP scores: 55%|█████▍ | 40/73 [00:18<00:17, 1.87it/s] Importing SHAP scores: 56%|█████▌ | 41/73 [00:19<00:16, 1.97it/s] Importing SHAP scores: 58%|█████▊ | 42/73 [00:19<00:15, 2.02it/s] Importing SHAP scores: 59%|█████▉ | 43/73 [00:20<00:14, 2.08it/s] Importing SHAP scores: 60%|██████ | 44/73 [00:20<00:10, 2.64it/s] Importing SHAP scores: 62%|██████▏ | 45/73 [00:20<00:09, 2.98it/s] Importing SHAP scores: 63%|██████▎ | 46/73 [00:20<00:08, 3.30it/s] Importing SHAP scores: 64%|██████▍ | 47/73 [00:21<00:07, 3.58it/s] Importing SHAP scores: 66%|██████▌ | 48/73 [00:21<00:08, 3.05it/s] Importing SHAP scores: 67%|██████▋ | 49/73 [00:21<00:08, 2.75it/s] Importing SHAP scores: 68%|██████▊ | 50/73 [00:22<00:09, 2.54it/s] Importing SHAP scores: 70%|██████▉ | 51/73 [00:22<00:09, 2.41it/s] Importing SHAP scores: 71%|███████ | 52/73 [00:23<00:07, 2.66it/s] Importing SHAP scores: 73%|███████▎ | 53/73 [00:23<00:07, 2.52it/s] Importing SHAP scores: 74%|███████▍ | 54/73 [00:24<00:07, 2.40it/s] Importing SHAP scores: 75%|███████▌ | 55/73 [00:24<00:07, 2.31it/s] Importing SHAP scores: 77%|███████▋ | 56/73 [00:25<00:08, 2.08it/s] Importing SHAP scores: 78%|███████▊ | 57/73 [00:25<00:08, 1.92it/s] Importing SHAP scores: 79%|███████▉ | 58/73 [00:26<00:07, 1.99it/s] Importing SHAP scores: 81%|████████ | 59/73 [00:26<00:06, 2.06it/s] Importing SHAP scores: 82%|████████▏ | 60/73 [00:27<00:07, 1.84it/s] Importing SHAP scores: 84%|████████▎ | 61/73 [00:27<00:06, 1.97it/s] Importing SHAP 
scores: 85%|████████▍ | 62/73 [00:28<00:06, 1.78it/s] Importing SHAP scores: 86%|████████▋ | 63/73 [00:29<00:05, 1.70it/s] Importing SHAP scores: 88%|████████▊ | 64/73 [00:29<00:05, 1.71it/s] Importing SHAP scores: 89%|████████▉ | 65/73 [00:30<00:04, 1.85it/s] Importing SHAP scores: 90%|█████████ | 66/73 [00:30<00:03, 1.87it/s] Importing SHAP scores: 92%|█████████▏| 67/73 [00:31<00:03, 1.97it/s] Importing SHAP scores: 93%|█████████▎| 68/73 [00:31<00:02, 2.04it/s] Importing SHAP scores: 95%|█████████▍| 69/73 [00:31<00:01, 2.05it/s] Importing SHAP scores: 96%|█████████▌| 70/73 [00:32<00:01, 2.15it/s] Importing SHAP scores: 97%|█████████▋| 71/73 [00:33<00:01, 1.90it/s] Importing SHAP scores: 99%|█████████▊| 72/73 [00:33<00:00, 1.75it/s] Importing SHAP scores: 100%|██████████| 73/73 [00:34<00:00, 2.15it/s]
# Import the TF-MoDISco results object
# (reconstructed from the HDF5 together with the scores/sequences just imported)
tfm_obj = util.import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs, shap_score_center_size)
# Compute subclusters (needed for older versions of TF-MoDISco); this takes awhile!
compute_tfmodisco_motif_subclusters(tfm_obj)
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 12.7s [Parallel(n_jobs=4)]: Done 192 tasks | elapsed: 31.3s [Parallel(n_jobs=4)]: Done 442 tasks | elapsed: 1.0min [Parallel(n_jobs=4)]: Done 792 tasks | elapsed: 1.6min [Parallel(n_jobs=4)]: Done 1242 tasks | elapsed: 2.2min [Parallel(n_jobs=4)]: Done 1792 tasks | elapsed: 3.0min [Parallel(n_jobs=4)]: Done 2442 tasks | elapsed: 4.3min [Parallel(n_jobs=4)]: Done 3192 tasks | elapsed: 6.0min [Parallel(n_jobs=4)]: Done 4042 tasks | elapsed: 7.8min [Parallel(n_jobs=4)]: Done 4992 tasks | elapsed: 10.1min [Parallel(n_jobs=4)]: Done 6042 tasks | elapsed: 12.3min [Parallel(n_jobs=4)]: Done 7192 tasks | elapsed: 13.7min [Parallel(n_jobs=4)]: Done 8442 tasks | elapsed: 16.9min [Parallel(n_jobs=4)]: Done 9792 tasks | elapsed: 19.8min [Parallel(n_jobs=4)]: Done 11242 tasks | elapsed: 23.4min [Parallel(n_jobs=4)]: Done 12792 tasks | elapsed: 26.3min [Parallel(n_jobs=4)]: Done 12988 out of 12988 | elapsed: 27.0min finished /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/sklearn/manifold/_t_sne.py:699: FutureWarning: 'square_distances' has been introduced in 0.24 to help phase out legacy squaring behavior. The 'legacy' setting will be removed in 1.1 (renaming of 0.26), and the default setting will be changed to True. In 1.3, 'square_distances' will be removed altogether, and distances will be squared by default. Set 'square_distances'=True to silence this warning. FutureWarning /users/vir/miniconda2/envs/basepairmodels_latest/lib/python3.7/site-packages/sklearn/neighbors/_base.py:176: EfficiencyWarning: Precomputed sparse input was not sorted by data. EfficiencyWarning)
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 12988 samples in 0.322s... [t-SNE] Computed neighbors for 12988 samples in 0.032s... [t-SNE] Computed conditional probabilities for sample 1000 / 12988 [t-SNE] Computed conditional probabilities for sample 2000 / 12988 [t-SNE] Computed conditional probabilities for sample 3000 / 12988 [t-SNE] Computed conditional probabilities for sample 4000 / 12988 [t-SNE] Computed conditional probabilities for sample 5000 / 12988 [t-SNE] Computed conditional probabilities for sample 6000 / 12988 [t-SNE] Computed conditional probabilities for sample 7000 / 12988 [t-SNE] Computed conditional probabilities for sample 8000 / 12988 [t-SNE] Computed conditional probabilities for sample 9000 / 12988 [t-SNE] Computed conditional probabilities for sample 10000 / 12988 [t-SNE] Computed conditional probabilities for sample 11000 / 12988 [t-SNE] Computed conditional probabilities for sample 12000 / 12988 [t-SNE] Computed conditional probabilities for sample 12988 / 12988 [t-SNE] Mean sigma: 0.191326 [t-SNE] Computed conditional probabilities in 3.405s [t-SNE] Iteration 50: error = 98.6161270, gradient norm = 0.0001666 (50 iterations in 25.816s) [t-SNE] Iteration 100: error = 95.9750671, gradient norm = 0.0020661 (50 iterations in 24.988s) [t-SNE] Iteration 150: error = 94.4635010, gradient norm = 0.0000447 (50 iterations in 25.796s) [t-SNE] Iteration 200: error = 94.4626083, gradient norm = 0.0000341 (50 iterations in 25.964s) [t-SNE] Iteration 250: error = 94.4599915, gradient norm = 0.0000191 (50 iterations in 24.352s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 94.459991 [t-SNE] Iteration 300: error = 3.8204794, gradient norm = 0.0011607 (50 iterations in 21.967s) [t-SNE] Iteration 350: error = 3.4484096, gradient norm = 0.0004974 (50 iterations in 21.160s) [t-SNE] Iteration 400: error = 3.2807791, gradient norm = 0.0002968 (50 iterations in 23.620s) [t-SNE] Iteration 450: error = 3.1788995, gradient norm = 
0.0002090 (50 iterations in 25.300s) [t-SNE] Iteration 500: error = 3.1096246, gradient norm = 0.0001542 (50 iterations in 25.180s) [t-SNE] Iteration 550: error = 3.0580304, gradient norm = 0.0001225 (50 iterations in 25.980s) [t-SNE] Iteration 600: error = 3.0176954, gradient norm = 0.0001019 (50 iterations in 25.632s) [t-SNE] Iteration 650: error = 2.9856834, gradient norm = 0.0000868 (50 iterations in 25.300s) [t-SNE] Iteration 700: error = 2.9596691, gradient norm = 0.0000769 (50 iterations in 22.596s) [t-SNE] Iteration 750: error = 2.9383087, gradient norm = 0.0000694 (50 iterations in 16.668s) [t-SNE] Iteration 800: error = 2.9204085, gradient norm = 0.0000612 (50 iterations in 17.244s) [t-SNE] Iteration 850: error = 2.9054315, gradient norm = 0.0000556 (50 iterations in 16.416s) [t-SNE] Iteration 900: error = 2.8924246, gradient norm = 0.0000505 (50 iterations in 20.024s) [t-SNE] Iteration 950: error = 2.8810599, gradient norm = 0.0000497 (50 iterations in 21.772s) [t-SNE] Iteration 1000: error = 2.8718147, gradient norm = 0.0000503 (50 iterations in 25.572s) [t-SNE] KL divergence after 1000 iterations: 2.871815 [t-SNE] Computed conditional probabilities for sample 1000 / 12988 [t-SNE] Computed conditional probabilities for sample 2000 / 12988 [t-SNE] Computed conditional probabilities for sample 3000 / 12988 [t-SNE] Computed conditional probabilities for sample 4000 / 12988 [t-SNE] Computed conditional probabilities for sample 5000 / 12988 [t-SNE] Computed conditional probabilities for sample 6000 / 12988 [t-SNE] Computed conditional probabilities for sample 7000 / 12988 [t-SNE] Computed conditional probabilities for sample 8000 / 12988 [t-SNE] Computed conditional probabilities for sample 9000 / 12988 [t-SNE] Computed conditional probabilities for sample 10000 / 12988 [t-SNE] Computed conditional probabilities for sample 11000 / 12988 [t-SNE] Computed conditional probabilities for sample 12000 / 12988 [t-SNE] Computed conditional probabilities for sample 
12988 / 12988 [t-SNE] Mean sigma: 0.191326 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 7.6min
Quality: 0.6949751084397497 Quality: 0.6955794867756897 Quality: 0.6979744175876629 Quality: 0.6980630828437131
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 9.0min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 200 tasks | elapsed: 1.7s [Parallel(n_jobs=4)]: Done 527 out of 527 | elapsed: 3.9s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 527 samples in 0.004s... [t-SNE] Computed neighbors for 527 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 527 / 527 [t-SNE] Mean sigma: 0.265834 [t-SNE] Computed conditional probabilities in 0.093s [t-SNE] Iteration 50: error = 76.6595993, gradient norm = 0.4492794 (50 iterations in 16.240s) [t-SNE] Iteration 100: error = 81.3795624, gradient norm = 0.4130844 (50 iterations in 17.816s) [t-SNE] Iteration 150: error = 82.2960892, gradient norm = 0.3879465 (50 iterations in 17.288s) [t-SNE] Iteration 200: error = 83.6891174, gradient norm = 0.3920951 (50 iterations in 15.164s) [t-SNE] Iteration 250: error = 85.2052765, gradient norm = 0.3779395 (50 iterations in 14.916s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 85.205276 [t-SNE] Iteration 300: error = 1.6124532, gradient norm = 0.0052906 (50 iterations in 14.791s) [t-SNE] Iteration 350: error = 1.4597185, gradient norm = 0.0016718 (50 iterations in 17.192s) [t-SNE] Iteration 400: error = 1.4016998, gradient norm = 0.0009981 (50 iterations in 17.716s) [t-SNE] Iteration 450: error = 1.3647875, gradient norm = 0.0009149 (50 iterations in 17.564s) [t-SNE] Iteration 500: error = 1.3439271, gradient norm = 0.0005506 (50 iterations in 14.984s) [t-SNE] Iteration 550: error = 1.3358471, gradient norm = 0.0005207 (50 iterations in 14.740s) [t-SNE] Iteration 600: error = 1.3319019, gradient norm = 0.0002718 (50 iterations in 15.024s) [t-SNE] Iteration 650: error = 1.3304409, gradient norm = 0.0001503 (50 iterations in 16.152s) [t-SNE] Iteration 700: error = 1.3271215, gradient norm = 0.0001846 (50 iterations in 17.708s) [t-SNE] Iteration 750: error = 1.3240672, gradient norm = 0.0003101 (50 iterations in 17.296s) [t-SNE] Iteration 800: error = 1.3203163, gradient norm = 0.0002803 (50 iterations in 16.088s) [t-SNE] Iteration 850: error = 1.3171183, gradient norm = 0.0001622 (50 iterations in 16.188s) [t-SNE] Iteration 
900: error = 1.3167514, gradient norm = 0.0001083 (50 iterations in 16.636s) [t-SNE] Iteration 950: error = 1.3160887, gradient norm = 0.0001068 (50 iterations in 15.952s) [t-SNE] Iteration 1000: error = 1.3155508, gradient norm = 0.0001163 (50 iterations in 14.952s) [t-SNE] KL divergence after 1000 iterations: 1.315551 [t-SNE] Computed conditional probabilities for sample 527 / 527 [t-SNE] Mean sigma: 0.265834 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 54.1s
Quality: 0.4822863364238771 Quality: 0.4833435946194884
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.0min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 200 tasks | elapsed: 1.2s [Parallel(n_jobs=4)]: Done 324 out of 324 | elapsed: 1.8s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 324 samples in 0.009s... [t-SNE] Computed neighbors for 324 samples in 0.010s... [t-SNE] Computed conditional probabilities for sample 324 / 324 [t-SNE] Mean sigma: 0.294109 [t-SNE] Computed conditional probabilities in 0.086s [t-SNE] Iteration 50: error = 74.9118042, gradient norm = 0.4768100 (50 iterations in 18.068s) [t-SNE] Iteration 100: error = 73.6190643, gradient norm = 0.4705039 (50 iterations in 17.528s) [t-SNE] Iteration 150: error = 76.7830353, gradient norm = 0.4728844 (50 iterations in 15.920s) [t-SNE] Iteration 200: error = 78.4791107, gradient norm = 0.4381552 (50 iterations in 14.932s) [t-SNE] Iteration 250: error = 79.9008408, gradient norm = 0.4373595 (50 iterations in 15.248s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 79.900841 [t-SNE] Iteration 300: error = 1.5802404, gradient norm = 0.0059492 (50 iterations in 15.449s) [t-SNE] Iteration 350: error = 1.4259306, gradient norm = 0.0023950 (50 iterations in 18.519s) [t-SNE] Iteration 400: error = 1.3086658, gradient norm = 0.0044655 (50 iterations in 18.784s) [t-SNE] Iteration 450: error = 1.2384566, gradient norm = 0.0010874 (50 iterations in 19.646s) [t-SNE] Iteration 500: error = 1.2014426, gradient norm = 0.0024004 (50 iterations in 20.026s) [t-SNE] Iteration 550: error = 1.1947663, gradient norm = 0.0003106 (50 iterations in 14.980s) [t-SNE] Iteration 600: error = 1.1830704, gradient norm = 0.0020963 (50 iterations in 16.412s) [t-SNE] Iteration 650: error = 1.1776443, gradient norm = 0.0008044 (50 iterations in 17.040s) [t-SNE] Iteration 700: error = 1.1736438, gradient norm = 0.0005349 (50 iterations in 17.260s) [t-SNE] Iteration 750: error = 1.1721333, gradient norm = 0.0002314 (50 iterations in 17.372s) [t-SNE] Iteration 800: error = 1.1716466, gradient norm = 0.0001749 (50 iterations in 16.456s) [t-SNE] Iteration 850: error = 1.1711632, gradient norm = 0.0001846 (50 iterations in 14.553s) [t-SNE] Iteration 
900: error = 1.1696759, gradient norm = 0.0004238 (50 iterations in 15.715s) [t-SNE] Iteration 950: error = 1.1696721, gradient norm = 0.0001281 (50 iterations in 15.460s) [t-SNE] Iteration 1000: error = 1.1696715, gradient norm = 0.0000826 (50 iterations in 16.088s) [t-SNE] KL divergence after 1000 iterations: 1.169672 [t-SNE] Computed conditional probabilities for sample 324 / 324 [t-SNE] Mean sigma: 0.294109 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 51.0s
Quality: 0.3771801135059631 Quality: 0.37756181539393013 Quality: 0.37793405979602007 Quality: 0.3779951805683472 Quality: 0.378114526755708 Quality: 0.37823213631285835
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 1.0min finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 131 tasks | elapsed: 0.3s [Parallel(n_jobs=4)]: Done 164 out of 164 | elapsed: 0.5s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 164 samples in 0.002s... [t-SNE] Computed neighbors for 164 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 164 / 164 [t-SNE] Mean sigma: 0.209929 [t-SNE] Computed conditional probabilities in 0.012s [t-SNE] Iteration 50: error = 70.2954102, gradient norm = 0.4564344 (50 iterations in 20.750s) [t-SNE] Iteration 100: error = 64.0894241, gradient norm = 0.5363775 (50 iterations in 22.176s) [t-SNE] Iteration 150: error = 73.0228043, gradient norm = 0.4077587 (50 iterations in 18.440s) [t-SNE] Iteration 200: error = 74.7765427, gradient norm = 0.4171570 (50 iterations in 14.864s) [t-SNE] Iteration 250: error = 77.5197601, gradient norm = 0.4185043 (50 iterations in 14.912s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 77.519760 [t-SNE] Iteration 300: error = 1.5055809, gradient norm = 0.0070437 (50 iterations in 14.944s) [t-SNE] Iteration 350: error = 1.2626966, gradient norm = 0.0030173 (50 iterations in 15.568s) [t-SNE] Iteration 400: error = 1.1468855, gradient norm = 0.0021659 (50 iterations in 15.217s) [t-SNE] Iteration 450: error = 1.0339818, gradient norm = 0.0024887 (50 iterations in 14.847s) [t-SNE] Iteration 500: error = 0.9507586, gradient norm = 0.0028937 (50 iterations in 14.784s) [t-SNE] Iteration 550: error = 0.9030272, gradient norm = 0.0014392 (50 iterations in 14.828s) [t-SNE] Iteration 600: error = 0.8649312, gradient norm = 0.0017704 (50 iterations in 16.008s) [t-SNE] Iteration 650: error = 0.8348898, gradient norm = 0.0039798 (50 iterations in 15.224s) [t-SNE] Iteration 700: error = 0.7673158, gradient norm = 0.0026014 (50 iterations in 16.252s) [t-SNE] Iteration 750: error = 0.7440838, gradient norm = 0.0008964 (50 iterations in 16.784s) [t-SNE] Iteration 800: error = 0.7416285, gradient norm = 0.0002788 (50 iterations in 15.444s) [t-SNE] Iteration 850: error = 0.7416470, gradient norm = 0.0001942 (50 iterations in 14.780s) [t-SNE] Iteration 
900: error = 0.7417244, gradient norm = 0.0002327 (50 iterations in 14.805s) [t-SNE] Iteration 950: error = 0.7416185, gradient norm = 0.0002301 (50 iterations in 14.227s) [t-SNE] Iteration 1000: error = 0.7415859, gradient norm = 0.0002024 (50 iterations in 16.232s) [t-SNE] KL divergence after 1000 iterations: 0.741586 [t-SNE] Computed conditional probabilities for sample 164 / 164 [t-SNE] Mean sigma: 0.209929 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 46.7s
Quality: 0.3684341246791573 Quality: 0.3689909640209373
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 53.0s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 130 tasks | elapsed: 0.4s [Parallel(n_jobs=4)]: Done 150 out of 150 | elapsed: 0.5s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 150 samples in 0.002s... [t-SNE] Computed neighbors for 150 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 150 / 150 [t-SNE] Mean sigma: 0.303363 [t-SNE] Computed conditional probabilities in 0.033s [t-SNE] Iteration 50: error = 62.1743736, gradient norm = 0.5068683 (50 iterations in 17.994s) [t-SNE] Iteration 100: error = 61.6021996, gradient norm = 0.4833897 (50 iterations in 16.704s) [t-SNE] Iteration 150: error = 60.6205139, gradient norm = 0.4908145 (50 iterations in 15.012s) [t-SNE] Iteration 200: error = 62.0402794, gradient norm = 0.5495116 (50 iterations in 15.212s) [t-SNE] Iteration 250: error = 67.1128235, gradient norm = 0.4230658 (50 iterations in 15.280s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 67.112823 [t-SNE] Iteration 300: error = 0.9268088, gradient norm = 0.0091070 (50 iterations in 15.760s) [t-SNE] Iteration 350: error = 0.8261697, gradient norm = 0.0027754 (50 iterations in 16.024s) [t-SNE] Iteration 400: error = 0.6795245, gradient norm = 0.0041626 (50 iterations in 14.744s) [t-SNE] Iteration 450: error = 0.6293949, gradient norm = 0.0009833 (50 iterations in 15.100s) [t-SNE] Iteration 500: error = 0.5863745, gradient norm = 0.0016707 (50 iterations in 14.844s) [t-SNE] Iteration 550: error = 0.5282181, gradient norm = 0.0027886 (50 iterations in 15.616s) [t-SNE] Iteration 600: error = 0.4937559, gradient norm = 0.0022504 (50 iterations in 16.660s) [t-SNE] Iteration 650: error = 0.4915476, gradient norm = 0.0004750 (50 iterations in 16.836s) [t-SNE] Iteration 700: error = 0.4885727, gradient norm = 0.0020105 (50 iterations in 15.172s) [t-SNE] Iteration 750: error = 0.4850198, gradient norm = 0.0009184 (50 iterations in 15.868s) [t-SNE] Iteration 800: error = 0.4767795, gradient norm = 0.0014826 (50 iterations in 15.760s) [t-SNE] Iteration 850: error = 0.4766762, gradient norm = 0.0003542 (50 iterations in 14.341s) [t-SNE] Iteration 
900: error = 0.4753906, gradient norm = 0.0001535 (50 iterations in 14.423s) [t-SNE] Iteration 950: error = 0.4750355, gradient norm = 0.0002965 (50 iterations in 14.208s) [t-SNE] Iteration 1000: error = 0.4752204, gradient norm = 0.0002723 (50 iterations in 14.292s) [t-SNE] KL divergence after 1000 iterations: 0.475220 [t-SNE] Computed conditional probabilities for sample 150 / 150 [t-SNE] Mean sigma: 0.303363 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 38.3s
Quality: 0.42426092422833434 Quality: 0.42497098795584604
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 43.8s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 190 tasks | elapsed: 0.7s [Parallel(n_jobs=4)]: Done 206 out of 206 | elapsed: 0.8s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 206 samples in 0.002s... [t-SNE] Computed neighbors for 206 samples in 0.000s... [t-SNE] Computed conditional probabilities for sample 206 / 206 [t-SNE] Mean sigma: 0.324384 [t-SNE] Computed conditional probabilities in 0.029s [t-SNE] Iteration 50: error = 70.3927002, gradient norm = 0.4712104 (50 iterations in 14.329s) [t-SNE] Iteration 100: error = 66.8507080, gradient norm = 0.4725009 (50 iterations in 13.416s) [t-SNE] Iteration 150: error = 71.8817291, gradient norm = 0.4624828 (50 iterations in 12.805s) [t-SNE] Iteration 200: error = 66.6932373, gradient norm = 0.5124757 (50 iterations in 11.648s) [t-SNE] Iteration 250: error = 70.9855499, gradient norm = 0.4810375 (50 iterations in 12.565s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 70.985550 [t-SNE] Iteration 300: error = 1.4986506, gradient norm = 0.0070689 (50 iterations in 10.606s) [t-SNE] Iteration 350: error = 1.2537887, gradient norm = 0.0061423 (50 iterations in 10.176s) [t-SNE] Iteration 400: error = 1.1765001, gradient norm = 0.0021395 (50 iterations in 8.696s) [t-SNE] Iteration 450: error = 1.1348763, gradient norm = 0.0015564 (50 iterations in 8.637s) [t-SNE] Iteration 500: error = 1.1183195, gradient norm = 0.0008630 (50 iterations in 8.772s) [t-SNE] Iteration 550: error = 1.1143137, gradient norm = 0.0004676 (50 iterations in 9.850s) [t-SNE] Iteration 600: error = 1.1110399, gradient norm = 0.0018421 (50 iterations in 11.864s) [t-SNE] Iteration 650: error = 1.1094482, gradient norm = 0.0001646 (50 iterations in 9.824s) [t-SNE] Iteration 700: error = 1.1096261, gradient norm = 0.0000627 (50 iterations in 10.244s) [t-SNE] Iteration 750: error = 1.1097690, gradient norm = 0.0000724 (50 iterations in 11.271s) [t-SNE] Iteration 800: error = 1.1098850, gradient norm = 0.0000823 (50 iterations in 9.741s) [t-SNE] Iteration 850: error = 1.1097749, gradient norm = 0.0001131 (50 iterations in 11.604s) [t-SNE] Iteration 900: 
error = 1.1096740, gradient norm = 0.0000957 (50 iterations in 11.860s) [t-SNE] Iteration 950: error = 1.1096096, gradient norm = 0.0001147 (50 iterations in 13.012s) [t-SNE] Iteration 1000: error = 1.1096687, gradient norm = 0.0001211 (50 iterations in 11.376s) [t-SNE] Iteration 1000: did not make any progress during the last 300 episodes. Finished. [t-SNE] KL divergence after 1000 iterations: 1.109669 [t-SNE] Computed conditional probabilities for sample 206 / 206 [t-SNE] Mean sigma: 0.324385 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 29.2s
Quality: 0.3295771445885359 Quality: 0.33003551563026057 Quality: 0.33216621060360035 Quality: 0.33238049060767677
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 33.6s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 190 tasks | elapsed: 0.3s [Parallel(n_jobs=4)]: Done 212 out of 212 | elapsed: 0.4s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 212 samples in 0.002s... [t-SNE] Computed neighbors for 212 samples in 0.000s... [t-SNE] Computed conditional probabilities for sample 212 / 212 [t-SNE] Mean sigma: 0.318683 [t-SNE] Computed conditional probabilities in 0.015s [t-SNE] Iteration 50: error = 66.9068756, gradient norm = 0.5332282 (50 iterations in 10.069s) [t-SNE] Iteration 100: error = 71.0167847, gradient norm = 0.4579129 (50 iterations in 12.808s) [t-SNE] Iteration 150: error = 70.1806488, gradient norm = 0.4954595 (50 iterations in 9.764s) [t-SNE] Iteration 200: error = 69.6288376, gradient norm = 0.4648366 (50 iterations in 9.172s) [t-SNE] Iteration 250: error = 70.2868576, gradient norm = 0.4932132 (50 iterations in 11.896s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 70.286858 [t-SNE] Iteration 300: error = 1.5945715, gradient norm = 0.0068541 (50 iterations in 11.479s) [t-SNE] Iteration 350: error = 1.3297123, gradient norm = 0.0056128 (50 iterations in 9.435s) [t-SNE] Iteration 400: error = 1.2726594, gradient norm = 0.0019931 (50 iterations in 8.713s) [t-SNE] Iteration 450: error = 1.2333623, gradient norm = 0.0028442 (50 iterations in 8.854s) [t-SNE] Iteration 500: error = 1.1948920, gradient norm = 0.0036498 (50 iterations in 9.492s) [t-SNE] Iteration 550: error = 1.1726019, gradient norm = 0.0013135 (50 iterations in 9.346s) [t-SNE] Iteration 600: error = 1.1618693, gradient norm = 0.0012164 (50 iterations in 13.440s) [t-SNE] Iteration 650: error = 1.1559474, gradient norm = 0.0004689 (50 iterations in 9.412s) [t-SNE] Iteration 700: error = 1.1554742, gradient norm = 0.0001565 (50 iterations in 12.080s) [t-SNE] Iteration 750: error = 1.1549902, gradient norm = 0.0005599 (50 iterations in 14.036s) [t-SNE] Iteration 800: error = 1.1540482, gradient norm = 0.0003221 (50 iterations in 14.411s) [t-SNE] Iteration 850: error = 1.1521958, gradient norm = 0.0002952 (50 iterations in 14.689s) [t-SNE] Iteration 900: 
error = 1.1519454, gradient norm = 0.0002831 (50 iterations in 12.005s) [t-SNE] Iteration 950: error = 1.1516769, gradient norm = 0.0002166 (50 iterations in 10.052s) [t-SNE] Iteration 1000: error = 1.1518178, gradient norm = 0.0001594 (50 iterations in 9.816s) [t-SNE] KL divergence after 1000 iterations: 1.151818 [t-SNE] Computed conditional probabilities for sample 212 / 212 [t-SNE] Mean sigma: 0.318683 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 26.8s
Quality: 0.35296261095521964 Quality: 0.3554656253525572 Quality: 0.3573330470958818 Quality: 0.3587195902391799 Quality: 0.359311193953378
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 32.0s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 107 out of 107 | elapsed: 0.1s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 107 samples in 0.003s... [t-SNE] Computed neighbors for 107 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 107 / 107 [t-SNE] Mean sigma: 0.356830 [t-SNE] Computed conditional probabilities in 0.011s [t-SNE] Iteration 50: error = 66.8922729, gradient norm = 0.4140726 (50 iterations in 9.652s) [t-SNE] Iteration 100: error = 68.1002502, gradient norm = 0.4059772 (50 iterations in 10.540s) [t-SNE] Iteration 150: error = 63.1313477, gradient norm = 0.4626520 (50 iterations in 10.232s) [t-SNE] Iteration 200: error = 63.5654488, gradient norm = 0.4771143 (50 iterations in 11.924s) [t-SNE] Iteration 250: error = 59.4606934, gradient norm = 0.5629480 (50 iterations in 11.868s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 59.460693 [t-SNE] Iteration 300: error = 1.2004472, gradient norm = 0.0093496 (50 iterations in 11.727s) [t-SNE] Iteration 350: error = 1.0266163, gradient norm = 0.0126471 (50 iterations in 10.664s) [t-SNE] Iteration 400: error = 0.7765548, gradient norm = 0.0198589 (50 iterations in 10.040s) [t-SNE] Iteration 450: error = 0.6921906, gradient norm = 0.0069704 (50 iterations in 11.594s) [t-SNE] Iteration 500: error = 0.6821017, gradient norm = 0.0012849 (50 iterations in 12.394s) [t-SNE] Iteration 550: error = 0.6793482, gradient norm = 0.0017897 (50 iterations in 10.575s) [t-SNE] Iteration 600: error = 0.6792173, gradient norm = 0.0003390 (50 iterations in 11.217s) [t-SNE] Iteration 650: error = 0.6791668, gradient norm = 0.0002069 (50 iterations in 12.301s) [t-SNE] Iteration 700: error = 0.6791796, gradient norm = 0.0001941 (50 iterations in 11.192s) [t-SNE] Iteration 750: error = 0.6790826, gradient norm = 0.0001441 (50 iterations in 12.583s) [t-SNE] Iteration 800: error = 0.6792158, gradient norm = 0.0002547 (50 iterations in 10.136s) [t-SNE] Iteration 850: error = 0.6791533, gradient norm = 0.0001866 (50 iterations in 10.828s) [t-SNE] Iteration 
900: error = 0.6788290, gradient norm = 0.0003088 (50 iterations in 11.036s) [t-SNE] Iteration 950: error = 0.6792284, gradient norm = 0.0002699 (50 iterations in 9.788s) [t-SNE] Iteration 1000: error = 0.6793334, gradient norm = 0.0002055 (50 iterations in 1.376s) [t-SNE] KL divergence after 1000 iterations: 0.679333 [t-SNE] Computed conditional probabilities for sample 107 / 107 [t-SNE] Mean sigma: 0.356830 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 26.7s
Quality: 0.296847279195967 Quality: 0.2969687166520147
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 32.0s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 41 out of 41 | elapsed: 0.1s finished
[t-SNE] Computing 40 nearest neighbors... [t-SNE] Indexed 41 samples in 0.001s... [t-SNE] Computed neighbors for 41 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 41 / 41 [t-SNE] Mean sigma: 0.385315 [t-SNE] Computed conditional probabilities in 0.002s [t-SNE] Iteration 50: error = 50.8475342, gradient norm = 0.5391945 (50 iterations in 11.337s) [t-SNE] Iteration 100: error = 51.9092636, gradient norm = 0.4932864 (50 iterations in 10.816s) [t-SNE] Iteration 150: error = 45.9313240, gradient norm = 0.5463169 (50 iterations in 10.940s) [t-SNE] Iteration 200: error = 50.2845459, gradient norm = 0.5170355 (50 iterations in 10.704s) [t-SNE] Iteration 250: error = 47.3028793, gradient norm = 0.5661652 (50 iterations in 11.468s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 47.302879 [t-SNE] Iteration 300: error = 0.9006186, gradient norm = 0.0012346 (50 iterations in 10.859s) [t-SNE] Iteration 350: error = 0.7032563, gradient norm = 0.0006432 (50 iterations in 10.592s) [t-SNE] Iteration 400: error = 0.6397275, gradient norm = 0.0002910 (50 iterations in 10.408s) [t-SNE] Iteration 450: error = 0.6210990, gradient norm = 0.0002239 (50 iterations in 10.736s) [t-SNE] Iteration 500: error = 0.6014051, gradient norm = 0.0001866 (50 iterations in 11.220s) [t-SNE] Iteration 550: error = 0.5807078, gradient norm = 0.0001714 (50 iterations in 11.208s) [t-SNE] Iteration 600: error = 0.5657578, gradient norm = 0.0002955 (50 iterations in 11.384s) [t-SNE] Iteration 650: error = 0.5516077, gradient norm = 0.0001914 (50 iterations in 10.996s) [t-SNE] Iteration 700: error = 0.5462565, gradient norm = 0.0000971 (50 iterations in 10.992s) [t-SNE] Iteration 750: error = 0.5423430, gradient norm = 0.0001358 (50 iterations in 10.920s) [t-SNE] Iteration 800: error = 0.5317760, gradient norm = 0.0001949 (50 iterations in 10.740s) [t-SNE] Iteration 850: error = 0.5228560, gradient norm = 0.0001142 (50 iterations in 11.164s) [t-SNE] Iteration 
900: error = 0.5211853, gradient norm = 0.0000735 (50 iterations in 3.383s) [t-SNE] Iteration 950: error = 0.5194023, gradient norm = 0.0000959 (50 iterations in 0.405s) [t-SNE] Iteration 1000: error = 0.5171481, gradient norm = 0.0001010 (50 iterations in 6.936s) [t-SNE] KL divergence after 1000 iterations: 0.517148 [t-SNE] Computed conditional probabilities for sample 41 / 41 [t-SNE] Mean sigma: 0.385315 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 26.6s
Quality: 0.11301668955803623
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 31.0s finished [Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 116 tasks | elapsed: 0.2s [Parallel(n_jobs=4)]: Done 126 out of 133 | elapsed: 0.3s remaining: 0.0s [Parallel(n_jobs=4)]: Done 133 out of 133 | elapsed: 0.3s finished
[t-SNE] Computing 91 nearest neighbors... [t-SNE] Indexed 133 samples in 0.013s... [t-SNE] Computed neighbors for 133 samples in 0.001s... [t-SNE] Computed conditional probabilities for sample 133 / 133 [t-SNE] Mean sigma: 0.358098 [t-SNE] Computed conditional probabilities in 0.019s [t-SNE] Iteration 50: error = 65.2287521, gradient norm = 0.4760590 (50 iterations in 11.054s) [t-SNE] Iteration 100: error = 60.6679535, gradient norm = 0.5619494 (50 iterations in 11.794s) [t-SNE] Iteration 150: error = 65.3229523, gradient norm = 0.4648649 (50 iterations in 10.222s) [t-SNE] Iteration 200: error = 62.5101776, gradient norm = 0.5306792 (50 iterations in 9.824s) [t-SNE] Iteration 250: error = 66.0390778, gradient norm = 0.4529900 (50 iterations in 11.332s) [t-SNE] KL divergence after 250 iterations with early exaggeration: 66.039078 [t-SNE] Iteration 300: error = 1.2922381, gradient norm = 0.0078127 (50 iterations in 10.612s) [t-SNE] Iteration 350: error = 1.0967872, gradient norm = 0.0033260 (50 iterations in 12.928s) [t-SNE] Iteration 400: error = 0.9827366, gradient norm = 0.0054714 (50 iterations in 12.604s) [t-SNE] Iteration 450: error = 0.8571025, gradient norm = 0.0048200 (50 iterations in 12.428s) [t-SNE] Iteration 500: error = 0.8036230, gradient norm = 0.0047329 (50 iterations in 11.832s) [t-SNE] Iteration 550: error = 0.7901999, gradient norm = 0.0026567 (50 iterations in 12.692s) [t-SNE] Iteration 600: error = 0.7832347, gradient norm = 0.0009581 (50 iterations in 12.448s) [t-SNE] Iteration 650: error = 0.7781312, gradient norm = 0.0015238 (50 iterations in 12.980s) [t-SNE] Iteration 700: error = 0.7763915, gradient norm = 0.0003794 (50 iterations in 6.603s) [t-SNE] Iteration 750: error = 0.7766684, gradient norm = 0.0002323 (50 iterations in 0.991s) [t-SNE] Iteration 800: error = 0.7769586, gradient norm = 0.0002186 (50 iterations in 1.923s) [t-SNE] Iteration 850: error = 0.7770272, gradient norm = 0.0003346 (50 iterations in 8.099s) [t-SNE] Iteration 900: 
error = 0.7768216, gradient norm = 0.0002863 (50 iterations in 3.869s) [t-SNE] Iteration 950: error = 0.7767971, gradient norm = 0.0002419 (50 iterations in 8.428s) [t-SNE] Iteration 1000: error = 0.7767563, gradient norm = 0.0003430 (50 iterations in 5.421s) [t-SNE] KL divergence after 1000 iterations: 0.776756 [t-SNE] Computed conditional probabilities for sample 133 / 133 [t-SNE] Mean sigma: 0.358098 Beginning preprocessing + Leiden
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers. [Parallel(n_jobs=4)]: Done 42 tasks | elapsed: 27.5s
Quality: 0.3033654993249845 Quality: 0.3047200255148048 Quality: 0.3051752583267727
[Parallel(n_jobs=4)]: Done 50 out of 50 | elapsed: 32.9s finished
For each motif, determine the peaks that contain it
# Load the hypothetical CWMs (hCWMs) from the TF-MoDISco results file
hcwms = import_tfmodisco_motifs(tfm_results_path)
motif_keys = [key for key in hcwms]
# Load the filtered/collapsed MOODS motif hits and map them to peak indices
hits_bed_path = os.path.join(moods_dir, "moods_filtered_collapsed.bed")
hit_table = moods.import_moods_hits(hits_bed_path)
hit_peak_indices = get_hit_peak_indices(hit_table, motif_keys)
# Load the precomputed embeddings (this can take awhile)
embeddings = np.load(embeddings_path)["embeddings"]
# Collapse the sequence axis so the embeddings are position-independent
# (assumes axis 1 is the sequence/position axis -- TODO confirm upstream)
summed_embeddings = embeddings.sum(axis=1)
For each motif, show the subclusters that exist within the TF-MoDISco-identified subpatterns
# Show subclusters within each TF-MoDISco subpattern.
# NOTE(review): `tfm_obj` is not defined in this visible section -- presumably
# the TF-MoDISco results object opened in an earlier cell; confirm it is the
# h5py/modisco object loaded from `tfm_results_path`.
plot_motif_heterogeneity(tfm_obj)
/mnt/lab_data2/vir/tf_chr_atlas/02-24-2021/TF-Atlas/3M/reports/viz_sequence.py:152: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`). fig = plt.figure(figsize=figsize)
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 12988 | ||
| 0 | 1775 | ||
| 1 | 1650 | ||
| 2 | 1567 | ||
| 3 | 1403 | ||
| 4 | 1393 | ||
| 5 | 1322 | ||
| 6 | 1158 | ||
| 7 | 778 | ||
| 8 | 647 | ||
| 9 | 576 | ||
| 10 | 291 | ||
| 11 | 223 | ||
| 12 | 205 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 527 | ||
| 0 | 105 | ||
| 1 | 104 | ||
| 2 | 99 | ||
| 3 | 78 | ||
| 4 | 64 | ||
| 5 | 43 | ||
| 6 | 30 | ||
| 7 | 4 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 324 | ||
| 0 | 84 | ||
| 1 | 64 | ||
| 2 | 55 | ||
| 3 | 43 | ||
| 4 | 34 | ||
| 5 | 21 | ||
| 6 | 21 | ||
| 7 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 164 | ||
| 0 | 54 | ||
| 1 | 44 | ||
| 2 | 34 | ||
| 3 | 32 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 150 | ||
| 0 | 43 | ||
| 1 | 34 | ||
| 2 | 32 | ||
| 3 | 25 | ||
| 5 | 8 | ||
| 4 | 8 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 206 | ||
| 0 | 46 | ||
| 1 | 39 | ||
| 2 | 37 | ||
| 3 | 35 | ||
| 4 | 32 | ||
| 5 | 15 | ||
| 6 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 212 | ||
| 0 | 43 | ||
| 1 | 41 | ||
| 2 | 36 | ||
| 3 | 33 | ||
| 4 | 29 | ||
| 5 | 25 | ||
| 6 | 5 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 107 | ||
| 0 | 32 | ||
| 1 | 31 | ||
| 2 | 28 | ||
| 3 | 11 | ||
| 4 | 3 | ||
| 5 | 2 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 41 | ||
| 0 | 16 | ||
| 1 | 14 | ||
| 2 | 6 | ||
| 3 | 5 |
| Subpattern | Seqlets | Embeddings | hCWM |
|---|---|---|---|
| Agg. | 133 | ||
| 0 | 35 | ||
| 1 | 26 | ||
| 2 | 21 | ||
| 3 | 19 | ||
| 4 | 18 | ||
| 5 | 14 |
Cluster the peaks by their embeddings to highlight the structure of different peaks and different motifs
# Cluster peaks using the position-summed embeddings; the resulting table lists
# each motif key alongside its embeddings plot and hCWM (see output below).
plot_peak_clustering(summed_embeddings, motif_keys, hcwms, hit_peak_indices)
| Motif key | Embeddings | hCWM |
|---|---|---|
| 0_0 | ||
| 0_1 | ||
| 0_2 | ||
| 0_3 | ||
| 0_4 |