# Filepaths and Hard-coded Defaults
run = 1
timestamp = "2022-02-21_11-53-42"
cell_type = "K562/ENCSR261KBX"
bias_model = True
in_window = 2114
out_window = 1000
input_length, profile_length = in_window, out_window
shap_score_center_size = in_window
profile_display_center_size = 400
proj_root = "/users/kcochran/projects/procap_models/"
sequence_path = proj_root + "genomes/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta"
chrom_sizes = proj_root + "genomes/hg38.chrom.sizes.withrRNA"
data_dir = proj_root + "data/procap/processed/" + cell_type + "/"
val_peak_path = data_dir + "peaks_uni_and_bi_val.bed.gz"
plus_bw_path = data_dir + "final.5prime.pos.bigWig"
minus_bw_path = data_dir + "final.5prime.neg.bigWig"
if bias_model:
save_dir = proj_root + "model_out/procap_bias/bpnetlite_basic/" + cell_type + "/"
val_save_path = save_dir + timestamp + "_run" + str(run) + "_val"
else:
save_dir = proj_root + "model_out/procap/bpnetlite_basic/" + cell_type + "/"
val_save_path = save_dir + timestamp + "_run" + str(run) + "_train_and-val"
attr_save_path = save_dir + timestamp + "_run" + str(run) + "_deepshap"
modisco_out_path = attr_save_path.replace("deepshap", "modisco") + "/"
# Imports, Plotting Defaults
import os, sys
import numpy as np
import h5py
import pandas as pd
import gzip
import sklearn.cluster
import scipy.cluster.hierarchy
import modisco
import viz_sequence
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
plot_params = {
"figure.titlesize": 22,
"axes.titlesize": 22,
"axes.labelsize": 20,
"legend.fontsize": 18,
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"font.weight": "bold"
}
plt.rcParams.update(plot_params)
import io
import base64
import urllib.parse
import vdom.helpers as vdomh
from IPython.display import display
from tqdm.notebook import tqdm  # tqdm.tqdm_notebook is deprecated in favor of tqdm.notebook.tqdm
# Modisco Task-Specific Filepaths
scoring_type = "counts"
assert scoring_type in ["profile", "counts"], scoring_type
if scoring_type == "profile":
scores_path = attr_save_path + "_prof.npy"
onehot_scores_path = attr_save_path + "_prof_onehot.npy"
save_path = modisco_out_path + "results_allChroms_prof.hdf5"
seqlet_path = modisco_out_path + "seqlets_prof.txt"
else:
scores_path = attr_save_path + "_count.npy"
onehot_scores_path = attr_save_path + "_count_onehot.npy"
save_path = modisco_out_path + "results_allChroms_count.hdf5"
seqlet_path = modisco_out_path + "seqlets_count.txt"
assert os.path.exists(scores_path), scores_path
assert os.path.exists(onehot_scores_path), onehot_scores_path
# Load in True Profiles and Sequences
sys.path.append('../1_train_models')
from data_loading import extract_peaks
one_hot_seqs, true_profs = extract_peaks(sequence_path,
plus_bw_path, minus_bw_path, val_peak_path, in_window, out_window,
max_jitter=0, verbose=True)
one_hot_seqs = one_hot_seqs.swapaxes(1,2)
Reading FASTA: 100% (24/24 records); Loading Peaks: 3,834 peaks loaded
# Load in Coordinates of Examples
def load_coords(peak_bed):
if peak_bed.endswith(".gz"):
with gzip.open(peak_bed) as f:
lines = [line.decode().split() for line in f]
else:
with open(peak_bed) as f:
lines = [line.split() for line in f]
coords = []
for line in lines:
chrom, peak_start, peak_end = line[0], int(line[1]), int(line[2])
mid = (peak_start + peak_end) // 2
window_start = mid - in_window // 2
window_end = mid + in_window // 2
coords.append((chrom, window_start, window_end))
return coords
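# A quick sanity check of the windowing arithmetic above, using a made-up peak
# (hypothetical coordinates, not from the real BED file): the peak midpoint is
# found and an in_window-wide window is centered on it
peak_start, peak_end = 1000, 1600
mid = (peak_start + peak_end) // 2  # 1300
assert (mid - in_window // 2, mid + in_window // 2) == (243, 2357)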
coords = load_coords(val_peak_path)
# Import SHAP scores, predicted profiles
hyp_scores = np.load(scores_path).swapaxes(1,2)
pred_profs = np.exp(np.load(val_save_path + ".profs.npy"))
#pred_counts = np.load(val_save_path + ".counts.npy")
# Load modisco results object
def import_tfmodisco_results(tfm_results_path, hyp_scores, one_hot_seqs):
"""
Imports the TF-MoDISco results object.
Arguments:
`tfm_results_path`: path to HDF5 containing TF-MoDISco results
`hyp_scores`: hypothetical importance scores used for this run
`one_hot_seqs`: input sequences used for this run
"""
# Everything should already be cut to `input_center_cut_size`
act_scores = hyp_scores * one_hot_seqs
track_set = modisco.tfmodisco_workflow.workflow.prep_track_set(
task_names=["task0"],
contrib_scores={"task0": act_scores},
hypothetical_contribs={"task0": hyp_scores},
one_hot=one_hot_seqs
)
with h5py.File(tfm_results_path,"r") as f:
return modisco.tfmodisco_workflow.workflow.TfModiscoResults.from_hdf5(f, track_set=track_set)
tfm_obj = import_tfmodisco_results(save_path, hyp_scores, one_hot_seqs)
def extract_profiles_and_coords(
seqlets_arr, one_hot_seqs, hyp_scores, true_profs, pred_profs, pred_coords,
input_length, profile_length, input_center_cut_size, profile_center_cut_size,
task_index=None
):
"""
From the seqlets object of a TF-MoDISco pattern's seqlets and alignments,
extracts the predicted and observed profiles of the model, as well as the
set of coordinates for the seqlets.
Arguments:
`seqlets_arr`: a TF-MoDISco pattern's seqlets object array (N-array)
`one_hot_seqs`: an N x R x 4 array of input sequences, where R is
the cut centered size
`hyp_scores`: an N x R x 4 array of hypothetical importance scores
`true_profs`: an N x T x O x 2 array of true profile counts
`pred_profs`: an N x T x O x 2 array of predicted profile probabilities
`pred_coords`: an N x 3 object array of coordinates for the input sequence
underlying the predictions
`input_length`: length of original input sequences, I
`profile_length`: length of profile predictions, O
`input_center_cut_size`: centered cut size of SHAP scores used
`profile_center_cut_size`: size to cut profiles to when returning them, P
`task_index`: index of task to focus on for profiles; if None, returns
profiles for all tasks
    Returns an N x (T or 1) x P x 2 array of true profile counts, an
    N x (T or 1) x P x 2 array of predicted profile probabilities, an N x Q x 4
    array of one-hot seqlet sequences, an N x Q x 4 array of hypothetical seqlet
    importance scores, an N x 3 object array of seqlet coordinates, and an
    N-array of booleans marking which seqlets are reverse-complemented, where
    P is the profile cut size and Q is the seqlet length. Returned profiles are
    centered at the same center as the seqlets.
    Note that the seqlet example indices must correspond exactly to indices into
    these N-arrays: the sequences here must be the same ones used to compute the
    original SHAP scores.
"""
true_seqlet_profs, pred_seqlet_profs, seqlet_seqs, seqlet_hyps, seqlet_coords, rcs = [], [], [], [], [], []
def seqlet_coord_to_profile_coord(seqlet_coord):
return seqlet_coord + ((input_length - input_center_cut_size) // 2) - ((input_length - profile_length) // 2)
def seqlet_coord_to_input_coord(seqlet_coord):
return seqlet_coord + ((input_length - input_center_cut_size) // 2)
# For each seqlet, fetch the true/predicted profiles
for seqlet in seqlets_arr:
coord_index = seqlet.coor.example_idx
seqlet_start = seqlet.coor.start
seqlet_end = seqlet.coor.end
seqlet_rc = seqlet.coor.is_revcomp
# Get indices of profile to cut out
seqlet_center = (seqlet_start + seqlet_end) // 2
prof_center = seqlet_coord_to_profile_coord(seqlet_center)
prof_start = prof_center - (profile_center_cut_size // 2)
prof_end = prof_start + profile_center_cut_size
if prof_start < 0 or prof_end > out_window:
continue
        if task_index is None or true_profs.shape[1] == 1:
            # Use all tasks if the predictions only have 1 task to begin with
            task_start, task_end = None, None
        else:
            task_start, task_end = task_index, task_index + 1
        true_prof = true_profs[coord_index, task_start:task_end, prof_start:prof_end]  # (T or 1) x P x 2
        pred_prof = pred_profs[coord_index, task_start:task_end, prof_start:prof_end]  # (T or 1) x P x 2
true_seqlet_profs.append(true_prof)
pred_seqlet_profs.append(pred_prof)
        # The one-hot sequences and hypothetical scores are assumed to already be
        # cut/centered, so their indices match the seqlet indices
rcs.append(seqlet_rc)
if seqlet_rc:
seqlet_seqs.append(np.flip(one_hot_seqs[coord_index, seqlet_start:seqlet_end], axis=(0, 1)))
seqlet_hyps.append(np.flip(hyp_scores[coord_index, seqlet_start:seqlet_end], axis=(0, 1)))
else:
seqlet_seqs.append(one_hot_seqs[coord_index, seqlet_start:seqlet_end])
seqlet_hyps.append(hyp_scores[coord_index, seqlet_start:seqlet_end])
# Get the coordinates of the seqlet based on the input coordinates
inp_start = seqlet_coord_to_input_coord(seqlet_start)
inp_end = seqlet_coord_to_input_coord(seqlet_end)
chrom, start, _ = pred_coords[coord_index]
seqlet_coords.append([chrom, start + inp_start, start + inp_end])
return np.stack(true_seqlet_profs), np.stack(pred_seqlet_profs), np.stack(seqlet_seqs), np.stack(seqlet_hyps), np.array(seqlet_coords, dtype=object), np.array(rcs)
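# A small worked check of the coordinate transforms above, under this notebook's
# settings (input_length = 2114, shap_score_center_size = 2114,
# profile_length = 1000, taken from the defaults at the top). The SHAP scores
# are not additionally center-cut here, so seqlet coordinates are already in
# input space, and the profile offset is -(2114 - 1000) // 2 = -557: a seqlet
# centered at the input midpoint (1057) maps to the profile midpoint (500)
input_offset = (input_length - shap_score_center_size) // 2           # 0
profile_offset = input_offset - (input_length - profile_length) // 2  # -557
assert 1057 + profile_offset == 500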
def plot_profiles(seqlet_true_profs, seqlet_pred_profs, kmeans_clusters=None, save_path=None):
"""
Plots the given profiles with a heatmap.
Arguments:
`seqlet_true_profs`: an N x O x 2 NumPy array of true profiles, either as raw
counts or probabilities (they will be normalized)
`seqlet_pred_profs`: an N x O x 2 NumPy array of predicted profiles, either as
raw counts or probabilities (they will be normalized)
        `kmeans_clusters`: number of k-means clusters to use for the profile
            heatmaps; if None, this is chosen based on the number of profiles
`save_path`: if provided, save the profile matrices here
Returns the figure.
"""
assert len(seqlet_true_profs.shape) == 3
assert seqlet_true_profs.shape == seqlet_pred_profs.shape
num_profs, width, _ = seqlet_true_profs.shape
# First, normalize the profiles along the output profile dimension
def normalize(arr, axis=0):
arr_sum = np.sum(arr, axis=axis, keepdims=True)
arr_sum[arr_sum == 0] = 1 # If 0, keep 0 as the quotient instead of dividing by 0
return arr / arr_sum
true_profs_norm = normalize(seqlet_true_profs, axis=1)
pred_profs_norm = normalize(seqlet_pred_profs, axis=1)
# Compute the mean profiles across all examples
true_profs_mean = np.mean(true_profs_norm, axis=0)
pred_profs_mean = np.mean(pred_profs_norm, axis=0)
    # Perform k-means clustering on the predicted profiles, with the strands pooled
    if kmeans_clusters is None:
        kmeans_clusters = max(5, num_profs // 50)  # Scale the number of clusters with the number of profiles, with a minimum of 5
kmeans = sklearn.cluster.KMeans(n_clusters=kmeans_clusters)
cluster_assignments = kmeans.fit_predict(
np.reshape(pred_profs_norm, (pred_profs_norm.shape[0], -1))
)
# Perform hierarchical clustering on the cluster centers to determine optimal ordering
kmeans_centers = kmeans.cluster_centers_
cluster_order = scipy.cluster.hierarchy.leaves_list(
scipy.cluster.hierarchy.optimal_leaf_ordering(
scipy.cluster.hierarchy.linkage(kmeans_centers, method="centroid"), kmeans_centers
)
)
# Order the profiles so that the cluster assignments follow the optimal ordering
cluster_inds = []
for cluster_id in cluster_order:
cluster_inds.append(np.where(cluster_assignments == cluster_id)[0])
cluster_inds = np.concatenate(cluster_inds)
# Compute a matrix of profiles, normalized to the maximum height, ordered by clusters
def make_profile_matrix(flat_profs, order_inds):
matrix = flat_profs[order_inds]
maxes = np.max(matrix, axis=1, keepdims=True)
maxes[maxes == 0] = 1 # If 0, keep 0 as the quotient instead of dividing by 0
return matrix / maxes
true_matrix = make_profile_matrix(true_profs_norm, cluster_inds)
pred_matrix = make_profile_matrix(pred_profs_norm, cluster_inds)
    if save_path:
        np.savez_compressed(
            save_path,
            true_profs_mean=true_profs_mean, pred_profs_mean=pred_profs_mean,
            true_matrix=true_matrix, pred_matrix=pred_matrix
        )
# Create a figure with the right dimensions
mean_height = 4
heatmap_height = min(num_profs * 0.004, 8)
fig_height = mean_height + (2 * heatmap_height)
fig, ax = plt.subplots(
3, 2, figsize=(16, fig_height), sharex=True,
gridspec_kw={
"width_ratios": [1, 1],
"height_ratios": [mean_height / fig_height, heatmap_height / fig_height, heatmap_height / fig_height]
}
)
# Plot the average predictions
ax[0, 0].plot(true_profs_mean[:, 0], color="darkslateblue")
ax[0, 0].plot(-true_profs_mean[:, 1], color="darkorange")
ax[0, 1].plot(pred_profs_mean[:, 0], color="darkslateblue")
ax[0, 1].plot(-pred_profs_mean[:, 1], color="darkorange")
# Set axes on average predictions
max_mean_val = max(np.max(true_profs_mean), np.max(pred_profs_mean))
mean_ylim = max_mean_val * 1.05 # Make 5% higher
ax[0, 0].set_title("True profiles")
ax[0, 0].set_ylabel("Average probability")
ax[0, 1].set_title("Predicted profiles")
for j in (0, 1):
ax[0, j].set_ylim(-mean_ylim, mean_ylim)
ax[0, j].label_outer()
# Plot the heatmaps
ax[1, 0].imshow(true_matrix[:, :, 0], interpolation="nearest", aspect="auto", cmap="Blues")
ax[1, 1].imshow(pred_matrix[:, :, 0], interpolation="nearest", aspect="auto", cmap="Blues")
ax[2, 0].imshow(true_matrix[:, :, 1], interpolation="nearest", aspect="auto", cmap="Oranges")
ax[2, 1].imshow(pred_matrix[:, :, 1], interpolation="nearest", aspect="auto", cmap="Oranges")
# Set axes on heatmaps
for i in (1, 2):
for j in (0, 1):
ax[i, j].set_yticks([])
ax[i, j].set_yticklabels([])
ax[i, j].label_outer()
width = true_matrix.shape[1]
delta = 100
num_deltas = (width // 2) // delta
labels = list(range(max(-width // 2, -num_deltas * delta), min(width // 2, num_deltas * delta) + 1, delta))
tick_locs = [label + max(width // 2, num_deltas * delta) for label in labels]
for j in (0, 1):
ax[2, j].set_xticks(tick_locs)
ax[2, j].set_xticklabels(labels)
ax[2, j].set_xlabel("Distance from seqlet center (bp)")
fig.tight_layout()
plt.show()
# Create a figure with the right dimensions
fig2, ax = plt.subplots(
1, 2, figsize=(16, mean_height), sharex=True,
gridspec_kw={"width_ratios": [1, 1]}
)
# Plot the average predictions
mid = true_profs_mean.shape[0] // 2
zoom_width = 60
start = mid - zoom_width // 2
end = mid + zoom_width // 2
ax[0].plot(true_profs_mean[start:end, 0], color="darkslateblue")
ax[0].plot(-true_profs_mean[start:end, 1], color="darkorange")
ax[1].plot(pred_profs_mean[start:end, 0], color="darkslateblue")
ax[1].plot(-pred_profs_mean[start:end, 1], color="darkorange")
# Set axes on average predictions
max_mean_val = max(np.max(true_profs_mean[start:end]), np.max(pred_profs_mean[start:end]))
mean_ylim = max_mean_val * 1.05 # Make 5% higher
ax[0].set_title("True profiles")
ax[0].set_ylabel("Average probability")
ax[1].set_title("Predicted profiles")
delta = 10
num_deltas = (zoom_width // 2) // delta
labels = list(range(max(-zoom_width // 2, -num_deltas * delta), min(zoom_width // 2, num_deltas * delta) + 1, delta))
tick_locs = [label + max(zoom_width // 2, num_deltas * delta) for label in labels]
for j in (0, 1):
ax[j].set_ylim(-mean_ylim, mean_ylim)
ax[j].label_outer()
ax[j].set_xticks(tick_locs)
ax[j].set_xticklabels(labels)
ax[j].set_xlabel("Distance from seqlet center (bp)")
fig2.tight_layout(w_pad=4, rect=(0.1, 0, 0.95, 1))
plt.show()
return fig
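# A minimal toy illustration (random data, not real profiles) of the heatmap
# row-ordering trick used in plot_profiles: k-means groups the rows, then
# optimal leaf ordering on the cluster centers places similar clusters
# adjacently in the final heatmap
toy = np.random.RandomState(0).rand(100, 8)
toy_kmeans = sklearn.cluster.KMeans(n_clusters=4).fit(toy)
toy_order = scipy.cluster.hierarchy.leaves_list(
    scipy.cluster.hierarchy.optimal_leaf_ordering(
        scipy.cluster.hierarchy.linkage(toy_kmeans.cluster_centers_, method="centroid"),
        toy_kmeans.cluster_centers_
    )
)
toy_row_inds = np.concatenate([np.where(toy_kmeans.labels_ == c)[0] for c in toy_order])
assert len(toy_row_inds) == len(toy)  # every row appears exactly once, grouped by cluster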
def get_summit_distances(coords, peak_coords):
"""
Given a set of coordinates, computes the distance of the center of each
coordinate to the nearest summit.
Arguments:
`coords`: an N x 3 object array of coordinates
`peak_table`: a 10-column table of peak data, as imported by
`import_peak_table`
Returns and N-array of integers, which is the distance of each coordinate
midpoint to the nearest coordinate.
"""
peak_chroms = [coord[0] for coord in peak_coords]
peak_summits = [(coord[1] + coord[2]) // 2 for coord in peak_coords]
peak_table = pd.DataFrame({"chrom" : peak_chroms, "summit" : peak_summits})
chroms = coords[:, 0]
midpoints = (coords[:, 1] + coords[:, 2]) // 2
dists = []
for i in range(len(coords)):
chrom = chroms[i]
midpoint = midpoints[i]
rows = peak_table[peak_table["chrom"] == chrom]
dist_arr = (midpoint - rows["summit"]).values
min_dist = dist_arr[np.argmin(np.abs(dist_arr))]
dists.append(min_dist)
return np.array(dists)
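# A made-up example of the signed-distance convention in get_summit_distances:
# a seqlet at chr1:1040-1060 (midpoint 1050) against a single peak
# chr1:1000-1200 (summit 1100) yields 1050 - 1100 = -50, i.e. 50 bp upstream
toy_seqlets = np.array([["chr1", 1040, 1060]], dtype=object)
toy_peaks = [("chr1", 1000, 1200)]
assert get_summit_distances(toy_seqlets, toy_peaks)[0] == -50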
def plot_summit_dists(summit_dists):
"""
Plots the distribution of seqlet distances to summits.
Arguments:
`summit_dists`: the array of distances as returned by
`get_summit_distances`
Returns the figure.
"""
fig = plt.figure(figsize=(8, 6))
num_bins = max(len(summit_dists) // 30, 20)
plt.hist(summit_dists, bins=num_bins, color="purple")
plt.title("Histogram of distance of seqlets to peak summits")
plt.xlabel("Signed distance from seqlet center to nearest peak summit (bp)")
plt.show()
return fig
BACKGROUND_FREQS = np.array([0.25, 0.25, 0.25, 0.25])
def pfm_info_content(track, pseudocount=0.001):
"""
Given an L x 4 track, computes information content for each base and
returns it as an L-array.
"""
num_bases = track.shape[1]
# Normalize track to probabilities along base axis
track_norm = (track + pseudocount) / (np.sum(track, axis=1, keepdims=True) + (num_bases * pseudocount))
ic = track_norm * np.log2(track_norm / np.expand_dims(BACKGROUND_FREQS, axis=0))
return np.sum(ic, axis=1)
def pfm_to_pwm(pfm):
    """Scales each position of an L x 4 PFM by its information content."""
    ic = pfm_info_content(pfm)
    return pfm * np.expand_dims(ic, axis=1)
def trim_motif_by_ic(pfm, motif, min_ic=0.2, pad=0):
"""
Given the PFM and motif (both L x 4 arrays) (the motif could be the
PFM itself), trims `motif` by cutting off flanks of low information
content in `pfm`. `min_ic` is the minimum required information
content. If specified this trimmed motif will be extended on either
side by `pad` bases.
If no base passes the `min_ic` threshold, then no trimming is done.
"""
# Trim motif based on information content
ic = pfm_info_content(pfm)
pass_inds = np.where(ic >= min_ic)[0] # Cut off flanks with less than min_ic IC
if not pass_inds.size:
return motif
# Expand trimming to +/- pad bp on either side
start, end = max(0, np.min(pass_inds) - pad), min(len(pfm), np.max(pass_inds) + pad + 1)
return motif[start:end]
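# A worked toy example (made-up PFM) of the information-content functions above:
# a deterministic column scores near the 2-bit maximum (slightly less due to the
# pseudocount), a uniform column scores 0, and trimming keeps the informative
# core, optionally padded
toy_flank = np.full((3, 4), 0.25)                           # 3 uninformative positions
toy_core = np.tile([1.0, 0.0, 0.0, 0.0], (4, 1))            # 4 deterministic positions
toy_pfm = np.concatenate([toy_flank, toy_core, toy_flank])  # L = 10
toy_ic = pfm_info_content(toy_pfm)
assert toy_ic[3] > 1.9 and abs(toy_ic[0]) < 1e-6
assert len(trim_motif_by_ic(toy_pfm, toy_pfm)) == 4         # core only (positions 3-6)
assert len(trim_motif_by_ic(toy_pfm, toy_pfm, pad=2)) == 8  # core +/- 2 bp (positions 1-8)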
def figure_to_vdom_image(figure):
buf = io.BytesIO()
figure.savefig(buf, format='png')
buf.seek(0)
string = base64.b64encode(buf.read())
return vdomh.div(
vdomh.img(src='data:image/png;base64,' + urllib.parse.quote(string)),
style={"display": "inline-block"}
)
motif_pfms, motif_hcwms, motif_cwms = [], [], [] # Save the trimmed PFMs, hCWMs, and CWMs
motif_pfms_short = [] # PFMs that are even more trimmed (for TOMTOM)
num_seqlets = [] # Number of seqlets for each motif
motif_seqlets = [] # Save seqlets of each motif
metaclusters = tfm_obj.metacluster_idx_to_submetacluster_results
num_metaclusters = len(metaclusters)
for metacluster_i, (metacluster_key, metacluster) in enumerate(metaclusters.items()):
display(vdomh.h3("Metacluster %d/%d" % (metacluster_i + 1, num_metaclusters)))
patterns = metacluster.seqlets_to_patterns_result.patterns
if not patterns:
break
motif_pfms.append([])
motif_hcwms.append([])
motif_cwms.append([])
motif_pfms_short.append([])
num_seqlets.append([])
motif_seqlets.append([])
num_patterns = len(patterns)
for pattern_i, pattern in enumerate(patterns):
seqlets = pattern.seqlets
display(vdomh.h4("Pattern %d/%d" % (pattern_i + 1, num_patterns)))
display(vdomh.p("%d seqlets" % len(seqlets)))
pfm = pattern["sequence"].fwd
hcwm = pattern["task0_hypothetical_contribs"].fwd
cwm = pattern["task0_contrib_scores"].fwd
pfm_fig = viz_sequence.plot_weights(pfm, subticks_frequency=10, return_fig=True)
hcwm_fig = viz_sequence.plot_weights(hcwm, subticks_frequency=10, return_fig=True)
cwm_fig = viz_sequence.plot_weights(cwm, subticks_frequency=10, return_fig=True)
pfm_fig.tight_layout()
hcwm_fig.tight_layout()
cwm_fig.tight_layout()
motif_table = vdomh.table(
vdomh.tr(
vdomh.td("Sequence (PFM)"),
vdomh.td(figure_to_vdom_image(pfm_fig))
),
vdomh.tr(
vdomh.td("Hypothetical contributions (hCWM)"),
vdomh.td(figure_to_vdom_image(hcwm_fig))
),
vdomh.tr(
vdomh.td("Actual contributions (CWM)"),
vdomh.td(figure_to_vdom_image(cwm_fig))
)
)
display(motif_table)
plt.close("all") # Remove all standing figures
# Trim motif based on information content
short_trimmed_pfm = trim_motif_by_ic(pfm, pfm)
motif_pfms_short[-1].append(short_trimmed_pfm)
# Expand trimming to +/- 4bp on either side
trimmed_pfm = trim_motif_by_ic(pfm, pfm, pad=4)
trimmed_hcwm = trim_motif_by_ic(pfm, hcwm, pad=4)
trimmed_cwm = trim_motif_by_ic(pfm, cwm, pad=4)
motif_pfms[-1].append(trimmed_pfm)
motif_hcwms[-1].append(trimmed_hcwm)
motif_cwms[-1].append(trimmed_cwm)
num_seqlets[-1].append(len(seqlets))
seqlet_true_profs, seqlet_pred_profs, seqlet_seqs, seqlet_hyps, seqlet_coords, rcs = extract_profiles_and_coords(
seqlets, one_hot_seqs, hyp_scores, true_profs, pred_profs, coords,
input_length, profile_length, shap_score_center_size,
profile_display_center_size, task_index=None
)
motif_seqlets[-1].append((seqlet_seqs, seqlet_hyps))
seqlet_true_profs = seqlet_true_profs.swapaxes(1,2)
seqlet_pred_profs = seqlet_pred_profs.swapaxes(1,2)
        # For reverse-complement seqlets, flip both the position and strand axes
        for i in range(len(rcs)):
            if rcs[i]:
                seqlet_true_profs[i] = seqlet_true_profs[i, ::-1, ::-1]
                seqlet_pred_profs[i] = seqlet_pred_profs[i, ::-1, ::-1]
        #assert np.allclose(np.sum(seqlet_seqs, axis=0) / len(seqlet_seqs), pattern["sequence"].fwd)
        # ^ Sanity check: the PFM derived from the seqlets should match the PFM stored in the pattern
prof_fig = plot_profiles(seqlet_true_profs, seqlet_pred_profs)
summit_dists = get_summit_distances(seqlet_coords, coords)
dist_fig = plot_summit_dists(summit_dists)
(output: motif tables and profile/summit-distance plots for 11 patterns, with 15648, 528, 385, 361, 308, 178, 167, 91, 60, 41, and 38 seqlets; each table shows the sequence PFM, hypothetical-contribution hCWM, and actual-contribution CWM logos)