# Analyze models with w=200
# Model dir: {ddir}/processed/dnase/exp/models/background/models/w=200,filters=128
import matplotlib.pyplot as plt
import basepair
from basepair.plots import regression_eval
from basepair.losses import MultichannelMultinomialNLL
from basepair.exp.dnase.models import KmerBiasModel
from kipoi.readers import HDF5Reader
from basepair.utils import read_pkl
from keras.models import load_model
from basepair.cli.schemas import DataSpec, TaskSpec
from basepair.datasets import StrandedProfile
from basepair.datasets import chip_exo_nexus
from pathlib import Path
import pandas as pd
import numpy as np
from basepair.config import create_tf_session, get_data_dir
ddir = get_data_dir()
SEQ_WIDTH = 1000
# Load predictions of the retrained, normalized 6-mer model.
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/retrained.IMR90_6mer.w{SEQ_WIDTH}.normalized.preds.h5")
r.open()
try:
    preds = r.f['/preds'][:]
    # Collapse the strand axis of the observed profile.
    targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)
finally:
    # Close the reader even if a read fails (was previously leaked on error).
    r.close()
# CONSISTENCY FIX: the edge trim previously hard-coded 1000 instead of SEQ_WIDTH.
# NOTE(review): the trim is asymmetric (3 from the left, 2 from the right) — confirm intended.
y_true = targets[:, 3:(SEQ_WIDTH - 2)]
# Work in log(1 + total counts) per example.
yt_counts = np.log(1 + y_true.sum(axis=-1))
yp_counts = np.log(1 + preds.sum(axis=-1))
regression_eval(yt_counts, yp_counts, alpha=0.1)
# The joint count distribution is bi-modal (~7% / ~93%).
plt.hexbin(yp_counts, yt_counts, gridsize=(40, 30), cmap=plt.cm.BuGn)
# Zoom into the mid-range mode.
mid_range = (yt_counts > 1) & (yt_counts < 3.5)
plt.hexbin(yp_counts[mid_range], yt_counts[mid_range],
           gridsize=(30, 15), cmap=plt.cm.BuGn)
# Correlation when removing 0's.
nonzero = yt_counts > 0
regression_eval(yt_counts[nonzero], yp_counts[nonzero], alpha=0.1)
# Outliers (~0.4%): examples with raw total count > 25.
outlier = np.exp(yt_counts) - 1 > 25
regression_eval(yt_counts[outlier], yp_counts[outlier], alpha=0.1)
# Load predictions of the original (non-retrained) 6-mer model.
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/IMR90_6mer.w{SEQ_WIDTH}.preds.h5")
r.open()
try:
    preds = r.f['/preds'][:]
    # Collapse the strand axis of the observed profile.
    targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)
finally:
    # Close the reader even if a read fails (was previously leaked on error).
    r.close()
# CONSISTENCY FIX: use SEQ_WIDTH rather than the hard-coded 1000 for the edge trim.
y_true = targets[:, 3:(SEQ_WIDTH - 2)]
yt_counts = np.log(1 + y_true.sum(axis=-1))
yp_counts = np.log(1 + preds.sum(axis=-1))
regression_eval(yt_counts, yp_counts, alpha=0.1)
# w=1000,filters=128: zoomed-in hexbin of the mid-range counts.
zoom = (yt_counts > 1) & (yt_counts < 3.5)
plt.hexbin(yp_counts[zoom], yt_counts[zoom],
           gridsize=(30, 15), cmap=plt.cm.BuGn)
# Load the newly trained and the original (pickled) k-mer bias models.
km_new = read_pkl(f"{ddir}/processed/dnase/6mer/new_model.train-pos3.w{SEQ_WIDTH}.normalized.pkl")
old_pkl = read_pkl(f"{ddir}/raw/dnase/6mer/IMR90_6mer.pickle")
# The old pickle stores per-strand values; only the forward strand is used here.
km_old = KmerBiasModel({kmer: float(entry['forward']) for kmer, entry in old_pkl.items()})
def kmermodel2df(km):
    """Convert a k-mer bias model into a two-column DataFrame.

    Args:
        km: object exposing a ``d`` attribute mapping k-mer -> value
            (e.g. ``KmerBiasModel``).

    Returns:
        pd.DataFrame with columns ``kmer`` and ``value``, one row per entry.
        Unlike the previous per-row-dict construction, an empty model still
        yields a frame with the expected columns.
    """
    return pd.DataFrame(list(km.d.items()), columns=["kmer", "value"])
# Compare old vs. new k-mer bias values on a log scale.
dfm = kmermodel2df(km_new).merge(kmermodel2df(km_old), on='kmer', suffixes=('_new', '_old'))
dfm.head()
# NOTE(review): the new values are scaled by 100 before the log transform,
# presumably to put both models on a comparable scale — confirm.
plt.scatter(np.log(1 + 100 * dfm.value_new), np.log(1 + dfm.value_old), alpha=0.1)
plt.xlabel("value_new")
plt.ylabel("value_old")
# Collect every w=1000 model directory for the performance comparison below.
mdir_root = Path(f"{ddir}/processed/dnase/exp/models/background/models")
mdirs = [p for p in mdir_root.glob("w=1000*")]
mdirs
# performance comparison
def get_performance(mdir):
    """Return the best (lowest ``val_loss``) epoch's metrics for one model dir.

    Args:
        mdir: model directory (str or Path) containing a ``history.csv``
            with a ``val_loss`` column.

    Returns:
        dict mapping metric name -> value for the best epoch, or ``{}`` when
        the history file is missing or malformed (best-effort: some runs
        simply have no history).
    """
    try:
        dfh = pd.read_csv(f"{mdir}/history.csv")
        # idxmin returns an index label; with read_csv's default RangeIndex
        # it coincides with the positional index used by iloc.
        return dict(dfh.iloc[dfh.val_loss.idxmin()])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return {}
# One row per model directory: best-epoch metrics plus the directory name.
df_exp = pd.DataFrame([dict(get_performance(md), name=md.name) for md in mdirs])
df_exp.sort_values("val_counts/DNase_loss")
# Select the model to inspect (filters=256 performs the same as filters=128).
mdir = mdir_root / "w=1000,filters=128"
create_tf_session(0)
# Register the loss under both names it may have been saved as.
custom_objects = {
    "mc_multinomial_nll": MultichannelMultinomialNLL(1),
    "MultichannelMultinomialNLL": MultichannelMultinomialNLL(1),
}
model = load_model(mdir / "model.h5", custom_objects=custom_objects)
from basepair.config import test_chr
from basepair.preproc import AppendCounts
# Build the held-out test set from this model's own dataspec and predict on it.
ds = DataSpec.load(mdir / "dataspec.yaml")
dl_test = StrandedProfile(ds,
incl_chromosomes=test_chr,  # evaluate on held-out chromosomes only
peak_width=SEQ_WIDTH,
shuffle=False,  # keep example order aligned between targets and predictions
target_transformer=AppendCounts())
test = dl_test.load_all(num_workers=28)  # NOTE(review): worker count is machine-specific
y_pred = model.predict(test['inputs'], verbose=1)
# w=1000,filters=128: counts-prediction evaluation on the test set.
yt = test["targets"]['counts/DNase'].mean(-1)
yp = y_pred[ds.task2idx("DNase", 'counts')].mean(-1)
regression_eval(yt, yp, alpha=0.05)
# Same evaluation restricted to examples with non-zero observed counts.
nonzero = yt > 0
regression_eval(yt[nonzero], yp[nonzero], alpha=0.01)
# Full joint distribution.
plt.hexbin(yp, yt, gridsize=(40, 30), cmap=plt.cm.BuGn)
# Zoom into the mid-range mode.
ytc = yt
mid = (ytc > 1) & (ytc < 3.5)
plt.hexbin(yp[mid], yt[mid], gridsize=(30, 15), cmap=plt.cm.BuGn)
# w=1000: overall counts evaluation.
regression_eval(test["targets"]['counts/DNase'].mean(-1),
                y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.05)
# BUG FIX: `counts` was referenced here before being defined further below
# (out-of-order notebook cells caused a NameError); compute it before use.
counts = test["targets"]['profile/DNase'].sum(axis=-1).sum(axis=-1)
# Evaluation restricted to outlier examples (total read count > 25, ~0.4%).
regression_eval(test["targets"]['counts/DNase'].mean(-1)[counts > 25],
                y_pred[ds.task2idx("DNase", 'counts')].mean(-1)[counts > 25], alpha=0.05)
import scipy.stats as stats
# Distribution diagnostics on per-example total read counts
# (summed over positions and strands).
counts = test["targets"]['profile/DNase'].sum(axis=-1).sum(axis=-1)
low = counts[counts < 50]
plt.hist(low, bins=50)
np.mean(counts > 25)  # ~0.4% of the data are outliers
# Poisson Q-Q plot on the non-outlier bulk: Poisson is the right loss function.
bulk = counts[counts < 25]
stats.probplot(bulk, sparams=(bulk.mean()), dist="poisson", plot=plt)
# Log-normal check.
stats.probplot(np.log(1 + counts), dist="norm", plot=plt)
# Same as the counts/DNase target.
stats.probplot(test["targets"]['counts/DNase'].mean(-1), dist="norm", plot=plt)
# Poisson on all counts (outliers included).
stats.probplot(counts, sparams=(counts.mean()), dist="poisson", plot=plt)
# Negative binomial with hand-picked shape parameters.
stats.probplot(counts, sparams=(0.2, 0.001), dist="nbinom", plot=plt)
stats.probplot(low, sparams=(0.2, 0.001), dist="nbinom", plot=plt)