# Analyze models with w=200
# NOTE(review): a stray path expression
# (f"{ddir}/processed/dnase/exp/models/background/models/w=200,filters=128")
# was fused onto the import line during notebook export; it referenced `ddir`
# before its definition (NameError) and had no effect, so it was removed.
import matplotlib.pyplot as plt
import basepair
from basepair.plots import regression_eval
from basepair.losses import MultichannelMultinomialNLL
from basepair.exp.dnase.models import KmerBiasModel
from kipoi.readers import HDF5Reader
from basepair.utils import read_pkl
from keras.models import load_model
from basepair.cli.schemas import DataSpec, TaskSpec
from basepair.datasets import StrandedProfile
from basepair.datasets import chip_exo_nexus
from pathlib import Path
import pandas as pd
import numpy as np
from basepair.config import create_tf_session, get_data_dir
ddir = get_data_dir()
# All models in this analysis were trained on 200 bp input sequences
SEQ_WIDTH = 200
# get the predictions of the re-trained, normalized 6-mer bias model
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/retrained.IMR90_6mer.w200.normalized.preds.h5")
r.open()
try:
    preds = r.f['/preds'][:]
    # collapse the last (strand) axis into a single per-position profile
    targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)
finally:
    # release the HDF5 handle even if one of the reads above raises
    r.close()
# normalize
# NOTE(review): the slice 3:(1000-2) trims the profile edges and assumes the
# stored target profile is 1000 bp wide -- confirm against the preds pipeline
y_true = targets[:, 3:(1000 - 2)]
yt_counts = np.log(1 + y_true.sum(axis=-1))
yp_counts = np.log(1 + preds.sum(axis=-1))
# observed vs. predicted log total counts
regression_eval(yt_counts, yp_counts, alpha=0.1)
# get the predictions of the original (non-retrained) 6-mer model
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/IMR90_6mer.w200.preds.h5")
r.open()
try:
    preds = r.f['/preds'][:]
    # collapse the last (strand) axis into a single per-position profile
    targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)
finally:
    # release the HDF5 handle even if one of the reads above raises
    r.close()
# normalize
# NOTE(review): same edge-trimming slice as above; assumes a 1000 bp profile
y_true = targets[:, 3:(1000 - 2)]
yt_counts = np.log(1 + y_true.sum(axis=-1))
yp_counts = np.log(1 + preds.sum(axis=-1))
# observed vs. predicted log total counts
regression_eval(yt_counts, yp_counts, alpha=0.1)
# Load the newly trained k-mer bias model and the legacy one for comparison.
# NOTE: read_pkl deserializes pickle files -- safe only for trusted local data.
km_new = read_pkl(f"{ddir}/processed/dnase/6mer/new_model.train-pos3.w200.normalized.pkl")
km_old_pkl = read_pkl(f"{ddir}/raw/dnase/6mer/IMR90_6mer.pickle")
# Keep only the forward-strand value of each k-mer from the legacy pickle
forward_values = {}
for kmer, entry in km_old_pkl.items():
    forward_values[kmer] = float(entry['forward'])
km_old = KmerBiasModel(forward_values)
def kmermodel2df(km):
    """Flatten a k-mer bias model into a DataFrame with `kmer`/`value` columns."""
    records = []
    for kmer, value in km.d.items():
        records.append({"kmer": kmer, "value": value})
    return pd.DataFrame(records)
# Join the new and old per-kmer values on the k-mer sequence
dfm = pd.merge(kmermodel2df(km_new), kmermodel2df(km_old),
               on='kmer', suffixes=('_new', '_old'))
dfm.head()  # bare expression: notebook display artifact, no effect in a script
# Scatter the two models' values on a log scale (new values rescaled by 100)
x_vals = np.log(1 + dfm.value_new * 100)
y_vals = np.log(1 + dfm.value_old)
plt.scatter(x_vals, y_vals, alpha=0.1)
plt.xlabel("value_new")
plt.ylabel("value_old")
# Collect every trained background-model directory for window size 200
mdir_root = Path(ddir) / "processed/dnase/exp/models/background/models"
mdirs = list(mdir_root.glob("w=200*"))
mdirs  # bare expression: notebook display artifact, no effect in a script
# performance comparison
def get_performance(mdir):
    """Return the best-epoch training metrics of a model directory.

    Parameters
    ----------
    mdir : path-like
        Model directory expected to contain ``history.csv`` with a
        ``val_loss`` column.

    Returns
    -------
    dict
        Metrics of the row with the minimal validation loss, or ``{}``
        when the history file is missing or malformed.
    """
    try:
        dfh = pd.read_csv(f"{mdir}/history.csv")
        # the row with the lowest validation loss = best checkpoint
        return dict(dfh.iloc[dfh.val_loss.idxmin()])
    except Exception:
        # Missing/empty history or absent val_loss column -> no metrics.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        return {}
# One row of best-epoch metrics per model directory, tagged with its name
rows = []
for model_dir in mdirs:
    record = {**get_performance(model_dir), "name": model_dir.name}
    rows.append(record)
df_exp = pd.DataFrame(rows)
# Returns a sorted copy for display; df_exp itself is left unchanged
df_exp.sort_values("val_counts/DNase_loss")
# select the model
# NOTE(review): an earlier notebook cell selected "w=200,filters=128" here;
# that assignment was immediately overwritten by the one below (dead store),
# so it is kept only as a comment for reference.
# mdir = mdir_root / "w=200,filters=128"
mdir = mdir_root / "w=200,n_dil_layers=2,filters=128"
# bind TensorFlow to GPU 0
create_tf_session(0)
# the custom loss must be supplied explicitly for Keras to deserialize it
model = load_model(mdir / "model.h5",
                   custom_objects={"mc_multinomial_nll": MultichannelMultinomialNLL(1),
                                   "MultichannelMultinomialNLL": MultichannelMultinomialNLL(1)})
from basepair.config import test_chr
from basepair.preproc import AppendCounts
# Evaluate on the held-out test chromosomes using the model's own dataspec
ds = DataSpec.load(mdir / "dataspec.yaml")
# shuffle=False keeps the example order fixed so predictions align with targets
dl_test = StrandedProfile(ds,
                         incl_chromosomes=test_chr,
                         peak_width=SEQ_WIDTH,
                         shuffle=False,
                         target_transformer=AppendCounts())
# Materialize the whole test set in memory (parallel data loading)
test = dl_test.load_all(num_workers=28)
y_true = test["targets"]
# presumably test['inputs'] is the one-hot sequence batch -- verify in StrandedProfile
y_pred = model.predict(test['inputs'], verbose=1)
# NOTE(review): the three identical calls below are notebook re-run artifacts --
# each was presumably executed after re-loading a different model checkpoint
# (named in the comment above each call). Run as a flat script, all three
# evaluate the most recently loaded model.
# w=200,filters=128
regression_eval(y_true['counts/DNase'].mean(-1), y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.1)
# w=200,n_dil_layers=2,filters=128
regression_eval(y_true['counts/DNase'].mean(-1), y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.1)
# w=200,filters=64
regression_eval(y_true['counts/DNase'].mean(-1), y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.1)