Goal

Analyze models with w=200

  • kmer:
    • predictions: data/processed/dnase/6mer/retrained.IMR90_6mer.w200.normalized.preds.h5
    • model: new_model.train-pos3.w200.normalized.pkl
    • [x] compare weights with the obtained pkl file - do they match now
  • BPNet model:
    • w200
    • w=200,filters=64
    • w=200,filters=128
    • w=200,n_dil_layers=2,filters=128

Conclusions

  • best models: f"{ddir}/processed/dnase/exp/models/background/models/w=200,filters=128"

TODO

  • [ ] train a BPNet model with poisson loss
  • [x] make the same notebook for 1-kb
In [11]:
import matplotlib.pyplot as plt
import basepair
from basepair.plots import regression_eval
from basepair.losses import MultichannelMultinomialNLL
from basepair.exp.dnase.models import KmerBiasModel
from kipoi.readers import HDF5Reader
from basepair.utils import read_pkl
from keras.models import load_model
from basepair.cli.schemas import DataSpec, TaskSpec
from basepair.datasets import StrandedProfile
from basepair.datasets import chip_exo_nexus
from pathlib import Path
import pandas as pd
import numpy as np
from basepair.config import create_tf_session, get_data_dir

ddir = get_data_dir()
SEQ_WIDTH = 200

K-mer models

Re-trained

In [4]:
# get the predictions
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/retrained.IMR90_6mer.w200.normalized.preds.h5")
r.open()
preds = r.f['/preds'][:]
targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)
r.close()
In [6]:
# normalize
y_true = targets[:,3:(1000-2)]
yt_counts = np.log(1+y_true.sum(axis=-1))
yp_counts = np.log(1+preds.sum(axis=-1))
In [9]:
regression_eval(yt_counts, yp_counts, alpha=0.1)

Existing

In [45]:
# get the predictions
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/IMR90_6mer.w200.preds.h5")
r.open()
preds = r.f['/preds'][:]
targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)
r.close()
In [46]:
# normalize
y_true = targets[:,3:(1000-2)]
yt_counts = np.log(1+y_true.sum(axis=-1))
yp_counts = np.log(1+preds.sum(axis=-1))
In [47]:
regression_eval(yt_counts, yp_counts, alpha=0.1)

Re-trained vs existing

In [13]:
km_new = read_pkl(f"{ddir}/processed/dnase/6mer/new_model.train-pos3.w200.normalized.pkl")
In [14]:
km_old_pkl = read_pkl(f"{ddir}/raw/dnase/6mer/IMR90_6mer.pickle")
km_old = KmerBiasModel({k: float(v['forward']) for k,v in km_old_pkl.items()})
In [17]:
def kmermodel2df(km):
    return pd.DataFrame([{"kmer": k, "value": v} for k,v in km.d.items()])

dfm = pd.merge(kmermodel2df(km_new), kmermodel2df(km_old), on='kmer', suffixes=('_new', '_old'))
In [18]:
dfm.head()
Out[18]:
kmer value_new value_old
0 AAAAAA 0.000002 0.001431
1 AAAAAC 0.000055 0.003110
2 AAAAAG 0.000080 0.001913
3 AAAAAT 0.000020 0.000993
4 AAAACA 0.000053 0.004403
In [19]:
plt.scatter(np.log(1+dfm.value_new*100), np.log(1+dfm.value_old), alpha=0.1)
plt.xlabel("value_new")
plt.ylabel("value_old")
Out[19]:
Text(0,0.5,'value_old')

BPNet

In [16]:
mdir_root = Path(f"{ddir}/processed/dnase/exp/models/background/models")
mdirs = list(mdir_root.glob("w=200*"))
mdirs
Out[16]:
[PosixPath('/users/avsec/workspace/basepair/basepair/../data/processed/dnase/exp/models/background/models/w=200,n_dil_layers=2,filters=128'),
 PosixPath('/users/avsec/workspace/basepair/basepair/../data/processed/dnase/exp/models/background/models/w=200,filters=64'),
 PosixPath('/users/avsec/workspace/basepair/basepair/../data/processed/dnase/exp/models/background/models/w=200,filters=128')]
In [17]:
# performance comparison
def get_performance(mdir):
    try:
        dfh = pd.read_csv(f"{mdir}/history.csv")
        return dict(dfh.iloc[dfh.val_loss.idxmin()])
    except:
        return {}

df_exp = pd.DataFrame([{**get_performance(md), "name":md.name}
                        for md in mdirs])

df_exp.sort_values("val_counts/DNase_loss")
Out[17]:
counts/DNase_loss epoch loss name profile/DNase_loss val_counts/DNase_loss val_loss val_profile/DNase_loss
2 0.261141 8.0 6.455075 w=200,filters=128 6.193934 0.263611 7.001217 6.737606
0 0.261051 12.0 6.432851 w=200,n_dil_layers=2,filters=128 6.171800 0.264330 6.971433 6.707103
1 0.263549 1.0 6.462193 w=200,filters=64 6.198643 0.266819 6.993509 6.726691
In [18]:
# select the model
mdir = mdir_root / "w=200,filters=128" 
In [55]:
# select the model
mdir = mdir_root / "w=200,n_dil_layers=2,filters=128" 
In [2]:
create_tf_session(0)
Out[2]:
<tensorflow.python.client.session.Session at 0x7f83b426ed30>
In [56]:
model = load_model(mdir / "model.h5",
                   custom_objects={"mc_multinomial_nll": MultichannelMultinomialNLL(1),
                                   "MultichannelMultinomialNLL": MultichannelMultinomialNLL(1)})
In [22]:
from basepair.config import test_chr
from basepair.preproc import AppendCounts
ds = DataSpec.load(mdir / "dataspec.yaml")
dl_test = StrandedProfile(ds, 
                          incl_chromosomes=test_chr, 
                          peak_width=SEQ_WIDTH, 
                          shuffle=False,
                          target_transformer=AppendCounts())
Skipped 3 intervals outside of the genome size
In [23]:
test = dl_test.load_all(num_workers=28)
y_true = test["targets"]
100%|██████████| 83202/83202 [00:47<00:00, 1758.93it/s]
In [57]:
y_pred = model.predict(test['inputs'], verbose=1)
2662446/2662446 [==============================] - 211s 79us/step
In [44]:
# w=200,filters=128
regression_eval(y_true['counts/DNase'].mean(-1), y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.1)
In [58]:
# w=200,n_dil_layers=2,filters=128
regression_eval(y_true['counts/DNase'].mean(-1), y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.1)
In [54]:
# w=200,filters=64
regression_eval(y_true['counts/DNase'].mean(-1), y_pred[ds.task2idx("DNase", 'counts')].mean(-1), alpha=0.1)