In [1]:
# Example parameters (overridden by the papermill-injected parameters cell below).
# Note: ddir deliberately has no trailing slash — every derived path below adds
# its own '/', so a trailing slash would yield '//' in the final paths.
ddir = "/users/avsec/workspace/basepair/data"
model_file = f"{ddir}/processed/chipnexus/exp/models/oct-sox-nanog-klf/models/filters=128/model.h5"
dataspec_file = f"{ddir}/processed/chipnexus/exp/models/oct-sox-nanog-klf/models/filters=128/dataspec.yaml"
history_file = f"{ddir}/processed/chipnexus/exp/models/oct-sox-nanog-klf/models/filters=128/history.csv"
seq_width = 1000   # input sequence window width (bp) fed to the model
gpu = 0            # GPU id (example default; the injected cell below overrides it)
num_workers = 10   # dataloader worker processes
In [2]:
# Parameters (papermill-injected cell: these values override the example
# defaults above for this particular run)
model_file = "/users/amr1/bpnet/basepair/src/cage/model_1/model.h5"
dataspec_file = "/users/amr1/bpnet/basepair/src/cage/model_1/dataspec.yaml"
history_file = "/users/amr1/bpnet/basepair/src/cage/model_1/history.csv"
seq_width = 1000
gpu = 5
num_workers = 10
In [3]:
# Imports: pandas/numpy, keras model loading, and the project's `basepair`
# helpers (data loading, losses, plotting, evaluation).
import basepair
import pandas as pd
import numpy as np
from basepair.cli.schemas import DataSpec, TaskSpec
from pathlib import Path
from keras.models import load_model
from basepair.config import create_tf_session, get_data_dir
from basepair.datasets import StrandedProfile
from basepair.preproc import AppendCounts
from basepair.losses import MultichannelMultinomialNLL
from basepair.config import valid_chr, test_chr
from basepair.plots import regression_eval, plot_loss
from basepair.cli.evaluate import eval_profile
from basepair import samplers 
from basepair.math import softmax

# NOTE(review): this silently overwrites the `ddir` set in the parameters
# cells above — confirm that is intended (the absolute *_file paths above do
# not depend on it, so downstream cells are unaffected).
ddir = get_data_dir()
Using TensorFlow backend.
2018-12-11 22:06:31,894 [WARNING] git-lfs not installed
In [4]:
# Create the TF session; an empty string presumably selects no GPU (CPU-only)
# — see basepair.config.create_tf_session. Note `gpu` from the parameters
# cell is not used here.
create_tf_session("")  # don't use gpu
Out[4]:
<tensorflow.python.client.session.Session at 0x7fdca5892ba8>
In [5]:
# Load the trained model; both names the saved model may reference for the
# custom loss are mapped to a MultichannelMultinomialNLL(1) instance.
_nll_custom_objects = {name: MultichannelMultinomialNLL(1)
                       for name in ("mc_multinomial_nll",
                                    "MultichannelMultinomialNLL")}
model = load_model(model_file, custom_objects=_nll_custom_objects)
WARNING:tensorflow:From /users/amr1/miniconda3/envs/basepair/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py:497: calling conv1d (from tensorflow.python.ops.nn_ops) with data_format=NHWC is deprecated and will be removed in a future version.
Instructions for updating:
`NHWC` for data_format is deprecated, use `NWC` instead
2018-12-11 22:06:37,077 [WARNING] From /users/amr1/miniconda3/envs/basepair/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py:497: calling conv1d (from tensorflow.python.ops.nn_ops) with data_format=NHWC is deprecated and will be removed in a future version.
Instructions for updating:
`NHWC` for data_format is deprecated, use `NWC` instead
WARNING:tensorflow:From /users/amr1/miniconda3/envs/basepair/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Use the retry module or similar alternatives.
2018-12-11 22:06:44,336 [WARNING] From /users/amr1/miniconda3/envs/basepair/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Use the retry module or similar alternatives.

Learning curves

In [6]:
# Load the dataspec (track/peak configuration); task names are the keys of
# its task_specs mapping (a single 'CAGE' task for this run — see outputs).
ds = DataSpec.load(dataspec_file)
tasks = list(ds.task_specs)
In [7]:
# Best metrics: show every logged metric at the epoch with the lowest
# validation loss.
dfh = pd.read_csv(history_file)
# idxmin() returns an index *label*, so label-based .loc (not positional
# .iloc) is the correct lookup; the two only coincide for the default
# RangeIndex produced by read_csv.
dict(dfh.loc[dfh.val_loss.idxmin()])
Out[7]:
{'epoch': 18.0,
 'counts/CAGE_loss': 6.048933212028405,
 'loss': 880.4811114525972,
 'profile/CAGE_loss': 819.9917812786093,
 'val_counts/CAGE_loss': 6.118661873266991,
 'val_loss': 1880.2062517019085,
 'val_profile/CAGE_loss': 1819.0196411340928}
In [8]:
# Learning curves for the per-task *profile* losses
plot_loss(dfh,
          [f"profile/{task}_loss" for task in tasks],
          figsize=(len(tasks) * 4, 4))
In [9]:
# Learning curves for the per-task *counts* losses
plot_loss(dfh,
          [f"counts/{task}_loss" for task in tasks],
          figsize=(len(tasks) * 4, 4))

Evaluation

In [10]:
# Validation-set dataloader: fixed-width windows on the validation
# chromosomes, unshuffled, with total counts appended to the targets.
dl_valid = StrandedProfile(
    ds,
    incl_chromosomes=valid_chr,
    peak_width=seq_width,
    shuffle=False,
    target_transformer=AppendCounts(),
)
In [11]:
# Materialize the entire validation set into memory
valid = dl_valid.load_all(num_workers=num_workers)
y_true = valid["targets"]  # dict keyed 'profile/<task>' and 'counts/<task>' (used below)
100%|██████████| 173/173 [00:01<00:00, 94.37it/s]
In [12]:
from basepair.BPNet import BiasModel
In [13]:
# If any task defines a bias model, run the bias correction over the inputs
# before prediction.
if any(spec.bias_model for spec in ds.task_specs.values()):
    bm = BiasModel(ds)
    valid['inputs'] = bm.predict([valid["inputs"]])[0]
In [14]:
# Forward pass over the full validation set; y_pred is indexed per model
# output via ds.task2idx(task, head) below.
y_pred = model.predict(valid['inputs'], verbose=1)
5535/5535 [==============================] - 31s 6ms/step
In [15]:
import matplotlib.pyplot as plt
In [16]:
# Predicted vs. observed total counts per task, averaged over the last axis
# (presumably the two strands — TODO confirm).
for task in tasks:
    # NOTE(review): the '<Figure ... with 0 Axes>' output suggests
    # regression_eval opens its own figure, leaving this plt.figure()
    # empty — confirm and drop if redundant.
    plt.figure()
    yt = y_true[f'counts/{task}'].mean(-1)
    yp = y_pred[ds.task2idx(task, 'counts')].mean(-1)
    regression_eval(yt, 
                    yp, alpha=0.1, task=task)
<Figure size 432x288 with 0 Axes>

Profile evaluation

In [17]:
from joblib import Parallel, delayed
import basepair
In [18]:
# Evaluate the first (here: only) task
task=tasks[0]
In [19]:
# Softmax the profile head output into per-position probabilities; grab the
# matching observed profile.
yp = softmax(y_pred[ds.task2idx(task, "profile")])
yt = y_true["profile/" + task]
In [20]:
# Flattened per-position fraction of each window's total counts; the +1 in
# the denominator keeps windows with zero total counts finite.
x = np.ravel(yt / (1+yt.sum(axis=-2, keepdims=True)))
In [21]:
# Distribution of count fractions: left — all values below 0.04;
# right — the same range with the near-zero mass (<= 1e-4) excluded.
plt.figure(figsize=(12, 3))
plt.subplot(121)
plt.hist(x[x < 0.04], bins=100);
plt.subplot(122)
plt.hist(x[(x > 0.0001) & (x < 0.04)], bins=100);
In [22]:
# Fraction of positions above the 1e-4 count-fraction threshold used above
np.mean(x>0.0001)
Out[22]:
0.014486359530261969
In [23]:
# Profile evaluation (auPRC at bin sizes 1 and 10) run in parallel, one job
# per task; results are concatenated with a `task` column attached.
_eval_jobs = [delayed(eval_profile)(y_true["profile/" + task],
                                    softmax(y_pred[ds.task2idx(task, "profile")]),
                                    binsizes=[1, 10],
                                    pos_min_threshold=0.015,
                                    neg_max_threshold=0.005)
              for task in tasks]
out_df = Parallel(n_jobs=len(tasks))(_eval_jobs)
df = pd.concat([res.assign(task=task) for res, task in zip(out_df, tasks)])
/users/amr1/bpnet/basepair/basepair/cli/evaluate.py:160: RuntimeWarning: divide by zero encountered in true_divide
  fracs = yt / yt.sum(axis=1, keepdims=True)
/users/amr1/bpnet/basepair/basepair/cli/evaluate.py:160: RuntimeWarning: invalid value encountered in true_divide
  fracs = yt / yt.sum(axis=1, keepdims=True)
/users/amr1/bpnet/basepair/basepair/cli/evaluate.py:161: RuntimeWarning: invalid value encountered in greater_equal
  is_peak = (fracs >= pos_min_threshold).astype(float)
/users/amr1/bpnet/basepair/basepair/cli/evaluate.py:162: RuntimeWarning: invalid value encountered in less
  ambigous = (fracs < pos_min_threshold) & (fracs >= neg_max_threshold)
/users/amr1/bpnet/basepair/basepair/cli/evaluate.py:162: RuntimeWarning: invalid value encountered in greater_equal
  ambigous = (fracs < pos_min_threshold) & (fracs >= neg_max_threshold)
In [24]:
# Per-binsize evaluation metrics (auPRC vs. random baseline) for each task
df
Out[24]:
auprc binsize frac_ambigous imbalance n_positives random_auprc task
0 0.185330 1 0.007924 0.009905 25766 0.009802 CAGE
1 0.313667 10 0.026690 0.057268 14615 0.055863 CAGE

Profile plots

In [25]:
from basepair.plots import plot_profiles
from basepair import samplers
In [26]:
# NOTE(review): `tasks` and `task` were already defined above; re-defined
# here so this section can be (re-)run on its own.
tasks = list(ds.task_specs)
task = tasks[0]
In [27]:
#idx_list = samplers.top_sum_count(y_true['profile/Nanog'], 10)
In [28]:
#idx_list = samplers.top_sum_count(y_true['profile/Oct4'], 10)
In [29]:
# Rank windows by total observed signal summed across all tasks (mean over
# the last axis first), keep the top 10; the trailing [:, :, np.newaxis]
# restores the rank expected by top_sum_count.
idx_list = samplers.top_sum_count(sum([y_true[f'profile/{task}'].mean(-1) for task in tasks])[:,:,np.newaxis], 10)
In [30]:
#idx_list = samplers.top_max_count(y_true[f'profile/{task}'], 10)
In [31]:
# np.random.seed(42)
# idx_list = samplers.random(y_true[f'profile/{task}'], 20, )
In [32]:
# Peek at the top-ranked window indices
idx_list[:4]
Out[32]:
Int64Index([1725, 1724, 1723, 4247], dtype='int64')
In [33]:
# Observed vs. predicted profiles for the selected windows, x-axis cropped
# to positions 200-800 of the 1000-wide window
plot_profiles(y_true, y_pred, tasks, idx_list, preproc=None, figsize=(24, 8), tic_freq=50, xlim=[200,800])