Goal

  • benchmark the k-mer bias model

Tasks

  • [x] get the training data
  • [x] get the default 6-mer model
  • [x] make an abstract bias prediction class
  • [x] evaluate the default model
    • [ ] write a comprehensive evaluation function
  • [ ] train your own bias-prediction 6-mer model
  • [ ] evaluate
    • [ ] do they match? If not, why?
  • [ ] Evaluate your own model
  • [ ] Improve your own model

get the training data

In [1]:
import matplotlib.pyplot as plt

import basepair
from basepair.cli.schemas import DataSpec, TaskSpec
from basepair.datasets import chip_exo_nexus
from pathlib import Path
import pandas as pd
import numpy as np
from basepair.config import create_tf_session, get_data_dir

ddir = get_data_dir()
/users/avsec/bin/anaconda3/envs/chipnexus/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
In [2]:
mkdir -p {ddir}/raw/dnase/6mer
In [460]:
!wget https://raw.githubusercontent.com/jpiper/pyDNase/master/pyDNase/data/IMR90_6mer.pickle -O {ddir}/raw/dnase/6mer/IMR90_6mer.pickle
--2018-08-13 01:55:39--  https://raw.githubusercontent.com/jpiper/pyDNase/master/pyDNase/data/IMR90_6mer.pickle
Resolving raw.githubusercontent.com... 151.101.188.133
Connecting to raw.githubusercontent.com|151.101.188.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 311612 (304K) [text/plain]
Saving to: ‘/users/avsec/workspace/basepair/basepair/../data/raw/dnase/6mer/IMR90_6mer.pickle’

/users/avsec/worksp 100%[===================>] 304.31K  --.-KB/s    in 0.03s   

2018-08-13 01:55:39 (9.36 MB/s) - ‘/users/avsec/workspace/basepair/basepair/../data/raw/dnase/6mer/IMR90_6mer.pickle’ saved [311612/311612]

In [3]:
from basepair.utils import read_pkl
In [4]:
# Load the kmer file
kmer_model = read_pkl(f"{ddir}/raw/dnase/6mer/IMR90_6mer.pickle")
# dict -> pd.DataFrame
df = pd.DataFrame([{"motif": k, **v} for k,v in kmer_model.items()])
df['forward'] = df['forward'].astype(float)
df['reverse'] = df['reverse'].astype(float)
In [5]:
kmer_model['AAAAAA']
Out[5]:
{'forward': '0.001430626', 'reverse': '0.000936022'}
In [6]:
len(df) # same as 4**6
Out[6]:
4096
In [7]:
df.head()
Out[7]:
forward motif reverse
0 0.017661 GAACGT 0.008511
1 0.003539 CTTCTT 0.002358
2 0.003063 CACCCT 0.008271
3 0.009527 GAACGG 0.003067
4 0.018333 GAACGC 0.004380
In [4]:
from basepair.utils import reverse_complement
In [9]:
df['rc_motif'] = df.motif.map(reverse_complement)
In [10]:
df.head()
Out[10]:
forward motif reverse rc_motif
0 0.017661 GAACGT 0.008511 ACGTTC
1 0.003539 CTTCTT 0.002358 AAGAAG
2 0.003063 CACCCT 0.008271 AGGGTG
3 0.009527 GAACGG 0.003067 CCGTTC
4 0.018333 GAACGC 0.004380 GCGTTC
In [12]:
df.forward.plot.hist(bins=50);
plt.xlabel("Forward");
In [13]:
df.plot.scatter('forward', 'reverse', alpha=0.3);

Why are there forward and reverse strands?

In [14]:
dfm = pd.merge(df[['motif', 'forward']], df[['rc_motif', 'reverse']], left_on='motif', right_on='rc_motif')
In [15]:
dfm.plot.scatter('forward', 'reverse', alpha=0.3);
In [16]:
np.allclose(dfm.forward, dfm.reverse)
Out[16]:
True

Both forward and reverse scores are symmetrically stored

make an abstract bias prediction class

In [143]:
from basepair.exp.dnase.models import KmerBiasModel
In [87]:
kmb.counts.get("asd", 0)
Out[87]:
0
In [88]:
kmb = KmerBiasModel()
In [89]:
len("ACGTAGTCGAG")
Out[89]:
11
In [90]:
kmb.fit_on_batch(encodeDNA(["ACGTAGTCGAG"]*10), np.ones((10,11)))
In [93]:
kmb.normalize(False)
In [94]:
kmb.d
Out[94]:
{'ACGTAG': 0.16666666666666666,
 'CGTAGT': 0.16666666666666666,
 'GTAGTC': 0.16666666666666666,
 'TAGTCG': 0.16666666666666666,
 'AGTCGA': 0.16666666666666666,
 'GTCGAG': 0.16666666666666666}
In [91]:
kmb.normalize()
In [92]:
kmb.d
Out[92]:
{'AAAAAA': 0.0,
 'AAAAAC': 0.0,
 'AAAAAG': 0.0,
 'AAAAAT': 0.0,
 'AAAACA': 0.0,
 'AAAACC': 0.0,
 'AAAACG': 0.0,
 'AAAACT': 0.0,
 'AAAAGA': 0.0,
 'AAAAGC': 0.0,
 'AAAAGG': 0.0,
 'AAAAGT': 0.0,
 'AAAATA': 0.0,
 'AAAATC': 0.0,
 'AAAATG': 0.0,
 'AAAATT': 0.0,
 'AAACAA': 0.0,
 'AAACAC': 0.0,
 'AAACAG': 0.0,
 'AAACAT': 0.0,
 'AAACCA': 0.0,
 'AAACCC': 0.0,
 'AAACCG': 0.0,
 'AAACCT': 0.0,
 'AAACGA': 0.0,
 'AAACGC': 0.0,
 'AAACGG': 0.0,
 'AAACGT': 0.0,
 'AAACTA': 0.0,
 'AAACTC': 0.0,
 'AAACTG': 0.0,
 'AAACTT': 0.0,
 'AAAGAA': 0.0,
 'AAAGAC': 0.0,
 'AAAGAG': 0.0,
 'AAAGAT': 0.0,
 'AAAGCA': 0.0,
 'AAAGCC': 0.0,
 'AAAGCG': 0.0,
 'AAAGCT': 0.0,
 'AAAGGA': 0.0,
 'AAAGGC': 0.0,
 'AAAGGG': 0.0,
 'AAAGGT': 0.0,
 'AAAGTA': 0.0,
 'AAAGTC': 0.0,
 'AAAGTG': 0.0,
 'AAAGTT': 0.0,
 'AAATAA': 0.0,
 'AAATAC': 0.0,
 'AAATAG': 0.0,
 'AAATAT': 0.0,
 'AAATCA': 0.0,
 'AAATCC': 0.0,
 'AAATCG': 0.0,
 'AAATCT': 0.0,
 'AAATGA': 0.0,
 'AAATGC': 0.0,
 'AAATGG': 0.0,
 'AAATGT': 0.0,
 'AAATTA': 0.0,
 'AAATTC': 0.0,
 'AAATTG': 0.0,
 'AAATTT': 0.0,
 'AACAAA': 0.0,
 'AACAAC': 0.0,
 'AACAAG': 0.0,
 'AACAAT': 0.0,
 'AACACA': 0.0,
 'AACACC': 0.0,
 'AACACG': 0.0,
 'AACACT': 0.0,
 'AACAGA': 0.0,
 'AACAGC': 0.0,
 'AACAGG': 0.0,
 'AACAGT': 0.0,
 'AACATA': 0.0,
 'AACATC': 0.0,
 'AACATG': 0.0,
 'AACATT': 0.0,
 'AACCAA': 0.0,
 'AACCAC': 0.0,
 'AACCAG': 0.0,
 'AACCAT': 0.0,
 'AACCCA': 0.0,
 'AACCCC': 0.0,
 'AACCCG': 0.0,
 'AACCCT': 0.0,
 'AACCGA': 0.0,
 'AACCGC': 0.0,
 'AACCGG': 0.0,
 'AACCGT': 0.0,
 'AACCTA': 0.0,
 'AACCTC': 0.0,
 'AACCTG': 0.0,
 'AACCTT': 0.0,
 'AACGAA': 0.0,
 'AACGAC': 0.0,
 'AACGAG': 0.0,
 'AACGAT': 0.0,
 'AACGCA': 0.0,
 'AACGCC': 0.0,
 'AACGCG': 0.0,
 'AACGCT': 0.0,
 'AACGGA': 0.0,
 'AACGGC': 0.0,
 'AACGGG': 0.0,
 'AACGGT': 0.0,
 'AACGTA': 0.0,
 'AACGTC': 0.0,
 'AACGTG': 0.0,
 'AACGTT': 0.0,
 'AACTAA': 0.0,
 'AACTAC': 0.0,
 'AACTAG': 0.0,
 'AACTAT': 0.0,
 'AACTCA': 0.0,
 'AACTCC': 0.0,
 'AACTCG': 0.0,
 'AACTCT': 0.0,
 'AACTGA': 0.0,
 'AACTGC': 0.0,
 'AACTGG': 0.0,
 'AACTGT': 0.0,
 'AACTTA': 0.0,
 'AACTTC': 0.0,
 'AACTTG': 0.0,
 'AACTTT': 0.0,
 'AAGAAA': 0.0,
 'AAGAAC': 0.0,
 'AAGAAG': 0.0,
 'AAGAAT': 0.0,
 'AAGACA': 0.0,
 'AAGACC': 0.0,
 'AAGACG': 0.0,
 'AAGACT': 0.0,
 'AAGAGA': 0.0,
 'AAGAGC': 0.0,
 'AAGAGG': 0.0,
 'AAGAGT': 0.0,
 'AAGATA': 0.0,
 'AAGATC': 0.0,
 'AAGATG': 0.0,
 'AAGATT': 0.0,
 'AAGCAA': 0.0,
 'AAGCAC': 0.0,
 'AAGCAG': 0.0,
 'AAGCAT': 0.0,
 'AAGCCA': 0.0,
 'AAGCCC': 0.0,
 'AAGCCG': 0.0,
 'AAGCCT': 0.0,
 'AAGCGA': 0.0,
 'AAGCGC': 0.0,
 'AAGCGG': 0.0,
 'AAGCGT': 0.0,
 'AAGCTA': 0.0,
 'AAGCTC': 0.0,
 'AAGCTG': 0.0,
 'AAGCTT': 0.0,
 'AAGGAA': 0.0,
 'AAGGAC': 0.0,
 'AAGGAG': 0.0,
 'AAGGAT': 0.0,
 'AAGGCA': 0.0,
 'AAGGCC': 0.0,
 'AAGGCG': 0.0,
 'AAGGCT': 0.0,
 'AAGGGA': 0.0,
 'AAGGGC': 0.0,
 'AAGGGG': 0.0,
 'AAGGGT': 0.0,
 'AAGGTA': 0.0,
 'AAGGTC': 0.0,
 'AAGGTG': 0.0,
 'AAGGTT': 0.0,
 'AAGTAA': 0.0,
 'AAGTAC': 0.0,
 'AAGTAG': 0.0,
 'AAGTAT': 0.0,
 'AAGTCA': 0.0,
 'AAGTCC': 0.0,
 'AAGTCG': 0.0,
 'AAGTCT': 0.0,
 'AAGTGA': 0.0,
 'AAGTGC': 0.0,
 'AAGTGG': 0.0,
 'AAGTGT': 0.0,
 'AAGTTA': 0.0,
 'AAGTTC': 0.0,
 'AAGTTG': 0.0,
 'AAGTTT': 0.0,
 'AATAAA': 0.0,
 'AATAAC': 0.0,
 'AATAAG': 0.0,
 'AATAAT': 0.0,
 'AATACA': 0.0,
 'AATACC': 0.0,
 'AATACG': 0.0,
 'AATACT': 0.0,
 'AATAGA': 0.0,
 'AATAGC': 0.0,
 'AATAGG': 0.0,
 'AATAGT': 0.0,
 'AATATA': 0.0,
 'AATATC': 0.0,
 'AATATG': 0.0,
 'AATATT': 0.0,
 'AATCAA': 0.0,
 'AATCAC': 0.0,
 'AATCAG': 0.0,
 'AATCAT': 0.0,
 'AATCCA': 0.0,
 'AATCCC': 0.0,
 'AATCCG': 0.0,
 'AATCCT': 0.0,
 'AATCGA': 0.0,
 'AATCGC': 0.0,
 'AATCGG': 0.0,
 'AATCGT': 0.0,
 'AATCTA': 0.0,
 'AATCTC': 0.0,
 'AATCTG': 0.0,
 'AATCTT': 0.0,
 'AATGAA': 0.0,
 'AATGAC': 0.0,
 'AATGAG': 0.0,
 'AATGAT': 0.0,
 'AATGCA': 0.0,
 'AATGCC': 0.0,
 'AATGCG': 0.0,
 'AATGCT': 0.0,
 'AATGGA': 0.0,
 'AATGGC': 0.0,
 'AATGGG': 0.0,
 'AATGGT': 0.0,
 'AATGTA': 0.0,
 'AATGTC': 0.0,
 'AATGTG': 0.0,
 'AATGTT': 0.0,
 'AATTAA': 0.0,
 'AATTAC': 0.0,
 'AATTAG': 0.0,
 'AATTAT': 0.0,
 'AATTCA': 0.0,
 'AATTCC': 0.0,
 'AATTCG': 0.0,
 'AATTCT': 0.0,
 'AATTGA': 0.0,
 'AATTGC': 0.0,
 'AATTGG': 0.0,
 'AATTGT': 0.0,
 'AATTTA': 0.0,
 'AATTTC': 0.0,
 'AATTTG': 0.0,
 'AATTTT': 0.0,
 'ACAAAA': 0.0,
 'ACAAAC': 0.0,
 'ACAAAG': 0.0,
 'ACAAAT': 0.0,
 'ACAACA': 0.0,
 'ACAACC': 0.0,
 'ACAACG': 0.0,
 'ACAACT': 0.0,
 'ACAAGA': 0.0,
 'ACAAGC': 0.0,
 'ACAAGG': 0.0,
 'ACAAGT': 0.0,
 'ACAATA': 0.0,
 'ACAATC': 0.0,
 'ACAATG': 0.0,
 'ACAATT': 0.0,
 'ACACAA': 0.0,
 'ACACAC': 0.0,
 'ACACAG': 0.0,
 'ACACAT': 0.0,
 'ACACCA': 0.0,
 'ACACCC': 0.0,
 'ACACCG': 0.0,
 'ACACCT': 0.0,
 'ACACGA': 0.0,
 'ACACGC': 0.0,
 'ACACGG': 0.0,
 'ACACGT': 0.0,
 'ACACTA': 0.0,
 'ACACTC': 0.0,
 'ACACTG': 0.0,
 'ACACTT': 0.0,
 'ACAGAA': 0.0,
 'ACAGAC': 0.0,
 'ACAGAG': 0.0,
 'ACAGAT': 0.0,
 'ACAGCA': 0.0,
 'ACAGCC': 0.0,
 'ACAGCG': 0.0,
 'ACAGCT': 0.0,
 'ACAGGA': 0.0,
 'ACAGGC': 0.0,
 'ACAGGG': 0.0,
 'ACAGGT': 0.0,
 'ACAGTA': 0.0,
 'ACAGTC': 0.0,
 'ACAGTG': 0.0,
 'ACAGTT': 0.0,
 'ACATAA': 0.0,
 'ACATAC': 0.0,
 'ACATAG': 0.0,
 'ACATAT': 0.0,
 'ACATCA': 0.0,
 'ACATCC': 0.0,
 'ACATCG': 0.0,
 'ACATCT': 0.0,
 'ACATGA': 0.0,
 'ACATGC': 0.0,
 'ACATGG': 0.0,
 'ACATGT': 0.0,
 'ACATTA': 0.0,
 'ACATTC': 0.0,
 'ACATTG': 0.0,
 'ACATTT': 0.0,
 'ACCAAA': 0.0,
 'ACCAAC': 0.0,
 'ACCAAG': 0.0,
 'ACCAAT': 0.0,
 'ACCACA': 0.0,
 'ACCACC': 0.0,
 'ACCACG': 0.0,
 'ACCACT': 0.0,
 'ACCAGA': 0.0,
 'ACCAGC': 0.0,
 'ACCAGG': 0.0,
 'ACCAGT': 0.0,
 'ACCATA': 0.0,
 'ACCATC': 0.0,
 'ACCATG': 0.0,
 'ACCATT': 0.0,
 'ACCCAA': 0.0,
 'ACCCAC': 0.0,
 'ACCCAG': 0.0,
 'ACCCAT': 0.0,
 'ACCCCA': 0.0,
 'ACCCCC': 0.0,
 'ACCCCG': 0.0,
 'ACCCCT': 0.0,
 'ACCCGA': 0.0,
 'ACCCGC': 0.0,
 'ACCCGG': 0.0,
 'ACCCGT': 0.0,
 'ACCCTA': 0.0,
 'ACCCTC': 0.0,
 'ACCCTG': 0.0,
 'ACCCTT': 0.0,
 'ACCGAA': 0.0,
 'ACCGAC': 0.0,
 'ACCGAG': 0.0,
 'ACCGAT': 0.0,
 'ACCGCA': 0.0,
 'ACCGCC': 0.0,
 'ACCGCG': 0.0,
 'ACCGCT': 0.0,
 'ACCGGA': 0.0,
 'ACCGGC': 0.0,
 'ACCGGG': 0.0,
 'ACCGGT': 0.0,
 'ACCGTA': 0.0,
 'ACCGTC': 0.0,
 'ACCGTG': 0.0,
 'ACCGTT': 0.0,
 'ACCTAA': 0.0,
 'ACCTAC': 0.0,
 'ACCTAG': 0.0,
 'ACCTAT': 0.0,
 'ACCTCA': 0.0,
 'ACCTCC': 0.0,
 'ACCTCG': 0.0,
 'ACCTCT': 0.0,
 'ACCTGA': 0.0,
 'ACCTGC': 0.0,
 'ACCTGG': 0.0,
 'ACCTGT': 0.0,
 'ACCTTA': 0.0,
 'ACCTTC': 0.0,
 'ACCTTG': 0.0,
 'ACCTTT': 0.0,
 'ACGAAA': 0.0,
 'ACGAAC': 0.0,
 'ACGAAG': 0.0,
 'ACGAAT': 0.0,
 'ACGACA': 0.0,
 'ACGACC': 0.0,
 'ACGACG': 0.0,
 'ACGACT': 0.0,
 'ACGAGA': 0.0,
 'ACGAGC': 0.0,
 'ACGAGG': 0.0,
 'ACGAGT': 0.0,
 'ACGATA': 0.0,
 'ACGATC': 0.0,
 'ACGATG': 0.0,
 'ACGATT': 0.0,
 'ACGCAA': 0.0,
 'ACGCAC': 0.0,
 'ACGCAG': 0.0,
 'ACGCAT': 0.0,
 'ACGCCA': 0.0,
 'ACGCCC': 0.0,
 'ACGCCG': 0.0,
 'ACGCCT': 0.0,
 'ACGCGA': 0.0,
 'ACGCGC': 0.0,
 'ACGCGG': 0.0,
 'ACGCGT': 0.0,
 'ACGCTA': 0.0,
 'ACGCTC': 0.0,
 'ACGCTG': 0.0,
 'ACGCTT': 0.0,
 'ACGGAA': 0.0,
 'ACGGAC': 0.0,
 'ACGGAG': 0.0,
 'ACGGAT': 0.0,
 'ACGGCA': 0.0,
 'ACGGCC': 0.0,
 'ACGGCG': 0.0,
 'ACGGCT': 0.0,
 'ACGGGA': 0.0,
 'ACGGGC': 0.0,
 'ACGGGG': 0.0,
 'ACGGGT': 0.0,
 'ACGGTA': 0.0,
 'ACGGTC': 0.0,
 'ACGGTG': 0.0,
 'ACGGTT': 0.0,
 'ACGTAA': 0.0,
 'ACGTAC': 0.0,
 'ACGTAG': 0.16666666666666666,
 'ACGTAT': 0.0,
 'ACGTCA': 0.0,
 'ACGTCC': 0.0,
 'ACGTCG': 0.0,
 'ACGTCT': 0.0,
 'ACGTGA': 0.0,
 'ACGTGC': 0.0,
 'ACGTGG': 0.0,
 'ACGTGT': 0.0,
 'ACGTTA': 0.0,
 'ACGTTC': 0.0,
 'ACGTTG': 0.0,
 'ACGTTT': 0.0,
 'ACTAAA': 0.0,
 'ACTAAC': 0.0,
 'ACTAAG': 0.0,
 'ACTAAT': 0.0,
 'ACTACA': 0.0,
 'ACTACC': 0.0,
 'ACTACG': 0.0,
 'ACTACT': 0.0,
 'ACTAGA': 0.0,
 'ACTAGC': 0.0,
 'ACTAGG': 0.0,
 'ACTAGT': 0.0,
 'ACTATA': 0.0,
 'ACTATC': 0.0,
 'ACTATG': 0.0,
 'ACTATT': 0.0,
 'ACTCAA': 0.0,
 'ACTCAC': 0.0,
 'ACTCAG': 0.0,
 'ACTCAT': 0.0,
 'ACTCCA': 0.0,
 'ACTCCC': 0.0,
 'ACTCCG': 0.0,
 'ACTCCT': 0.0,
 'ACTCGA': 0.0,
 'ACTCGC': 0.0,
 'ACTCGG': 0.0,
 'ACTCGT': 0.0,
 'ACTCTA': 0.0,
 'ACTCTC': 0.0,
 'ACTCTG': 0.0,
 'ACTCTT': 0.0,
 'ACTGAA': 0.0,
 'ACTGAC': 0.0,
 'ACTGAG': 0.0,
 'ACTGAT': 0.0,
 'ACTGCA': 0.0,
 'ACTGCC': 0.0,
 'ACTGCG': 0.0,
 'ACTGCT': 0.0,
 'ACTGGA': 0.0,
 'ACTGGC': 0.0,
 'ACTGGG': 0.0,
 'ACTGGT': 0.0,
 'ACTGTA': 0.0,
 'ACTGTC': 0.0,
 'ACTGTG': 0.0,
 'ACTGTT': 0.0,
 'ACTTAA': 0.0,
 'ACTTAC': 0.0,
 'ACTTAG': 0.0,
 'ACTTAT': 0.0,
 'ACTTCA': 0.0,
 'ACTTCC': 0.0,
 'ACTTCG': 0.0,
 'ACTTCT': 0.0,
 'ACTTGA': 0.0,
 'ACTTGC': 0.0,
 'ACTTGG': 0.0,
 'ACTTGT': 0.0,
 'ACTTTA': 0.0,
 'ACTTTC': 0.0,
 'ACTTTG': 0.0,
 'ACTTTT': 0.0,
 'AGAAAA': 0.0,
 'AGAAAC': 0.0,
 'AGAAAG': 0.0,
 'AGAAAT': 0.0,
 'AGAACA': 0.0,
 'AGAACC': 0.0,
 'AGAACG': 0.0,
 'AGAACT': 0.0,
 'AGAAGA': 0.0,
 'AGAAGC': 0.0,
 'AGAAGG': 0.0,
 'AGAAGT': 0.0,
 'AGAATA': 0.0,
 'AGAATC': 0.0,
 'AGAATG': 0.0,
 'AGAATT': 0.0,
 'AGACAA': 0.0,
 'AGACAC': 0.0,
 'AGACAG': 0.0,
 'AGACAT': 0.0,
 'AGACCA': 0.0,
 'AGACCC': 0.0,
 'AGACCG': 0.0,
 'AGACCT': 0.0,
 'AGACGA': 0.0,
 'AGACGC': 0.0,
 'AGACGG': 0.0,
 'AGACGT': 0.0,
 'AGACTA': 0.0,
 'AGACTC': 0.0,
 'AGACTG': 0.0,
 'AGACTT': 0.0,
 'AGAGAA': 0.0,
 'AGAGAC': 0.0,
 'AGAGAG': 0.0,
 'AGAGAT': 0.0,
 'AGAGCA': 0.0,
 'AGAGCC': 0.0,
 'AGAGCG': 0.0,
 'AGAGCT': 0.0,
 'AGAGGA': 0.0,
 'AGAGGC': 0.0,
 'AGAGGG': 0.0,
 'AGAGGT': 0.0,
 'AGAGTA': 0.0,
 'AGAGTC': 0.0,
 'AGAGTG': 0.0,
 'AGAGTT': 0.0,
 'AGATAA': 0.0,
 'AGATAC': 0.0,
 'AGATAG': 0.0,
 'AGATAT': 0.0,
 'AGATCA': 0.0,
 'AGATCC': 0.0,
 'AGATCG': 0.0,
 'AGATCT': 0.0,
 'AGATGA': 0.0,
 'AGATGC': 0.0,
 'AGATGG': 0.0,
 'AGATGT': 0.0,
 'AGATTA': 0.0,
 'AGATTC': 0.0,
 'AGATTG': 0.0,
 'AGATTT': 0.0,
 'AGCAAA': 0.0,
 'AGCAAC': 0.0,
 'AGCAAG': 0.0,
 'AGCAAT': 0.0,
 'AGCACA': 0.0,
 'AGCACC': 0.0,
 'AGCACG': 0.0,
 'AGCACT': 0.0,
 'AGCAGA': 0.0,
 'AGCAGC': 0.0,
 'AGCAGG': 0.0,
 'AGCAGT': 0.0,
 'AGCATA': 0.0,
 'AGCATC': 0.0,
 'AGCATG': 0.0,
 'AGCATT': 0.0,
 'AGCCAA': 0.0,
 'AGCCAC': 0.0,
 'AGCCAG': 0.0,
 'AGCCAT': 0.0,
 'AGCCCA': 0.0,
 'AGCCCC': 0.0,
 'AGCCCG': 0.0,
 'AGCCCT': 0.0,
 'AGCCGA': 0.0,
 'AGCCGC': 0.0,
 'AGCCGG': 0.0,
 'AGCCGT': 0.0,
 'AGCCTA': 0.0,
 'AGCCTC': 0.0,
 'AGCCTG': 0.0,
 'AGCCTT': 0.0,
 'AGCGAA': 0.0,
 'AGCGAC': 0.0,
 'AGCGAG': 0.0,
 'AGCGAT': 0.0,
 'AGCGCA': 0.0,
 'AGCGCC': 0.0,
 'AGCGCG': 0.0,
 'AGCGCT': 0.0,
 'AGCGGA': 0.0,
 'AGCGGC': 0.0,
 'AGCGGG': 0.0,
 'AGCGGT': 0.0,
 'AGCGTA': 0.0,
 'AGCGTC': 0.0,
 'AGCGTG': 0.0,
 'AGCGTT': 0.0,
 'AGCTAA': 0.0,
 'AGCTAC': 0.0,
 'AGCTAG': 0.0,
 'AGCTAT': 0.0,
 'AGCTCA': 0.0,
 'AGCTCC': 0.0,
 'AGCTCG': 0.0,
 'AGCTCT': 0.0,
 'AGCTGA': 0.0,
 'AGCTGC': 0.0,
 'AGCTGG': 0.0,
 'AGCTGT': 0.0,
 'AGCTTA': 0.0,
 'AGCTTC': 0.0,
 'AGCTTG': 0.0,
 'AGCTTT': 0.0,
 'AGGAAA': 0.0,
 'AGGAAC': 0.0,
 'AGGAAG': 0.0,
 'AGGAAT': 0.0,
 'AGGACA': 0.0,
 'AGGACC': 0.0,
 'AGGACG': 0.0,
 'AGGACT': 0.0,
 'AGGAGA': 0.0,
 'AGGAGC': 0.0,
 'AGGAGG': 0.0,
 'AGGAGT': 0.0,
 'AGGATA': 0.0,
 'AGGATC': 0.0,
 'AGGATG': 0.0,
 'AGGATT': 0.0,
 'AGGCAA': 0.0,
 'AGGCAC': 0.0,
 'AGGCAG': 0.0,
 'AGGCAT': 0.0,
 'AGGCCA': 0.0,
 'AGGCCC': 0.0,
 'AGGCCG': 0.0,
 'AGGCCT': 0.0,
 'AGGCGA': 0.0,
 'AGGCGC': 0.0,
 'AGGCGG': 0.0,
 'AGGCGT': 0.0,
 'AGGCTA': 0.0,
 'AGGCTC': 0.0,
 'AGGCTG': 0.0,
 'AGGCTT': 0.0,
 'AGGGAA': 0.0,
 'AGGGAC': 0.0,
 'AGGGAG': 0.0,
 'AGGGAT': 0.0,
 'AGGGCA': 0.0,
 'AGGGCC': 0.0,
 'AGGGCG': 0.0,
 'AGGGCT': 0.0,
 'AGGGGA': 0.0,
 'AGGGGC': 0.0,
 'AGGGGG': 0.0,
 'AGGGGT': 0.0,
 'AGGGTA': 0.0,
 'AGGGTC': 0.0,
 'AGGGTG': 0.0,
 'AGGGTT': 0.0,
 'AGGTAA': 0.0,
 'AGGTAC': 0.0,
 'AGGTAG': 0.0,
 'AGGTAT': 0.0,
 'AGGTCA': 0.0,
 'AGGTCC': 0.0,
 'AGGTCG': 0.0,
 'AGGTCT': 0.0,
 'AGGTGA': 0.0,
 'AGGTGC': 0.0,
 'AGGTGG': 0.0,
 'AGGTGT': 0.0,
 'AGGTTA': 0.0,
 'AGGTTC': 0.0,
 'AGGTTG': 0.0,
 'AGGTTT': 0.0,
 'AGTAAA': 0.0,
 'AGTAAC': 0.0,
 'AGTAAG': 0.0,
 'AGTAAT': 0.0,
 'AGTACA': 0.0,
 'AGTACC': 0.0,
 'AGTACG': 0.0,
 'AGTACT': 0.0,
 'AGTAGA': 0.0,
 'AGTAGC': 0.0,
 'AGTAGG': 0.0,
 'AGTAGT': 0.0,
 'AGTATA': 0.0,
 'AGTATC': 0.0,
 'AGTATG': 0.0,
 'AGTATT': 0.0,
 'AGTCAA': 0.0,
 'AGTCAC': 0.0,
 'AGTCAG': 0.0,
 'AGTCAT': 0.0,
 'AGTCCA': 0.0,
 'AGTCCC': 0.0,
 'AGTCCG': 0.0,
 'AGTCCT': 0.0,
 'AGTCGA': 0.16666666666666666,
 'AGTCGC': 0.0,
 'AGTCGG': 0.0,
 'AGTCGT': 0.0,
 'AGTCTA': 0.0,
 'AGTCTC': 0.0,
 'AGTCTG': 0.0,
 'AGTCTT': 0.0,
 'AGTGAA': 0.0,
 'AGTGAC': 0.0,
 'AGTGAG': 0.0,
 'AGTGAT': 0.0,
 'AGTGCA': 0.0,
 'AGTGCC': 0.0,
 'AGTGCG': 0.0,
 'AGTGCT': 0.0,
 'AGTGGA': 0.0,
 'AGTGGC': 0.0,
 'AGTGGG': 0.0,
 'AGTGGT': 0.0,
 'AGTGTA': 0.0,
 'AGTGTC': 0.0,
 'AGTGTG': 0.0,
 'AGTGTT': 0.0,
 'AGTTAA': 0.0,
 'AGTTAC': 0.0,
 'AGTTAG': 0.0,
 'AGTTAT': 0.0,
 'AGTTCA': 0.0,
 'AGTTCC': 0.0,
 'AGTTCG': 0.0,
 'AGTTCT': 0.0,
 'AGTTGA': 0.0,
 'AGTTGC': 0.0,
 'AGTTGG': 0.0,
 'AGTTGT': 0.0,
 'AGTTTA': 0.0,
 'AGTTTC': 0.0,
 'AGTTTG': 0.0,
 'AGTTTT': 0.0,
 'ATAAAA': 0.0,
 'ATAAAC': 0.0,
 'ATAAAG': 0.0,
 'ATAAAT': 0.0,
 'ATAACA': 0.0,
 'ATAACC': 0.0,
 'ATAACG': 0.0,
 'ATAACT': 0.0,
 'ATAAGA': 0.0,
 'ATAAGC': 0.0,
 'ATAAGG': 0.0,
 'ATAAGT': 0.0,
 'ATAATA': 0.0,
 'ATAATC': 0.0,
 'ATAATG': 0.0,
 'ATAATT': 0.0,
 'ATACAA': 0.0,
 'ATACAC': 0.0,
 'ATACAG': 0.0,
 'ATACAT': 0.0,
 'ATACCA': 0.0,
 'ATACCC': 0.0,
 'ATACCG': 0.0,
 'ATACCT': 0.0,
 'ATACGA': 0.0,
 'ATACGC': 0.0,
 'ATACGG': 0.0,
 'ATACGT': 0.0,
 'ATACTA': 0.0,
 'ATACTC': 0.0,
 'ATACTG': 0.0,
 'ATACTT': 0.0,
 'ATAGAA': 0.0,
 'ATAGAC': 0.0,
 'ATAGAG': 0.0,
 'ATAGAT': 0.0,
 'ATAGCA': 0.0,
 'ATAGCC': 0.0,
 'ATAGCG': 0.0,
 'ATAGCT': 0.0,
 'ATAGGA': 0.0,
 'ATAGGC': 0.0,
 'ATAGGG': 0.0,
 'ATAGGT': 0.0,
 'ATAGTA': 0.0,
 'ATAGTC': 0.0,
 'ATAGTG': 0.0,
 'ATAGTT': 0.0,
 'ATATAA': 0.0,
 'ATATAC': 0.0,
 'ATATAG': 0.0,
 'ATATAT': 0.0,
 'ATATCA': 0.0,
 'ATATCC': 0.0,
 'ATATCG': 0.0,
 'ATATCT': 0.0,
 'ATATGA': 0.0,
 'ATATGC': 0.0,
 'ATATGG': 0.0,
 'ATATGT': 0.0,
 'ATATTA': 0.0,
 'ATATTC': 0.0,
 'ATATTG': 0.0,
 'ATATTT': 0.0,
 'ATCAAA': 0.0,
 'ATCAAC': 0.0,
 'ATCAAG': 0.0,
 'ATCAAT': 0.0,
 'ATCACA': 0.0,
 'ATCACC': 0.0,
 'ATCACG': 0.0,
 'ATCACT': 0.0,
 'ATCAGA': 0.0,
 'ATCAGC': 0.0,
 'ATCAGG': 0.0,
 'ATCAGT': 0.0,
 'ATCATA': 0.0,
 'ATCATC': 0.0,
 'ATCATG': 0.0,
 'ATCATT': 0.0,
 'ATCCAA': 0.0,
 'ATCCAC': 0.0,
 'ATCCAG': 0.0,
 'ATCCAT': 0.0,
 'ATCCCA': 0.0,
 'ATCCCC': 0.0,
 'ATCCCG': 0.0,
 'ATCCCT': 0.0,
 'ATCCGA': 0.0,
 'ATCCGC': 0.0,
 'ATCCGG': 0.0,
 'ATCCGT': 0.0,
 'ATCCTA': 0.0,
 'ATCCTC': 0.0,
 'ATCCTG': 0.0,
 'ATCCTT': 0.0,
 'ATCGAA': 0.0,
 'ATCGAC': 0.0,
 'ATCGAG': 0.0,
 'ATCGAT': 0.0,
 'ATCGCA': 0.0,
 'ATCGCC': 0.0,
 'ATCGCG': 0.0,
 'ATCGCT': 0.0,
 'ATCGGA': 0.0,
 'ATCGGC': 0.0,
 'ATCGGG': 0.0,
 'ATCGGT': 0.0,
 'ATCGTA': 0.0,
 'ATCGTC': 0.0,
 'ATCGTG': 0.0,
 'ATCGTT': 0.0,
 'ATCTAA': 0.0,
 'ATCTAC': 0.0,
 'ATCTAG': 0.0,
 'ATCTAT': 0.0,
 'ATCTCA': 0.0,
 'ATCTCC': 0.0,
 'ATCTCG': 0.0,
 'ATCTCT': 0.0,
 'ATCTGA': 0.0,
 'ATCTGC': 0.0,
 'ATCTGG': 0.0,
 'ATCTGT': 0.0,
 'ATCTTA': 0.0,
 'ATCTTC': 0.0,
 'ATCTTG': 0.0,
 'ATCTTT': 0.0,
 'ATGAAA': 0.0,
 'ATGAAC': 0.0,
 'ATGAAG': 0.0,
 'ATGAAT': 0.0,
 'ATGACA': 0.0,
 'ATGACC': 0.0,
 'ATGACG': 0.0,
 'ATGACT': 0.0,
 'ATGAGA': 0.0,
 'ATGAGC': 0.0,
 'ATGAGG': 0.0,
 'ATGAGT': 0.0,
 'ATGATA': 0.0,
 'ATGATC': 0.0,
 'ATGATG': 0.0,
 'ATGATT': 0.0,
 'ATGCAA': 0.0,
 'ATGCAC': 0.0,
 'ATGCAG': 0.0,
 'ATGCAT': 0.0,
 'ATGCCA': 0.0,
 'ATGCCC': 0.0,
 'ATGCCG': 0.0,
 'ATGCCT': 0.0,
 'ATGCGA': 0.0,
 'ATGCGC': 0.0,
 'ATGCGG': 0.0,
 'ATGCGT': 0.0,
 'ATGCTA': 0.0,
 'ATGCTC': 0.0,
 'ATGCTG': 0.0,
 'ATGCTT': 0.0,
 'ATGGAA': 0.0,
 'ATGGAC': 0.0,
 'ATGGAG': 0.0,
 'ATGGAT': 0.0,
 'ATGGCA': 0.0,
 'ATGGCC': 0.0,
 'ATGGCG': 0.0,
 'ATGGCT': 0.0,
 'ATGGGA': 0.0,
 'ATGGGC': 0.0,
 'ATGGGG': 0.0,
 'ATGGGT': 0.0,
 'ATGGTA': 0.0,
 'ATGGTC': 0.0,
 'ATGGTG': 0.0,
 'ATGGTT': 0.0,
 'ATGTAA': 0.0,
 'ATGTAC': 0.0,
 'ATGTAG': 0.0,
 'ATGTAT': 0.0,
 'ATGTCA': 0.0,
 'ATGTCC': 0.0,
 'ATGTCG': 0.0,
 'ATGTCT': 0.0,
 'ATGTGA': 0.0,
 'ATGTGC': 0.0,
 'ATGTGG': 0.0,
 'ATGTGT': 0.0,
 'ATGTTA': 0.0,
 'ATGTTC': 0.0,
 'ATGTTG': 0.0,
 'ATGTTT': 0.0,
 'ATTAAA': 0.0,
 'ATTAAC': 0.0,
 'ATTAAG': 0.0,
 'ATTAAT': 0.0,
 'ATTACA': 0.0,
 'ATTACC': 0.0,
 'ATTACG': 0.0,
 'ATTACT': 0.0,
 'ATTAGA': 0.0,
 'ATTAGC': 0.0,
 'ATTAGG': 0.0,
 'ATTAGT': 0.0,
 'ATTATA': 0.0,
 'ATTATC': 0.0,
 'ATTATG': 0.0,
 'ATTATT': 0.0,
 'ATTCAA': 0.0,
 'ATTCAC': 0.0,
 'ATTCAG': 0.0,
 'ATTCAT': 0.0,
 'ATTCCA': 0.0,
 'ATTCCC': 0.0,
 'ATTCCG': 0.0,
 'ATTCCT': 0.0,
 'ATTCGA': 0.0,
 'ATTCGC': 0.0,
 'ATTCGG': 0.0,
 'ATTCGT': 0.0,
 'ATTCTA': 0.0,
 'ATTCTC': 0.0,
 'ATTCTG': 0.0,
 'ATTCTT': 0.0,
 'ATTGAA': 0.0,
 'ATTGAC': 0.0,
 'ATTGAG': 0.0,
 'ATTGAT': 0.0,
 'ATTGCA': 0.0,
 'ATTGCC': 0.0,
 'ATTGCG': 0.0,
 'ATTGCT': 0.0,
 ...}
In [57]:
{k:v for k,v in kmb.counts.items()}
Out[57]:
{'ACGTAG': 10.0,
 'CGTAGT': 10.0,
 'GTAGTC': 10.0,
 'TAGTCG': 10.0,
 'AGTCGA': 10.0,
 'GTCGAG': 10.0}
In [55]:
sum(kmb.counts.values())
Out[55]:
60.0
In [51]:
d = {k: float(v['forward']) for k,v in kmer_model.items()}
In [21]:
km = KmerBiasModel(d)
In [22]:
km.predict_seq("ACGTAGTCGAG", pad=True)
Out[22]:
array([0.        , 0.        , 0.07503211, 0.00632404, 0.01464586,
       0.00434952, 0.05245352, 0.01354322, 0.        , 0.        ,
       0.        ])
In [23]:
km.predict_seq("ACGTAGTCGAG", pad=False)
Out[23]:
array([0.07503211, 0.00632404, 0.01464586, 0.00434952, 0.05245352,
       0.01354322])
In [24]:
km.predict(encodeDNA(["ACGTAGTCGAG"])[0], pad=False)
Out[24]:
array([0.07503211, 0.00632404, 0.01464586, 0.00434952, 0.05245352,
       0.01354322])
In [25]:
km.predict_on_batch(encodeDNA(["ACGTAGTCGAG"]), pad=False)
Out[25]:
array([[0.07503211, 0.00632404, 0.01464586, 0.00434952, 0.05245352,
        0.01354322]])

evaluate the default model

  • total count prediction in a window
  • single base prediction
In [98]:
dpath = Path("/mnt/lab_data/users/avsec/dnase_bias")

SEQ_WIDTH = 1000

ds = DataSpec(
    task_specs={"DNase": TaskSpec(task="DNase",
                                 pos_counts=dpath / "IMR90_Naked_DNase.pos.bw",
                                 neg_counts=dpath / "IMR90_Naked_DNase.neg.bw",
                                 peaks=dpath / "genome.stride200.w200.nonblacklist.bed")},
    fasta_file=dpath / "GRCh38.genome.fa",
              )
In [99]:
valid_chr=['chr2', 'chr3', 'chr4']
test_chr=['chr1', 'chr8', 'chr9']
In [101]:
from basepair.datasets import StrandedProfile

class AppendCounts:
    """Target transformer: augment a dict of per-base profile arrays with
    log-scale total-count entries.

    For every key in the input dict, a companion ``counts/<task>`` entry is
    added holding ``log(1 + column-wise sum)`` of the corresponding array
    (summed over the first axis, i.e. positions). Original entries are kept.
    """

    def transform(self, x):
        """Return a new dict: all original items plus one log-count summary
        per key (``"profile/..."`` renamed to ``"counts/..."``).

        Args:
            x: dict mapping task keys (e.g. ``"profile/DNase"``) to numpy
               arrays; the first axis is summed over.
        """
        augmented = dict(x)
        for key, arr in x.items():
            counts_key = key.replace("profile/", "counts/")
            augmented[counts_key] = np.log(1 + arr.sum(0))
        return augmented
In [102]:
SEQ_WIDTH=1000
In [113]:
dl = StrandedProfile(ds, 
                     excl_chromosomes=valid_chr+test_chr, 
                     peak_width=SEQ_WIDTH, shuffle=True,
                     target_transformer=AppendCounts())
Skipped 90 intervals outside of the genome size
In [32]:
dl_valid = StrandedProfile(ds, 
                           incl_chromosomes=valid_chr, 
                           peak_width=SEQ_WIDTH, 
                           shuffle=True,
                           target_transformer=AppendCounts())
Skipped 15 intervals outside of the genome size
In [103]:
dl_test = StrandedProfile(ds, 
                          incl_chromosomes=test_chr, 
                          peak_width=SEQ_WIDTH, 
                          shuffle=True,
                          target_transformer=AppendCounts())
Skipped 15 intervals outside of the genome size
In [34]:
from kipoi.data_utils import numpy_collate_concat
In [36]:
def touch_file(file, verbose=True):
    """Pre-load `file` into the OS page cache with the `vmtouch` CLI.

    Relies on the IPython `!` shell escape with `{...}` variable
    interpolation, so this only works inside a notebook/IPython session
    and requires `vmtouch` on PATH. `-t` touches (loads) the pages,
    `-f` follows symlinks, and `-v` adds per-file progress output.
    """
    if verbose:
        add = "v"
    else:
        add = ""
    !vmtouch -{add}tf {file}

def touch_all_files(ds, verbose=True):
    """Warm the page cache for every data file referenced by a DataSpec.

    Touches the fasta file plus the pos/neg count bigwig of each task,
    to speed up subsequent random-access reads by the data loader.
    The peak bed files are deliberately skipped (line left commented).
    """
    touch_file(ds.fasta_file, verbose)
    for ts in ds.task_specs.values():
        touch_file(ts.pos_counts, verbose)
        touch_file(ts.neg_counts, verbose)
        #touch_file(ts.peaks)
    
In [37]:
touch_all_files(ds)
/mnt/lab_data/users/avsec/dnase_bias/GRCh38.genome.fa
[OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO] 799490/799490

           Files: 1
     Directories: 0
   Touched Pages: 799490 (3G)
         Elapsed: 0.44305 seconds
/mnt/lab_data/users/avsec/dnase_bias/IMR90_Naked_DNase.pos.bw
[OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO] 18253/18253

           Files: 1
     Directories: 0
   Touched Pages: 18253 (71M)
         Elapsed: 0.012521 seconds
/mnt/lab_data/users/avsec/dnase_bias/IMR90_Naked_DNase.neg.bw
[OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO] 18240/18240

           Files: 1
     Directories: 0
   Touched Pages: 18240 (71M)
         Elapsed: 0.00939 seconds
In [3]:
from tqdm import tqdm 
In [110]:
from kipoi.writers import HDF5BatchWriter
In [111]:
!mkdir -p {ddir}/processed/dnase/6mer
In [42]:
#!rm {ddir}/processed/dnase/6mer/IMR90_6mer.preds.h5
In [107]:
dl_test[0]['targets']['profile/DNase'].shape
Out[107]:
(1000, 2)
In [124]:
%load_ext line_profiler
In [126]:
from numba import jit
In [133]:
new_kmer_model = KmerBiasModel(k=6)
In [136]:
%lprun -f new_kmer_model.fit_on_batch new_kmer_model.fit_on_batch(batch['inputs'], batch['targets']['profile/DNase'].sum(axis=-1))
Timer unit: 1e-06 s

Total time: 0.354284 s
File: <ipython-input-132-f21f62046ce3>
Function: fit_on_batch at line 34

Line #      Hits         Time  Per Hit   % Time  Line Contents
==============================================================
    34                                               def fit_on_batch(self, inputs, targets):
    35         1      40504.0  40504.0     11.4          seqs = one_hot2string(inputs, self.vocab)
    36        33         57.0      1.7      0.0          for seqi, seq in enumerate(seqs):
    37     31872      84337.0      2.6     23.8              for i in range(len(seq) - self.k + 1):
    38     31840      65055.0      2.0     18.4                  kmer = seq[i:(i+self.k)]
    39     31840     164331.0      5.2     46.4                  self.update(kmer, targets[seqi,i + self.k//2 - 1])

Write a comprehensive evaluation function

  • make a correlation plot of base-pair level counts
  • make a correlation plot of 1kb level counts
  • show some example profiles
In [33]:
from kipoi.readers import HDF5Reader
In [31]:
ls {ddir}/processed/dnase/6mer/
IMR90_6mer.preds.h5
new_model.train.pkl
new_model.train-pos3.normalized.pkl
new_model.train-pos3.pkl
new_model.train-pos3.w200.normalized.pkl
retrained.IMR90_6mer.preds.h5
retrained.IMR90_6mer.w200.normalized.preds.h5
In [52]:
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/IMR90_6mer.preds.h5")
In [53]:
r = HDF5Reader(f"{ddir}/processed/dnase/6mer/retrained.IMR90_6mer.w200.normalized.preds.h5")

r.open()
In [54]:
r.ls()
Out[54]:
[('/metadata/interval_from_task',
  <HDF5 dataset "interval_from_task": shape (2662434,), type "|O">),
 ('/metadata/range/chr', <HDF5 dataset "chr": shape (2662434,), type "|O">),
 ('/metadata/range/end', <HDF5 dataset "end": shape (2662434,), type "<i8">),
 ('/metadata/range/id', <HDF5 dataset "id": shape (2662434,), type "<i8">),
 ('/metadata/range/start',
  <HDF5 dataset "start": shape (2662434,), type "<i8">),
 ('/metadata/range/strand',
  <HDF5 dataset "strand": shape (2662434,), type "|O">),
 ('/preds', <HDF5 dataset "preds": shape (2662434, 995), type "<f8">),
 ('/targets/counts/DNase',
  <HDF5 dataset "DNase": shape (2662434, 2), type "<f4">),
 ('/targets/profile/DNase',
  <HDF5 dataset "DNase": shape (2662434, 1000, 2), type "<f4">)]
In [70]:
preds = r.f['/preds'][:]

targets = r.f['/targets/profile/DNase'][:].sum(axis=-1)

y_true = targets[:,3:(1000-2)]
In [57]:
#y_true = targets[:,3:(200-1)]
In [72]:
assert preds.shape == y_true.shape
In [73]:
y_true.shape
Out[73]:
(2662434, 995)
In [68]:
preds.shape
Out[68]:
(2662434, 995)
In [74]:
y_true_l = np.ravel(y_true)
In [75]:
y_pred_l = np.ravel(preds)
In [76]:
y_true_l[:5]
Out[76]:
array([0., 0., 0., 0., 0.], dtype=float32)
In [61]:
y_true_l.max()
Out[61]:
61.0
In [62]:
import random
In [63]:
idx = np.unique(random.sample(range(0, len(y_pred_l)), 1000000))
In [47]:
plt.scatter(y_true_l[idx], y_pred_l[idx], alpha=0.1)
plt.xlabel("true values")
plt.ylabel("predictions")
Out[47]:
Text(0,0.5,'predictions')

Aggregated plot

In [77]:
from basepair.plots import regression_eval
In [78]:
yt_counts = np.log(1+y_true.sum(axis=-1))
yp_counts = np.log(1+preds.sum(axis=-1))
In [79]:
regression_eval(yt_counts, yp_counts, alpha=0.1)
In [50]:
regression_eval(yt_counts, yp_counts, alpha=0.1)
In [48]:
regression_eval(yt_counts, yp_counts, alpha=0.1)
In [ ]:
plt.scatter(yt_counts, yp_counts, alpha=0.1)
plt.xlabel("true values")
plt.ylabel("predictions")
In [34]:
from scipy.stats import pearsonr, spearmanr
In [35]:
yt = y_true_l
yp =y_pred_l
In [ ]:
rp = pearsonr(yt, yp)[0]
rs = spearmanr(yt, yp)[0]
In [ ]:
print(f"pearsonr: {rp}")
print(f"spearmanr: {rs}")

train your own bias-prediction 6-mer model

  • done in the script

evaluate

do they match? If not, why?

They don't match for some reason. Check again how they compute the vector.

In [2]:
from basepair.utils import read_pkl
In [3]:
km_new = read_pkl(f"{ddir}/processed/dnase/6mer/new_model.train.pkl")
In [4]:
ls {ddir}/processed/dnase/6mer/
IMR90_6mer.preds.h5                  new_model.train-pos3.pkl
new_model.train.pkl                  retrained.IMR90_6mer.preds.h5
new_model.train-pos3.normalized.pkl
In [20]:
km_new_pos3 = read_pkl(f"{ddir}/processed/dnase/6mer/new_model.train-pos3.w200.normalized.pkl")
In [21]:
from basepair.exp.dnase.models import KmerBiasModel
In [22]:
kmer_model = read_pkl(f"{ddir}/raw/dnase/6mer/IMR90_6mer.pickle")
km_old = KmerBiasModel({k: float(v['forward']) for k,v in kmer_model.items()})
In [23]:
df_new = pd.DataFrame([{"kmer": k, "value": v} for k,v in km_new.d.items()])
In [24]:
df_new_pos3 = pd.DataFrame([{"kmer": k, "value": v} for k,v in km_new_pos3.d.items()])
In [25]:
df_old = pd.DataFrame([{"kmer": k, "value": v} for k,v in km_old.d.items()])
In [26]:
dfeval = pd.merge(df_new, df_old, on='kmer', suffixes=('_new', '_old'))
In [27]:
dfeval2 = pd.merge(df_new_pos3, df_old, on='kmer', suffixes=('_new', '_old'))
In [28]:
plt.scatter(dfeval.value_new*100, dfeval.value_old, alpha=0.1)
#plt.xlim([0, 0.3])
#plt.ylim([0, 0.3])
plt.xlabel("value_new")
plt.ylabel("value_old")
Out[28]:
Text(0,0.5,'value_old')
In [29]:
plt.scatter(np.log(1+dfeval2.value_new*100), np.log(1+dfeval.value_old), alpha=0.1)
plt.xlabel("value_new")
plt.ylabel("value_old")
Out[29]:
Text(0,0.5,'value_old')
In [30]:
plt.scatter(dfeval2.value_new*100, dfeval.value_old, alpha=0.1)
#plt.xlim([0, 0.3])
#plt.ylim([0, 0.3])
plt.xlabel("value_new")
plt.ylabel("value_old")
Out[30]:
Text(0,0.5,'value_old')
In [76]:
plt.scatter(dfeval.value_new*100, dfeval.kmer.map({k: float(v['forward']) for k,v in kmer_model.items()}), alpha=0.1)
#plt.xlim([0, 0.3])
#plt.ylim([0, 0.3])
plt.xlabel("value_new")
plt.ylabel("value_old")
Out[76]:
Text(0,0.5,'value_old')
In [75]:
plt.scatter(dfeval.value_new*100, dfeval.kmer.map({k: float(v['reverse']) for k,v in kmer_model.items()}), alpha=0.1)
#plt.xlim([0, 0.3])
#plt.ylim([0, 0.3])
plt.xlabel("value_new")
plt.ylabel("value_old")
Out[75]:
Text(0,0.5,'value_old')
In [69]:
dfeval.value_new.min()
Out[69]:
1.0826504576531734e-06

Evaluate your own model

Improve your own model