# Files used by this notebook:
#   {ddir}/processed/chipnexus/exp/models/oct-sox-nanog-klf/models/n_dil_layers=9/model.h5
#   {ddir}/processed/chipnexus/exp/models/oct-sox-nanog-klf/models/n_dil_layers=9/dataspec.yaml
#     -> plus all files listed in dataspec.yaml
# Imports
# Notebook magics: inline figures, retina resolution
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from basepair.imports import *
from basepair.plot.config import get_figsize, paper_config
from basepair.extractors import bw_extract
# Use matplotlib paper config
paper_config()
# Common paths
# NOTE(review): `ddir`, `os`, `create_tf_session`, `BPNetPredictor`, `write_pkl`,
# `read_pkl`, `plt` etc. are assumed to come from `basepair.imports *` -- confirm
model_dir = Path(f"{ddir}/processed/chipnexus/exp/models/oct-sox-nanog-klf/models/n_dil_layers=9/")
modisco_dir = model_dir / f"modisco/all/profile/"
figures = f"{ddir}/figures/model-evaluation/chipnexus-bpnet"
# Parameters
model_file = model_dir / "model.h5"
dataspec_file = model_dir / "dataspec.yaml"
history_file = model_dir / "history.csv"
seq_width = 1000  # width (bp) of each peak window
num_workers = 10  # parallel workers for data loading
import basepair
import pandas as pd
import numpy as np
from basepair.cli.schemas import DataSpec, TaskSpec
from pathlib import Path
from keras.models import load_model
from basepair.datasets import StrandedProfile
from basepair.preproc import AppendCounts
from basepair.losses import MultichannelMultinomialNLL
from basepair.config import valid_chr, test_chr
from basepair.plots import regression_eval, plot_loss
from basepair.cli.evaluate import eval_profile
from basepair import samplers
from basepair.math import softmax
# !mv {model_dir}/preds.test.pkl {model_dir}/preds.test.bak.pkl
ds = DataSpec.load(dataspec_file)
tasks = list(ds.task_specs)
# Cache predictions
create_tf_session(0)
bpnet = BPNetPredictor.from_mdir(model_dir)
# Compute test-set predictions only once and cache them to disk
if not os.path.exists(model_dir / "preds.test.pkl"):
    model = bpnet.model
    # Test-chromosome peaks, fixed width, no shuffling so rows stay aligned
    # with test['metadata'] used further below
    dl_test = StrandedProfile(ds,
                              incl_chromosomes=test_chr,
                              peak_width=seq_width,
                              shuffle=False,
                              target_transformer=AppendCounts())
    test = dl_test.load_all(num_workers=num_workers)
    y_true = test["targets"]
    y_pred = model.predict(test['inputs'], verbose=1)
    write_pkl((test, y_pred), model_dir / "preds.test.pkl")
# Load predictions
test, y_pred = read_pkl(model_dir / "preds.test.pkl")
y_true = test['targets']
def regression_eval(y_true, y_pred, alpha=0.5, markersize=2, task="", ax=None, same_lim=False, loglog=False):
    """Scatter-plot predicted vs. observed values and annotate with correlations.

    NOTE: shadows `basepair.plots.regression_eval` imported at the top of the
    notebook; all later calls use this local version.

    Args:
      y_true: 1-d array of observed values (strictly positive if loglog=True).
      y_pred: 1-d array of predicted values, same length as y_true.
      alpha: point transparency.
      markersize: scatter marker size.
      task: panel title.
      ax: matplotlib Axes to draw on; a new figure is created if None.
      same_lim: if True, force identical x/y limits spanning both arrays.
      loglog: if True, draw on log-log axes and compute the correlations on
        log10-transformed values.
    """
    from scipy.stats import pearsonr, spearmanr
    if ax is None:
        fig, ax = plt.subplots(1)
    # Joint range across both arrays (only used when same_lim=True)
    xmax = max([y_true.max(), y_pred.max()])
    xmin = min([y_true.min(), y_pred.min()])
    if loglog:
        # BUG FIX: the original used np.log (natural log) for y_pred in
        # spearmanr while using np.log10 everywhere else. Spearman is
        # rank-based so the value was unaffected, but the inconsistency was
        # clearly unintended -- use log10 for both arguments.
        pearson, pearson_pval = pearsonr(np.log10(y_true), np.log10(y_pred))
        spearman, spearman_pval = spearmanr(np.log10(y_true), np.log10(y_pred))
        plt_fn = ax.loglog
    else:
        pearson, pearson_pval = pearsonr(y_true, y_pred)
        spearman, spearman_pval = spearmanr(y_true, y_pred)
        plt_fn = ax.plot
    plt_fn(y_pred, y_true, ".",
           markersize=markersize,
           rasterized=True,  # rasterize the (possibly huge) point cloud in vector output
           alpha=alpha)
    ax.set_xlabel("Predicted")
    ax.set_ylabel("Observed")
    if same_lim:
        ax.set_xlim((xmin, xmax))
        ax.set_ylim((xmin, xmax))
    rp = r"$R_{p}$"
    rs = r"$R_{s}$"
    ax.set_title(task)
    # Annotate Pearson/Spearman correlations in the lower-right corner
    ax.text(.95, .2, f"{rp}={pearson:.2f}",
            verticalalignment='bottom',
            horizontalalignment='right',
            transform=ax.transAxes)
    ax.text(.95, .05, f"{rs}={spearman:.2f}",
            verticalalignment='bottom',
            horizontalalignment='right',
            transform=ax.transAxes)
import matplotlib.ticker as ticker
# Per-task scatter: predicted vs. observed total counts, log-log, saved per task
for task in tasks:
    fig, ax= plt.subplots(figsize=get_figsize(frac=0.25, aspect=1))
    # NOTE(review): np.exp implies the count targets/predictions are stored in
    # log space -- confirm. `.mean(-1)` averages the two strands.
    yt = np.exp(y_true[f'counts/{task}'].mean(-1))
    yp = np.exp(y_pred[ds.task2idx(task, 'counts')].mean(-1))
    xrange = [10, 1e4]
    ax.set_ylim(xrange)
    ax.set_xlim(xrange)
    # y = x identity line
    ax.plot(xrange, xrange, c='grey', alpha=0.2)
    regression_eval(yt,
                    yp, alpha=.1, task=task, ax=ax, loglog=True)
    ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    ax.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    plt.minorticks_off()
    # save the figure
    os.makedirs(f"{figures}/scatter", exist_ok=True)
    fig.savefig(f"{figures}/scatter/{task}.pdf")
    fig.savefig(f"{figures}/scatter/{task}.png")
# Combined figure: one total-count scatter panel per task with shared axes
fig, axes = plt.subplots(1, len(tasks), figsize=get_figsize(frac=1, aspect=1/len(tasks)),
                         sharex=True, sharey=True)
for i, (task, ax) in enumerate(zip(tasks, axes)):
    yt = np.exp(y_true[f'counts/{task}'].mean(-1))
    yp = np.exp(y_pred[ds.task2idx(task, 'counts')].mean(-1))
    xrange = [10, 1e4]
    ax.set_ylim(xrange)
    ax.set_xlim(xrange)
    ax.plot(xrange, xrange, c='grey', alpha=0.2)
    regression_eval(yt,
                    yp, alpha=.1, task=task, ax=ax, loglog=True)
    ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    ax.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    # Only the left-most panel keeps its y-axis label
    if i > 0:
        ax.set_ylabel("")
fig.subplots_adjust(wspace=0)
plt.minorticks_off()
# Save the figure
fig.savefig(f"{figures}/scatter/all.pdf")
fig.savefig(f"{figures}/scatter/all.png")
from basepair.plots import plot_profiles
from basepair import samplers
from kipoi.data_utils import get_dataset_item
from kipoi.metadata import GenomicRanges
import pybedtools
from basepair.utils import flatten_list
from basepair.plot.tracks import plot_tracks, filter_tracks
from basepair.preproc import dfint_no_intersection
from pybedtools import BedTool
# Figure out valid indices (non-overlapping)
df_ranges = pd.DataFrame(test['metadata']['range'])[['chr', 'start','end']]
df_ranges_tasks = {t: df_ranges[test['metadata']['interval_from_task'] == t] for t in bpnet.tasks}
all_intervals = list(BedTool.from_dataframe(df_ranges))
o = dict()
# Greedily keep, per task, only the intervals that do not intersect any
# interval already kept for a previous task
for i,t in enumerate(bpnet.tasks):
    dft = df_ranges.iloc[test['metadata']['interval_from_task'] == t]
    if i == 0:
        # First task: keep everything
        o[t] = dft
    else:
        df_existing = pd.concat(list(o.values()), axis=0)
        o[t] = dft[dfint_no_intersection(dft, df_existing)]
valid_idx = pd.concat(list(o.values()), axis=0).index
# Boolean mask over all rows of df_ranges: True for kept (non-overlapping) peaks
valid_idx_bool = pd.Series(np.arange(len(df_ranges))).isin(valid_idx)
print("Fraction of non-overlapping peaks:", valid_idx_bool.mean())
# Plot observed vs. predicted profile tracks for the top peaks of each task
trim_edge = 300  # bp trimmed from each edge of the 1000bp window when plotting
xlim = [trim_edge, 1000 - trim_edge]
fig_width=get_figsize(0.5)[0]
rotate_y=90
fig_height_per_track=0.5
tasks = bpnet.tasks
for task in bpnet.tasks:
    print(task)
    # Top-2 peaks by total count, restricted to this task's own
    # non-overlapping peaks
    for idx in samplers.top_sum_count(y_true[f'profile/{task}'],2, keep=(test['metadata']['interval_from_task'] == task) & valid_idx_bool):
        # get the interval for that idx
        r = get_dataset_item(test['metadata']['range'], idx)
        interval = pybedtools.create_interval_from_list([r['chr'], int(r['start']), int(r['end'])])
        interval_str = f"{interval.chrom}:{interval.start + trim_edge}-{interval.end - trim_edge}"
        # make prediction
        pred = bpnet.predict([interval], compute_grads=False)[0]
        # compile the list of tracks to plot: observed + predicted pair for
        # every task (the comprehension's `task` is scoped to the comprehension
        # and does not clobber the outer loop variable)
        viz_dict =flatten_list([[
            # Observed
            (f"{task}\nObs", y_true[f'profile/{task}'][idx]),
            # Predicted
            (f"\nPred", pred['preds']['scaled_profile'][task]),
        ] for task_idx, task in enumerate(tasks)])
        fig = plot_tracks(filter_tracks(viz_dict, xlim),
                          title=interval_str,
                          fig_height_per_track=fig_height_per_track,
                          rotate_y=rotate_y,
                          fig_width=fig_width,
                          ylim=None,
                          legend=False)
        fig.align_ylabels()
        os.makedirs(f"{figures}/profiles", exist_ok=True)
        fig.savefig(f"{figures}/profiles/max-count-{task}-{idx}-{interval_str}.pdf")
        fig.savefig(f"{figures}/profiles/max-count-{task}-{idx}-{interval_str}.png")
from snakemake.io import glob_wildcards, expand
# Oct4-bigwigs
!ls /users/avsec/workspace/basepair-workflow/data/Oct4/signal/raw/rep*/*.bw
# Number of reads:
# Sox2-bigwigs
!ls /users/avsec/workspace/basepair-workflow/data/Sox2/signal/raw/rep*/*.bw
# N Reads
# Nanog-bigwigs
!ls /users/avsec/workspace/basepair-workflow/data/Nanog/signal/raw/rep*/*.bw
# N Reads
# Klf4-bigwigs
!ls /users/avsec/workspace/basepair-workflow/data/Klf4/signal/raw/rep*/*.bw
# N reads
# Discover per-replicate bigwig files via snakemake wildcards
wildcard = r"/users/avsec/workspace/basepair-workflow/data/{factor}/signal/raw/{rep}/{strand}.bw"
factors, reps, strands = glob_wildcards(wildcard)
# Remove DNase
factors, reps, strands = zip(*[(f,r,s) for f,r,s in zip(factors, reps, strands) if f != "DNase"])
files = expand(wildcard, zip, factor=factors, rep=reps, strand=strands, )
# Map "/<factor>/<rep>/<strand>" -> bigwig path
files = {f"/{f}/{r}/{s}": fi for f, r, s, fi in zip(factors, reps, strands, files)}
files
# Per-base coverage of every test interval, from each bigwig
# NOTE(review): `tqdm` is assumed to come from `basepair.imports *` -- confirm
counts = {f: bw_extract(fp, all_intervals, lambda x: x) for f,fp in tqdm(files.items())}
# Add up reps [0,1] and [2,3]
# Oct4 has 4 replicates: pool them into two pseudo-replicates
# (rep0+rep1 -> rep0, rep2+rep3 -> rep1); order matters -- rep0 is
# overwritten before rep1 is reassigned from rep2/rep3
counts['/Oct4/rep0/pos'] = sum([counts['/Oct4/rep0/pos'], counts['/Oct4/rep1/pos']])
counts['/Oct4/rep0/neg'] = sum([counts['/Oct4/rep0/neg'], counts['/Oct4/rep1/neg']])
counts['/Oct4/rep1/pos'] = sum([counts['/Oct4/rep2/pos'], counts['/Oct4/rep3/pos']])
counts['/Oct4/rep1/neg'] = sum([counts['/Oct4/rep2/neg'], counts['/Oct4/rep3/neg']])
# Replicate-vs-replicate scatter of total counts per region (log-log)
for task in tasks:
    if task == 'Klf4':
        # Klf4 skipped -- presumably no usable replicate data; confirm
        continue
    fig, ax= plt.subplots(figsize=get_figsize(frac=0.25, aspect=1))
    # Total counts per region (+1 pseudocount), both strands summed
    rep0 = 1+sum([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']]).sum(1)
    rep1 = 1+sum([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']]).sum(1)
    xrange = [10, 1e4]
    ax.set_ylim(xrange)
    ax.set_xlim(xrange)
    ax.plot(xrange, xrange, c='grey', alpha=0.2)
    regression_eval(rep0, rep1, alpha=.1, task=task, ax=ax, loglog=True)
    ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    ax.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    plt.minorticks_off()
    # save the figure
    os.makedirs(f"{figures}/scatter-replicates", exist_ok=True)
    fig.savefig(f"{figures}/scatter-replicates/{task}.pdf")
    fig.savefig(f"{figures}/scatter-replicates/{task}.png")
# Combined replicate-scatter figure (Klf4 excluded, hence len(tasks)-1 panels)
fig, axes = plt.subplots(1, len(tasks) -1 , figsize=get_figsize(frac=.75, aspect=1/(len(tasks) - 1)),
                         sharex=True, sharey=True)
# NOTE(review): zip() pairs tasks with only len(tasks)-1 axes in order; this
# works cleanly only if the skipped task ('Klf4') is last in `tasks` -- confirm
for i, (task, ax) in enumerate(zip(tasks, axes)):
    if task == 'Klf4':
        continue
    rep0 = 1+sum([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']]).sum(1)
    rep1 = 1+sum([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']]).sum(1)
    xrange = [10, 1e4]
    ax.set_ylim(xrange)
    ax.set_xlim(xrange)
    ax.plot(xrange, xrange, c='grey', alpha=0.2)
    regression_eval(rep0, rep1, alpha=.1, task=task, ax=ax, loglog=True)
    ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    ax.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=4))
    # Only the left-most panel keeps its y-axis label
    if i > 0:
        ax.set_ylabel("")
fig.subplots_adjust(wspace=0)
plt.minorticks_off()
# Save the figure
fig.savefig(f"{figures}/scatter-replicates/all.pdf")
fig.savefig(f"{figures}/scatter-replicates/all.png")
# log10(counts + 1)
import holoviews.operation.datashader as hd
# Datashader/holoviews setup for the per-base scatter plots
hd.shade.cmap=["lightblue", "darkblue"]
# NOTE(review): `hv` (holoviews) is assumed to be imported by
# `basepair.imports *` -- confirm
hv.extension("bokeh", "matplotlib")
import datashader as dsh
import datashader.transfer_functions as tf
# Per-base replicate agreement for Sox2: log10(count + 1) at every position
task = 'Sox2'
rep0 = np.log10(1+sum([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']]).ravel())
rep1 = np.log10(1+sum([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']]).ravel())
df = pd.DataFrame({"rep0": rep0, "rep1": rep1})
# Rasterize the huge point cloud on a 300x300 canvas, then shade + spread
fig = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=300, plot_height=300).points(df, 'rep0' ,'rep1'))), px=2)
hv.Store.renderers['matplotlib'].save(fig, f"{figures}/per-base-scatter-replicates/{task}", 'pdf')
fig
# Per-base predicted vs. observed profile for Sox2
ypc = y_pred[ds.task2idx(task, 'counts')]            # predicted counts head
ypp = softmax(y_pred[ds.task2idx(task, 'profile')])  # profile probabilities per position
# Scale profile probabilities by the *predicted* total counts
# (np.exp(ypc) - 1 suggests counts are predicted in log1p space -- confirm)
y_pred_profile = np.log10(1+(ypp * (np.exp(ypc) - 1 )[:, np.newaxis]).sum(axis=-1).ravel())
y_true_profile = np.log10(1+y_true[f'profile/{task}'].sum(axis=-1).ravel())
df = pd.DataFrame({"Predicted": y_pred_profile, "Observed": y_true_profile})
a = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=600, plot_height=600).points(df, 'Predicted' ,'Observed'), label='Using predicted total counts')), px=2)
# Same, but scaling by the *observed* total counts
y_pred_profile = np.log10(1+(ypp * y_true[f'profile/{task}'].sum(axis=1)[:, np.newaxis]).sum(axis=-1).ravel())
y_true_profile = np.log10(1+y_true[f'profile/{task}'].sum(axis=-1).ravel())
df = pd.DataFrame({"Predicted": y_pred_profile, "Observed": y_true_profile})
b = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=600, plot_height=600).points(df, 'Predicted' ,'Observed'), label='Using observed total counts')), px=2)
# Side-by-side layout of the two variants
fig = a + b
hv.Store.renderers['matplotlib'].save(fig, f"{figures}/per-base-scatter/{task}", 'pdf')
fig
# Per-base replicate agreement for Nanog (same recipe as the Sox2 cell above)
task = 'Nanog'
rep0 = np.log10(1+sum([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']]).ravel())
rep1 = np.log10(1+sum([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']]).ravel())
df = pd.DataFrame({"rep0": rep0, "rep1": rep1})
fig = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=300, plot_height=300).points(df, 'rep0' ,'rep1'))), px=2)
hv.Store.renderers['matplotlib'].save(fig, f"{figures}/per-base-scatter-replicates/{task}", 'pdf')
fig
# Per-base predicted vs. observed profile for Nanog
ypc = y_pred[ds.task2idx(task, 'counts')]
ypp = softmax(y_pred[ds.task2idx(task, 'profile')])
# Profile probabilities scaled by predicted total counts
y_pred_profile = np.log10(1+(ypp * (np.exp(ypc) - 1 )[:, np.newaxis]).sum(axis=-1).ravel())
y_true_profile = np.log10(1+y_true[f'profile/{task}'].sum(axis=-1).ravel())
df = pd.DataFrame({"Predicted": y_pred_profile, "Observed": y_true_profile})
a = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=600, plot_height=600).points(df, 'Predicted' ,'Observed'), label='Predicted total counts')), px=2)
# Profile probabilities scaled by observed total counts
y_pred_profile = np.log10(1+(ypp * y_true[f'profile/{task}'].sum(axis=1)[:, np.newaxis]).sum(axis=-1).ravel())
y_true_profile = np.log10(1+y_true[f'profile/{task}'].sum(axis=-1).ravel())
df = pd.DataFrame({"Predicted": y_pred_profile, "Observed": y_true_profile})
b = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=600, plot_height=600).points(df, 'Predicted' ,'Observed'), label='Observed total counts')), px=2)
fig = a + b
hv.Store.renderers['matplotlib'].save(fig, f"{figures}/per-base-scatter/{task}", 'pdf')
fig
# Per-base replicate agreement for Oct4 (same recipe as the Sox2 cell above)
task = 'Oct4'
rep0 = np.log10(1+sum([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']]).ravel())
rep1 = np.log10(1+sum([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']]).ravel())
df = pd.DataFrame({"rep0": rep0, "rep1": rep1})
fig = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=300, plot_height=300).points(df, 'rep0' ,'rep1'))), px=2)
hv.Store.renderers['matplotlib'].save(fig, f"{figures}/per-base-scatter-replicates/{task}", 'pdf')
fig
# Per-base predicted vs. observed profile for Oct4
ypc = y_pred[ds.task2idx(task, 'counts')]
ypp = softmax(y_pred[ds.task2idx(task, 'profile')])
# Profile probabilities scaled by predicted total counts
y_pred_profile = np.log10(1+(ypp * (np.exp(ypc) - 1 )[:, np.newaxis]).sum(axis=-1).ravel())
y_true_profile = np.log10(1+y_true[f'profile/{task}'].sum(axis=-1).ravel())
df = pd.DataFrame({"Predicted": y_pred_profile, "Observed": y_true_profile})
a = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=600, plot_height=600).points(df, 'Predicted' ,'Observed'), label='Predicted total counts')), px=2)
# Profile probabilities scaled by observed total counts
y_pred_profile = np.log10(1+(ypp * y_true[f'profile/{task}'].sum(axis=1)[:, np.newaxis]).sum(axis=-1).ravel())
y_true_profile = np.log10(1+y_true[f'profile/{task}'].sum(axis=-1).ravel())
df = pd.DataFrame({"Predicted": y_pred_profile, "Observed": y_true_profile})
b = hd.spread(hd.shade(hv.Image(dsh.Canvas(plot_width=600, plot_height=600).points(df, 'Predicted' ,'Observed'), label='Observed total counts')), px=2)
fig = a + b
hv.Store.renderers['matplotlib'].save(fig, f"{figures}/per-base-scatter/{task}", 'pdf')
fig
# Distribution of per-base profile fractions; informs the pos/neg thresholds
# used in the eval_profile calls below
task=tasks[0]
yp = softmax(y_pred[ds.task2idx(task, "profile")])
yt = y_true["profile/" + task]
# Fraction of the region's total counts at each position (+1 in the
# denominator avoids division by zero for empty regions)
x = np.ravel(yt / (1+yt.sum(axis=-2, keepdims=True)))
plt.figure(figsize=(12,3))
plt.subplot(121)
plt.hist(x[(x<0.04) ], bins=100);
plt.subplot(122)
plt.hist(x[(x<0.04) & (x>0.0001)], bins=100);
# Fraction of positions above the lower threshold
np.mean(x>0.0001)
# Profile evaluation (binned auPRC) per task, model predictions vs. observed.
# NOTE(review): `Parallel`/`delayed` (joblib) are assumed to come from
# `basepair.imports *` -- confirm
out_df = Parallel(n_jobs=len(tasks))(delayed(basepair.cli.evaluate.eval_profile)(y_true["profile/" + task],
                                                                                 softmax(y_pred[ds.task2idx(task, "profile")]),
                                                                                 binsizes=[1,10],
                                                                                 pos_min_threshold=0.015,
                                                                                 neg_max_threshold=0.005,)
                                     for task in tasks)
# One result DataFrame per task; label each with its task name
df = pd.concat([out_df[i].assign(task=task) for i,task in enumerate(tasks)])
df
binsizes = [1,2, 5, 10, 20, 50]
np.random.seed(42)
# Evaluate predictions against the pooled replicate observations: stack the
# rep0 and rep1 measurements and duplicate the predictions to match
out_df = Parallel(n_jobs=len(tasks))(delayed(basepair.cli.evaluate.eval_profile)(np.concatenate([np.stack([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']], axis=-1),
                                                                                                 np.stack([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']], axis=-1)
                                                                                                 ], axis=0),
                                                                                 np.concatenate([softmax(y_pred[ds.task2idx(task, "profile")]),
                                                                                                 softmax(y_pred[ds.task2idx(task, "profile")])], axis=0),
                                                                                 binsizes=binsizes,
                                                                                 pos_min_threshold=0.015,
                                                                                 neg_max_threshold=0.005,)
                                     for task in tasks if task != 'Klf4')
# NOTE(review): indexing out_df with enumerate(tasks) is only aligned if
# 'Klf4' is the *last* entry of `tasks`; otherwise labels are shifted -- confirm
df = pd.concat([out_df[i].assign(task=task) for i,task in enumerate(tasks) if task != 'Klf4'])
df['method'] = 'bpnet'
# Replicate-concordance baseline for Oct4: score each replicate's per-base
# fractions against the other replicate with the same profile metrics.
task = 'Oct4'
np.random.seed(42)
# BUG FIX: `rep_dfs` was only created in a *later* cell, so a top-to-bottom
# run raised NameError at the append below and the Oct4 baseline was lost.
# Initialize the accumulator here if no earlier cell has done so.
if 'rep_dfs' not in globals():
    rep_dfs = []
# (n_regions, seq_len, 2) observed counts per strand, per replicate
y_true_rep0 = np.stack([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']], axis=-1)
y_true_rep1 = np.stack([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']], axis=-1)
# Use the opposite replicate's per-base count fractions as the "prediction"
out_df = basepair.cli.evaluate.eval_profile(np.concatenate([y_true_rep0,
                                                            y_true_rep1], axis=0),
                                            np.concatenate([y_true_rep1 / y_true_rep1.sum(axis=1, keepdims=True),
                                                            y_true_rep0 / y_true_rep0.sum(axis=1, keepdims=True), ], axis=0),
                                            binsizes=binsizes,
                                            pos_min_threshold=0.015,
                                            neg_max_threshold=0.005,)
out_df["task"] = task
out_df["method"] = "replicates"
rep_dfs.append(out_df)
out_df
# Compare against the model's scores for the same task
df[df.task == task]
# Replicate-concordance baseline for Sox2 (same recipe as the Oct4 cell above)
task = 'Sox2'
# BUG FIX: the original unconditionally reset `rep_dfs = []` here, which
# discarded the Oct4 baseline appended by the previous cell. Only initialize
# if no earlier cell has done so.
if 'rep_dfs' not in globals():
    rep_dfs = []
np.random.seed(42)
# (n_regions, seq_len, 2) observed counts per strand, per replicate
y_true_rep0 = np.stack([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']], axis=-1)
y_true_rep1 = np.stack([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']], axis=-1)
# Use the opposite replicate's per-base count fractions as the "prediction"
out_df = basepair.cli.evaluate.eval_profile(np.concatenate([y_true_rep0,
                                                            y_true_rep1], axis=0),
                                            np.concatenate([y_true_rep1 / y_true_rep1.sum(axis=1, keepdims=True),
                                                            y_true_rep0 / y_true_rep0.sum(axis=1, keepdims=True), ], axis=0),
                                            binsizes=binsizes,
                                            pos_min_threshold=0.015,
                                            neg_max_threshold=0.005,)
out_df["task"] = task
out_df["method"] = "replicates"
rep_dfs.append(out_df)
out_df
# Compare against the model's scores for the same task
df[df.task == task]
# Replicate-concordance baseline for Nanog (same recipe as the cells above)
task = 'Nanog'
np.random.seed(42)
# (n_regions, seq_len, 2) observed counts per strand, per replicate
y_true_rep0 = np.stack([counts[f'/{task}/rep0/pos'], counts[f'/{task}/rep0/neg']], axis=-1)
y_true_rep1 = np.stack([counts[f'/{task}/rep1/pos'], counts[f'/{task}/rep1/neg']], axis=-1)
# Use the opposite replicate's per-base count fractions as the "prediction"
out_df = basepair.cli.evaluate.eval_profile(np.concatenate([y_true_rep0,
                                                            y_true_rep1], axis=0),
                                            np.concatenate([y_true_rep1 / y_true_rep1.sum(axis=1, keepdims=True),
                                                            y_true_rep0 / y_true_rep0.sum(axis=1, keepdims=True), ], axis=0),
                                            binsizes=binsizes,
                                            pos_min_threshold=0.015,
                                            neg_max_threshold=0.005,)
out_df["task"] = task
out_df["method"] = "replicates"
rep_dfs.append(out_df)
out_df
# Compare against the model's scores for the same task
df[df.task == task]
# Combine the model results (df) with the replicate baselines into one table
df_auprc = pd.concat([df] + rep_dfs, axis=0)
df_auprc
# Random-baseline rows: reuse the random_auprc column as the auprc value
df_auprc_random = df.copy()
df_auprc_random['auprc'] = df_auprc_random['random_auprc']
df_auprc_random['method'] = 'random'# + df_auprc_random['method']
# df_auprc_random = df_auprc_random.drop_duplicates()
# Tidy long-format table: one row per (task, method, binsize)
df_auprc_tidy = pd.concat([df_auprc, df_auprc_random], axis=0)
del df_auprc_tidy['random_auprc']
# Final figure: auPRC vs. binsize, colored by method, faceted by task
from plotnine import *
import plotnine
plotnine.__version__
# Pin the category order so legend and facet ordering are stable
df_auprc_tidy['method'] = pd.Categorical(df_auprc_tidy['method'], categories=df_auprc_tidy['method'].unique())
df_auprc_tidy['task'] = pd.Categorical(df_auprc_tidy['task'], categories=df_auprc_tidy['task'].unique())
plotnine.options.figure_size = get_figsize(0.5, aspect=0.3)
fig = ggplot(aes(x='binsize', y='auprc', color='method'), data=df_auprc_tidy) + \
    scale_x_log10(breaks=binsizes) + \
    geom_point() + \
    geom_line() + \
    facet_grid(".~task") + \
    theme_classic(base_size=10, base_family='Arial') + \
    theme(legend_position='top') + \
    scale_color_brewer('qual', 7)
fig
!mkdir -p {figures}/profile-metrics
fig.save(f"{figures}/profile-metrics/auprc.pdf")
fig.save(f"{figures}/profile-metrics/auprc.png", dpi=300)