|
|
|
|
|
|
|
import re |
|
import datasets |
|
from typing import Callable |
|
import os |
|
import torch |
|
import torch.nn.functional as F |
|
from itertools import product |
|
import numpy as np |
|
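# Example usage (a minimal sketch; assumes this file is used as a local Hugging Face
# datasets loading script, e.g. saved as CRISPR_data.py, and that recent `datasets`
# versions may additionally require trust_remote_code=True):
#
#     import datasets
#     ds = datasets.load_dataset("CRISPR_data.py", name="SX_spcas9_inDelphi", trust_remote_code=True)
#     print(ds["train"][0])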
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def gc_content(seq): |
|
return (seq.count("G") + seq.count("C")) / len(seq) |
|
|
|
class CRISPRDataConfig(datasets.BuilderConfig): |
|
    def __init__(self, ref_filter: Callable | None = None, cut_filter: Callable | None = None, author_filter: Callable | None = None, file_filter: Callable | None = None, test_ratio: float = 0.05, validation_ratio: float = 0.05, seed: int = 63036, features: datasets.Features | None = None, ref1len: int = 127, ref2len: int = 127, DELLEN_LIMIT: int = 60, Lindel_dlen: int = 30, Lindel_mh_len: int = 4, FOREcasT_MAX_DEL_SIZE: int = 30, **kwargs):
|
"""BuilderConfig for CRISPR_data. |
|
Args: |
|
ref_filter: *function*, ref_filter(ref1, ref2) -> bool. |
|
cut_filter: *function*, cut_filter(cut1, cut2, ref1[optional], ref2[optional]) -> bool. |
|
author_filter: *function*, author_filter(author, ref1[optional], ref2[optional], cut1[optional], cut2[optional]) -> bool. |
|
file_filter: *function*, file_filter(file, ref1[optional], ref2[optional], cut1[optional], cut2[optional], author[optional]) -> bool. |
|
test_ratio: *float*, the ratio of data for test. |
|
validation_ratio: *float*, the ratio of data for validation. |
|
seed: *int*, the random seed. |
|
            features: *datasets.Features*, the data structure stored in the config (used to auto-generate the model card when testing the dataset).
|
ref1len: length of ref1. |
|
ref2len: length of ref2. |
|
DELLEN_LIMIT: upper limit of inDelphi deletion size. |
|
Lindel_dlen: upper limit of Lindel deletion size. |
|
            Lindel_mh_len: upper limit of Lindel micro-homology size (micro-homologies longer than Lindel_mh_len are not excluded, but are clipped to Lindel_mh_len).
|
FOREcasT_MAX_DEL_SIZE: upper limit of FOREcasT deletion size. |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
|
super().__init__(**kwargs) |
|
self.ref_filter = ref_filter |
|
self.cut_filter = cut_filter |
|
self.author_filter = author_filter |
|
self.file_filter = file_filter |
|
self.test_ratio = test_ratio |
|
self.validation_ratio = validation_ratio |
|
self.seed = seed |
|
self.features = features |
|
self.ref1len = ref1len |
|
self.ref2len = ref2len |
|
self.DELLEN_LIMIT = DELLEN_LIMIT |
|
self.Lindel_dlen = Lindel_dlen |
|
self.Lindel_mh_len = Lindel_mh_len |
|
self.FOREcasT_MAX_DEL_SIZE = FOREcasT_MAX_DEL_SIZE |
|
|
|
class CRISPRData(datasets.GeneratorBasedBuilder): |
|
    def __init__(self, **kwargs):
|
        super().__init__(**kwargs)
|
with torch.no_grad(): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
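            # Precompute index pairs that enumerate every cell of the
            # (ref2len + 1) x (ref1len + 1) junction grid diagonal by diagonal.
            # Along one diagonal ref1_end - ref2_start is constant, so for a
            # given cut pair all cells with the same deletion length (and the
            # equivalent junctions of a micro-homology) become contiguous.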
ref1len, ref2len = self.config.ref1len, self.config.ref2len |
|
row_indices = torch.arange(ref2len + 1)[:, None].expand(-1, ref1len + 1) |
|
col_indices = torch.arange(ref1len + 1).expand(ref2len + 1, -1) |
|
self.diag_indices = ( |
|
|
|
torch.cat([ |
|
row_indices.diagonal(offset) |
|
for offset in range(-ref2len, ref1len + 1) |
|
]), |
|
|
|
torch.cat([ |
|
col_indices.diagonal(offset) |
|
for offset in range(-ref2len, ref1len + 1) |
|
]) |
|
) |
|
|
|
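            # Encode nucleotides as base-4 digits via ASCII arithmetic:
            # ord % 5 clipped to [0, 3] maps A->0, C->2, G->1, T->3, so
            # nuc_code == "0213".  base_idx and base_cutoff later translate the
            # raw base-4 code of a 1 or 2 bp insertion into its position in a
            # flat vector ordered alphabetically (A, C, G, T, AA, AC, ...).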
self.nuc_code = "".join( |
|
str(i) |
|
for i in (np.frombuffer("ACGT".encode(), dtype=np.int8) % 5).clip(0, 3) |
|
) |
|
self.base_idx = [ |
|
torch.tensor([ |
|
int("".join(j), base=len(self.nuc_code)) |
|
for j in product(*([self.nuc_code] * i)) |
|
]) |
|
for i in range(1, 3) |
|
] |
|
self.base_cutoff = [0] |
|
for bi in self.base_idx: |
|
bi += self.base_cutoff[-1] |
|
self.base_cutoff.append(self.base_cutoff[-1] + len(bi)) |
|
self.base_idx = torch.cat(self.base_idx) |
|
|
|
|
|
|
|
def get_observations(self, ref1, ref2, cut, RANDOM_INSERT_LIMIT=99999, INSERT_LIMIT=None, count_long_insertion=False): |
|
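        # observations[ref2_start, ref1_end] accumulates the read count of each
        # observed junction for this cut.  When INSERT_LIMIT is set, insertions of
        # 1..INSERT_LIMIT bp are additionally tallied in an alphabetically ordered
        # vector (with an optional trailing bucket for longer insertions).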
        assert len(ref1) == self.config.ref1len and len(ref2) == self.config.ref2len, "reference lengths do not match the configured ref1len/ref2len"
|
observations = torch.zeros(len(ref2) + 1, len(ref1) + 1, dtype=torch.int64) |
|
if INSERT_LIMIT is not None: |
|
            assert INSERT_LIMIT < len(self.base_cutoff), "INSERT_LIMIT exceeds the supported insertion length"
|
base = len(self.nuc_code) |
|
total = (base ** (INSERT_LIMIT + 1) - base) // (base - 1) |
|
insert_counts = torch.zeros(total, dtype=torch.int64) |
|
if count_long_insertion: |
|
insert_count_long = 0 |
|
cut1, cut2 = cut['cut1'], cut['cut2'] |
|
for author in cut['authors']: |
|
for file in author['files']: |
|
mask = torch.tensor([len(random_insert) <= RANDOM_INSERT_LIMIT for random_insert in file['random_insert']]) |
|
                # Accumulate read counts per junction. index_put_ with
                # accumulate=True sums contributions from repeated
                # (ref2_start, ref1_end) pairs (e.g. the same junction with
                # different random inserts), which plain advanced-index +=
                # would silently drop.
                observations.index_put_(
                    (
                        torch.tensor(file['ref2_start'])[mask],
                        torch.tensor(file['ref1_end'])[mask]
                    ),
                    torch.tensor(file['count'])[mask],
                    accumulate=True
                )
|
if INSERT_LIMIT is not None: |
|
for ref1_end, ref2_start, random_insert, count in zip(file['ref1_end'], file['ref2_start'], file['random_insert'], file['count']): |
|
if ref1_end < cut1 or ref2_start > cut2: |
|
continue |
|
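                        # The full inserted sequence is the reference retained
                        # beyond cut1, the random insert, and the reference
                        # retained before cut2.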
insert = ref1[cut1:ref1_end] + random_insert + ref2[ref2_start:cut2] |
|
if len(insert) > 0: |
|
if len(insert) <= INSERT_LIMIT: |
|
insert_counts[ |
|
self.base_cutoff[len(insert) - 1] + |
|
int("".join(str(i) for i in (np.frombuffer(insert.encode(), dtype=np.int8) % 5).clip(0, 3)), base=base) |
|
] += count |
|
elif count_long_insertion: |
|
insert_count_long += count |
|
if INSERT_LIMIT is not None: |
|
insert_counts = insert_counts[self.base_idx[:total]] |
|
if count_long_insertion: |
|
return observations, torch.cat([ |
|
insert_counts, |
|
torch.tensor([insert_count_long], dtype=torch.int64) |
|
]) |
|
else: |
|
return observations, insert_counts |
|
return observations |
|
|
|
def num2micro_homology(self, ref1, ref2, cut1, cut2, ext1=0, ext2=0): |
|
        assert len(ref1) == self.config.ref1len and len(ref2) == self.config.ref2len, "reference lengths do not match the configured ref1len/ref2len"
|
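        # mh_matrix[r, c] is 1 when ref2[r] == ref1[c], restricted to ref1
        # positions before cut1 + ext1 and ref2 positions from cut2 - ext2 on;
        # it is zero-padded to shape (len(ref2) + 1, len(ref1) + 1).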
mh_matrix = F.pad( |
|
torch.from_numpy( |
|
np.frombuffer(ref1[:cut1 + ext1].encode(), dtype=np.int8)[None, :] == np.frombuffer(ref2[cut2 - ext2:].encode(), dtype=np.int8)[:, None] |
|
).to(torch.int16), |
|
pad=(0, len(ref1) - cut1 - ext1 + 1, cut2 - ext2, 1), value=0 |
|
) |
|
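        # Run-length encode the diagonal-flattened match matrix: rep_num holds the
        # run lengths and rep_val the micro-homology length of each match run
        # (zero for non-match runs).  Each match run is extended by one cell (and
        # the following non-match run shortened by one) so the junction immediately
        # after a micro-homology of length k is also labelled k.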
rep_num = torch.cat(( |
|
torch.tensor([-1], dtype=torch.int64), |
|
torch.where(mh_matrix[self.diag_indices].diff())[0], |
|
torch.tensor([(len(ref1) + 1) * (len(ref2) + 1) - 1], dtype=torch.int64) |
|
)).diff() |
|
rep_val = rep_num.clone() |
|
rep_val[0::2] = 0 |
|
rep_num[1::2] = rep_num[1::2] + 1 |
|
rep_num[2::2] = rep_num[2::2] - 1 |
|
rep_val = rep_val.repeat_interleave(rep_num) |
|
return mh_matrix, rep_num, rep_val |
|
|
|
def get_input(self, ref1, ref2, cut1, cut2, mh_matrix, rep_num, rep_val, model): |
|
if model != "FOREcasT": |
|
mh_lens = rep_val.to(torch.int16) |
|
if model in ["inDelphi", "Lindel", "FOREcasT"]: |
|
mask = rep_val == 0 |
|
if model != "FOREcasT": |
|
mh_idxs = rep_num.cumsum(dim=0)[1::2] - 1 |
|
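            # Deletion length of junction (ref2_start = r, ref1_end = c) is
            # (cut1 - c) + (r - cut2), evaluated for every cell and flattened in
            # the same diagonal order as the observations.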
del_lens = (torch.arange(cut1, cut1 - len(ref1) - 1, -1, dtype=torch.int16)[None, :] + torch.arange(-cut2, len(ref2) - cut2 + 1, dtype=torch.int16)[:, None])[self.diag_indices] |
|
if model in ["Lindel", "FOREcasT"]: |
|
dstarts = torch.arange(-cut1, len(ref1) - cut1 + 1, dtype=torch.int16)[None, :].expand(len(ref2) + 1, -1)[self.diag_indices] |
|
if model == "Lindel": |
|
return del_lens, mh_lens, dstarts, mh_idxs |
|
else: |
|
return del_lens, dstarts |
|
elif model == "inDelphi": |
|
gt_poss = (torch.arange(-cut2, len(ref2) - cut2 + 1, dtype=torch.int16) + cut1)[:, None].expand(-1, len(ref1) + 1)[self.diag_indices] |
|
del_lens = torch.cat([del_lens[mask], del_lens[mh_idxs]]) |
|
gt_poss = torch.cat([gt_poss[mask], gt_poss[mh_idxs]]) |
|
mh_lens = torch.cat([mh_lens[mask], mh_lens[mh_idxs]]) |
|
return del_lens, mh_lens, gt_poss |
|
mh_matrix[self.diag_indices] = mh_lens |
|
return mh_matrix |
|
|
|
def get_output(self, observations, rep_num, rep_val, model): |
|
if model in ["inDelphi", "Lindel", "FOREcasT"]: |
|
mask = rep_val == 0 |
|
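            # Sum the observed counts of all junctions that belong to the same
            # micro-homology run (they yield identical edited sequences),
            # producing one total per run.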
counts = torch.zeros(len(rep_num) // 2, dtype=torch.int64) |
|
counts = counts.scatter_add( |
|
dim = 0, |
|
index = torch.arange(len(rep_num) // 2).repeat_interleave(rep_num[1::2]), |
|
src = observations[self.diag_indices][~mask] |
|
) |
|
if model != "FOREcasT": |
|
mh_idxs = rep_num.cumsum(dim=0)[1::2] - 1 |
|
if model in ["Lindel", "FOREcasT"]: |
|
if model == "Lindel": |
|
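                    # Lindel: collapse each micro-homology group onto the last
                    # cell of its (extended) run and zero out the other cells.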
observations[self.diag_indices[0][~mask], self.diag_indices[1][~mask]] = 0 |
|
observations[self.diag_indices[0][mh_idxs], self.diag_indices[1][mh_idxs]] = counts |
|
return observations[self.diag_indices] |
|
else: |
|
observations = observations.to(torch.float32) |
|
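                    # FOREcasT: spread each micro-homology group's total count
                    # evenly over its equivalent junction cells.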
observations[self.diag_indices[0][~mask], self.diag_indices[1][~mask]] = (counts / rep_num[1::2]).repeat_interleave(rep_num[1::2]) |
|
return observations[self.diag_indices] |
|
elif model == "inDelphi": |
|
counts = torch.cat([observations[self.diag_indices][mask], counts]) |
|
return counts |
|
return None |
|
|
|
|
|
def CRISPR_transformer_trans_func(self, examples): |
|
ref1s, ref2s, ob_ref1s, ob_ref2s, ob_vals = [], [], [], [], [] |
|
for ref1, ref2, cuts in zip(examples['ref1'], examples['ref2'], examples['cuts']): |
|
for cut in cuts: |
|
|
|
ref1s.append(ref1) |
|
ref2s.append(ref2) |
|
|
|
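                # Store the observation matrix sparsely: coordinates of the
                # nonzero junctions plus their counts.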
observations = self.get_observations(ref1, ref2, cut) |
|
ob_ref2, ob_ref1 = observations.nonzero(as_tuple=True) |
|
ob_ref1s.append(ob_ref1) |
|
ob_ref2s.append(ob_ref2) |
|
ob_vals.append(observations[ob_ref2, ob_ref1]) |
|
return { |
|
'ref1': ref1s, |
|
'ref2': ref2s, |
|
'ob_ref1': ob_ref1s, |
|
'ob_ref2': ob_ref2s, |
|
'ob_val': ob_vals |
|
} |
|
|
|
def CRISPR_diffuser_trans_func(self, examples): |
|
ref1s, ref2s, cut1s, cut2s, mh_ref1s, mh_ref2s, mh_vals, ob_ref1s, ob_ref2s, ob_vals = [], [], [], [], [], [], [], [], [], [] |
|
for ref1, ref2, cuts in zip(examples['ref1'], examples['ref2'], examples['cuts']): |
|
for cut in cuts: |
|
|
|
ref1s.append(ref1) |
|
ref2s.append(ref2) |
|
cut1, cut2 = cut['cut1'], cut['cut2'] |
|
cut1s.append(cut1) |
|
cut2s.append(cut2) |
|
|
|
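                # Build the micro-homology channel for this cut and store it
                # sparsely (coordinates of nonzero cells plus their
                # micro-homology lengths).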
mh_matrix, rep_num, rep_val = self.num2micro_homology(ref1, ref2, cut1, cut2) |
|
mh_matrix = self.get_input(ref1, ref2, cut1, cut2, mh_matrix, rep_num, rep_val, "CRISPR_diffuser") |
|
mh_ref2, mh_ref1 = mh_matrix.nonzero(as_tuple=True) |
|
mh_ref1s.append(mh_ref1) |
|
mh_ref2s.append(mh_ref2) |
|
mh_vals.append(mh_matrix[mh_ref2, mh_ref1]) |
|
|
|
observations = self.get_observations(ref1, ref2, cut) |
|
ob_ref2, ob_ref1 = observations.nonzero(as_tuple=True) |
|
ob_ref1s.append(ob_ref1) |
|
ob_ref2s.append(ob_ref2) |
|
ob_vals.append(observations[ob_ref2, ob_ref1]) |
|
return { |
|
'ref1': ref1s, |
|
'ref2': ref2s, |
|
'cut1': cut1s, |
|
'cut2': cut2s, |
|
'mh_ref1': mh_ref1s, |
|
'mh_ref2': mh_ref2s, |
|
'mh_val': mh_vals, |
|
'ob_ref1': ob_ref1s, |
|
'ob_ref2': ob_ref2s, |
|
'ob_val': ob_vals |
|
} |
|
|
|
def inDelphi_trans_func(self, examples): |
|
refs, cut_list, mh_del_lenss, mh_mh_lenss, mh_gt_posss, mh_gc_fracss, mh_countss, mhless_countss, insert_1bpss = [], [], [], [], [], [], [], [], [] |
|
for ref1, ref2, cuts in zip(examples['ref1'], examples['ref2'], examples['cuts']): |
|
for cut in cuts: |
|
|
|
cut1, cut2 = cut['cut1'], cut['cut2'] |
|
refs.append(ref1[:cut1] + ref2[cut2:]) |
|
cut_list.append(cut1) |
|
|
|
mh_matrix, rep_num, rep_val = self.num2micro_homology(ref1, ref2, cut1, cut2) |
|
del_lens, mh_lens, gt_poss = self.get_input(ref1, ref2, cut1, cut2, mh_matrix, rep_num, rep_val, "inDelphi") |
|
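                # Keep deletions with positive length below DELLEN_LIMIT whose
                # deleted interval [gt_pos - del_len, gt_pos] spans the cut site.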
mask_del = (del_lens > 0) & (del_lens < self.config.DELLEN_LIMIT) & (gt_poss >= cut1) & (gt_poss - del_lens <= cut1) |
|
del_lens, mh_lens, gt_poss = del_lens[mask_del], mh_lens[mask_del], gt_poss[mask_del] |
|
mask_mh = mh_lens > 0 |
|
mh_del_lenss.append(del_lens[mask_mh]) |
|
mh_gt_posss.append(gt_poss[mask_mh]) |
|
mh_mh_lenss.append(mh_lens[mask_mh]) |
|
mh_gc_fracss.append([gc_content(refs[-1][gt_pos - mh_len:gt_pos]) for mh_len, gt_pos in zip(mh_mh_lenss[-1], mh_gt_posss[-1])]) |
|
|
|
observations, insert_counts = self.get_observations(ref1, ref2, cut, INSERT_LIMIT = 1) |
|
insert_1bpss.append(insert_counts) |
|
counts = self.get_output(observations, rep_num, rep_val, "inDelphi") |
|
counts = counts[mask_del] |
|
mh_countss.append(counts[mask_mh]) |
|
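                # Aggregate counts of micro-homology-free deletions by deletion
                # length; index d - 1 stores the total count of length-d deletions.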
mhless_counts = torch.zeros(self.config.DELLEN_LIMIT - 1, dtype=torch.int64) |
|
mhless_counts = mhless_counts.scatter_add(dim = 0, index=(del_lens[~mask_mh] - 1).to(torch.int64), src=counts[~mask_mh]) |
|
mhless_countss.append(mhless_counts) |
|
return { |
|
'ref': refs, |
|
'cut': cut_list, |
|
'mh_gt_pos': mh_gt_posss, |
|
'mh_del_len': mh_del_lenss, |
|
'mh_mh_len': mh_mh_lenss, |
|
'mh_gc_frac': mh_gc_fracss, |
|
'mh_count': mh_countss, |
|
'mhless_count': mhless_countss, |
|
'insert_1bp': insert_1bpss |
|
} |
|
|
|
def Lindel_trans_func(self, examples): |
|
refs, cut_list, del_counts, ins_counts, dstarts_list, del_lens_list, mh_lens_list = [], [], [], [], [], [], [] |
|
for ref1, ref2, cuts in zip(examples['ref1'], examples['ref2'], examples['cuts']): |
|
for cut in cuts: |
|
|
|
cut1, cut2 = cut['cut1'], cut['cut2'] |
|
refs.append(ref1[:cut1] + ref2[cut2:]) |
|
cut_list.append(cut1) |
|
|
|
mh_matrix, rep_num, rep_val = self.num2micro_homology(ref1, ref2, cut1, cut2, ext1=2, ext2=1) |
|
del_lens, mh_lens, dstarts, mh_idxs = self.get_input(ref1, ref2, cut1, cut2, mh_matrix, rep_num, rep_val, "Lindel") |
|
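                # Keep deletions with positive length below Lindel_dlen that
                # overlap a small window around the cut; for micro-homology
                # deletions only the terminal (canonical) junction is retained.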
mask_del_len = (del_lens > 0).logical_and(del_lens < self.config.Lindel_dlen).logical_and(dstarts < 3).logical_and(dstarts + del_lens > -2) |
|
mask_mh_end = torch.full(mask_del_len.shape, False) |
|
mask_mh_end[mh_idxs] = True |
|
mask = mask_del_len.logical_and((mh_lens == 0).logical_or(mask_mh_end)) |
|
mh_lens = mh_lens[mask] |
|
dstarts = dstarts[mask] |
|
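                # Deletion starts falling strictly inside a micro-homology are
                # shifted back to the cut, and micro-homology lengths are clipped
                # to the deletion length and to Lindel_mh_len.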
dstarts[(dstarts > 0).logical_and(dstarts <= mh_lens)] = 0 |
|
del_lens = del_lens[mask] |
|
mh_lens = torch.min(del_lens, mh_lens).clamp(0, self.config.Lindel_mh_len) |
|
dstarts_list.append(dstarts) |
|
del_lens_list.append(del_lens) |
|
mh_lens_list.append(mh_lens) |
|
|
|
observations, insert_counts = self.get_observations(ref1, ref2, cut, INSERT_LIMIT=2, count_long_insertion=True) |
|
ins_counts.append(insert_counts) |
|
counts = self.get_output(observations, rep_num, rep_val, "Lindel") |
|
del_counts.append(counts[mask_del_len]) |
|
return { |
|
'ref': refs, |
|
'cut': cut_list, |
|
'del_count': del_counts, |
|
'ins_count': ins_counts, |
|
'dstart': dstarts_list, |
|
'del_len': del_lens_list, |
|
'mh_len': mh_lens_list |
|
} |
|
|
|
def FOREcasT_trans_func(self, examples): |
|
refs, cut_list, total_counts = [], [], [] |
|
for ref1, ref2, cuts in zip(examples['ref1'], examples['ref2'], examples['cuts']): |
|
for cut in cuts: |
|
|
|
cut1, cut2 = cut["cut1"], cut["cut2"] |
|
refs.append(ref1[:cut1] + ref2[cut2:]) |
|
                cut_list.append(cut1)
|
|
|
mh_matrix, rep_num, rep_val = self.num2micro_homology(ref1, ref2, cut1, cut2) |
|
del_lens, dstarts = self.get_input(ref1, ref2, cut1, cut2, mh_matrix, rep_num, rep_val, "FOREcasT") |
|
mask_del_len = (del_lens >= 0).logical_and(del_lens <= self.config.FOREcasT_MAX_DEL_SIZE).logical_and(dstarts <= 0).logical_and(dstarts + del_lens >= 0) |
|
|
|
observations, insert_counts = self.get_observations(ref1, ref2, cut, RANDOM_INSERT_LIMIT=0, INSERT_LIMIT=2) |
|
counts = self.get_output(observations, rep_num, rep_val, "FOREcasT") |
|
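                # The FOREcasT target concatenates per-junction deletion counts
                # inside the allowed window with the 1-2 bp insertion counts.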
total_counts.append(torch.cat([ |
|
counts[mask_del_len], |
|
                    insert_counts.to(torch.float32)
|
])) |
|
return { |
|
'ref': refs, |
|
'cut': cut_list, |
|
'count': total_counts |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
features_CRISPR_transformer = datasets.Features({ |
|
'ref1': datasets.Value('string'), |
|
'ref2': datasets.Value('string'), |
|
'ob_ref1': datasets.Sequence(datasets.Value('int16')), |
|
'ob_ref2': datasets.Sequence(datasets.Value('int16')), |
|
'ob_val': datasets.Sequence(datasets.Value('int64')) |
|
}) |
|
|
|
features_CRISPR_diffuser = datasets.Features({ |
|
'ref1': datasets.Value('string'), |
|
'ref2': datasets.Value('string'), |
|
'cut1': datasets.Value('int16'), |
|
'cut2': datasets.Value('int16'), |
|
'mh_ref1': datasets.Sequence(datasets.Value('int16')), |
|
'mh_ref2': datasets.Sequence(datasets.Value('int16')), |
|
'mh_val': datasets.Sequence(datasets.Value('int16')), |
|
'ob_ref1': datasets.Sequence(datasets.Value('int16')), |
|
'ob_ref2': datasets.Sequence(datasets.Value('int16')), |
|
'ob_val': datasets.Sequence(datasets.Value('int64')) |
|
}) |
|
|
|
features_inDelphi = datasets.Features({ |
|
'ref': datasets.Value('string'), |
|
'cut': datasets.Value('int16'), |
|
'mh_gt_pos': datasets.Sequence(datasets.Value('int16')), |
|
'mh_del_len': datasets.Sequence(datasets.Value('int16')), |
|
'mh_mh_len': datasets.Sequence(datasets.Value('int16')), |
|
'mh_gc_frac': datasets.Sequence(datasets.Value('float32')), |
|
'mh_count': datasets.Sequence(datasets.Value('int64')), |
|
'mhless_count': datasets.Sequence(datasets.Value('int64')), |
|
'insert_1bp': datasets.Sequence(datasets.Value('int64')) |
|
}) |
|
|
|
features_Lindel = datasets.Features({ |
|
'ref': datasets.Value('string'), |
|
'cut': datasets.Value('int16'), |
|
'del_count': datasets.Sequence(datasets.Value('int64')), |
|
        'ins_count': datasets.Sequence(datasets.Value('int64')),
        'dstart': datasets.Sequence(datasets.Value('int16')),
        'del_len': datasets.Sequence(datasets.Value('int16')),
        'mh_len': datasets.Sequence(datasets.Value('int16'))
|
}) |
|
|
|
features_FOREcasT = datasets.Features({ |
|
'ref': datasets.Value('string'), |
|
'cut': datasets.Value('int16'), |
|
'count': datasets.Sequence(datasets.Value('float32')) |
|
}) |
|
|
|
VERSION = datasets.Version("1.0.0") |
|
|
|
BUILDER_CONFIG_CLASS = CRISPRDataConfig |
|
|
|
BUILDER_CONFIGS = [ |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(A2-|A7-|D2-)", file)), |
|
features = features_CRISPR_transformer, |
|
name = "SX_spcas9_CRISPR_transformer", |
|
version = VERSION, |
|
description = "Data of spcas9 protein of sx and lcy for CRISPR transformer training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(X-|x-|B2-|36t-)", file)), |
|
features = features_CRISPR_transformer, |
|
name = "SX_spymac_CRISPR_transformer", |
|
version = VERSION, |
|
description = "Data of spymac protein of sx and lcy for CRISPR transformer training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(i10t-|i83-)", file)), |
|
features = features_CRISPR_transformer, |
|
name = "SX_ispymac_CRISPR_transformer", |
|
version = VERSION, |
|
description = "Data of ispymac protein of sx and lcy for CRISPR transformer training" |
|
), |
|
|
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(A2-|A7-|D2-)", file)), |
|
features = features_CRISPR_diffuser, |
|
name = "SX_spcas9_CRISPR_diffuser", |
|
version = VERSION, |
|
description = "Data of spcas9 protein of sx and lcy for CRISPR diffuser training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(X-|x-|B2-|36t-)", file)), |
|
features = features_CRISPR_diffuser, |
|
name = "SX_spymac_CRISPR_diffuser", |
|
version = VERSION, |
|
description = "Data of spymac protein of sx and lcy for CRISPR diffuser training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(i10t-|i83-)", file)), |
|
features = features_CRISPR_diffuser, |
|
name = "SX_ispymac_CRISPR_diffuser", |
|
version = VERSION, |
|
description = "Data of ispymac protein of sx and lcy for CRISPR diffuser training" |
|
), |
|
|
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(A2-|A7-|D2-)", file)), |
|
features = features_inDelphi, |
|
name = "SX_spcas9_inDelphi", |
|
version = VERSION, |
|
description = "Data of spcas9 protein of sx and lcy for inDelphi training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(X-|x-|B2-|36t-)", file)), |
|
features = features_inDelphi, |
|
name = "SX_spymac_inDelphi", |
|
version = VERSION, |
|
description = "Data of spymac protein of sx and lcy for inDelphi training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(i10t-|i83-)", file)), |
|
features = features_inDelphi, |
|
name = "SX_ispymac_inDelphi", |
|
version = VERSION, |
|
description = "Data of ispymac protein of sx and lcy for inDelphi training" |
|
), |
|
|
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(A2-|A7-|D2-)", file)), |
|
features = features_Lindel, |
|
name = "SX_spcas9_Lindel", |
|
version = VERSION, |
|
description = "Data of spcas9 protein of sx and lcy for Lindel training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(X-|x-|B2-|36t-)", file)), |
|
features = features_Lindel, |
|
name = "SX_spymac_Lindel", |
|
version = VERSION, |
|
description = "Data of spymac protein of sx and lcy for Lindel training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(i10t-|i83-)", file)), |
|
features = features_Lindel, |
|
name = "SX_ispymac_Lindel", |
|
version = VERSION, |
|
description = "Data of ispymac protein of sx and lcy for Lindel training" |
|
), |
|
|
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(A2-|A7-|D2-)", file)), |
|
features = features_FOREcasT, |
|
name = "SX_spcas9_FOREcasT", |
|
version = VERSION, |
|
description = "Data of spcas9 protein of sx and lcy for FOREcasT training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(X-|x-|B2-|36t-)", file)), |
|
features = features_FOREcasT, |
|
name = "SX_spymac_FOREcasT", |
|
version = VERSION, |
|
description = "Data of spymac protein of sx and lcy for FOREcasT training" |
|
), |
|
CRISPRDataConfig( |
|
author_filter = lambda author, ref1, ref2, cut1, cut2: author == "SX", |
|
file_filter = lambda file, ref1, ref2, cut1, cut2, author: bool(re.search("^(i10t-|i83-)", file)), |
|
features = features_FOREcasT, |
|
name = "SX_ispymac_FOREcasT", |
|
version = VERSION, |
|
description = "Data of ispymac protein of sx and lcy for FOREcasT training" |
|
), |
|
] |
|
|
|
|
|
|
|
def _info(self): |
|
return datasets.DatasetInfo( |
|
|
|
description="""\ |
|
            This dataset is used to train deep learning models that predict CRISPR editing outcomes.
|
""", |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
|
|
|
|
|
|
|
|
|
|
|
|
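        # Optionally route the download through an HTTP(S) proxy taken from the
        # MY_HF_DATASETS_DOWNLOAD_MANAGER_PROXY environment variable.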
proxy_url = os.environ.get("MY_HF_DATASETS_DOWNLOAD_MANAGER_PROXY") |
|
if proxy_url: |
|
from datasets.download.download_manager import DownloadManager, DownloadConfig |
|
dl_manager_proxy = DownloadManager( |
|
download_config=DownloadConfig( |
|
proxies={ |
|
"http": proxy_url, |
|
"https": proxy_url |
|
} |
|
) |
|
) |
|
downloaded_files = dl_manager_proxy.download("https://github.com/ljw20180420/CRISPRdata/raw/refs/heads/main/dataset.json.gz") |
|
else: |
|
downloaded_files = dl_manager.download("https://github.com/ljw20180420/CRISPRdata/raw/refs/heads/main/dataset.json.gz") |
|
|
|
|
|
|
|
|
|
ds = datasets.load_dataset('json', data_files=downloaded_files, features=datasets.Features({ |
|
'ref1': datasets.Value('string'), |
|
'ref2': datasets.Value('string'), |
|
'cuts': [datasets.Features({ |
|
'cut1': datasets.Value('int16'), |
|
'cut2': datasets.Value('int16'), |
|
'authors': [datasets.Features({ |
|
'author': datasets.Value('string'), |
|
'files': [datasets.Features({ |
|
'file': datasets.Value('string'), |
|
'ref1_end': datasets.Sequence(datasets.Value('int16')), |
|
'ref2_start': datasets.Sequence(datasets.Value('int16')), |
|
'random_insert': datasets.Sequence(datasets.Value('string')), |
|
'count': datasets.Sequence(datasets.Value('int64')) |
|
})] |
|
})] |
|
})] |
|
})) |
|
ds = ds.map(self.filter_refs, batched=True) |
|
if self.config.name.endswith("_CRISPR_transformer"): |
|
ds = ds.map(self.CRISPR_transformer_trans_func, batched=True, remove_columns=['cuts']) |
|
elif self.config.name.endswith("_CRISPR_diffuser"): |
|
ds = ds.map(self.CRISPR_diffuser_trans_func, batched=True, remove_columns=['cuts']) |
|
elif self.config.name.endswith("_inDelphi"): |
|
ds = ds.map(self.inDelphi_trans_func, batched=True, remove_columns=['ref1', 'ref2', 'cuts']) |
|
elif self.config.name.endswith("_Lindel"): |
|
ds = ds.map(self.Lindel_trans_func, batched=True, remove_columns=['ref1', 'ref2', 'cuts']) |
|
elif self.config.name.endswith("_FOREcasT"): |
|
ds = ds.map(self.FOREcasT_trans_func, batched=True, remove_columns=['ref1', 'ref2', 'cuts']) |
|
ds = self.split_train_valid_test(ds) |
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
|
|
gen_kwargs={ |
|
"dataset": ds['train'], |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.VALIDATION, |
|
|
|
gen_kwargs={ |
|
"dataset": ds['validation'], |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, |
|
|
|
gen_kwargs={ |
|
"dataset": ds['test'], |
|
}, |
|
) |
|
] |
|
|
|
|
|
def _generate_examples(self, dataset): |
|
|
|
|
|
        for idx, example in enumerate(dataset):
|
            yield idx, example
|
|
|
def split_train_valid_test(self, ds): |
|
|
|
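        # Split off the combined validation + test fraction with the configured
        # seed, then divide that held-out chunk into validation and test without
        # further shuffling.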
ds = ds['train'].train_test_split(test_size=self.config.test_ratio + self.config.validation_ratio, shuffle=True, seed=self.config.seed) |
|
ds_valid_test = ds['test'].train_test_split(test_size=self.config.test_ratio / (self.config.test_ratio + self.config.validation_ratio), shuffle=False) |
|
ds['validation'] = ds_valid_test.pop('train') |
|
ds['test'] = ds_valid_test.pop('test') |
|
return ds |
|
|
|
def filter_refs(self, examples): |
|
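        # Apply the configured ref/cut/author/file filters as a cascade; reference
        # pairs whose cut list ends up empty are dropped.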
ref1s, ref2s, cutss = [], [], [] |
|
for ref1, ref2, cuts in zip(examples['ref1'], examples['ref2'], examples['cuts']): |
|
if self.config.ref_filter is None or self.config.ref_filter(ref1, ref2): |
|
if self.config.cut_filter is not None or self.config.author_filter is not None or self.config.file_filter is not None: |
|
cuts = self.filter_cuts(cuts, ref1, ref2) |
|
if cuts: |
|
ref1s.append(ref1) |
|
ref2s.append(ref2) |
|
cutss.append(cuts) |
|
return { |
|
"ref1": ref1s, |
|
"ref2": ref2s, |
|
"cuts": cutss |
|
} |
|
|
|
def filter_cuts(self, cuts, ref1, ref2): |
|
new_cuts = [] |
|
for cut in cuts: |
|
if self.config.cut_filter is None or self.config.cut_filter(cut["cut1"], cut["cut2"], ref1, ref2): |
|
if self.config.author_filter is not None or self.config.file_filter is not None: |
|
cut["authors"] = self.filter_authors(cut["authors"], ref1, ref2, cut["cut1"], cut["cut2"]) |
|
if cut["authors"]: |
|
new_cuts.append(cut) |
|
return new_cuts |
|
|
|
def filter_authors(self, authors, ref1, ref2, cut1, cut2): |
|
new_authors = [] |
|
for author in authors: |
|
if self.config.author_filter is None or self.config.author_filter(author["author"], ref1, ref2, cut1, cut2): |
|
if self.config.file_filter is not None: |
|
author["files"] = self.filter_files(author["files"], ref1, ref2, cut1, cut2, author["author"]) |
|
if author["files"]: |
|
new_authors.append(author) |
|
return new_authors |
|
|
|
def filter_files(self, files, ref1, ref2, cut1, cut2, author): |
|
return [file for file in files if self.config.file_filter(file["file"], ref1, ref2, cut1, cut2, author)] |
|
|