"""Extract EMBER feature vectors for the PE binaries referenced by the Quo Vadis
dataset, checkpointing progress so that interrupted runs can be resumed."""

import typing as t
from pathlib import Path

import numpy as np
from ember import PEFeatureExtractor
from rich import print
from tqdm import tqdm

# Length of the feature vector produced by the EMBER feature extractor
# (2381 for feature set version 2).
EMBER_DIM = 2381


def find_filename(file_hash: str, folder: Path) -> t.Optional[str]:
    """Look for a file named after file_hash in the subfolders of folder.

    The filename may carry a .dat suffix or no suffix at all. Returns the
    path to the first match as a string, or None if nothing is found."""
    for file in folder.rglob(f"{file_hash}*.dat"):
        return str(file)
    for file in folder.rglob(f"{file_hash}*"):
        return str(file)
    return None


def read_quovadis_hashes(
    quovadis_path: Path,
) -> t.Tuple[t.Dict[str, t.List[str]], t.Dict[str, t.List[str]]]:
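    """Collect sample hashes for the Quo Vadis train and test splits.

    Assumes the directory layout the rest of this script relies on: quovadis_path
    contains windows_emulation_trainset/ and windows_emulation_testset/, each with
    one report_<type> subfolder per malware family plus clean/syswow64 folders of
    benign samples. Returns one {binary_type: [hashes]} mapping per split."""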

    def _read_files(folder: Path) -> t.List[str]:
        files = []
        for file in tqdm(folder.glob("*"), desc=f"[*] Reading files in {folder.name:<25}"):
            if file.is_file():
                # Hashes are stored as bare filenames, optionally carrying a
                # .dat (binary) or .json (report) suffix.
                files.append(file.name.replace(".dat", "").replace(".json", ""))
        return files

    def _read_dataset(folder: Path) -> t.Dict[str, t.List[str]]:
        hashes = {}
        for subfolder in folder.glob("*"):
            if subfolder.is_dir():
                if "clean" in subfolder.name or "syswow64" in subfolder.name:
                    # Both folder names hold benign samples; extend instead of
                    # assigning so one does not overwrite the other.
                    hashes.setdefault("benign", []).extend(_read_files(subfolder))
                else:
                    malware_type = subfolder.name.replace("report_", "")
                    hashes[malware_type] = _read_files(subfolder)
        return hashes

    train_quo_vadis_path = quovadis_path / "windows_emulation_trainset"
    train_hashes = _read_dataset(train_quo_vadis_path)

    test_quo_vadis_path = quovadis_path / "windows_emulation_testset"
    test_hashes = _read_dataset(test_quo_vadis_path)

    return train_hashes, test_hashes


def get_ember_features(file_hash: str, pe_root: Path, extractor: PEFeatureExtractor) -> np.ndarray:
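    """Locate the raw PE file for file_hash under pe_root and return its EMBER
    feature vector as a float32 array of length EMBER_DIM."""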
    pe_path = find_filename(file_hash, pe_root)
    if pe_path is None:
        # Fail with a meaningful error rather than letting open(None) raise a TypeError.
        raise FileNotFoundError(f"No PE file found for hash {file_hash} under {pe_root}")
    with open(pe_path, "rb") as f:
        pe_bytes = f.read()

    features = np.array(extractor.feature_vector(pe_bytes), dtype=np.float32)
    return features


def extract_and_write_features(
    hashes: t.Dict[str, t.List[str]],
    raw_pe_root_path: Path,
    output_dir: Path,
    write_every: int = 10,
    limit: t.Optional[int] = None,
) -> None:
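    """Extract EMBER features for every binary type in hashes and write them to
    output_dir/<binary_type>/ as features.npy, next to hashes.txt (completed
    "index,hash" lines) and errors.log ("index,hash,error" lines). State is
    flushed every write_every samples so an interrupted run can resume where it
    left off; limit optionally caps the number of hashes per binary type."""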
    print(f"[*] Starting work on {output_dir.name}")
    output_dir.mkdir(parents=True, exist_ok=True)

    def _save_intermediate_state(
        features_file: Path,
        features_numpy_arr: np.ndarray,
        completed_hashes_file: Path,
        completed_log: t.List[str],
        errors_log_file: Path,
        errors_log: t.List[str],
    ) -> None:
        np.save(features_file, features_numpy_arr)
        with open(completed_hashes_file, "w") as f:
            for h in completed_log:
                f.write(f"{h}\n")
        with open(errors_log_file, "w") as f:
            for h in errors_log:
                f.write(f"{h}\n")

    def _parse_binary_type(hashes: t.List[str], binary_type: str) -> None:
        print(f"\n[*] Parsing '{binary_type}' hashes!\n")
        output_dir_subtype = output_dir / binary_type
        output_dir_subtype.mkdir(parents=True, exist_ok=True)

        features_file = output_dir_subtype / "features.npy"
        completed_hashes_file = output_dir_subtype / "hashes.txt"
        errors_log_file = output_dir_subtype / "errors.log"

        files_exist = all(f.exists() for f in [features_file, completed_hashes_file, errors_log_file])
        if files_exist:
            # Resume a previous run: reload the feature matrix and both logs,
            # then drop every hash that was already completed or errored.
            features_numpy_arr = np.load(features_file)

            with open(completed_hashes_file, "r") as f:
                completed_log = [line.strip() for line in f]
            completed_hashes = [line.split(",")[1].strip() for line in completed_log]

            with open(errors_log_file, "r") as f:
                errors_log = [line.strip() for line in f]
            errored_hashes = [e.split(",")[1].strip() for e in errors_log]

            remaining_hashes = [h for h in hashes if h not in completed_hashes]
            remaining_hashes = [h for h in remaining_hashes if h not in errored_hashes]
            assert len(remaining_hashes) == len(hashes) - len(completed_hashes) - len(errors_log), (
                f"Remaining hashes do not match: {len(remaining_hashes)} != "
                f"{len(hashes)} - {len(completed_hashes)} - {len(errors_log)}"
            )

            print(f"[!] Found: {len(completed_hashes)} completed | {len(errors_log)} errored | {len(remaining_hashes)} remaining!")
            hashes = remaining_hashes
        else:
            features_numpy_arr = np.zeros((len(hashes), EMBER_DIM), dtype=np.float32)
            completed_log = []
            errors_log = []

        extractor = PEFeatureExtractor(print_feature_warning=False)
        # Hashes are processed in order, so the next free row of the feature
        # matrix sits directly after the completed and errored entries.
        start_idx = len(completed_log) + len(errors_log)
        print(f"[*] Starting from index {start_idx}")

        pbar = tqdm(hashes, desc="[*] Extracting features...")
        for i, h in enumerate(pbar):
            i += start_idx  # index into the full-size feature matrix
            try:
                features = get_ember_features(h, raw_pe_root_path, extractor)
                features_numpy_arr[i] = features
                completed_log.append(f"{i},{h}")
            except KeyboardInterrupt:
                raise
            except Exception as e:
                err_msg = f"{i},{h},{e}"
                print(f"[-] {err_msg}")
                errors_log.append(err_msg)
                pbar.set_description(f"[*] Extracting features... | Errors: {len(errors_log)}")
            if i > 0 and i % write_every == 0:
                _save_intermediate_state(
                    features_file, features_numpy_arr, completed_hashes_file, completed_log, errors_log_file, errors_log
                )

        _save_intermediate_state(
            features_file, features_numpy_arr, completed_hashes_file, completed_log, errors_log_file, errors_log
        )

        # Errored samples leave all-zero rows in the feature matrix; drop them so
        # features.npy lines up row-for-row with hashes.txt. Only prune while the
        # matrix is still full-size, i.e. it was not already pruned by an earlier,
        # fully completed run.
        if errors_log and features_numpy_arr.shape[0] == start_idx + len(hashes):
            errored_indices = [int(e.split(",")[0]) for e in errors_log]
            assert np.all(features_numpy_arr[errored_indices] == 0), "Some errored indices are not 0"
            features_numpy_arr = np.delete(features_numpy_arr, errored_indices, axis=0)
            np.save(features_file, features_numpy_arr)

        print(f"[*] Finished work on {output_dir.name}")
        print(f"[INFO] {len(completed_log)} hashes completed | {len(errors_log)} errored | {len(features_numpy_arr)} features")

    for binary_type in hashes.keys():
        _parse_binary_type(hashes[binary_type][:limit], binary_type)
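

# A minimal sketch of how the artifacts written above could be consumed,
# assuming the output layout produced by this script
# (<output_dir>/<binary_type>/features.npy + hashes.txt). The helper name
# load_split_features and the label convention (0 for "benign", 1 for every
# malware type) are illustrative choices, not part of the original pipeline.
def load_split_features(split_dir: Path) -> t.Tuple[np.ndarray, np.ndarray, t.List[str]]:
    """Stack features.npy from every binary_type folder under split_dir and
    return (features, labels, hashes) with rows aligned across all three."""
    feature_blocks, labels, all_hashes = [], [], []
    for subdir in sorted(split_dir.iterdir()):
        if not subdir.is_dir():
            continue
        features = np.load(subdir / "features.npy")
        # hashes.txt stores "index,hash" lines for every completed sample.
        hashes = [line.split(",")[1] for line in (subdir / "hashes.txt").read_text().splitlines()]
        assert features.shape[0] == len(hashes), f"Row/hash mismatch in {subdir.name}"
        feature_blocks.append(features)
        labels.extend([0 if subdir.name == "benign" else 1] * len(hashes))
        all_hashes.extend(hashes)
    return np.vstack(feature_blocks), np.array(labels), all_hashes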


if __name__ == "__main__":
    quovadis_path = Path("quovadis")
    quovadis_ember_path = Path("quovadis_ember")
    quovadis_ember_path.mkdir(parents=True, exist_ok=True)

    train_hashes, test_hashes = read_quovadis_hashes(quovadis_path)

    raw_pe_root_path_train = Path("/data/datasets/pe/pe_trainset")
    raw_pe_root_path_test = Path("/data/datasets/pe/pe_testset")

    extract_and_write_features(train_hashes, raw_pe_root_path_train, quovadis_ember_path / "train")
    extract_and_write_features(test_hashes, raw_pe_root_path_test, quovadis_ember_path / "test")