from pathlib import Path
from tqdm import tqdm
from rich import print
from ember import PEFeatureExtractor
import numpy as np
import typing as t
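
# Dimensionality of a single EMBER (v2) feature vector.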
EMBER_DIM = 2381
def find_filename(file_hash: str, folder: Path) -> t.Optional[str]:
    """Looks for a file named after file_hash in the subfolders of folder.
    The filename may carry a .dat suffix or no suffix at all.
    Returns the path to the file, or None if no match is found."""
    for file in folder.rglob(f"{file_hash}*.dat"):
        return str(file)
    for file in folder.rglob(f"{file_hash}*"):
        return str(file)
    return None
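

# Expected on-disk layout (inferred from the globbing below):
# quovadis/windows_emulation_{trainset,testset}/ holds one subfolder per class.
# Subfolders whose names contain "clean" or "syswow64" are pooled under the
# "benign" label; every other report_<type> folder becomes its own malware
# class. Files are named by their hash, optionally suffixed .dat or .json.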
def read_quovadis_hashes(
    quovadis_path: Path,
) -> t.Tuple[t.Dict[str, t.List[str]], t.Dict[str, t.List[str]]]:
    def _read_files(folder: Path) -> t.List[str]:
        files = []
        for file in tqdm(folder.glob("*"), desc=f"[*] Reading files in {folder.name:<25}"):
            if file.is_file():
                files.append(file.name.replace(".dat", "").replace(".json", ""))
        return files

    def _read_dataset(folder: Path) -> t.Dict[str, t.List[str]]:
        hashes = {}
        for subfolder in folder.glob("*"):
            if subfolder.is_dir():
                if "clean" in subfolder.name or "syswow64" in subfolder.name:
                    # both clean and syswow64 folders hold benign samples:
                    # extend instead of assigning, so one folder does not
                    # silently overwrite the other under the "benign" key
                    files = _read_files(subfolder)
                    hashes.setdefault("benign", []).extend(files)
                else:
                    malware_type = subfolder.name.replace("report_", "")
                    files = _read_files(subfolder)
                    hashes[malware_type] = files
        return hashes

    train_quo_vadis_path = quovadis_path / "windows_emulation_trainset"
    train_hashes = _read_dataset(train_quo_vadis_path)
    test_quo_vadis_path = quovadis_path / "windows_emulation_testset"
    test_hashes = _read_dataset(test_quo_vadis_path)
    return train_hashes, test_hashes
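

# read_quovadis_hashes returns (train, test) dicts mapping binary type to a
# list of sample hashes, e.g. {"benign": [...], "ransomware": [...]} - the
# malware type names are illustrative, taken from the report_* folder names.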
def get_ember_features(file_hash: str, pe_path: Path, extractor: PEFeatureExtractor) -> np.ndarray:
    filename = find_filename(file_hash, pe_path)
    if filename is None:
        # surfaces as a logged error in the extraction loop below
        raise FileNotFoundError(f"No file matching hash {file_hash} under {pe_path}")
    with open(filename, "rb") as f:
        pe_bytes = f.read()
    return np.array(extractor.feature_vector(pe_bytes), dtype=np.float32)
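

# For every binary type this writes three artifacts to output_dir/<type>/:
#   features.npy - (n_hashes, EMBER_DIM) float32 matrix of EMBER features
#   hashes.txt   - "index,hash" lines for successfully processed samples
#   errors.log   - "index,hash,error" lines for failed samples
# If all three already exist, the run resumes from where it stopped.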
def extract_and_write_features(
    hashes: t.Dict[str, t.List[str]],
    raw_pe_root_path: Path,
    output_dir: Path,
    write_every: int = 10,
    limit: t.Optional[int] = None,
) -> None:
    print(f"[*] Starting work on {output_dir.name}")
    output_dir.mkdir(parents=True, exist_ok=True)

    def _save_intermediate_state(
        features_file: Path,
        features_numpy_arr: np.ndarray,
        completed_hashes_file: Path,
        completed_log: t.List[str],
        errors_log_file: Path,
        errors_log: t.List[str],
    ) -> None:
        np.save(features_file, features_numpy_arr)
        with open(completed_hashes_file, "w") as f:
            for h in completed_log:
                f.write(f"{h}\n")
        with open(errors_log_file, "w") as f:
            for h in errors_log:
                f.write(f"{h}\n")

    def _parse_binary_type(hashes: t.List[str], binary_type: str) -> None:
        print(f"\n[*] Parsing '{binary_type}' hashes!\n")
        output_dir_subtype = output_dir / binary_type
        output_dir_subtype.mkdir(parents=True, exist_ok=True)
        features_file = output_dir_subtype / "features.npy"
        completed_hashes_file = output_dir_subtype / "hashes.txt"
        errors_log_file = output_dir_subtype / "errors.log"
        files_exist = all(f.exists() for f in [features_file, completed_hashes_file, errors_log_file])
        if files_exist:
            # resume: reload the feature matrix and both logs, then skip every
            # hash that was already completed or already errored
            features_numpy_arr = np.load(features_file)
            with open(completed_hashes_file, "r") as f:
                completed_log = [line.strip() for line in f]
            completed_hashes = [line.split(",")[1].strip() for line in completed_log]
            with open(errors_log_file, "r") as f:
                errors_log = [line.strip() for line in f]
            errored_hashes = [e.split(",")[1].strip() for e in errors_log]
            remaining_hashes = [h for h in hashes if h not in completed_hashes and h not in errored_hashes]
            assert len(remaining_hashes) == len(hashes) - len(completed_hashes) - len(errors_log), (
                f"Remaining hashes do not match: "
                f"{len(remaining_hashes)} != {len(hashes)} - {len(completed_hashes)} - {len(errors_log)}"
            )
            print(f"[!] Found: {len(completed_hashes)} completed | {len(errors_log)} errored | {len(remaining_hashes)} remaining!")
            hashes = remaining_hashes
        else:
            # fresh start: preallocate one feature row per hash
            features_numpy_arr = np.zeros((len(hashes), EMBER_DIM), dtype=np.float32)
            completed_log = []
            errors_log = []

        extractor = PEFeatureExtractor(print_feature_warning=False)
        start_idx = len(completed_log) + len(errors_log)
        print(f"[*] Starting from index {start_idx}")
        pbar = tqdm(hashes, desc="[*] Extracting features...")
        for i, h in enumerate(pbar, start=start_idx):
            try:
                features = get_ember_features(h, raw_pe_root_path, extractor)
                features_numpy_arr[i] = features
                completed_log.append(f"{i},{h}")
            except KeyboardInterrupt:
                raise
            except Exception as e:
                err_msg = f"{i},{h},{e}"
                print(f"[-] {err_msg}")
                errors_log.append(err_msg)
                pbar.set_description(f"[*] Extracting features... | Errors: {len(errors_log)}")
            if i > 0 and i % write_every == 0:
                _save_intermediate_state(
                    features_file, features_numpy_arr, completed_hashes_file, completed_log, errors_log_file, errors_log
                )
                # print(f"[!] Dumped intermediate state to {output_dir_subtype.name}")
        # final save
        _save_intermediate_state(
            features_file, features_numpy_arr, completed_hashes_file, completed_log, errors_log_file, errors_log
        )
        # drop the all-zero rows left behind by errored hashes, if any
        if features_numpy_arr.shape[0] != len(hashes):
            errored_indices = [int(e.split(",")[0]) for e in errors_log]
            assert np.all(features_numpy_arr[errored_indices] == 0), "Some errored indices are not 0"
            features_numpy_arr = np.delete(features_numpy_arr, errored_indices, axis=0)
            np.save(features_file, features_numpy_arr)
        print(f"[*] Finished work on {output_dir.name}")
        print(f"[INFO] {len(completed_log)} hashes completed | {len(errors_log)} errored | {len(features_numpy_arr)} features")

    for binary_type in hashes:
        _parse_binary_type(hashes[binary_type][:limit], binary_type)


if __name__ == "__main__":
    quovadis_path = Path("quovadis")
    quovadis_ember_path = Path("quovadis_ember")
    quovadis_ember_path.mkdir(parents=True, exist_ok=True)

    train_hashes, test_hashes = read_quovadis_hashes(quovadis_path)

    raw_pe_root_path_train = Path("/data/datasets/pe/pe_trainset")
    raw_pe_root_path_test = Path("/data/datasets/pe/pe_testset")

    extract_and_write_features(train_hashes, raw_pe_root_path_train, quovadis_ember_path / "train")
    extract_and_write_features(test_hashes, raw_pe_root_path_test, quovadis_ember_path / "test")
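
    # Resulting tree (illustrative): quovadis_ember/{train,test}/<binary_type>/
    # with features.npy, hashes.txt, and errors.log in each leaf folder.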