Update README.md
README.md
````diff
@@ -118,10 +118,6 @@ with torch.no_grad():
         input_signal=input_signal.to(device),
         input_signal_length=input_signal_length.to(device)
     ).cpu()
-
-# Check output dimensions
-B, T, C = torch_outputs.shape
-assert C == 2, "Output channels should be 2"
 ```
 
 ### Export to ONNX
@@ -164,9 +160,6 @@ torch.onnx.export(
         "output": {0: "batch_size", 1: "sequence_length"}
     }
 )
-
-# Validate ONNX model
-onnx.checker.check_model(onnx.load(ONNX_EXPORT_PATH))
 ```
 
 ### Inference with ONNX Runtime
@@ -187,11 +180,6 @@ ort_inputs = {
 
 # Run inference
 onnx_outputs = session.run(None, ort_inputs)[0]
-
-# Compare with PyTorch output
-for torch_out, onnx_out in zip(torch_outputs, onnx_outputs):
-    torch.testing.assert_close(torch_out, torch.from_numpy(onnx_out), atol=1e-3, rtol=1e-3)
-print("✅ PyTorch and ONNX Runtime outputs match!")
 ```
 
 ### RTTM Output from Frame-Level Speech Predictions
````
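For reference, a minimal standalone sketch of the shape check and ONNX validation removed above, assuming the README's `torch_outputs` tensor and `ONNX_EXPORT_PATH` are in scope:

```python
# Sketch only: assumes `torch_outputs` (the PyTorch inference output) and
# `ONNX_EXPORT_PATH` (the export path) from the README's earlier snippets.
import onnx

# The model output is expected to be (batch, time, channels) with 2 channels.
B, T, C = torch_outputs.shape
assert C == 2, "Output channels should be 2"

# Structural validation of the exported ONNX graph.
onnx.checker.check_model(onnx.load(ONNX_EXPORT_PATH))
```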
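And a similar sketch of the removed PyTorch vs. ONNX Runtime comparison, assuming `torch_outputs` (a `torch.Tensor`) and `onnx_outputs` (a NumPy array returned by `session.run`) from the inference snippets:

```python
# Sketch only: assumes `torch_outputs` and `onnx_outputs` from the README's
# PyTorch and ONNX Runtime inference examples.
import torch

# Element-wise comparison within a small tolerance.
for torch_out, onnx_out in zip(torch_outputs, onnx_outputs):
    torch.testing.assert_close(
        torch_out, torch.from_numpy(onnx_out), atol=1e-3, rtol=1e-3
    )
print("✅ PyTorch and ONNX Runtime outputs match!")
```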