feat: Add timing for the execution of the for loop in app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
#%%
|
|
|
2 |
from text_processing import split_into_words, Word
|
3 |
import torch
|
4 |
from transformers import AutoTokenizer, AutoModelForCausalLM, PreTrainedModel, PreTrainedTokenizer
|
@@ -61,6 +62,8 @@ low_prob_words = [word for word in words if word.logprob < log_prob_threshold]
|
|
61 |
|
62 |
#%%
|
63 |
|
|
|
|
|
64 |
for word in low_prob_words:
|
65 |
prefix_index = word.first_token_index
|
66 |
prefix_tokens = [token for token, _ in result][:prefix_index + 1]
|
@@ -69,3 +72,5 @@ for word in low_prob_words:
|
|
69 |
print(f"Original word: {word.text}, Log Probability: {word.logprob:.4f}")
|
70 |
print(f"Proposed replacements: {replacements}")
|
71 |
print()
|
|
|
|
|
|
1 |
#%%
|
2 |
+ import time
|
3 |
from text_processing import split_into_words, Word
|
4 |
import torch
|
5 |
from transformers import AutoTokenizer, AutoModelForCausalLM, PreTrainedModel, PreTrainedTokenizer
|
|
|
62 |
|
63 |
#%%
|
64 |
|
65 |
+ start_time = time.time()
|
66 |
+
|
67 |
for word in low_prob_words:
|
68 |
prefix_index = word.first_token_index
|
69 |
prefix_tokens = [token for token, _ in result][:prefix_index + 1]
|
|
|
72 |
print(f"Original word: {word.text}, Log Probability: {word.logprob:.4f}")
|
73 |
print(f"Proposed replacements: {replacements}")
|
74 |
print()
|
75 |
+ end_time = time.time()
|
76 |
+ print(f"Time taken for the loop: {end_time - start_time:.4f} seconds")
|