Update app.py
app.py
@@ -6,6 +6,8 @@ import torch
 import torch.nn as nn
 from transformers.activations import get_activation
 from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModel
+from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast
 
 
 st.title('GPT2:')
@@ -14,9 +16,10 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 @st.cache(allow_output_mutation=True)
 def get_model():
-    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MASKGPT2")
-    model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln80Paraphrase")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MASKGPT2")
     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MASKGPT2")
+    tokenizer = GPTNeoXTokenizerFast.from_pretrained("CarperAI/FIM-NeoX-1.3B")
+    model = GPTNeoXForCausalLM.from_pretrained("BigSalmon/FormalInformalConcise-FIM-NeoX-1.3B")
+    return model, tokenizer
 
 model, tokenizer = get_model()
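For orientation, a minimal sketch of how the updated loader might sit in the surrounding app.py. The device placement, text box, prompt format, and generation parameters are assumptions for illustration, not part of this commit; note also that newer Streamlit releases deprecate @st.cache in favor of st.cache_resource for model objects.

import torch
import streamlit as st
from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

st.title('GPT2:')

@st.cache(allow_output_mutation=True)
def get_model():
    # Tokenizer comes from the base FIM-NeoX checkpoint; weights from the fine-tune.
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("CarperAI/FIM-NeoX-1.3B")
    model = GPTNeoXForCausalLM.from_pretrained("BigSalmon/FormalInformalConcise-FIM-NeoX-1.3B")
    return model, tokenizer

model, tokenizer = get_model()
model.to(device)

# Hypothetical UI wiring below: the "informal english:" prompt style is an
# assumption based on the model name, not something this commit specifies.
prompt = st.text_area("Prompt", "informal english: ...")
if prompt:
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=60, do_sample=True, top_p=0.9)
    st.write(tokenizer.decode(output[0], skip_special_tokens=True))

Caching the loader matters because Streamlit reruns the whole script on every widget interaction; without @st.cache (or st.cache_resource), the 1.3B-parameter checkpoint would be re-downloaded and re-initialized on each rerun.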