File size: 1,041 Bytes
74e8af6 c2e4cc3 9a75d7c 6596636 4522029 6dbe383 4522029 8fe4b6c bcc7480 c447bbf 4522029 a38d800 6dbe383 a38d800 6dbe383 4522029 28f0c2b 4522029 6dbe383 4522029 bafdc64 c5042ea 50979de c5042ea 50979de c5042ea |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import os
from huggingface_hub import login
from transformers import pipeline, AutoTokenizer
import streamlit as st
# Read the Hugging Face access token from the environment (expected to be
# provided as an HF Spaces secret named HF_MODEL_TOKEN).
hf_token = os.getenv("HF_MODEL_TOKEN")
# Authenticate with the Hugging Face Hub so the (presumably gated/private)
# model checkpoint below can be downloaded.
# Guard: login(token=None) would fall back to an interactive prompt, which
# fails in a headless Streamlit runtime — skip login when the secret is absent.
if hf_token:
    login(token=hf_token)
@st.cache_resource
def load_pipe():
    """Build the token-classification pipeline and its tokenizer.

    Cached with st.cache_resource so the model weights are loaded only
    once per Streamlit process rather than on every script rerun.

    Returns:
        tuple: (token-classification pipeline, AutoTokenizer) for the
        GBERT checkpoint below.
    """
    checkpoint = "MSey/_table_CaBERT_0003_gbert-base_fl32_checkpoint-15852"
    ner_pipeline = pipeline("token-classification", model=checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    return ner_pipeline, tokenizer
# Load (or fetch from cache) the model pipeline and tokenizer once at startup.
pipe, tokenizer = load_pipe()

st.header("Test Environment for GBERT Ca Model")

user_input = st.text_input("Enter your Prompt here:", "")

if user_input:
    with st.spinner('Generating response...'):
        # Run token classification; the pipeline returns a list of dicts,
        # each with at least 'word' and 'entity' keys.
        response = pipe(user_input)
        st.write("Response:")
        # Build one "word<TAB>label" line per predicted entity.
        # A single join() replaces the original quadratic `+=` string
        # accumulation loop.
        tuples = "".join(
            f"{entity['word']}\t{entity['entity']}\n" for entity in response
        )
        # Render the tab-separated pairs in a fixed-width block.
        st.text(tuples)