import gradio
from transformers import pipeline

def merge_split_token(tokens):
  """Merge WordPiece subword tokens (prefixed with '##') back into whole words."""
  merged = []
  for token in tokens:
    if token["word"].startswith('##') and merged:
      # Append the subword piece to the previous token and, if offsets are
      # available, extend the previous token's end offset to cover the piece.
      merged[-1]["word"] += token["word"][2:]
      if "end" in token and "end" in merged[-1]:
        merged[-1]["end"] = token["end"]
    else:
      merged.append(token)

  return merged
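
# Illustrative example (hypothetical split; the actual subwords depend on the model's
# WordPiece vocabulary): the pipeline could return a word as two pieces, e.g.
#   [{'word': 'Malmgårds', ...}, {'word': '##vägen', ...}]
# and merge_split_token would fold them back into a single token with word 'Malmgårdsvägen'.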

# Load the Swedish NER pipeline once at startup so the model is not
# re-initialized on every request.
nlp = pipeline(
  "ner",
  model='KBLab/bert-base-swedish-cased-ner',
  tokenizer='KBLab/bert-base-swedish-cased-ner'
)

def process_trans_text(text):
  nlp_results = nlp(text)
  print('nlp_results:', nlp_results)

  nlp_results_merged = merge_split_token(nlp_results)
  # Cast the numpy float32 scores to plain floats so the result is JSON-serializable.
  nlp_results_adjusted = [dict(entity, score=float(entity['score'])) for entity in nlp_results_merged]
  print('nlp_results_adjusted:', nlp_results_adjusted)
  # Return a dict that the Gradio JSON output component renders directly.
  return {'entities': nlp_results_adjusted}

gradio_interface = gradio.Interface(
  fn=process_trans_text,
  inputs="text",
  outputs="json",
  examples=[
    ["Jag heter Tom och bor i Stockholm."],
    ["Groens malmgård är en av Stockholms malmgårdar, belägen vid Malmgårdsvägen 53 på Södermalm i Stockholm."]
  ],
  title="Entity Recognition",
  description="Swedish named entity recognition with KBLab/bert-base-swedish-cased-ner."
)

# share=True exposes a temporary public URL in addition to the local server on port 8888.
gradio_interface.launch(share=True, server_port=8888)