Update app.py
app.py CHANGED
@@ -23,25 +23,24 @@ def img2txt(url):
     return text
 
 # Text-to-story
-    [18 removed lines not shown in the diff view]
-    story = response.json()["output"]["choices"][0]["text"]
+
+model = "tiiuae/falcon-7b-instruct"
+llm = HuggingFaceHub(
+    huggingfacehub_api_token = api_token,
+    repo_id = model,
+    verbose = False,
+    model_kwargs = {"temperature":temperature, "max_new_tokens": 1500})
+
+def generate_story(scenario, llm):
+    template= """You are a story teller.
+    You get a scenario as an input text, and generates a short story out of it.
+    Context: {scenario}
+    Story:
+    """
+    prompt = PromptTemplate(template=template, input_variables=["scenario"])
+    #Let's create our LLM chain now
+    chain = LLMChain(prompt=prompt, llm=llm)
+    story = chain.predict(scenario=scenario)
     return story
 
 
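The hunk above replaces the previous HTTP-based txt2story body (only its final parsing line, response.json()["output"]["choices"][0]["text"], is visible) with a LangChain pipeline: a HuggingFaceHub LLM wrapping tiiuae/falcon-7b-instruct, a PromptTemplate, and an LLMChain. As a rough, self-contained sketch of how those pieces fit together, assuming api_token is read from an HF_API_TOKEN environment variable and temperature is a module-level setting (neither is shown in this hunk):

import os

from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Assumed sources for values referenced but not defined in the hunk
api_token = os.environ["HF_API_TOKEN"]   # assumption: token comes from the environment
temperature = 0.8                        # assumption: fixed default instead of a UI control

model = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=api_token,
    repo_id=model,
    verbose=False,
    model_kwargs={"temperature": temperature, "max_new_tokens": 1500},
)

def generate_story(scenario, llm):
    """Turn an image caption (scenario) into a short story via an LLM chain."""
    template = """You are a story teller.
    You get a scenario as an input text, and generate a short story out of it.
    Context: {scenario}
    Story:
    """
    prompt = PromptTemplate(template=template, input_variables=["scenario"])
    chain = LLMChain(prompt=prompt, llm=llm)   # prompt -> Falcon-7B-Instruct on the Hub
    return chain.predict(scenario=scenario)

if __name__ == "__main__":
    print(generate_story("a dog chasing butterflies in a sunny garden", llm))

Note that the call site in the last hunk invokes txt2story(scenario, llm), so the unchanged parts of app.py presumably wire generate_story into that helper.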
@@ -56,6 +55,7 @@ def txt2speech(text):
 
     with open('audio_story.mp3', 'wb') as file:
         file.write(response.content)
+
 
 
 # Streamlit web app main function
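This hunk only adds a blank line after txt2speech, but the visible context shows the pattern: the function writes raw response bytes to audio_story.mp3. A hedged sketch of how such a helper is commonly written against the Hugging Face Inference API follows; the model id and the token handling are assumptions, since the diff shows only the file-writing step.

import os
import requests

def txt2speech(text):
    # Assumption: a hosted TTS model on the HF Inference API; the actual model id
    # used by app.py is not visible in this diff.
    api_url = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    headers = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

    # The Inference API returns raw audio bytes for a {"inputs": ...} payload
    response = requests.post(api_url, headers=headers, json={"inputs": text})
    response.raise_for_status()

    with open('audio_story.mp3', 'wb') as file:
        file.write(response.content)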
@@ -83,7 +83,7 @@ def main():
         # Initiates AI processing and story generation
         with st.spinner("## 🤖 AI is at Work! "):
             scenario = img2txt("uploaded_image.jpg") # Extracts text from the image
-            story = txt2story(scenario,
+            story = txt2story(scenario, llm) # Generates a story based on the image text, LLM params
             txt2speech(story) # Converts the story to audio
 
         st.markdown("---")
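The last hunk updates the call site so the shared llm object is passed into txt2story. For orientation, here is a sketch of how that spinner block typically sits inside the Streamlit main() function; everything outside the spinner block (title, uploader, st.write and st.audio calls) is an assumption based on the file names visible in the diff, and img2txt, txt2story, txt2speech and llm are defined earlier in app.py.

import streamlit as st

# img2txt, txt2story, txt2speech and llm come from the rest of app.py

def main():
    st.title("Image to Audio Story")   # assumed title
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # Persist the upload under the filename img2txt() expects
        with open("uploaded_image.jpg", "wb") as f:
            f.write(uploaded_file.getvalue())

        # Initiates AI processing and story generation
        with st.spinner("## 🤖 AI is at Work! "):
            scenario = img2txt("uploaded_image.jpg")  # Extracts text from the image
            story = txt2story(scenario, llm)          # Generates a story from that text
            txt2speech(story)                         # Converts the story to audio

        st.markdown("---")
        st.write(scenario)              # assumed display of the intermediate caption
        st.write(story)                 # assumed display of the generated story
        st.audio("audio_story.mp3")     # assumed playback of the generated narration

if __name__ == "__main__":
    main()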
|