Mo-alaa committed on
Commit
f5a396a
·
1 Parent(s): dd71470

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -28
app.py CHANGED
@@ -1,29 +1,16 @@
1
"""Streamlit blog generator (previous version): generates a post from a title
using the salesken/content_generation_from_phrases causal LM."""
import torch
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Use the GPU when one is present; sampling up to 4096 tokens on CPU is slow.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

MODEL_NAME = "salesken/content_generation_from_phrases"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# AutoModelWithLMHead is deprecated in transformers; AutoModelForCausalLM is
# the drop-in replacement for this causal-LM checkpoint.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)

input_query = st.text_input("Enter the Blog Title")

# Only run generation once the user has actually typed a title — the original
# script generated on every page load, including with an empty input box.
if input_query:
    # The checkpoint expects the "<|startoftext|> ... ~~" framing, lowercased.
    query = "<|startoftext|> " + "Create a blog about " + input_query + " ~~"
    input_ids = tokenizer.encode(query.lower(), return_tensors="pt").to(device)
    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        num_beams=1,
        max_length=4096,
        temperature=0.9,
        top_k=30,
        num_return_sequences=1,
    )
    # Keep everything before the '||' terminator, then drop the prompt echo
    # that precedes the ' ~~ ' separator.
    generated = tokenizer.decode(sample_outputs[0], skip_special_tokens=True).split("||")[0]
    st.write(generated.split(" ~~ ")[1])
 
1
"""Streamlit blog generator: drafts an SEO-style article on a user-supplied
topic via a HuggingFaceHub-hosted Zephyr model driven by a LangChain chain."""
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
import streamlit as st
import os  # HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN from the environment

# Fixed label typo: "bog" -> "blog".
topic = st.text_input("Enter Topic for the blog")

# Remote inference endpoint; no weights are downloaded locally.
hub_llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta")

# Single-variable template: the topic fills the {keyword} slot.
prompt = PromptTemplate(
    input_variables=["keyword"],
    template="""
Write a comprehensive article about {keyword} covering the following aspects:
Introduction, History and Background, Key Concepts and Terminology, Use Cases and Applications, Benefits and Drawbacks, Future Outlook, Conclusion
Ensure that the article is well-structured, informative, and at least 1500 words long. Use SEO best practices for content optimization.
""",
)

hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)

# Only call the hosted model once the user has entered a topic — avoids firing
# a remote request with an empty keyword on the initial page render.
if topic:
    st.write(hub_chain.run(topic))