first try
- README.md +1 -1
- app.py +51 -0
- requirements.txt +2 -1
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Ai Buddy
-emoji:
+emoji: 🦥
 colorFrom: yellow
 colorTo: green
 sdk: streamlit
app.py
CHANGED
@@ -0,0 +1,51 @@
+import openai
+import streamlit as st
+from openai import OpenAI
+import os
+
+# Set OpenAI API key (read from the environment; never hardcode credentials)
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+
+# Streamlit UI
+st.title("AI Text Generator")
+st.write("Generate text using OpenAI's language model.")
+
+# User Inputs
+prompt = st.text_area("Enter your prompt:", "Once upon a time...")
+
+output_format = st.selectbox("Select Output Format:", ["Story", "Poem", "Article", "Code"])
+tone = st.selectbox("Select Tone:", ["Formal", "Informal", "Humorous", "Technical"])
+creativity = st.slider("Creativity Level (Temperature):", 0.0, 1.0, 0.7)
+max_length = st.slider("Max Response Length:", 50, 1000, 500)
+n_responses = st.number_input("Number of Responses:", 1, 5, 1)
+creative_mode = st.checkbox("Enable Creative Mode")
+
+if st.button("Generate Text"):
+    if not prompt.strip():
+        st.error("Please enter a prompt.")
+    else:
+        try:
+            full_prompt = f"Generate a {tone.lower()} {output_format.lower()} based on: {prompt}"
+            response = client.chat.completions.create(
+                model="gpt-3.5-turbo",
+                messages=[{"role": "user", "content": full_prompt}],
+                temperature=creativity,
+                max_tokens=max_length,
+                n=n_responses
+            )
+
+            for i, choice in enumerate(response.choices):
+                st.subheader(f"Response {i+1}")
+                st.write(choice.message.content)
+
+        except openai.OpenAIError as e:
+            st.error(f"API Error: {e}")
+
+# Feedback Section
+st.subheader("Feedback")
+feedback = st.radio("Was this response helpful?", ("Yes", "No"))
+if feedback == "No":
+    st.text_area("What could be improved?")
+
+st.write("Thank you for using the AI Text Generator!")
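The committed app.py collects a Creative Mode checkbox (creative_mode) but never uses it; only creativity, max_length and n_responses reach client.chat.completions.create. One plausible way to wire it in, shown here purely as a sketch (the 0.3 boost and the 1.0 cap are assumptions, not anything this commit specifies), is a small helper that nudges the slider temperature upward when the box is ticked:

def effective_temperature(base: float, creative: bool, boost: float = 0.3) -> float:
    # Hypothetical helper, not part of the commit: raise the sampling temperature
    # when Creative Mode is on, capped at 1.0 to match the app's slider range.
    return min(base + (boost if creative else 0.0), 1.0)

# In app.py's generate branch this would replace the plain slider value:
#     temperature=effective_temperature(creativity, creative_mode),

print(effective_temperature(0.7, True))   # 1.0 (0.7 + 0.3, capped)
print(effective_temperature(0.7, False))  # 0.7

Keeping the adjustment in a pure function leaves the API call itself unchanged and makes the behavior easy to test in isolation.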
requirements.txt
CHANGED
@@ -1 +1,2 @@
-streamlit
+streamlit
+openai