Update app.py
Browse files
app.py
CHANGED
@@ -1,32 +1,40 @@
|
|
1 |
import streamlit as st
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
|
|
3 |
|
4 |
-
# Load
|
5 |
-
model_name = "microsoft/CodeGPT-small-py"
|
6 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name
|
7 |
model = AutoModelForCausalLM.from_pretrained(model_name)
|
8 |
|
9 |
# Initialize the pipeline
|
10 |
-
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
11 |
|
12 |
-
def
|
13 |
-
|
14 |
-
|
15 |
-
|
|
|
|
|
|
|
|
|
16 |
|
17 |
# Streamlit UI
|
18 |
-
st.title("
|
19 |
-
st.write("Enter a prompt to generate
|
20 |
|
21 |
-
|
22 |
-
|
|
|
|
|
|
|
23 |
|
24 |
-
# Button to trigger the model inference
|
25 |
if st.button("Generate Code"):
|
26 |
if prompt:
|
27 |
-
|
28 |
st.subheader("Generated Code")
|
29 |
-
st.write(
|
30 |
else:
|
31 |
-
st.warning("Please enter a prompt
|
|
|
32 |
|
|
|
1 |
import streamlit as st
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
3 |
+
import black
|
4 |
|
5 |
+
# Load model and tokenizer
model_name = "microsoft/CodeGPT-small-py"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Initialize the text-generation pipeline.
# Bug fix: do_sample=True is required for temperature/top_p to take effect —
# without it transformers decodes greedily and silently ignores both knobs.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    do_sample=True,
    temperature=0.5,
    top_p=0.9,
    max_length=150,
)
12 |
|
13 |
+
def generate_code_with_feedback(prompt):
    """Generate code for *prompt* and run a formatting self-check.

    Parameters:
        prompt: free-text prompt passed to the text-generation pipeline.

    Returns:
        The generated text, black-formatted when it parses as valid
        Python; otherwise the raw generated text. A small code model
        frequently emits unparseable output, and black.format_str raises
        black.InvalidInput on it — falling back keeps the app from
        crashing on a bad generation.
    """
    generated_code = generator(prompt, num_return_sequences=1)[0]['generated_text']
    try:
        # Apply self-check for code quality (normalize with black).
        return format_code(generated_code)
    except black.InvalidInput:
        # Model output was not valid Python; show it unformatted.
        return generated_code
|
18 |
+
|
19 |
+
def format_code(code):
    """Return *code* reformatted according to black's default style."""
    default_mode = black.Mode()
    return black.format_str(code, mode=default_mode)
|
21 |
|
22 |
# Streamlit UI
st.title("Smart Code Generation and Fixing")
st.write("Enter a prompt to generate or fix code:")

# Mode selector; the default prompt text changes with the chosen action.
option = st.radio("Select Action", ("Generate Code", "Fix Code"))
if option == "Generate Code":
    prompt = st.text_area("Prompt", "Write a Python function that reverses a string:")
else:
    prompt = st.text_area("Prompt", "Fix the following buggy Python code:\n\ndef reverse_string(s):\n return s[::-1]")

# Bug fix: the button previously read "Generate Code" even when the user
# selected "Fix Code" — label it with the selected action instead.
if st.button(option):
    if prompt:
        generated_code = generate_code_with_feedback(prompt)
        st.subheader("Generated Code")
        st.write(generated_code)
    else:
        st.warning("Please enter a prompt.")
|
39 |
+
|
40 |
|