teckmill committed on
Commit
8e0f1b0
·
verified ·
1 Parent(s): 1ddb9cd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -16
app.py CHANGED
@@ -1,32 +1,40 @@
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
3
 
4
- # Load the model and tokenizer from Hugging Face Hub
5
- model_name = "microsoft/CodeGPT-small-py" # Model name you provided
6
- tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
  # Initialize the pipeline
10
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
11
 
12
- def generate_text(prompt):
13
- # Generate text using the model
14
- generated = generator(prompt, max_length=50)
15
- return generated[0]['generated_text']
 
 
 
 
16
 
17
  # Streamlit UI
18
- st.title("CodeGPT Text Generation")
19
- st.write("Enter a prompt to generate code or text:")
20
 
21
- # Text input for the user to enter a prompt
22
- prompt = st.text_area("Prompt", "def example_function():\n # Your code here...")
 
 
 
23
 
24
- # Button to trigger the model inference
25
  if st.button("Generate Code"):
26
  if prompt:
27
- generated_text = generate_text(prompt)
28
  st.subheader("Generated Code")
29
- st.write(generated_text)
30
  else:
31
- st.warning("Please enter a prompt to generate code.")
 
32
 
 
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
+ import black
4
 
5
# Load model and tokenizer
model_name = "microsoft/CodeGPT-small-py"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Initialize the text-generation pipeline.
# NOTE: temperature/top_p only take effect when sampling is enabled —
# without do_sample=True transformers ignores them and decodes greedily.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    do_sample=True,
    temperature=0.5,
    top_p=0.9,
    max_length=150,  # max_length counts prompt tokens too — TODO consider max_new_tokens
)
12
 
13
def generate_code_with_feedback(prompt):
    """Generate code from *prompt* with the pipeline, then format the result.

    Returns the single generated sequence, passed through format_code()
    as a self-check on code quality.
    """
    raw_output = generator(prompt, num_return_sequences=1)[0]["generated_text"]
    return format_code(raw_output)
18
+
19
def format_code(code):
    """Format *code* with black; return it unchanged if it cannot be parsed.

    Model output is frequently not syntactically valid Python, and
    black.format_str raises black.InvalidInput on unparseable source —
    falling back to the raw text keeps the app from crashing on every
    imperfect generation.
    """
    try:
        return black.format_str(code, mode=black.Mode())
    except black.InvalidInput:
        return code
21
 
22
# Streamlit UI
st.title("Smart Code Generation and Fixing")
st.write("Enter a prompt to generate or fix code:")

# Pick the action, then seed the prompt box with a matching default.
option = st.radio("Select Action", ("Generate Code", "Fix Code"))
_DEFAULT_PROMPTS = {
    "Generate Code": "Write a Python function that reverses a string:",
    "Fix Code": "Fix the following buggy Python code:\n\ndef reverse_string(s):\n return s[::-1]",
}
prompt = st.text_area("Prompt", _DEFAULT_PROMPTS[option])
31
 
 
32
# Run inference when the user clicks the button.
if st.button("Generate Code"):
    # Reject empty and whitespace-only prompts alike.
    if prompt.strip():
        generated_code = generate_code_with_feedback(prompt)
        st.subheader("Generated Code")
        # st.code renders monospaced with syntax highlighting; st.write
        # would interpret the output as markdown and mangle the code.
        st.code(generated_code, language="python")
    else:
        st.warning("Please enter a prompt.")
40