imkhan107 committed
Commit 55b70aa · Parent: c29a0a1

Added Llama3 support

Files changed (4):
  1. app.py +4 -4
  2. gemini.py +4 -3
  3. handler.py +7 -4
  4. llama_groq.py +3 -5
app.py CHANGED
```diff
@@ -27,23 +27,23 @@ def main():
     if analysis_type == "Code review":
         res=review_code(code, prompt)
         #st.text_area(label="Result",value=res, height=300)
-        st.markdown(res)
+        st.markdown(res['text'])
         time.sleep(1)
         #st.markdown("Hello ooo")

     elif analysis_type == "Code refinement":
         res=refine_code(code, prompt)
         #st.text_area(label="Result",value=res, height=300)
-        st.markdown(res)
+        st.markdown(res['text'])

     elif analysis_type == "Documentation":
         res=generate_documentation(code, prompt)
         #st.text_area(label="Result",value=res, height=300)
-        st.markdown(res)
+        st.markdown(res['text'])
     elif analysis_type == "Resume Writer":
         res=resume_writer(code, prompt)
         #st.text_area(label="Result",value=res, height=300)
-        st.markdown(res)
+        st.markdown(res['text'])

     st.success(f"Code analysis for {analysis_type} submitted successfully!")

```
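This change is needed because `model.execute` no longer returns a bare string: the new `LlamaModel.execute` (see llama_groq.py below) returns a dict, so the UI must index into it. A minimal sketch of the result shape app.py now assumes, using a hypothetical stand-in for the handler functions:

```python
# Sketch of the result shape app.py now expects from the handlers.
# `fake_review_code` is a hypothetical stand-in, not part of this repo.
def fake_review_code(code, prompt=None):
    # Mirrors what LlamaModel.execute returns on success.
    return {'text': f"Review of `{code}`: looks fine.", 'meta': {'model': 'llama3'}}

res = fake_review_code("print('hi')")
print(res['text'])  # what st.markdown(res['text']) renders in the app
```

Note that when `execute` hits its except branch it returns a plain error string, in which case `res['text']` would raise a TypeError; only the happy path is assumed here.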
gemini.py CHANGED
```diff
@@ -35,11 +35,12 @@ class GeminiModel:
     def execute(self, prompt: str) -> str:

         try:
-            total_tokens = self.model.count_tokens(prompt).total_tokens
-            print(f"Input tokens: {total_tokens}")
+            prompt_tokens = self.model.count_tokens(prompt).total_tokens
+            print(f"Input tokens: {prompt_tokens}")
             response = self.model.generate_content(prompt, generation_config=generation_config)
             output_tokens = self.model.count_tokens(response.text).total_tokens
             print(f"Output tokens: {output_tokens}")
-            return response.text
+
+            return response.text,{'prompt_tokens':prompt_tokens,"total_tokens":output_tokens}
         except Exception as e:
             return f"An error occurred: {e}"
```
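`count_tokens` and `generate_content` come from the google-generativeai SDK. A standalone sketch of the token-accounting pattern, assuming a model name and API-key handling that are not shown in this diff:

```python
# Standalone sketch of gemini.py's token accounting (google-generativeai SDK).
# The model name and api_key handling are assumptions, not from this repo.
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")
model = genai.GenerativeModel("gemini-1.5-flash")

prompt = "Explain list comprehensions in one sentence."
prompt_tokens = model.count_tokens(prompt).total_tokens        # tokens in
response = model.generate_content(prompt)
output_tokens = model.count_tokens(response.text).total_tokens  # tokens out

# Same shape GeminiModel.execute now returns: (text, usage dict).
result = response.text, {'prompt_tokens': prompt_tokens, 'total_tokens': output_tokens}
```

Note the asymmetry this commit leaves behind: `GeminiModel.execute` returns a `(text, usage)` tuple while `LlamaModel.execute` returns a `{'text', 'meta'}` dict, so app.py's `res['text']` only works while handler.py instantiates `LlamaModel`.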
handler.py CHANGED
```diff
@@ -1,5 +1,8 @@
 import gemini
+import llama_groq
 from prompts import *
+#model = gemini.GeminiModel()
+model=llama_groq.LlamaModel()

 def review_code(code, c_prompt=None):
     if code is None or len(code) < 5 or code.isspace():
@@ -11,7 +14,7 @@ def review_code(code, c_prompt=None):
     #prompt = validation_prompt(code.strip())
     prompt = default_review_prompt1(code.strip())

-    model = gemini.GeminiModel()
+
     try:
         res = model.execute(prompt)
     except Exception as e:
@@ -29,7 +32,7 @@ def refine_code(code, c_prompt=None):
     #prompt = validation_prompt(code.strip())
     prompt = default_refine_prompt(code.strip())

-    model = gemini.GeminiModel()
+
     try:
         res = model.execute(prompt)
     except Exception as e:
@@ -47,7 +50,7 @@ def generate_documentation(code,c_prompt):
     else:
         prompt = default_doc_prompt(code.strip())

-    model = gemini.GeminiModel()
+
     try:
         res = model.execute(prompt)
     except Exception as e:
@@ -65,7 +68,7 @@ def resume_writer(code,c_prompt):
     else:
         prompt = resume_prompt(code.strip())

-    model = gemini.GeminiModel()
+
     try:
         res = model.execute(prompt)
     except Exception as e:
```
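The backend swap happens once at import time: a single module-level `model` now serves all four handlers, which is why the per-function `gemini.GeminiModel()` constructions are dropped. A hypothetical sketch (not in this commit) of making that choice configurable instead of editing handler.py by hand:

```python
# Hypothetical alternative (not in this commit): choose the backend from an
# environment variable instead of hard-coding it in handler.py.
import os

def get_model():
    backend = os.getenv("CODE_ANALYSIS_BACKEND", "llama")  # assumed variable name
    if backend == "gemini":
        import gemini
        return gemini.GeminiModel()
    import llama_groq
    return llama_groq.LlamaModel()

model = get_model()  # module-level, shared by review/refine/doc/resume handlers
```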
llama_groq.py CHANGED
```diff
@@ -30,11 +30,9 @@ class LlamaModel:
     def execute(self, prompt: str) -> str:

         try:
-            #total_tokens = self.model.count_tokens(prompt).total_tokens
-            #print(f"Input tokens: {total_tokens}")
             response = self.model.invoke(prompt)
-            #output_tokens = response
-            #print(f"Output tokens: {output_tokens}")
-            return response
+            res=response.content
+            meta=response.response_metadata
+            return {'text':res,'meta':meta}
         except Exception as e:
             return f"An error occurred: {e}"
```
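`invoke` here is LangChain's chat-model call: on a Groq-backed chat model it returns an `AIMessage` whose `.content` is the generated text and whose `.response_metadata` includes token usage, which replaces the commented-out `count_tokens` bookkeeping. A minimal sketch, assuming `LlamaModel` wraps `langchain_groq.ChatGroq` (its constructor is not shown in this diff) and an assumed model name:

```python
# Minimal sketch of what LlamaModel.execute now does, assuming the class
# wraps langchain_groq.ChatGroq; the model name is an assumption, and
# GROQ_API_KEY is assumed to be set in the environment.
from langchain_groq import ChatGroq

llm = ChatGroq(model="llama3-70b-8192")

response = llm.invoke("Summarize PEP 8 in one line.")
result = {'text': response.content, 'meta': response.response_metadata}

print(result['text'])
print(result['meta'].get('token_usage'))  # Groq reports usage here
```

With this shape in place, the `-> str` annotation on `execute` is stale: the method now returns a dict on success and a string only on error.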