Update app.py
app.py
CHANGED
@@ -77,7 +77,7 @@ LLAMA_MAX_MAX_NEW_TOKENS = 512
 LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
 LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
 llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-llama_model_id = "meta-llama/Llama-3.2-
+llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
 llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
 llama_model = AutoModelForCausalLM.from_pretrained(
     llama_model_id,
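This hunk pins the model to `meta-llama/Llama-3.2-1B-Instruct` (the old id is cut off in this view) and ends mid-call. As a minimal sketch only, assuming fp16 on GPU, the loading block might continue as below; the `torch_dtype` choice, the `.to(llama_device)` placement, and the `eval()` call are assumptions, not shown in the diff:

```python
# Minimal sketch of the loading block around this hunk, assuming fp16 on GPU.
# Only the identifiers and constants visible in the diff come from app.py;
# torch_dtype, .to(), and eval() are assumptions.
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
llama_model = AutoModelForCausalLM.from_pretrained(
    llama_model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to(llama_device)
llama_model.eval()  # inference only; no gradients needed in the Space
```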
@@ -135,7 +135,7 @@ Given the following issue description:
 ---
 {issue_text}
 ---
-
+Provide a short explanation of why this issue might be classified as a **{quality_name}** issue.
 """
 try:
     explanation = llama_generate(prompt)
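This hunk adds an explicit instruction to the prompt before it is passed to `llama_generate(prompt)`, whose body lies outside the diff. Given the constants in the first hunk, a hedged sketch of what such a helper could look like follows; the truncation flags, greedy decoding, and prompt-stripping decode are all assumptions, not taken from app.py:

```python
# Hedged sketch of llama_generate, whose body is not part of this diff. It
# reuses the module-level objects from the hunk above; truncation to
# LLAMA_MAX_INPUT_TOKEN_LENGTH and do_sample=False are assumptions.
import torch


def llama_generate(prompt: str) -> str:
    inputs = llama_tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=LLAMA_MAX_INPUT_TOKEN_LENGTH,  # env-configurable input cap
    ).to(llama_device)
    with torch.no_grad():
        output_ids = llama_model.generate(
            **inputs,
            max_new_tokens=LLAMA_DEFAULT_MAX_NEW_TOKENS,
            do_sample=False,  # assumption: deterministic explanations
        )
    # Decode only the tokens generated after the prompt
    new_tokens = output_ids[0, inputs["input_ids"].shape[1]:]
    return llama_tokenizer.decode(new_tokens, skip_special_tokens=True)
```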