Commit
·
d10eb4f
1
Parent(s):
1233f56
Update README.md
Browse files
README.md
CHANGED
@@ -3,7 +3,7 @@ license: apache-2.0
|
|
3 |
tags:
|
4 |
- generated_from_keras_callback
|
5 |
model-index:
|
6 |
-
- name: kasrahabib/KM35NCDF
|
7 |
results: []
|
8 |
widget:
|
9 |
- text: "Application needs to keep track of subtasks in a task."
|
@@ -17,7 +17,7 @@ widget:
|
|
17 |
<!-- This model card has been generated automatically according to the information Keras had access to. You should
|
18 |
probably proofread and complete it, then remove this comment. -->
|
19 |
|
20 |
-
# kasrahabib/KM35NCDF
|
21 |
|
22 |
This model is a fine-tuned version of [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) on the Software Requirements Dataset (SWARD) for classifying 19 non-functional requirement classes. Note that, based on the literature, two of the 19 classes (Data and Behavior) are types of functional software requirements. It achieves the following results on the evaluation set:
|
23 |
- Train Loss: 0.1691
|
@@ -54,7 +54,7 @@ from transformers import pipeline
|
|
54 |
|
55 |
frame_work = 'tf'
|
56 |
task = 'text-classification'
|
57 |
-
model_ckpt = 'kasrahabib/KM35NCDF
|
58 |
|
59 |
software_requirment_cls = pipeline(task = task, model = model_ckpt, framework = frame_work)
|
60 |
|
@@ -76,7 +76,7 @@ software_requirment_cls([example_1_US, example_2_PE, example_3_AC])
|
|
76 |
import numpy as np
|
77 |
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
78 |
|
79 |
-
model_ckpt = 'kasrahabib/KM35NCDF
|
80 |
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
|
81 |
model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)
|
82 |
|
@@ -113,7 +113,7 @@ Then modify the code as below:
|
|
113 |
import numpy as np
|
114 |
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
115 |
|
116 |
-
model_ckpt = 'rest_of_the_path/KM35NCDF
|
117 |
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
|
118 |
model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)
|
119 |
|
|
|
3 |
tags:
|
4 |
- generated_from_keras_callback
|
5 |
model-index:
|
6 |
+
- name: kasrahabib/KM35NCDF
|
7 |
results: []
|
8 |
widget:
|
9 |
- text: "Application needs to keep track of subtasks in a task."
|
|
|
17 |
<!-- This model card has been generated automatically according to the information Keras had access to. You should
|
18 |
probably proofread and complete it, then remove this comment. -->
|
19 |
|
20 |
+
# kasrahabib/KM35NCDF
|
21 |
|
22 |
This model is a fine-tuned version of [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) on the Software Requirements Dataset (SWARD) for classifying 19 non-functional requirement classes. Note that, based on the literature, two of the 19 classes (Data and Behavior) are types of functional software requirements. It achieves the following results on the evaluation set:
|
23 |
- Train Loss: 0.1691
|
|
|
54 |
|
55 |
frame_work = 'tf'
|
56 |
task = 'text-classification'
|
57 |
+
model_ckpt = 'kasrahabib/KM35NCDF'
|
58 |
|
59 |
software_requirment_cls = pipeline(task = task, model = model_ckpt, framework = frame_work)
|
60 |
|
|
|
76 |
import numpy as np
|
77 |
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
78 |
|
79 |
+
model_ckpt = 'kasrahabib/KM35NCDF'
|
80 |
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
|
81 |
model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)
|
82 |
|
|
|
113 |
import numpy as np
|
114 |
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
115 |
|
116 |
+
model_ckpt = 'rest_of_the_path/KM35NCDF'
|
117 |
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
|
118 |
model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)
|
119 |
|