Commit 9c7c0e7 by kasrahabib (1 parent: 891b1e4)

Update README.md

Files changed (1):
1. README.md (+38, -17)
README.md CHANGED
@@ -18,9 +18,27 @@ This model is a fine-tuned version of [sentence-transformers/all-MiniLM-L6-v2](h
 - Epoch: 14
 - Final Macro F1-score: 0.79
 
+
 <b>Labels</b>:
-0 or F -> Functional;
-1 or NF -> Non-functional;
+0 or A -> Availability;
+1 or AC -> Access Control;
+2 or AU -> Audit;
+3 or B -> Behaviour;
+4 or D -> Data;
+5 or FT -> Fault Tolerance;
+6 or I -> Interface/Interoperability;
+7 or LE -> Legal;
+8 or LF -> Look and Feel;
+9 or MN -> Maintainability;
+10 or O -> Operational;
+11 or PE -> Performance;
+12 or PO -> Portability;
+13 or RL -> Reliability;
+14 or SA -> Safety;
+15 or SC -> Scalability;
+16 or SE -> Security;
+17 or ST -> Stability;
+18 or US -> Usability;
 
 
 ## Usage Pipeline
@@ -33,12 +51,16 @@ model_ckpt = 'kasrahabib/KM35NCDF-NF-SUBCLASSES-cls'
 
 software_requirment_cls = pipeline(task = task, model = model_ckpt, framework = frame_work)
 
-example_1_f = 'The START NEW PROJECT function shall allow the user to create a new project.'
-example_2_nf = 'The email string consists of [email protected] and is less than 31 characters in length and is not empty.'
+example_1_US = 'Application needs to keep track of subtasks in a task.'
+example_2_PE = 'The system shall allow users to enter time in several different formats.'
+example_3_AC = 'The system shall allow users who hold any of the ORES/ORELSE/PROVIDER keys to be viewed as a clinical user and has full access privileges to all problem list options.'
+
+software_requirment_cls([example_1_US, example_2_PE, example_3_AC])
 ```
 ```
-[{'label': 'F', 'score': 0.9998922348022461},
-{'label': 'NF', 'score': 0.999846339225769}]
+[{'label': 'US', 'score': 0.9712953567504883},
+{'label': 'PE', 'score': 0.9457865953445435},
+{'label': 'AC', 'score': 0.9639136791229248}]
 ```
 
 ## Model Inference:
@@ -51,9 +73,10 @@ model_ckpt = 'kasrahabib/KM35NCDF-NF-SUBCLASSES-cls'
 tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
 model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)
 
-example_1_f = 'The START NEW PROJECT function shall allow the user to create a new project.'
-example_2_nf = 'The email string consists of [email protected] and is less than 31 characters in length and is not empty.'
-requirements = [example_1_f, example_2_nf]
+example_1_US = 'Application needs to keep track of subtasks in a task.'
+example_2_PE = 'The system shall allow users to enter time in several different formats.'
+example_3_AC = 'The system shall allow users who hold any of the ORES/ORELSE/PROVIDER keys to be viewed as a clinical user and has full access privileges to all problem list options.'
+requirements = [example_1_US, example_2_PE, example_3_AC]
 
 encoded_requirements = tokenizer(requirements, return_tensors = 'np', padding = 'longest')
 
@@ -64,7 +87,7 @@ classifications = [model.config.id2label[output] for output in classifications]
 print(classifications)
 ```
 ```
-['F', 'NF']
+['US', 'PE', 'AC']
 ```
 
 ## Usage Locally Downloaded (e.g., GitHub):
@@ -83,13 +106,14 @@ Then modify the code as below:
 import numpy as np
 from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
 
-model_ckpt = 'rest_of_the_path/KM35NCDF-NF-SUBCLASSES-cls'
+model_ckpt = 'rest_of_the_path/KM35NCDF-NF-SUBCLASSES-cls'
 tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
 model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)
 
-example_1_f = 'The START NEW PROJECT function shall allow the user to create a new project.'
-example_2_nf = 'The email string consists of [email protected] and is less than 31 characters in length and is not empty.'
-requirements = [example_1_f, example_2_nf]
+example_1_US = 'Application needs to keep track of subtasks in a task.'
+example_2_PE = 'The system shall allow users to enter time in several different formats.'
+example_3_AC = 'The system shall allow users who hold any of the ORES/ORELSE/PROVIDER keys to be viewed as a clinical user and has full access privileges to all problem list options.'
+requirements = [example_1_US, example_2_PE, example_3_AC]
 
 encoded_requirements = tokenizer(requirements, return_tensors = 'np', padding = 'longest')
 
@@ -99,9 +123,6 @@ classifications = np.argmax(y_pred, axis = 1)
 classifications = [model.config.id2label[output] for output in classifications]
 print(classifications)
 ```
-```
-[{'label': 'F', 'score': 0.9998922348022461},
-{'label': 'NF', 'score': 0.999846339225769}]
 
 ### Training hyperparameters
 
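The raw-inference snippets in this diff report only the argmax label, while the pipeline example also returns a confidence score. A minimal sketch of how the two can be reconciled, assuming the hub checkpoint `kasrahabib/KM35NCDF-NF-SUBCLASSES-cls` from the pipeline example and that the checkpoint's `id2label` matches the 0-18 label table added in this commit (both are assumptions to verify, not part of the commit):

```
import numpy as np
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

# Hub id taken from the README's pipeline example (swap in a local path if needed).
model_ckpt = 'kasrahabib/KM35NCDF-NF-SUBCLASSES-cls'
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = TFAutoModelForSequenceClassification.from_pretrained(model_ckpt)

# The id -> abbreviation mapping should line up with the 0..18 table above, e.g. {0: 'A', ..., 18: 'US'}.
print(model.config.id2label)

requirements = ['The system shall allow users to enter time in several different formats.']
encoded = tokenizer(requirements, return_tensors='np', padding='longest')

logits = model(**encoded).logits                 # raw class scores
probs = tf.nn.softmax(logits, axis=-1).numpy()   # per-class confidences

for req, p in zip(requirements, probs):
    top = int(np.argmax(p))
    print(f'{model.config.id2label[top]} ({p[top]:.3f}): {req}')
```

Taking the softmax of the logits reproduces the pipeline-style `{'label': ..., 'score': ...}` information without going through `pipeline`, which can be handy when batching requirements with the README's `np.argmax`-based code.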