sachiniyer committed on
Commit
0ae62c7
·
1 Parent(s): 1c09fc2

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +18 -21
README.md CHANGED
@@ -4,36 +4,33 @@ datasets:
4
  - jigsaw_toxicity_pred
5
  language:
6
  - en
7
- library_name: adapter-transformers
8
  pipeline_tag: text-classification
9
  model_details:
10
  model_name: Toxic Tweets
11
  model_version: v1.0
12
  model_description: A PyTorch model for tweet toxicity
13
  model_authors:
14
- - Sachin Iyer
15
  model_tags:
16
- - PyTorch
17
- - Text Classification
18
- - NLP
19
-
20
  inputs:
21
- - name: text
22
- type: string
23
- description: The input text to be classified
24
-
25
  outputs:
26
- - name: class
27
- type: string
28
- description: The predicted class label
29
- value_mapping:
30
- 'toxic': 'toxic'
31
- 'severe_toxic': 'severe_toxic'
32
- 'obscene': 'obscene'
33
- 'threat': 'threat'
34
- 'insult': 'insult'
35
- 'identity_hate': 'identity_hate'
36
-
37
  ---
38
  # Model Card for Model ID
39
 
 
4
  - jigsaw_toxicity_pred
5
  language:
6
  - en
7
+ library_name: transformers
8
  pipeline_tag: text-classification
9
  model_details:
10
  model_name: Toxic Tweets
11
  model_version: v1.0
12
  model_description: A PyTorch model for tweet toxicity
13
  model_authors:
14
+ - Sachin Iyer
15
  model_tags:
16
+ - PyTorch
17
+ - Text Classification
18
+ - NLP
 
19
  inputs:
20
+ - name: text
21
+ type: string
22
+ description: The input text to be classified
 
23
  outputs:
24
+ - name: class
25
+ type: string
26
+ description: The predicted class label
27
+ value_mapping:
28
+ toxic: toxic
29
+ severe_toxic: severe_toxic
30
+ obscene: obscene
31
+ threat: threat
32
+ insult: insult
33
+ identity_hate: identity_hate
 
34
  ---
35
  # Model Card for Model ID
36