system (HF staff) committed
Commit 692bc7e · 1 Parent(s): 4b20590

Commit From AutoTrain
.gitattributes CHANGED
@@ -30,3 +30,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
+---
+tags:
+- autotrain
+- text-classification
+language:
+- ar
+widget:
+- text: "I love AutoTrain 🤗"
+datasets:
+- Azizjah/autotrain-data-arabic_cuisine
+co2_eq_emissions:
+  emissions: 0.02430968865158923
+---
+
+# Model Trained Using AutoTrain
+
+- Problem type: Multi-class Classification
+- Model ID: 1367052683
+- CO2 Emissions (in grams): 0.0243
+
+## Validation Metrics
+
+- Loss: 2.302
+- Accuracy: 0.439
+- Macro F1: 0.133
+- Micro F1: 0.439
+- Weighted F1: 0.391
+- Macro Precision: 0.167
+- Micro Precision: 0.439
+- Weighted Precision: 0.378
+- Macro Recall: 0.140
+- Micro Recall: 0.439
+- Weighted Recall: 0.439
+
+
+## Usage
+
+You can use cURL to access this model:
+
+```
+$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Azizjah/autotrain-arabic_cuisine-1367052683
+```
+
+Or use the Python API:
+
+```
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+model = AutoModelForSequenceClassification.from_pretrained("Azizjah/autotrain-arabic_cuisine-1367052683", use_auth_token=True)
+
+tokenizer = AutoTokenizer.from_pretrained("Azizjah/autotrain-arabic_cuisine-1367052683", use_auth_token=True)
+
+inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+outputs = model(**inputs)
+```
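
The card's Python snippet stops at the raw forward pass. As a hedged follow-up (a minimal sketch, not part of the committed card), one plausible next step is mapping the argmax over the 66 class logits back to a cuisine name via the `id2label` table that ships in this repo's `config.json`:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "Azizjah/autotrain-arabic_cuisine-1367052683"
model = AutoModelForSequenceClassification.from_pretrained(repo, use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained(repo, use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
with torch.no_grad():  # inference only; no gradients needed
    outputs = model(**inputs)

# Argmax over the 66 class logits, then map the index back to a cuisine
# category using the id2label mapping stored in the model config.
predicted_id = outputs.logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])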
config.json ADDED
@@ -0,0 +1,167 @@
+{
+  "_name_or_path": "AutoTrain",
+  "_num_labels": 66,
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Afghan Restaurant",
+    "1": "American Restaurant",
+    "2": "Arepa Restaurant",
+    "3": "Armenian Restaurant",
+    "4": "Asian Restaurant",
+    "5": "BBQ Joint",
+    "6": "Bagel Shop",
+    "7": "Bakery",
+    "8": "Breakfast Spot",
+    "9": "Buffet",
+    "10": "Burger Joint",
+    "11": "Caf\u00e9",
+    "12": "Chaat Place",
+    "13": "Chinese Restaurant",
+    "14": "Coffee Shop",
+    "15": "Comfort Food Restaurant",
+    "16": "Creperie",
+    "17": "Dessert Shop",
+    "18": "Dim Sum Restaurant",
+    "19": "Diner",
+    "20": "Donut Shop",
+    "21": "Egyptian Restaurant",
+    "22": "English Restaurant",
+    "23": "Falafel Restaurant",
+    "24": "Fast Food Restaurant",
+    "25": "Fish & Chips Shop",
+    "26": "Food Stand",
+    "27": "Food Truck",
+    "28": "French Restaurant",
+    "29": "Fried Chicken Joint",
+    "30": "Greek Restaurant",
+    "31": "Halal Restaurant",
+    "32": "Health Food Store",
+    "33": "Hot Dog Joint",
+    "34": "Indian Restaurant",
+    "35": "Indonesian Restaurant",
+    "36": "Iraqi Restaurant",
+    "37": "Italian Restaurant",
+    "38": "Japanese Restaurant",
+    "39": "Juice Bar",
+    "40": "Kebab Restaurant",
+    "41": "Lebanese Restaurant",
+    "42": "Lounge",
+    "43": "Mediterranean Restaurant",
+    "44": "Mexican Restaurant",
+    "45": "Middle Eastern Restaurant",
+    "46": "Moroccan Restaurant",
+    "47": "New American Restaurant",
+    "48": "Noodle House",
+    "49": "Pakistani Restaurant",
+    "50": "Persian Restaurant",
+    "51": "Pizza Place",
+    "52": "Poke Place",
+    "53": "Restaurant",
+    "54": "Salad Place",
+    "55": "Sandwich Place",
+    "56": "Seafood Restaurant",
+    "57": "Shawarma Place",
+    "58": "Snack Place",
+    "59": "Steakhouse",
+    "60": "Sushi Restaurant",
+    "61": "Taco Place",
+    "62": "Tea Room",
+    "63": "Thai Restaurant",
+    "64": "Turkish Restaurant",
+    "65": "Wings Joint"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Afghan Restaurant": 0,
+    "American Restaurant": 1,
+    "Arepa Restaurant": 2,
+    "Armenian Restaurant": 3,
+    "Asian Restaurant": 4,
+    "BBQ Joint": 5,
+    "Bagel Shop": 6,
+    "Bakery": 7,
+    "Breakfast Spot": 8,
+    "Buffet": 9,
+    "Burger Joint": 10,
+    "Caf\u00e9": 11,
+    "Chaat Place": 12,
+    "Chinese Restaurant": 13,
+    "Coffee Shop": 14,
+    "Comfort Food Restaurant": 15,
+    "Creperie": 16,
+    "Dessert Shop": 17,
+    "Dim Sum Restaurant": 18,
+    "Diner": 19,
+    "Donut Shop": 20,
+    "Egyptian Restaurant": 21,
+    "English Restaurant": 22,
+    "Falafel Restaurant": 23,
+    "Fast Food Restaurant": 24,
+    "Fish & Chips Shop": 25,
+    "Food Stand": 26,
+    "Food Truck": 27,
+    "French Restaurant": 28,
+    "Fried Chicken Joint": 29,
+    "Greek Restaurant": 30,
+    "Halal Restaurant": 31,
+    "Health Food Store": 32,
+    "Hot Dog Joint": 33,
+    "Indian Restaurant": 34,
+    "Indonesian Restaurant": 35,
+    "Iraqi Restaurant": 36,
+    "Italian Restaurant": 37,
+    "Japanese Restaurant": 38,
+    "Juice Bar": 39,
+    "Kebab Restaurant": 40,
+    "Lebanese Restaurant": 41,
+    "Lounge": 42,
+    "Mediterranean Restaurant": 43,
+    "Mexican Restaurant": 44,
+    "Middle Eastern Restaurant": 45,
+    "Moroccan Restaurant": 46,
+    "New American Restaurant": 47,
+    "Noodle House": 48,
+    "Pakistani Restaurant": 49,
+    "Persian Restaurant": 50,
+    "Pizza Place": 51,
+    "Poke Place": 52,
+    "Restaurant": 53,
+    "Salad Place": 54,
+    "Sandwich Place": 55,
+    "Seafood Restaurant": 56,
+    "Shawarma Place": 57,
+    "Snack Place": 58,
+    "Steakhouse": 59,
+    "Sushi Restaurant": 60,
+    "Taco Place": 61,
+    "Tea Room": 62,
+    "Thai Restaurant": 63,
+    "Turkish Restaurant": 64,
+    "Wings Joint": 65
+  },
+  "layer_norm_eps": 1e-12,
+  "max_length": 128,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 0,
+  "padding": "max_length",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.20.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 32000
+}
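
The `id2label`/`label2id` tables above are what let downstream tooling return human-readable categories, and `max_length`/`padding` record how AutoTrain tokenized at training time. A minimal sketch of how these fields surface at inference, using the `transformers` pipeline (an illustration, not part of the commit; the Arabic input string is an invented example):

```python
from transformers import pipeline

# The pipeline reads id2label from config.json, so predictions come back
# as label strings ("Shawarma Place", ...) rather than raw class indices.
classifier = pipeline(
    "text-classification",
    model="Azizjah/autotrain-arabic_cuisine-1367052683",
)

# Invented Arabic review text ("excellent shawarma restaurant"); the output
# is a list of {"label": ..., "score": ...} dicts.
print(classifier("مطعم شاورما ممتاز"))
```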
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43d87653c46c16430621748e95a24ec244596d360b51b70f02ee63d6e4479e38
+size 442743405
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+{
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "full_tokenizer_file": null,
+  "mask_token": "[MASK]",
+  "name_or_path": "AutoTrain",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff