datasetId
stringlengths
5
121
author
stringlengths
2
42
last_modified
unknowndate
2021-04-29 15:34:29
2025-03-27 01:29:56
downloads
int64
0
4.49M
likes
int64
0
7.65k
tags
sequencelengths
1
7.92k
task_categories
sequencelengths
0
47
createdAt
unknowndate
2022-03-02 23:29:22
2025-03-27 01:29:23
card
stringlengths
15
1.02M
Wuming/go
Wuming
"2024-11-25T21:42:19Z"
34
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T16:57:21Z"
--- dataset_info: - config_name: pair_classification features: - name: external_gene_name dtype: string - name: go_name dtype: string - name: class dtype: int64 - name: gene_summary dtype: string - name: wikipedia dtype: string - name: wikicrow dtype: string splits: - name: train num_bytes: 3631691766 num_examples: 389019 - name: val num_bytes: 454724502 num_examples: 48627 - name: test num_bytes: 453687940 num_examples: 48628 download_size: 1996636905 dataset_size: 4540104208 - config_name: reranking features: - name: external_gene_name dtype: string - name: positive sequence: string - name: negative sequence: string - name: gene_summary dtype: string - name: wikipedia dtype: string - name: wikicrow dtype: string splits: - name: train num_bytes: 86035076 num_examples: 8204 - name: val num_bytes: 10771827 num_examples: 1026 - name: test num_bytes: 10591859 num_examples: 1026 download_size: 45833683 dataset_size: 107398762 configs: - config_name: pair_classification data_files: - split: train path: pair_classification/train-* - split: val path: pair_classification/val-* - split: test path: pair_classification/test-* - config_name: reranking data_files: - split: train path: reranking/train-* - split: val path: reranking/val-* - split: test path: reranking/test-* ---
jlbaker361/eval_512_wikiart-weird_vanilla_25_30_subjects
jlbaker361
"2024-10-24T17:00:04Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T17:00:01Z"
--- dataset_info: features: - name: image dtype: image - name: prompt dtype: string - name: index dtype: int64 splits: - name: train num_bytes: 45176590.0 num_examples: 100 download_size: 45178537 dataset_size: 45176590.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
Wuming/ppi
Wuming
"2024-11-26T21:34:40Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T17:31:54Z"
--- dataset_info: - config_name: pair_classification features: - name: source_symbol dtype: string - name: target_symbol dtype: string - name: class dtype: int64 - name: source_summary dtype: string - name: target_summary dtype: string - name: source_wikipedia dtype: string - name: target_wikipedia dtype: string - name: source_wikicrow dtype: string - name: target_wikicrow dtype: string splits: - name: train num_bytes: 815464818 num_examples: 42176 - name: val num_bytes: 101517277 num_examples: 5272 - name: test num_bytes: 102828650 num_examples: 5272 download_size: 385319304 dataset_size: 1019810745 - config_name: reranking features: - name: external_gene_name dtype: string - name: positive_symbol sequence: string - name: positive_summary sequence: string - name: positive_wikipedia sequence: string - name: positive_wikicrow sequence: string - name: negative_symbol sequence: string - name: negative_summary sequence: string - name: negative_wikipedia sequence: string - name: negative_wikicrow sequence: string - name: gene_summary dtype: string - name: wikipedia dtype: string - name: wikicrow dtype: string splits: - name: train num_bytes: 413770629 num_examples: 2637 - name: val num_bytes: 50736614 num_examples: 330 - name: test num_bytes: 57419062 num_examples: 330 download_size: 213922433 dataset_size: 521926305 configs: - config_name: pair_classification data_files: - split: train path: pair_classification/train-* - split: val path: pair_classification/val-* - split: test path: pair_classification/test-* - config_name: reranking data_files: - split: train path: reranking/train-* - split: val path: reranking/val-* - split: test path: reranking/test-* ---
eyestrain/reviews
eyestrain
"2024-10-24T17:37:05Z"
34
0
[ "license:mit", "size_categories:n<1K", "format:csv", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T17:36:21Z"
--- license: mit ---
ADHIZ/khamzat
ADHIZ
"2024-10-24T17:44:07Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T17:44:04Z"
--- dataset_info: features: - name: Prompt dtype: string - name: Answer dtype: string splits: - name: train num_bytes: 1232 num_examples: 5 download_size: 3618 dataset_size: 1232 configs: - config_name: default data_files: - split: train path: data/train-* ---
danigambit/D_ep1_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-24T18:42:14Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T18:42:10Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1799953 num_examples: 1000 download_size: 410817 dataset_size: 1799953 configs: - config_name: default data_files: - split: train path: data/train-* ---
lucaelin/generic_domain_actions_v1
lucaelin
"2024-10-25T23:52:47Z"
34
0
[ "language:en", "license:mit", "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "synthetic" ]
null
"2024-10-24T18:45:51Z"
--- dataset_info: features: - name: profession dtype: string - name: process dtype: string - name: name dtype: string - name: description dtype: string - name: parameter_schema dtype: string - name: result_schema dtype: string splits: - name: train num_bytes: 2155269 num_examples: 4271 download_size: 633289 dataset_size: 2155269 configs: - config_name: default data_files: - split: train path: data/train-* license: mit language: - en tags: - synthetic ---
datasets-CNRS/Impec
datasets-CNRS
"2024-10-25T20:38:23Z"
34
0
[ "language:fr", "license:cc-by-nc-nd-3.0", "region:us" ]
null
"2024-10-24T21:14:22Z"
--- language: - fr viewer: false license: cc-by-nc-nd-3.0 --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/impec ## Description Au sein du laboratoire ICAR, spécialisé dans l’analyse des interactions, un sous-groupe de recherche « Interactions Multimodales Par ECran » (IMPEC) cherche à explorer les spécificités des situations qui articulent à la fois des interactions présentielles (Traverso 2016) et distancielles (Develotte, Kern, Lamy, 2011). Dans cette perspective, un atelier exploratoire a été organisé en 2016-2017 consistant à recueillir les données produites lors d’un séminaire doctoral suivi par des participants d’une part à Lyon et, d’autre part, à distance (France, Angleterre, Chine), via différents types de dispositifs interactionnels numériques. L’objectif de la recherche a consisté à décrire les interactions interpersonnelles dans un séminaire « polyartéfacté » c’est-à-dire associé à des technologies de téléprésence plus ou moins mobiles : la plateforme Adobe Connect, le robot Kubi et le robot Beam. Le séminaire s’est déroulé dans une salle équipée de caméras et de micros de façon à couvrir au maximum la complexité de la co-présence de participants en présentiel (environ 12 personnes), via Adobe Connect (4 personnes dont l’image est projetée sur le mur), et via les robot(s) (Beam et/ou Kubi selon la session). Par ailleurs, les participants à distance ont enregistré leurs écrans (capture dynamique d’écran) et se sont filmé devant leur ordinateur (par une caméra externe). Cinq sessions ont été filmées en octobre et novembre 2016, puis, en janvier, mars et avril 2017. 
Ci-dessous la durée totale des vidéos, par évènement : -Recueil du 21/10/2016 - data session Morgane: 00: 48: 44 -Recueil du 18/11/2016 - conférence anthropologues: 01: 37: 00 -Recueil du 20/01/2017 - travail collectif : (partie 1) 00: 32: 43 / (partie 2) 01: 08: 00 -Recueil du 24/03/2017 - conférence Susan Herring : 01: 40 : 00 -Recueil du 28/04/2017 - data session Christelle : (partie 1) 00: 46: 49 / (partie 2) 00: 52: 57 L'ensemble des données (toutes les vues comprises) constitue 34h d'enregistrement vidéo, et 9,5h de captures d'écran. Les résultats des analyses effectuées par les participant.e.s au séminaire doctoral sont publiés dans un document en accès libre intitulé « Fabrique de l’interaction parmi les écrans : formes de présences en recherche et en formation » à l’adresse suivante : https://ateliers.sens-public.org/ ## Citation ``` @misc{11403/impec/v2, title = {Présences numériques}, author = {Christine Develotte, Justine Lascar}, url = {https://hdl.handle.net/11403/impec/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {licence Creative Commons Attribution - Pas du2019Utilisation Commerciale - Pas de Modification 3.0 non transposé}, year = {2021} } ```
ycfNTU/usb_select_llama70b_update1
ycfNTU
"2024-10-31T21:11:42Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T21:32:38Z"
--- dataset_info: features: - name: id dtype: string - name: domain dtype: string - name: summ_idx dtype: int64 - name: input_lines dtype: string - name: topic_name dtype: string - name: output_lines dtype: string - name: top_sentences_words1 sequence: string - name: top_sentences_128 sequence: string - name: select_sentences dtype: string - name: summary1 dtype: string splits: - name: train num_bytes: 40417346 num_examples: 1023 download_size: 11332527 dataset_size: 40417346 configs: - config_name: default data_files: - split: train path: data/train-* ---
lucaelin/generic_domain_searches_v1
lucaelin
"2024-10-25T23:52:23Z"
34
0
[ "language:en", "license:mit", "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "synthetic" ]
null
"2024-10-24T21:36:43Z"
--- dataset_info: features: - name: profession dtype: string - name: process dtype: string - name: name dtype: string - name: entity_name dtype: string - name: description dtype: string - name: query_schema dtype: string - name: result_schema dtype: string splits: - name: train num_bytes: 1085392 num_examples: 810 download_size: 336057 dataset_size: 1085392 configs: - config_name: default data_files: - split: train path: data/train-* license: mit language: - en tags: - synthetic ---
sainathv02/gemma_2_9b_imm_qlora_eval
sainathv02
"2024-10-24T22:21:17Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T21:54:08Z"
--- dataset_info: features: - name: question dtype: string - name: answer dtype: string - name: Generated Answers dtype: string splits: - name: test num_bytes: 41587 num_examples: 126 download_size: 23828 dataset_size: 41587 configs: - config_name: default data_files: - split: test path: data/test-* ---
ZixuanKe/cfa_extracted_qa_gpt4_verify_sup_chunk_0
ZixuanKe
"2024-10-25T01:13:09Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-24T23:26:25Z"
--- dataset_info: features: - name: topic dtype: string - name: title dtype: string - name: justification dtype: string - name: questions dtype: string - name: scenario dtype: string - name: exhibit dtype: string - name: answer_choices dtype: string - name: answer dtype: string - name: material dtype: string - name: gpt4_answerable_with_material dtype: string - name: gpt4_answerable_without_material dtype: string - name: gpt4_answer dtype: string - name: gpt4_answer_justification dtype: string splits: - name: train num_bytes: 32209608 num_examples: 1124 download_size: 2257043 dataset_size: 32209608 configs: - config_name: default data_files: - split: train path: data/train-* ---
danigambit/D_ep2_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-25T00:08:27Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T00:08:24Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1861431 num_examples: 1000 download_size: 365202 dataset_size: 1861431 configs: - config_name: default data_files: - split: train path: data/train-* ---
jkazdan/gsm8k_llama8b_synthetic
jkazdan
"2024-10-25T00:26:21Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T00:26:20Z"
--- dataset_info: features: - name: question dtype: string - name: answer dtype: string - name: num_answer dtype: string splits: - name: train num_bytes: 5060972 num_examples: 7222 download_size: 2467077 dataset_size: 5060972 configs: - config_name: default data_files: - split: train path: data/train-* ---
ZixuanKe/cfa_extracted_qa_gpt4_verify_sup_chunk_6
ZixuanKe
"2024-10-25T01:13:39Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T00:44:10Z"
--- dataset_info: features: - name: topic dtype: string - name: title dtype: string - name: justification dtype: string - name: questions dtype: string - name: scenario dtype: string - name: exhibit dtype: string - name: answer_choices dtype: string - name: answer dtype: string - name: material dtype: string - name: gpt4_answerable_with_material dtype: string - name: gpt4_answerable_without_material dtype: string - name: gpt4_answer dtype: string - name: gpt4_answer_justification dtype: string splits: - name: train num_bytes: 34234699 num_examples: 1124 download_size: 2039157 dataset_size: 34234699 configs: - config_name: default data_files: - split: train path: data/train-* ---
AlanYky/tweets_topic_with_instructions
AlanYky
"2024-10-25T01:36:07Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T01:36:05Z"
--- dataset_info: features: - name: instruction dtype: string - name: text dtype: string - name: target dtype: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 4334042 num_examples: 22174 download_size: 2958462 dataset_size: 4334042 configs: - config_name: default data_files: - split: train path: data/train-* ---
ZixuanKe/cfa_extracted_qa_gpt4_verify_sup_chunk_8
ZixuanKe
"2024-10-25T01:39:12Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T01:39:10Z"
--- dataset_info: features: - name: topic dtype: string - name: title dtype: string - name: justification dtype: string - name: questions dtype: string - name: scenario dtype: string - name: exhibit dtype: string - name: answer_choices dtype: string - name: answer dtype: string - name: material dtype: string - name: gpt4_answerable_with_material dtype: string - name: gpt4_answerable_without_material dtype: string - name: gpt4_answer dtype: string - name: gpt4_answer_justification dtype: string splits: - name: train num_bytes: 33543363 num_examples: 1124 download_size: 5819882 dataset_size: 33543363 configs: - config_name: default data_files: - split: train path: data/train-* ---
ZixuanKe/cfa_extracted_qa_gpt4_verify_sft
ZixuanKe
"2024-11-03T22:11:58Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T01:58:29Z"
--- dataset_info: features: - name: topic dtype: string - name: title dtype: string - name: justification dtype: string - name: questions dtype: string - name: scenario dtype: string - name: exhibit dtype: string - name: answer_choices dtype: string - name: answer dtype: string - name: material dtype: string - name: gpt4_answerable_with_material dtype: string - name: gpt4_answerable_without_material dtype: string - name: gpt4_answer dtype: string - name: gpt4_answer_justification dtype: string - name: messages list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 331819601 num_examples: 7078 download_size: 85664887 dataset_size: 331819601 configs: - config_name: default data_files: - split: train path: data/train-* ---
AlanYky/tweet_topic_area
AlanYky
"2024-10-25T02:44:01Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T02:43:59Z"
--- dataset_info: features: - name: instruction dtype: string - name: text dtype: string - name: label_name dtype: string splits: - name: train num_bytes: 1456332 num_examples: 6067 download_size: 727978 dataset_size: 1456332 configs: - config_name: default data_files: - split: train path: data/train-* ---
danigambit/D_ep3_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-25T05:39:19Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T05:39:17Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1875964 num_examples: 1000 download_size: 352074 dataset_size: 1875964 configs: - config_name: default data_files: - split: train path: data/train-* ---
kmrasmussen/codecontests_py_mini
kmrasmussen
"2024-10-25T06:47:20Z"
34
0
[ "license:mit", "size_categories:1K<n<10K", "format:csv", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T06:43:41Z"
--- license: mit ---
adriansanz/MIX-TRAIN_double_columns_format_bai_newformat
adriansanz
"2024-10-25T07:10:55Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T07:10:53Z"
--- dataset_info: features: - name: query dtype: string - name: pos sequence: string - name: neg sequence: string splits: - name: train num_bytes: 3174824 num_examples: 5130 download_size: 205135 dataset_size: 3174824 configs: - config_name: default data_files: - split: train path: data/train-* ---
lmms-lab/application_image
lmms-lab
"2024-10-26T01:54:57Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T07:42:01Z"
--- dataset_info: config_name: application_augmented features: - name: id dtype: string - name: question dtype: string - name: options sequence: string - name: answer dtype: string - name: image_1 dtype: image - name: question_type dtype: string splits: - name: test num_bytes: 48979169.0 num_examples: 189 download_size: 48700226 dataset_size: 48979169.0 configs: - config_name: application_augmented data_files: - split: test path: application_augmented/test-* ---
pdf2dataset/a6c09581af9dbb1e1ec7a27aca77a2c2
pdf2dataset
"2024-10-25T07:47:24Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T07:47:23Z"
--- dataset_info: features: - name: text dtype: string - name: source dtype: string splits: - name: train num_bytes: 9019 num_examples: 3 download_size: 15432 dataset_size: 9019 configs: - config_name: default data_files: - split: train path: data/train-* ---
yaojie-shen/AutoTransition
yaojie-shen
"2024-10-26T07:32:16Z"
34
0
[ "license:cc-by-4.0", "region:us" ]
null
"2024-10-25T08:06:18Z"
--- license: cc-by-4.0 ---
francis47/back_transl_247_gl_2
francis47
"2024-10-25T08:53:45Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T08:53:36Z"
--- dataset_info: features: - name: mg dtype: string - name: fr dtype: string splits: - name: train num_bytes: 6916909 num_examples: 52987 download_size: 5263718 dataset_size: 6916909 configs: - config_name: default data_files: - split: train path: data/train-* ---
BaoLocTown/amazon-massive-scenario-all-languages
BaoLocTown
"2024-10-25T08:54:57Z"
34
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T08:54:45Z"
--- dataset_info: features: - name: id dtype: string - name: label dtype: string - name: label_text dtype: string - name: text dtype: string - name: lang dtype: string splits: - name: train num_bytes: 54090142 num_examples: 575700 - name: test num_bytes: 13837713 num_examples: 148700 - name: validation num_bytes: 9492112 num_examples: 101650 download_size: 29066917 dataset_size: 77419967 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: validation path: data/validation-* ---
dipopotamus/coursera_course_detail_2
dipopotamus
"2024-10-25T09:07:06Z"
34
0
[ "license:apache-2.0", "size_categories:1K<n<10K", "format:json", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T09:05:36Z"
--- license: apache-2.0 ---
deiviz6332/ciclismo
deiviz6332
"2024-10-26T12:10:05Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T09:09:49Z"
--- dataset_info: features: - name: pregunta dtype: string - name: respuesta dtype: string - name: libro dtype: string - name: capitulo dtype: string splits: - name: train num_bytes: 43108.8023255814 num_examples: 137 - name: test num_bytes: 11013.197674418605 num_examples: 35 download_size: 29414 dataset_size: 54122.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
timlenardo/RealVisXL_V4.0_woman_class_images
timlenardo
"2024-10-25T09:36:17Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T09:35:51Z"
--- dataset_info: features: - name: image dtype: image splits: - name: train num_bytes: 399810597.0 num_examples: 300 download_size: 399843745 dataset_size: 399810597.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
ShigrafS/Hindi_Prep_Dataset
ShigrafS
"2024-10-25T10:14:09Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T10:11:35Z"
--- dataset_info: features: - name: input_ids sequence: int32 - name: labels sequence: sequence: float32 - name: speaker_embeddings sequence: float32 splits: - name: train num_bytes: 2974517162.831395 num_examples: 16476 download_size: 2947184355 dataset_size: 2974517162.831395 configs: - config_name: default data_files: - split: train path: data/train-* ---
Giwabestie/vc_landscape_dataset
Giwabestie
"2024-10-25T11:01:26Z"
34
0
[ "license:mit", "size_categories:n<1K", "format:csv", "modality:tabular", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T10:57:57Z"
--- license: mit ---
danigambit/D_ep4_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-25T11:11:43Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T11:11:40Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1889202 num_examples: 1000 download_size: 333796 dataset_size: 1889202 configs: - config_name: default data_files: - split: train path: data/train-* ---
akhooli/dfq_100_2
akhooli
"2024-10-25T11:29:23Z"
34
0
[ "license:mit", "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T11:28:27Z"
--- license: mit dataset_info: features: - name: query_id dtype: int64 - name: text dtype: string splits: - name: train num_bytes: 6823022 num_examples: 100000 download_size: 4107153 dataset_size: 6823022 configs: - config_name: default data_files: - split: train path: data/train-* ---
Diplomkaazvposlednimsemestru/MUNIAI
Diplomkaazvposlednimsemestru
"2024-10-30T09:57:27Z"
34
0
[ "language:cs", "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "art" ]
null
"2024-10-25T11:36:33Z"
--- dataset_info: features: - name: text dtype: string - name: author dtype: string splits: - name: train num_bytes: 1256177 num_examples: 3355 download_size: 834137 dataset_size: 1256177 configs: - config_name: default data_files: - split: train path: data/train-* language: - cs tags: - art pretty_name: Homework ---
akhooli/dfq_100_4
akhooli
"2024-10-25T11:40:51Z"
34
0
[ "license:mit", "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T11:40:45Z"
--- license: mit dataset_info: features: - name: query_id dtype: int64 - name: text dtype: string splits: - name: train num_bytes: 6805138 num_examples: 100000 download_size: 4098841 dataset_size: 6805138 configs: - config_name: default data_files: - split: train path: data/train-* ---
shivank21/mind2web-grouped-train
shivank21
"2024-10-25T13:24:56Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T13:18:21Z"
--- dataset_info: features: - name: annotation_id dtype: string - name: website dtype: string - name: domain dtype: string - name: subdomain dtype: string - name: confirmed_task dtype: string - name: steps list: - name: action_reprs sequence: string - name: action_uid dtype: string - name: operation dtype: string - name: screenshot struct: - name: bytes dtype: binary - name: path dtype: string - name: target_action_index dtype: string - name: target_action_reprs dtype: string splits: - name: train num_bytes: 7343168118 num_examples: 1009 download_size: 6729655918 dataset_size: 7343168118 configs: - config_name: default data_files: - split: train path: data/train-* ---
cgoosen/strongreject-extended
cgoosen
"2024-10-25T13:19:34Z"
34
0
[ "license:mit", "region:us" ]
null
"2024-10-25T13:19:34Z"
--- license: mit ---
pdf2dataset/9113763e92ae41218f8bb32bfcbde5dd
pdf2dataset
"2024-10-25T13:28:13Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T13:28:12Z"
--- dataset_info: features: - name: text dtype: string - name: source dtype: string splits: - name: train num_bytes: 671802 num_examples: 355 download_size: 339866 dataset_size: 671802 configs: - config_name: default data_files: - split: train path: data/train-* ---
shivank21/mind2web-grouped-test_task
shivank21
"2024-10-25T13:30:14Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T13:29:07Z"
--- dataset_info: features: - name: annotation_id dtype: string - name: website dtype: string - name: domain dtype: string - name: subdomain dtype: string - name: confirmed_task dtype: string - name: steps list: - name: action_reprs sequence: string - name: action_uid dtype: string - name: operation dtype: string - name: screenshot struct: - name: bytes dtype: binary - name: path dtype: string - name: target_action_index dtype: string - name: target_action_reprs dtype: string splits: - name: train num_bytes: 1280070246 num_examples: 177 download_size: 1168671189 dataset_size: 1280070246 configs: - config_name: default data_files: - split: train path: data/train-* ---
shresthagarwal/vlD
shresthagarwal
"2024-10-25T14:21:43Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T14:17:24Z"
--- dataset_info: features: - name: messages list: - name: role dtype: string - name: content list: - name: type dtype: string - name: text dtype: string - name: images list: image splits: - name: train num_bytes: 377113385.0 num_examples: 230 download_size: 360132585 dataset_size: 377113385.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
bouloud/finetuning_demo
bouloud
"2024-10-25T14:34:05Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T14:34:03Z"
--- dataset_info: features: - name: prompt dtype: string splits: - name: train num_bytes: 1434769 num_examples: 5100 download_size: 192816 dataset_size: 1434769 configs: - config_name: default data_files: - split: train path: data/train-* ---
yananchen/mmlupro_sft
yananchen
"2024-10-25T14:44:39Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T14:44:36Z"
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 58669 num_examples: 70 - name: test num_bytes: 8354828 num_examples: 12032 download_size: 4137170 dataset_size: 8413497 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
itsnotacreativeuser/macbethact1-3
itsnotacreativeuser
"2024-10-25T14:45:39Z"
34
0
[ "license:cc", "size_categories:n<1K", "format:json", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T14:45:04Z"
--- license: cc ---
inpaint-context/opa-uptrain
inpaint-context
"2024-10-25T15:00:35Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T14:56:21Z"
--- dataset_info: features: - name: image dtype: string - name: mask dtype: image splits: - name: train num_bytes: 229509833.234 num_examples: 26767 - name: validation num_bytes: 48218269.811 num_examples: 5273 download_size: 133803398 dataset_size: 277728103.045 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* ---
ahmedheakl/asm2asm_O0_500000_risc_1
ahmedheakl
"2024-10-25T15:13:48Z"
34
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T15:13:22Z"
--- dataset_info: features: - name: x86 dtype: string - name: risc dtype: string splits: - name: train num_bytes: 900782222 num_examples: 249851 download_size: 238327226 dataset_size: 900782222 configs: - config_name: default data_files: - split: train path: data/train-* ---
HF-SSSVVVTTT/wcep_edus
HF-SSSVVVTTT
"2024-10-26T04:53:30Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T15:20:55Z"
--- dataset_info: features: - name: id dtype: int64 - name: summary dtype: string - name: document dtype: string - name: doc_token_num sequence: int64 - name: doc_rough_word_num sequence: int64 - name: jaccard_doc_score sequence: float64 - name: jaccard_doc_edu_score sequence: sequence: float64 - name: jaccard_multi_doc_score sequence: float64 - name: jaccard_multi_doc_edu_score sequence: sequence: float64 - name: sent_qa_trans_doc_score sequence: float64 - name: sent_qa_trans_doc_score_chunk sequence: float64 - name: sent_qa_trans_doc_edu_score sequence: sequence: float64 splits: - name: train num_bytes: 313290678 num_examples: 8158 - name: validation num_bytes: 40842647 num_examples: 1020 - name: test num_bytes: 39262822 num_examples: 1022 download_size: 171095008 dataset_size: 393396147 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* ---
Bretagne/mmid_br
Bretagne
"2024-10-25T17:14:47Z"
34
0
[ "language:br", "license:cc-by-sa-4.0", "region:us" ]
null
"2024-10-25T15:55:25Z"
--- language: - br viewer: false license: cc-by-sa-4.0 --- > [!NOTE] > Dataset origin: https://github.com/penn-nlp/mmid/blob/master/downloads.md # Description Image/word dataset for breton (100 images by word), the metadata of all images and the webpages they showed up on, and the dictionary containing just the words we have images for in each language, as well as their canonical MMID ID within the language. For more information, see our [documentation page](https://multilingual-images.org/doc.html). MMID was constructed by building translations for the bilingual dictionaries found [here](https://www.seas.upenn.edu/~nlp/resources/TACL-data-release/dictionaries.tar.gz), which were built as described in the paper [The Language Demographics of Amazon Mechanical Turk](https://cs.brown.edu/people/epavlick/papers/language_demographics_mturk.pdf). Through the generosity of the Amazon Public Datasets program, each download is available via a public S3 bucket! ## Citation ``` @InProceedings{hewitt-et-al:2018:Long, author = {Hewitt, John and Ippolito, Daphne and Callahan, Brendan and Kriz, Reno and Wijaya, Derry Tanti and Callison-Burch, Chris}, title = {Learning Translations via Images with a Massively Multilingual Image Dataset}, booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = {July}, year = {2018}, address = {Melbourne, Australia}, publisher = {Association for Computational Linguistics} } ```
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_cfb8972c-c942-4cec-a8f3-cc851cc37bfa
argilla-internal-testing
"2024-10-25T16:29:09Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T16:29:07Z"
--- dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': positive '1': negative splits: - name: train num_bytes: 111 num_examples: 3 download_size: 1454 dataset_size: 111 configs: - config_name: default data_files: - split: train path: data/train-* ---
danigambit/D_ep5_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-25T16:42:28Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T16:42:25Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1915580 num_examples: 1000 download_size: 324839 dataset_size: 1915580 configs: - config_name: default data_files: - split: train path: data/train-* ---
jcdansie/go_emotions_max_500
jcdansie
"2024-10-25T17:08:01Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T17:07:51Z"
--- dataset_info: features: - name: input dtype: string - name: output sequence: string - name: id dtype: string splits: - name: train num_bytes: 4414532 num_examples: 43409 - name: test num_bytes: 555216 num_examples: 5427 - name: validation num_bytes: 554994 num_examples: 5426 - name: discarded num_bytes: 1522 num_examples: 1 download_size: 3476102 dataset_size: 5526264 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: validation path: data/validation-* - split: discarded path: data/discarded-* ---
datasets-CNRS/autismedascalu
datasets-CNRS
"2024-10-25T18:07:36Z"
34
0
[ "language:fr", "license:cc-by-nc-sa-4.0", "region:us" ]
null
"2024-10-25T17:21:43Z"
--- language: - fr license: cc-by-nc-sa-4.0 --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/autismedascalu > [!CAUTION] > Vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données (62G). ## Description Corpus d'interaction spontanée de deux enfants autistes de haut niveau. ## Citation ``` @misc{11403/autismedascalu/v1, title = {Autisme-Dascalu}, author = {Camelia Dascalu}, url = {https://hdl.handle.net/11403/autismedascalu/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {Licence Creative Commons Attribution - Pas d'Utilisation Commerciale - Partage dans les Mêmes Conditions 4.0 International}, year = {2021} } ```
datasets-CNRS/convers
datasets-CNRS
"2024-10-25T20:25:16Z"
34
0
[ "language:fr", "license:cc-by-nc-4.0", "region:us" ]
null
"2024-10-25T17:24:07Z"
--- language: - fr license: cc-by-nc-4.0 viewer: false --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/convers > [!CAUTION] > Ce jeu de données ne contient que les transcriptions. Pour récupérer les audios, vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. # Description Nous présentons un nouveau paradigme pour les neurosciences sociales qui compare une interaction sociale humaine (interaction humain-humain, HHI) à une interaction avec un robot conversationnel (interaction humain-robot, HRI) pendant l'imagerie par résonance magnétique fonctionnelle (IRMf). Nous avons enregistré des blocs d'une minute de discussion bidirectionnelle en direct entre un participant dans un scanner et un autre humain (confédéré) ou un robot qui se trouvait à l'extérieur du scanner. Une page de présentation fournit le sujet de la discussion tout en cachant aux participants les objectifs réels de l'expérience. Pour ce faire, nous avons recueilli des données multimodales incluant le comportement (discours du participant et de l'agent humain ou robot, capture vidéo de l'agent humain et robot, et le mouvement du regard du participant scanné) et la physiologie (signal BOLD, respiration et flux sanguin périphérique) pour former un corpus. Les données vocales transcrites (fichiers .textGrid) peuvent être trouvées ici (l'ensemble de données sera mis à jour au fur et à mesure de la transcription). La configuration audio de l'IRMf a permis une discussion en direct entre le participant scanné et l'agent (humain ou robot) à l'extérieur du scanner, malgré le bruit du scanner IRM. Ceci a été réalisé grâce à un microphone actif compatible MR à réduction de bruit (FORMI-III+ de l'optoacoustique monté sur la bobine de tête) et à des écouteurs à inserts de Sensimetrics. 
Les enregistrements de l'IRM consistaient en quatre sessions de six blocs de conversation d'une minute chacun, montrant des fruits et des légumes anthropomorphisés en "super-héros" ou images "décomposées". Nous avons enregistré 3 minutes de conversation par AGENT INTERACTEUR (humain ou robot) et session, pour un total de 24 minutes de conversation par participant. ## Citation ``` @misc{11403/convers/v2, title = {convers}, author = {INT and LPL}, url = {https://hdl.handle.net/11403/convers/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {Licence Creative Commons Attribution - Pas d'Utilisation Commerciale 4.0 International}, year = {2020} } ```
KayoSilva88777/Garrafeiroofc
KayoSilva88777
"2024-10-25T17:31:30Z"
34
0
[ "license:openrail", "size_categories:n<1K", "format:audiofolder", "modality:audio", "library:datasets", "library:mlcroissant", "region:us" ]
null
"2024-10-25T17:29:35Z"
--- license: openrail ---
datasets-CNRS/queer-solidarity
datasets-CNRS
"2024-10-25T17:40:59Z"
34
0
[ "language:fr", "license:cc-by-nc-nd-3.0", "region:us" ]
null
"2024-10-25T17:39:07Z"
--- language: - fr license: cc-by-nc-nd-3.0 --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/queer-solidarity > [!CAUTION] > Vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données. ## Description Le corpus "Queer Solidarity Smashes Borders" rassemble 56 photos de tracts, banderoles ou événements queers en soutien aux migrantes et aux migrants. Les photos répertoriées ont été prises entre 2005 et 2018, en Europe, Amérique du Nord, Australie et Moyen-Orient. ## Citation ``` @misc{11403/queer-solidarity/v1, title = {"Queer Solidarity Smashes Borders" Corpus}, author = {Julie Abbou}, url = {https://hdl.handle.net/11403/queer-solidarity/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {licence Creative Commons Attribution - Pas d'Utilisation Commerciale - Pas de Modification 3.0 non transposé}, year = {2021} } ```
lucaelin/generic_process_details_v1
lucaelin
"2024-10-25T23:50:23Z"
34
0
[ "language:en", "license:mit", "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "synthetic" ]
null
"2024-10-25T17:44:06Z"
--- dataset_info: features: - name: profession dtype: string - name: process dtype: string - name: description dtype: string - name: location1 dtype: string - name: location2 dtype: string - name: location_name dtype: string - name: challenge dtype: string - name: duration dtype: string - name: frequency dtype: string - name: importance dtype: string - name: equipment dtype: string - name: localities dtype: string - name: sourroundings dtype: string - name: people dtype: string splits: - name: train num_bytes: 1462621 num_examples: 821 download_size: 603024 dataset_size: 1462621 configs: - config_name: default data_files: - split: train path: data/train-* license: mit language: - en tags: - synthetic ---
datasets-CNRS/smyle
datasets-CNRS
"2024-10-25T17:47:54Z"
34
0
[ "language:fr", "license:cc-by-nc-sa-4.0", "region:us" ]
null
"2024-10-25T17:46:51Z"
--- language: - fr license: cc-by-nc-sa-4.0 --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/smyle > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description SMYLE is a multimodal corpus in French (16h) including audio-video and neuro-physiological data from 60 participants engaged in face-to-face storytelling (8.2h) and free conversation tasks (7.8h). This corpus covers all modalities, precisely synchronized. It constitutes one of the first corpus of this size offering the opportunity to investigate cognitive characteristics of spontaneous conversation including at the brain level. The storytelling task comprises two conditions: a storyteller talking with a ``normal'' or a ``distracted'' listener. The goal of SMYLE is to address unexplored aspects of conversation such as the cognitive processes involved to build a common ground and achieve mutual understanding. SMYLE presents each pair of participants in both controlled and uncontrolled context with pre-defined discursive roles for the narration but free ones for conversation. Sixty participants took part in the experiment (mean age = 22.77, sd = 3.29 , min = 18, max = 36). Forty-three participants were female and 17 participants were male. Fifty-four participants were students of different levels and fields, five were employed and one was unemployed. Participants were recruited from the Aix-Marseille University and from the mailing list of Laboratoire Parole et Langage. All participants were native French speakers, right-handed, and reported no neurological or language disorders. The experiment was conducted in November at Laboratoire Parole et Langage at Aix-en-Provence, France. The participants received a compensation of 30€. None of the participant dyads knew each other before the experiment. 
All sixty participants were recorded in all modalities, except one participant with no EEG data and one storyteller with no video for Task 1 due to recording failure, both in the normal condition. Several levels of annotations are performed on the corpus. In order to reduce as much as possible the manual annotations, we have used several software programs to perform automatic annotations. So far, all automatic annotations have been performed and manual corrections are in progress. The manual annotations and corrections are performed by three expert annotators. The annotations include: - Gaze - Smile (neutral face, low intensity smile, high intensity smile) - Orthographic Transcription (including laughter, broken word, elision, repetition, personal information) - Head movements (nod, shake, tilt, other) - Feedback (generic and specific feedback including details annotations such as wince, eyebrow movements, shurg, etc.) - Momel Intint and OpenSmile prosodic annotations - Aligned tokens, part-of-speech and phonetization - Speech activity (speech, laugh, silence) Audio and video files will be made available in September after automatic anonymization of participants' personal data, on the basis of orthographic transcriptions, for members of the scientific community only. Please contact us for access to the corpus. You will be asked to sign a user agreement; we reserve the right to refuse/restrict access to certain types of data. ## Citation ``` @misc{11403/smyle/v1, title = {SMYLE}, author = {Auriane Boudin, Roxane Bertrand, Stéphane Rauzy, Thierry Legou, Magalie Ochs, Philippe Blache}, url = {https://hdl.handle.net/11403/smyle/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {Licence Creative Commons Attribution - Pas d'Utilisation Commerciale - Partage dans les Mêmes Conditions 4.0 International}, year = {2023} } ```
datasets-CNRS/BrainKT
datasets-CNRS
"2024-10-25T18:02:06Z"
34
0
[ "language:fr", "license:cc-by-nc-sa-4.0", "region:us" ]
null
"2024-10-25T17:48:51Z"
--- language: - fr license: cc-by-nc-sa-4.0 --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/brainkt > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. > Attention, le jeu de données fait 100G ! ## Description BrainKT est un corpus multimodal (audio, video et données neurophysiologiques) d'interactions conversationnelles dyadiques en français. Ce corpus a été constitué dans le but d'étudier les transferts d'information dans la conversation naturelle. #### Les tâches : Jeu video collaboratif (15min) : désamorçage d'une bombe sur le jeu Keep Talking and Nobody Explodes Conversation naturelle (15min) : résolution d'un dilemme moral puis discussion libre #### Les participants : 56 participants soit 28 dyades Chaque interaction dure en moyenne 30 min Les deux participants d'une même dyade ne se connaissaient pas avant l'enregistrement Les participants sont francophones natifs ; devaient posséder une vision normale ou corrigée et une vision normale des couleurs Tous les participants ont signé un consentement éclairé Aucun ne connaissait les raisons de l’enregistrement. Cela leur a été expliqué après l'enregistrement. 
#### Protocole d’enregistrement : Enregistrement audio et vidéo en chambre anéchoïque du Centre d'Expérimentation sur la Parole (LPL - Aix- en Provence, France) Les deux participants sont assis en face à face à une distance d'environ 1.4m, séparés par une table sur laquelle est posé du matériel utilisé pendant l'expérience Ils sont équipés d’un micro-casque Deux caméras filment les participants : chaque caméra est positionnée derrière chaque participant pour que les deux soient filmés de face L'activité cérébrale des participants est enregistrée (BioSemi), ainsi que leur activité physiologique (Empatica E4) #### Organisation du corpus : Les données alignées des différentes modalités sont mises à disposition : Les vidéos (montées) de l'expérience (MP4) Les enregistrements audio (WAV) Les données d'activité cérébrale alignées (FIF, format utilisé par MNE-Python) et préprocessées par tâche (EDF) Les données d'activité physiologique (JSON) Les transcriptions (transcription automatique réalisée par wav2vec2) (Elan) Des métadonnées (CSV) : marqueurs temporels recueillis dans l'expérience, réponses des participants aux questionnaires comportementaux #### Remarques : Les données de ce corpus sont actuellement en cours de traitement et d'analyse ; des données additionnelles (annotations...) pourront être mise à disposition plus tard. Le corpus contenant des données identifiantes (audio, vidéo), nous en restreignons pour l'instant l'accès. Les membres de la communauté scientifique souhaitant accéder au corpus peuvent nous contacter pour demander le déblocage des données. L'adhésion à un contrat d'utilisation vous sera demandé ; nous nous réservons le droit de refuser / limiter vos accès à certains types de données selon vos motivations. Les transcriptions corrigées et anonymisées des conversations seront mises à disposition de la communauté dans un futur proche. 
## Citation ``` @misc{11403/brainkt/v0.1, title = {BrainKT}, author = {Eliot Maës, Philippe Blache, Leonor Becerra-Bonache}, url = {https://hdl.handle.net/11403/brainkt/v0.1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {Licence Creative Commons Attribution - Pas d'Utilisation Commerciale - Partage dans les Mêmes Conditions 4.0 International}, year = {2023} } ```
datasets-CNRS/Grenelle_II
datasets-CNRS
"2024-10-25T18:01:26Z"
34
0
[ "language:fr", "size_categories:n<1K", "format:audiofolder", "modality:audio", "modality:video", "library:datasets", "library:mlcroissant", "region:us" ]
null
"2024-10-25T17:51:35Z"
--- language: - fr viewer: false --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000744 et https://www.ortolang.fr/market/corpora/sldr000768 ## Description Extrait de la vidéo de la 2e séance du 4 mai 2010. Le débat sur le « Grenelle II de l’environnement » a été sélectionné en raison de la controverse importante qu’il a déclenchée. Le député Vert Yves Cochet y fait une intervention, de laquelle nous avons retenu 4 minutes du moment le plus vif de la controverse, où le député est interrompu à 11 reprises. Extrait de la vidéo de la 2e séance du 4 mai 2010. Le débat sur le « Grenelle II de l'environnement » a été sélectionné en raison de la controverse importante qu'il a déclenchée. Le député Vert Yves Cochet y fait une intervention, de laquelle nous avons retenu 4 minutes 50 secondes. Annotation : orthographique, syllabes, phonèmes, tons, contours intonatifs, gestes, morphosyntaxe, catégories, répétitions, etc. ## Citation ``` @misc{11403/sldr000744/v2, title = {Grenelle II - Sous Partie 1 : audio/video}, author = {Brigitte Bigi, Cristel Portes, Agnès Steuckardt, Marion Tellier}, url = {https://hdl.handle.net/11403/sldr000744/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2011} } ``` ``` @misc{11403/sldr000768/v2, title = {Grenelle II - Sous Partie 2 : audio/video}, author = {LPL}, url = {https://hdl.handle.net/11403/sldr000768/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2011} } ```
datasets-CNRS/focus_en_francais
datasets-CNRS
"2024-10-25T18:43:30Z"
34
0
[ "language:fr", "size_categories:n<1K", "format:audiofolder", "modality:audio", "library:datasets", "library:mlcroissant", "region:us" ]
null
"2024-10-25T18:39:47Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000490 ## Description Focus en français ## Citation ``` @misc{11403/sldr000490/v1, title = {Focus en fran\c{c}ais}, author = {Clément Plancq}, url = {https://hdl.handle.net/11403/sldr000490/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2013} } ```
datasets-CNRS/contractions
datasets-CNRS
"2024-10-25T18:49:29Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-25T18:41:08Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000795 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données. ## Description Corpus de parole spontanée et de lecture pour la comparaison des contractions des mots. Le corpus contient des enregistrements de 4 locuteurs différents en parole spontanée et en lecture de textes. ## Citation ``` @misc{11403/sldr000795/v1, title = {Contractions de mots en parole spontanée et lecture}, author = {Charlotte Graux}, url = {https://hdl.handle.net/11403/sldr000795/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2013} } ```
datasets-CNRS/chanteurs
datasets-CNRS
"2024-10-25T18:47:29Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-25T18:41:27Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000792 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Deux mélodies (Joyeux Anniversaire et une mélodie romantique libre) sont chantées par 50 chanteurs confirmés de 19 à 66 ans (moyenne : 36,94 ans). Ces 38 femmes et 12 hommes ont commencé leur formation entre 6 ans et 49 ans (moyenne : 20,18), ont une expérience scénique de 5 à 51 ans (moyenne : 19,86 ans) et pratiquent leur instrument vocal environ 13,68 heures par semaine. Suite à la production de deux glissendi, les participants ont chanté la mélodie « Joyeux Anniversaire » sans tonalité imposée, à deux reprises. La première fois sans technique particulière (séquences a) et la seconde fois avec une technique vocale lyrique (séquences b). Chaque participant a également choisi une mélodie de type romantique parmi leur répertoire musical. Ils l’ont chanté avec trois techniques différentes : sans technique particulière (séquences c), avec une technique vocale lyrique (séquences d) et en musant, c’est à dire en chantant la bouche fermée (séquences e). L’ordre de production était similaire pour chaque chanteur (séquences a, e, c, d, b). ## Citation ``` @misc{11403/sldr000792/v1, title = {Corpus « Chanteurs entra\^{\i}nés »}, author = {Pauline Larrouy-Maestri}, url = {https://hdl.handle.net/11403/sldr000792/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2012} } ```
miketes/mobile-filtered-english-wave-ui-25k
miketes
"2024-10-25T19:26:24Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T19:26:16Z"
--- dataset_info: features: - name: image dtype: image - name: instruction dtype: string - name: bbox sequence: float64 - name: resolution sequence: int64 - name: source dtype: string - name: platform dtype: string - name: name dtype: string - name: description dtype: string - name: type dtype: string - name: OCR dtype: string - name: language dtype: string - name: purpose dtype: string - name: expectation dtype: string splits: - name: train num_bytes: 318104238.57014287 num_examples: 795 download_size: 103481189 dataset_size: 318104238.57014287 configs: - config_name: default data_files: - split: train path: data/train-* ---
pclucas14/new_nqa_rag_128_test
pclucas14
"2024-10-25T19:46:35Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T19:46:33Z"
--- dataset_info: features: - name: text sequence: sequence: string - name: questions sequence: string - name: answers sequence: sequence: string - name: document_id dtype: string - name: split dtype: string splits: - name: train num_bytes: 3832324 num_examples: 20 download_size: 1022987 dataset_size: 3832324 configs: - config_name: default data_files: - split: train path: data/train-* ---
maniro-ai/2024-10-25-engine-pick-berry-relative
maniro-ai
"2024-10-25T20:32:37Z"
34
0
[ "region:us" ]
null
"2024-10-25T20:32:32Z"
--- dataset_info: features: - name: observation.state sequence: float32 length: 4 - name: action sequence: float32 length: 4 - name: episode_index dtype: int64 - name: frame_index dtype: int64 - name: timestamp dtype: float32 - name: observation.images.wrist_1 dtype: video_frame - name: observation.images.wrist_2 dtype: video_frame - name: index dtype: int64 splits: - name: train num_bytes: 371340 num_examples: 2063 download_size: 92813 dataset_size: 371340 configs: - config_name: default data_files: - split: train path: data/train-* ---
datasets-CNRS/Europe
datasets-CNRS
"2024-10-25T20:54:04Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-25T20:46:12Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/ortolang-000909 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Enregistrement sur France Culture de l’émission «La Suite dans les idées» produite par Sylvain Bourmeau et diffusée le 25 février 2000 à partir du magnétophone d’une chaîne compacte privée (une seule piste). L’émission dure environ 45 minutes. Elle a pour titre «L’Europe et son élargissement, question taboue ?». Deux journalistes (un homme et une femme) conduisent le débat qui oppose quatre invités (de sexe masculin). La ressource se compose de l’enregistrement d’un fichier son de l’intégralité de l’émission au format .wav et de plusieurs fichiers d’étiquettes au format .TextGrid du logiciel Praat (phones, syllabes, contours intonatifs notamment). Elle comporte aussi la transcription orthographique du contenu de l’émission et un deuxième exemplaire de celle-ci contenant des annotations intonatives. fichier “TranscriptionOrthographique.pdf“: Transcription orthographique seule. fichier “CorpusEtContours.pdf“: Transcription orthographique enrichie de la localisation dans le texte des différents contours intonatifs utilisés. ## Citation ``` @misc{11403/ortolang-000909/v1, title = {Europe}, author = {Cristel Portes}, url = {https://hdl.handle.net/11403/ortolang-000909/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2015} } ```
datasets-CNRS/DMG_entretiens
datasets-CNRS
"2024-10-25T20:56:32Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-25T20:47:32Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000714 > [!CAUTION] > Ce jeu de données ne contient que les transcriptions. Pour récupérer les audios, vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données. ## Description Corpus audio de 4 entretiens semi-dirigés (environ 5h30 au total). Les entretiens portent sur la féminisation des textes (double-marquage) en contexte politique libertaire, avec des locuteurs issus de ces cultures politiques. Productions métadiscursives sur les stratégies de féminisation des locuteurs. Transcription orthographique, basée sur la convention de transcription du LPL. ## Citation ``` @misc{11403/sldr000714/v3, title = {Corpus oral Double Marquage de Genre (masculin/féminin) - Entretiens}, author = {{Département de lettres modernes, Université d'Aix-Marseille} and LPL}, url = {https://hdl.handle.net/11403/sldr000714/v3}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2010} } ```
datasets-CNRS/Marseille_2007
datasets-CNRS
"2024-10-25T20:57:30Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-25T20:48:37Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000019 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données. ## Description Corpus résultant d'une enquête de terrain réalisée à Marseille. Corpus constitué de 10 entretiens semi-directifs réalisés avec des informateurs nés à Marseille, entre janvier et novembre 2007. Tous les informateurs sont francophones natifs, nés et résidant à Marseille. Les autres critères pris en compte dans l'échantillonnage sont le sexe, l'âge et la catégorie socioprofessionnelle. Les enregistrements ont eu lieu au domicile des informateurs ou sur leur lieu de travail. La durée de chaque entretien varie entre 12 et 30 minutes. Corpus apparenté : voir http://sldr.org/sldr000020 ## Citation ``` @misc{11403/sldr000019/v5, title = {Corpus Représentations linguistiques Marseille 2007}, author = {{Département de sciences du langage, Université d'Aix-Marseille} and LPL}, url = {https://hdl.handle.net/11403/sldr000019/v5}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2010} } ```
danigambit/D_ep6_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-25T22:07:50Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T22:07:47Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1914176 num_examples: 1000 download_size: 318218 dataset_size: 1914176 configs: - config_name: default data_files: - split: train path: data/train-* ---
ahmed275/opinions_dataset_temporal_test_generated_summaries_conservative
ahmed275
"2024-10-25T22:09:05Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T22:09:03Z"
--- dataset_info: features: - name: id dtype: string - name: year dtype: int64 - name: url dtype: string - name: opinionOfTheCourt dtype: string - name: syllabus dtype: string - name: issueArea dtype: float64 - name: decisionDirection dtype: float64 - name: partyWinning dtype: float64 - name: voteDistribution dtype: float64 - name: respondentType dtype: int64 - name: respondent dtype: float64 - name: __index_level_0__ dtype: int64 - name: generated_summary dtype: string splits: - name: train num_bytes: 23586888 num_examples: 547 download_size: 12429627 dataset_size: 23586888 configs: - config_name: default data_files: - split: train path: data/train-* ---
pclucas14/new_nqa_rag_1024_test
pclucas14
"2024-10-25T22:50:22Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-25T22:50:21Z"
--- dataset_info: features: - name: text sequence: sequence: string - name: questions sequence: string - name: answers sequence: sequence: string - name: document_id dtype: string - name: split dtype: string splits: - name: train num_bytes: 33896754 num_examples: 20 download_size: 13288585 dataset_size: 33896754 configs: - config_name: default data_files: - split: train path: data/train-* ---
Mohamk1234/raftified_pubmedqa_sample
Mohamk1234
"2024-10-26T00:16:52Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T00:16:50Z"
--- dataset_info: features: - name: id dtype: string - name: type dtype: string - name: question dtype: string - name: context struct: - name: sentences list: - name: contexts sequence: string - name: labels sequence: string - name: meshes sequence: string - name: title sequence: string - name: oracle_context struct: - name: contexts sequence: string - name: labels sequence: string - name: meshes sequence: string - name: cot_answer dtype: string - name: instruction dtype: string splits: - name: train num_bytes: 16811307 num_examples: 1000 download_size: 7875476 dataset_size: 16811307 configs: - config_name: default data_files: - split: train path: data/train-* ---
Drashtip/preprocessed_squad
Drashtip
"2024-10-26T00:54:36Z"
34
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T00:54:34Z"
--- dataset_info: features: - name: id dtype: string - name: title dtype: string - name: context dtype: string - name: question dtype: string - name: answers struct: - name: answer_start sequence: int64 - name: text sequence: string splits: - name: train num_bytes: 79696504 num_examples: 87599 download_size: 14464698 dataset_size: 79696504 configs: - config_name: default data_files: - split: train path: data/train-* ---
helpotcreator/krx-tutorial-raw
helpotcreator
"2024-10-26T04:32:42Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T01:45:36Z"
--- dataset_info: features: - name: sampled_text dtype: string - name: question dtype: string - name: response dtype: string splits: - name: train num_bytes: 536832 num_examples: 120 download_size: 274156 dataset_size: 536832 configs: - config_name: default data_files: - split: train path: data/train-* ---
danigambit/D_ep7_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-26T03:31:23Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T03:31:19Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1918406 num_examples: 1000 download_size: 313956 dataset_size: 1918406 configs: - config_name: default data_files: - split: train path: data/train-* ---
jkazdan/gsm8k_synthetic_Llama-3-8B-Instruct
jkazdan
"2024-10-26T21:04:54Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T03:52:58Z"
--- dataset_info: features: - name: question dtype: string - name: solution dtype: string - name: answer dtype: string - name: extracted_answer dtype: 'null' splits: - name: train num_bytes: 5060972 num_examples: 7222 download_size: 2468726 dataset_size: 5060972 configs: - config_name: default data_files: - split: train path: data/train-* ---
helpotcreator/krx-tutorial-web
helpotcreator
"2024-10-26T05:01:56Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T05:01:53Z"
--- dataset_info: features: - name: '' dtype: string - name: question dtype: string - name: response dtype: string splits: - name: train num_bytes: 4019739 num_examples: 1000 download_size: 2093575 dataset_size: 4019739 configs: - config_name: default data_files: - split: train path: data/train-* ---
ekon5509/waste_collect
ekon5509
"2024-10-26T05:35:13Z"
34
0
[ "license:mit", "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T05:16:36Z"
--- license: mit dataset_info: features: - name: Weeks dtype: string - name: Bonaberi dtype: float64 - name: Bonapriso dtype: float64 - name: Akwa dtype: float64 - name: Bonanjo dtype: float64 - name: Bonamoussadi dtype: float64 - name: Bali dtype: float64 - name: Deido dtype: float64 - name: New Bell dtype: float64 - name: Bepanda dtype: float64 - name: Bassa dtype: float64 - name: Logbaba dtype: float64 - name: Ndogpassi dtype: float64 - name: Cite de palmiers dtype: float64 - name: Nyalla dtype: float64 - name: Village dtype: float64 - name: 'Unnamed: 16' dtype: float64 - name: 'Unnamed: 17' dtype: float64 - name: 'Unnamed: 18' dtype: float64 - name: 'Unnamed: 19' dtype: float64 - name: 'Unnamed: 20' dtype: float64 - name: 'Unnamed: 21' dtype: float64 - name: 'Unnamed: 22' dtype: float64 - name: 'Unnamed: 23' dtype: float64 - name: 'Unnamed: 24' dtype: float64 - name: 'Unnamed: 25' dtype: float64 - name: 'Unnamed: 26' dtype: float64 - name: 'Unnamed: 27' dtype: float64 - name: 'Unnamed: 28' dtype: float64 - name: 'Unnamed: 29' dtype: float64 - name: 'Unnamed: 30' dtype: float64 - name: 'Unnamed: 31' dtype: float64 - name: 'Unnamed: 32' dtype: float64 - name: 'Unnamed: 33' dtype: float64 - name: 'Unnamed: 34' dtype: float64 - name: 'Unnamed: 35' dtype: float64 - name: 'Unnamed: 36' dtype: float64 - name: 'Unnamed: 37' dtype: float64 - name: 'Unnamed: 38' dtype: float64 - name: 'Unnamed: 39' dtype: float64 - name: 'Unnamed: 40' dtype: float64 - name: 'Unnamed: 41' dtype: float64 - name: 'Unnamed: 42' dtype: float64 - name: 'Unnamed: 43' dtype: float64 - name: 'Unnamed: 44' dtype: float64 - name: 'Unnamed: 45' dtype: float64 - name: 'Unnamed: 46' dtype: float64 - name: 'Unnamed: 47' dtype: float64 - name: 'Unnamed: 48' dtype: float64 - name: 'Unnamed: 49' dtype: float64 - name: 'Unnamed: 50' dtype: float64 - name: 'Unnamed: 51' dtype: float64 - name: 'Unnamed: 52' dtype: float64 - name: 'Unnamed: 53' dtype: float64 - name: 'Unnamed: 54' dtype: float64 - name: 
'Unnamed: 55' dtype: float64 - name: 'Unnamed: 56' dtype: float64 splits: - name: train num_bytes: 57056 num_examples: 124 download_size: 27444 dataset_size: 57056 configs: - config_name: default data_files: - split: train path: data/train-* ---
rayliang2/multi_lang_sentence_20k
rayliang2
"2024-10-26T06:21:48Z"
34
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T06:20:09Z"
--- dataset_info: features: - name: conversations list: - name: from dtype: string - name: value dtype: string - name: label dtype: string splits: - name: train num_bytes: 71454047 num_examples: 213333 download_size: 34790128 dataset_size: 71454047 configs: - config_name: default data_files: - split: train path: data/train-* ---
navimii/LaserTech_WorldMorph
navimii
"2024-10-26T08:28:53Z"
34
0
[ "license:mit", "size_categories:n<1K", "format:text", "modality:image", "modality:text", "library:datasets", "library:mlcroissant", "region:us", "art" ]
null
"2024-10-26T07:50:27Z"
--- license: mit tags: - art pretty_name: Laser Tech World Morph size_categories: - n<1K --- # status - trained on Flux - trained on SD1.5 # collection Date - 09/08/2024 to 10/08/2024 # Original intent - the illustration of a fictional world obressed with extreme precision
danigambit/D_ep8_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-26T09:01:27Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T09:01:24Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1915645 num_examples: 1000 download_size: 312073 dataset_size: 1915645 configs: - config_name: default data_files: - split: train path: data/train-* ---
studymakesmehappyyyyy/nturgbd_videos
studymakesmehappyyyyy
"2024-10-26T09:32:19Z"
34
0
[ "license:mit", "region:us" ]
null
"2024-10-26T09:32:19Z"
--- license: mit ---
tuy20212521/my-image-captioning-dataset-test-jsontrial
tuy20212521
"2024-10-26T10:59:04Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T10:58:56Z"
--- dataset_info: features: - name: image dtype: image splits: - name: train num_bytes: 14130495.0 num_examples: 55 download_size: 14135842 dataset_size: 14130495.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
vantral/test
vantral
"2024-10-26T12:56:57Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T12:56:53Z"
--- dataset_info: features: - name: all struct: - name: interlinear-text list: - name: item struct: - name: source dtype: string - name: paragraph list: - name: item struct: - name: speaker dtype: string - name: phrase list: - name: item struct: - name: ft dtype: string - name: id dtype: string - name: participant dtype: string - name: timestamp sequence: string - name: word list: list: - name: item struct: - name: grammar_tags sequence: string - name: translation sequence: string - name: txt dtype: string - name: morph list: - name: item struct: - name: gls dtype: string - name: id dtype: string - name: txt dtype: string - name: item dtype: 'null' splits: - name: train num_bytes: 307956 num_examples: 1 download_size: 84699 dataset_size: 307956 configs: - config_name: default data_files: - split: train path: data/train-* ---
datasets-CNRS/PSH_DISPE
datasets-CNRS
"2024-10-26T13:40:19Z"
34
0
[ "multilinguality:multilingual", "language:fr", "language:en", "region:us" ]
null
"2024-10-26T12:56:58Z"
--- language: - fr - en multilinguality: - multilingual --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000757 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Issu d'une collaboration entre le Laboratoire Parole et Langage (LPL) et l'Institut de Plongée Professionnelle de Marseille (INPP) en 1991, le corpus PSH/DISPE répond à la demande d'une base de sons pour le développement de nouveaux procédés de « décodage » de la parole hyperbare, et d'un outil pour l'évaluation des systèmes de communication vocale. Les fichiers d'annotations sont conformes au format standard SAM Europec du projet CEE-ESPRIT n°2589. ## Citation ``` @misc{11403/sldr000757/v2, title = {PSH/DISPE - Parole subaquatique et/ou hyperbare}, author = {Alain Marchal}, url = {https://hdl.handle.net/11403/sldr000757/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2009} } ```
datasets-CNRS/voyelles_mots_FR
datasets-CNRS
"2024-10-26T13:38:17Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T12:57:15Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000040 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Extrait d'un corpus trilingue français/anglais/espagnol. Annotation phonétique disponible. ## Citation ``` @misc{11403/sldr000040/v2, title = {Voyelles et mots FR}, author = {LPL}, url = {https://hdl.handle.net/11403/sldr000040/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2009} } ```
datasets-CNRS/Dialogue_francais_role_play
datasets-CNRS
"2024-10-26T13:41:55Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T12:58:46Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000738 and https://www.ortolang.fr/market/corpora/sldr000739 > [!CAUTION] > Ce jeu de données ne contient que les transcriptions. Pour récupérer les audios (sldr000738), vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Dialogue in French (role-play). The speech material used here contains dialogues spoken by 38 native speakers of French (10 pairs of female and 9 pairs of male students living in Paris). They were asked to simulate phone call conversations in the forms of role-plays of a request-refusal type. The total duration of the recordings is 95 minutes with a mean duration of 2.5 minutes for each conversation. ## Citation ``` @misc{11403/sldr000739/v2, title = {Annotations : Dialogue fran\c{c}ais (role-play)}, author = {Yukihiro Nishinuma}, url = {https://hdl.handle.net/11403/sldr000739/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2008} } ``` ``` @misc{11403/sldr000738/v2, title = {Dialogue fran\c{c}ais (role-play)}, author = {Yukihiro Nishinuma, Akiko Hayashi}, url = {https://hdl.handle.net/11403/sldr000738/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2008} } ```
FrancophonIA/ACTER-v1.5
FrancophonIA
"2024-10-26T13:55:16Z"
34
0
[ "multilinguality:multilingual", "language:fr", "language:en", "language:nl", "license:cc-by-nc-sa-4.0", "modality:text", "region:us" ]
null
"2024-10-26T13:46:15Z"
--- language: - fr - en - nl multilinguality: - multilingual license: cc-by-nc-sa-4.0 --- > [!NOTE] > Dataset origin: https://clarin.eurac.edu/repository/xmlui/handle/20.500.12124/47 # ACTER Annotated Corpora for Term Extraction Research, version 1.5 ACTER is a manually annotated dataset for term extraction, covering 3 languages (English, French, and Dutch), and 4 domains (corruption, dressage, heart failure, and wind energy). **Readme structure:** 1. General 2. Abbreviations 3. Data Structure 4. Annotations 5. Additional Information 6. Updates 7. Error Reporting 8. License ## 1. General * **Creator**: Ayla Rigouts Terryn * **Association**: LT3 Language and Translation Technology Team, Ghent University * **Date of creation version 1.0**: 17/12/2019 * **Date of creation current version 1.5**: 08/04/2022 * **Last updated**: 08/04/2022 * **Contact**: [email protected] * **Context**: Ayla Rigouts Terryn's PhD project + first TermEval shared task (CompuTerm2020) * **PhD**: D-Termine: Data-driven Term Extraction Methodologies Investigated http://hdl.handle.net/1854/LU-8709150 * **Shared Task**: see https://termeval.ugent.be; workshop proceedings with overview paper at https://lrec2020.lrec-conf.org/media/proceedings/Workshops/Books/COMPUTERM2020book.pdf) * **Annotation Guidelines**: http://hdl.handle.net/1854/LU-8503113 * **Source**: https://github.com/AylaRT/ACTER * **License**: Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0) (https://creativecommons.org/licenses/by-nc-sa/4.0/) * **Reference**: Please cite the following Open Access paper if you use this dataset https://doi.org/10.1007/s10579-019-09453-9 * Authors: Ayla Rigouts Terryn, Véronique Hoste, Els Lefever * Title: In no uncertain terms: a dataset for monolingual and multilingual automatic term extraction from comparable corpora * Date of online publication: 26 March 2019 * Date of print publication: 2020 (Volume 54, Issue 2, pages 385-418) * Journal: Language Resources 
and Evaluation (LRE) * Publisher: Springer * **Demo**: Online term extraction demo based on dataset: D-Terminer https://lt3.ugent.be/dterminer ## 2. Abbreviations **Languages and domains**: * "en" = English * "fr" = French * "nl" = Dutch * "corp" = corruption * "equi" = equitation (dressage) * "htfl" = heart failure * "wind" = wind energy * "cor" = parallel part of corruption corpus; completely unannotated **Annotation labels**: * "Spec" or "Specific": Specific Terms * "Com" or "Common": Common Terms * "OOD": Out-of-Domain Terms * "NE(s)": Named Entities ## 3. Data Structure ``` ACTER ├── README.md ├── sources.txt │ ├── en │ ├── corp │ │ ├── annotated │ │ │ ├── annotations │ │ │ │ ├── sequential_annotations │ │ │ │ │ ├── io_annotations │ │ │ │ │ │ ├── with_named_entities │ │ │ │ │ │ │ ├── corp_en_01_seq_terms_nes.tsv │ │ │ │ │ │ │ ├── corp_en_02_seq_terms_nes.tsv │ │ │ │ │ │ │ └── ... │ │ │ │ │ │ │ │ │ │ │ │ │ └── without_named_entities │ │ │ │ │ │ ├── corp_en_01_seq_terms.tsv │ │ │ │ │ │ ├── corp_en_02_seq_terms.tsv │ │ │ │ │ │ └── ... │ │ │ │ │ │ │ │ │ │ │ └── iob_annotations (equivalent to io_annotations) │ │ │ │ │ │ │ │ │ └── unique_annotation_lists │ │ │ │ ├── corp_en_terms.tsv │ │ │ │ ├── corp_en_terms_nes.tsv │ │ │ │ ├── corp_en_tokenised_terms.tsv │ │ │ │ └── corp_en_tokenised_terms_nes.tsv │ │ │ │ │ │ │ ├── texts │ │ │ └── texts_tokenised │ │ │ │ │ └── unannotated_texts │ │ ├── corp_en_03.txt │ │ ├── corp_en_13.txt │ │ └── ... │ │ │ ├── equi (equivalent to "corp") │ │ │ ├── htfl (equivalent to "corp") │ │ │ └── wind (equivalent to "corp") │ ├── fr (equivalent to "en") └── nl (equivalent to "en") ``` * **README.md, sources.txt** At the first level, there are two files with information about the dataset: the current README.md file and sources.txt, which mentions the sources of all texts in the dataset. 
* **languages** and language/**domains** At the first level, there is also one directory per language with an identical structure of subdirectories and files for each language. At the second level, there are four directories, i.e., one per domain, each with an identical structure of subdirectories and files. The corpora in each domain are comparable per language (i.e., similar size, topic, style). Only the corruption (corp) corpus is parallel, i.e., translations. * language/domain/**unannotated_texts** Per domain, there are annotated and unannotated texts. For the unannotated texts, only the original (normalised) texts themselves are offered as .txt-files. * language/domain/**annotated** For the annotated texts, many types of information are available, ordered in subdirectories. * language/domain/annotated/**annotations** The annotations can be found here, ordered in subdirectories for different formats of the data. * language/domain/annotated/**texts** and language/domain/annotated/**texts_tokenised** The texts of the annotated corpora can be found here, with the original (normalised) texts and the (normalised) tokenised texts in different directories. The texts were tokenised with LeTs PreProcess*, with one sentence per line and spaces between all tokens. * van de Kauter, M., Coorman, G., Lefever, E., Desmet, B., Macken, L., & Hoste, V. (2013). LeTs Preprocess: The Multilingual LT3 Linguistic Preprocessing Toolkit. Computational Linguistics in the Netherlands Journal, 3, 103–120.) * language/domain/annotated/annotations/**sequential_annotations** Sequential annotations always have one token per line, followed by a tab and a sequential label (more info in next section). There are empty lines between sentences. * .../**io(b)_annotations**: one directory per annotation scheme (IO versus IOB) * ../io(b)_annotations/**with(out)_named_entities**: per annotation scheme, one directory for data including and excluding Named Entities. 
* language/domain/annotated/annotations/**unique_annotation_lists** Lists of all unique annotations (lowercased, unlemmatised) for the entire corpus (langauge-domain), with one annotation per line, followed by a tab and its label (Specific_Term, Common_Term, OOD_Term, or Named Entity). * **domain_language_terms.tsv**: original annotations as they occur in the untokenised texts, including only term annotations (Specific_Term, Common_Term, OOD_Term), no Named Entities. * **domain_language_terms_nes.tsv**: same, but including Named Entities. * **domain_language_tokenised_terms.tsv**: original annotations mapped to tokens, including only those annotations that align exactly with token boundaries at least once in the corpus; including only term annotations (Specific_Term, Common_Term, OOD_Term), no Named Entities. * **domain_language_tokenised_terms_nes.tsv**: same, but including Named Entities. ## 4. Annotations ### 4.1 General The annotations are provided in simple UTF-8 encoded plain text files. No lemmatisation was performed. ### 4.2 Sequential annotations #### 4.2.1 Reference For an in-depth review of how the sequential labels were obtained and how they relate to the list-versions of the annotations, please check: Rigouts Terryn, A., Hoste, V., & Lefever, E. (2022). Tagging Terms in Text: A Supervised Sequential Labelling Approach to Automatic Term Extraction. Terminology. International Journal of Theoretical and Applied Issues in Specialized Communication, 28(1). https://doi.org/10.1075/term.21010.rig #### 4.2.2 General * one token per line, followed by a tab and the IO(B) label * based on the tokenised version of the corpus (see under language/domain/annotated/texts_tokenised) * normalised (see further), but with original casing * in case of nested annotations, the longest possible span is given sequential labels. 
* e.g., "myocyte hypertrophy": if "myocyte", "hypertrophy", and "myocyte hypertrophy" were originally all annotated separately, the sequential labels will be based only on the longest possible annotation, i.e., "myocyte hypertrophy". * when a token was partially (not completely) annotated, the token is gets a positive (I or B) label (= different strategy for unique annotation lists) * e.g., "defibrillator-only therapy": if "defibrillator" was annotated but the complete token ("defibrillator-only") was not, the full token will still get a positive sequential label, but "defibrillator" will only occur in the unique annotations lists if it occurs as a separate token somewhere else in the corpus. * annotations of parts of terms also get a positive (I or B) label (= different strategy for unique annotation lists) * e.g. "left and right ventricular assist devices": "left" is part of the term "left ventricular assist devices", but because the term is split, the full term cannot be annotated with an uninterrupted annotation. "left" will get a positive sequential label, but will not be included as an annotation in the unique annotation lists #### 4.2.3 IOB versus IO **IOB** (Inside, Outside, beginning): the first token of any annotation gets labelled "B" and each subsequent token of the same annotation gets labelled "I". Tokens that are not part of any annotation are "O". **IO** (Inside, Outside): same as IOB but with no distinction between the first and subsequent tokens of an annotation. **Impact**: binary labelling (IO) is easier to model, so technically gets higher f1-scores, but loses some detail in case of adjacent annotations. For instance, if "diabetic patients" occurs and both "diabetic" and "patients" are annotated separately, but "diabetic patients" is not annotated as a term, then this can be accurately encoded with IOB labels ("diabetic[B] patients[B]"). 
With the binary IO scheme, this will become "diabetic[I] patients[I]", which would be the same as if "diabetic patients" were annotated, instead of the two separate entities. For a more detailed analysis of the difference, see the paper cited in 4.2.1. ### 4.3 Unique annotations lists #### 4.3.1 General * one annotation per line, tab-separated from its label * one list per corpus (language-domain), combining all unique annotations (no doubles) * normalised and lowercased * in case of annotations with different labels depending on the context, the most frequently assigned label is given. * only complete and uninterrupted annotations are included (in contrast to the sequential dataset) #### 4.3.2 Labels More details on the annotation labels are provided in the main publication accompanying this dataset. **Overview with examples in the domain of heart failure**: * **Specific Terms**: are domain-specific and lexicon-specific, i.e., relevant to the domain and known only by domain-experts, not by laypeople. * e.g., ejection fraction, ventricular assist device, tachycardia * **Common Terms**: are domain-specific but not lexicon-specific, i.e., relevant to the domain and known by laypeople * heart, patients, quality-of-life * **Out-of-Domain Terms**: are not domain-specific, but they are lexicon-specific, i.e, not directly relevant to the domain, but not generally known by laypeople * e.g., confidence interval, p-value, structured-telephone-support * **Named Entities**: are proper names of people, places, organisations, brands, etc. * e.g., MEDLINE, HeartMate, New York #### 4.3.3 Tokenised annotations Tokenised annotations have a space between each token and are mostly identical to the original annotations, except that they only include those annotations that can be mapped to complete tokens. When an annotation never aligns with token boundaries, it is not included. 
The differences are minor (see also 5.3 Number of annotations per corpus), but it is important to mention which of the two versions of the data is used. ## 5. Additional Information ### 5.1 Websites * For more information about the annotation guidelines, visit: http://hdl.handle.net/1854/LU-8503113 * For more information about the TermEval shared task, visit: https://termeval.ugent.be * For more information about the CompuTerm workshop, visit: https://sites.google.com/view/computerm2020/ * Online term extraction demo based on dataset: D-Terminer https://lt3.ugent.be/dterminer ### 5.2 Publications * Rigouts Terryn, A., Hoste, V., & Lefever, E. (2018). A Gold Standard for Multilingual Automatic Term Extraction from Comparable Corpora: Term Structure and Translation Equivalents. Proceedings of LREC 2018. * Rigouts Terryn, A., Hoste, V., & Lefever, E. (2019). In No Uncertain Terms: A Dataset for Monolingual and Multilingual Automatic Term Extraction from Comparable Corpora. Language Resources and Evaluation, 54(2), 385–418. https://doi.org/10.1007/s10579-019-09453-9 * Rigouts Terryn, A., Hoste, V., Drouin, P., & Lefever, E. (2020). TermEval 2020: Shared Task on Automatic Term Extraction Using the Annotated Corpora for Term Extraction Research (ACTER) Dataset. Proceedings of the 6th International Workshop on Computational Terminology (COMPUTERM 2020), 85–94. * Rigouts Terryn, A., Hoste, V., & Lefever, E. (2022). Tagging Terms in Text: A Supervised Sequential Labelling Approach to Automatic Term Extraction. Terminology. International Journal of Theoretical and Applied Issues in Specialized Communication, 28(1). https://doi.org/10.1075/term.21010.rig The dataset has been updated since the publication of the former two papers. These papers also discuss aspects of the data which have not been made available yet, such as cross-lingual annotations and information on the span of the annotations. 
### 5.3 Number of annotations per corpus #### 5.3.1 Explanation of differences in numbers * **Original versus tokenised**: only annotations that can be accurately mapped to token boundaries at least once in the corpus, are included as tokenised annotations. * **differences per label (with and without NEs)**: the most commonly assigned label is mentioned, so when Named Entities are included or excluded, this can impact the frequencies of the other labels as well, whenever an instance is assigned a Named Entity label in some contexts and a different label in others. #### 5.3.2 Original annotations, with Named Entities *path: language/domain/annotated/annotations/unique_annotation_lists/domain_language_terms_nes.tsv* **18,928 Annotations** | Domain | Language | Specific Terms | Common Terms | OOD Terms | Named Entities | Total | |--------|----------|---------------:|-------------:|----------:|---------------:|------:| | corp | en | 278 | 642 | 6 | 247 | 1173 | | corp | fr | 298 | 675 | 5 | 229 | 1207 | | corp | nl | 310 | 730 | 6 | 249 | 1295 | | equi | en | 777 | 309 | 69 | 420 | 1575 | | equi | fr | 701 | 234 | 26 | 220 | 1181 | | equi | nl | 1021 | 330 | 41 | 152 | 1544 | | htfl | en | 1883 | 319 | 157 | 222 | 2581 | | htfl | fr | 1684 | 487 | 57 | 146 | 2374 | | htfl | nl | 1559 | 449 | 66 | 180 | 2254 | | wind | en | 781 | 296 | 14 | 440 | 1531 | | wind | fr | 444 | 308 | 21 | 195 | 968 | | wind | nl | 577 | 342 | 21 | 305 | 1245 | #### 5.3.3 Original annotations, without Named Entities *path: language/domain/annotated/annotations/unique_annotation_lists/domain_language_terms.tsv* **15,929 Annotations** | Domain | Language | Specific Terms | Common Terms | OOD Terms | Total | |--------|----------|---------------:|-------------:|----------:|------:| | corp | en | 278 | 643 | 6 | 927 | | corp | fr | 298 | 676 | 5 | 979 | | corp | nl | 310 | 731 | 6 | 1047 | | equi | en | 777 | 309 | 69 | 1155 | | equi | fr | 701 | 234 | 26 | 961 | | equi | nl | 1022 | 330 | 41 | 
1393 | | htfl | en | 1884 | 319 | 158 | 2361 | | htfl | fr | 1684 | 487 | 57 | 2228 | | htfl | nl | 1559 | 449 | 66 | 2074 | | wind | en | 781 | 296 | 14 | 1091 | | wind | fr | 444 | 308 | 21 | 773 | | wind | nl | 577 | 342 | 21 | 940 | #### 5.3.4 Tokenised annotations, with Named Entities *path: language/domain/annotated/annotations/unique_annotation_lists/domain_language_tokenised_terms_nes.tsv* **18,797 Annotations** | Domain | Language | Specific Terms | Common Terms | OOD Terms | Named Entities | Total | |--------|----------|---------------:|-------------:|----------:|---------------:|------:| | corp | en | 278 | 641 | 6 | 247 | 1172 | | corp | fr | 298 | 675 | 5 | 229 | 1207 | | corp | nl | 308 | 726 | 6 | 249 | 1287 | | equi | en | 769 | 309 | 68 | 420 | 1561 | | equi | fr | 697 | 234 | 26 | 220 | 1176 | | equi | nl | 1020 | 329 | 41 | 152 | 1541 | | htfl | en | 1864 | 316 | 157 | 222 | 2556 | | htfl | fr | 1671 | 486 | 57 | 146 | 2357 | | htfl | nl | 1535 | 447 | 65 | 180 | 2215 | | wind | en | 784 | 295 | 13 | 440 | 1529 | | wind | fr | 443 | 308 | 21 | 195 | 967 | | wind | nl | 571 | 338 | 21 | 305 | 1229 | #### 5.3.5 Tokenised annotations, without Named Entities *path: language/domain/annotated/annotations/unique_annotation_lists/domain_language_tokenised_terms.tsv* **15,834 Annotations** | Domain | Language | Specific Terms | Common Terms | OOD Terms | Total | |--------|----------|---------------:|-------------:|----------:|------:| | corp | en | 278 | 642 | 6 | 926 | | corp | fr | 298 | 676 | 5 | 979 | | corp | nl | 308 | 727 | 6 | 1041 | | equi | en | 769 | 309 | 68 | 1146 | | equi | fr | 697 | 234 | 26 | 957 | | equi | nl | 1021 | 329 | 41 | 1391 | | htfl | en | 1865 | 316 | 158 | 2339 | | htfl | fr | 1671 | 486 | 57 | 2214 | | htfl | nl | 1535 | 447 | 65 | 2047 | | wind | en | 784 | 295 | 13 | 1092 | | wind | fr | 443 | 308 | 21 | 772 | | wind | nl | 571 | 338 | 21 | 930 | ### 5.4 Corpus counts (only annotated parts of corpus) | Domain | Language | 
# files | # sentences | # tokens (excl. EOS) | # tokens (incl. EOS) | |--------|----------|--------:|------------:|---------------------:|---------------------:| | corp | en | 12 | 2002 | 52,847 | 54,849 | | corp | fr | 12 | 1977 | 61,107 | 63,084 | | corp | nl | 12 | 1988 | 54,233 | 56,221 | | equi | en | 34 | 3090 | 61,293 | 64,383 | | equi | fr | 78 | 2809 | 63,870 | 66,679 | | equi | nl | 65 | 3669 | 60,119 | 63,788 | | htfl | en | 190 | 2432 | 57,899 | 60,331 | | htfl | fr | 210 | 2177 | 57,204 | 59,381 | | htfl | nl | 174 | 2880 | 57,846 | 60,726 | | wind | en | 5 | 6638 | 64,404 | 71,042 | | wind | fr | 2 | 4770 | 69,759 | 74,529 | | wind | nl | 8 | 3356 | 58,684 | 62,040 | ### 5.6 Normalisation The following normalisation procedures are applied to all available versions of the data: 1. Unidecode to avoid encoding issues with the "unicodedata" Python package ``` normalised_text = unicodedata.normalize("NFC", text_string_to_normalise) ``` 2. Make sure all dashes and quotes use the same characters ``` dashes = ["-", "−", "‐"] double_quotes = ['"', '“', '”', '„', "„", "„"] single_quotes = ["'", "`", "´", "’", "‘", "’"] # fix double character quotes for double_quote in [',,', "''", "''", "‘’", "’’"]: if double_quote in text: text_string_to_normalise = text_string_to_normalise.replace(double_quote, '"') # fix single character dashes and quotes normalised_text = "" for char in text_string_to_normalise: if char in dashes: string_normalised += "-" elif char in double_quotes: string_normalised += '"' elif char in single_quotes: string_normalised += "'" else: string_normalised += char ``` 3. Replace a specifically accented I which could not be handled well with lowercasing ``` normalised_text = text_string_to_normalise.replace("İ", "I") ``` 4. 
Remove very specific and rare special characters which cause problems with Transformers library ``` problem_chars = ["", "", "", "", "œ"] normalised_text = text_string_to_normalise for problem_char in problem_chars: normalised_text = normalised_text.replace(problem_char, "") ``` ## 6. Updates ### Changes version 1.0 > version 1.1 * English corpora: * corruption * Removed 1 NE: 'com(2007) 805 final' * wind energy * Removed 2 terms: 'variable pitch blades', 'renewable sources' * Removed 1 NE: 'skuodas' * French corpora: * corruption: * Removed 2 terms: 'indélicat', 'loi relative à la corruption' * equitation-dressage * Removed 2 terms: 'canons', 'équilibration' * wind energy * Added 1 term: 'systèmes mutisources-multistockages' * Removed 4 terms: 'systèmes mutisources', 'quadrature', 'inductance directe', 'résistance statorique' * Removed 98 NEs: 'bar', 'esk', 'akh', 'tht', 'enbw', 'rich', 'kama', 'man', 'sab', 'mer', 'deg', 'mor', 'aba', 'abo', 'ana', 'azm', 'joo', 'jen', 'pri', 'han', 'ree', 'dav', 'cou', 'hol', 'sau', 'lal', 'lei', 'vet', 'pur', 'per', 'her', 'hau', 'ans', 'slo', 'win', 'thi', 'ela', 'stem', 'cer', 'lav', 'ack', 'e.on', 'cim', 'luo', 'wik', 'ds1103', 'fag', 'and', 'alm', 'pan', 'rap', 'ric', 'saa', 'reb', 'bor', 'kin', 'sem', 'ecr', 'fau', 'ukt', 'kun', 'creg', 'sal', 'bou', 'crap', 'mog', 'nget', 'stu', 'sei', 'lec', 'dir', 'nor', 'abb', 'doh', 'rwe', 'mul', 'oud', 'bea', '96/92/ce', 'gar', 'eri', 'cal', 'goi', 'ish', 'fra', 'cra', 'bna', 'ull', 'des', 'ips', 'dro', 'uct', 'mat', 'ds 1104', 'mar', 'svk', 'bla', 'buh' * Dutch corpora: * corruption * Added 1 term: 'anticorruptie-eenheid' * Removed 4 terms: 'verslagen corruptiebestrijding', 'auditdiensten', 'anticorruptie', 'wet betreffende de omkoping' * equitation-dressage * Removed 2 terms: 'promotie', 'stuw' * wind energy * Removed 2 terms: 'windturbines een horizontale as', 'power coefficient' ### Changes version 1.1 > version 1.2 * Included domain of heart failure (test domain for TermEval shared task) ### Changes version 
1.2 > version 1.3 * corrected wrong sources in htfl_nl * changed heart failure abbreviation to "htfl" to be consistent with four-letter domain abbreviations * created Github repository for data + submitted it to CLARIN ### Changes version 1.3 > version 1.4 * applied limited normalisation on both texts and annotations: * unicodedata.normalize("NFC", text) * normalising all dashes to "-", all single quotes to "'" and all double quotes to '"' ### Changes version 1.4 > version 1.5 Not many changes to actual annotations, but major update to how the annotations are presented etc.: * Removed a few very long Named Entity annotations (from wind-en and from htfl-en; counts updated) over which there was doubt whether it was a real NE. * Updated normalisation: * Replaced "İ" with "I" in the annotations to avoid problems lowercasing (concerns mainly wind_en_01) * Removed rare but problematic characters: ["", "", "", "", "œ"] (not handled well by some transformers) * Major update of README.md * Different structure of all data: * include sequential annotations * include tokenised versions of annotations ## 7. Error Reporting The ACTER dataset is an ongoing project, so we are always looking to improve the data. Any questions or issues regarding this dataset may be reported via the Github repository at: https://github.com/AylaRT/ACTER and will be addressed asap. ## 8. 
License * *License*: Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0) (https://creativecommons.org/licenses/by-nc-sa/4.0/) * *Reference*: Please cite the following Open Access paper if you use this dataset for your research (https://doi.org/10.1007/s10579-019-09453-9) * Authors: Ayla Rigouts Terryn, Véronique Hoste, Els Lefever * Title: In no uncertain terms: a dataset for monolingual and multilingual automatic term extraction from comparable corpora * Date of online publication: 26 March 2019 * Date of print publication: 2020 (Volume 54, Issue 2, pages 385-418) * Journal: Language Resources and Evaluation (LRE) * Publisher: Springer The data can be freely used and adapted for non-commercial purposes, provided the above-mentioned paper is cited and any changes made to the data are clearly stated. ## Citation ``` @misc{20.500.12124/47, title = {{ACTER} (Annotated Corpora for Term Extraction Research) v1.5}, author = {Rigouts Terryn, Ayla}, url = {http://hdl.handle.net/20.500.12124/47}, note = {Eurac Research {CLARIN} Centre}, copyright = {Creative Commons - Attribution-{NonCommercial}-{ShareAlike} 4.0 International ({CC} {BY}-{NC}-{SA} 4.0)}, year = {2022} } ```
datasets-CNRS/Migration
datasets-CNRS
"2024-10-26T14:04:43Z"
34
0
[ "multilinguality:multilingual", "language:fr", "language:en", "region:us" ]
null
"2024-10-26T13:55:40Z"
--- language: - fr - en multilinguality: - multilingual --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000718 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Témoignages franco-allemands de la migration européenne. Dans le cadre du projet interdisciplinaire franco-allemand “Migratio et memoria“ (Romanisches Seminar der Albert Ludwigs Universitaet Freiburg, S Pfaender et Laboratoire Parole et Langage, CNRS, UMR 6057, Aix-en-Provence, S Kriegel), nous avons enregistré des entretiens spontanés avec des personnes âgées, locuteurs de différentes variétés de français ayant un parcours de migration au cours de leur vie. La transcription de l’échantillon est publiée dans Caban, Marie-Christine & Kriegel, Sibylle & Pfaender, Stefan, 2007: L’Europe de voies en voix. Témoignages franco-allemands de la migration européenne, collection Transcriptions, Centre Français de l’Université de Freiburg, volume 1, ISBN-13: 978-3-8305-1253-0 ## Citation ``` @misc{11403/sldr000718/v2, title = {Migration}, author = {Sibylle Kriegel}, url = {https://hdl.handle.net/11403/sldr000718/v2}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2008} } ```
datasets-CNRS/Edinburgh
datasets-CNRS
"2024-10-26T14:03:43Z"
34
0
[ "language:fr", "license:cc-by-nc-sa-4.0", "region:us" ]
null
"2024-10-26T13:57:18Z"
--- language: - fr license: cc-by-nc-sa-4.0 --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000779 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Corpus français de parole de laboratoire, construit afin de tester le rôle des indices prosodiques (particulièrement l'accent initial en français) dans la désambiguïsation de phrases syntaxiquement ambiguës (similaire au paradigme "Old men and women"). Approx. 3000 phrases, variant en longueur de constituants et en portée syntaxique. 6 locuteur * 64 phrases * 3 répétitions * 2 conditions syntaxiques = 2304 phrases ## Citation ``` @misc{11403/sldr000779/v1, title = {Corpus Edinburgh-Accent Initial en Fran\c{c}ais}, author = {Corine Astesano, Ellen Bard, Alice Turk}, url = {https://hdl.handle.net/11403/sldr000779/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, copyright = {Licence Creative Commons Attribution - Pas d'Utilisation Commerciale - Partage dans les Mêmes Conditions 4.0 International}, year = {2020} } ```
datasets-CNRS/relations_internationales
datasets-CNRS
"2024-10-26T13:59:34Z"
34
0
[ "language:fr", "size_categories:n<1K", "format:text", "modality:text", "library:datasets", "library:mlcroissant", "region:us" ]
null
"2024-10-26T13:57:49Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000889 ## Description Rencontres entre des étudiants étrangers en fin de séjour et la secrétaire du service des relations internationales d’une université. Questions administratives. Anonymisation en cours. ## Citation ``` @misc{11403/sldr000889/v1, title = {Service des relations internationales}, author = {SYLED and ATILF}, url = {https://hdl.handle.net/11403/sldr000889/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2014} } ```
datasets-CNRS/office_immigration
datasets-CNRS
"2024-10-26T14:16:25Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T14:10:38Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000885 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Rencontre entre des usagers et des agents administratifs à l’office de l’immigration. Questions administratives en lien avec la procédure d’obtention du visa long séjour. Transcriptions orthographique pour la plupart des enregistrements. Non faite pour les autres. Anonymisation en cours. Annotations : tous les prénoms sont des pseudonymes. ## Citation ``` @misc{11403/sldr000885/v1, title = {Office de l’immigration - Accueil}, author = {SYLED and ATILF}, url = {https://hdl.handle.net/11403/sldr000885/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2014} } ```
datasets-CNRS/entretiens_comprehension_orale
datasets-CNRS
"2024-10-26T14:16:03Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T14:10:55Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000882 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Entretiens entre la chercheure et des étudiants étrangers au sujet de leur apprentissage du français, de leur séjour en France, de l’évolution de leur aisance dans leur compréhension de l’oral en français. Certains enregistrements contiennent également un test de reconnaissance des implicites. Transcriptions partielles disponibles sur demande. Anonymisation en cours. ## Citation ``` @misc{11403/sldr000882/v1, title = {Entretiens compréhension orale}, author = {SYLED and ATILF}, url = {https://hdl.handle.net/11403/sldr000882/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2014} } ```
datasets-CNRS/ecole_doctorale
datasets-CNRS
"2024-10-26T14:15:10Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T14:11:22Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000880 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données. ## Description RDV entre une doctorante italienne et un secrétaire d’école doctorale roumain. Problème administratif. ## Citation ``` @misc{11403/sldr000880/v1, title = {Ecole doctorale}, author = {SYLED and ATILF}, url = {https://hdl.handle.net/11403/sldr000880/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2014} } ```
datasets-CNRS/entretiens_conversation
datasets-CNRS
"2024-10-26T14:14:33Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T14:11:48Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000883 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortolang et vous connecter afin de télécharger les données. ## Description Conversation entre la chercheure et des étudiants étrangers. Thèmes divers. Transcriptions partielles disponibles sur demande. Anonymisation en cours. ## Citation ``` @misc{11403/sldr000883/v1, title = {Entretiens conversation}, author = {SYLED and ATILF}, url = {https://hdl.handle.net/11403/sldr000883/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2014} } ```
craa/100M
craa
"2024-12-15T02:01:58Z"
34
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T14:26:11Z"
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 618330794 num_examples: 124545 - name: validation num_bytes: 37647056 num_examples: 8014 - name: low_0 num_bytes: 1038090 num_examples: 4000 - name: low_10 num_bytes: 1038202 num_examples: 4000 - name: low_100 num_bytes: 1039217 num_examples: 4000 - name: low_500 num_bytes: 1043607 num_examples: 4000 - name: low_1000 num_bytes: 1049233 num_examples: 4000 - name: low_2000 num_bytes: 1060526 num_examples: 4000 - name: high_0 num_bytes: 1038090 num_examples: 4000 - name: high_10 num_bytes: 1038203 num_examples: 4000 - name: high_100 num_bytes: 1039132 num_examples: 4000 - name: high_500 num_bytes: 1043292 num_examples: 4000 - name: high_1000 num_bytes: 1048552 num_examples: 4000 - name: high_2000 num_bytes: 1058901 num_examples: 4000 download_size: 407782553 dataset_size: 668512895 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: low_0 path: data/low_0-* - split: low_10 path: data/low_10-* - split: low_100 path: data/low_100-* - split: low_500 path: data/low_500-* - split: low_1000 path: data/low_1000-* - split: low_2000 path: data/low_2000-* - split: high_0 path: data/high_0-* - split: high_10 path: data/high_10-* - split: high_100 path: data/high_100-* - split: high_500 path: data/high_500-* - split: high_1000 path: data/high_1000-* - split: high_2000 path: data/high_2000-* ---
pclucas14/new_nqa_rag_256_test_full
pclucas14
"2024-10-26T14:27:25Z"
34
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T14:27:16Z"
--- dataset_info: features: - name: text sequence: sequence: string - name: questions sequence: string - name: answers sequence: sequence: string - name: document_id dtype: string - name: split dtype: string splits: - name: train num_bytes: 142482097 num_examples: 355 download_size: 60754943 dataset_size: 142482097 configs: - config_name: default data_files: - split: train path: data/train-* ---
danigambit/D_ep9_run0_llama2-7b_wiki_doc1000_tok25
danigambit
"2024-10-26T14:30:04Z"
34
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-26T14:29:59Z"
--- dataset_info: features: - name: id dtype: int64 - name: doc dtype: string splits: - name: train num_bytes: 1920795 num_examples: 1000 download_size: 311143 dataset_size: 1920795 configs: - config_name: default data_files: - split: train path: data/train-* ---
datasets-CNRS/lexique_designation
datasets-CNRS
"2024-10-26T14:35:12Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T14:30:38Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000852 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Ce corpus audio vient renforcer une enquête sociolinguistique menée en 2013 ; cette enquête est décrite comme suit : 'Nous nous intéressons à ce que nous appelons la désignation, c’est-à-dire au lexique employé par un locuteur pour en désigner un autre. On peut considérer que c’est un aspect particulier de la dénomination (d’un locuteur à un autre), qui possède un champ d’application restreint (cadre d’une conversation). Nous nous emploierons à relever ce lexique de la désignation, à discerner les sens qu’il véhicule et à l’analyser sur le plans des relations entre locuteurs. Nous nous concentrerons plus particulièrement sur la désignation directe, c’est-à-dire les termes qu’adresse directement un locuteur à un autre pour l’interpeller ou le désigner dans son discours.' ## Citation ``` @misc{11403/sldr000852/v1, title = {Le lexique de la désignation}, author = {{Département de sciences du langage, Université d'Aix-Marseille}}, url = {https://hdl.handle.net/11403/sldr000852/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2013} } ```
datasets-CNRS/francais_nord_gabon
datasets-CNRS
"2024-10-26T14:34:25Z"
34
0
[ "language:fr", "region:us" ]
null
"2024-10-26T14:31:07Z"
--- language: - fr --- > [!NOTE] > Dataset origin: https://www.ortolang.fr/market/corpora/sldr000869 > [!CAUTION] > Vous devez vous rendre sur le site d'Ortholang et vous connecter afin de télécharger les données. ## Description Français parlé par des Gabonais du nord du pays, des locuteurs âgés peu ou pas scolarisés et de jeunes adultes moyennement scolarisés. ## Citation ``` @misc{11403/sldr000869/v1, title = {Fran\c{c}ais parlé dans le nord du Gabon}, author = {LPL}, url = {https://hdl.handle.net/11403/sldr000869/v1}, note = {{ORTOLANG} ({Open} {Resources} {and} {TOols} {for} {LANGuage}) \textendash www.ortolang.fr}, year = {2014} } ```