Dataset schema (column name, type, and observed range or cardinality):

| Column | Type | Range / cardinality |
|:--|:--|:--|
| modelId | string | length 5 to 139 |
| author | string | length 2 to 42 |
| last_modified | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 to 2025-09-09 12:33:01 |
| downloads | int64 | 0 to 223M |
| likes | int64 | 0 to 11.7k |
| library_name | string | 550 distinct values |
| tags | list | length 1 to 4.05k |
| pipeline_tag | string | 55 distinct values |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 to 2025-09-09 12:32:40 |
| card | string | length 11 to 1.01M |
ramonmedeiro1/bertimbau-products-reviews-pt-br
ramonmedeiro1
2023-09-10T22:52:13Z
117
5
transformers
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "feedback", "products", "pt", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-02T18:12:25Z
---
language:
- pt
metrics:
- accuracy
tags:
- feedback
- products
---

# Introduction

Model trained from Neuralmind's BERTimbau (https://huggingface.co/neuralmind/bert-base-portuguese-cased) on a dataset called B2W-Reviews01, an open corpus of product reviews. It contains more than 130,000 e-commerce customer reviews collected from the Americanas.com website (https://github.com/americanas-tech/b2w-reviews01).

The model ran for only 50 minutes (3 epochs) on a Google instance with a T4 GPU. The purpose of this project is entirely didactic: the idea is to show how to fine-tune models for NLP tasks beyond text generation. I encourage anyone who finds this repository to train it for much longer to achieve better results.

# Results

* Epoch 1:
  - Training Loss: 0.863100
  - Validation Loss: 0.873007
  - Accuracy: 0.621733
  - f1_score: 0.491815
* Epoch 2:
  - Training Loss: 0.802800
  - Validation Loss: 0.897009
  - Accuracy: 0.620914
  - f1_score: 0.554796
* Epoch 3:
  - Training Loss: 0.692400
  - Validation Loss: 0.966356
  - Accuracy: 0.619210
  - f1_score: 0.557672

# Github

The notebook in which the fine-tuning was performed can be found in the repository (https://github.com/ramoonmedeiro/LLMTasks/tree/main/text-classification).
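A minimal usage sketch for this classifier, assuming the standard `transformers` text-classification pipeline; the label names and the example review below are illustrative, not taken from the card:

```python
from transformers import pipeline

# Load the fine-tuned BERTimbau review classifier from the Hub.
classifier = pipeline(
    "text-classification",
    model="ramonmedeiro1/bertimbau-products-reviews-pt-br",
)

# Example Portuguese product review (illustrative input).
print(classifier("Produto excelente, chegou antes do prazo!"))
```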
jennyc/bert-base-cased
jennyc
2023-09-10T22:37:14Z
106
0
transformers
[ "transformers", "pytorch", "bert", "question-answering", "generated_from_trainer", "endpoints_compatible", "region:us" ]
question-answering
2023-09-10T22:30:53Z
--- tags: - generated_from_trainer model-index: - name: bert-base-cased results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-cased This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
hyungtak/ko-Llama2-7B-chat
hyungtak
2023-09-10T22:33:12Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T22:32:59Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.6.0.dev0
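A sketch of how an adapter trained with the 8-bit config above could be reloaded. The Llama-2 chat base checkpoint is an assumption inferred from the repo name; the card only names the PEFT adapter:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Reproduce the bitsandbytes config reported in the card (8-bit, threshold 6.0).
bnb_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)

# ASSUMPTION: the base model; not stated in the card.
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "hyungtak/ko-Llama2-7B-chat")
```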
jennyc/bert-finetuned-squad
jennyc
2023-09-10T22:28:17Z
4
0
transformers
[ "transformers", "pytorch", "bert", "question-answering", "generated_from_trainer", "base_model:google-bert/bert-base-cased", "base_model:finetune:google-bert/bert-base-cased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2023-08-26T15:10:13Z
--- license: apache-2.0 base_model: bert-base-cased tags: - generated_from_trainer model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
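A minimal inference sketch for this checkpoint via the question-answering pipeline; the question and context are illustrative:

```python
from transformers import pipeline

qa = pipeline("question-answering", model="jennyc/bert-finetuned-squad")

result = qa(
    question="What does the model predict?",  # illustrative inputs
    context="Extractive QA models predict an answer span inside the context.",
)
print(result["answer"], result["score"])
```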
moonlightnexus/wonder-anime
moonlightnexus
2023-09-10T22:24:07Z
38
2
diffusers
[ "diffusers", "text-to-image", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-09-10T18:45:12Z
--- license: other language: - en library_name: diffusers pipeline_tag: text-to-image ---
Yntec/DucHaitenClassicAnime768
Yntec
2023-09-10T22:21:14Z
500
3
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "Classic Anime", "DucHaiten", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-07-17T01:34:01Z
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- diffusers
- text-to-image
- Classic Anime
- DucHaiten
---

# DucHaiten Classic Anime

A 768x768 version of this model with the Waifu 1.4 VAE baked in for the inference API, based on the Fp16NoEma checkpoint.

Use (80s anime style) or (gtav style) to enhance the style.

If you like his content, support him at: https://linktr.ee/Duc_Haiten

Original page: https://civitai.com/models/8542?modelVersionId=16168
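A short diffusers sketch using one of the trigger phrases from the card; the prompt wording beyond the trigger is illustrative:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Yntec/DucHaitenClassicAnime768",
    torch_dtype=torch.float16,
).to("cuda")

# "(80s anime style)" is one of the trigger phrases recommended in the card.
image = pipe("(80s anime style), portrait of a girl in a field").images[0]
image.save("classic_anime.png")
```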
the-neural-networker/xlm-roberta-base-finetuned-panx-all
the-neural-networker
2023-09-10T22:03:23Z
110
0
transformers
[ "transformers", "pytorch", "safetensors", "xlm-roberta", "token-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-09T17:39:50Z
--- license: mit base_model: xlm-roberta-base tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-all This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5155 - F1: 0.8005 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.4866 | 1.0 | 438 | 0.3960 | 0.6932 | | 0.2614 | 2.0 | 876 | 0.3196 | 0.7360 | | 0.1845 | 3.0 | 1314 | 0.3218 | 0.7698 | | 0.1353 | 4.0 | 1752 | 0.3439 | 0.7701 | | 0.096 | 5.0 | 2190 | 0.3638 | 0.7885 | | 0.0689 | 6.0 | 2628 | 0.3983 | 0.7991 | | 0.0474 | 7.0 | 3066 | 0.4359 | 0.7962 | | 0.0317 | 8.0 | 3504 | 0.4701 | 0.8032 | | 0.0219 | 9.0 | 3942 | 0.5055 | 0.8032 | | 0.0153 | 10.0 | 4380 | 0.5155 | 0.8005 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu117 - Datasets 2.14.4 - Tokenizers 0.13.3
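A hedged NER sketch for this PAN-X (multilingual) token classifier; the German example sentence is illustrative:

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="the-neural-networker/xlm-roberta-base-finetuned-panx-all",
    aggregation_strategy="simple",  # merge word pieces into whole entities
)
print(ner("George Washington lebte in Mount Vernon."))
```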
armstrongrichi/sdxl_picarmstrongrichi
armstrongrichi
2023-09-10T20:58:43Z
1
1
diffusers
[ "diffusers", "text-to-image", "autotrain", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:finetune:stabilityai/stable-diffusion-xl-base-1.0", "region:us" ]
text-to-image
2023-09-10T20:58:37Z
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: photo of a picarmstrongrichi person tags: - text-to-image - diffusers - autotrain inference: true --- # DreamBooth trained by AutoTrain Text encoder was not trained.
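A loading sketch, assuming the repo stores DreamBooth LoRA weights on top of the SDXL base named in the front matter (the usual AutoTrain layout; not confirmed by the card):

```python
import torch
from diffusers import DiffusionPipeline

# Load the SDXL base model named in the card's front matter.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
).to("cuda")

# ASSUMPTION: the adapter is stored as LoRA weights in this repo.
pipe.load_lora_weights("armstrongrichi/sdxl_picarmstrongrichi")

# Instance prompt taken from the card.
image = pipe("photo of a picarmstrongrichi person").images[0]
```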
JCTN/fast-repo
JCTN
2023-09-10T20:47:59Z
0
1
null
[ "code", "en", "region:us" ]
null
2023-06-03T00:13:50Z
---
language:
- en
tags:
- code
---

# This is what powered almost all of my Colabs

Mostly uses LZ4 compression, which means you'll need a specialized program to extract it, especially on Windows.

For Windows users, I recommend using [7zip-zstd](https://github.com/mcmilk/7-Zip-zstd/releases/latest) (it's 7zip but with lz4 support and more)

For Linux users, use tar with liblz4-tool like this: `tar -xI lz4 -f repo.tar.lz4`
s3nh/Undi95-MLewdBoros-L2-13B-GGUF
s3nh
2023-09-10T20:23:08Z
108
2
transformers
[ "transformers", "gguf", "text-generation", "zh", "en", "license:openrail", "endpoints_compatible", "region:us" ]
text-generation
2023-09-10T20:07:46Z
---
license: openrail
pipeline_tag: text-generation
library_name: transformers
language:
- zh
- en
---

## Original model card

Buy me a coffee if you like this project ;)
<a href="https://www.buymeacoffee.com/s3nh"><img src="https://www.buymeacoffee.com/assets/img/guidelines/download-assets-sm-1.svg" alt=""></a>

#### Description

GGUF Format model files for [This project](https://huggingface.co/Undi95/MLewdBoros-L2-13B).

### GGUF Specs

GGUF is a format based on the existing GGJT, but makes a few changes to the format to make it more extensible and easier to use. The following features are desired:

- Single-file deployment: they can be easily distributed and loaded, and do not require any external files for additional information.
- Extensible: new features can be added to GGML-based executors/new information can be added to GGUF models without breaking compatibility with existing models.
- mmap compatibility: models can be loaded using mmap for fast loading and saving.
- Easy to use: models can be easily loaded and saved using a small amount of code, with no need for external libraries, regardless of the language used.
- Full information: all information needed to load a model is contained in the model file, and no additional information needs to be provided by the user.

The key difference between GGJT and GGUF is the use of a key-value structure for the hyperparameters (now referred to as metadata), rather than a list of untyped values. This allows for new metadata to be added without breaking compatibility with existing models, and to annotate the model with additional information that may be useful for inference or for identifying the model.

### Perplexity params

| Model | Measure | Q2_K | Q3_K_S | Q3_K_M | Q3_K_L | Q4_0 | Q4_1 | Q4_K_S | Q4_K_M | Q5_0 | Q5_1 | Q5_K_S | Q5_K_M | Q6_K | Q8_0 | F16 |
|:--|:--|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|
| 7B | perplexity | 6.7764 | 6.4571 | 6.1503 | 6.0869 | 6.1565 | 6.0912 | 6.0215 | 5.9601 | 5.9862 | 5.9481 | 5.9419 | 5.9208 | 5.9110 | 5.9070 | 5.9066 |
| 13B | perplexity | 5.8545 | 5.6033 | 5.4498 | 5.4063 | 5.3860 | 5.3608 | 5.3404 | 5.3002 | 5.2856 | 5.2706 | 5.2785 | 5.2638 | 5.2568 | 5.2548 | 5.2543 |

### inference

TODO

# Original model card
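Since the card's inference section is still TODO, here is a hedged sketch using `llama-cpp-python`; the quant filename and prompt are assumptions, as the card does not list the shipped files:

```python
from llama_cpp import Llama

# ASSUMPTION: filename of a downloaded quant from this repo; adjust to the
# actual .gguf file you fetched.
llm = Llama(model_path="MLewdBoros-L2-13B.Q4_K_M.gguf", n_ctx=4096)

out = llm("Write a one-sentence story about a fox.", max_tokens=128)
print(out["choices"][0]["text"])
```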
vladfatu/ppo-Huggy
vladfatu
2023-09-10T20:21:12Z
3
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Huggy", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2023-09-10T20:21:08Z
---
library_name: ml-agents
tags:
- Huggy
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
---

# **ppo** Agent playing **Huggy**

This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
- A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction
- A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction

### Resume the training

```bash
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**

1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity
2. Step 1: Find your model_id: vladfatu/ppo-Huggy
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
Varun29/my-ai-project
Varun29
2023-09-10T20:15:24Z
5
0
diffusers
[ "diffusers", "safetensors", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-09-10T20:10:37Z
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My--ai-project- Dreambooth model trained by Varun29 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: IIITB-109 Sample pictures of this concept: ![0](https://huggingface.co/Varun29/my-ai-project/resolve/main/sample_images/xzg_(8).jpg)
Ori/lama-2-13b-peft-mh-retrieval-at-1-v2-seed-0
Ori
2023-09-10T20:10:58Z
0
0
peft
[ "peft", "safetensors", "region:us" ]
null
2023-09-10T19:47:49Z
--- library_name: peft --- ## Training procedure ### Framework versions - PEFT 0.5.0.dev0
osieosie/llama-samsum-4bit-13b-bnb-seed65
osieosie
2023-09-10T20:04:53Z
1
0
peft
[ "peft", "region:us" ]
null
2023-09-10T20:04:51Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.5.0.dev0
mindchain/llama2-ooiiioo
mindchain
2023-09-10T20:03:10Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T20:03:03Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.6.0.dev0
Ori/lama-2-13b-peft-mh-no-retrieval-v2-seed-0
Ori
2023-09-10T19:41:38Z
1
0
peft
[ "peft", "safetensors", "region:us" ]
null
2023-09-10T19:17:00Z
--- library_name: peft --- ## Training procedure ### Framework versions - PEFT 0.5.0.dev0
aaronamortegui/relismoilumi
aaronamortegui
2023-09-10T19:37:27Z
87
1
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "text-to-image", "arxiv:2112.10752", "arxiv:2202.00512", "arxiv:1910.09700", "license:openrail++", "region:us" ]
text-to-image
2023-03-09T01:49:06Z
---
license: openrail++
tags:
- stable-diffusion
- text-to-image
pinned: true
---

# Stable Diffusion v2-1 Model Card

This model card focuses on the model associated with the Stable Diffusion v2-1 model, codebase available [here](https://github.com/Stability-AI/stablediffusion).

This `stable-diffusion-2-1` model is fine-tuned from [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) (`768-v-ema.ckpt`) with an additional 55k steps on the same dataset (with `punsafe=0.1`), and then fine-tuned for another 155k extra steps with `punsafe=0.98`.

- Use it with the [`stablediffusion`](https://github.com/Stability-AI/stablediffusion) repository: download the `v2-1_768-ema-pruned.ckpt` [here](https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.ckpt).
- Use it with 🧨 [`diffusers`](#examples)

## Model Details

- **Developed by:** Robin Rombach, Patrick Esser
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL)
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([OpenCLIP-ViT/H](https://github.com/mlfoundations/open_clip)).
- **Resources for more information:** [GitHub Repository](https://github.com/Stability-AI/).
- **Cite as:**

      @InProceedings{Rombach_2022_CVPR,
          author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
          title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
          booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
          month     = {June},
          year      = {2022},
          pages     = {10684-10695}
      }

## Examples

Using the [🤗's Diffusers library](https://github.com/huggingface/diffusers) to run Stable Diffusion 2 in a simple and efficient manner.

```bash
pip install diffusers transformers accelerate scipy safetensors
```

Running the pipeline (if you don't swap the scheduler it will run with the default DDIM, in this example we are swapping it to DPMSolverMultistepScheduler):

```python
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "stabilityai/stable-diffusion-2-1"

# Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]

image.save("astronaut_rides_horse.png")
```

**Notes**:
- Despite not being a dependency, we highly recommend installing [xformers](https://github.com/facebookresearch/xformers) for memory efficient attention (better performance)
- If you have low GPU RAM available, make sure to add a `pipe.enable_attention_slicing()` after sending it to `cuda` for less VRAM usage (at the cost of speed)

# Uses

## Direct Use

The model is intended for research purposes only. Possible research areas and tasks include

- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.

Excluded uses are described below.

### Misuse, Malicious Use, and Out-of-Scope Use

_Note: This section is originally taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), was used for Stable Diffusion v1, but applies in the same way to Stable Diffusion v2_.

The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.

#### Out-of-Scope Use

The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.

#### Misuse and Malicious Use

Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:

- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- Impersonating individuals without their consent.
- Sexual content without consent of the people who might see it.
- Mis- and disinformation
- Representations of egregious violence and gore
- Sharing of copyrighted or licensed material in violation of its terms of use.
- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.

## Limitations and Bias

### Limitations

- The model does not achieve perfect photorealism
- The model cannot render legible text
- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
- Faces and people in general may not be generated properly.
- The model was trained mainly with English captions and will not work as well in other languages.
- The autoencoding part of the model is lossy
- The model was trained on a subset of the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent and sexual content. To partially mitigate this, we have filtered the dataset using LAION's NSFW detector (see Training section).

### Bias

While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. Stable Diffusion was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are limited to English descriptions. Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. This affects the overall output of the model, as white and western cultures are often set as the default. Further, the ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts. Stable Diffusion v2 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent.

## Training

**Training Data**
The model developers used the following dataset for training the model:

- LAION-5B and subsets (details below). The training data is further filtered using LAION's NSFW detector, with a "p_unsafe" score of 0.1 (conservative). For more details, please refer to LAION-5B's [NeurIPS 2022](https://openreview.net/forum?id=M3Y74vmsMcY) paper and reviewer discussions on the topic.

**Training Procedure**
Stable Diffusion v2 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,

- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4
- Text prompts are encoded through the OpenCLIP-ViT/H text-encoder.
- The output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention.
- The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. We also use the so-called _v-objective_, see https://arxiv.org/abs/2202.00512.

We currently provide the following checkpoints:

- `512-base-ema.ckpt`: 550k steps at resolution `256x256` on a subset of [LAION-5B](https://laion.ai/blog/laion-5b/) filtered for explicit pornographic material, using the [LAION-NSFW classifier](https://github.com/LAION-AI/CLIP-based-NSFW-Detector) with `punsafe=0.1` and an [aesthetic score](https://github.com/christophschuhmann/improved-aesthetic-predictor) >= `4.5`. 850k steps at resolution `512x512` on the same dataset with resolution `>= 512x512`.
- `768-v-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for 150k steps using a [v-objective](https://arxiv.org/abs/2202.00512) on the same dataset. Resumed for another 140k steps on a `768x768` subset of our dataset.
- `512-depth-ema.ckpt`: Resumed from `512-base-ema.ckpt` and finetuned for 200k steps. Added an extra input channel to process the (relative) depth prediction produced by [MiDaS](https://github.com/isl-org/MiDaS) (`dpt_hybrid`) which is used as an additional conditioning. The additional input channels of the U-Net which process this extra information were zero-initialized.
- `512-inpainting-ema.ckpt`: Resumed from `512-base-ema.ckpt` and trained for another 200k steps. Follows the mask-generation strategy presented in [LAMA](https://github.com/saic-mdal/lama) which, in combination with the latent VAE representations of the masked image, are used as an additional conditioning. The additional input channels of the U-Net which process this extra information were zero-initialized. The same strategy was used to train the [1.5-inpainting checkpoint](https://huggingface.co/runwayml/stable-diffusion-inpainting).
- `x4-upscaling-ema.ckpt`: Trained for 1.25M steps on a 10M subset of LAION containing images `>2048x2048`. The model was trained on crops of size `512x512` and is a text-guided [latent upscaling diffusion model](https://arxiv.org/abs/2112.10752). In addition to the textual input, it receives a `noise_level` as an input parameter, which can be used to add noise to the low-resolution input according to a [predefined diffusion schedule](configs/stable-diffusion/x4-upscaling.yaml).

- **Hardware:** 32 x 8 x A100 GPUs
- **Optimizer:** AdamW
- **Gradient Accumulations**: 1
- **Batch:** 32 x 8 x 2 x 4 = 2048
- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant

## Evaluation Results

Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) and 50 DDIM sampling steps show the relative improvements of the checkpoints:

![pareto](model-variants.jpg)

Evaluated using 50 DDIM steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.

## Environmental Impact

**Stable Diffusion v1**
**Estimated Emissions**
Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.

- **Hardware Type:** A100 PCIe 40GB
- **Hours used:** 200000
- **Cloud Provider:** AWS
- **Compute Region:** US-east
- **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 15000 kg CO2 eq.

## Citation

    @InProceedings{Rombach_2022_CVPR,
        author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
        title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
        booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
        month     = {June},
        year      = {2022},
        pages     = {10684-10695}
    }

*This model card was written by: Robin Rombach, Patrick Esser and David Ha and is based on the [Stable Diffusion v1](https://github.com/CompVis/stable-diffusion/blob/main/Stable_Diffusion_v1_Model_Card.md) and [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
omthkkr/speecht5_finetuned_voxpopuli_sl
omthkkr
2023-09-10T19:28:13Z
82
0
transformers
[ "transformers", "pytorch", "speecht5", "text-to-audio", "generated_from_trainer", "text-to-speech", "dataset:facebook/voxpopuli", "base_model:microsoft/speecht5_tts", "base_model:finetune:microsoft/speecht5_tts", "license:mit", "endpoints_compatible", "region:us" ]
text-to-speech
2023-09-10T18:21:41Z
--- license: mit base_model: microsoft/speecht5_tts tags: - generated_from_trainer - text-to-speech datasets: - facebook/voxpopuli model-index: - name: speecht5_finetuned_voxpopuli_sl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # speecht5_finetuned_voxpopuli_sl This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the VoxPopuli dataset. It achieves the following results on the evaluation set: - Loss: 0.4594 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 1000 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.5181 | 8.46 | 500 | 0.4795 | | 0.498 | 16.91 | 1000 | 0.4594 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
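A hedged synthesis sketch following the standard SpeechT5 recipe. The CMU Arctic x-vector used as the speaker embedding is an assumption (the card does not say which speaker embeddings were used in fine-tuning), and the Slovenian sentence is illustrative:

```python
import torch
import soundfile as sf
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("omthkkr/speecht5_finetuned_voxpopuli_sl")
model = SpeechT5ForTextToSpeech.from_pretrained("omthkkr/speecht5_finetuned_voxpopuli_sl")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# ASSUMPTION: a generic x-vector from CMU Arctic as the speaker embedding.
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(xvectors[7306]["xvector"]).unsqueeze(0)

inputs = processor(text="Pozdravljeni, kako ste?", return_tensors="pt")  # illustrative text
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("speech.wav", speech.numpy(), samplerate=16000)
```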
s3nh/Photolens-llama-2-13b-langchain-chat-GGUF
s3nh
2023-09-10T19:26:59Z
57
2
transformers
[ "transformers", "gguf", "text-generation", "en", "license:openrail", "endpoints_compatible", "region:us" ]
text-generation
2023-09-01T11:23:57Z
---
license: openrail
pipeline_tag: text-generation
library_name: transformers
language:
- en
---

## Original model card

Buy me a coffee if you like this project ;)
<a href="https://www.buymeacoffee.com/s3nh"><img src="https://www.buymeacoffee.com/assets/img/guidelines/download-assets-sm-1.svg" alt=""></a>

#### Description

GGUF Format model files for [This project](https://huggingface.co/Photolens/llama-2-13b-langchain-chat).

### GGUF Specs

GGUF is a format based on the existing GGJT, but makes a few changes to the format to make it more extensible and easier to use. The following features are desired:

- Single-file deployment: they can be easily distributed and loaded, and do not require any external files for additional information.
- Extensible: new features can be added to GGML-based executors/new information can be added to GGUF models without breaking compatibility with existing models.
- mmap compatibility: models can be loaded using mmap for fast loading and saving.
- Easy to use: models can be easily loaded and saved using a small amount of code, with no need for external libraries, regardless of the language used.
- Full information: all information needed to load a model is contained in the model file, and no additional information needs to be provided by the user.

The key difference between GGJT and GGUF is the use of a key-value structure for the hyperparameters (now referred to as metadata), rather than a list of untyped values. This allows for new metadata to be added without breaking compatibility with existing models, and to annotate the model with additional information that may be useful for inference or for identifying the model.

### Perplexity params

| Model | Measure | Q2_K | Q3_K_S | Q3_K_M | Q3_K_L | Q4_0 | Q4_1 | Q4_K_S | Q4_K_M | Q5_0 | Q5_1 | Q5_K_S | Q5_K_M | Q6_K | Q8_0 | F16 |
|:--|:--|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|--:|
| 7B | perplexity | 6.7764 | 6.4571 | 6.1503 | 6.0869 | 6.1565 | 6.0912 | 6.0215 | 5.9601 | 5.9862 | 5.9481 | 5.9419 | 5.9208 | 5.9110 | 5.9070 | 5.9066 |
| 13B | perplexity | 5.8545 | 5.6033 | 5.4498 | 5.4063 | 5.3860 | 5.3608 | 5.3404 | 5.3002 | 5.2856 | 5.2706 | 5.2785 | 5.2638 | 5.2568 | 5.2548 | 5.2543 |

### inference

TODO

# Original model card
adyprat/a2c-PandaReachDense-v3
adyprat
2023-09-10T19:10:17Z
1
0
stable-baselines3
[ "stable-baselines3", "PandaReachDense-v3", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-09-10T19:04:49Z
---
library_name: stable-baselines3
tags:
- PandaReachDense-v3
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: PandaReachDense-v3
      type: PandaReachDense-v3
    metrics:
    - type: mean_reward
      value: -0.22 +/- 0.13
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **PandaReachDense-v3**

This is a trained model of a **A2C** agent playing **PandaReachDense-v3** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hub; the filename follows the usual
# huggingface_sb3 naming convention and is an assumption.
checkpoint = load_from_hub(
    repo_id="adyprat/a2c-PandaReachDense-v3",
    filename="a2c-PandaReachDense-v3.zip",
)
model = A2C.load(checkpoint)
```
robertquest/controlnetv1.1
robertquest
2023-09-10T19:01:35Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-09-10T16:47:14Z
--- license: creativeml-openrail-m ---
CyberHarem/oberon_fgo
CyberHarem
2023-09-10T18:48:41Z
0
0
null
[ "art", "text-to-image", "dataset:CyberHarem/oberon_fgo", "license:mit", "region:us" ]
text-to-image
2023-09-10T18:36:00Z
---
license: mit
datasets:
- CyberHarem/oberon_fgo
pipeline_tag: text-to-image
tags:
- art
---

# Lora of oberon_fgo

This model is trained with [HCP-Diffusion](https://github.com/7eu7d7/HCP-Diffusion). And the auto-training framework is maintained by [DeepGHS Team](https://huggingface.co/deepghs).

The base model used during training is [NAI](https://huggingface.co/deepghs/animefull-latest), and the base model used for generating preview images is [Meina/MeinaMix_V11](https://huggingface.co/Meina/MeinaMix_V11).

After downloading the pt and safetensors files for the specified step, you need to use them simultaneously. The pt file will be used as an embedding, while the safetensors file will be loaded for Lora.

For example, if you want to use the model from step 6600, you need to download `6600/oberon_fgo.pt` as the embedding and `6600/oberon_fgo.safetensors` for loading Lora. By using both files together, you can generate images for the desired characters.

**The best step we recommend is 6600**, with a score of 0.905.

The trigger words are:

1. `oberon_fgo`
2. `male_focus, bangs, blue_eyes, crown, medium_hair, cape, grey_hair, wings, smile, insect_wings, black_hair`

For the following groups, it is not recommended to use this model and we express regret:

1. Individuals who cannot tolerate any deviations from the original character design, even in the slightest detail.
2. Individuals who are facing application scenarios with high demands for accuracy in recreating character outfits.
3. Individuals who cannot accept the potential randomness in AI-generated images based on the Stable Diffusion algorithm.
4. Individuals who are not comfortable with the fully automated process of training character models using LoRA, or those who believe that training character models must be done purely through manual operations to avoid disrespecting the characters.
5. Individuals who find the generated image content offensive to their values.
These are the available steps:

| Steps | Score | Download | pattern_1 | pattern_2 | pattern_3 | pattern_4 | pattern_5 | pattern_6 | bikini | bondage | free | maid | miko | nude | nude2 | suit | yukata |
|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|
| **6600** | **0.905** | [**Download**](6600/oberon_fgo.zip) | ![pattern_1-6600](6600/previews/pattern_1.png) | ![pattern_2-6600](6600/previews/pattern_2.png) | ![pattern_3-6600](6600/previews/pattern_3.png) | ![pattern_4-6600](6600/previews/pattern_4.png) | ![pattern_5-6600](6600/previews/pattern_5.png) | ![pattern_6-6600](6600/previews/pattern_6.png) | ![bikini-6600](6600/previews/bikini.png) | [<NSFW, click to see>](6600/previews/bondage.png) | ![free-6600](6600/previews/free.png) | ![maid-6600](6600/previews/maid.png) | ![miko-6600](6600/previews/miko.png) | [<NSFW, click to see>](6600/previews/nude.png) | [<NSFW, click to see>](6600/previews/nude2.png) | ![suit-6600](6600/previews/suit.png) | ![yukata-6600](6600/previews/yukata.png) |
| 6160 | 0.853 | [Download](6160/oberon_fgo.zip) | ![pattern_1-6160](6160/previews/pattern_1.png) | ![pattern_2-6160](6160/previews/pattern_2.png) | ![pattern_3-6160](6160/previews/pattern_3.png) | ![pattern_4-6160](6160/previews/pattern_4.png) | ![pattern_5-6160](6160/previews/pattern_5.png) | ![pattern_6-6160](6160/previews/pattern_6.png) | ![bikini-6160](6160/previews/bikini.png) | [<NSFW, click to see>](6160/previews/bondage.png) | ![free-6160](6160/previews/free.png) | ![maid-6160](6160/previews/maid.png) | ![miko-6160](6160/previews/miko.png) | [<NSFW, click to see>](6160/previews/nude.png) | [<NSFW, click to see>](6160/previews/nude2.png) | ![suit-6160](6160/previews/suit.png) | ![yukata-6160](6160/previews/yukata.png) |
| 5720 | 0.883 | [Download](5720/oberon_fgo.zip) | ![pattern_1-5720](5720/previews/pattern_1.png) | ![pattern_2-5720](5720/previews/pattern_2.png) | ![pattern_3-5720](5720/previews/pattern_3.png) | ![pattern_4-5720](5720/previews/pattern_4.png) | ![pattern_5-5720](5720/previews/pattern_5.png) | ![pattern_6-5720](5720/previews/pattern_6.png) | ![bikini-5720](5720/previews/bikini.png) | [<NSFW, click to see>](5720/previews/bondage.png) | ![free-5720](5720/previews/free.png) | ![maid-5720](5720/previews/maid.png) | ![miko-5720](5720/previews/miko.png) | [<NSFW, click to see>](5720/previews/nude.png) | [<NSFW, click to see>](5720/previews/nude2.png) | ![suit-5720](5720/previews/suit.png) | ![yukata-5720](5720/previews/yukata.png) |
| 5280 | 0.863 | [Download](5280/oberon_fgo.zip) | ![pattern_1-5280](5280/previews/pattern_1.png) | ![pattern_2-5280](5280/previews/pattern_2.png) | ![pattern_3-5280](5280/previews/pattern_3.png) | ![pattern_4-5280](5280/previews/pattern_4.png) | ![pattern_5-5280](5280/previews/pattern_5.png) | ![pattern_6-5280](5280/previews/pattern_6.png) | ![bikini-5280](5280/previews/bikini.png) | [<NSFW, click to see>](5280/previews/bondage.png) | ![free-5280](5280/previews/free.png) | ![maid-5280](5280/previews/maid.png) | ![miko-5280](5280/previews/miko.png) | [<NSFW, click to see>](5280/previews/nude.png) | [<NSFW, click to see>](5280/previews/nude2.png) | ![suit-5280](5280/previews/suit.png) | ![yukata-5280](5280/previews/yukata.png) |
| 4840 | 0.799 | [Download](4840/oberon_fgo.zip) | ![pattern_1-4840](4840/previews/pattern_1.png) | ![pattern_2-4840](4840/previews/pattern_2.png) | ![pattern_3-4840](4840/previews/pattern_3.png) | ![pattern_4-4840](4840/previews/pattern_4.png) | ![pattern_5-4840](4840/previews/pattern_5.png) | ![pattern_6-4840](4840/previews/pattern_6.png) | ![bikini-4840](4840/previews/bikini.png) | [<NSFW, click to see>](4840/previews/bondage.png) | ![free-4840](4840/previews/free.png) | ![maid-4840](4840/previews/maid.png) | ![miko-4840](4840/previews/miko.png) | [<NSFW, click to see>](4840/previews/nude.png) | [<NSFW, click to see>](4840/previews/nude2.png) | ![suit-4840](4840/previews/suit.png) | ![yukata-4840](4840/previews/yukata.png) |
| 4400 | 0.811 | [Download](4400/oberon_fgo.zip) | ![pattern_1-4400](4400/previews/pattern_1.png) | ![pattern_2-4400](4400/previews/pattern_2.png) | ![pattern_3-4400](4400/previews/pattern_3.png) | ![pattern_4-4400](4400/previews/pattern_4.png) | ![pattern_5-4400](4400/previews/pattern_5.png) | ![pattern_6-4400](4400/previews/pattern_6.png) | ![bikini-4400](4400/previews/bikini.png) | [<NSFW, click to see>](4400/previews/bondage.png) | ![free-4400](4400/previews/free.png) | ![maid-4400](4400/previews/maid.png) | ![miko-4400](4400/previews/miko.png) | [<NSFW, click to see>](4400/previews/nude.png) | [<NSFW, click to see>](4400/previews/nude2.png) | ![suit-4400](4400/previews/suit.png) | ![yukata-4400](4400/previews/yukata.png) |
| 3960 | 0.854 | [Download](3960/oberon_fgo.zip) | ![pattern_1-3960](3960/previews/pattern_1.png) | ![pattern_2-3960](3960/previews/pattern_2.png) | ![pattern_3-3960](3960/previews/pattern_3.png) | ![pattern_4-3960](3960/previews/pattern_4.png) | ![pattern_5-3960](3960/previews/pattern_5.png) | ![pattern_6-3960](3960/previews/pattern_6.png) | ![bikini-3960](3960/previews/bikini.png) | [<NSFW, click to see>](3960/previews/bondage.png) | ![free-3960](3960/previews/free.png) | ![maid-3960](3960/previews/maid.png) | ![miko-3960](3960/previews/miko.png) | [<NSFW, click to see>](3960/previews/nude.png) | [<NSFW, click to see>](3960/previews/nude2.png) | ![suit-3960](3960/previews/suit.png) | ![yukata-3960](3960/previews/yukata.png) |
| 3520 | 0.819 | [Download](3520/oberon_fgo.zip) | ![pattern_1-3520](3520/previews/pattern_1.png) | ![pattern_2-3520](3520/previews/pattern_2.png) | ![pattern_3-3520](3520/previews/pattern_3.png) | ![pattern_4-3520](3520/previews/pattern_4.png) | ![pattern_5-3520](3520/previews/pattern_5.png) | ![pattern_6-3520](3520/previews/pattern_6.png) | ![bikini-3520](3520/previews/bikini.png) | [<NSFW, click to see>](3520/previews/bondage.png) | ![free-3520](3520/previews/free.png) | ![maid-3520](3520/previews/maid.png) | ![miko-3520](3520/previews/miko.png) | [<NSFW, click to see>](3520/previews/nude.png) | [<NSFW, click to see>](3520/previews/nude2.png) | ![suit-3520](3520/previews/suit.png) | ![yukata-3520](3520/previews/yukata.png) |
| 3080 | 0.783 | [Download](3080/oberon_fgo.zip) | ![pattern_1-3080](3080/previews/pattern_1.png) | ![pattern_2-3080](3080/previews/pattern_2.png) | ![pattern_3-3080](3080/previews/pattern_3.png) | ![pattern_4-3080](3080/previews/pattern_4.png) | ![pattern_5-3080](3080/previews/pattern_5.png) | ![pattern_6-3080](3080/previews/pattern_6.png) | ![bikini-3080](3080/previews/bikini.png) | [<NSFW, click to see>](3080/previews/bondage.png) | ![free-3080](3080/previews/free.png) | ![maid-3080](3080/previews/maid.png) | ![miko-3080](3080/previews/miko.png) | [<NSFW, click to see>](3080/previews/nude.png) | [<NSFW, click to see>](3080/previews/nude2.png) | ![suit-3080](3080/previews/suit.png) | ![yukata-3080](3080/previews/yukata.png) |
| 2640 | 0.825 | [Download](2640/oberon_fgo.zip) | ![pattern_1-2640](2640/previews/pattern_1.png) | ![pattern_2-2640](2640/previews/pattern_2.png) | ![pattern_3-2640](2640/previews/pattern_3.png) | ![pattern_4-2640](2640/previews/pattern_4.png) | ![pattern_5-2640](2640/previews/pattern_5.png) | ![pattern_6-2640](2640/previews/pattern_6.png) | ![bikini-2640](2640/previews/bikini.png) | [<NSFW, click to see>](2640/previews/bondage.png) | ![free-2640](2640/previews/free.png) | ![maid-2640](2640/previews/maid.png) | ![miko-2640](2640/previews/miko.png) | [<NSFW, click to see>](2640/previews/nude.png) | [<NSFW, click to see>](2640/previews/nude2.png) | ![suit-2640](2640/previews/suit.png) | ![yukata-2640](2640/previews/yukata.png) |
| 2200 | 0.760 | [Download](2200/oberon_fgo.zip) | ![pattern_1-2200](2200/previews/pattern_1.png) | ![pattern_2-2200](2200/previews/pattern_2.png) | ![pattern_3-2200](2200/previews/pattern_3.png) | ![pattern_4-2200](2200/previews/pattern_4.png) | ![pattern_5-2200](2200/previews/pattern_5.png) | ![pattern_6-2200](2200/previews/pattern_6.png) | ![bikini-2200](2200/previews/bikini.png) | [<NSFW, click to see>](2200/previews/bondage.png) | ![free-2200](2200/previews/free.png) | ![maid-2200](2200/previews/maid.png) | ![miko-2200](2200/previews/miko.png) | [<NSFW, click to see>](2200/previews/nude.png) | [<NSFW, click to see>](2200/previews/nude2.png) | ![suit-2200](2200/previews/suit.png) | ![yukata-2200](2200/previews/yukata.png) |
| 1760 | 0.719 | [Download](1760/oberon_fgo.zip) | ![pattern_1-1760](1760/previews/pattern_1.png) | ![pattern_2-1760](1760/previews/pattern_2.png) | ![pattern_3-1760](1760/previews/pattern_3.png) | ![pattern_4-1760](1760/previews/pattern_4.png) | ![pattern_5-1760](1760/previews/pattern_5.png) | ![pattern_6-1760](1760/previews/pattern_6.png) | ![bikini-1760](1760/previews/bikini.png) | [<NSFW, click to see>](1760/previews/bondage.png) | ![free-1760](1760/previews/free.png) | ![maid-1760](1760/previews/maid.png) | ![miko-1760](1760/previews/miko.png) | [<NSFW, click to see>](1760/previews/nude.png) | [<NSFW, click to see>](1760/previews/nude2.png) | ![suit-1760](1760/previews/suit.png) | ![yukata-1760](1760/previews/yukata.png) |
| 1320 | 0.772 | [Download](1320/oberon_fgo.zip) | ![pattern_1-1320](1320/previews/pattern_1.png) | ![pattern_2-1320](1320/previews/pattern_2.png) | ![pattern_3-1320](1320/previews/pattern_3.png) | ![pattern_4-1320](1320/previews/pattern_4.png) | ![pattern_5-1320](1320/previews/pattern_5.png) | ![pattern_6-1320](1320/previews/pattern_6.png) | ![bikini-1320](1320/previews/bikini.png) | [<NSFW, click to see>](1320/previews/bondage.png) | ![free-1320](1320/previews/free.png) | ![maid-1320](1320/previews/maid.png) | ![miko-1320](1320/previews/miko.png) | [<NSFW, click to see>](1320/previews/nude.png) | [<NSFW, click to see>](1320/previews/nude2.png) | ![suit-1320](1320/previews/suit.png) | ![yukata-1320](1320/previews/yukata.png) |
| 880 | 0.686 | [Download](880/oberon_fgo.zip) | ![pattern_1-880](880/previews/pattern_1.png) | ![pattern_2-880](880/previews/pattern_2.png) | ![pattern_3-880](880/previews/pattern_3.png) | ![pattern_4-880](880/previews/pattern_4.png) | ![pattern_5-880](880/previews/pattern_5.png) | ![pattern_6-880](880/previews/pattern_6.png) | ![bikini-880](880/previews/bikini.png) | [<NSFW, click to see>](880/previews/bondage.png) | ![free-880](880/previews/free.png) | ![maid-880](880/previews/maid.png) | ![miko-880](880/previews/miko.png) | [<NSFW, click to see>](880/previews/nude.png) | [<NSFW, click to see>](880/previews/nude2.png) | ![suit-880](880/previews/suit.png) | ![yukata-880](880/previews/yukata.png) |
| 440 | 0.669 | [Download](440/oberon_fgo.zip) | ![pattern_1-440](440/previews/pattern_1.png) | ![pattern_2-440](440/previews/pattern_2.png) | ![pattern_3-440](440/previews/pattern_3.png) | ![pattern_4-440](440/previews/pattern_4.png) | ![pattern_5-440](440/previews/pattern_5.png) | ![pattern_6-440](440/previews/pattern_6.png) | ![bikini-440](440/previews/bikini.png) | [<NSFW, click to see>](440/previews/bondage.png) | ![free-440](440/previews/free.png) | ![maid-440](440/previews/maid.png) | ![miko-440](440/previews/miko.png) | [<NSFW, click to see>](440/previews/nude.png) | [<NSFW, click to see>](440/previews/nude2.png) | ![suit-440](440/previews/suit.png) | ![yukata-440](440/previews/yukata.png) |
Saugatkafley/Llama-7B-Chat-GPTQ-lora-nepali-sentence
Saugatkafley
2023-09-10T18:48:30Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T18:48:07Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: gptq - bits: 4 - tokenizer: None - dataset: None - group_size: 128 - damp_percent: 0.01 - desc_act: False - sym: True - true_sequential: True - use_cuda_fp16: False - model_seqlen: None - block_name_to_quantize: None - module_name_preceding_first_block: None - batch_size: 1 - pad_token_id: None - disable_exllama: False ### Framework versions - PEFT 0.5.0
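A heavily hedged loading sketch for this GPTQ-based adapter. The base checkpoint below is a guess consistent with the adapter name and the 4-bit, group-size-128 config above (the card does not name it), and loading GPTQ repos through `transformers` requires `optimum` and `auto-gptq` to be installed:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# ASSUMPTION: a 4-bit GPTQ Llama-2 chat base matching the config above;
# the card does not state which base checkpoint was used.
base = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-Chat-GPTQ",  # hypothetical choice of base repo
    device_map="auto",
)
model = PeftModel.from_pretrained(
    base, "Saugatkafley/Llama-7B-Chat-GPTQ-lora-nepali-sentence"
)
```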
HasanPA/distilbert-base-uncased-finetuned-cola
HasanPA
2023-09-10T18:41:05Z
5
0
transformers
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-08-24T07:17:34Z
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5263989868108533 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.8446 - Matthews Correlation: 0.5264 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5218 | 1.0 | 535 | 0.4631 | 0.4977 | | 0.347 | 2.0 | 1070 | 0.5200 | 0.5002 | | 0.2277 | 3.0 | 1605 | 0.6188 | 0.5193 | | 0.1783 | 4.0 | 2140 | 0.7643 | 0.5154 | | 0.1303 | 5.0 | 2675 | 0.8446 | 0.5264 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
stefan-it/electra-base-gc4-64k-100000-cased-generator
stefan-it
2023-09-10T18:39:49Z
127
0
transformers
[ "transformers", "pytorch", "tf", "safetensors", "electra", "fill-mask", "de", "dataset:german-nlp-group/german_common_crawl", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-03-02T23:29:05Z
--- language: de license: mit datasets: - german-nlp-group/german_common_crawl widget: - text: "Heute ist ein [MASK] Tag" --- # GC4LM: A Colossal (Biased) language model for German This repository presents a colossal (and biased) language model for German trained on the recently released ["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4), with a total dataset size of ~844GB. --- **Disclaimer**: the presented and trained language models in this repository are for **research only** purposes. The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race, ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended to read: [On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf) from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell. The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially for identifying biases and how to prevent them, as most research is currently done only for English. --- Please use the new GitHub Discussions feature in order to discuss or present further research questions. Feel free to use `#gc4lm` on Twitter 🐦.
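A quick check matching the widget text in the card's front matter, using the fill-mask pipeline (this checkpoint is the ELECTRA generator, i.e. a masked language model):

```python
from transformers import pipeline

fill = pipeline(
    "fill-mask",
    model="stefan-it/electra-base-gc4-64k-100000-cased-generator",
)

# The example sentence comes from the card's own widget configuration.
for pred in fill("Heute ist ein [MASK] Tag"):
    print(pred["token_str"], pred["score"])
```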
actionpace/spicyboros-13b-2.2
actionpace
2023-09-10T18:37:39Z
1
0
null
[ "gguf", "en", "license:other", "endpoints_compatible", "region:us" ]
null
2023-09-10T18:32:52Z
--- license: other language: - en --- **Some of my own quants:** * spicyboros-13b-2.2_Q5_1.gguf **Source:** [jondurbin](https://huggingface.co/jondurbin) **Source Model:** [spicyboros-13b-2.2](https://huggingface.co/jondurbin/spicyboros-13b-2.2) **Source models for jondurbin/spicyboros-13b-2.2 (Finetune, jondurbin/airoboros-2.2)** - [meta-llama/Llama-2-13b-hf](https://huggingface.co/meta-llama/Llama-2-13b-hf) ([Ref](https://huggingface.co/actionpace/Llama-2-13b-hf))
DriveMyScream/News_Similarity_Checking
DriveMyScream
2023-09-10T18:33:49Z
0
0
keras
[ "keras", "tf-keras", "region:us" ]
null
2023-09-10T18:31:20Z
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | weight_decay | None | | clipnorm | None | | global_clipnorm | None | | clipvalue | None | | use_ema | False | | ema_momentum | 0.99 | | ema_overwrite_frequency | None | | jit_compile | True | | is_legacy_optimizer | False | | learning_rate | 0.0010000000474974513 | | beta_1 | 0.9 | | beta_2 | 0.999 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
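A minimal loading sketch via `huggingface_hub`'s Keras integration; the model's expected inputs and outputs are not documented in the card, so anything beyond loading is left open:

```python
from huggingface_hub import from_pretrained_keras

# Downloads and rebuilds the saved Keras model from the Hub
# (requires TensorFlow to be installed).
model = from_pretrained_keras("DriveMyScream/News_Similarity_Checking")
model.summary()
```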
victornica/molgpt_selfies_100mzinc_384width
victornica
2023-09-10T18:22:28Z
139
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "generated_from_trainer", "base_model:openai-community/gpt2", "base_model:finetune:openai-community/gpt2", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-09-09T23:21:58Z
--- license: mit base_model: gpt2 tags: - generated_from_trainer model-index: - name: molgpt_selfies_100mzinc_384width results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # molgpt_selfies_100mzinc_384width This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5005 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 192 - eval_batch_size: 192 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 5000 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:------:|:---------------:| | 0.578 | 0.1 | 50000 | 0.5535 | | 0.5564 | 0.2 | 100000 | 0.5450 | | 0.5487 | 0.3 | 150000 | 0.5389 | | 0.5429 | 0.4 | 200000 | 0.5336 | | 0.5369 | 0.5 | 250000 | 0.5275 | | 0.5306 | 0.6 | 300000 | 0.5213 | | 0.5236 | 0.7 | 350000 | 0.5146 | | 0.5163 | 0.8 | 400000 | 0.5079 | | 0.5095 | 0.9 | 450000 | 0.5022 | | 0.5055 | 1.0 | 500000 | 0.5005 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1 - Datasets 2.14.5 - Tokenizers 0.13.3
osieosie/llama-samsum-4bit-7b-bnb-seed87
osieosie
2023-09-10T18:18:27Z
2
0
peft
[ "peft", "region:us" ]
null
2023-09-10T14:00:11Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.5.0.dev0
Buseak/spellcorrector_1009_v4
Buseak
2023-09-10T18:17:13Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "canine", "token-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-10T17:15:30Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: spellcorrector_1009_v4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # spellcorrector_1009_v4 This model is a fine-tuned version of [google/canine-s](https://huggingface.co/google/canine-s) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3307 - Precision: 0.9681 - Recall: 0.9681 - F1: 0.9681 - Accuracy: 0.9573 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2767 | 1.0 | 1951 | 0.2331 | 0.96 | 0.9562 | 0.9581 | 0.9383 | | 0.2181 | 2.0 | 3902 | 0.2028 | 0.9524 | 0.9562 | 0.9543 | 0.9450 | | 0.1776 | 3.0 | 5853 | 0.2019 | 0.96 | 0.9562 | 0.9581 | 0.9498 | | 0.1519 | 4.0 | 7804 | 0.2038 | 0.9526 | 0.9602 | 0.9563 | 0.9498 | | 0.1277 | 5.0 | 9755 | 0.2091 | 0.9567 | 0.9681 | 0.9624 | 0.9521 | | 0.1133 | 6.0 | 11706 | 0.2187 | 0.9449 | 0.9562 | 0.9505 | 0.9540 | | 0.1041 | 7.0 | 13657 | 0.2378 | 0.9762 | 0.9801 | 0.9781 | 0.9545 | | 0.0906 | 8.0 | 15608 | 0.2371 | 0.9603 | 0.9641 | 0.9622 | 0.9558 | | 0.0806 | 9.0 | 17559 | 0.2509 | 0.976 | 0.9721 | 0.9741 | 0.9532 | | 0.0689 | 10.0 | 19510 | 0.2624 | 0.9681 | 0.9681 | 0.9681 | 0.9563 | | 0.0623 | 11.0 | 21461 | 0.2623 | 0.976 | 0.9721 | 0.9741 | 0.9559 | | 0.06 | 12.0 | 23412 | 0.2783 | 0.9643 | 0.9681 | 0.9662 | 0.9564 | | 0.0537 | 13.0 | 25363 | 0.2938 | 0.976 | 0.9721 | 0.9741 | 0.9569 | | 0.0507 | 14.0 | 27314 | 0.2976 | 0.9603 | 0.9641 | 0.9622 | 0.9565 | | 0.0491 | 15.0 | 29265 | 0.3075 | 0.9681 | 0.9681 | 0.9681 | 0.9576 | | 0.0426 | 16.0 | 31216 | 0.3182 | 0.9681 | 0.9681 | 0.9681 | 0.9571 | | 0.0426 | 17.0 | 33167 | 0.3154 | 0.9681 | 0.9681 | 0.9681 | 0.9572 | | 0.0387 | 18.0 | 35118 | 0.3266 | 0.9681 | 0.9681 | 0.9681 | 0.9573 | | 0.0336 | 19.0 | 37069 | 0.3317 | 0.9681 | 0.9681 | 0.9681 | 0.9574 | | 0.0341 | 20.0 | 39020 | 0.3307 | 0.9681 | 0.9681 | 0.9681 | 0.9573 | ### Framework versions - Transformers 4.28.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
cosmic-cactus/dqn-SpaceInvadersNoFrameskip-v4
cosmic-cactus
2023-09-10T18:11:02Z
0
0
stable-baselines3
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-09-10T18:10:30Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 391.50 +/- 122.11 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga cosmic-cactus -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga cosmic-cactus -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga cosmic-cactus ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
johaanm/test-planner-alpha-V8.0
johaanm
2023-09-10T18:10:48Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T18:10:44Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.4.0
darkandpure/LLama2-qlora-ecom
darkandpure
2023-09-10T18:08:25Z
6
1
peft
[ "peft", "pytorch", "region:us" ]
null
2023-08-19T20:26:08Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.6.0.dev0
acdg1214/rl_course_vizdoom_health_gathering_supreme
acdg1214
2023-09-10T18:05:24Z
0
0
sample-factory
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-09-10T17:41:36Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 8.01 +/- 2.66 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r acdg1214/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
mindchain/llama2-qlora-finetunined-french_aktuell
mindchain
2023-09-10T18:05:02Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T18:04:55Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.6.0.dev0
antonwonton/Llama-2-7b-chat-hf-int4-ft-0.75
antonwonton
2023-09-10T18:01:12Z
0
0
null
[ "generated_from_trainer", "base_model:meta-llama/Llama-2-7b-chat-hf", "base_model:finetune:meta-llama/Llama-2-7b-chat-hf", "region:us" ]
null
2023-09-10T18:01:00Z
--- base_model: meta-llama/Llama-2-7b-chat-hf tags: - generated_from_trainer model-index: - name: Llama-2-7b-chat-hf-int4-ft-0.75 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Llama-2-7b-chat-hf-int4-ft-0.75 This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 6 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 12 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.13.0 - Tokenizers 0.13.3
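A sketch of how the reported hyperparameters might be expressed as `transformers` `TrainingArguments`; model and dataset wiring are omitted, and this is an illustration rather than the author's actual training script:

```python
# Mirror of the hyperparameters listed in the card above, as TrainingArguments.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="Llama-2-7b-chat-hf-int4-ft-0.75",
    learning_rate=2e-4,
    per_device_train_batch_size=6,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,   # effective train batch size: 12
    lr_scheduler_type="constant",
    warmup_ratio=0.03,
    num_train_epochs=3,
    seed=42,
)
```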
Specky/C-Jon
Specky
2023-09-10T17:56:10Z
0
0
null
[ "license:cc-by-nc-nd-4.0", "region:us" ]
null
2023-09-09T19:10:32Z
--- license: cc-by-nc-nd-4.0 ---
samkitjain/my-pet-dog-xzp
samkitjain
2023-09-10T17:29:15Z
0
0
null
[ "safetensors", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-09-10T17:26:38Z
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My-Pet-dog-xzp Dreambooth model trained by samkitjain following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: IIITB-278 Sample pictures of this concept: ![0](https://huggingface.co/samkitjain/my-pet-dog-xzp/resolve/main/sample_images/00000-3133028948.png)
chmanoj/xls-r-1B-te
chmanoj
2023-09-10T17:17:07Z
21
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "te", "dataset:openslr", "dataset:SLR66", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-03-02T23:29:05Z
--- language: - te license: apache-2.0 tags: - automatic-speech-recognition - openslr_SLR66 - generated_from_trainer - robust-speech-event - hf-asr-leaderboard datasets: - openslr - SLR66 metrics: - wer model-index: - name: xls-r-1B-te results: - task: type: automatic-speech-recognition name: Speech Recognition dataset: type: openslr name: Open SLR args: SLR66 metrics: - type: wer value: 20.624 name: Test WER - type: cer value: 3.979 name: Test CER - type: wer value: 26.14777618364419 name: Test WER (without LM) - type: cer value: 4.932543184970369 name: Test CER (without LM) --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the OPENSLR_SLR66 - NA dataset. It achieves the following results on the evaluation set: - Loss: 0.3119 - Wer: 0.2613 ### Evaluation metrics | Metric | Split | Decode with LM | Value | |:------:|:------:|:--------------:|:---------:| | WER | Train | No | 5.36 | | CER | Train | No | 1.11 | | WER | Test | No | 26.14 | | CER | Test | No | 4.93 | | WER | Train | Yes | 5.04 | | CER | Train | Yes | 1.07 | | WER | Test | Yes | 20.69 | | CER | Test | Yes | 3.986 | ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 150.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:-----:|:---------------:|:------:| | 2.9038 | 4.8 | 500 | 3.0125 | 1.0 | | 1.3777 | 9.61 | 1000 | 0.8681 | 0.8753 | | 1.1436 | 14.42 | 1500 | 0.6256 | 0.7961 | | 1.0997 | 19.23 | 2000 | 0.5244 | 0.6875 | | 1.0363 | 24.04 | 2500 | 0.4585 | 0.6276 | | 0.7996 | 28.84 | 3000 | 0.4072 | 0.5295 | | 0.825 | 33.65 | 3500 | 0.3590 | 0.5222 | | 0.8018 | 38.46 | 4000 | 0.3678 | 0.4671 | | 0.7545 | 43.27 | 4500 | 0.3474 | 0.3962 | | 0.7375 | 48.08 | 5000 | 0.3224 | 0.3869 | | 0.6198 | 52.88 | 5500 | 0.3233 | 0.3630 | | 0.6608 | 57.69 | 6000 | 0.3029 | 0.3308 | | 0.645 | 62.5 | 6500 | 0.3195 | 0.3722 | | 0.5249 | 67.31 | 7000 | 0.3004 | 0.3202 | | 0.4875 | 72.11 | 7500 | 0.2826 | 0.2992 | | 0.5171 | 76.92 | 8000 | 0.2962 | 0.2976 | | 0.4974 | 81.73 | 8500 | 0.2990 | 0.2933 | | 0.4387 | 86.54 | 9000 | 0.2834 | 0.2755 | | 0.4511 | 91.34 | 9500 | 0.2886 | 0.2787 | | 0.4112 | 96.15 | 10000 | 0.3093 | 0.2976 | | 0.4064 | 100.96 | 10500 | 0.3123 | 0.2863 | | 0.4047 | 105.77 | 11000 | 0.2968 | 0.2719 | | 0.3519 | 110.57 | 11500 | 0.3106 | 0.2832 | | 0.3719 | 115.38 | 12000 | 0.3030 | 0.2737 | | 0.3669 | 120.19 | 12500 | 0.2964 | 0.2714 | | 0.3386 | 125.0 | 13000 | 0.3101 | 0.2714 | | 0.3137 | 129.8 | 13500 | 0.3063 | 0.2710 | | 0.3008 | 134.61 | 14000 | 0.3082 | 0.2617 | | 0.301 | 139.42 | 14500 | 0.3121 | 0.2628 | | 0.3291 | 144.23 | 15000 | 0.3105 | 0.2612 | | 0.3133 | 149.04 | 15500 | 0.3114 | 0.2624 | ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.1+cu102 - Datasets 1.17.1.dev0 - 
Tokenizers 0.11.0
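A minimal inference sketch for this checkpoint via the `transformers` ASR pipeline; `audio.wav` is a placeholder, and decoding here is plain CTC without the language model (matching the "without LM" numbers):

```python
from transformers import pipeline

# Greedy CTC decoding without the external language model.
asr = pipeline("automatic-speech-recognition", model="chmanoj/xls-r-1B-te")
print(asr("audio.wav")["text"])  # placeholder audio file
```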
CyberHarem/mem_oshinoko
CyberHarem
2023-09-10T17:09:32Z
0
0
null
[ "art", "text-to-image", "dataset:CyberHarem/mem_oshinoko", "license:mit", "region:us" ]
text-to-image
2023-09-10T16:49:28Z
--- license: mit datasets: - CyberHarem/mem_oshinoko pipeline_tag: text-to-image tags: - art --- # Lora of mem_oshinoko This model is trained with [HCP-Diffusion](https://github.com/7eu7d7/HCP-Diffusion), and the auto-training framework is maintained by [DeepGHS Team](https://huggingface.co/deepghs). The base model used during training is [NAI](https://huggingface.co/deepghs/animefull-latest), and the base model used for generating preview images is [Meina/MeinaMix_V11](https://huggingface.co/Meina/MeinaMix_V11). After downloading the pt and safetensors files for the specified step, you need to use them simultaneously. The pt file will be used as an embedding, while the safetensors file will be loaded for Lora. For example, if you want to use the model from step 2940, you need to download `2940/mem_oshinoko.pt` as the embedding and `2940/mem_oshinoko.safetensors` for loading Lora. By using both files together, you can generate images for the desired characters (see the minimal loading sketch after the step table below). **The best step we recommend is 2940**, with a score of 0.978. The trigger words are: 1. `mem_oshinoko` 2. `bangs, short_hair, blonde_hair, blunt_bangs, horns, :3, smile, multicolored_hair, blue_eyes` For the following groups, it is not recommended to use this model and we express regret: 1. Individuals who cannot tolerate any deviations from the original character design, even in the slightest detail. 2. Individuals who are facing application scenarios with high demands for accuracy in recreating character outfits. 3. Individuals who cannot accept the potential randomness in AI-generated images based on the Stable Diffusion algorithm. 4. Individuals who are not comfortable with the fully automated process of training character models using LoRA, or those who believe that training character models must be done purely through manual operations to avoid disrespecting the characters. 5. Individuals who find the generated image content offensive to their values. 
These are available steps: | Steps | Score | Download | pattern_1 | pattern_2 | pattern_3 | pattern_4 | pattern_5 | pattern_6 | pattern_7 | pattern_8 | pattern_9 | pattern_10 | pattern_11 | pattern_12 | pattern_13 | pattern_14 | pattern_15 | pattern_16 | bikini | bondage | free | maid | miko | nude | nude2 | suit | yukata | |:---------|:----------|:--------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-----------------------------------------|:--------------------------------------------------|:-------------------------------------|:-------------------------------------|:-------------------------------------|:-----------------------------------------------|:------------------------------------------------|:-------------------------------------|:-----------------------------------------| | 6300 | 0.969 | [Download](6300/mem_oshinoko.zip) | ![pattern_1-6300](6300/previews/pattern_1.png) | ![pattern_2-6300](6300/previews/pattern_2.png) | ![pattern_3-6300](6300/previews/pattern_3.png) | ![pattern_4-6300](6300/previews/pattern_4.png) | ![pattern_5-6300](6300/previews/pattern_5.png) | ![pattern_6-6300](6300/previews/pattern_6.png) | ![pattern_7-6300](6300/previews/pattern_7.png) | ![pattern_8-6300](6300/previews/pattern_8.png) | ![pattern_9-6300](6300/previews/pattern_9.png) | ![pattern_10-6300](6300/previews/pattern_10.png) | ![pattern_11-6300](6300/previews/pattern_11.png) | ![pattern_12-6300](6300/previews/pattern_12.png) | ![pattern_13-6300](6300/previews/pattern_13.png) | ![pattern_14-6300](6300/previews/pattern_14.png) | ![pattern_15-6300](6300/previews/pattern_15.png) | ![pattern_16-6300](6300/previews/pattern_16.png) | ![bikini-6300](6300/previews/bikini.png) | [<NSFW, click to see>](6300/previews/bondage.png) | ![free-6300](6300/previews/free.png) | ![maid-6300](6300/previews/maid.png) | ![miko-6300](6300/previews/miko.png) | [<NSFW, click to see>](6300/previews/nude.png) | [<NSFW, click to see>](6300/previews/nude2.png) | ![suit-6300](6300/previews/suit.png) | ![yukata-6300](6300/previews/yukata.png) | | 5880 | 0.974 | [Download](5880/mem_oshinoko.zip) | ![pattern_1-5880](5880/previews/pattern_1.png) | ![pattern_2-5880](5880/previews/pattern_2.png) | ![pattern_3-5880](5880/previews/pattern_3.png) | ![pattern_4-5880](5880/previews/pattern_4.png) | ![pattern_5-5880](5880/previews/pattern_5.png) | ![pattern_6-5880](5880/previews/pattern_6.png) | ![pattern_7-5880](5880/previews/pattern_7.png) | ![pattern_8-5880](5880/previews/pattern_8.png) | ![pattern_9-5880](5880/previews/pattern_9.png) | ![pattern_10-5880](5880/previews/pattern_10.png) | ![pattern_11-5880](5880/previews/pattern_11.png) | ![pattern_12-5880](5880/previews/pattern_12.png) | ![pattern_13-5880](5880/previews/pattern_13.png) | 
![pattern_14-5880](5880/previews/pattern_14.png) | ![pattern_15-5880](5880/previews/pattern_15.png) | ![pattern_16-5880](5880/previews/pattern_16.png) | ![bikini-5880](5880/previews/bikini.png) | [<NSFW, click to see>](5880/previews/bondage.png) | ![free-5880](5880/previews/free.png) | ![maid-5880](5880/previews/maid.png) | ![miko-5880](5880/previews/miko.png) | [<NSFW, click to see>](5880/previews/nude.png) | [<NSFW, click to see>](5880/previews/nude2.png) | ![suit-5880](5880/previews/suit.png) | ![yukata-5880](5880/previews/yukata.png) | | 5460 | 0.974 | [Download](5460/mem_oshinoko.zip) | ![pattern_1-5460](5460/previews/pattern_1.png) | ![pattern_2-5460](5460/previews/pattern_2.png) | ![pattern_3-5460](5460/previews/pattern_3.png) | ![pattern_4-5460](5460/previews/pattern_4.png) | ![pattern_5-5460](5460/previews/pattern_5.png) | ![pattern_6-5460](5460/previews/pattern_6.png) | ![pattern_7-5460](5460/previews/pattern_7.png) | ![pattern_8-5460](5460/previews/pattern_8.png) | ![pattern_9-5460](5460/previews/pattern_9.png) | ![pattern_10-5460](5460/previews/pattern_10.png) | ![pattern_11-5460](5460/previews/pattern_11.png) | ![pattern_12-5460](5460/previews/pattern_12.png) | ![pattern_13-5460](5460/previews/pattern_13.png) | ![pattern_14-5460](5460/previews/pattern_14.png) | ![pattern_15-5460](5460/previews/pattern_15.png) | ![pattern_16-5460](5460/previews/pattern_16.png) | ![bikini-5460](5460/previews/bikini.png) | [<NSFW, click to see>](5460/previews/bondage.png) | ![free-5460](5460/previews/free.png) | ![maid-5460](5460/previews/maid.png) | ![miko-5460](5460/previews/miko.png) | [<NSFW, click to see>](5460/previews/nude.png) | [<NSFW, click to see>](5460/previews/nude2.png) | ![suit-5460](5460/previews/suit.png) | ![yukata-5460](5460/previews/yukata.png) | | 5040 | 0.970 | [Download](5040/mem_oshinoko.zip) | ![pattern_1-5040](5040/previews/pattern_1.png) | ![pattern_2-5040](5040/previews/pattern_2.png) | ![pattern_3-5040](5040/previews/pattern_3.png) | ![pattern_4-5040](5040/previews/pattern_4.png) | ![pattern_5-5040](5040/previews/pattern_5.png) | ![pattern_6-5040](5040/previews/pattern_6.png) | ![pattern_7-5040](5040/previews/pattern_7.png) | ![pattern_8-5040](5040/previews/pattern_8.png) | ![pattern_9-5040](5040/previews/pattern_9.png) | ![pattern_10-5040](5040/previews/pattern_10.png) | ![pattern_11-5040](5040/previews/pattern_11.png) | ![pattern_12-5040](5040/previews/pattern_12.png) | ![pattern_13-5040](5040/previews/pattern_13.png) | ![pattern_14-5040](5040/previews/pattern_14.png) | ![pattern_15-5040](5040/previews/pattern_15.png) | ![pattern_16-5040](5040/previews/pattern_16.png) | ![bikini-5040](5040/previews/bikini.png) | [<NSFW, click to see>](5040/previews/bondage.png) | ![free-5040](5040/previews/free.png) | ![maid-5040](5040/previews/maid.png) | ![miko-5040](5040/previews/miko.png) | [<NSFW, click to see>](5040/previews/nude.png) | [<NSFW, click to see>](5040/previews/nude2.png) | ![suit-5040](5040/previews/suit.png) | ![yukata-5040](5040/previews/yukata.png) | | 4620 | 0.965 | [Download](4620/mem_oshinoko.zip) | ![pattern_1-4620](4620/previews/pattern_1.png) | ![pattern_2-4620](4620/previews/pattern_2.png) | ![pattern_3-4620](4620/previews/pattern_3.png) | ![pattern_4-4620](4620/previews/pattern_4.png) | ![pattern_5-4620](4620/previews/pattern_5.png) | ![pattern_6-4620](4620/previews/pattern_6.png) | ![pattern_7-4620](4620/previews/pattern_7.png) | ![pattern_8-4620](4620/previews/pattern_8.png) | ![pattern_9-4620](4620/previews/pattern_9.png) | 
![pattern_10-4620](4620/previews/pattern_10.png) | ![pattern_11-4620](4620/previews/pattern_11.png) | ![pattern_12-4620](4620/previews/pattern_12.png) | ![pattern_13-4620](4620/previews/pattern_13.png) | ![pattern_14-4620](4620/previews/pattern_14.png) | ![pattern_15-4620](4620/previews/pattern_15.png) | ![pattern_16-4620](4620/previews/pattern_16.png) | ![bikini-4620](4620/previews/bikini.png) | [<NSFW, click to see>](4620/previews/bondage.png) | ![free-4620](4620/previews/free.png) | ![maid-4620](4620/previews/maid.png) | ![miko-4620](4620/previews/miko.png) | [<NSFW, click to see>](4620/previews/nude.png) | [<NSFW, click to see>](4620/previews/nude2.png) | ![suit-4620](4620/previews/suit.png) | ![yukata-4620](4620/previews/yukata.png) | | 4200 | 0.975 | [Download](4200/mem_oshinoko.zip) | ![pattern_1-4200](4200/previews/pattern_1.png) | ![pattern_2-4200](4200/previews/pattern_2.png) | ![pattern_3-4200](4200/previews/pattern_3.png) | ![pattern_4-4200](4200/previews/pattern_4.png) | ![pattern_5-4200](4200/previews/pattern_5.png) | ![pattern_6-4200](4200/previews/pattern_6.png) | ![pattern_7-4200](4200/previews/pattern_7.png) | ![pattern_8-4200](4200/previews/pattern_8.png) | ![pattern_9-4200](4200/previews/pattern_9.png) | ![pattern_10-4200](4200/previews/pattern_10.png) | ![pattern_11-4200](4200/previews/pattern_11.png) | ![pattern_12-4200](4200/previews/pattern_12.png) | ![pattern_13-4200](4200/previews/pattern_13.png) | ![pattern_14-4200](4200/previews/pattern_14.png) | ![pattern_15-4200](4200/previews/pattern_15.png) | ![pattern_16-4200](4200/previews/pattern_16.png) | ![bikini-4200](4200/previews/bikini.png) | [<NSFW, click to see>](4200/previews/bondage.png) | ![free-4200](4200/previews/free.png) | ![maid-4200](4200/previews/maid.png) | ![miko-4200](4200/previews/miko.png) | [<NSFW, click to see>](4200/previews/nude.png) | [<NSFW, click to see>](4200/previews/nude2.png) | ![suit-4200](4200/previews/suit.png) | ![yukata-4200](4200/previews/yukata.png) | | 3780 | 0.934 | [Download](3780/mem_oshinoko.zip) | ![pattern_1-3780](3780/previews/pattern_1.png) | ![pattern_2-3780](3780/previews/pattern_2.png) | ![pattern_3-3780](3780/previews/pattern_3.png) | ![pattern_4-3780](3780/previews/pattern_4.png) | ![pattern_5-3780](3780/previews/pattern_5.png) | ![pattern_6-3780](3780/previews/pattern_6.png) | ![pattern_7-3780](3780/previews/pattern_7.png) | ![pattern_8-3780](3780/previews/pattern_8.png) | ![pattern_9-3780](3780/previews/pattern_9.png) | ![pattern_10-3780](3780/previews/pattern_10.png) | ![pattern_11-3780](3780/previews/pattern_11.png) | ![pattern_12-3780](3780/previews/pattern_12.png) | ![pattern_13-3780](3780/previews/pattern_13.png) | ![pattern_14-3780](3780/previews/pattern_14.png) | ![pattern_15-3780](3780/previews/pattern_15.png) | ![pattern_16-3780](3780/previews/pattern_16.png) | ![bikini-3780](3780/previews/bikini.png) | [<NSFW, click to see>](3780/previews/bondage.png) | ![free-3780](3780/previews/free.png) | ![maid-3780](3780/previews/maid.png) | ![miko-3780](3780/previews/miko.png) | [<NSFW, click to see>](3780/previews/nude.png) | [<NSFW, click to see>](3780/previews/nude2.png) | ![suit-3780](3780/previews/suit.png) | ![yukata-3780](3780/previews/yukata.png) | | 3360 | 0.969 | [Download](3360/mem_oshinoko.zip) | ![pattern_1-3360](3360/previews/pattern_1.png) | ![pattern_2-3360](3360/previews/pattern_2.png) | ![pattern_3-3360](3360/previews/pattern_3.png) | ![pattern_4-3360](3360/previews/pattern_4.png) | ![pattern_5-3360](3360/previews/pattern_5.png) | 
![pattern_6-3360](3360/previews/pattern_6.png) | ![pattern_7-3360](3360/previews/pattern_7.png) | ![pattern_8-3360](3360/previews/pattern_8.png) | ![pattern_9-3360](3360/previews/pattern_9.png) | ![pattern_10-3360](3360/previews/pattern_10.png) | ![pattern_11-3360](3360/previews/pattern_11.png) | ![pattern_12-3360](3360/previews/pattern_12.png) | ![pattern_13-3360](3360/previews/pattern_13.png) | ![pattern_14-3360](3360/previews/pattern_14.png) | ![pattern_15-3360](3360/previews/pattern_15.png) | ![pattern_16-3360](3360/previews/pattern_16.png) | ![bikini-3360](3360/previews/bikini.png) | [<NSFW, click to see>](3360/previews/bondage.png) | ![free-3360](3360/previews/free.png) | ![maid-3360](3360/previews/maid.png) | ![miko-3360](3360/previews/miko.png) | [<NSFW, click to see>](3360/previews/nude.png) | [<NSFW, click to see>](3360/previews/nude2.png) | ![suit-3360](3360/previews/suit.png) | ![yukata-3360](3360/previews/yukata.png) | | **2940** | **0.978** | [**Download**](2940/mem_oshinoko.zip) | ![pattern_1-2940](2940/previews/pattern_1.png) | ![pattern_2-2940](2940/previews/pattern_2.png) | ![pattern_3-2940](2940/previews/pattern_3.png) | ![pattern_4-2940](2940/previews/pattern_4.png) | ![pattern_5-2940](2940/previews/pattern_5.png) | ![pattern_6-2940](2940/previews/pattern_6.png) | ![pattern_7-2940](2940/previews/pattern_7.png) | ![pattern_8-2940](2940/previews/pattern_8.png) | ![pattern_9-2940](2940/previews/pattern_9.png) | ![pattern_10-2940](2940/previews/pattern_10.png) | ![pattern_11-2940](2940/previews/pattern_11.png) | ![pattern_12-2940](2940/previews/pattern_12.png) | ![pattern_13-2940](2940/previews/pattern_13.png) | ![pattern_14-2940](2940/previews/pattern_14.png) | ![pattern_15-2940](2940/previews/pattern_15.png) | ![pattern_16-2940](2940/previews/pattern_16.png) | ![bikini-2940](2940/previews/bikini.png) | [<NSFW, click to see>](2940/previews/bondage.png) | ![free-2940](2940/previews/free.png) | ![maid-2940](2940/previews/maid.png) | ![miko-2940](2940/previews/miko.png) | [<NSFW, click to see>](2940/previews/nude.png) | [<NSFW, click to see>](2940/previews/nude2.png) | ![suit-2940](2940/previews/suit.png) | ![yukata-2940](2940/previews/yukata.png) | | 2520 | 0.969 | [Download](2520/mem_oshinoko.zip) | ![pattern_1-2520](2520/previews/pattern_1.png) | ![pattern_2-2520](2520/previews/pattern_2.png) | ![pattern_3-2520](2520/previews/pattern_3.png) | ![pattern_4-2520](2520/previews/pattern_4.png) | ![pattern_5-2520](2520/previews/pattern_5.png) | ![pattern_6-2520](2520/previews/pattern_6.png) | ![pattern_7-2520](2520/previews/pattern_7.png) | ![pattern_8-2520](2520/previews/pattern_8.png) | ![pattern_9-2520](2520/previews/pattern_9.png) | ![pattern_10-2520](2520/previews/pattern_10.png) | ![pattern_11-2520](2520/previews/pattern_11.png) | ![pattern_12-2520](2520/previews/pattern_12.png) | ![pattern_13-2520](2520/previews/pattern_13.png) | ![pattern_14-2520](2520/previews/pattern_14.png) | ![pattern_15-2520](2520/previews/pattern_15.png) | ![pattern_16-2520](2520/previews/pattern_16.png) | ![bikini-2520](2520/previews/bikini.png) | [<NSFW, click to see>](2520/previews/bondage.png) | ![free-2520](2520/previews/free.png) | ![maid-2520](2520/previews/maid.png) | ![miko-2520](2520/previews/miko.png) | [<NSFW, click to see>](2520/previews/nude.png) | [<NSFW, click to see>](2520/previews/nude2.png) | ![suit-2520](2520/previews/suit.png) | ![yukata-2520](2520/previews/yukata.png) | | 2100 | 0.980 | [Download](2100/mem_oshinoko.zip) | ![pattern_1-2100](2100/previews/pattern_1.png) | 
![pattern_2-2100](2100/previews/pattern_2.png) | ![pattern_3-2100](2100/previews/pattern_3.png) | ![pattern_4-2100](2100/previews/pattern_4.png) | ![pattern_5-2100](2100/previews/pattern_5.png) | ![pattern_6-2100](2100/previews/pattern_6.png) | ![pattern_7-2100](2100/previews/pattern_7.png) | ![pattern_8-2100](2100/previews/pattern_8.png) | ![pattern_9-2100](2100/previews/pattern_9.png) | ![pattern_10-2100](2100/previews/pattern_10.png) | ![pattern_11-2100](2100/previews/pattern_11.png) | ![pattern_12-2100](2100/previews/pattern_12.png) | ![pattern_13-2100](2100/previews/pattern_13.png) | ![pattern_14-2100](2100/previews/pattern_14.png) | ![pattern_15-2100](2100/previews/pattern_15.png) | ![pattern_16-2100](2100/previews/pattern_16.png) | ![bikini-2100](2100/previews/bikini.png) | [<NSFW, click to see>](2100/previews/bondage.png) | ![free-2100](2100/previews/free.png) | ![maid-2100](2100/previews/maid.png) | ![miko-2100](2100/previews/miko.png) | [<NSFW, click to see>](2100/previews/nude.png) | [<NSFW, click to see>](2100/previews/nude2.png) | ![suit-2100](2100/previews/suit.png) | ![yukata-2100](2100/previews/yukata.png) | | 1680 | 0.965 | [Download](1680/mem_oshinoko.zip) | ![pattern_1-1680](1680/previews/pattern_1.png) | ![pattern_2-1680](1680/previews/pattern_2.png) | ![pattern_3-1680](1680/previews/pattern_3.png) | ![pattern_4-1680](1680/previews/pattern_4.png) | ![pattern_5-1680](1680/previews/pattern_5.png) | ![pattern_6-1680](1680/previews/pattern_6.png) | ![pattern_7-1680](1680/previews/pattern_7.png) | ![pattern_8-1680](1680/previews/pattern_8.png) | ![pattern_9-1680](1680/previews/pattern_9.png) | ![pattern_10-1680](1680/previews/pattern_10.png) | ![pattern_11-1680](1680/previews/pattern_11.png) | ![pattern_12-1680](1680/previews/pattern_12.png) | ![pattern_13-1680](1680/previews/pattern_13.png) | ![pattern_14-1680](1680/previews/pattern_14.png) | ![pattern_15-1680](1680/previews/pattern_15.png) | ![pattern_16-1680](1680/previews/pattern_16.png) | ![bikini-1680](1680/previews/bikini.png) | [<NSFW, click to see>](1680/previews/bondage.png) | ![free-1680](1680/previews/free.png) | ![maid-1680](1680/previews/maid.png) | ![miko-1680](1680/previews/miko.png) | [<NSFW, click to see>](1680/previews/nude.png) | [<NSFW, click to see>](1680/previews/nude2.png) | ![suit-1680](1680/previews/suit.png) | ![yukata-1680](1680/previews/yukata.png) | | 1260 | 0.958 | [Download](1260/mem_oshinoko.zip) | ![pattern_1-1260](1260/previews/pattern_1.png) | ![pattern_2-1260](1260/previews/pattern_2.png) | ![pattern_3-1260](1260/previews/pattern_3.png) | ![pattern_4-1260](1260/previews/pattern_4.png) | ![pattern_5-1260](1260/previews/pattern_5.png) | ![pattern_6-1260](1260/previews/pattern_6.png) | ![pattern_7-1260](1260/previews/pattern_7.png) | ![pattern_8-1260](1260/previews/pattern_8.png) | ![pattern_9-1260](1260/previews/pattern_9.png) | ![pattern_10-1260](1260/previews/pattern_10.png) | ![pattern_11-1260](1260/previews/pattern_11.png) | ![pattern_12-1260](1260/previews/pattern_12.png) | ![pattern_13-1260](1260/previews/pattern_13.png) | ![pattern_14-1260](1260/previews/pattern_14.png) | ![pattern_15-1260](1260/previews/pattern_15.png) | ![pattern_16-1260](1260/previews/pattern_16.png) | ![bikini-1260](1260/previews/bikini.png) | [<NSFW, click to see>](1260/previews/bondage.png) | ![free-1260](1260/previews/free.png) | ![maid-1260](1260/previews/maid.png) | ![miko-1260](1260/previews/miko.png) | [<NSFW, click to see>](1260/previews/nude.png) | [<NSFW, click to see>](1260/previews/nude2.png) | 
![suit-1260](1260/previews/suit.png) | ![yukata-1260](1260/previews/yukata.png) | | 840 | 0.935 | [Download](840/mem_oshinoko.zip) | ![pattern_1-840](840/previews/pattern_1.png) | ![pattern_2-840](840/previews/pattern_2.png) | ![pattern_3-840](840/previews/pattern_3.png) | ![pattern_4-840](840/previews/pattern_4.png) | ![pattern_5-840](840/previews/pattern_5.png) | ![pattern_6-840](840/previews/pattern_6.png) | ![pattern_7-840](840/previews/pattern_7.png) | ![pattern_8-840](840/previews/pattern_8.png) | ![pattern_9-840](840/previews/pattern_9.png) | ![pattern_10-840](840/previews/pattern_10.png) | ![pattern_11-840](840/previews/pattern_11.png) | ![pattern_12-840](840/previews/pattern_12.png) | ![pattern_13-840](840/previews/pattern_13.png) | ![pattern_14-840](840/previews/pattern_14.png) | ![pattern_15-840](840/previews/pattern_15.png) | ![pattern_16-840](840/previews/pattern_16.png) | ![bikini-840](840/previews/bikini.png) | [<NSFW, click to see>](840/previews/bondage.png) | ![free-840](840/previews/free.png) | ![maid-840](840/previews/maid.png) | ![miko-840](840/previews/miko.png) | [<NSFW, click to see>](840/previews/nude.png) | [<NSFW, click to see>](840/previews/nude2.png) | ![suit-840](840/previews/suit.png) | ![yukata-840](840/previews/yukata.png) | | 420 | 0.902 | [Download](420/mem_oshinoko.zip) | ![pattern_1-420](420/previews/pattern_1.png) | ![pattern_2-420](420/previews/pattern_2.png) | ![pattern_3-420](420/previews/pattern_3.png) | ![pattern_4-420](420/previews/pattern_4.png) | ![pattern_5-420](420/previews/pattern_5.png) | ![pattern_6-420](420/previews/pattern_6.png) | ![pattern_7-420](420/previews/pattern_7.png) | ![pattern_8-420](420/previews/pattern_8.png) | ![pattern_9-420](420/previews/pattern_9.png) | ![pattern_10-420](420/previews/pattern_10.png) | ![pattern_11-420](420/previews/pattern_11.png) | ![pattern_12-420](420/previews/pattern_12.png) | ![pattern_13-420](420/previews/pattern_13.png) | ![pattern_14-420](420/previews/pattern_14.png) | ![pattern_15-420](420/previews/pattern_15.png) | ![pattern_16-420](420/previews/pattern_16.png) | ![bikini-420](420/previews/bikini.png) | [<NSFW, click to see>](420/previews/bondage.png) | ![free-420](420/previews/free.png) | ![maid-420](420/previews/maid.png) | ![miko-420](420/previews/miko.png) | [<NSFW, click to see>](420/previews/nude.png) | [<NSFW, click to see>](420/previews/nude2.png) | ![suit-420](420/previews/suit.png) | ![yukata-420](420/previews/yukata.png) |
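The loading sketch referenced above, assuming the step-2940 files have been downloaded locally and that the LoRA/embedding pair loads cleanly in `diffusers` (the card's own workflow targets webui-style tools, so treat this as an approximation):

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumes Meina/MeinaMix_V11 (the preview base model) is loadable in diffusers format.
pipe = StableDiffusionPipeline.from_pretrained(
    "Meina/MeinaMix_V11", torch_dtype=torch.float16
).to("cuda")
# Load the safetensors file as a LoRA and the pt file as a textual-inversion embedding.
pipe.load_lora_weights(".", weight_name="mem_oshinoko.safetensors")
pipe.load_textual_inversion("mem_oshinoko.pt", token="mem_oshinoko")
image = pipe("mem_oshinoko, short_hair, blonde_hair, smile").images[0]
image.save("preview.png")
```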
fedbor/13bllama2_lora16_modello
fedbor
2023-09-10T17:08:02Z
3
0
peft
[ "peft", "region:us" ]
null
2023-09-10T17:08:00Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.6.0.dev0
StefanoCaloni/Pyramid
StefanoCaloni
2023-09-10T17:07:01Z
5
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
2023-09-10T17:05:21Z
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of ML-Agents official environments, go to https://huggingface.co/unity 2. Find your model_id: StefanoCaloni/Pyramid 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
NewstaR/OpenStar-1b
NewstaR
2023-09-10T17:01:06Z
131
0
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "en", "dataset:NewstaR/AverageData", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-09-10T16:51:45Z
--- license: apache-2.0 datasets: - NewstaR/AverageData language: - en metrics: - accuracy - bertscore - character ---
alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001
alemoraesc
2023-09-10T16:51:17Z
29
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "EMMA VAE", "realistic vision", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-09-09T17:42:36Z
--- license: creativeml-openrail-m tags: - text-to-image - EMMA VAE - realistic vision --- ### alemoraesc/sg161222-realistic-vision-v5-1-novae-autocrop-0001 on Stable Diffusion via Dreambooth #### model by alemoraesc This is the Stable Diffusion model fine-tuned with the alemoraesc/sg161222-realistic-vision-v5-1-novae-autocrop-0001 concept taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **ukj alex** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Model used: stablediffusionapi/realistic-vision VAE was embedded in the model ---- TRAIN #@title Model training !/usr/bin/python3 train_dreambooth.py --pretrained_model_name_or_path="/content/stable_diffusion_weights" --pretrained_vae_name_or_path="/content/stable_diffusion_weights/vae" --instance_data_dir="/content/data/ukj" --instance_prompt="photo of ukj alex person" --output_dir=$OUTPUT_DIR --revision="fp16" --seed=1337 --resolution=512 --train_batch_size=1 --train_text_encoder --mixed_precision="fp16" --use_8bit_adam --gradient_accumulation_steps=1 --learning_rate=1e-6 --lr_scheduler="constant" --lr_warmup_steps=120 --sample_batch_size=4 --max_train_steps=1200 --save_sample_negative_prompt="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck" --save_interval=600 --save_sample_prompt="photo of ukj alex" ---- Prompt: RAW photo, subject, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3 Negative Prompt: (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream # Used BIRME to crop the images Here are the images used for training this concept: ![image 0](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_12.jpg) ![image 1](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_10.jpg) ![image 2](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/.ipynb_checkpoints) ![image 
3](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_11.jpg) ![image 4](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_1.jpg) ![image 5](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_14.jpg) ![image 6](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_15.jpg) ![image 7](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_9.jpg) ![image 8](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_13.jpg) ![image 9](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_16.jpg) ![image 10](https://huggingface.co/alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001/resolve/main/concept_images/alex_0.jpg)
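A minimal inference sketch for this concept with `diffusers`, using the card's instance prompt; the sampler settings are illustrative:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "alemoraesc/alemoraesc-sg161222-realistic-vision-v5-1-novae-autocrop-0001",
    torch_dtype=torch.float16,
).to("cuda")
image = pipe(
    "RAW photo of ukj alex, 8k uhd, dslr, soft lighting, high quality, film grain",
    negative_prompt="deformed iris, deformed pupils, cartoon, drawing, anime, blurry",
    num_inference_steps=30,
).images[0]
image.save("ukj_alex.png")
```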
Ridealist/xlm-roberta-base-finetuned-panx-all
Ridealist
2023-09-10T16:47:36Z
112
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-10T16:38:34Z
--- license: mit base_model: xlm-roberta-base tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-all This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1745 - F1: 0.8577 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2915 | 1.0 | 835 | 0.1859 | 0.8171 | | 0.1544 | 2.0 | 1670 | 0.1631 | 0.8509 | | 0.1014 | 3.0 | 2505 | 0.1745 | 0.8577 | ### Framework versions - Transformers 4.33.1 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
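A minimal NER sketch for this checkpoint via the token-classification pipeline; the sample sentence is illustrative:

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="Ridealist/xlm-roberta-base-finetuned-panx-all",
    aggregation_strategy="simple",  # merge sub-word pieces into whole entities
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```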
Q317/EmoraBert1
Q317
2023-09-10T16:44:46Z
62
0
transformers
[ "transformers", "tf", "roberta", "text-classification", "generated_from_keras_callback", "base_model:wonrax/phobert-base-vietnamese-sentiment", "base_model:finetune:wonrax/phobert-base-vietnamese-sentiment", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-10T16:18:19Z
--- license: mit base_model: wonrax/phobert-base-vietnamese-sentiment tags: - generated_from_keras_callback model-index: - name: Q317/EmoraBert1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Q317/EmoraBert1 This model is a fine-tuned version of [wonrax/phobert-base-vietnamese-sentiment](https://huggingface.co/wonrax/phobert-base-vietnamese-sentiment) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3123 - Validation Loss: 0.8557 - Train Accuracy: 0.7158 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 146205, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.7587 | 0.6693 | 0.7181 | 0 | | 0.6184 | 0.6566 | 0.7267 | 1 | | 0.5107 | 0.6663 | 0.7274 | 2 | | 0.4007 | 0.7829 | 0.7262 | 3 | | 0.3123 | 0.8557 | 0.7158 | 4 | ### Framework versions - Transformers 4.33.1 - TensorFlow 2.13.0 - Datasets 2.14.5 - Tokenizers 0.13.3
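A minimal inference sketch for this TensorFlow checkpoint, assuming it loads through the auto classes; note that PhoBERT-style models normally expect word-segmented Vietnamese input, which this example skips:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tok = AutoTokenizer.from_pretrained("Q317/EmoraBert1")
model = TFAutoModelForSequenceClassification.from_pretrained("Q317/EmoraBert1")

# Illustrative input; PhoBERT normally expects word-segmented text.
inputs = tok("Sản phẩm này rất tốt!", return_tensors="tf")
probs = tf.nn.softmax(model(**inputs).logits, axis=-1)
print(probs.numpy())
```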
nikolaalx/sample-model
nikolaalx
2023-09-10T16:44:43Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T16:42:49Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.6.0.dev0
Ridealist/xlm-roberta-base-finetuned-panx-en
Ridealist
2023-09-10T16:38:31Z
124
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-10T16:37:28Z
--- license: mit base_model: xlm-roberta-base tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-en results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.en split: validation args: PAN-X.en metrics: - name: F1 type: f1 value: 0.6973094170403586 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.3911 - F1: 0.6973 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.9995 | 1.0 | 50 | 0.5467 | 0.5618 | | 0.4997 | 2.0 | 100 | 0.4371 | 0.6535 | | 0.3801 | 3.0 | 150 | 0.3911 | 0.6973 | ### Framework versions - Transformers 4.33.1 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
Ridealist/xlm-roberta-base-finetuned-panx-it
Ridealist
2023-09-10T16:37:25Z
103
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-10T16:35:23Z
--- license: mit base_model: xlm-roberta-base tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-it results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.it split: validation args: PAN-X.it metrics: - name: F1 type: f1 value: 0.8241577649958916 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-it This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.2504 - F1: 0.8242 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.7373 | 1.0 | 70 | 0.2876 | 0.7604 | | 0.2725 | 2.0 | 140 | 0.2869 | 0.8115 | | 0.1718 | 3.0 | 210 | 0.2504 | 0.8242 | ### Framework versions - Transformers 4.33.1 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
baebee/OpenStar-1b
baebee
2023-09-10T16:35:42Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T16:35:40Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.6.0.dev0
DriveMyScream/Fake_News_Classification_model
DriveMyScream
2023-09-10T16:31:45Z
0
0
keras
[ "keras", "tf-keras", "region:us" ]
null
2023-09-10T16:30:23Z
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | weight_decay | None | | clipnorm | None | | global_clipnorm | None | | clipvalue | None | | use_ema | False | | ema_momentum | 0.99 | | ema_overwrite_frequency | None | | jit_compile | False | | is_legacy_optimizer | False | | learning_rate | 0.0010000000474974513 | | beta_1 | 0.9 | | beta_2 | 0.999 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
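A minimal loading sketch via `huggingface_hub`; the expected input preprocessing is not documented in the card, so only the model load is shown:

```python
from huggingface_hub import from_pretrained_keras

model = from_pretrained_keras("DriveMyScream/Fake_News_Classification_model")
model.summary()  # inspect the architecture; input preprocessing is undocumented
```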
SaniyatMushrat/SusathoASR
SaniyatMushrat
2023-09-10T16:27:10Z
3
0
transformers
[ "transformers", "wav2vec2", "automatic-speech-recognition", "hf-asr-leaderboard", "openslr_SLR53", "robust-speech-event", "bn", "dataset:openslr", "dataset:SLR53", "dataset:Harveenchadha/indic-text", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2023-09-10T16:25:59Z
--- language: - bn license: apache-2.0 tags: - automatic-speech-recognition - hf-asr-leaderboard - openslr_SLR53 - robust-speech-event datasets: - openslr - SLR53 - Harveenchadha/indic-text metrics: - wer - cer model-index: - name: Tahsin-Mayeesha/wav2vec2-bn-300m results: - task: type: automatic-speech-recognition name: Speech Recognition dataset: type: openslr name: Open SLR args: SLR66 metrics: - type: wer value: 0.31104373941386626 name: Test WER - type: cer value: 0.07263099973420006 name: Test CER - type: wer value: 0.17776164652632478 name: Test WER with lm - type: cer value: 0.04394092712884769 name: Test CER with lm --- This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the OPENSLR_SLR53 - Bengali dataset. It achieves the following results on the evaluation set. Without language model: - WER: 0.3110 - CER: 0.072 With a 5-gram language model trained on the [indic-text](https://huggingface.co/datasets/Harveenchadha/indic-text/tree/main) dataset: - WER: 0.17776 - CER: 0.04394 Note: 10% of a total of 218703 samples have been used for evaluation. The evaluation set has 21871 examples. Training was stopped after 30k steps. Output predictions are available under the files section. ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.5e-05 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.1+cu102 - Datasets 1.17.1.dev0 - Tokenizers 0.11.0 Note: The training and evaluation script was modified from https://huggingface.co/chmanoj/xls-r-300m-te and https://github.com/huggingface/transformers/tree/master/examples/research_projects/robust-speech-event. Bengali speech data was not available from the Common Voice or LibriSpeech multilingual datasets, so OpenSLR53 has been used. Note 2: A minimum audio duration of 0.1s has been used to filter the training data, which excluded maybe 10-20 samples. # Citation @misc {tahsin_mayeesha_2023, author = { {Tahsin Mayeesha} }, title = { wav2vec2-bn-300m (Revision e10defc) }, year = 2023, url = { https://huggingface.co/Tahsin-Mayeesha/wav2vec2-bn-300m }, doi = { 10.57967/hf/0939 }, publisher = { Hugging Face } }
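A sketch of how the WER/CER metrics reported above can be computed with `jiwer` (assumed installed); the strings are toy examples, not dataset samples:

```python
import jiwer

reference = "আমি বাংলায় গান গাই"
hypothesis = "আমি বাংলা গান গাই"
print("WER:", jiwer.wer(reference, hypothesis))  # word error rate
print("CER:", jiwer.cer(reference, hypothesis))  # character error rate
```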
shengqin/bloomz-xss-sqli-3b
shengqin
2023-09-10T16:24:42Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T14:58:02Z
--- library_name: peft --- ## Training procedure ### Framework versions - PEFT 0.5.0
waqasobeidy/sdxldemo0001
waqasobeidy
2023-09-10T16:22:11Z
1
1
diffusers
[ "diffusers", "text-to-image", "autotrain", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:finetune:stabilityai/stable-diffusion-xl-base-1.0", "region:us" ]
text-to-image
2023-09-10T13:57:56Z
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: photo of a sks imran tags: - text-to-image - diffusers - autotrain inference: true --- # DreamBooth trained by AutoTrain Text encoder was not trained.
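AutoTrain DreamBooth runs for SDXL typically export LoRA weights on top of the listed base model; under that assumption, a hedged inference sketch:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
# assumes the repo ships LoRA weights, as AutoTrain DreamBooth usually does
pipe.load_lora_weights("waqasobeidy/sdxldemo0001")
image = pipe(prompt="photo of a sks imran", num_inference_steps=30).images[0]
image.save("sample.png")
```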
Ridealist/xlm-roberta-base-finetuned-panx-de
Ridealist
2023-09-10T16:12:10Z
112
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:xtreme", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-10T16:05:16Z
--- license: mit base_model: xlm-roberta-base tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: validation args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8643238940065961 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1363 - F1: 0.8643 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2536 | 1.0 | 525 | 0.1531 | 0.8247 | | 0.1243 | 2.0 | 1050 | 0.1415 | 0.8546 | | 0.08 | 3.0 | 1575 | 0.1363 | 0.8643 | ### Framework versions - Transformers 4.33.1 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
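A quick inference sketch (the example sentence is illustrative):

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="Ridealist/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",  # merge word pieces into whole entities
)
print(ner("Jeff Dean arbeitet bei Google in Berlin."))
```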
CyberHarem/arima_kana_oshinoko
CyberHarem
2023-09-10T16:09:58Z
0
0
null
[ "art", "text-to-image", "dataset:CyberHarem/arima_kana_oshinoko", "license:mit", "region:us" ]
text-to-image
2023-09-10T15:50:26Z
--- license: mit datasets: - CyberHarem/arima_kana_oshinoko pipeline_tag: text-to-image tags: - art --- # Lora of arima_kana_oshinoko This model is trained with [HCP-Diffusion](https://github.com/7eu7d7/HCP-Diffusion), and the auto-training framework is maintained by the [DeepGHS Team](https://huggingface.co/deepghs). The base model used during training is [NAI](https://huggingface.co/deepghs/animefull-latest), and the base model used for generating preview images is [Meina/MeinaMix_V11](https://huggingface.co/Meina/MeinaMix_V11). After downloading the pt and safetensors files for the specified step, you need to use them together. The pt file is used as an embedding, while the safetensors file is loaded as a LoRA. For example, if you want to use the model from step 6000, download `6000/arima_kana_oshinoko.pt` as the embedding and `6000/arima_kana_oshinoko.safetensors` for loading the LoRA. Using both files together, you can generate images of the desired character (a minimal diffusers sketch follows the step table below). **The best step we recommend is 6000**, with a score of 0.952. The trigger words are: 1. `arima_kana_oshinoko` 2. `short_hair, bangs, red_hair, blunt_bangs, bob_cut, red_eyes, hat` We regret that this model is not recommended for the following groups: 1. Individuals who cannot tolerate any deviations from the original character design, even in the slightest detail. 2. Individuals who face application scenarios with high demands for accuracy in recreating character outfits. 3. Individuals who cannot accept the potential randomness in AI-generated images based on the Stable Diffusion algorithm. 4. Individuals who are not comfortable with the fully automated process of training character models using LoRA, or who believe that character models must be trained purely through manual operations to avoid disrespecting the characters. 5. Individuals who find the generated image content offensive to their values. 
These are available steps: | Steps | Score | Download | pattern_1 | pattern_2 | pattern_3 | pattern_4 | pattern_5 | pattern_6 | pattern_7 | pattern_8 | pattern_9 | pattern_10 | pattern_11 | pattern_12 | pattern_13 | bikini | bondage | free | maid | miko | nude | nude2 | suit | yukata | |:---------|:----------|:---------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-----------------------------------------|:--------------------------------------------------|:-------------------------------------|:-------------------------------------|:-------------------------------------|:-----------------------------------------------|:------------------------------------------------|:-------------------------------------|:-----------------------------------------| | **6000** | **0.952** | [**Download**](6000/arima_kana_oshinoko.zip) | ![pattern_1-6000](6000/previews/pattern_1.png) | ![pattern_2-6000](6000/previews/pattern_2.png) | ![pattern_3-6000](6000/previews/pattern_3.png) | ![pattern_4-6000](6000/previews/pattern_4.png) | ![pattern_5-6000](6000/previews/pattern_5.png) | ![pattern_6-6000](6000/previews/pattern_6.png) | ![pattern_7-6000](6000/previews/pattern_7.png) | ![pattern_8-6000](6000/previews/pattern_8.png) | ![pattern_9-6000](6000/previews/pattern_9.png) | ![pattern_10-6000](6000/previews/pattern_10.png) | ![pattern_11-6000](6000/previews/pattern_11.png) | ![pattern_12-6000](6000/previews/pattern_12.png) | ![pattern_13-6000](6000/previews/pattern_13.png) | ![bikini-6000](6000/previews/bikini.png) | [<NSFW, click to see>](6000/previews/bondage.png) | ![free-6000](6000/previews/free.png) | ![maid-6000](6000/previews/maid.png) | ![miko-6000](6000/previews/miko.png) | [<NSFW, click to see>](6000/previews/nude.png) | [<NSFW, click to see>](6000/previews/nude2.png) | ![suit-6000](6000/previews/suit.png) | ![yukata-6000](6000/previews/yukata.png) | | 5600 | 0.937 | [Download](5600/arima_kana_oshinoko.zip) | ![pattern_1-5600](5600/previews/pattern_1.png) | ![pattern_2-5600](5600/previews/pattern_2.png) | ![pattern_3-5600](5600/previews/pattern_3.png) | ![pattern_4-5600](5600/previews/pattern_4.png) | ![pattern_5-5600](5600/previews/pattern_5.png) | ![pattern_6-5600](5600/previews/pattern_6.png) | ![pattern_7-5600](5600/previews/pattern_7.png) | ![pattern_8-5600](5600/previews/pattern_8.png) | ![pattern_9-5600](5600/previews/pattern_9.png) | ![pattern_10-5600](5600/previews/pattern_10.png) | ![pattern_11-5600](5600/previews/pattern_11.png) | ![pattern_12-5600](5600/previews/pattern_12.png) | ![pattern_13-5600](5600/previews/pattern_13.png) | ![bikini-5600](5600/previews/bikini.png) | [<NSFW, click to see>](5600/previews/bondage.png) | ![free-5600](5600/previews/free.png) | ![maid-5600](5600/previews/maid.png) | ![miko-5600](5600/previews/miko.png) | [<NSFW, click to see>](5600/previews/nude.png) | [<NSFW, click to see>](5600/previews/nude2.png) | 
![suit-5600](5600/previews/suit.png) | ![yukata-5600](5600/previews/yukata.png) | | 5200 | 0.931 | [Download](5200/arima_kana_oshinoko.zip) | ![pattern_1-5200](5200/previews/pattern_1.png) | ![pattern_2-5200](5200/previews/pattern_2.png) | ![pattern_3-5200](5200/previews/pattern_3.png) | ![pattern_4-5200](5200/previews/pattern_4.png) | ![pattern_5-5200](5200/previews/pattern_5.png) | ![pattern_6-5200](5200/previews/pattern_6.png) | ![pattern_7-5200](5200/previews/pattern_7.png) | ![pattern_8-5200](5200/previews/pattern_8.png) | ![pattern_9-5200](5200/previews/pattern_9.png) | ![pattern_10-5200](5200/previews/pattern_10.png) | ![pattern_11-5200](5200/previews/pattern_11.png) | ![pattern_12-5200](5200/previews/pattern_12.png) | ![pattern_13-5200](5200/previews/pattern_13.png) | ![bikini-5200](5200/previews/bikini.png) | [<NSFW, click to see>](5200/previews/bondage.png) | ![free-5200](5200/previews/free.png) | ![maid-5200](5200/previews/maid.png) | ![miko-5200](5200/previews/miko.png) | [<NSFW, click to see>](5200/previews/nude.png) | [<NSFW, click to see>](5200/previews/nude2.png) | ![suit-5200](5200/previews/suit.png) | ![yukata-5200](5200/previews/yukata.png) | | 4800 | 0.941 | [Download](4800/arima_kana_oshinoko.zip) | ![pattern_1-4800](4800/previews/pattern_1.png) | ![pattern_2-4800](4800/previews/pattern_2.png) | ![pattern_3-4800](4800/previews/pattern_3.png) | ![pattern_4-4800](4800/previews/pattern_4.png) | ![pattern_5-4800](4800/previews/pattern_5.png) | ![pattern_6-4800](4800/previews/pattern_6.png) | ![pattern_7-4800](4800/previews/pattern_7.png) | ![pattern_8-4800](4800/previews/pattern_8.png) | ![pattern_9-4800](4800/previews/pattern_9.png) | ![pattern_10-4800](4800/previews/pattern_10.png) | ![pattern_11-4800](4800/previews/pattern_11.png) | ![pattern_12-4800](4800/previews/pattern_12.png) | ![pattern_13-4800](4800/previews/pattern_13.png) | ![bikini-4800](4800/previews/bikini.png) | [<NSFW, click to see>](4800/previews/bondage.png) | ![free-4800](4800/previews/free.png) | ![maid-4800](4800/previews/maid.png) | ![miko-4800](4800/previews/miko.png) | [<NSFW, click to see>](4800/previews/nude.png) | [<NSFW, click to see>](4800/previews/nude2.png) | ![suit-4800](4800/previews/suit.png) | ![yukata-4800](4800/previews/yukata.png) | | 4400 | 0.931 | [Download](4400/arima_kana_oshinoko.zip) | ![pattern_1-4400](4400/previews/pattern_1.png) | ![pattern_2-4400](4400/previews/pattern_2.png) | ![pattern_3-4400](4400/previews/pattern_3.png) | ![pattern_4-4400](4400/previews/pattern_4.png) | ![pattern_5-4400](4400/previews/pattern_5.png) | ![pattern_6-4400](4400/previews/pattern_6.png) | ![pattern_7-4400](4400/previews/pattern_7.png) | ![pattern_8-4400](4400/previews/pattern_8.png) | ![pattern_9-4400](4400/previews/pattern_9.png) | ![pattern_10-4400](4400/previews/pattern_10.png) | ![pattern_11-4400](4400/previews/pattern_11.png) | ![pattern_12-4400](4400/previews/pattern_12.png) | ![pattern_13-4400](4400/previews/pattern_13.png) | ![bikini-4400](4400/previews/bikini.png) | [<NSFW, click to see>](4400/previews/bondage.png) | ![free-4400](4400/previews/free.png) | ![maid-4400](4400/previews/maid.png) | ![miko-4400](4400/previews/miko.png) | [<NSFW, click to see>](4400/previews/nude.png) | [<NSFW, click to see>](4400/previews/nude2.png) | ![suit-4400](4400/previews/suit.png) | ![yukata-4400](4400/previews/yukata.png) | | 4000 | 0.904 | [Download](4000/arima_kana_oshinoko.zip) | ![pattern_1-4000](4000/previews/pattern_1.png) | ![pattern_2-4000](4000/previews/pattern_2.png) | 
![pattern_3-4000](4000/previews/pattern_3.png) | ![pattern_4-4000](4000/previews/pattern_4.png) | ![pattern_5-4000](4000/previews/pattern_5.png) | ![pattern_6-4000](4000/previews/pattern_6.png) | ![pattern_7-4000](4000/previews/pattern_7.png) | ![pattern_8-4000](4000/previews/pattern_8.png) | ![pattern_9-4000](4000/previews/pattern_9.png) | ![pattern_10-4000](4000/previews/pattern_10.png) | ![pattern_11-4000](4000/previews/pattern_11.png) | ![pattern_12-4000](4000/previews/pattern_12.png) | ![pattern_13-4000](4000/previews/pattern_13.png) | ![bikini-4000](4000/previews/bikini.png) | [<NSFW, click to see>](4000/previews/bondage.png) | ![free-4000](4000/previews/free.png) | ![maid-4000](4000/previews/maid.png) | ![miko-4000](4000/previews/miko.png) | [<NSFW, click to see>](4000/previews/nude.png) | [<NSFW, click to see>](4000/previews/nude2.png) | ![suit-4000](4000/previews/suit.png) | ![yukata-4000](4000/previews/yukata.png) | | 3600 | 0.924 | [Download](3600/arima_kana_oshinoko.zip) | ![pattern_1-3600](3600/previews/pattern_1.png) | ![pattern_2-3600](3600/previews/pattern_2.png) | ![pattern_3-3600](3600/previews/pattern_3.png) | ![pattern_4-3600](3600/previews/pattern_4.png) | ![pattern_5-3600](3600/previews/pattern_5.png) | ![pattern_6-3600](3600/previews/pattern_6.png) | ![pattern_7-3600](3600/previews/pattern_7.png) | ![pattern_8-3600](3600/previews/pattern_8.png) | ![pattern_9-3600](3600/previews/pattern_9.png) | ![pattern_10-3600](3600/previews/pattern_10.png) | ![pattern_11-3600](3600/previews/pattern_11.png) | ![pattern_12-3600](3600/previews/pattern_12.png) | ![pattern_13-3600](3600/previews/pattern_13.png) | ![bikini-3600](3600/previews/bikini.png) | [<NSFW, click to see>](3600/previews/bondage.png) | ![free-3600](3600/previews/free.png) | ![maid-3600](3600/previews/maid.png) | ![miko-3600](3600/previews/miko.png) | [<NSFW, click to see>](3600/previews/nude.png) | [<NSFW, click to see>](3600/previews/nude2.png) | ![suit-3600](3600/previews/suit.png) | ![yukata-3600](3600/previews/yukata.png) | | 3200 | 0.922 | [Download](3200/arima_kana_oshinoko.zip) | ![pattern_1-3200](3200/previews/pattern_1.png) | ![pattern_2-3200](3200/previews/pattern_2.png) | ![pattern_3-3200](3200/previews/pattern_3.png) | ![pattern_4-3200](3200/previews/pattern_4.png) | ![pattern_5-3200](3200/previews/pattern_5.png) | ![pattern_6-3200](3200/previews/pattern_6.png) | ![pattern_7-3200](3200/previews/pattern_7.png) | ![pattern_8-3200](3200/previews/pattern_8.png) | ![pattern_9-3200](3200/previews/pattern_9.png) | ![pattern_10-3200](3200/previews/pattern_10.png) | ![pattern_11-3200](3200/previews/pattern_11.png) | ![pattern_12-3200](3200/previews/pattern_12.png) | ![pattern_13-3200](3200/previews/pattern_13.png) | ![bikini-3200](3200/previews/bikini.png) | [<NSFW, click to see>](3200/previews/bondage.png) | ![free-3200](3200/previews/free.png) | ![maid-3200](3200/previews/maid.png) | ![miko-3200](3200/previews/miko.png) | [<NSFW, click to see>](3200/previews/nude.png) | [<NSFW, click to see>](3200/previews/nude2.png) | ![suit-3200](3200/previews/suit.png) | ![yukata-3200](3200/previews/yukata.png) | | 2800 | 0.914 | [Download](2800/arima_kana_oshinoko.zip) | ![pattern_1-2800](2800/previews/pattern_1.png) | ![pattern_2-2800](2800/previews/pattern_2.png) | ![pattern_3-2800](2800/previews/pattern_3.png) | ![pattern_4-2800](2800/previews/pattern_4.png) | ![pattern_5-2800](2800/previews/pattern_5.png) | ![pattern_6-2800](2800/previews/pattern_6.png) | ![pattern_7-2800](2800/previews/pattern_7.png) | 
![pattern_8-2800](2800/previews/pattern_8.png) | ![pattern_9-2800](2800/previews/pattern_9.png) | ![pattern_10-2800](2800/previews/pattern_10.png) | ![pattern_11-2800](2800/previews/pattern_11.png) | ![pattern_12-2800](2800/previews/pattern_12.png) | ![pattern_13-2800](2800/previews/pattern_13.png) | ![bikini-2800](2800/previews/bikini.png) | [<NSFW, click to see>](2800/previews/bondage.png) | ![free-2800](2800/previews/free.png) | ![maid-2800](2800/previews/maid.png) | ![miko-2800](2800/previews/miko.png) | [<NSFW, click to see>](2800/previews/nude.png) | [<NSFW, click to see>](2800/previews/nude2.png) | ![suit-2800](2800/previews/suit.png) | ![yukata-2800](2800/previews/yukata.png) | | 2400 | 0.894 | [Download](2400/arima_kana_oshinoko.zip) | ![pattern_1-2400](2400/previews/pattern_1.png) | ![pattern_2-2400](2400/previews/pattern_2.png) | ![pattern_3-2400](2400/previews/pattern_3.png) | ![pattern_4-2400](2400/previews/pattern_4.png) | ![pattern_5-2400](2400/previews/pattern_5.png) | ![pattern_6-2400](2400/previews/pattern_6.png) | ![pattern_7-2400](2400/previews/pattern_7.png) | ![pattern_8-2400](2400/previews/pattern_8.png) | ![pattern_9-2400](2400/previews/pattern_9.png) | ![pattern_10-2400](2400/previews/pattern_10.png) | ![pattern_11-2400](2400/previews/pattern_11.png) | ![pattern_12-2400](2400/previews/pattern_12.png) | ![pattern_13-2400](2400/previews/pattern_13.png) | ![bikini-2400](2400/previews/bikini.png) | [<NSFW, click to see>](2400/previews/bondage.png) | ![free-2400](2400/previews/free.png) | ![maid-2400](2400/previews/maid.png) | ![miko-2400](2400/previews/miko.png) | [<NSFW, click to see>](2400/previews/nude.png) | [<NSFW, click to see>](2400/previews/nude2.png) | ![suit-2400](2400/previews/suit.png) | ![yukata-2400](2400/previews/yukata.png) | | 2000 | 0.881 | [Download](2000/arima_kana_oshinoko.zip) | ![pattern_1-2000](2000/previews/pattern_1.png) | ![pattern_2-2000](2000/previews/pattern_2.png) | ![pattern_3-2000](2000/previews/pattern_3.png) | ![pattern_4-2000](2000/previews/pattern_4.png) | ![pattern_5-2000](2000/previews/pattern_5.png) | ![pattern_6-2000](2000/previews/pattern_6.png) | ![pattern_7-2000](2000/previews/pattern_7.png) | ![pattern_8-2000](2000/previews/pattern_8.png) | ![pattern_9-2000](2000/previews/pattern_9.png) | ![pattern_10-2000](2000/previews/pattern_10.png) | ![pattern_11-2000](2000/previews/pattern_11.png) | ![pattern_12-2000](2000/previews/pattern_12.png) | ![pattern_13-2000](2000/previews/pattern_13.png) | ![bikini-2000](2000/previews/bikini.png) | [<NSFW, click to see>](2000/previews/bondage.png) | ![free-2000](2000/previews/free.png) | ![maid-2000](2000/previews/maid.png) | ![miko-2000](2000/previews/miko.png) | [<NSFW, click to see>](2000/previews/nude.png) | [<NSFW, click to see>](2000/previews/nude2.png) | ![suit-2000](2000/previews/suit.png) | ![yukata-2000](2000/previews/yukata.png) | | 1600 | 0.876 | [Download](1600/arima_kana_oshinoko.zip) | ![pattern_1-1600](1600/previews/pattern_1.png) | ![pattern_2-1600](1600/previews/pattern_2.png) | ![pattern_3-1600](1600/previews/pattern_3.png) | ![pattern_4-1600](1600/previews/pattern_4.png) | ![pattern_5-1600](1600/previews/pattern_5.png) | ![pattern_6-1600](1600/previews/pattern_6.png) | ![pattern_7-1600](1600/previews/pattern_7.png) | ![pattern_8-1600](1600/previews/pattern_8.png) | ![pattern_9-1600](1600/previews/pattern_9.png) | ![pattern_10-1600](1600/previews/pattern_10.png) | ![pattern_11-1600](1600/previews/pattern_11.png) | ![pattern_12-1600](1600/previews/pattern_12.png) | 
![pattern_13-1600](1600/previews/pattern_13.png) | ![bikini-1600](1600/previews/bikini.png) | [<NSFW, click to see>](1600/previews/bondage.png) | ![free-1600](1600/previews/free.png) | ![maid-1600](1600/previews/maid.png) | ![miko-1600](1600/previews/miko.png) | [<NSFW, click to see>](1600/previews/nude.png) | [<NSFW, click to see>](1600/previews/nude2.png) | ![suit-1600](1600/previews/suit.png) | ![yukata-1600](1600/previews/yukata.png) | | 1200 | 0.750 | [Download](1200/arima_kana_oshinoko.zip) | ![pattern_1-1200](1200/previews/pattern_1.png) | ![pattern_2-1200](1200/previews/pattern_2.png) | ![pattern_3-1200](1200/previews/pattern_3.png) | ![pattern_4-1200](1200/previews/pattern_4.png) | ![pattern_5-1200](1200/previews/pattern_5.png) | ![pattern_6-1200](1200/previews/pattern_6.png) | ![pattern_7-1200](1200/previews/pattern_7.png) | ![pattern_8-1200](1200/previews/pattern_8.png) | ![pattern_9-1200](1200/previews/pattern_9.png) | ![pattern_10-1200](1200/previews/pattern_10.png) | ![pattern_11-1200](1200/previews/pattern_11.png) | ![pattern_12-1200](1200/previews/pattern_12.png) | ![pattern_13-1200](1200/previews/pattern_13.png) | ![bikini-1200](1200/previews/bikini.png) | [<NSFW, click to see>](1200/previews/bondage.png) | ![free-1200](1200/previews/free.png) | ![maid-1200](1200/previews/maid.png) | ![miko-1200](1200/previews/miko.png) | [<NSFW, click to see>](1200/previews/nude.png) | [<NSFW, click to see>](1200/previews/nude2.png) | ![suit-1200](1200/previews/suit.png) | ![yukata-1200](1200/previews/yukata.png) | | 800 | 0.838 | [Download](800/arima_kana_oshinoko.zip) | ![pattern_1-800](800/previews/pattern_1.png) | ![pattern_2-800](800/previews/pattern_2.png) | ![pattern_3-800](800/previews/pattern_3.png) | ![pattern_4-800](800/previews/pattern_4.png) | ![pattern_5-800](800/previews/pattern_5.png) | ![pattern_6-800](800/previews/pattern_6.png) | ![pattern_7-800](800/previews/pattern_7.png) | ![pattern_8-800](800/previews/pattern_8.png) | ![pattern_9-800](800/previews/pattern_9.png) | ![pattern_10-800](800/previews/pattern_10.png) | ![pattern_11-800](800/previews/pattern_11.png) | ![pattern_12-800](800/previews/pattern_12.png) | ![pattern_13-800](800/previews/pattern_13.png) | ![bikini-800](800/previews/bikini.png) | [<NSFW, click to see>](800/previews/bondage.png) | ![free-800](800/previews/free.png) | ![maid-800](800/previews/maid.png) | ![miko-800](800/previews/miko.png) | [<NSFW, click to see>](800/previews/nude.png) | [<NSFW, click to see>](800/previews/nude2.png) | ![suit-800](800/previews/suit.png) | ![yukata-800](800/previews/yukata.png) | | 400 | 0.726 | [Download](400/arima_kana_oshinoko.zip) | ![pattern_1-400](400/previews/pattern_1.png) | ![pattern_2-400](400/previews/pattern_2.png) | ![pattern_3-400](400/previews/pattern_3.png) | ![pattern_4-400](400/previews/pattern_4.png) | ![pattern_5-400](400/previews/pattern_5.png) | ![pattern_6-400](400/previews/pattern_6.png) | ![pattern_7-400](400/previews/pattern_7.png) | ![pattern_8-400](400/previews/pattern_8.png) | ![pattern_9-400](400/previews/pattern_9.png) | ![pattern_10-400](400/previews/pattern_10.png) | ![pattern_11-400](400/previews/pattern_11.png) | ![pattern_12-400](400/previews/pattern_12.png) | ![pattern_13-400](400/previews/pattern_13.png) | ![bikini-400](400/previews/bikini.png) | [<NSFW, click to see>](400/previews/bondage.png) | ![free-400](400/previews/free.png) | ![maid-400](400/previews/maid.png) | ![miko-400](400/previews/miko.png) | [<NSFW, click to see>](400/previews/nude.png) | [<NSFW, click to 
see>](400/previews/nude2.png) | ![suit-400](400/previews/suit.png) | ![yukata-400](400/previews/yukata.png) |
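As referenced above, a hedged sketch of combining the recommended step-6000 files with the preview base model (local paths are assumed after downloading from this repo, and the `.pt` embedding comes from HCP-Diffusion, so it may need conversion if it does not follow the standard textual-inversion format):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Meina/MeinaMix_V11", torch_dtype=torch.float16
).to("cuda")
# local paths assumed: the step-6000 files described above
pipe.load_textual_inversion("6000/arima_kana_oshinoko.pt", token="arima_kana_oshinoko")
pipe.load_lora_weights("6000", weight_name="arima_kana_oshinoko.safetensors")
image = pipe("arima_kana_oshinoko, short_hair, red_hair, red_eyes, hat").images[0]
image.save("arima_kana.png")
```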
yacht/latte-mc-bert-base-chinese-ws
yacht
2023-09-10T16:09:39Z
109
1
transformers
[ "transformers", "pytorch", "bert", "feature-extraction", "word segmentation", "token-classification", "zh", "dataset:ctb6", "dataset:as", "dataset:cityu", "dataset:msra", "dataset:pku", "dataset:sxu", "dataset:cnc", "license:cc-by-sa-4.0", "endpoints_compatible", "region:us" ]
token-classification
2023-08-31T09:38:45Z
--- language: zh license: cc-by-sa-4.0 tags: - word segmentation datasets: - ctb6 - as - cityu - msra - pku - sxu - cnc pipeline_tag: token-classification --- # Multi-criteria BERT base Chinese with Lattice for Word Segmentation This is a variant of the pre-trained [BERT](https://github.com/google-research/bert) model. It was pre-trained on Chinese-language texts and fine-tuned for word segmentation based on [bert-base-chinese](https://huggingface.co/bert-base-chinese). This version processes input texts at the character level, with word-level information incorporated through a lattice structure. The scripts for the pre-training are available at [tchayintr/latte-ptm-ws](https://github.com/tchayintr/latte-ptm-ws). The LATTE scripts are available at [tchayintr/latte-ws](https://github.com/tchayintr/latte-ws). ## Model architecture The model architecture is described in this [paper](https://www.jstage.jst.go.jp/article/jnlp/30/2/30_456/_article/-char/ja). ## Training Data The model is trained on multiple Chinese word segmentation datasets, including ctb6, sighan2005 (as, cityu, msra, pku), sighan2008 (sxu), and cnc. The datasets can be accessed from [here](https://github.com/hankcs/multi-criteria-cws/tree/master/data). ## Licenses The pre-trained model is distributed under the terms of the [Creative Commons Attribution-ShareAlike 4.0](https://creativecommons.org/licenses/by-sa/4.0/) license. ## Acknowledgments This model was trained with GPU servers provided by the [Okumura-Funakoshi NLP Group](https://lr-www.pi.titech.ac.jp).
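The lattice construction and segmentation decoding live in the linked LATTE scripts; loading the checkpoint as a plain encoder (a sketch that only yields contextual embeddings, not segmentations):

```python
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("yacht/latte-mc-bert-base-chinese-ws")
model = AutoModel.from_pretrained("yacht/latte-mc-bert-base-chinese-ws")

# illustrative input; per-character embeddings feed the lattice decoder in LATTE
outputs = model(**tokenizer("商品和服务", return_tensors="pt"))
print(outputs.last_hidden_state.shape)
```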
Ashutosh94/my-pet-character
Ashutosh94
2023-09-10T16:05:38Z
4
1
diffusers
[ "diffusers", "safetensors", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-09-10T16:01:50Z
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### my-pet-character Dreambooth model trained by Ashutosh94 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: IIITB-430 Sample pictures of this concept:
deepcyber/Enhanced-CIFAR10-CNN
deepcyber
2023-09-10T15:45:13Z
0
1
keras
[ "keras", "image-classification", "dataset:cifar10", "license:mit", "region:us" ]
image-classification
2023-09-09T12:20:07Z
--- license: mit datasets: - cifar10 library_name: keras pipeline_tag: image-classification --- ### Model Name: `Enhanced-CIFAR10-CNN` **Description:** Introducing `Enhanced-CIFAR10-CNN`, a state-of-the-art Convolutional Neural Network (CNN) trained on the CIFAR-10 dataset. Based on extensive research and with an impressive accuracy of 89%, this model sets a new benchmark in image classification tasks. What sets it apart? - **High Performance**: Achieves an accuracy rate of 86%, surpassing standard benchmarks. - **Fast Inference**: Optimized for speed, this model ensures quick predictions without compromising on accuracy. - **Compact Size**: Its small footprint makes it ideal for edge deployments and integration into existing systems. - **Transfer Learning Ready**: The model's architecture and pre-trained weights make it an excellent candidate for fine-tuning and further development in various applications. **Usage Examples:**

```python
import numpy as np
from keras.models import load_model

# Load the model
model = load_model('path/to/enhancedCIFAR-10-CNN.h5')

# Perform inference on a CIFAR-10-shaped batch (N, 32, 32, 3)
input_data = np.random.rand(1, 32, 32, 3).astype("float32")
result = model.predict(input_data)
```

**Dependencies:** - Keras >= 2.4.0 - TensorFlow >= 2.5.0 **Citation:** Ogundokun, Roseline Oluwaseun, et al. "Improved CNN based on batch normalization and adam optimizer." International Conference on Computational Science and Its Applications. Cham: Springer International Publishing, 2022. If you find this model useful, please cite our work.
qnfino091/space-tour
qnfino091
2023-09-10T15:29:35Z
14
2
diffusers
[ "diffusers", "safetensors", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-09-10T14:14:58Z
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### space-tour Dreambooth model trained by qnfino091 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: IIITB-146 Sample pictures of this concept: ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/64fdbb690e486522f86b2453/AlECRSNLDNyqisn3lLOgs.jpeg)
gyesibiney/Sentiment-review-analysis-roberta-3
gyesibiney
2023-09-10T15:13:06Z
112
0
transformers
[ "transformers", "pytorch", "roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/roberta-base", "base_model:finetune:FacebookAI/roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-08T16:58:40Z
--- license: mit base_model: roberta-base tags: - generated_from_trainer metrics: - accuracy model-index: - name: Roberta-capstone_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Roberta-capstone_2 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3485 - Accuracy: 0.9400 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2568 | 1.0 | 623 | 0.1971 | 0.9265 | | 0.1581 | 2.0 | 1246 | 0.2102 | 0.9339 | | 0.109 | 3.0 | 1869 | 0.3126 | 0.9356 | | 0.0687 | 4.0 | 2492 | 0.3040 | 0.9382 | | 0.0199 | 5.0 | 3115 | 0.3485 | 0.9400 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
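A quick inference sketch (the review text is illustrative, and the label names come from the checkpoint's config rather than this card):

```python
from transformers import pipeline

clf = pipeline("text-classification", model="gyesibiney/Sentiment-review-analysis-roberta-3")
print(clf("The movie was a complete waste of time."))
```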
Dhruv21/my-white-horse-xzc
Dhruv21
2023-09-10T15:10:39Z
1
0
diffusers
[ "diffusers", "safetensors", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-09-10T15:06:42Z
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### My-white-horse-xzc Dreambooth model trained by Dhruv21 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: IIITB-92 Sample pictures of this concept: ![0](https://huggingface.co/Dhruv21/my-white-horse-xzc/resolve/main/sample_images/00001-2281381523.png)
BanUrsus/tqc-PandaPickAndPlace-v3
BanUrsus
2023-09-10T15:09:11Z
1
1
stable-baselines3
[ "stable-baselines3", "PandaPickAndPlace-v3", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-09-10T15:04:46Z
--- library_name: stable-baselines3 tags: - PandaPickAndPlace-v3 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: TQC results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaPickAndPlace-v3 type: PandaPickAndPlace-v3 metrics: - type: mean_reward value: -6.90 +/- 1.58 name: mean_reward verified: false --- # **TQC** Agent playing **PandaPickAndPlace-v3** This is a trained model of a **TQC** agent playing **PandaPickAndPlace-v3** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch (the checkpoint filename is assumed; check the repo's files tab):

```python
from huggingface_sb3 import load_from_hub
from sb3_contrib import TQC  # TQC lives in the sb3-contrib package

# filename assumed from the repo id
path = load_from_hub("BanUrsus/tqc-PandaPickAndPlace-v3", "tqc-PandaPickAndPlace-v3.zip")
model = TQC.load(path)
```
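A short rollout sketch building on the loading snippet above (assumes the `panda-gym` package is installed; importing it registers the environment):

```python
import gymnasium as gym
import panda_gym  # noqa: F401 -- registers PandaPickAndPlace-v3

env = gym.make("PandaPickAndPlace-v3")
obs, _ = env.reset()
for _ in range(200):
    action, _ = model.predict(obs, deterministic=True)  # `model` from the snippet above
    obs, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        obs, _ = env.reset()
```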
Vadu/ppo-HuggyMyBeloved
Vadu
2023-09-10T14:51:50Z
8
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Huggy", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2023-09-10T14:51:42Z
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the ML-Agents official environments, go to https://huggingface.co/unity 2. Find your model_id: Vadu/ppo-HuggyMyBeloved 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
vasaicrow/bert-finetuned-ner
vasaicrow
2023-09-10T14:41:10Z
106
0
transformers
[ "transformers", "pytorch", "bert", "token-classification", "generated_from_trainer", "dataset:conll2003", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-09-10T14:19:01Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 config: conll2003 split: validation args: conll2003 metrics: - name: Precision type: precision value: 0.9375621066578337 - name: Recall type: recall value: 0.9527095254123191 - name: F1 type: f1 value: 0.9450751252086811 - name: Accuracy type: accuracy value: 0.9865632542532525 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0613 - Precision: 0.9376 - Recall: 0.9527 - F1: 0.9451 - Accuracy: 0.9866 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0886 | 1.0 | 1756 | 0.0764 | 0.9176 | 0.9315 | 0.9245 | 0.9801 | | 0.0342 | 2.0 | 3512 | 0.0618 | 0.9292 | 0.9482 | 0.9386 | 0.9859 | | 0.0168 | 3.0 | 5268 | 0.0613 | 0.9376 | 0.9527 | 0.9451 | 0.9866 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu117 - Datasets 2.13.1 - Tokenizers 0.13.3
SlytheeTove/BluePossum
SlytheeTove
2023-09-10T14:13:02Z
0
0
null
[ "license:apache-2.0", "region:us" ]
null
2023-09-10T14:10:57Z
--- license: apache-2.0 --- Initial training model based on the Llama 2 7B foundation model.
codelion/whisper-age-estimator
codelion
2023-09-10T13:46:00Z
93
3
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "hf-asr-leaderboard", "generated_from_trainer", "hi", "base_model:openai/whisper-base", "base_model:finetune:openai/whisper-base", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2023-07-17T08:30:57Z
--- language: - hi license: apache-2.0 base_model: openai/whisper-base tags: - hf-asr-leaderboard - generated_from_trainer metrics: - accuracy model-index: - name: Whisper Base Hi - Age Estimation results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Base Hi - Age Estimation This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.0118 - Accuracy: 0.6259 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 300 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0 | 0.47 | 100 | 0.9908 | 0.6774 | | 0.0 | 0.93 | 200 | 0.9996 | 0.6677 | | 0.0 | 1.4 | 300 | 1.0118 | 0.6259 | ### Framework versions - Transformers 4.32.0.dev0 - Pytorch 2.0.1+cu117 - Datasets 2.6.1 - Tokenizers 0.13.3
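The card does not show inference code; assuming the checkpoint carries a `WhisperForAudioClassification` head (the accuracy metric suggests a classification fine-tune), a hedged sketch:

```python
import numpy as np
import torch
from transformers import AutoFeatureExtractor, WhisperForAudioClassification

extractor = AutoFeatureExtractor.from_pretrained("codelion/whisper-age-estimator")
model = WhisperForAudioClassification.from_pretrained("codelion/whisper-age-estimator")

# placeholder 1-second silent clip at 16 kHz; replace with a real waveform
audio = np.zeros(16000, dtype=np.float32)
inputs = extractor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])
```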
strumber/Llama2letsmod
strumber
2023-09-10T13:35:49Z
0
0
peft
[ "peft", "tensorboard", "region:us" ]
null
2023-09-10T13:34:51Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.5.0
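As a sketch, the quantization config above maps onto `transformers.BitsAndBytesConfig` like this:

```python
import torch
from transformers import BitsAndBytesConfig

# mirrors the table above: 4-bit fp4, no double quant, float32 compute
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```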
CyberHarem/hoshino_ai_oshinoko
CyberHarem
2023-09-10T13:17:45Z
0
1
null
[ "art", "text-to-image", "dataset:CyberHarem/hoshino_ai_oshinoko", "license:mit", "region:us" ]
text-to-image
2023-09-10T12:59:09Z
--- license: mit datasets: - CyberHarem/hoshino_ai_oshinoko pipeline_tag: text-to-image tags: - art --- # Lora of hoshino_ai_oshinoko This model is trained with [HCP-Diffusion](https://github.com/7eu7d7/HCP-Diffusion), and the auto-training framework is maintained by the [DeepGHS Team](https://huggingface.co/deepghs). The base model used during training is [NAI](https://huggingface.co/deepghs/animefull-latest), and the base model used for generating preview images is [Meina/MeinaMix_V11](https://huggingface.co/Meina/MeinaMix_V11). After downloading the pt and safetensors files for the specified step, you need to use them together. The pt file is used as an embedding, while the safetensors file is loaded as a LoRA. For example, if you want to use the model from step 3960, download `3960/hoshino_ai_oshinoko.pt` as the embedding and `3960/hoshino_ai_oshinoko.safetensors` for loading the LoRA. Using both files together, you can generate images of the desired character. **The best step we recommend is 3960**, with a score of 0.966. The trigger words are: 1. `hoshino_ai_oshinoko` 2. `long_hair, purple_eyes, purple_hair, bangs, smile, symbol-shaped_pupils, multicolored_hair, star-shaped_pupils` We regret that this model is not recommended for the following groups: 1. Individuals who cannot tolerate any deviations from the original character design, even in the slightest detail. 2. Individuals who face application scenarios with high demands for accuracy in recreating character outfits. 3. Individuals who cannot accept the potential randomness in AI-generated images based on the Stable Diffusion algorithm. 4. Individuals who are not comfortable with the fully automated process of training character models using LoRA, or who believe that character models must be trained purely through manual operations to avoid disrespecting the characters. 5. Individuals who find the generated image content offensive to their values. 
These are available steps: | Steps | Score | Download | pattern_1 | pattern_2 | pattern_3 | pattern_4 | pattern_5 | pattern_6 | pattern_7 | pattern_8 | pattern_9 | pattern_10 | pattern_11 | pattern_12 | pattern_13 | pattern_14 | bikini | bondage | free | maid | miko | nude | nude2 | suit | yukata | |:---------|:----------|:---------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-----------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-------------------------------------------------|:-----------------------------------------|:--------------------------------------------------|:-------------------------------------|:-------------------------------------|:-------------------------------------|:-----------------------------------------------|:------------------------------------------------|:-------------------------------------|:-----------------------------------------| | 6600 | 0.966 | [Download](6600/hoshino_ai_oshinoko.zip) | ![pattern_1-6600](6600/previews/pattern_1.png) | ![pattern_2-6600](6600/previews/pattern_2.png) | ![pattern_3-6600](6600/previews/pattern_3.png) | ![pattern_4-6600](6600/previews/pattern_4.png) | ![pattern_5-6600](6600/previews/pattern_5.png) | ![pattern_6-6600](6600/previews/pattern_6.png) | ![pattern_7-6600](6600/previews/pattern_7.png) | ![pattern_8-6600](6600/previews/pattern_8.png) | ![pattern_9-6600](6600/previews/pattern_9.png) | ![pattern_10-6600](6600/previews/pattern_10.png) | ![pattern_11-6600](6600/previews/pattern_11.png) | ![pattern_12-6600](6600/previews/pattern_12.png) | ![pattern_13-6600](6600/previews/pattern_13.png) | ![pattern_14-6600](6600/previews/pattern_14.png) | ![bikini-6600](6600/previews/bikini.png) | [<NSFW, click to see>](6600/previews/bondage.png) | ![free-6600](6600/previews/free.png) | ![maid-6600](6600/previews/maid.png) | ![miko-6600](6600/previews/miko.png) | [<NSFW, click to see>](6600/previews/nude.png) | [<NSFW, click to see>](6600/previews/nude2.png) | ![suit-6600](6600/previews/suit.png) | ![yukata-6600](6600/previews/yukata.png) | | 6160 | 0.956 | [Download](6160/hoshino_ai_oshinoko.zip) | ![pattern_1-6160](6160/previews/pattern_1.png) | ![pattern_2-6160](6160/previews/pattern_2.png) | ![pattern_3-6160](6160/previews/pattern_3.png) | ![pattern_4-6160](6160/previews/pattern_4.png) | ![pattern_5-6160](6160/previews/pattern_5.png) | ![pattern_6-6160](6160/previews/pattern_6.png) | ![pattern_7-6160](6160/previews/pattern_7.png) | ![pattern_8-6160](6160/previews/pattern_8.png) | ![pattern_9-6160](6160/previews/pattern_9.png) | ![pattern_10-6160](6160/previews/pattern_10.png) | ![pattern_11-6160](6160/previews/pattern_11.png) | ![pattern_12-6160](6160/previews/pattern_12.png) | ![pattern_13-6160](6160/previews/pattern_13.png) | ![pattern_14-6160](6160/previews/pattern_14.png) | ![bikini-6160](6160/previews/bikini.png) | [<NSFW, click to see>](6160/previews/bondage.png) | ![free-6160](6160/previews/free.png) | ![maid-6160](6160/previews/maid.png) | 
![miko-6160](6160/previews/miko.png) | [<NSFW, click to see>](6160/previews/nude.png) | [<NSFW, click to see>](6160/previews/nude2.png) | ![suit-6160](6160/previews/suit.png) | ![yukata-6160](6160/previews/yukata.png) | | 5720 | 0.952 | [Download](5720/hoshino_ai_oshinoko.zip) | ![pattern_1-5720](5720/previews/pattern_1.png) | ![pattern_2-5720](5720/previews/pattern_2.png) | ![pattern_3-5720](5720/previews/pattern_3.png) | ![pattern_4-5720](5720/previews/pattern_4.png) | ![pattern_5-5720](5720/previews/pattern_5.png) | ![pattern_6-5720](5720/previews/pattern_6.png) | ![pattern_7-5720](5720/previews/pattern_7.png) | ![pattern_8-5720](5720/previews/pattern_8.png) | ![pattern_9-5720](5720/previews/pattern_9.png) | ![pattern_10-5720](5720/previews/pattern_10.png) | ![pattern_11-5720](5720/previews/pattern_11.png) | ![pattern_12-5720](5720/previews/pattern_12.png) | ![pattern_13-5720](5720/previews/pattern_13.png) | ![pattern_14-5720](5720/previews/pattern_14.png) | ![bikini-5720](5720/previews/bikini.png) | [<NSFW, click to see>](5720/previews/bondage.png) | ![free-5720](5720/previews/free.png) | ![maid-5720](5720/previews/maid.png) | ![miko-5720](5720/previews/miko.png) | [<NSFW, click to see>](5720/previews/nude.png) | [<NSFW, click to see>](5720/previews/nude2.png) | ![suit-5720](5720/previews/suit.png) | ![yukata-5720](5720/previews/yukata.png) | | 5280 | 0.960 | [Download](5280/hoshino_ai_oshinoko.zip) | ![pattern_1-5280](5280/previews/pattern_1.png) | ![pattern_2-5280](5280/previews/pattern_2.png) | ![pattern_3-5280](5280/previews/pattern_3.png) | ![pattern_4-5280](5280/previews/pattern_4.png) | ![pattern_5-5280](5280/previews/pattern_5.png) | ![pattern_6-5280](5280/previews/pattern_6.png) | ![pattern_7-5280](5280/previews/pattern_7.png) | ![pattern_8-5280](5280/previews/pattern_8.png) | ![pattern_9-5280](5280/previews/pattern_9.png) | ![pattern_10-5280](5280/previews/pattern_10.png) | ![pattern_11-5280](5280/previews/pattern_11.png) | ![pattern_12-5280](5280/previews/pattern_12.png) | ![pattern_13-5280](5280/previews/pattern_13.png) | ![pattern_14-5280](5280/previews/pattern_14.png) | ![bikini-5280](5280/previews/bikini.png) | [<NSFW, click to see>](5280/previews/bondage.png) | ![free-5280](5280/previews/free.png) | ![maid-5280](5280/previews/maid.png) | ![miko-5280](5280/previews/miko.png) | [<NSFW, click to see>](5280/previews/nude.png) | [<NSFW, click to see>](5280/previews/nude2.png) | ![suit-5280](5280/previews/suit.png) | ![yukata-5280](5280/previews/yukata.png) | | 4840 | 0.955 | [Download](4840/hoshino_ai_oshinoko.zip) | ![pattern_1-4840](4840/previews/pattern_1.png) | ![pattern_2-4840](4840/previews/pattern_2.png) | ![pattern_3-4840](4840/previews/pattern_3.png) | ![pattern_4-4840](4840/previews/pattern_4.png) | ![pattern_5-4840](4840/previews/pattern_5.png) | ![pattern_6-4840](4840/previews/pattern_6.png) | ![pattern_7-4840](4840/previews/pattern_7.png) | ![pattern_8-4840](4840/previews/pattern_8.png) | ![pattern_9-4840](4840/previews/pattern_9.png) | ![pattern_10-4840](4840/previews/pattern_10.png) | ![pattern_11-4840](4840/previews/pattern_11.png) | ![pattern_12-4840](4840/previews/pattern_12.png) | ![pattern_13-4840](4840/previews/pattern_13.png) | ![pattern_14-4840](4840/previews/pattern_14.png) | ![bikini-4840](4840/previews/bikini.png) | [<NSFW, click to see>](4840/previews/bondage.png) | ![free-4840](4840/previews/free.png) | ![maid-4840](4840/previews/maid.png) | ![miko-4840](4840/previews/miko.png) | [<NSFW, click to see>](4840/previews/nude.png) | [<NSFW, click to 
see>](4840/previews/nude2.png) | ![suit-4840](4840/previews/suit.png) | ![yukata-4840](4840/previews/yukata.png) | | 4400 | 0.960 | [Download](4400/hoshino_ai_oshinoko.zip) | ![pattern_1-4400](4400/previews/pattern_1.png) | ![pattern_2-4400](4400/previews/pattern_2.png) | ![pattern_3-4400](4400/previews/pattern_3.png) | ![pattern_4-4400](4400/previews/pattern_4.png) | ![pattern_5-4400](4400/previews/pattern_5.png) | ![pattern_6-4400](4400/previews/pattern_6.png) | ![pattern_7-4400](4400/previews/pattern_7.png) | ![pattern_8-4400](4400/previews/pattern_8.png) | ![pattern_9-4400](4400/previews/pattern_9.png) | ![pattern_10-4400](4400/previews/pattern_10.png) | ![pattern_11-4400](4400/previews/pattern_11.png) | ![pattern_12-4400](4400/previews/pattern_12.png) | ![pattern_13-4400](4400/previews/pattern_13.png) | ![pattern_14-4400](4400/previews/pattern_14.png) | ![bikini-4400](4400/previews/bikini.png) | [<NSFW, click to see>](4400/previews/bondage.png) | ![free-4400](4400/previews/free.png) | ![maid-4400](4400/previews/maid.png) | ![miko-4400](4400/previews/miko.png) | [<NSFW, click to see>](4400/previews/nude.png) | [<NSFW, click to see>](4400/previews/nude2.png) | ![suit-4400](4400/previews/suit.png) | ![yukata-4400](4400/previews/yukata.png) | | **3960** | **0.966** | [**Download**](3960/hoshino_ai_oshinoko.zip) | ![pattern_1-3960](3960/previews/pattern_1.png) | ![pattern_2-3960](3960/previews/pattern_2.png) | ![pattern_3-3960](3960/previews/pattern_3.png) | ![pattern_4-3960](3960/previews/pattern_4.png) | ![pattern_5-3960](3960/previews/pattern_5.png) | ![pattern_6-3960](3960/previews/pattern_6.png) | ![pattern_7-3960](3960/previews/pattern_7.png) | ![pattern_8-3960](3960/previews/pattern_8.png) | ![pattern_9-3960](3960/previews/pattern_9.png) | ![pattern_10-3960](3960/previews/pattern_10.png) | ![pattern_11-3960](3960/previews/pattern_11.png) | ![pattern_12-3960](3960/previews/pattern_12.png) | ![pattern_13-3960](3960/previews/pattern_13.png) | ![pattern_14-3960](3960/previews/pattern_14.png) | ![bikini-3960](3960/previews/bikini.png) | [<NSFW, click to see>](3960/previews/bondage.png) | ![free-3960](3960/previews/free.png) | ![maid-3960](3960/previews/maid.png) | ![miko-3960](3960/previews/miko.png) | [<NSFW, click to see>](3960/previews/nude.png) | [<NSFW, click to see>](3960/previews/nude2.png) | ![suit-3960](3960/previews/suit.png) | ![yukata-3960](3960/previews/yukata.png) | | 3520 | 0.949 | [Download](3520/hoshino_ai_oshinoko.zip) | ![pattern_1-3520](3520/previews/pattern_1.png) | ![pattern_2-3520](3520/previews/pattern_2.png) | ![pattern_3-3520](3520/previews/pattern_3.png) | ![pattern_4-3520](3520/previews/pattern_4.png) | ![pattern_5-3520](3520/previews/pattern_5.png) | ![pattern_6-3520](3520/previews/pattern_6.png) | ![pattern_7-3520](3520/previews/pattern_7.png) | ![pattern_8-3520](3520/previews/pattern_8.png) | ![pattern_9-3520](3520/previews/pattern_9.png) | ![pattern_10-3520](3520/previews/pattern_10.png) | ![pattern_11-3520](3520/previews/pattern_11.png) | ![pattern_12-3520](3520/previews/pattern_12.png) | ![pattern_13-3520](3520/previews/pattern_13.png) | ![pattern_14-3520](3520/previews/pattern_14.png) | ![bikini-3520](3520/previews/bikini.png) | [<NSFW, click to see>](3520/previews/bondage.png) | ![free-3520](3520/previews/free.png) | ![maid-3520](3520/previews/maid.png) | ![miko-3520](3520/previews/miko.png) | [<NSFW, click to see>](3520/previews/nude.png) | [<NSFW, click to see>](3520/previews/nude2.png) | ![suit-3520](3520/previews/suit.png) | 
![yukata-3520](3520/previews/yukata.png) | | 3080 | 0.956 | [Download](3080/hoshino_ai_oshinoko.zip) | ![pattern_1-3080](3080/previews/pattern_1.png) | ![pattern_2-3080](3080/previews/pattern_2.png) | ![pattern_3-3080](3080/previews/pattern_3.png) | ![pattern_4-3080](3080/previews/pattern_4.png) | ![pattern_5-3080](3080/previews/pattern_5.png) | ![pattern_6-3080](3080/previews/pattern_6.png) | ![pattern_7-3080](3080/previews/pattern_7.png) | ![pattern_8-3080](3080/previews/pattern_8.png) | ![pattern_9-3080](3080/previews/pattern_9.png) | ![pattern_10-3080](3080/previews/pattern_10.png) | ![pattern_11-3080](3080/previews/pattern_11.png) | ![pattern_12-3080](3080/previews/pattern_12.png) | ![pattern_13-3080](3080/previews/pattern_13.png) | ![pattern_14-3080](3080/previews/pattern_14.png) | ![bikini-3080](3080/previews/bikini.png) | [<NSFW, click to see>](3080/previews/bondage.png) | ![free-3080](3080/previews/free.png) | ![maid-3080](3080/previews/maid.png) | ![miko-3080](3080/previews/miko.png) | [<NSFW, click to see>](3080/previews/nude.png) | [<NSFW, click to see>](3080/previews/nude2.png) | ![suit-3080](3080/previews/suit.png) | ![yukata-3080](3080/previews/yukata.png) | | 2640 | 0.955 | [Download](2640/hoshino_ai_oshinoko.zip) | ![pattern_1-2640](2640/previews/pattern_1.png) | ![pattern_2-2640](2640/previews/pattern_2.png) | ![pattern_3-2640](2640/previews/pattern_3.png) | ![pattern_4-2640](2640/previews/pattern_4.png) | ![pattern_5-2640](2640/previews/pattern_5.png) | ![pattern_6-2640](2640/previews/pattern_6.png) | ![pattern_7-2640](2640/previews/pattern_7.png) | ![pattern_8-2640](2640/previews/pattern_8.png) | ![pattern_9-2640](2640/previews/pattern_9.png) | ![pattern_10-2640](2640/previews/pattern_10.png) | ![pattern_11-2640](2640/previews/pattern_11.png) | ![pattern_12-2640](2640/previews/pattern_12.png) | ![pattern_13-2640](2640/previews/pattern_13.png) | ![pattern_14-2640](2640/previews/pattern_14.png) | ![bikini-2640](2640/previews/bikini.png) | [<NSFW, click to see>](2640/previews/bondage.png) | ![free-2640](2640/previews/free.png) | ![maid-2640](2640/previews/maid.png) | ![miko-2640](2640/previews/miko.png) | [<NSFW, click to see>](2640/previews/nude.png) | [<NSFW, click to see>](2640/previews/nude2.png) | ![suit-2640](2640/previews/suit.png) | ![yukata-2640](2640/previews/yukata.png) | | 2200 | 0.939 | [Download](2200/hoshino_ai_oshinoko.zip) | ![pattern_1-2200](2200/previews/pattern_1.png) | ![pattern_2-2200](2200/previews/pattern_2.png) | ![pattern_3-2200](2200/previews/pattern_3.png) | ![pattern_4-2200](2200/previews/pattern_4.png) | ![pattern_5-2200](2200/previews/pattern_5.png) | ![pattern_6-2200](2200/previews/pattern_6.png) | ![pattern_7-2200](2200/previews/pattern_7.png) | ![pattern_8-2200](2200/previews/pattern_8.png) | ![pattern_9-2200](2200/previews/pattern_9.png) | ![pattern_10-2200](2200/previews/pattern_10.png) | ![pattern_11-2200](2200/previews/pattern_11.png) | ![pattern_12-2200](2200/previews/pattern_12.png) | ![pattern_13-2200](2200/previews/pattern_13.png) | ![pattern_14-2200](2200/previews/pattern_14.png) | ![bikini-2200](2200/previews/bikini.png) | [<NSFW, click to see>](2200/previews/bondage.png) | ![free-2200](2200/previews/free.png) | ![maid-2200](2200/previews/maid.png) | ![miko-2200](2200/previews/miko.png) | [<NSFW, click to see>](2200/previews/nude.png) | [<NSFW, click to see>](2200/previews/nude2.png) | ![suit-2200](2200/previews/suit.png) | ![yukata-2200](2200/previews/yukata.png) | | 1760 | 0.939 | [Download](1760/hoshino_ai_oshinoko.zip) | 
![pattern_1-1760](1760/previews/pattern_1.png) | ![pattern_2-1760](1760/previews/pattern_2.png) | ![pattern_3-1760](1760/previews/pattern_3.png) | ![pattern_4-1760](1760/previews/pattern_4.png) | ![pattern_5-1760](1760/previews/pattern_5.png) | ![pattern_6-1760](1760/previews/pattern_6.png) | ![pattern_7-1760](1760/previews/pattern_7.png) | ![pattern_8-1760](1760/previews/pattern_8.png) | ![pattern_9-1760](1760/previews/pattern_9.png) | ![pattern_10-1760](1760/previews/pattern_10.png) | ![pattern_11-1760](1760/previews/pattern_11.png) | ![pattern_12-1760](1760/previews/pattern_12.png) | ![pattern_13-1760](1760/previews/pattern_13.png) | ![pattern_14-1760](1760/previews/pattern_14.png) | ![bikini-1760](1760/previews/bikini.png) | [<NSFW, click to see>](1760/previews/bondage.png) | ![free-1760](1760/previews/free.png) | ![maid-1760](1760/previews/maid.png) | ![miko-1760](1760/previews/miko.png) | [<NSFW, click to see>](1760/previews/nude.png) | [<NSFW, click to see>](1760/previews/nude2.png) | ![suit-1760](1760/previews/suit.png) | ![yukata-1760](1760/previews/yukata.png) | | 1320 | 0.931 | [Download](1320/hoshino_ai_oshinoko.zip) | ![pattern_1-1320](1320/previews/pattern_1.png) | ![pattern_2-1320](1320/previews/pattern_2.png) | ![pattern_3-1320](1320/previews/pattern_3.png) | ![pattern_4-1320](1320/previews/pattern_4.png) | ![pattern_5-1320](1320/previews/pattern_5.png) | ![pattern_6-1320](1320/previews/pattern_6.png) | ![pattern_7-1320](1320/previews/pattern_7.png) | ![pattern_8-1320](1320/previews/pattern_8.png) | ![pattern_9-1320](1320/previews/pattern_9.png) | ![pattern_10-1320](1320/previews/pattern_10.png) | ![pattern_11-1320](1320/previews/pattern_11.png) | ![pattern_12-1320](1320/previews/pattern_12.png) | ![pattern_13-1320](1320/previews/pattern_13.png) | ![pattern_14-1320](1320/previews/pattern_14.png) | ![bikini-1320](1320/previews/bikini.png) | [<NSFW, click to see>](1320/previews/bondage.png) | ![free-1320](1320/previews/free.png) | ![maid-1320](1320/previews/maid.png) | ![miko-1320](1320/previews/miko.png) | [<NSFW, click to see>](1320/previews/nude.png) | [<NSFW, click to see>](1320/previews/nude2.png) | ![suit-1320](1320/previews/suit.png) | ![yukata-1320](1320/previews/yukata.png) | | 880 | 0.933 | [Download](880/hoshino_ai_oshinoko.zip) | ![pattern_1-880](880/previews/pattern_1.png) | ![pattern_2-880](880/previews/pattern_2.png) | ![pattern_3-880](880/previews/pattern_3.png) | ![pattern_4-880](880/previews/pattern_4.png) | ![pattern_5-880](880/previews/pattern_5.png) | ![pattern_6-880](880/previews/pattern_6.png) | ![pattern_7-880](880/previews/pattern_7.png) | ![pattern_8-880](880/previews/pattern_8.png) | ![pattern_9-880](880/previews/pattern_9.png) | ![pattern_10-880](880/previews/pattern_10.png) | ![pattern_11-880](880/previews/pattern_11.png) | ![pattern_12-880](880/previews/pattern_12.png) | ![pattern_13-880](880/previews/pattern_13.png) | ![pattern_14-880](880/previews/pattern_14.png) | ![bikini-880](880/previews/bikini.png) | [<NSFW, click to see>](880/previews/bondage.png) | ![free-880](880/previews/free.png) | ![maid-880](880/previews/maid.png) | ![miko-880](880/previews/miko.png) | [<NSFW, click to see>](880/previews/nude.png) | [<NSFW, click to see>](880/previews/nude2.png) | ![suit-880](880/previews/suit.png) | ![yukata-880](880/previews/yukata.png) | | 440 | 0.900 | [Download](440/hoshino_ai_oshinoko.zip) | ![pattern_1-440](440/previews/pattern_1.png) | ![pattern_2-440](440/previews/pattern_2.png) | ![pattern_3-440](440/previews/pattern_3.png) | 
![pattern_4-440](440/previews/pattern_4.png) | ![pattern_5-440](440/previews/pattern_5.png) | ![pattern_6-440](440/previews/pattern_6.png) | ![pattern_7-440](440/previews/pattern_7.png) | ![pattern_8-440](440/previews/pattern_8.png) | ![pattern_9-440](440/previews/pattern_9.png) | ![pattern_10-440](440/previews/pattern_10.png) | ![pattern_11-440](440/previews/pattern_11.png) | ![pattern_12-440](440/previews/pattern_12.png) | ![pattern_13-440](440/previews/pattern_13.png) | ![pattern_14-440](440/previews/pattern_14.png) | ![bikini-440](440/previews/bikini.png) | [<NSFW, click to see>](440/previews/bondage.png) | ![free-440](440/previews/free.png) | ![maid-440](440/previews/maid.png) | ![miko-440](440/previews/miko.png) | [<NSFW, click to see>](440/previews/nude.png) | [<NSFW, click to see>](440/previews/nude2.png) | ![suit-440](440/previews/suit.png) | ![yukata-440](440/previews/yukata.png) |
aldigobbler/models
aldigobbler
2023-09-10T13:14:56Z
0
0
fairseq
[ "fairseq", "music", "audio-to-audio", "en", "license:apache-2.0", "region:us" ]
audio-to-audio
2023-08-24T10:43:04Z
--- license: apache-2.0 language: - en library_name: fairseq pipeline_tag: audio-to-audio tags: - music --- These are my models for various projects. Most are RVC v2/v1 voice models.
dsmsb/only_esg-class_bert_1009_v1
dsmsb
2023-09-10T13:08:42Z
3
0
transformers
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "base_model:google-bert/bert-base-multilingual-cased", "base_model:finetune:google-bert/bert-base-multilingual-cased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-10T08:33:53Z
--- license: apache-2.0 base_model: bert-base-multilingual-cased tags: - generated_from_trainer metrics: - accuracy model-index: - name: only_esg-class_bert_1009_v1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # only_esg-class_bert_1009_v1 This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1359 - Accuracy: 0.9649 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 73 | 0.1758 | 0.9563 | | No log | 2.0 | 146 | 0.1359 | 0.9649 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
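A minimal inference sketch, assuming the checkpoint loads with the standard `transformers` text-classification pipeline; the example sentence is illustrative, and the printed labels come from whatever `id2label` mapping the checkpoint ships with, since the card does not document the ESG label set.

```python
from transformers import pipeline

# Load the fine-tuned multilingual BERT classifier from the Hub.
classifier = pipeline(
    "text-classification",
    model="dsmsb/only_esg-class_bert_1009_v1",
)

# Illustrative input; the actual label set is defined by the checkpoint.
print(classifier("The company reduced its carbon emissions by 30% this year."))
```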
joe-xhedi/rl_course_vizdoom_health_gathering_supreme
joe-xhedi
2023-09-10T13:06:37Z
0
0
sample-factory
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-09-10T12:20:50Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 8.45 +/- 3.21 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r joe-xhedi/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment (assumed here to be the standard Sample-Factory ViZDoom entry point): ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously stopped.
zayuki/computer_generated_fake_review_detection
zayuki
2023-09-10T12:59:36Z
245
1
transformers
[ "transformers", "tf", "distilbert", "text-classification", "generated_from_keras_callback", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-06T04:13:13Z
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_keras_callback model-index: - name: zayuki/computer_generated_fake_review_detection results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # zayuki/computer_generated_fake_review_detection This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0116 - Validation Loss: 0.0731 - Train Accuracy: 0.9780 - Train F1: 0.9781 - Epoch: 2 ## Model description This model is a fine-tuned version of DistilBERT trained on the [Amazon Review Dataset](https://osf.io/tyue9/), which comprises computer-generated fake reviews and genuine Amazon reviews. The fake reviews were generated by GPT-2, an AI text-generation algorithm. The model is trained to detect such computer-generated fake reviews. ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 7580, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Train F1 | Epoch | |:----------:|:---------------:|:--------------:|:--------:|:-----:| | 0.1207 | 0.0677 | 0.9723 | 0.9726 | 0 | | 0.0343 | 0.0736 | 0.9753 | 0.9756 | 1 | | 0.0116 | 0.0731 | 0.9780 | 0.9781 | 2 | ### Framework versions - Transformers 4.33.1 - TensorFlow 2.12.0 - Datasets 2.14.5 - Tokenizers 0.13.3
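A minimal TensorFlow inference sketch, assuming the repo's TF weights load with `TFAutoModelForSequenceClassification`; the review text is illustrative, and the interpretation of the two output classes is an assumption since the card does not publish an `id2label` mapping.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

repo = "zayuki/computer_generated_fake_review_detection"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFAutoModelForSequenceClassification.from_pretrained(repo)

# Illustrative review; tokenize and run a forward pass.
inputs = tokenizer(
    "Absolutely life-changing product, five stars!", return_tensors="tf"
)
probs = tf.nn.softmax(model(**inputs).logits, axis=-1)
print(probs.numpy())  # class probabilities; real/fake mapping is checkpoint-defined
```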
joe-xhedi/unit8-LunarLander-v2
joe-xhedi
2023-09-10T12:51:34Z
0
0
null
[ "tensorboard", "LunarLander-v2", "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course", "model-index", "region:us" ]
reinforcement-learning
2023-09-10T12:51:29Z
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -188.88 +/- 122.54 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 50000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'joe-xhedi/unit8-LunarLander-v2' 'batch_size': 512 'minibatch_size': 128} ```
ps259/lora-1b1
ps259
2023-09-10T12:44:35Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T12:44:33Z
--- library_name: peft --- ## Training procedure ### Framework versions - PEFT 0.6.0.dev0
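The card names neither the base model nor the adapter's task, so the sketch below is an assumption-heavy template: `bigscience/bloom-1b1` is guessed purely from the repo name and must be replaced with the actual base checkpoint the adapter was trained against.

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "bigscience/bloom-1b1"  # guessed from the repo name; verify before use
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the LoRA adapter on top of the frozen base weights.
model = PeftModel.from_pretrained(base, "ps259/lora-1b1")
```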
IsaacSarps/sentiment_analysis
IsaacSarps
2023-09-10T12:27:19Z
107
0
transformers
[ "transformers", "pytorch", "roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/roberta-base", "base_model:finetune:FacebookAI/roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-10T10:08:09Z
--- license: mit base_model: roberta-base tags: - generated_from_trainer model-index: - name: sentiment_analysis results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sentiment_analysis This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8114 - F1 Score: 0.7322 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.7577 | 1.0 | 1000 | 0.7996 | 0.6603 | | 0.7168 | 2.0 | 2000 | 0.7362 | 0.6627 | | 0.7201 | 3.0 | 3000 | 0.7231 | 0.6675 | | 0.6752 | 4.0 | 4000 | 0.7051 | 0.6970 | | 0.6374 | 5.0 | 5000 | 0.7167 | 0.7007 | | 0.6288 | 6.0 | 6000 | 0.7278 | 0.7193 | | 0.5579 | 7.0 | 7000 | 0.8242 | 0.7190 | | 0.5485 | 8.0 | 8000 | 0.7587 | 0.7291 | | 0.5309 | 9.0 | 9000 | 0.7876 | 0.7269 | | 0.4767 | 10.0 | 10000 | 0.8114 | 0.7322 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
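A minimal PyTorch inference sketch, assuming standard `transformers` loading; the input sentence is illustrative, and the class names depend on the checkpoint's `id2label` mapping, which the card does not document.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "IsaacSarps/sentiment_analysis"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("I really enjoyed this movie.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map the highest-scoring logit back to its checkpoint-defined label name.
print(model.config.id2label[logits.argmax(dim=-1).item()])
```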
goat923/my_awesome_qa_model
goat923
2023-09-10T12:22:50Z
68
0
transformers
[ "transformers", "tf", "distilbert", "question-answering", "generated_from_keras_callback", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2023-09-02T11:16:18Z
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_keras_callback model-index: - name: goat923/my_awesome_qa_model results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # goat923/my_awesome_qa_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.4850 - Validation Loss: 1.8321 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 500, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 3.3901 | 2.1715 | 0 | | 1.7137 | 1.8321 | 1 | | 1.4850 | 1.8321 | 2 | ### Framework versions - Transformers 4.34.0.dev0 - TensorFlow 2.13.0 - Datasets 2.14.5 - Tokenizers 0.13.3
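A minimal extractive-QA sketch, assuming the repo's TensorFlow weights load through the standard pipeline (`framework="tf"` is passed explicitly since no PyTorch weights are listed); the question and context are illustrative.

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="goat923/my_awesome_qa_model",
    framework="tf",  # the repo ships TensorFlow weights
)

result = qa(
    question="What does extractive QA return?",
    context="Extractive question answering returns a span copied from the context.",
)
print(result["answer"], result["score"])
```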
Ai-user1028/wildlife-tiger
Ai-user1028
2023-09-10T12:20:53Z
0
0
null
[ "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-09-10T12:19:42Z
--- license: creativeml-openrail-m tags: - NxtWave-GenAI-Webinar - text-to-image - stable-diffusion --- ### Wildlife-Tiger Dreambooth model trained by Ai-user1028 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: TIIPS-65 Sample pictures of this concept: ![0](https://huggingface.co/Ai-user1028/wildlife-tiger/resolve/main/sample_images/prom2.png)
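A minimal text-to-image sketch, assuming the repo contains full Stable Diffusion pipeline weights loadable with `diffusers` and that a CUDA GPU is available; using "wildlife-tiger" as the prompt token is a guess based on the concept name.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Ai-user1028/wildlife-tiger", torch_dtype=torch.float16
).to("cuda")

# "wildlife-tiger" as an instance token is inferred from the concept name.
image = pipe("a photo of wildlife-tiger walking through tall grass").images[0]
image.save("wildlife_tiger.png")
```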
MekeyPan/mt5-small-finetuned-amazon-en-zh
MekeyPan
2023-09-10T11:58:35Z
16
1
transformers
[ "transformers", "pytorch", "mt5", "text2text-generation", "summarization", "generated_from_trainer", "base_model:google/mt5-small", "base_model:finetune:google/mt5-small", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-09-10T10:56:40Z
--- license: apache-2.0 base_model: google/mt5-small tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-finetuned-amazon-en-zh results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-amazon-en-zh This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.1950 - Rouge1: 15.5597 - Rouge2: 6.7429 - Rougel: 15.1794 - Rougelsum: 15.063 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 8.0083 | 1.0 | 838 | 3.5147 | 13.2577 | 6.0411 | 12.9176 | 12.8293 | | 4.0156 | 2.0 | 1676 | 3.3382 | 14.2493 | 6.3606 | 13.9407 | 13.7391 | | 3.6492 | 3.0 | 2514 | 3.2576 | 15.915 | 7.4853 | 15.8512 | 15.72 | | 3.473 | 4.0 | 3352 | 3.2266 | 16.3162 | 6.6844 | 15.9962 | 15.8693 | | 3.3509 | 5.0 | 4190 | 3.2010 | 15.2992 | 6.2211 | 14.9191 | 14.8807 | | 3.2828 | 6.0 | 5028 | 3.2008 | 15.379 | 6.38 | 15.1408 | 15.0073 | | 3.2304 | 7.0 | 5866 | 3.2003 | 15.8089 | 6.7429 | 15.4859 | 15.3334 | | 3.191 | 8.0 | 6704 | 3.1950 | 15.5597 | 6.7429 | 15.1794 | 15.063 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
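A minimal summarization sketch using the standard pipeline; the review text is illustrative and the generation length settings are arbitrary choices.

```python
from transformers import pipeline

summarizer = pipeline(
    "summarization", model="MekeyPan/mt5-small-finetuned-amazon-en-zh"
)

review = (
    "I bought this kettle a month ago. It heats water quickly, the handle "
    "stays cool, and the auto shut-off works reliably. Great value overall."
)
print(summarizer(review, max_length=30, min_length=5)[0]["summary_text"])
```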
AlexZhukov/llm-bloom-ti-lora
AlexZhukov
2023-09-10T11:28:35Z
0
0
peft
[ "peft", "endpoints_compatible", "region:us" ]
null
2023-09-10T07:57:04Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: True - load_in_4bit: False - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: fp4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float32 ### Framework versions - PEFT 0.5.0
matam/ppo-Huggy
matam
2023-09-10T11:13:25Z
14
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Huggy", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2023-09-10T11:13:21Z
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser** 1. If the environment is part of the ML-Agents official environments, go to https://huggingface.co/unity 2. Step 1: Find your model_id: matam/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
makinaAI/makina_lora
makinaAI
2023-09-10T11:12:03Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-09-10T10:47:09Z
--- license: creativeml-openrail-m ---
ru4rg/msr
ru4rg
2023-09-10T11:07:02Z
190
0
transformers
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "huggingpics", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-09-10T11:06:56Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: msr results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.6696428656578064 --- # msr Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### cast iron bathtub ![cast iron bathtub](images/cast_iron_bathtub.jpg) #### heating radiator ![heating radiator](images/heating_radiator.jpg) #### steel bathtub ![steel bathtub](images/steel_bathtub.jpg) #### steel cable ![steel cable](images/steel_cable.jpg) #### steel pipe ![steel pipe](images/steel_pipe.jpg)
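A minimal image-classification sketch, assuming the ViT checkpoint loads with the standard pipeline; the file path is a placeholder for any local image.

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="ru4rg/msr")

# Placeholder path; any local image file (or a PIL.Image) works here.
for pred in classifier("some_photo.jpg"):
    print(f"{pred['label']}: {pred['score']:.3f}")
```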
buddhilive/bert-base-zero
buddhilive
2023-09-10T10:49:10Z
86
0
transformers
[ "transformers", "tf", "bert", "fill-mask", "generated_from_keras_callback", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2023-09-10T10:28:16Z
--- tags: - generated_from_keras_callback model-index: - name: bert-base-zero results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-zero This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 1e-04, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results ### Framework versions - Transformers 4.33.1 - TensorFlow 2.13.0 - Tokenizers 0.13.3
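A minimal fill-mask sketch, assuming the TensorFlow weights load through the standard pipeline; the masked sentence is illustrative, and since the card reports no evaluation results, predictions from this checkpoint may be weak.

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="buddhilive/bert-base-zero", framework="tf")

# Illustrative masked sentence; prints top predictions with scores.
for pred in fill("The capital of France is [MASK]."):
    print(pred["token_str"], round(pred["score"], 3))
```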
NewCosmos/distilhubert-finetuned-gtzan
NewCosmos
2023-09-10T10:32:09Z
167
0
transformers
[ "transformers", "pytorch", "hubert", "audio-classification", "generated_from_trainer", "dataset:marsyas/gtzan", "base_model:ntu-spml/distilhubert", "base_model:finetune:ntu-spml/distilhubert", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
audio-classification
2023-08-17T07:05:02Z
--- license: apache-2.0 base_model: ntu-spml/distilhubert tags: - generated_from_trainer datasets: - marsyas/gtzan metrics: - accuracy model-index: - name: distilhubert-finetuned-gtzan results: - task: name: Audio Classification type: audio-classification dataset: name: GTZAN type: marsyas/gtzan config: all split: train args: all metrics: - name: Accuracy type: accuracy value: 0.1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilhubert-finetuned-gtzan This model is a fine-tuned version of [ntu-spml/distilhubert](https://huggingface.co/ntu-spml/distilhubert) on the GTZAN dataset. It achieves the following results on the evaluation set: - Loss: nan - Accuracy: 0.1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0 | 1.0 | 225 | nan | 0.1 | ### Framework versions - Transformers 4.34.0.dev0 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
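A minimal audio-classification sketch with the standard pipeline; the file path is a placeholder. Note that the card reports a `nan` loss and 0.1 accuracy, so predictions from this particular checkpoint may be uninformative.

```python
from transformers import pipeline

clf = pipeline(
    "audio-classification", model="NewCosmos/distilhubert-finetuned-gtzan"
)
print(clf("music_clip.wav"))  # placeholder path to a local audio file
```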
Venkatesh4342/xlm-roberta-helpdesk-sentiment
Venkatesh4342
2023-09-10T10:04:31Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-10T07:40:46Z
--- license: mit tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: xlm-roberta-helpdesk-sentiment results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-helpdesk-sentiment This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1923 - Accuracy: 0.9556 - F1: 0.9549 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 0.88 | 100 | 0.4935 | 0.7889 | 0.7840 | | No log | 1.77 | 200 | 0.2955 | 0.8889 | 0.8867 | | No log | 2.65 | 300 | 0.1830 | 0.9111 | 0.9093 | | No log | 3.54 | 400 | 0.1461 | 0.9444 | 0.9431 | | 0.5007 | 4.42 | 500 | 0.1554 | 0.9556 | 0.9549 | | 0.5007 | 5.31 | 600 | 0.1923 | 0.9556 | 0.9549 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
robertquest/adetailer
robertquest
2023-09-10T10:04:15Z
86
1
ultralytics
[ "ultralytics", "pytorch", "dataset:wider_face", "dataset:skytnt/anime-segmentation", "license:agpl-3.0", "region:us" ]
null
2023-09-10T10:01:31Z
--- license: agpl-3.0 library_name: ultralytics datasets: - wider_face - skytnt/anime-segmentation tags: - pytorch --- # YOLOv8 Detection Model ## Datasets ### Face - [Anime Face CreateML](https://universe.roboflow.com/my-workspace-mph8o/anime-face-createml) - [xml2txt](https://universe.roboflow.com/0oooooo0/xml2txt-njqx1) - [AN](https://universe.roboflow.com/sed-b8vkf/an-lfg5i) - [wider face](http://shuoyang1213.me/WIDERFACE/index.html) ### Hand - [AnHDet](https://universe.roboflow.com/1-yshhi/anhdet) - [hand-detection-fuao9](https://universe.roboflow.com/catwithawand/hand-detection-fuao9) ### Person - [coco2017](https://cocodataset.org/#home) (only person) - [AniSeg](https://github.com/jerryli27/AniSeg) - [skytnt/anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation) ### deepfashion2 - [deepfashion2](https://github.com/switchablenorms/DeepFashion2) | id | label | | --- | --------------------- | | 0 | short_sleeved_shirt | | 1 | long_sleeved_shirt | | 2 | short_sleeved_outwear | | 3 | long_sleeved_outwear | | 4 | vest | | 5 | sling | | 6 | shorts | | 7 | trousers | | 8 | skirt | | 9 | short_sleeved_dress | | 10 | long_sleeved_dress | | 11 | vest_dress | | 12 | sling_dress | ## Info | Model | Target | mAP 50 | mAP 50-95 | | --------------------------- | --------------------- | ----------------------------- | ----------------------------- | | face_yolov8n.pt | 2D / realistic face | 0.660 | 0.366 | | face_yolov8n_v2.pt | 2D / realistic face | 0.669 | 0.372 | | face_yolov8s.pt | 2D / realistic face | 0.713 | 0.404 | | face_yolov8m.pt | 2D / realistic face | 0.737 | 0.424 | | hand_yolov8n.pt | 2D / realistic hand | 0.767 | 0.505 | | hand_yolov8s.pt | 2D / realistic hand | 0.794 | 0.527 | | person_yolov8n-seg.pt | 2D / realistic person | 0.782 (bbox)<br/>0.761 (mask) | 0.555 (bbox)<br/>0.460 (mask) | | person_yolov8s-seg.pt | 2D / realistic person | 0.824 (bbox)<br/>0.809 (mask) | 0.605 (bbox)<br/>0.508 (mask) | | person_yolov8m-seg.pt | 2D / realistic person | 0.849 (bbox)<br/>0.831 (mask) | 0.636 (bbox)<br/>0.533 (mask) | | deepfashion2_yolov8s-seg.pt | realistic clothes | 0.849 (bbox)<br/>0.840 (mask) | 0.763 (bbox)<br/>0.675 (mask) | ## Usage ```python from huggingface_hub import hf_hub_download from ultralytics import YOLO path = hf_hub_download("Bingsu/adetailer", "face_yolov8n.pt") model = YOLO(path) ``` ```python import cv2 from PIL import Image img = "https://farm5.staticflickr.com/4139/4887614566_6b57ec4422_z.jpg" output = model(img) pred = output[0].plot() pred = cv2.cvtColor(pred, cv2.COLOR_BGR2RGB) pred = Image.fromarray(pred) pred ``` ![image](https://i.imgur.com/9ny1wmD.png)
SouthMemphis/t5-small_for_summarization
SouthMemphis
2023-09-10T09:50:23Z
59
0
transformers
[ "transformers", "tf", "t5", "text2text-generation", "generated_from_keras_callback", "base_model:google-t5/t5-small", "base_model:finetune:google-t5/t5-small", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2023-09-10T06:56:59Z
--- license: apache-2.0 base_model: t5-small tags: - generated_from_keras_callback model-index: - name: SouthMemphis/t5-small_for_summarization results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # SouthMemphis/t5-small_for_summarization This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.0656 - Validation Loss: 2.6739 - Train Rouge1: 23.7763 - Train Rouge2: 5.3102 - Train Rougel: 18.5812 - Train Rougelsum: 18.5773 - Train Gen Len: 18.667 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Rouge1 | Train Rouge2 | Train Rougel | Train Rougelsum | Train Gen Len | Epoch | |:----------:|:---------------:|:------------:|:------------:|:------------:|:---------------:|:-------------:|:-----:| | 3.0656 | 2.6739 | 23.7763 | 5.3102 | 18.5812 | 18.5773 | 18.667 | 0 | ### Framework versions - Transformers 4.33.1 - TensorFlow 2.15.0-dev20230905 - Datasets 2.14.4 - Tokenizers 0.13.3
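A minimal TensorFlow generation sketch; the `summarize:` task prefix follows the usual T5 convention, and the input text and decoding settings are illustrative.

```python
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

repo = "SouthMemphis/t5-small_for_summarization"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFAutoModelForSeq2SeqLM.from_pretrained(repo)

# T5 expects a task prefix; the article text is a placeholder.
text = "summarize: The city council met on Tuesday to debate the new transit plan..."
inputs = tokenizer(text, return_tensors="tf", truncation=True)
ids = model.generate(inputs.input_ids, max_new_tokens=60)
print(tokenizer.decode(ids[0], skip_special_tokens=True))
```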
swechatelangana/whisper-small-te-146h
swechatelangana
2023-09-10T09:40:54Z
265
2
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "hf-asr-leaderboard", "generated_from_trainer", "te", "dataset:INDIC_SUPERB_MUCS_OPENSLR", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-12-19T19:20:57Z
--- language: - te license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - INDIC_SUPERB_MUCS_OPENSLR metrics: - wer model-index: - name: Swecha Gonthuka - Limited Release results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Crowd-sourced dataset type: INDIC SUPERB, MUCS, OPENSLR config: None split: None args: 'config: te, split: test' metrics: - name: Wer type: wer value: 28.59758159493464 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Swecha Gonthuka - Limited Release This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on a crowd-sourced dataset. It achieves the following results on the evaluation set: - Loss: 0.0768 - Wer: 28.5976 # Collaborators Trained by Naga Budigam ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 3000 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.0729 | 1.08 | 5000 | 0.0934 | 33.3306 | | 0.0519 | 2.16 | 10000 | 0.0768 | 28.5976 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0 - Datasets 2.7.1 - Tokenizers 0.13.2
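A minimal transcription sketch with the standard ASR pipeline; the audio path is a placeholder and `chunk_length_s` is an arbitrary choice that lets the pipeline handle long clips.

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="swechatelangana/whisper-small-te-146h",
    chunk_length_s=30,  # arbitrary; enables chunked decoding of long audio
)
print(asr("telugu_sample.wav")["text"])  # placeholder audio file
```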
starfin/W.D.Gaster
starfin
2023-09-10T09:39:29Z
0
0
null
[ "music", "en", "ja", "license:openrail", "region:us" ]
null
2023-08-05T10:11:01Z
--- license: openrail language: - en - ja tags: - music --- RVC model of Gaster trained for 500 epochs on a 12-minute dataset. Two other models are available: a 50-epoch version and one trained on a shorter dataset.
rurulemon/lora-trained-xl-colab
rurulemon
2023-09-10T09:21:45Z
2
1
diffusers
[ "diffusers", "tensorboard", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "lora", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "region:us" ]
text-to-image
2023-09-09T14:07:47Z
--- license: openrail++ base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: a photo of celebrity tags: - stable-diffusion-xl - stable-diffusion-xl-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - rurulemon/lora-trained-xl-colab These are LoRA adaptation weights for stabilityai/stable-diffusion-xl-base-1.0. The weights were trained on the instance prompt "a photo of celebrity" using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. LoRA for the text encoder was enabled: False. Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
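A minimal sketch for applying the LoRA on top of the stated base model with `diffusers`; a CUDA GPU is assumed, and the prompt mirrors the card's instance prompt.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Load the LoRA adaptation weights onto the base SDXL pipeline.
pipe.load_lora_weights("rurulemon/lora-trained-xl-colab")

image = pipe("a photo of celebrity", num_inference_steps=25).images[0]
image.save("sample.png")
```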
osieosie/llama-samsum-4bit-13b-bnb-seed43
osieosie
2023-09-10T09:19:50Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T07:49:27Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.5.0.dev0
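A minimal loading sketch that mirrors the quantization config listed above; the base checkpoint `meta-llama/Llama-2-13b-hf` is inferred from the repo name and is an assumption (it is also gated, so Hub authentication is assumed).

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the bitsandbytes settings from the card: 4-bit NF4 with double quant.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)

base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-hf",  # inferred from the repo name; verify
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "osieosie/llama-samsum-4bit-13b-bnb-seed43")
```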
csukuangfj/sherpa-onnx-streaming-zipformer-en-20M-2023-02-17
csukuangfj
2023-09-10T09:07:28Z
0
0
null
[ "onnx", "license:apache-2.0", "region:us" ]
null
2023-09-10T08:47:59Z
--- license: apache-2.0 --- # Introduction This model is exported from https://huggingface.co/desh2608/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-small. Please see https://github.com/k2-fsa/icefall/pull/903. It can be run with [sherpa-onnx](https://github.com/k2-fsa/sherpa-onnx).
andrew45/distilbert-base-uncased-finetuned-emotion
andrew45
2023-09-10T08:47:29Z
105
0
transformers
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-09-10T08:08:48Z
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.921 - name: F1 type: f1 value: 0.9212643452162217 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2328 - Accuracy: 0.921 - F1: 0.9213 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8876 | 1.0 | 250 | 0.3498 | 0.9015 | 0.9004 | | 0.2726 | 2.0 | 500 | 0.2328 | 0.921 | 0.9213 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
ugshanyu/llama2-qlora-finetunined-french
ugshanyu
2023-09-10T08:46:39Z
0
0
peft
[ "peft", "region:us" ]
null
2023-09-10T08:46:34Z
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.6.0.dev0
Mira-LeafTown/GPT-2-Chinese-AnimeThesaurus
Mira-LeafTown
2023-09-10T08:23:06Z
179
4
transformers
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "zh", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-09-01T16:52:45Z
--- license: mit language: - zh pipeline_tag: text-generation widget: - text: "[CLS]笨蛋" --- # GPT-2-Chinese-AnimeThesaurus A Chinese GPT-2 chat model. The dataset comes from https://github.com/Kyomotoi/AnimeThesaurus and training used the project https://github.com/yangjianxin1/GPT2-chitchat
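A minimal generation sketch, assuming the bundled tokenizer resolves via `AutoTokenizer` (GPT2-chitchat-style models often ship a BERT-style tokenizer); the `[CLS]` prefix follows the widget example above, and the sampling settings are arbitrary.

```python
from transformers import AutoTokenizer, GPT2LMHeadModel

repo = "Mira-LeafTown/GPT-2-Chinese-AnimeThesaurus"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = GPT2LMHeadModel.from_pretrained(repo)

# Prompt format follows the card's widget example.
ids = tokenizer.encode("[CLS]笨蛋", return_tensors="pt")
out = model.generate(ids, max_new_tokens=40, do_sample=True, top_k=50)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```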
Pablo94/roberta-base-bne-finetuned-detests
Pablo94
2023-09-10T08:11:40Z
5
0
transformers
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "base_model:BSC-LT/roberta-base-bne", "base_model:finetune:BSC-LT/roberta-base-bne", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-05-12T14:18:07Z
--- license: apache-2.0 base_model: BSC-TeMU/roberta-base-bne tags: - generated_from_trainer metrics: - accuracy - precision - recall model-index: - name: roberta-base-bne-finetuned-detests results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-bne-finetuned-detests This model is a fine-tuned version of [BSC-TeMU/roberta-base-bne](https://huggingface.co/BSC-TeMU/roberta-base-bne) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1686 - Accuracy: 0.8494 - F1-score: 0.7869 - Precision: 0.7855 - Recall: 0.7883 - Auc: 0.7883 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1-score | Precision | Recall | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|:---------:|:------:|:------:| | 0.0238 | 1.0 | 174 | 0.6262 | 0.8543 | 0.7656 | 0.8161 | 0.7382 | 0.7382 | | 0.0269 | 2.0 | 348 | 1.1233 | 0.8298 | 0.6964 | 0.7997 | 0.6665 | 0.6665 | | 0.0003 | 3.0 | 522 | 0.9814 | 0.8429 | 0.7600 | 0.7839 | 0.7435 | 0.7435 | | 0.0001 | 4.0 | 696 | 1.1054 | 0.8445 | 0.7794 | 0.7787 | 0.7801 | 0.7801 | | 0.0001 | 5.0 | 870 | 1.1088 | 0.8511 | 0.7948 | 0.7865 | 0.8046 | 0.8046 | | 0.0001 | 6.0 | 1044 | 1.1265 | 0.8511 | 0.7908 | 0.7873 | 0.7945 | 0.7945 | | 0.0001 | 7.0 | 1218 | 1.1441 | 0.8494 | 0.7879 | 0.7852 | 0.7909 | 0.7909 | | 0.0 | 8.0 | 1392 | 1.1574 | 0.8494 | 0.7869 | 0.7855 | 0.7883 | 0.7883 | | 0.0 | 9.0 | 1566 | 1.1657 | 0.8494 | 0.7869 | 0.7855 | 0.7883 | 0.7883 | | 0.0 | 10.0 | 1740 | 1.1686 | 0.8494 | 0.7869 | 0.7855 | 0.7883 | 0.7883 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
Chang-Su/llama-2-13b-chat-ko
Chang-Su
2023-09-10T08:02:21Z
64
5
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-08-09T14:42:53Z
--- license: cc-by-nc-sa-4.0 ---
Adbhut/whisper-small-dv
Adbhut
2023-09-10T07:38:50Z
75
0
transformers
[ "transformers", "pytorch", "whisper", "automatic-speech-recognition", "dv", "dataset:mozilla-foundation/common_voice_13_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2023-09-09T14:24:47Z
--- license: apache-2.0 datasets: - mozilla-foundation/common_voice_13_0 language: - dv metrics: - wer pipeline_tag: automatic-speech-recognition --- This is whisper-small fine-tuned for 500 steps on Common Voice 13 (Divehi). It achieves a normalized WER of 12.66 on the test set.
csukuangfj/sherpa-onnx-streaming-zipformer-zh-14M-2023-02-23
csukuangfj
2023-09-10T07:33:07Z
0
1
null
[ "onnx", "license:apache-2.0", "region:us" ]
null
2023-09-10T07:29:01Z
--- license: apache-2.0 --- # Introduction Models in this repo are converted from https://huggingface.co/csukuangfj/sherpa-onnx-streaming-zipformer-zh-14M-2023-02-23 using [./export-onnx-zh-14M.sh](./export-onnx-zh-14M.sh).