{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ew25zGr2oXpx", "outputId": "22349eb1-d8d4-47f2-ec49-3a558512ec66" }, "outputs": [], "source": [ "!pip install tqdm\n", "!pip install transformers==4.40.1\n", "!pip install sentencepiece\n", "!pip install datasets\n", "!pip install \"unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git\"\n", "!pip install trl\n", "!pip install triton\n", "!pip install bitsandbytes\n", "!pip install --no-deps trl peft accelerate bitsandbytes\n", "!pip install xformers\n", "!pip install pytorch-cuda==12.1 torch xformers\n", "#!pip install --no-deps xformers trl peft accelerate bitsandbytes\n", "#!pip install \"unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git\"\n", "!pip install hyperopt\n", "!pip install optuna" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "dH4JvbO9oiHE", "outputId": "399bc210-c095-4807-900f-6b4cf2fe133f" }, "outputs": [], "source": [ "!python -m xformers.info\n", "!python -m bitsandbytes\n", "!nvidia-smi\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "3sl1jjxFMeVx", "outputId": "f0221fec-8c3f-4fbe-eb8a-e58df86399ce" }, "outputs": [], "source": [ "import json\n", "import torch\n", "from datasets import load_dataset\n", "from huggingface_hub import notebook_login\n", "from transformers import TrainingArguments\n", "from trl import SFTTrainer\n", "from unsloth import FastLanguageModel\n", "print(torch.__version__)\n", "print(torch.version.cuda)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "CIwMK9N7j8Dx" }, "outputs": [], "source": [ "# Defining the configuration for the base model, LoRA and training\n", "config = {\n", " \"hugging_face_username\":\"ruslanmv\",\n", " \"model_config\": {\n", " \"base_model\":\"meta-llama/Meta-Llama-3-8B-Instruct\", # The base model\n", " \"finetuned_model\":\"ruslanmv/Medical-Mind-Llama-3-8b\", # The finetuned model\n", " \"max_seq_length\": 2048, # The maximum sequence length\n", " # \"dtype\":torch.float16, # The data type\n", " # \"dtype\": torch.float32, # Use float32 instead of half CUDA capability < 8\n", " \"dtype\" : None, # None for auto detection. 
Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n", "\n", " \"load_in_4bit\": True, # Load the model in 4-bit\n", " },\n", " \"lora_config\": {\n", " \"r\": 16, # The number of LoRA layers 8, 16, 32, 64\n", " \"target_modules\": [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n", " \"gate_proj\", \"up_proj\", \"down_proj\"], # The target modules\n", " \"lora_alpha\":16, # The alpha value for LoRA\n", " #\"lora_alpha\":15, # The alpha value for LoRA by search grid\n", " \"lora_dropout\":0, # The dropout value for LoRA\n", " \"bias\":\"none\", # The bias for LoRA\n", " \"use_gradient_checkpointing\":True, # Use gradient checkpointing\n", " \"use_rslora\":False, # Use RSLora\n", " \"use_dora\":False, # Use DoRa\n", " \"loftq_config\":None # The LoFTQ configuration\n", " },\n", "\n", " \"training_config\": {\n", " \"per_device_train_batch_size\": 2, # The batch size\n", " #\"per_device_train_batch_size\": 6, # The batch size by search grid\n", " \"gradient_accumulation_steps\": 4, # The gradient accumulation steps\n", " #\"gradient_accumulation_steps\": 7, # The gradient accumulation steps by search grid\n", " \"warmup_steps\": 5, # The warmup steps\n", " \"max_steps\":0, # The maximum steps (0 if the epochs are defined)\n", " \"num_train_epochs\": 1, # The number of training epochs(0 if the maximum steps are defined)\n", " \"learning_rate\": 2e-4, # The learning rate\n", " #\"learning_rate\": 9.5e-05, # The learning rate by search grid\n", " \"fp16\": not torch.cuda.is_bf16_supported(), # The fp16\n", " \"bf16\": torch.cuda.is_bf16_supported(), # The bf16\n", " \"logging_steps\": 1, # The logging steps\n", " \"optim\" :\"adamw_8bit\", # The optimizer\n", " \"weight_decay\" : 0.01, # The weight decay\n", " \"lr_scheduler_type\": \"linear\", # The learning rate scheduler\n", " \"seed\" : 42, # The seed\n", " \"output_dir\" : \"outputs\", # The output directory\n", " }\n", "}" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "pyN9TpNj8lUQ" }, "outputs": [], "source": [ "config_dataset={ \"training_dataset\": {\n", " \"name\": \"ruslanmv/ai-medical-dataset\", # The dataset name(huggingface/datasets)\n", " \"split\": \"train\", # The dataset split\n", " \"input_fields\": [\"question\", \"context\"] ,# The input fields\n", " \"input_field\": \"text\",# The input field\n", " },\n", " }" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ztqrczgo9Zrg", "outputId": "09eaabca-aba6-485a-8bab-6ddd96b077b9" }, "outputs": [], "source": [ "# Loading the model and the tokinizer for the model\n", "model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name = config.get(\"model_config\").get(\"base_model\"),\n", " max_seq_length = config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype = config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit = config.get(\"model_config\").get(\"load_in_4bit\"),\n", "\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "hSQMljYD9hCh", "outputId": "7081b216-b5a7-4b36-fe01-91c166d9e491" }, "outputs": [], "source": [ "# Setup for QLoRA/LoRA peft of the base model\n", "model = FastLanguageModel.get_peft_model(\n", " model,\n", " r = config.get(\"lora_config\").get(\"r\"),\n", " target_modules = config.get(\"lora_config\").get(\"target_modules\"),\n", " lora_alpha = config.get(\"lora_config\").get(\"lora_alpha\"),\n", " lora_dropout = 
config.get(\"lora_config\").get(\"lora_dropout\"),\n", " bias = config.get(\"lora_config\").get(\"bias\"),\n", " use_gradient_checkpointing = config.get(\"lora_config\").get(\"use_gradient_checkpointing\"),\n", " random_state = 42,\n", " use_rslora = config.get(\"lora_config\").get(\"use_rslora\"),\n", " use_dora = config.get(\"lora_config\").get(\"use_dora\"),\n", " loftq_config = config.get(\"lora_config\").get(\"loftq_config\"),\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 131, "referenced_widgets": [ "5d1fbd3c62d94df7befdefc451221414", "8ad6abb48f38469f9d399eea8f5e5b70", "6cea0da24cf54811a43168c606759bab", "eb8c88f5c06c49fe9099371b3cf112ae", "89a1354722e640758978befc06ed4a78", "39d3b72ab6214bcf9b0bb6b6294e957c", "696e82ec6a174974a90d5abc7c101ee7", "dade882aca304a31b693a2c58807d825", "02fc530028ea4d538b7f6b48463ae700", "00eea4b0c6e44c62900ea8e7d919efe9", "fe17bedb5ef04d8b9e064fa1e0d75185", "bb1156b7d349440d9cc8a2f0328465a7", "23a71f8847e647daba35e495706fc846", "3f7afd4bd28842cbb73e62c155667030", "a419499622cd4374937423a79677298f", "64539b4212fe4d989976f56369bb746b", "22ea45365d21439fb5069974bbe69711", "bd087d0aa3214c5dbecc9b0bd4d976df", "9a5fd3a68fd1445f92bea51a7fec3e6b", "37803098ceed4528bb690ebee028c840", "b93514308ae44afbb1a0511f5f9c6ddf", "58b932a03b2c4aa4891d541f186244b9", "3564e3cf0fe84281838d84525794e735", "912164947c5847908424f3e60c5adb64", "7517ce80636040e29665a9353afab183", "e14b9d980a1a41fb9e81385cb0f73d3a", "ada78aafba3f47ab8eb45cf3c83a6805", "ff108c92fb5547869ee545cf9a094b07", "2c5564fb033346afbe7692a24a52b302", "bb078c8c1f6a48359dc654d91ece684d", "9b9322336b564a409086955ebda07fc3", "9bceb9eddb2147c1abbf3391c70e6784", "8a195771bdc0462e8f9fbb60eb9141b1" ] }, "id": "ty1UIoRd9Hlv", "outputId": "59bba8f0-2329-465f-dbe7-b5ee5adf3ee2" }, "outputs": [], "source": [ "from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer\n", "tokenizer = AutoTokenizer.from_pretrained(config.get(\"model_config\").get(\"base_model\"))\n", "\n", "\n", "tokenizer.add_eos_token = True\n", "tokenizer.pad_token_id = 0\n", "tokenizer.padding_side = \"left\"\n", "\n", "# Loading the training dataset\n", "train_dataset = load_dataset(config_dataset.get(\"training_dataset\").get(\"name\"), split = config_dataset.get(\"training_dataset\").get(\"split\"))\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "Vk-n3n_x9wh1" }, "outputs": [], "source": [ "# Select the first 100 rows of the dataset\n", "test_dataset = train_dataset.select(range(100))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 174, "referenced_widgets": [ "e3bd7f85ce194cd4b697c2eb82038658", "734b6d3e3406403293c4bc955a643528", "0005f2d9fe1e4cc98ea58b0c2868b433", "be6162f66e594d3ebd8c53ebab3bbfa6", "7e11cccce8be49008f8db3a0c3ea603d", "dc3b2edc3f5d480a93b57b15b4444608", "7967d420aff1414e9fe53eb04c928eb4", "45c1d5b0df0e420a87f791dd4cf0e425", "9ed49f1a099846a3a65cd6608bafb0e4", "963c0aa5620b4ea8b5a903894646121c", "31a203cdd2f54cda8a05214844888156" ] }, "id": "x8U2HpEh-OFi", "outputId": "837b69f8-88f2-48a9-8e11-6178b4a5c269" }, "outputs": [], "source": [ "medical_prompt = \"\"\"You are an AI Medical Assistant Chatbot, trained to answer medical questions. Below is an instruction that describes a task, paired with an response context. 
Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "{}\n", "\n", "\n", "### Response:\n", "{}\"\"\"\n", "\n", "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n", "def formatting_prompts_func(examples):\n", " instructions = examples[\"question\"]\n", " outputs = examples[\"context\"]\n", " texts = []\n", " for instruction, output in zip(instructions, outputs):\n", " # Must add EOS_TOKEN, otherwise your generation will go on forever!\n", " text = medical_prompt.format(instruction, output) + EOS_TOKEN\n", " texts.append(text)\n", " return { \"text\" : texts, }\n", "pass\n", "\n", "test_dataset= test_dataset.map(formatting_prompts_func, batched = True,)\n", "\n", "\n", "\n", "test_dataset['text'][1]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "DKAZ3zhx-TZA", "outputId": "5fc0788a-3d11-4bcf-e502-717e7b3b5b2c" }, "outputs": [], "source": [ "test_dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 140, "referenced_widgets": [ "72eca1e2871b458abd3383d9711215a2", "058b2b9959b84b6f9f5d3862ef53d029", "85d4879bd7d64766905db34cef052fed", "44f189b81bbd48ca8cb146ead641d2b5", "f89c5c949e984361bce7f97d86d2a2e5", "7807f312425b4f4d9249aa1ac77d7461", "d8e7ea9552a84b8284b31d77090b54af", "0058ed544fed4272848a891a68b9adc0", "33fb10908c23457aa4796626102fc8c5", "e903140c8c794c48b231924d3975b7a6", "7e74d789c82747e0b5066a00b9e36c1d" ] }, "id": "JkMVp2ZplGPA", "outputId": "3c0777d0-e2a1-4a27-f035-615da4495e45" }, "outputs": [], "source": [ "# Setting up the trainer for the model\n", "trainer_test = SFTTrainer(\n", " model = model,\n", " tokenizer = tokenizer,\n", " train_dataset = test_dataset,\n", " dataset_text_field = config_dataset.get(\"training_dataset\").get(\"input_field\"),\n", " max_seq_length = config.get(\"model_config\").get(\"max_seq_length\"),\n", " dataset_num_proc = 2,\n", " packing = False,\n", " args = TrainingArguments(\n", " per_device_train_batch_size = config.get(\"training_config\").get(\"per_device_train_batch_size\"),\n", " gradient_accumulation_steps = config.get(\"training_config\").get(\"gradient_accumulation_steps\"),\n", " warmup_steps = config.get(\"training_config\").get(\"warmup_steps\"),\n", " max_steps = config.get(\"training_config\").get(\"max_steps\"),\n", " num_train_epochs= config.get(\"training_config\").get(\"num_train_epochs\"),\n", " learning_rate = config.get(\"training_config\").get(\"learning_rate\"),\n", " fp16 = config.get(\"training_config\").get(\"fp16\"),\n", " bf16 = config.get(\"training_config\").get(\"bf16\"),\n", " logging_steps = config.get(\"training_config\").get(\"logging_steps\"),\n", " optim = config.get(\"training_config\").get(\"optim\"),\n", " weight_decay = config.get(\"training_config\").get(\"weight_decay\"),\n", " lr_scheduler_type = config.get(\"training_config\").get(\"lr_scheduler_type\"),\n", " seed = 42,\n", " output_dir = config.get(\"training_config\").get(\"output_dir\"),\n", " ),\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ZeRzS2N0kADu" }, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": { "id": "t00fCPO9zf8x" }, "source": [ "## Method 1 optuna" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 725 }, "id": "H_MOQOYBj5jx", "outputId": "699f77e3-8754-4087-dd78-565bca527d08" }, "outputs": [], 
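"source": [ "# A minimal, hedged sketch of Optuna's suggest API, shown before the full objective in the next cell.\n", "# Optuna can sample a continuous range (suggest_float/suggest_int, used below) or pick directly from a\n", "# discrete list with suggest_categorical; `toy_objective` and its dummy score are illustrative only.\n", "import optuna\n", "\n", "def toy_objective(trial):\n", " lr = trial.suggest_float(\"learning_rate\", 1e-5, 2e-4, log=True) # log-scaled continuous range\n", " batch_size = trial.suggest_categorical(\"per_device_train_batch_size\", [2, 4, 8]) # discrete choice\n", " alpha = trial.suggest_categorical(\"lora_alpha\", [8, 16, 32])\n", " return (lr * 1e4 - 1.0) ** 2 + batch_size / 8 + alpha / 32 # dummy value, just to exercise the API\n", "\n", "toy_study = optuna.create_study(direction=\"minimize\")\n", "toy_study.optimize(toy_objective, n_trials=3)\n", "print(toy_study.best_params)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], 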
"source": [ "from optuna import create_study, Trial\n", "\n", "# Define search space\n", "search_space = {\n", " \"learning_rate\": [1e-5, 5e-5, 1e-4, 2e-4],\n", " \"per_device_train_batch_size\": [2, 4, 8],\n", " \"lora_alpha\": [8, 16, 32],\n", "}\n", "\n", "def objective(trial):\n", " # Set hyperparameters based on trial values\n", " config[\"training_config\"][\"learning_rate\"] = trial.suggest_float(\"learning_rate\", search_space[\"learning_rate\"][0], search_space[\"learning_rate\"][-1])\n", " config[\"training_config\"][\"per_device_train_batch_size\"] = trial.suggest_int(\"per_device_train_batch_size\", search_space[\"per_device_train_batch_size\"][0], search_space[\"per_device_train_batch_size\"][-1])\n", " config[\"lora_config\"][\"lora_alpha\"] = trial.suggest_int(\"lora_alpha\", search_space[\"lora_alpha\"][0], search_space[\"lora_alpha\"][-1])\n", "\n", " # Train the model with the current hyperparameters\n", " try:\n", " trainer_stats = trainer_test.train() # Assuming this trains the model\n", " return trainer_stats[\"train_loss\"] # Assuming this is the metric to minimize\n", " except Exception as e:\n", " return float(\"inf\") # Assign a high value if training fails\n", "\n", "study = create_study(direction=\"minimize\")\n", "study.optimize(objective, n_trials=2) # Adjust the number of trials\n", "\n", "# Access the best trial and its hyperparameters after optimization\n", "best_trial = study.best_trial\n", "best_params = best_trial.params\n", "\n", "print(\"Best Trial:\", best_trial.number)\n", "print(\"Best Hyperparameters:\", best_params)\n", "print(\"Best Training Loss:\", best_trial.value)\n" ] }, { "cell_type": "markdown", "metadata": { "id": "-84LcTvQ_xtH" }, "source": [ "## Analyzing Hyperparameters:\n", "\n", "* **Batch Size**: Generally, increasing the batch size can improve\n", "\n", "\n", "training speed by utilizing hardware resources more efficiently. However, there's a limit beyond which performance degrades. You can tune the batch size within a reasonable range (e.g., 2, 4, 8, 16) to see its impact.\n", "* **Learning Rate**: A higher learning rate can accelerate training initially. But, a too high value can lead to unstable training and potentially slower convergence. Consider a range of learning rates (e.g., log-uniform distribution between 1e-5 and 1e-3) for exploration.\n", "* **Gradient Accumulation Steps**: This technique accumulates gradients over multiple batches before updating model weights. It can help reduce memory requirements but might slow down training per epoch. Experiment with different accumulation steps (e.g., 1, 2, 4) to find a balance.\n", "* **Optimizer Choice**: Some optimizers like Adam or SGD with momentum can be faster than others depending on the model and dataset. Explore different optimizers and their hyperparameters (e.g., momentum coefficient) to see if they lead to faster convergence.\n", "## Additional Considerations:\n", "\n", "Early Stopping: Implement early stopping to automatically terminate training if the validation loss doesn't improve for a certain number of epochs. This can save training time if the model starts overfitting.\n", "Warmup Steps: A gradual increase in the learning rate during the initial training phase (warmup steps) can improve stability and potentially accelerate convergence compared to a fixed learning rate from the beginning.\n", "\n", "\n", "* Experimentation and Profiling:\n", "\n", "The best hyperparameters for faster training depend on your specific model, dataset, and hardware. 
You'll need to experiment with different configurations using tools like Hyperopt to find the optimal settings.\n", "Consider using profiling tools to identify bottlenecks in your training pipeline. This can help you focus on optimizing specific parts of the training process that are most time-consuming.\n", "By analyzing these hyperparameters and implementing techniques like early stopping and warmup steps, you can potentially achieve faster fine-tuning while maintaining good model performance." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "SdhZf88L_xdk" }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "jRbfR2n1wZrt" }, "outputs": [], "source": [ "## Method 1b Speed" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 636 }, "id": "uf-zwbRPteGH", "outputId": "33a23501-2a5b-4c7d-faf8-e97e4c653811" }, "outputs": [], "source": [ "from optuna import create_study, Trial\n", "import time # Assuming you can use time.time() to measure training time\n", "\n", "# Define search space with additional hyperparameter\n", "search_space = {\n", " \"learning_rate\": [1e-5, 5e-5, 1e-4, 2e-4],\n", " \"per_device_train_batch_size\": [2, 4, 8],\n", " \"lora_alpha\": [8, 16, 32],\n", " \"gradient_accumulation_steps\": [1, 2, 4, 8], # Added gradient accumulation steps\n", "}\n", "\n", "def objective(trial):\n", " # Set hyperparameters based on trial values\n", " config[\"training_config\"][\"learning_rate\"] = trial.suggest_float(\"learning_rate\", search_space[\"learning_rate\"][0], search_space[\"learning_rate\"][-1])\n", " config[\"training_config\"][\"per_device_train_batch_size\"] = trial.suggest_int(\"per_device_train_batch_size\", search_space[\"per_device_train_batch_size\"][0], search_space[\"per_device_train_batch_size\"][-1])\n", " config[\"training_config\"][\"gradient_accumulation_steps\"] = trial.suggest_int(\"gradient_accumulation_steps\", search_space[\"gradient_accumulation_steps\"][0], search_space[\"gradient_accumulation_steps\"][-1])\n", " config[\"lora_config\"][\"lora_alpha\"] = trial.suggest_int(\"lora_alpha\", search_space[\"lora_alpha\"][0], search_space[\"lora_alpha\"][-1])\n", "\n", " # Train the model with the current hyperparameters\n", " start_time = time.time()\n", " try:\n", " trainer_stats = trainer_test.train()\n", " training_time = time.time() - start_time\n", " return training_time # Minimize training time\n", " except Exception as e:\n", " return float(\"inf\") # Assign a high value if training fails\n", "\n", "study = create_study(direction=\"minimize\")\n", "study.optimize(objective, n_trials=2) # Adjust the number of trials\n", "\n", "# Access the best trial and its hyperparameters after optimization\n", "best_trial = study.best_trial\n", "best_params = best_trial.params\n", "\n", "print(\"Best Trial:\", best_trial.number)\n", "print(\"Best Hyperparameters (Likely Fastest):\", best_params)\n", "print(\"Best Training Time:\", best_trial.value, \"seconds\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1Vz6NAbxxxlM", "outputId": "ec166d41-fa4d-40a3-df3e-f44890010f07" }, "outputs": [], "source": [ "import hyperopt\n", "from hyperopt import hp\n", "from hyperopt import Trials\n", "from hyperopt import fmin, tpe, Trials\n", "# Define the search space for hyperparameters\n", "space = {\n", " 'learning_rate': hp.loguniform('learning_rate', -5, 
-1), # Learning rate in log scale\n", " 'lora_alpha': hp.quniform('lora_alpha', 1, 32, 1), # LoRA alpha with quantized steps\n", " 'lora_dropout': hp.uniform('lora_dropout', 0, 0.5), # LoRA dropout rate\n", " # Uncomment these if you want to tune them\n", " # 'per_device_train_batch_size': hp.quniform('per_device_train_batch_size', 2, 16, 1),\n", " # 'gradient_accumulation_steps': hp.quniform('gradient_accumulation_steps', 1, 8, 1),\n", " # 'warmup_steps': hp.quniform('warmup_steps', 0, 1000, 1),\n", " # 'num_train_epochs': hp.quniform('num_train_epochs', 1, 5, 1),\n", "}\n", "def objective(params):\n", " # Set hyperparameters in the config dictionary (assuming it's defined elsewhere)\n", " config['training_config']['learning_rate'] = params['learning_rate']\n", " config['lora_config']['lora_alpha'] = params['lora_alpha']\n", " config['lora_config']['lora_dropout'] = params['lora_dropout']\n", " # ... Set other hyperparameters from params dictionary ...\n", " #config['training_config']['per_device_train_batch_size'] = params['per_device_train_batch_size']\n", " #config['training_config']['gradient_accumulation_steps'] = params['gradient_accumulation_steps']\n", " #config['training_config']['warmup_steps'] = params['warmup_steps']\n", " #config['training_config']['num_train_epochs'] = params['num_train_epochs']\n", "\n", " # Load the model and tokenizer (assuming these are defined elsewhere)\n", " try:\n", " model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name=config.get(\"model_config\").get(\"base_model\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype=config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit=config.get(\"model_config\").get(\"load_in_4bit\"),\n", " )\n", " except Exception as e:\n", " print(f\"Error loading model and tokenizer: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", "\n", " # Setup LoRA for the model (assuming FastLanguageModel supports LoRA)\n", " try:\n", " model = FastLanguageModel.get_peft_model(\n", " model,\n", " r=config.get(\"lora_config\").get(\"r\"),\n", " target_modules=config.get(\"lora_config\").get(\"target_modules\"),\n", " lora_alpha=params['lora_alpha'],\n", " lora_dropout=params['lora_dropout'],\n", " bias=config.get(\"lora_config\").get(\"bias\"),\n", " use_gradient_checkpointing=config.get(\"lora_config\").get(\"use_gradient_checkpointing\"),\n", " random_state=42,\n", " use_rslora=config.get(\"lora_config\").get(\"use_rslora\"),\n", " use_dora=config.get(\"lora_config\").get(\"use_dora\"),\n", " loftq_config=config.get(\"lora_config\").get(\"loftq_config\")\n", " )\n", " except Exception as e:\n", " print(f\"Error setting up LoRA: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", " # Train the model on the test dataset (assuming SFTTrainer and training arguments are defined)\n", " try:\n", " trainer = SFTTrainer(\n", " model=model,\n", " tokenizer=tokenizer,\n", " train_dataset=test_dataset,\n", " dataset_text_field=config_dataset.get(\"training_dataset\").get(\"input_field\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dataset_num_proc=2,\n", " packing=False,\n", " args=TrainingArguments(\n", " per_device_train_batch_size=int(params['per_device_train_batch_size']),\n", " gradient_accumulation_steps=params['gradient_accumulation_steps'],\n", " warmup_steps=params['warmup_steps'],\n", " max_steps=config.get(\"training_config\").get(\"max_steps\"),\n", " 
num_train_epochs=params['num_train_epochs'],\n", " learning_rate=params['learning_rate'],\n", " fp16=config.get(\"training_config\").get(\"fp16\"),\n", " bf16=config.get(\"training_config\").get(\"bf16\"),\n", " logging_steps=config.get(\"training_config\").get(\"logging_steps\"),\n", " optim=config.get(\"training_config\").get(\"optim\"),\n", " weight_decay=config.get(\"training_config\").get(\"weight_decay\"),\n", " lr_scheduler_type=config.get(\"training_config\").get(\"lr_scheduler_type\"),\n", " seed=42,\n", " output_dir=config.get(\"training_config\").get(\"output_dir\")\n", " )\n", " )\n", " trainer_stats = trainer.train()\n", " return trainer_stats.loss # Assuming loss is the metric to minimize\n", " except Exception as e:\n", " print(f\"Error during training: {e}\")\n", " return float(\"inf\") # Return high value for failed trials\n", "\n", "# Create a Trials object to track hyperparameter evaluations\n", "trials = Trials()\n", "\n", "# Run hyperparameter optimization using TPE algorithm\n", "best = fmin(objective, space, algo=tpe.suggest, trials=trials, max_evals=2)\n", "\n", "# Print the best hyperparameters found during optimization\n", "print(\"Best Hyperparameters:\", best)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "RnjkQJ852_c2", "outputId": "6dd97901-3c17-4d46-d04f-39a7a335a760" }, "outputs": [], "source": [ "import hyperopt\n", "from hyperopt import hp\n", "from hyperopt import Trials\n", "from hyperopt import fmin, tpe, Trials\n", "\n", "# Define the search space for hyperparameters with uncommented additions\n", "space = {\n", " 'learning_rate': hp.loguniform('learning_rate', -5, -1), # Learning rate in log scale\n", " 'lora_alpha': hp.quniform('lora_alpha', 1, 32, 1), # LoRA alpha with quantized steps\n", " 'lora_dropout': hp.uniform('lora_dropout', 0, 0.5), # LoRA dropout rate\n", " 'per_device_train_batch_size': hp.quniform('per_device_train_batch_size', 2, 16, 1), # Added for exploration\n", " 'gradient_accumulation_steps': hp.quniform('gradient_accumulation_steps', 1, 8, 1), # Added for exploration\n", " # Uncomment these if you want to tune other hyperparameters\n", " # 'warmup_steps': hp.quniform('warmup_steps', 0, 1000, 1),\n", " # 'num_train_epochs': hp.quniform('num_train_epochs', 1, 5, 1),\n", "}\n", "\n", "\n", "def objective(params):\n", " # Set hyperparameters in the config dictionary (assuming it's defined elsewhere)\n", " config['training_config']['learning_rate'] = params['learning_rate']\n", " config['lora_config']['lora_alpha'] = params['lora_alpha']\n", " config['lora_config']['lora_dropout'] = params['lora_dropout']\n", " config['training_config']['per_device_train_batch_size'] = params['per_device_train_batch_size']\n", " config['training_config']['gradient_accumulation_steps'] = params['gradient_accumulation_steps']\n", " # ... 
Set other hyperparameters from params dictionary ...\n", "\n", " # Load the model and tokenizer (assuming these are defined elsewhere)\n", " try:\n", " model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name=config.get(\"model_config\").get(\"base_model\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype=config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit=config.get(\"model_config\").get(\"load_in_4bit\"),\n", " )\n", " except Exception as e:\n", " print(f\"Error loading model and tokenizer: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", "\n", " # Setup LoRA for the model (assuming FastLanguageModel supports LoRA)\n", " try:\n", " model = FastLanguageModel.get_peft_model(\n", " model,\n", " r=config.get(\"lora_config\").get(\"r\"),\n", " target_modules=config.get(\"lora_config\").get(\"target_modules\"),\n", " lora_alpha=params['lora_alpha'],\n", " lora_dropout=params['lora_dropout'],\n", " bias=config.get(\"lora_config\").get(\"bias\"),\n", " use_gradient_checkpointing=config.get(\"lora_config\").get(\"use_gradient_checkpointing\"),\n", " random_state=42,\n", " use_rslora=config.get(\"lora_config\").get(\"use_rslora\"),\n", " use_dora=config.get(\"lora_config\").get(\"use_dora\"),\n", " loftq_config=config.get(\"lora_config\").get(\"loftq_config\")\n", " )\n", " except Exception as e:\n", " print(f\"Error setting up LoRA: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", "\n", " # Train the model on the test dataset (assuming SFTTrainer and training arguments are defined)\n", " try:\n", " trainer = SFTTrainer(\n", " model=model,\n", " tokenizer=tokenizer,\n", " train_dataset=test_dataset,\n", " dataset_text_field=config_dataset.get(\"training_dataset\").get(\"input_field\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dataset_num_proc=2,\n", " packing=False,\n", " args=TrainingArguments(\n", " per_device_train_batch_size=int(params['per_device_train_batch_size']),\n", " gradient_accumulation_steps=params['gradient_accumulation_steps'],\n", " warmup_steps=params['warmup_steps'],\n", " max_steps=config.get(\"training_config\").get(\"max_steps\"),\n", " num_train_epochs=params['num_train_epochs'],\n", " learning_rate=params['learning_rate'],\n", " fp16=config.get(\"training_config\").get(\"fp16\"),\n", " bf16=config.get(\"training_config\").get(\"bf16\"),\n", " logging_steps=config.get(\"training_config\").get(\"logging_steps\"),\n", " optim=config.get(\"training_config\").get(\"optim\"),\n", " weight_decay=config.get(\"training_config\").get(\"weight_decay\"),\n", " lr_scheduler_type=config.get(\"training_config\").get(\"lr_scheduler_type\"),\n", " seed=42,\n", " output_dir=config.get(\"training_config\").get(\"output_dir\")\n", " )\n", " )\n", " trainer_stats = trainer.train()\n", " return trainer_stats.loss # Assuming loss is the metric to minimize\n", " except Exception as e:\n", " print(f\"Error during training: {e}\")\n", " return float(\"inf\") # Return high value for failed trials\n", "\n", "# Create a Trials object to track hyperparameter evaluations\n", "trials = Trials()\n", "\n", "# Run hyperparameter optimization using TPE algorithm\n", "best = fmin(objective, space, algo=tpe.suggest, trials=trials, max_evals=2)\n", "\n", "# Print the best hyperparameters found during optimization\n", "print(\"Best Hyperparameters:\", best)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ID7nFKsV5urO" }, "outputs": [], 
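"source": [ "# Hedged sketch of the early-stopping idea from the 'Additional Considerations' notes above.\n", "# It assumes a small held-out split; `eval_split` and `trainer_es` are illustrative names, not part of the original pipeline.\n", "from transformers import EarlyStoppingCallback\n", "\n", "eval_split = train_dataset.select(range(100, 200)).map(formatting_prompts_func, batched = True)\n", "\n", "trainer_es = SFTTrainer(\n", " model = model,\n", " tokenizer = tokenizer,\n", " train_dataset = test_dataset,\n", " eval_dataset = eval_split,\n", " dataset_text_field = \"text\",\n", " max_seq_length = config.get(\"model_config\").get(\"max_seq_length\"),\n", " packing = False,\n", " args = TrainingArguments(\n", " per_device_train_batch_size = 2,\n", " gradient_accumulation_steps = 4,\n", " num_train_epochs = 1,\n", " learning_rate = 2e-4,\n", " evaluation_strategy = \"steps\",\n", " eval_steps = 10,\n", " save_strategy = \"steps\",\n", " save_steps = 10,\n", " load_best_model_at_end = True,\n", " metric_for_best_model = \"eval_loss\",\n", " fp16 = not torch.cuda.is_bf16_supported(),\n", " bf16 = torch.cuda.is_bf16_supported(),\n", " optim = \"adamw_8bit\",\n", " seed = 42,\n", " output_dir = \"outputs\",\n", " ),\n", " callbacks = [EarlyStoppingCallback(early_stopping_patience = 3)],\n", ")\n", "# trainer_es.train() # uncomment to run; training stops if eval_loss has not improved for 3 evaluations" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], 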
"source": [ "## Method" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "xp6S8LGg4lUG", "outputId": "537667cd-7711-4d10-e2da-d2337b80c43a" }, "outputs": [], "source": [ "import hyperopt\n", "from hyperopt import hp\n", "from hyperopt import Trials\n", "from hyperopt import fmin, tpe, Trials\n", "import time # Import time for measuring training duration\n", "\n", "# Define the search space for hyperparameters with uncommented additions\n", "space = {\n", " 'learning_rate': hp.loguniform('learning_rate', -5, -1), # Learning rate in log scale\n", " 'lora_alpha': hp.quniform('lora_alpha', 1, 32, 1), # LoRA alpha with quantized steps\n", " 'lora_dropout': hp.uniform('lora_dropout', 0, 0.5), # LoRA dropout rate\n", " 'per_device_train_batch_size': hp.quniform('per_device_train_batch_size', 2, 16, 1), # Added for exploration\n", " 'gradient_accumulation_steps': hp.quniform('gradient_accumulation_steps', 1, 8, 1), # Added for exploration\n", " # Uncomment these if you want to tune other hyperparameters\n", " # 'warmup_steps': hp.quniform('warmup_steps', 0, 1000, 1),\n", " # 'num_train_epochs': hp.quniform('num_train_epochs', 1, 5, 1),\n", "}\n", "\n", "\n", "def objective(params):\n", " # Set hyperparameters in the config dictionary (assuming it's defined elsewhere)\n", " config['training_config']['learning_rate'] = params['learning_rate']\n", " config['lora_config']['lora_alpha'] = params['lora_alpha']\n", " config['lora_config']['lora_dropout'] = params['lora_dropout']\n", " config['training_config']['per_device_train_batch_size'] = params['per_device_train_batch_size']\n", " config['training_config']['gradient_accumulation_steps'] = params['gradient_accumulation_steps']\n", " # ... 
Set other hyperparameters from params dictionary ...\n", "\n", " # Load the model and tokenizer (assuming these are defined elsewhere)\n", " try:\n", " model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name=config.get(\"model_config\").get(\"base_model\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype=config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit=config.get(\"model_config\").get(\"load_in_4bit\"),\n", " )\n", " except Exception as e:\n", " print(f\"Error loading model and tokenizer: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", "\n", " # Setup LoRA for the model (assuming FastLanguageModel supports LoRA)\n", " try:\n", " model = FastLanguageModel.get_peft_model(\n", " model,\n", " r=config.get(\"lora_config\").get(\"r\"),\n", " target_modules=config.get(\"lora_config\").get(\"target_modules\"),\n", " lora_alpha=params['lora_alpha'],\n", " lora_dropout=params['lora_dropout'],\n", " bias=config.get(\"lora_config\").get(\"bias\"),\n", " use_gradient_checkpointing=config.get(\"lora_config\").get(\"use_gradient_checkpointing\"),\n", " random_state=42,\n", " use_rslora=config.get(\"lora_config\").get(\"use_rslora\"),\n", " use_dora=config.get(\"lora_config\").get(\"use_dora\"),\n", " loftq_config=config.get(\"lora_config\").get(\"loftq_config\")\n", " )\n", " except Exception as e:\n", " print(f\"Error setting up LoRA: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", "\n", " # Train the model on the test dataset (assuming SFTTrainer and training arguments are defined)\n", " try:\n", " start_time = time.time() # Measure training start time\n", " trainer = SFTTrainer(\n", " model=model,\n", " tokenizer=tokenizer,\n", " train_dataset=test_dataset,\n", " dataset_text_field=config_dataset.get(\"training_dataset\").get(\"input_field\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dataset_num_proc=2,\n", " packing=False,\n", " args=TrainingArguments(\n", " per_device_train_batch_size=int(params['per_device_train_batch_size']),\n", " gradient_accumulation_steps=params['gradient_accumulation_steps'],\n", " warmup_steps=params['warmup_steps'],\n", " max_steps=config.get(\"training_config\").get(\"max_steps\"),\n", " num_train_epochs=params['num_train_epochs'],\n", " learning_rate=params['learning_rate'],\n", " fp16=config.get(\"training_config\").get(\"fp16\"),\n", " bf16=config.get(\"training_config\").get(\"bf16\"),\n", " logging_steps=config.get(\"training_config\").get(\"logging_steps\"),\n", " optim=config.get(\"training_config\").get(\"optim\"),\n", " weight_decay=config.get(\"training_config\").get(\"weight_decay\"),\n", " lr_scheduler_type=config.get(\"training_config\").get(\"lr_scheduler_type\"),\n", " seed=42,\n", " output_dir=config.get(\"training_config\").get(\"output_dir\")\n", " )\n", " )\n", " trainer_stats = trainer.train()\n", " end_time = time.time() # Measure training end time\n", " training_time = end_time - start_time # Calculate training time\n", "\n", " return training_time # Return training time for minimization\n", " except Exception as e:\n", " print(f\"Error during training: {e}\")\n", " return float(\"inf\") # Return high value for failed trials\n", "\n", "# Create a Trials object to track hyperparameter evaluations\n", "trials = Trials()\n", "\n", "# Run hyperparameter optimization using TPE algorithm\n", "best = fmin(objective, space, algo=tpe.suggest, trials=trials, max_evals=2)\n", "\n", "\n" ] }, { "cell_type": 
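"code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional, hedged sketch of hyperopt's Trials API for inspecting the run above.\n", "# trials.losses() lists the objective value (training time here) per trial; trials.best_trial holds the full record.\n", "print(\"Per-trial objective values:\", trials.losses())\n", "print(\"Best trial result:\", trials.best_trial[\"result\"])" ] }, { "cell_type": 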
"code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1LIpTKWI5NTV", "outputId": "930484d7-c820-4cd3-80ed-f74ae6761346" }, "outputs": [], "source": [ "# Print the best hyperparameters found during optimization\n", "print(\"Best Hyperparameters:\", best)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "Y70d0UUS5Izr" }, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": { "id": "vKqIDJIGYV11" }, "source": [ "# Hyperparameter search\n", "**Step 1: Define the Hyperparameter Search Space**\n", "We need to define the search space for the hyperparameters we want to tune. For example, let's say we want to tune the following hyperparameters:\n", "\n", "* `learning_rate`\n", "* `per_device_train_batch_size`\n", "* `gradient_accumulation_steps`\n", "* `warmup_steps`\n", "* `num_train_epochs`\n", "* `lora_alpha`\n", "* `lora_dropout`\n", "\n", "We can define the search space as follows:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ETCIc-5JYvEq" }, "outputs": [], "source": [ "import hyperopt\n", "from hyperopt import hp\n", "from hyperopt import Trials\n", "from hyperopt import fmin, tpe, Trials\n", "# Define the search space for hyperparameters\n", "space = {\n", " 'learning_rate': hp.loguniform('learning_rate', -5, -1), # Learning rate in log scale\n", " 'lora_alpha': hp.quniform('lora_alpha', 1, 32, 1), # LoRA alpha with quantized steps\n", " 'lora_dropout': hp.uniform('lora_dropout', 0, 0.5), # LoRA dropout rate\n", " # Uncomment these if you want to tune them\n", " # 'per_device_train_batch_size': hp.quniform('per_device_train_batch_size', 2, 16, 1),\n", " # 'gradient_accumulation_steps': hp.quniform('gradient_accumulation_steps', 1, 8, 1),\n", " # 'warmup_steps': hp.quniform('warmup_steps', 0, 1000, 1),\n", " # 'num_train_epochs': hp.quniform('num_train_epochs', 1, 5, 1),\n", "}" ] }, { "cell_type": "markdown", "metadata": { "id": "t1i7r2glY2Df" }, "source": [ "**Step 2. Define the Objective Function**\n", "\n", "The objective function is a function that takes in the hyperparameters, sets them in the `config` dictionary, trains the model, and returns the loss or metric to minimize. We need to modify the previous fine-tuning code to define the objective function." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "mUTbsbJQb08e" }, "outputs": [], "source": [ "def objective(params):\n", " # Set hyperparameters in the config dictionary (assuming it's defined elsewhere)\n", " config['training_config']['learning_rate'] = params['learning_rate']\n", " config['lora_config']['lora_alpha'] = params['lora_alpha']\n", " config['lora_config']['lora_dropout'] = params['lora_dropout']\n", " # ... 
Set other hyperparameters from params dictionary ...\n", " #config['training_config']['per_device_train_batch_size'] = params['per_device_train_batch_size']\n", " #config['training_config']['gradient_accumulation_steps'] = params['gradient_accumulation_steps']\n", " #config['training_config']['warmup_steps'] = params['warmup_steps']\n", " #config['training_config']['num_train_epochs'] = params['num_train_epochs']\n", "\n", " # Load the model and tokenizer (assuming these are defined elsewhere)\n", " try:\n", " model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name=config.get(\"model_config\").get(\"base_model\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype=config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit=config.get(\"model_config\").get(\"load_in_4bit\"),\n", " )\n", " except Exception as e:\n", " print(f\"Error loading model and tokenizer: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", "\n", " # Setup LoRA for the model (assuming FastLanguageModel supports LoRA)\n", " try:\n", " model = FastLanguageModel.get_peft_model(\n", " model,\n", " r=config.get(\"lora_config\").get(\"r\"),\n", " target_modules=config.get(\"lora_config\").get(\"target_modules\"),\n", " lora_alpha=params['lora_alpha'],\n", " lora_dropout=params['lora_dropout'],\n", " bias=config.get(\"lora_config\").get(\"bias\"),\n", " use_gradient_checkpointing=config.get(\"lora_config\").get(\"use_gradient_checkpointing\"),\n", " random_state=42,\n", " use_rslora=config.get(\"lora_config\").get(\"use_rslora\"),\n", " use_dora=config.get(\"lora_config\").get(\"use_dora\"),\n", " loftq_config=config.get(\"lora_config\").get(\"loftq_config\")\n", " )\n", " except Exception as e:\n", " print(f\"Error setting up LoRA: {e}\")\n", " return float(\"inf\") # Return high value for errors\n", " # Train the model on the test dataset (assuming SFTTrainer and training arguments are defined)\n", " # The batch size, gradient accumulation, warmup and epoch settings below fall back to the config,\n", " # because they are commented out of the search space above; switch them to params[...] if you tune them.\n", " try:\n", " trainer = SFTTrainer(\n", " model=model,\n", " tokenizer=tokenizer,\n", " train_dataset=test_dataset,\n", " dataset_text_field=config_dataset.get(\"training_dataset\").get(\"input_field\"),\n", " max_seq_length=config.get(\"model_config\").get(\"max_seq_length\"),\n", " dataset_num_proc=2,\n", " packing=False,\n", " args=TrainingArguments(\n", " per_device_train_batch_size=config.get(\"training_config\").get(\"per_device_train_batch_size\"),\n", " gradient_accumulation_steps=config.get(\"training_config\").get(\"gradient_accumulation_steps\"),\n", " warmup_steps=config.get(\"training_config\").get(\"warmup_steps\"),\n", " max_steps=config.get(\"training_config\").get(\"max_steps\"),\n", " num_train_epochs=config.get(\"training_config\").get(\"num_train_epochs\"),\n", " learning_rate=params['learning_rate'],\n", " fp16=config.get(\"training_config\").get(\"fp16\"),\n", " bf16=config.get(\"training_config\").get(\"bf16\"),\n", " logging_steps=config.get(\"training_config\").get(\"logging_steps\"),\n", " optim=config.get(\"training_config\").get(\"optim\"),\n", " weight_decay=config.get(\"training_config\").get(\"weight_decay\"),\n", " lr_scheduler_type=config.get(\"training_config\").get(\"lr_scheduler_type\"),\n", " seed=42,\n", " output_dir=config.get(\"training_config\").get(\"output_dir\")\n", " )\n", " )\n", " trainer_stats = trainer.train()\n", " return trainer_stats.training_loss # training_loss from the TrainOutput returned by trainer.train() is the metric to minimize\n", " except Exception as e:\n", " print(f\"Error during training: {e}\")\n", " return float(\"inf\") # Return high value for failed trials\n", "\n" ] }, { "cell_type": "markdown", "metadata": { "id": "z7od3txJaZbm" }, "source": [ "**Step 3: Perform 
Hyperparameter Search**\n", "\n", "Now that we have defined the objective function, we can perform the hyperparameter search using Hyperopt's `fmin` function. We need to specify the objective function, the search space, and the maximum number of evaluations." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "vLTpYBVzbpmP", "outputId": "ce2d2b57-2e40-4ae8-ec20-880b78be3a56" }, "outputs": [], "source": [ "\n", "# Create a Trials object to track hyperparameter evaluations\n", "trials = Trials()\n", "# Run hyperparameter optimization using TPE algorithm\n", "best = fmin(objective, space, algo=tpe.suggest, trials=trials, max_evals=2)\n", "# Print the best hyperparameters found during optimization\n", "print(\"Best Hyperparameters:\", best)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "FYO9wV8IoXpy" }, "outputs": [], "source": [ "from huggingface_hub import login, logout" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "z03TocnqoXpy", "outputId": "c598ea52-e319-41ed-cc51-935f61201178" }, "outputs": [], "source": [ "#login(token) # non-blocking login" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "dwDh_WpSoXpy", "outputId": "28fbd65b-61fd-433c-df4f-819a06d4ba05" }, "outputs": [], "source": [ "import torch\n", "import gc\n", "def reset_gpu_memory():\n", " torch.cuda.empty_cache()\n", " gc.collect()\n", " print(\"GPU memory cleared!\")\n", "# Example usage:\n", "reset_gpu_memory()" ] }, { "cell_type": "markdown", "metadata": { "id": "yhpc3w89A3A_" }, "source": [ "Best Hyperparameters: {'learning_rate': 0.03347123299210303, 'lora_alpha': 19.0, 'lora_dropout': 0.4819141472093197}\n", "\n", "Best Hyperparameters: {'gradient_accumulation_steps': 8.0, 'learning_rate': 0.23274337759179295, 'lora_alpha': 8.0, 'lora_dropout': 0.0491660925212421, 'per_device_train_batch_size': 13.0}\n", "\n", "Best Hyperparameters: {'gradient_accumulation_steps': 4.0, 'learning_rate': 0.186066529001672, 'lora_alpha': 32.0, 'lora_dropout': 0.24368804023352264, 'per_device_train_batch_size': 10.0}\n", "\n", "Best Hyperparameters: {'learning_rate': 0.011846192509972951, 'lora_alpha': 8.0, 'lora_dropout': 0.2087248476879589}\n", "\n", "\n", "\n", "Best Hyperparameters (Likely Fastest): {'learning_rate': 1.881999040862022e-05, 'per_device_train_batch_size': 2, 'gradient_accumulation_steps': 2, 'lora_alpha': 29}\n", "Best Training Time: 48.178661584854126 seconds\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "Oh4LwgiZ6d3L" }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "qQQN-uSUzB8h" }, "outputs": [], "source": [ "# Defining the configuration for the base model, LoRA and training\n", "config = {\n", " \"hugging_face_username\":\"ruslanmv\",\n", " \"model_config\": {\n", " \"base_model\":\"meta-llama/Meta-Llama-3-8B-Instruct\", # The base model\n", " \"finetuned_model\":\"ruslanmv/Medical-Mind-Llama-3-8b\", # The finetuned model\n", " \"max_seq_length\": 2048, # The maximum sequence length\n", " # \"dtype\":torch.float16, # The data type\n", " # \"dtype\": torch.float32, # Use float32 instead of half CUDA capability < 8\n", " \"dtype\" : None, # None for auto detection. 
Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n", "\n", " \"load_in_4bit\": True, # Load the model in 4-bit\n", " },\n", " \"lora_config\": {\n", " \"r\": 16, # The number of LoRA layers 8, 16, 32, 64\n", " \"target_modules\": [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n", " \"gate_proj\", \"up_proj\", \"down_proj\"], # The target modules\n", " #\"lora_alpha\":16, # The alpha value for LoRA\n", " \"lora_alpha\":29, # The alpha value for LoRA by search grid\n", " \"lora_dropout\":0, # The dropout value for LoRA\n", " \"bias\":\"none\", # The bias for LoRA\n", " \"use_gradient_checkpointing\":True, # Use gradient checkpointing\n", " \"use_rslora\":False, # Use RSLora\n", " \"use_dora\":False, # Use DoRa\n", " \"loftq_config\":None # The LoFTQ configuration\n", " },\n", "\n", " \"training_config\": {\n", " #\"per_device_train_batch_size\": 2, # The batch size\n", " \"per_device_train_batch_size\": 2, # The batch size by search grid\n", " #\"gradient_accumulation_steps\": 4, # The gradient accumulation steps\n", " \"gradient_accumulation_steps\": 2, # The gradient accumulation steps by search grid\n", " \"warmup_steps\": 5, # The warmup steps\n", " \"max_steps\":0, # The maximum steps (0 if the epochs are defined)\n", " \"num_train_epochs\": 1, # The number of training epochs(0 if the maximum steps are defined)\n", " #\"learning_rate\": 2e-4, # The learning rate\n", " \"learning_rate\": 1.88e-05, # The learning rate by search grid\n", " \"fp16\": not torch.cuda.is_bf16_supported(), # The fp16\n", " \"bf16\": torch.cuda.is_bf16_supported(), # The bf16\n", " \"logging_steps\": 1, # The logging steps\n", " \"optim\" :\"adamw_8bit\", # The optimizer\n", " \"weight_decay\" : 0.01, # The weight decay\n", " \"lr_scheduler_type\": \"linear\", # The learning rate scheduler\n", " \"seed\" : 42, # The seed\n", " \"output_dir\" : \"outputs\", # The output directory\n", " }\n", "}" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "bX5eLb3Ss-39" }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "LEMLkJAeoXpy", "outputId": "6814c878-7716-4201-c287-3a02b4ce6f62" }, "outputs": [], "source": [ "# Loading the model and the tokinizer for the model\n", "model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name = config.get(\"model_config\").get(\"base_model\"),\n", " max_seq_length = config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype = config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit = config.get(\"model_config\").get(\"load_in_4bit\"),\n", "\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "kvValJd0oXpz", "outputId": "b2aa892b-92f3-4c4c-9688-781069b48585" }, "outputs": [], "source": [ "# Set up GPU acceleration\n", "if torch.cuda.device_count() > 1:\n", " print(\"Multiple GPUs enabled\")\n", " devices = [f\"cuda:{i}\" for i in range(torch.cuda.device_count())]\n", " model_parallel = torch.nn.DataParallel(model, device_ids=[0, 1])\n", " # Access the original model from the DataParallel object\n", " model = model_parallel.module\n", "else:\n", " print(\"No DataParallel \")\n", " #device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "DrQUZ0jtoXpz" }, "outputs": [], "source": [ "#model = model.half() # the model to half precision (float16)" ] }, { "cell_type": 
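"code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional, hedged sketch: instead of hand-copying the searched values into the config above,\n", "# the best parameters found earlier could be written back programmatically.\n", "# Assumes `best_params` (Optuna) or `best` (hyperopt) still exists in this session; guarded so the cell is safe either way.\n", "searched = globals().get(\"best_params\") or globals().get(\"best\")\n", "if searched:\n", " if \"learning_rate\" in searched:\n", " config[\"training_config\"][\"learning_rate\"] = float(searched[\"learning_rate\"])\n", " if \"per_device_train_batch_size\" in searched:\n", " config[\"training_config\"][\"per_device_train_batch_size\"] = int(searched[\"per_device_train_batch_size\"])\n", " if \"gradient_accumulation_steps\" in searched:\n", " config[\"training_config\"][\"gradient_accumulation_steps\"] = int(searched[\"gradient_accumulation_steps\"])\n", " if \"lora_alpha\" in searched:\n", " config[\"lora_config\"][\"lora_alpha\"] = int(searched[\"lora_alpha\"])\n", " print(\"Config updated from search results:\", searched)\n", "else:\n", " print(\"No search results in this session; keeping the hand-filled config values.\")" ] }, { "cell_type": 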
"code", "execution_count": null, "metadata": { "id": "ZPLenE03oXpz" }, "outputs": [], "source": [ "# Setup for QLoRA/LoRA peft of the base model\n", "model = FastLanguageModel.get_peft_model(\n", " model,\n", " r = config.get(\"lora_config\").get(\"r\"),\n", " target_modules = config.get(\"lora_config\").get(\"target_modules\"),\n", " lora_alpha = config.get(\"lora_config\").get(\"lora_alpha\"),\n", " lora_dropout = config.get(\"lora_config\").get(\"lora_dropout\"),\n", " bias = config.get(\"lora_config\").get(\"bias\"),\n", " use_gradient_checkpointing = config.get(\"lora_config\").get(\"use_gradient_checkpointing\"),\n", " random_state = 42,\n", " use_rslora = config.get(\"lora_config\").get(\"use_rslora\"),\n", " use_dora = config.get(\"lora_config\").get(\"use_dora\"),\n", " loftq_config = config.get(\"lora_config\").get(\"loftq_config\"),\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "dCg9_NAfoXpz", "outputId": "75303ebe-3299-496e-faea-afa89f4e4c01" }, "outputs": [], "source": [ "from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer\n", "tokenizer = AutoTokenizer.from_pretrained(config.get(\"model_config\").get(\"base_model\"))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "V4soybR7oXpz" }, "outputs": [], "source": [ "tokenizer.add_eos_token = True\n", "tokenizer.pad_token_id = 0\n", "tokenizer.padding_side = \"left\"" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "_sm6yQFPWNXY" }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "M0qTbrawoXpz" }, "outputs": [], "source": [ "config_dataset={ \"training_dataset\": {\n", " \"name\": \"ruslanmv/ai-medical-dataset\", # The dataset name(huggingface/datasets)\n", " \"split\": \"train\", # The dataset split\n", " \"input_fields\": [\"question\", \"context\"] ,# The input fields\n", " \"input_field\": \"text\",# The input field\n", " },\n", " }" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "DMscf5cdoXpz", "outputId": "c7799645-c6bf-4b31-8070-1aa0ef60df33" }, "outputs": [], "source": [ "config_dataset.get(\"training_dataset\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 113, "referenced_widgets": [ "3a97281be4c1433aa3abe6c25b7113e2", "4e19e78059b842a5832ccae2f765a30c", "1a72b512e1374e67a858edf2844fc157", "c9cfd66b68a1437d946c83163fa877df", "cccd970273ae43d2a6e60ac421bdc882", "32cff795f8bc490dbf63ed130e1f581f", "4a0426a353ca41cba39d4dfeba925451", "284192f01a924f87afd8b5087ca9af6c", "273bf76f74bc4fb492ccb67d9e202f7b", "45b3259e3cac4de8bd19d12f07de2adb", "b7e7896aeac74b6eae27de0677100e57", "11dc1dcf6b29471580c32c818fa41d88", "9344b22940c64654a82bb2ce06530e30", "4f68a26f64e844c7be21cc180eb6c1a2", "769b40273bab41af8eb66e494b613241", "320c09781518483e82defa86c28316d1", "793f49f397b54daab63194cee8d04256", "fa79cfa23f3a430dab69a59d93383cd0", "341dca5ac74348dd9b5a347e38fa0b40", "8ba6fd1bf16a4680b8a8c9c55ecf23e7", "dc85f5e365f4488fa185d0ae35fde806", "51a6d3c97480476e8c22d9ad670bdc47", "b8b277831f1a45109b3a4a3565fbdb9d", "9f91f7ce62e243f59d72e5ba36f97b8f", "1634ba52355b4681a913039666926f85", "217ca5cd404d4756a399fba3aa4fbc15", "bc6d92cb8837428bb7038d75e6af604e", "af0233735d744b7e838f50f52c9d6cbe", "8a8d3a006ee24c4393d7c2f2d040ce52", "eff94d2d010e4b4f93a6dfcb61103a52", 
"da5cd094aaae45f4a0ca051ad5babd78", "8f88a5b04723482ea430679e504c65f9", "8d153f070a8d4ad1b32996a9fd82beda" ] }, "id": "g2h5E--2oXp0", "outputId": "8b8a3e49-a0bd-4aea-bf8d-5d2414f66547" }, "outputs": [], "source": [ "# Loading the training dataset\n", "train_dataset = load_dataset(config_dataset.get(\"training_dataset\").get(\"name\"), split = config_dataset.get(\"training_dataset\").get(\"split\"))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "nxGRQ9sCoXp0", "outputId": "1e2ce893-2a39-4521-9062-490a9e9de016" }, "outputs": [], "source": [ "train_dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "sGtF6NvpoXp0" }, "outputs": [], "source": [ "# Select the first 10 rows of the dataset\n", "test_dataset = train_dataset.select(range(100))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "LipkaBaBoXp0", "outputId": "4484e420-1693-4524-bf2a-19db669c5543" }, "outputs": [], "source": [ "test_dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "bBIK97mjoXp0", "outputId": "930efcd4-5b32-4a8d-ff68-859b63293e7e" }, "outputs": [], "source": [ "test_dataset[1]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "HERoJEG2oXp0" }, "outputs": [], "source": [ "medical_prompt = \"\"\"You are an AI Medical Assistant Chatbot, trained to answer medical questions. Below is an instruction that describes a task, paired with an response context. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "{}\n", "\n", "\n", "### Response:\n", "{}\"\"\"\n", "\n", "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n", "def formatting_prompts_func(examples):\n", " instructions = examples[\"question\"]\n", " outputs = examples[\"context\"]\n", " texts = []\n", " for instruction, output in zip(instructions, outputs):\n", " # Must add EOS_TOKEN, otherwise your generation will go on forever!\n", " text = medical_prompt.format(instruction, output) + EOS_TOKEN\n", " texts.append(text)\n", " return { \"text\" : texts, }\n", "pass" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "Redv7cdFoXp0" }, "outputs": [], "source": [ "test_dataset= test_dataset.map(formatting_prompts_func, batched = True,)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "CSGjoG8voXp0", "outputId": "92689d46-6795-4591-bc5e-211c8cc9797a" }, "outputs": [], "source": [ "test_dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 142 }, "id": "z5Q2wwfjoXp0", "outputId": "a76d49b3-48bf-43d6-fc80-a5aa88a12634" }, "outputs": [], "source": [ "test_dataset['text'][1]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "urBn0WMSoXp0" }, "outputs": [], "source": [ "is_test=True\n", "if is_test:\n", " train_dataset=test_dataset\n", "else:\n", " train_dataset= train_dataset.map(formatting_prompts_func, batched = True,)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 142 }, "id": "UevHEqo7oXp0", "outputId": "dfe3869c-fd9e-4734-9462-e9eb391792e1" }, "outputs": [], "source": [ "train_dataset['text'][1]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { 
"colab": { "base_uri": "https://localhost:8080/", "height": 140, "referenced_widgets": [ "ffa74977e7464cebb16d3cf8ee976d51", "e257e4a2bfdb48038102173d397ab2e4", "67b9a3505ae644dbb3c4fc14781a2731", "c4d39c87c16c4961b942d896742ff7ce", "e5880b946aae4b84a94226a5d6acaf45", "82c6c2752a0746f3935e069c0f8811d6", "1850ab17bafd4a43b5ab5899d1875a40", "53ee8f5e8b7d4076bdb0167baf2e5729", "d70fd9035f9b4d82892fae34c28c46d5", "af0096de28414303ba5324f4087cd92e", "0f55ae30c2704632941cca4727c1c4f2" ] }, "id": "X4wxJAgnM2W0", "outputId": "38c58ce9-6f4c-49c9-e21d-49bc34f5cc2e" }, "outputs": [], "source": [ "# Setting up the trainer for the model\n", "trainer = SFTTrainer(\n", " model = model,\n", " tokenizer = tokenizer,\n", " train_dataset = train_dataset,\n", " dataset_text_field = config_dataset.get(\"training_dataset\").get(\"input_field\"),\n", " max_seq_length = config.get(\"model_config\").get(\"max_seq_length\"),\n", " dataset_num_proc = 2,\n", " packing = False,\n", " args = TrainingArguments(\n", " per_device_train_batch_size = config.get(\"training_config\").get(\"per_device_train_batch_size\"),\n", " gradient_accumulation_steps = config.get(\"training_config\").get(\"gradient_accumulation_steps\"),\n", " warmup_steps = config.get(\"training_config\").get(\"warmup_steps\"),\n", " max_steps = config.get(\"training_config\").get(\"max_steps\"),\n", " num_train_epochs= config.get(\"training_config\").get(\"num_train_epochs\"),\n", " learning_rate = config.get(\"training_config\").get(\"learning_rate\"),\n", " fp16 = config.get(\"training_config\").get(\"fp16\"),\n", " bf16 = config.get(\"training_config\").get(\"bf16\"),\n", " logging_steps = config.get(\"training_config\").get(\"logging_steps\"),\n", " optim = config.get(\"training_config\").get(\"optim\"),\n", " weight_decay = config.get(\"training_config\").get(\"weight_decay\"),\n", " lr_scheduler_type = config.get(\"training_config\").get(\"lr_scheduler_type\"),\n", " seed = 42,\n", " output_dir = config.get(\"training_config\").get(\"output_dir\"),\n", " ),\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "RJl7dk8yoXp1", "outputId": "b37c70bb-50a6-4319-b99c-c0d9c05f035b" }, "outputs": [], "source": [ "# Memory statistics before training\n", "gpu_statistics = torch.cuda.get_device_properties(0)\n", "reserved_memory = round(torch.cuda.max_memory_reserved() / 1024**3, 2)\n", "max_memory = round(gpu_statistics.total_memory / 1024**3, 2)\n", "print(f\"Reserved Memory: {reserved_memory}GB\")\n", "print(f\"Max Memory: {max_memory}GB\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "Q-g-4RvNXkyD" }, "outputs": [], "source": [ "## [ 1038/2651250 53:49 < 2295:10:28, 0.32 it/s, Epoch 0.00/1] old" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 948 }, "id": "yI9mEQ7ZOUx2", "outputId": "6466d591-76f8-45e2-e665-39ad9bf8ae7f", "scrolled": false }, "outputs": [], "source": [ "# Training the model\n", "trainer_stats = trainer.train()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "YQFEr64koXp1", "outputId": "2e1b3775-1f5d-4b8e-a0ef-32266cb7fa2a" }, "outputs": [], "source": [ "# Memory statistics after training\n", "used_memory = round(torch.cuda.max_memory_allocated() / 1024**3, 2)\n", "used_memory_lora = round(used_memory - reserved_memory, 2)\n", "used_memory_persentage = round((used_memory / 
max_memory) * 100, 2)\n", "used_memory_lora_persentage = round((used_memory_lora / max_memory) * 100, 2)\n", "print(f\"Used Memory: {used_memory}GB ({used_memory_persentage}%)\")\n", "print(f\"Used Memory for training(fine-tuning) LoRA: {used_memory_lora}GB ({used_memory_lora_persentage}%)\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "1YJB4bZyoXp1" }, "outputs": [], "source": [ "# Saving the trainer stats\n", "with open(\"trainer_stats.json\", \"w\") as f:\n", " json.dump(trainer_stats, f, indent=4)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "-1HtsRpVnHTj" }, "outputs": [], "source": [ "# Locally saving the model and pushing it to the Hugging Face Hub (only LoRA adapters)\n", "model.save_pretrained(config.get(\"model_config\").get(\"finetuned_model\"))\n", "model.push_to_hub(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer = tokenizer)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "yRO4pPP0oXp1" }, "outputs": [], "source": [ "# Saving the model using merged_16bit(float16), merged_4bit(int4) or quantization options(q8_0, q4_k_m, q5_k_m)...\n", "model.save_pretrained_merged(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, save_method = \"merged_16bit\",)\n", "model.push_to_hub_merged(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, save_method = \"merged_16bit\")\n", "\n", "model.save_pretrained_merged(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, save_method = \"merged_4bit\",)\n", "model.push_to_hub_merged(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, save_method = \"merged_4bit\")\n", "\n", "model.save_pretrained_gguf(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer)\n", "model.push_to_hub_gguf(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer)\n", "\n", "model.save_pretrained_gguf(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, quantization_method = \"f16\")\n", "model.push_to_hub_gguf(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, quantization_method = \"f16\")\n", "\n", "model.save_pretrained_gguf(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, quantization_method = \"q4_k_m\")\n", "model.push_to_hub_gguf(config.get(\"model_config\").get(\"finetuned_model\"), tokenizer, quantization_method = \"q4_k_m\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ozVcalyP_JLs" }, "outputs": [], "source": [ "# Loading the fine-tuned model and the tokenizer for inference\n", "model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name = config.get(\"model_config\").get(\"finetuned_model\"),\n", " max_seq_length = config.get(\"model_config\").get(\"max_seq_length\"),\n", " dtype = config.get(\"model_config\").get(\"dtype\"),\n", " load_in_4bit = config.get(\"model_config\").get(\"load_in_4bit\"),\n", " )\n", "\n", "# Using FastLanguageModel for fast inference\n", "FastLanguageModel.for_inference(model)\n", "\n", "# Tokenizing the input and generating the output\n", "inputs = tokenizer(\n", "[\n", " \"<|start_header_id|>system<|end_header_id|> You are a Medical AI chatbot assistant .<|eot_id|><|start_header_id|>user<|end_header_id|> This is the question: What was the main cause of the inflammatory CD4+ T cells?<|eot_id|>\"\n", "], return_tensors = \"pt\").to(\"cuda\")\n", "outputs = model.generate(**inputs, max_new_tokens = 256, use_cache = True)\n", "tokenizer.batch_decode(outputs, skip_special_tokens 
= True)" ] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "L4", "machine_shape": "hm", "provenance": [] }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "0005f2d9fe1e4cc98ea58b0c2868b433": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_45c1d5b0df0e420a87f791dd4cf0e425", "max": 100, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9ed49f1a099846a3a65cd6608bafb0e4", "value": 100 } }, "0058ed544fed4272848a891a68b9adc0": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "00eea4b0c6e44c62900ea8e7d919efe9": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "02fc530028ea4d538b7f6b48463ae700": { "model_module": "@jupyter-widgets/controls", "model_module_version": 
"1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "058b2b9959b84b6f9f5d3862ef53d029": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_7807f312425b4f4d9249aa1ac77d7461", "placeholder": "​", "style": "IPY_MODEL_d8e7ea9552a84b8284b31d77090b54af", "value": "Map (num_proc=2): 100%" } }, "0f55ae30c2704632941cca4727c1c4f2": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "11dc1dcf6b29471580c32c818fa41d88": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_9344b22940c64654a82bb2ce06530e30", "IPY_MODEL_4f68a26f64e844c7be21cc180eb6c1a2", "IPY_MODEL_769b40273bab41af8eb66e494b613241" ], "layout": "IPY_MODEL_320c09781518483e82defa86c28316d1" } }, "1634ba52355b4681a913039666926f85": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_eff94d2d010e4b4f93a6dfcb61103a52", "max": 18, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_da5cd094aaae45f4a0ca051ad5babd78", "value": 18 } }, "1850ab17bafd4a43b5ab5899d1875a40": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "1a72b512e1374e67a858edf2844fc157": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": 
"1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_284192f01a924f87afd8b5087ca9af6c", "max": 18, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_273bf76f74bc4fb492ccb67d9e202f7b", "value": 18 } }, "217ca5cd404d4756a399fba3aa4fbc15": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_8f88a5b04723482ea430679e504c65f9", "placeholder": "​", "style": "IPY_MODEL_8d153f070a8d4ad1b32996a9fd82beda", "value": " 18/18 [00:00<00:00,  9.43it/s]" } }, "22ea45365d21439fb5069974bbe69711": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "23a71f8847e647daba35e495706fc846": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_22ea45365d21439fb5069974bbe69711", "placeholder": "​", "style": "IPY_MODEL_bd087d0aa3214c5dbecc9b0bd4d976df", "value": "Resolving data files: 100%" } }, "273bf76f74bc4fb492ccb67d9e202f7b": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "284192f01a924f87afd8b5087ca9af6c": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, 
"align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2c5564fb033346afbe7692a24a52b302": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "31a203cdd2f54cda8a05214844888156": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "320c09781518483e82defa86c28316d1": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "32cff795f8bc490dbf63ed130e1f581f": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": 
null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "33fb10908c23457aa4796626102fc8c5": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "341dca5ac74348dd9b5a347e38fa0b40": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "3564e3cf0fe84281838d84525794e735": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_912164947c5847908424f3e60c5adb64", "IPY_MODEL_7517ce80636040e29665a9353afab183", "IPY_MODEL_e14b9d980a1a41fb9e81385cb0f73d3a" ], "layout": "IPY_MODEL_ada78aafba3f47ab8eb45cf3c83a6805" } }, "37803098ceed4528bb690ebee028c840": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "39d3b72ab6214bcf9b0bb6b6294e957c": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, 
"grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "3a97281be4c1433aa3abe6c25b7113e2": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_4e19e78059b842a5832ccae2f765a30c", "IPY_MODEL_1a72b512e1374e67a858edf2844fc157", "IPY_MODEL_c9cfd66b68a1437d946c83163fa877df" ], "layout": "IPY_MODEL_cccd970273ae43d2a6e60ac421bdc882" } }, "3f7afd4bd28842cbb73e62c155667030": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_9a5fd3a68fd1445f92bea51a7fec3e6b", "max": 18, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_37803098ceed4528bb690ebee028c840", "value": 18 } }, "44f189b81bbd48ca8cb146ead641d2b5": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_e903140c8c794c48b231924d3975b7a6", "placeholder": "​", "style": "IPY_MODEL_7e74d789c82747e0b5066a00b9e36c1d", "value": " 100/100 [00:00<00:00, 125.88 examples/s]" } }, "45b3259e3cac4de8bd19d12f07de2adb": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": 
null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "45c1d5b0df0e420a87f791dd4cf0e425": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "4a0426a353ca41cba39d4dfeba925451": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "4e19e78059b842a5832ccae2f765a30c": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_32cff795f8bc490dbf63ed130e1f581f", "placeholder": "​", "style": "IPY_MODEL_4a0426a353ca41cba39d4dfeba925451", "value": "Resolving data files: 100%" } }, "4f68a26f64e844c7be21cc180eb6c1a2": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_341dca5ac74348dd9b5a347e38fa0b40", "max": 18, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_8ba6fd1bf16a4680b8a8c9c55ecf23e7", "value": 18 } }, "51a6d3c97480476e8c22d9ad670bdc47": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "53ee8f5e8b7d4076bdb0167baf2e5729": { "model_module": 
"@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "58b932a03b2c4aa4891d541f186244b9": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "5d1fbd3c62d94df7befdefc451221414": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_8ad6abb48f38469f9d399eea8f5e5b70", "IPY_MODEL_6cea0da24cf54811a43168c606759bab", "IPY_MODEL_eb8c88f5c06c49fe9099371b3cf112ae" ], "layout": "IPY_MODEL_89a1354722e640758978befc06ed4a78" } }, "64539b4212fe4d989976f56369bb746b": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "67b9a3505ae644dbb3c4fc14781a2731": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": 
"1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_53ee8f5e8b7d4076bdb0167baf2e5729", "max": 100, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_d70fd9035f9b4d82892fae34c28c46d5", "value": 100 } }, "696e82ec6a174974a90d5abc7c101ee7": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "6cea0da24cf54811a43168c606759bab": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_dade882aca304a31b693a2c58807d825", "max": 18, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_02fc530028ea4d538b7f6b48463ae700", "value": 18 } }, "72eca1e2871b458abd3383d9711215a2": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_058b2b9959b84b6f9f5d3862ef53d029", "IPY_MODEL_85d4879bd7d64766905db34cef052fed", "IPY_MODEL_44f189b81bbd48ca8cb146ead641d2b5" ], "layout": "IPY_MODEL_f89c5c949e984361bce7f97d86d2a2e5" } }, "734b6d3e3406403293c4bc955a643528": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_dc3b2edc3f5d480a93b57b15b4444608", "placeholder": "​", "style": "IPY_MODEL_7967d420aff1414e9fe53eb04c928eb4", "value": "Map: 100%" } }, "7517ce80636040e29665a9353afab183": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_bb078c8c1f6a48359dc654d91ece684d", "max": 18, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9b9322336b564a409086955ebda07fc3", "value": 18 } }, "769b40273bab41af8eb66e494b613241": { "model_module": "@jupyter-widgets/controls", 
"model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_dc85f5e365f4488fa185d0ae35fde806", "placeholder": "​", "style": "IPY_MODEL_51a6d3c97480476e8c22d9ad670bdc47", "value": " 18/18 [00:00<00:00, 1567.70it/s]" } }, "7807f312425b4f4d9249aa1ac77d7461": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "793f49f397b54daab63194cee8d04256": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7967d420aff1414e9fe53eb04c928eb4": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "7e11cccce8be49008f8db3a0c3ea603d": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, 
"_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7e74d789c82747e0b5066a00b9e36c1d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "82c6c2752a0746f3935e069c0f8811d6": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "85d4879bd7d64766905db34cef052fed": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0058ed544fed4272848a891a68b9adc0", "max": 100, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_33fb10908c23457aa4796626102fc8c5", "value": 100 } }, "89a1354722e640758978befc06ed4a78": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, 
"align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "8a195771bdc0462e8f9fbb60eb9141b1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8a8d3a006ee24c4393d7c2f2d040ce52": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8ad6abb48f38469f9d399eea8f5e5b70": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_39d3b72ab6214bcf9b0bb6b6294e957c", "placeholder": "​", "style": "IPY_MODEL_696e82ec6a174974a90d5abc7c101ee7", "value": "Resolving data files: 100%" } }, "8ba6fd1bf16a4680b8a8c9c55ecf23e7": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "8d153f070a8d4ad1b32996a9fd82beda": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "8f88a5b04723482ea430679e504c65f9": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, 
"border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "912164947c5847908424f3e60c5adb64": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_ff108c92fb5547869ee545cf9a094b07", "placeholder": "​", "style": "IPY_MODEL_2c5564fb033346afbe7692a24a52b302", "value": "Loading dataset shards: 100%" } }, "9344b22940c64654a82bb2ce06530e30": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_793f49f397b54daab63194cee8d04256", "placeholder": "​", "style": "IPY_MODEL_fa79cfa23f3a430dab69a59d93383cd0", "value": "Resolving data files: 100%" } }, "963c0aa5620b4ea8b5a903894646121c": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9a5fd3a68fd1445f92bea51a7fec3e6b": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": 
null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9b9322336b564a409086955ebda07fc3": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "9bceb9eddb2147c1abbf3391c70e6784": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9ed49f1a099846a3a65cd6608bafb0e4": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "9f91f7ce62e243f59d72e5ba36f97b8f": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_af0233735d744b7e838f50f52c9d6cbe", "placeholder": "​", "style": "IPY_MODEL_8a8d3a006ee24c4393d7c2f2d040ce52", "value": "Loading dataset shards: 100%" } }, "a419499622cd4374937423a79677298f": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": 
"@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_b93514308ae44afbb1a0511f5f9c6ddf", "placeholder": "​", "style": "IPY_MODEL_58b932a03b2c4aa4891d541f186244b9", "value": " 18/18 [00:00<00:00, 1458.49it/s]" } }, "ada78aafba3f47ab8eb45cf3c83a6805": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "af0096de28414303ba5324f4087cd92e": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "af0233735d744b7e838f50f52c9d6cbe": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, 
"left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b7e7896aeac74b6eae27de0677100e57": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "b8b277831f1a45109b3a4a3565fbdb9d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_9f91f7ce62e243f59d72e5ba36f97b8f", "IPY_MODEL_1634ba52355b4681a913039666926f85", "IPY_MODEL_217ca5cd404d4756a399fba3aa4fbc15" ], "layout": "IPY_MODEL_bc6d92cb8837428bb7038d75e6af604e" } }, "b93514308ae44afbb1a0511f5f9c6ddf": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bb078c8c1f6a48359dc654d91ece684d": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": 
null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bb1156b7d349440d9cc8a2f0328465a7": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_23a71f8847e647daba35e495706fc846", "IPY_MODEL_3f7afd4bd28842cbb73e62c155667030", "IPY_MODEL_a419499622cd4374937423a79677298f" ], "layout": "IPY_MODEL_64539b4212fe4d989976f56369bb746b" } }, "bc6d92cb8837428bb7038d75e6af604e": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "bd087d0aa3214c5dbecc9b0bd4d976df": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "be6162f66e594d3ebd8c53ebab3bbfa6": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_963c0aa5620b4ea8b5a903894646121c", "placeholder": "​", "style": "IPY_MODEL_31a203cdd2f54cda8a05214844888156", "value": " 100/100 [00:00<00:00, 5440.44 examples/s]" } }, "c4d39c87c16c4961b942d896742ff7ce": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_af0096de28414303ba5324f4087cd92e", "placeholder": "​", "style": 
"IPY_MODEL_0f55ae30c2704632941cca4727c1c4f2", "value": " 100/100 [00:01<00:00, 113.55 examples/s]" } }, "c9cfd66b68a1437d946c83163fa877df": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_45b3259e3cac4de8bd19d12f07de2adb", "placeholder": "​", "style": "IPY_MODEL_b7e7896aeac74b6eae27de0677100e57", "value": " 18/18 [00:00<00:00,  1.32it/s]" } }, "cccd970273ae43d2a6e60ac421bdc882": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "d70fd9035f9b4d82892fae34c28c46d5": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "d8e7ea9552a84b8284b31d77090b54af": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "da5cd094aaae45f4a0ca051ad5babd78": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "dade882aca304a31b693a2c58807d825": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", 
"_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "dc3b2edc3f5d480a93b57b15b4444608": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "dc85f5e365f4488fa185d0ae35fde806": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e14b9d980a1a41fb9e81385cb0f73d3a": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, 
"layout": "IPY_MODEL_9bceb9eddb2147c1abbf3391c70e6784", "placeholder": "​", "style": "IPY_MODEL_8a195771bdc0462e8f9fbb60eb9141b1", "value": " 18/18 [00:35<00:00,  1.20it/s]" } }, "e257e4a2bfdb48038102173d397ab2e4": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_82c6c2752a0746f3935e069c0f8811d6", "placeholder": "​", "style": "IPY_MODEL_1850ab17bafd4a43b5ab5899d1875a40", "value": "Map (num_proc=2): 100%" } }, "e3bd7f85ce194cd4b697c2eb82038658": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_734b6d3e3406403293c4bc955a643528", "IPY_MODEL_0005f2d9fe1e4cc98ea58b0c2868b433", "IPY_MODEL_be6162f66e594d3ebd8c53ebab3bbfa6" ], "layout": "IPY_MODEL_7e11cccce8be49008f8db3a0c3ea603d" } }, "e5880b946aae4b84a94226a5d6acaf45": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "e903140c8c794c48b231924d3975b7a6": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, 
"object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "eb8c88f5c06c49fe9099371b3cf112ae": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_00eea4b0c6e44c62900ea8e7d919efe9", "placeholder": "​", "style": "IPY_MODEL_fe17bedb5ef04d8b9e064fa1e0d75185", "value": " 18/18 [00:00<00:00,  1.42it/s]" } }, "eff94d2d010e4b4f93a6dfcb61103a52": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f89c5c949e984361bce7f97d86d2a2e5": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "fa79cfa23f3a430dab69a59d93383cd0": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, 
"fe17bedb5ef04d8b9e064fa1e0d75185": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "ff108c92fb5547869ee545cf9a094b07": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ffa74977e7464cebb16d3cf8ee976d51": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_e257e4a2bfdb48038102173d397ab2e4", "IPY_MODEL_67b9a3505ae644dbb3c4fc14781a2731", "IPY_MODEL_c4d39c87c16c4961b942d896742ff7ce" ], "layout": "IPY_MODEL_e5880b946aae4b84a94226a5d6acaf45" } } } } }, "nbformat": 4, "nbformat_minor": 0 }