{
  "model_name": "unsloth/gemma-2-2b",
  "max_seq_length": 2048,
  "dtype": null,
  "load_in_4bit": false,
  "lora_params": {
    "r": 16,
    "target_modules": [
      "q_proj",
      "k_proj",
      "v_proj",
      "o_proj",
      "gate_proj",
      "up_proj",
      "down_proj"
    ],
    "lora_alpha": 16,
    "lora_dropout": 0,
    "bias": "none",
    "use_gradient_checkpointing": "unsloth",
    "random_state": 3407,
    "use_rslora": false,
    "loftq_config": null
  },
  "training_args": {
    "per_device_train_batch_size": 4,
    "gradient_accumulation_steps": 4,
    "warmup_steps": 5,
    "num_train_epochs": 1,
    "learning_rate": 0.0002,
    "fp16": false,
    "bf16": true,
    "logging_steps": 1,
    "optim": "adamw_8bit",
    "weight_decay": 0.01,
    "lr_scheduler_type": "linear",
    "seed": 3407,
    "output_dir": "outputs",
    "report_to": "none"
  },
  "data_config": {
    "dataset_name": "marmikpandya/mental-health",
    "dataset_split": "train",
    "dataset_rows": null,
    "conversation_format": {
      "instruction": "Provide an instruction to the model, e.g., 'Provide advice on coping with stress.'",
      "input": "Include user details or context, e.g., 'I've been feeling overwhelmed with work and personal responsibilities.'",
      "response": "The model's response based on the instruction and input."
    },
    "max_seq_length": 2048,
    "train_on_responses_only": false
  },
  "inference_config": {
    "temperature": 1.0,
    "min_p": 0.9,
    "max_new_tokens": 512
  },
  "alpaca_prompt": {
    "template": "If you are a licensed psychologist, please provide this patient with a helpful response to their concern.\n\n### Instruction:\n{}\n\n### Input:\n{}\n\n### Response:\n{}",
    "eos_token": true
  }
}
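
For reference, a minimal sketch of how the top-level and `lora_params` settings could be applied with Unsloth's `FastLanguageModel` API. The mapping from the JSON keys above to keyword arguments, and the `config.json` filename, are assumptions about how a loader script would consume this file; `from_pretrained` and `get_peft_model` are the library's standard entry points.

```python
# Sketch: load the base model and attach LoRA adapters using the values above.
# Assumes the config has been parsed into a dict called `cfg` (e.g. via json.load).
import json
from unsloth import FastLanguageModel

with open("config.json") as f:  # hypothetical filename for this config
    cfg = json.load(f)

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=cfg["model_name"],          # "unsloth/gemma-2-2b"
    max_seq_length=cfg["max_seq_length"],  # 2048
    dtype=cfg["dtype"],                    # null/None -> auto-detect
    load_in_4bit=cfg["load_in_4bit"],
)

# The lora_params block maps directly onto get_peft_model's keyword arguments
# (r, target_modules, lora_alpha, lora_dropout, bias, ...).
model = FastLanguageModel.get_peft_model(model, **cfg["lora_params"])
```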
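
The `data_config` and `alpaca_prompt` sections describe how each dataset row is rendered into a single training string. Below is a hedged sketch of that formatting step, assuming the dataset exposes `instruction`, `input`, and `response` columns matching `conversation_format`; the actual column names on the Hub may differ.

```python
# Sketch: render each (instruction, input, response) triple with the Alpaca-style
# template and append the EOS token, as indicated by "eos_token": true.
from datasets import load_dataset

alpaca_prompt = cfg["alpaca_prompt"]["template"]

def formatting_prompts_func(examples):
    texts = []
    for instruction, user_input, response in zip(
        examples["instruction"], examples["input"], examples["response"]
    ):
        texts.append(alpaca_prompt.format(instruction, user_input, response) + tokenizer.eos_token)
    return {"text": texts}

dataset = load_dataset(
    cfg["data_config"]["dataset_name"],   # "marmikpandya/mental-health"
    split=cfg["data_config"]["dataset_split"],
)
dataset = dataset.map(formatting_prompts_func, batched=True)
```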
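
The `training_args` block mirrors Hugging Face `TrainingArguments` fields, so a trainer can be built by unpacking it directly. Unpacking the dict is an assumption about how the config is consumed; `SFTTrainer` and `TrainingArguments` themselves are the standard trl/transformers APIs used in Unsloth's fine-tuning examples.

```python
# Sketch: fine-tune on the formatted "text" column with TRL's SFTTrainer.
from transformers import TrainingArguments
from trl import SFTTrainer

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=cfg["data_config"]["max_seq_length"],
    args=TrainingArguments(**cfg["training_args"]),  # learning_rate=2e-4, bf16=True, ...
)
trainer.train()
```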
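
Finally, `inference_config` supplies the sampling parameters for generation. A minimal sketch, again assuming the `cfg` dict from above; `do_sample=True` is added here so that `temperature` and `min_p` actually take effect, and the example prompt simply reuses the placeholders from `conversation_format`.

```python
# Sketch: generate a response with the tuned adapters using the sampling settings above.
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference mode

prompt = alpaca_prompt.format(
    "Provide advice on coping with stress.",                                   # instruction
    "I've been feeling overwhelmed with work and personal responsibilities.",  # input
    "",                                                                        # response left empty for generation
)
inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

outputs = model.generate(
    **inputs,
    max_new_tokens=cfg["inference_config"]["max_new_tokens"],
    temperature=cfg["inference_config"]["temperature"],
    min_p=cfg["inference_config"]["min_p"],
    do_sample=True,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```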