File size: 1,687 Bytes
c5ca37a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# Human-readable job description shown in the scheduler UI.
description: Train Causal on Yahoo Dataset

auth:
  # which virtual cluster you belong to (msrlabs, etc.). Everyone has access to "pnrsy".
  vc: resrchprojvc6
  # physical cluster to use (cam, gcr, rr1) or Azure clusters (eu1, eu2, etc.)
  # previously tried clusters: rr2, eu2, eu1, et1
  cluster: rr1  # alternative: eu2
  # docker environment (vm) in which your job will run. we provide "generic" dockers
  # with the main deep learning toolkit installed (PyTorch, TF, Chainer, etc.)
  docker:
    # retired default Philly image, kept for reference:
    # image: philly/jobs/custom/generic-docker:py27
    # registry: phillyregistry.azurecr.io
    image: chunyl/pytorch-transformers:v0  # custom image pulled from Docker Hub
    registry: index.docker.io

storage:
  # Azure blob storage mounted into the job container at mount_path.
  _default:
    # use_phillyfs: True
    storage_account_name: textae
    container_name: bigtextae
    # path inside the job VM where the blob container is mounted
    mount_path: /mnt/_default

code:
  # local directory of the code. this will be uploaded to the server.
  # $CONFIG_DIR is expanded to the directory of this config file
  # upload disabled: the job uses code already present at remote_dir
  code_upload: False
  remote_dir: code/
  local_dir: $CONFIG_DIR/code

# data:
#   data upload is not required for this example
#   data_upload: False

search:
  # Hyperparameter sweep: one job is launched per grid point of `params`.
  job_template:
    # {experiment_name} and {bs_option} are substituted from the sweep params below
    name: gpt2_{experiment_name:s}_{bs_option:.0f}
    sku: G8  # GPU SKU (presumably 8 GPUs — confirm); alternatives tried: G4, G1
    command:
    - pip install --user --editable .
    - python examples/big_ae/run_lm_finetuning_baseline.py --output_dir ../output/philly_clm_yahoo_gpt2 --dataset Yahoo --model_type gpt2 --model_name_or_path gpt2  --do_train --train_data_file ../data/datasets/yahoo_data/train.txt --do_eval --eval_data_file ../data/datasets/yahoo_data/valid.txt --overwrite_output_dir  --per_gpu_train_batch_size {bs_option}
  # upper bound on the number of jobs the sweep may launch
  max_trials: 20
  type: grid
  params:
    - name: bs_option
      spec: discrete
      # per-GPU train batch sizes to sweep over (currently a single value)
      values: [4]