# config_finetune.yaml (fork of yuyangw/MolCLR)
name: 'new_BT_7' # experiment name
patience: 150 # early-stopping patience, presumably epochs without validation improvement
batch_size: 32 # batch size
epochs: 800 # total number of epochs
eval_every_n_epochs: 1 # validation frequency
fine_tune_from: BT8 # subdirectory of the pre-trained model in ./ckpt
log_every_n_steps: 5 # training-log print frequency (steps)
fp16_precision: False # use 16-bit (FP16) floating-point precision (True/False)
init_lr: 0.0005 # initial learning rate for the prediction head
init_base_lr: 0.0005 # initial learning rate for the base GNN encoder (see the optimizer sketch below)
warmup: 250 # learning-rate warm-up period (units depend on the training script)
weight_decay: 1e-6 # weight decay of Adam
gpu: cuda:0 # training GPU
task_name: visc # name of fine-tuning benchmark, including
# classifications: BBBP/BACE/ClinTox/Tox21/HIV/SIDER/MUV
# regressions: FreeSolv/ESOL/Lipo/qm7/qm8/qm9
model_type: gcn # GNN backbone (i.e., gin/gcn)
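
# How the training settings above might be consumed: a minimal sketch
# (assumed, not this repository's exact code). The pre-trained encoder gets
# init_base_lr and the freshly initialised prediction head gets init_lr via
# two Adam parameter groups; model.encoder and model.pred_head are
# hypothetical attribute names, and `model` is the network built from the
# model block below. float() guards against PyYAML parsing 1e-6 (no decimal
# point) as a string rather than a float.
#
#   import yaml
#   import torch
#
#   with open("config_finetune.yaml") as f:
#       config = yaml.safe_load(f)
#
#   optimizer = torch.optim.Adam(
#       [
#           {"params": model.encoder.parameters(), "lr": config["init_base_lr"]},
#           {"params": model.pred_head.parameters(), "lr": config["init_lr"]},
#       ],
#       weight_decay=float(config["weight_decay"]),
#   )
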
model:
num_layer: 3 # number of graph conv layers
emb_dim: 300 # embedding dimension in graph conv layers
  feat_dim: 512 # output feature dimension
drop_ratio: 0.1 # dropout ratio
pool: mean # readout pooling (i.e., mean/max/add)
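
# The model block above is shaped like a set of keyword arguments; a common
# pattern (assumed here, not verified against this fork) is to unpack it into
# the backbone constructor selected by model_type:
#
#   from models.gcn_molclr import GCN  # hypothetical import path
#
#   model = GCN(**config["model"])  # num_layer=3, emb_dim=300, feat_dim=512, ...
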
dataset:
num_workers: 0 # dataloader number of workers
valid_size: 0.1 # ratio of validation data
test_size: 0.2 # ratio of test data
train_size: 0.7 # ratio of training data
splitting: random # data splitting (i.e., random/scaffold)
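
# A minimal sketch (assumed) of applying the split ratios above under the
# splitting: random option; `dataset` stands for a hypothetical
# torch.utils.data.Dataset of the fine-tuning task, and `config` is loaded as
# in the optimizer sketch. The scaffold option would instead need a
# chemistry-aware split.
#
#   n = len(dataset)
#   n_test = int(n * config["dataset"]["test_size"])
#   n_valid = int(n * config["dataset"]["valid_size"])
#   n_train = n - n_valid - n_test  # remainder; matches train_size: 0.7
#   train_set, valid_set, test_set = torch.utils.data.random_split(
#       dataset, [n_train, n_valid, n_test]
#   )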