run.sh
#!/bin/bash
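# Example usage (illustrative invocations; positional arguments default to
# GPU 0, the DD dataset, mpr mode, and fold 0, i.e. all folds):
#   ./run.sh 0 PROTEINS mpr 1    # GPU 0, PROTEINS dataset, mpr mode, fold 1
#   ./run.sh 1 DD diffpool       # GPU 1, DD dataset, diffpool mode, all folds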
# input
GPU=${1-0}
DATA="${2-DD}" # DD, PROTEINS, COLLAB, REDDIT-BINARY
mode="${3-mpr}" # mpr, diffpool, mincut
fold=${4-0} # evaluation fold; set to 0 to run on all folds
# model and optimisation
cluster_dims="20 5"
epochs=30
hidden_dims="128 128"
interval_overlap=0.1
lrate=0.001
pooling_ratio=0.25
sim_batch_size=32
std_hidden_dim=32
case ${mode} in
    mpr)
        lrate=0.0005
        # task hyperparameters
        case ${DATA} in
            PROTEINS)
                cluster_dims="8 2"
                interval_overlap=0.25
                sim_batch_size=128
                ;;
            REDDIT-BINARY)
                interval_overlap=0.25
                ;;
        esac
        ;;
    diffpool)
        pooling_ratio=0.1
        # task hyperparameters
        case ${DATA} in
            COLLAB)
                std_hidden_dim=128
                ;;
            REDDIT-BINARY)
                std_hidden_dim=128
                ;;
        esac
        ;;
    mincut)
        # task hyperparameters
        case ${DATA} in
            DD)
                pooling_ratio=0.1
                ;;
        esac
        ;;
esac
# launch evaluation with the selected hyperparameters; multi-value options
# (cluster_dims, hidden_dims) are left unquoted so they expand into separate arguments
CUDA_VISIBLE_DEVICES=${GPU} python -u eval.py \
    --data $DATA \
    --fold $fold \
    --mode $mode \
    --cluster_dims $cluster_dims \
    --interval_overlap $interval_overlap \
    --hidden_dims $hidden_dims \
    --lrate $lrate \
    --epochs $epochs \
    --sim_batch_size $sim_batch_size \
    --pooling_ratio $pooling_ratio \
    --std_hidden_dim $std_hidden_dim