#!/usr/bin/env bash
# Step 1: run run_classifier_adapter_tune_all.py.
#
# This step needs to load the adapter model. Using the original optimiser is
# probably recommended here, since it optimises the full BERT model.
TRAINING_UTILITY=training_utility
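# Pin training to a single GPU; adjust the device index for your machine.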
export CUDA_VISIBLE_DEVICES=8
BERT_DIR="models/BERT_BASE_UNCASED"
BERT_CONFIG=$BERT_DIR/bert_config.json
VOCAB_DIR=$BERT_DIR/vocab.txt
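# Checkpoints produced by the adapter pre-training step; steps 0 and 99000
# are fine-tuned below.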
BERT_EXTENDED_DIR="models/output_pretrain_adapter"
OUTPUT_DIR="models/output_model_finetunning"
OUTPUT_SUFFIX=_tune_all
GLUE_DIR='data/GLUE'
### The second fine-tuning variant: tune all weights (BERT and adapters).
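# Note: the bracketed flag values below ("[16]", "[2e-5, 3e-5]", "[3,4]") are
# list-valued hyperparameters; run_classifier_adapter_tune_all.py is assumed
# to expand them into a grid, training one model per combination.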
for STEP in "0" "99000"; do
  CHECKPOINT=${BERT_EXTENDED_DIR}/model.ckpt-${STEP}
  for task_name in "QNLI" "QQP" "MNLI"; do
    echo "$task_name"
    echo "$CHECKPOINT"
    GLUE_DATA="$GLUE_DIR/$task_name"
    # Create the log directory up front so that tee has somewhere to write.
    mkdir -p "${OUTPUT_DIR}${OUTPUT_SUFFIX}/${STEP}"
    python3.6 $TRAINING_UTILITY/run_classifier_adapter_tune_all.py \
      --task_name=$task_name \
      --do_train=true \
      --do_eval=true \
      --do_early_stopping=false \
      --data_dir=$GLUE_DATA \
      --vocab_file=$VOCAB_DIR \
      --bert_config_file=$BERT_CONFIG \
      --init_checkpoint=$CHECKPOINT \
      --max_seq_length=128 \
      --train_batch_size="[16]" \
      --learning_rate="[2e-5, 3e-5]" \
      --num_train_epochs="[3,4]" \
      --original_model=True \
      --output_dir=${OUTPUT_DIR}${OUTPUT_SUFFIX}/${STEP}/${task_name} | tee ${OUTPUT_DIR}${OUTPUT_SUFFIX}/${STEP}/${task_name}.out
  done
done
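
# Expected output layout after a run, e.g. for STEP=99000 and task MNLI:
#   models/output_model_finetunning_tune_all/99000/MNLI/     checkpoints and eval results
#   models/output_model_finetunning_tune_all/99000/MNLI.out  console log captured by tee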