Added ability to finetune models
This commit is contained in:
train.sh (24 lines shown in diff)
@@ -1,21 +1,23 @@
 #!/bin/sh
 python3 -m train \
-  --save_checkpoints_every 10 \
-  --experiment_name "wlasl" \
-  --epochs 600 \
-  --optimizer "SGD" \
-  --lr 0.001 \
+  --save_checkpoints_every 1 \
+  --experiment_name "Finetune Basic Signs" \
+  --epochs 100 \
+  --optimizer "ADAM" \
+  --lr 0.00001 \
   --batch_size 16 \
-  --dataset_name "WLASL" \
-  --training_set_path "WLASL100_train.csv" \
-  --validation_set_path "WLASL100_val.csv" \
+  --dataset_name "BasicSigns" \
+  --training_set_path "train.csv" \
+  --validation_set_path "val.csv" \
   --vector_length 32 \
   --epoch_iters -1 \
-  --scheduler_factor 0.2 \
-  --hard_triplet_mining "in_batch" \
+  --scheduler_factor 0.05 \
+  --hard_triplet_mining "None" \
   --filter_easy_triplets \
   --triplet_loss_margin 2 \
   --dropout 0.2 \
   --tracker=clearml \
   --dataset_loader=clearml \
-  --dataset_project="SpoterEmbedding"
+  --dataset_project="SpoterEmbedding" \
+  --finetune \
+  --checkpoint_path "checkpoints/checkpoint_embed_3006.pth"
Reference in New Issue
Block a user