Allow training with the Kaggle data
This commit is contained in:
19
train.sh
19
train.sh
@@ -1,22 +1,21 @@
|
||||
#!/bin/sh
# Launch SPOTER embedding training on the GoogleWLASL (Kaggle) dataset,
# using ClearML for both experiment tracking and dataset loading.
# Usage: ./train.sh  (expects the `train` module importable by `python -m`)
#
# NOTE(review): reconstructed from a diff view; flags that appeared twice
# (old vs. new value) keep the newer value — confirm against the committed
# train.sh in the repository.

set -eu  # abort on unhandled command failure or unset variable

python -m train \
  --save_checkpoints_every 10 \
  --experiment_name "basic" \
  --epochs 300 \
  --optimizer "ADAM" \
  --lr 0.0001 \
  --batch_size 16 \
  --dataset_name "GoogleWLASL" \
  --training_set_path "spoter_train.csv" \
  --validation_set_path "spoter_test.csv" \
  --vector_length 32 \
  --epoch_iters -1 \
  --scheduler_factor 0.2 \
  --hard_triplet_mining "None" \
  --filter_easy_triplets \
  --triplet_loss_margin 2 \
  --dropout 0.2 \
  --augmentations_prob=0.75 \
  --hard_mining_scheduler_triplets_threshold=0 \
  --normalize_embeddings \
  --num_classes 100 \
  --tracker=clearml \
  --dataset_loader=clearml \
  --dataset_project="SpoterEmbedding"
|
||||
|
||||
Reference in New Issue
Block a user