Nik-fedorov's workspace
Runs
77
Name
3 visualized
State
Notes
User
Tags
Created
Runtime
Sweep
dataloader_num_workers
dataset
model
num_epochs
optimizer
sampler.n_instances
sampler.n_labels
sampler.name
script
checkpoint_period
valid_period
cmc/1
fnmr@fmr/1
map/5
precision/5
train/cmc/1
train/map/5
d(a,n)-d(a,p)/max
d(a,n)-d(a,p)/mean
d(a,n)-d(a,p)/min
d(a,n)-d(a,p)/std
d(a,n)/max
d(a,n)/mean
d(a,n)/min
d(a,n)/std
d(a,p)/max
d(a,p)/mean
d(a,p)/min
d(a,p)/std
d(p,n)/max
d(p,n)/mean
d(p,n)/min
d(p,n)/std
additional/class_sizes/max
additional/class_sizes/mean
additional/class_sizes/min
additional/inter_class_dist/max
additional/inter_class_dist/mean
additional/inter_class_dist/min
train/additional/class_sizes/max
train/additional/class_sizes/mean
train/additional/class_sizes/min
train/additional/inter_class_dist/max
train/additional/inter_class_dist/mean
Crashed
nik-fedorov
3h 13m 31s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84656
-
0.87588
-
0.92974
0.94345
0.42859
0.14166
-0.23456
0.11431
1.0806
0.62914
0.39028
0.078598
1.16426
0.48748
0.25485
0.084328
0.96829
0.65417
0.39028
0.084094
0.41472
0.33542
0.27581
0.85571
0.55872
0.00024414
0.37771
0.31934
0.25522
0.88953
0.68205
Failed
nik-fedorov
1h 21m 3s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.85179
-
0.87774
-
0.90297
0.92258
0.6762
0.18753
-0.36543
0.17271
1.14628
0.90585
0.45531
0.10568
1.23584
0.71832
0.3743
0.12308
1.22194
0.94733
0.56191
0.11407
0.61191
0.49235
0.37451
1.17709
0.8179
0.00024414
0.56462
0.4793
0.37107
1.19067
0.93194
Failed
nik-fedorov
4h 19m 46s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83288
-
0.86171
-
0.95259
0.96238
0.66612
0.26389
-0.50272
0.16975
1.26117
0.95779
0.44095
0.11133
1.23431
0.69391
0.40548
0.12104
1.30746
0.99203
0.44095
0.11906
0.65737
0.51003
0.37114
1.15664
0.82783
0.00024414
0.54869
0.45696
0.35723
1.2368
1.01566
Failed
nik-fedorov
1h 16m 43s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.85348
-
0.8784
-
0.90177
0.92124
0.61636
0.18523
-0.34773
0.16953
1.16717
0.91513
0.53374
0.10834
1.33753
0.72989
0.43188
0.12438
1.34867
0.95693
0.54865
0.11992
0.62647
0.49423
0.37807
1.17533
0.81928
0.00024414
0.58325
0.48583
0.38169
1.19153
0.93714
Failed
nik-fedorov
1h 43m 17s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84386
-
0.87393
-
0.9132
0.93005
0.64106
0.19584
-0.42907
0.16984
1.18628
0.90086
0.44315
0.11334
1.23489
0.70502
0.36058
0.11643
1.23637
0.9381
0.44315
0.12422
0.61526
0.48588
0.36184
1.18018
0.81904
0.00024414
0.56563
0.47223
0.36868
1.18278
0.94675
Crashed
nik-fedorov
46m 1s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.85719
-
0.8828
-
0.89103
0.91182
0.69539
0.15742
-0.47903
0.17248
1.2338
0.92454
0.55033
0.10905
1.323
0.76712
0.39856
0.13198
1.2393
0.96942
0.56856
0.119
0.63732
0.51213
0.38585
1.17483
0.8364
0.00024414
0.6055
0.50695
0.38979
1.18917
0.92996
Failed
nik-fedorov
2h 23m 3s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84031
-
0.87221
-
0.92241
0.93817
0.59336
0.19428
-0.44831
0.1673
1.28207
0.86366
0.48999
0.10911
1.35833
0.66938
0.36784
0.11836
1.19861
0.9001
0.48999
0.11559
0.59021
0.46641
0.34696
1.10948
0.76779
0.00024414
0.52396
0.43953
0.33528
1.15053
0.92129
Failed
nik-fedorov
4h 50s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83153
-
0.8619
-
0.93435
0.94841
0.51594
0.14401
-0.36709
0.1443
0.88086
0.6656
0.38122
0.092593
0.84629
0.52159
0.28813
0.092906
0.96553
0.6907
0.38122
0.09169
0.43358
0.3391
0.25851
0.83812
0.57512
0.00024414
0.38849
0.30593
0.23227
0.9583
0.75031
Failed
nik-fedorov
1h 18m 18s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84875
-
0.87383
-
0.90331
0.92551
0.79524
0.25466
-0.47522
0.19339
1.25627
1.03925
0.53975
0.11792
1.32352
0.7846
0.42318
0.14204
1.38932
1.07366
0.53975
0.13667
0.71002
0.53689
0.37729
1.24828
0.91347
0.00024414
0.62162
0.52046
0.39911
1.2851
1.04806
Failed
nik-fedorov
50m 29s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84808
-
0.87507
-
0.89444
0.91589
0.72531
0.22434
-0.40374
0.19263
1.28578
1.01467
0.52202
0.11935
1.33969
0.79033
0.43368
0.14205
1.35347
1.05114
0.52202
0.13512
0.69778
0.53013
0.36217
1.2377
0.91082
0.00024414
0.62623
0.52301
0.38827
1.26783
1.02341
Failed
nik-fedorov
13m 46s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84352
-
0.87456
-
0.87483
0.89796
0.79848
0.27025
-0.38923
0.20166
1.30527
1.06721
0.53155
0.13191
1.26249
0.79696
0.40518
0.14366
1.36269
1.10173
0.56735
0.13641
0.68356
0.53195
0.37305
1.23306
0.9024
0.00024414
0.6426
0.51935
0.38268
1.26351
1.00852
Failed
nik-fedorov
1h 8m 13s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84352
-
0.87078
-
0.89359
0.91778
0.72502
0.29436
-0.30357
0.18703
1.27713
1.00754
0.47352
0.12951
1.22175
0.71317
0.39291
0.12885
1.31534
1.03557
0.47352
0.13703
0.64405
0.48513
0.34809
1.21618
0.84684
0.00024414
0.55306
0.46243
0.36189
1.22847
0.99171
Failed
nik-fedorov
1h 19m 15s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.85128
-
0.88052
-
0.9028
0.92391
0.68857
0.17462
-0.31244
0.16421
1.09638
0.86493
0.43072
0.10904
1.17782
0.69031
0.35918
0.11626
1.17514
0.90675
0.43072
0.11735
0.58837
0.4731
0.36458
1.13267
0.77863
0.00024414
0.54666
0.46061
0.33684
1.15258
0.89335
Failed
nik-fedorov
3h 41m 43s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83052
-
0.85987
-
0.94611
0.95629
0.60143
0.24325
-0.40892
0.16894
1.26829
0.90243
0.48662
0.10445
1.33002
0.65917
0.37077
0.12397
1.24836
0.93548
0.48662
0.11366
0.61407
0.4756
0.34206
1.11603
0.78842
0.00024414
0.52204
0.43337
0.33749
1.18958
0.97054
Failed
nik-fedorov
3h 23m 13s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
# torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.82968
-
0.86207
-
0.95464
0.96323
0.69835
0.26022
-0.39955
0.17232
1.29558
0.92764
0.39786
0.11011
1.31451
0.66741
0.35931
0.12686
1.2284
0.96232
0.39786
0.11816
0.63589
0.49344
0.36078
1.11313
0.7982
0.00024414
0.52022
0.43999
0.33629
1.19837
0.98972
Crashed
nik-fedorov
1h 50m 1s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
# torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84656
-
0.87112
-
0.92582
0.93935
0.74225
0.25555
-0.34563
0.18953
1.24315
0.97354
0.47872
0.11899
1.33425
0.71799
0.34695
0.1353
1.31047
1.01276
0.47872
0.12908
0.656
0.50956
0.36959
1.20557
0.86089
0.00024414
0.58611
0.47308
0.35814
1.27296
1.02827
Failed
nik-fedorov
3h 40m 8s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
# torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.82225
-
0.8517
-
0.96112
0.9686
0.85635
0.33751
-0.33903
0.18712
1.27313
1.04207
0.42965
0.11951
1.39046
0.70455
0.36315
0.1424
1.40411
1.0779
0.55356
0.12987
0.71001
0.53577
0.36654
1.21016
0.8907
0.00017263
0.55618
0.46122
0.36462
1.31966
1.10645
Failed
nik-fedorov
1h 17m 16s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
# torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84723
-
0.87586
-
0.90672
0.9279
0.68225
0.21971
-0.26049
0.18307
1.23364
0.95082
0.55559
0.11663
1.25756
0.73111
0.37181
0.13168
1.29055
0.9902
0.55559
0.12422
0.64756
0.50994
0.36153
1.20016
0.86891
0.00024414
0.59213
0.48447
0.36346
1.24624
1.00426
Failed
nik-fedorov
2h 27m 53s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84402
-
0.87155
-
0.92838
0.94223
0.75068
0.26414
-0.4159
0.19311
1.22868
0.98711
0.57391
0.12442
1.25269
0.72297
0.31585
0.13507
1.33586
1.02696
0.58408
0.13304
0.67654
0.52484
0.36631
1.19841
0.87478
0.00024414
0.58697
0.47752
0.36289
1.25648
1.04271
Failed
nik-fedorov
1h 28m 49s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
24
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84824
-
0.87731
-
0.90433
0.92415
0.63373
0.19512
-0.27852
0.16911
1.11288
0.88041
0.51624
0.10526
1.20389
0.6853
0.37034
0.12114
1.26293
0.91684
0.51624
0.11481
0.60115
0.47403
0.35794
1.13119
0.7901
0.00024414
0.54268
0.45731
0.34197
1.15415
0.91236
Failed
nik-fedorov
2h 20m 4s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83828
-
0.86702
-
0.90842
0.92623
0.50376
0.18915
-0.29987
0.13528
0.92713
0.71875
0.34552
0.099213
0.77883
0.52959
0.30565
0.083314
0.95498
0.73467
0.40401
0.098743
0.44361
0.34153
0.27027
0.89541
0.60456
0.00024414
0.37646
0.31597
0.25222
0.95801
0.76609
Failed
nik-fedorov
2h 3m 6s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83896
-
0.86602
-
0.90433
0.92455
0.62063
0.22381
-0.35332
0.1616
1.09108
0.85367
0.42445
0.11651
0.9406
0.62986
0.36091
0.096392
1.1074
0.87499
0.44947
0.11355
0.52693
0.41046
0.29881
1.0192
0.71012
0.00024414
0.44505
0.37663
0.29608
1.09442
0.88192
Failed
nik-fedorov
1h 44m 2s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83964
-
0.86683
-
0.90194
0.92329
0.57511
0.21108
-0.34921
0.14678
1.04371
0.82452
0.40638
0.10667
0.9271
0.61344
0.35151
0.091769
1.08936
0.84127
0.4107
0.10801
0.50088
0.38628
0.28723
0.99252
0.68101
0.00024414
0.4321
0.36674
0.30264
1.04091
0.84463
Crashed
nik-fedorov
5h 21m 32s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.81009
-
0.84333
-
0.93912
0.95449
0.68879
0.2942
-0.40157
0.1512
1.09169
0.86738
0.42992
0.093215
0.98451
0.57318
0.2883
0.10318
1.12945
0.88235
0.42992
0.096927
0.53043
0.40316
0.28793
0.95795
0.66967
0.00024414
0.407
0.34063
0.2667
1.14527
0.90777
Failed
nik-fedorov
1h 35m
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83693
-
0.86588
-
0.897
0.91805
0.62086
0.21901
-0.38154
0.16812
1.10764
0.88436
0.40422
0.11378
1.01761
0.66535
0.38196
0.10662
1.16981
0.90766
0.40422
0.10868
0.53198
0.4228
0.31489
1.03568
0.73462
0.00024414
0.47654
0.39718
0.31336
1.10117
0.89435
Failed
nik-fedorov
1h 22m 25s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84166
-
0.86848
-
0.8941
0.9168
0.59935
0.22497
-0.30237
0.16278
1.10514
0.90616
0.47756
0.11114
1.06914
0.68119
0.38191
0.10615
1.17904
0.92875
0.50612
0.1064
0.55023
0.43236
0.32182
1.04954
0.74202
0.00024414
0.48511
0.41006
0.32961
1.11852
0.9048
Failed
nik-fedorov
53m 46s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
criterion.tri_loss.margin += 0.005
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83069
-
0.86095
-
0.88233
0.90649
0.75121
0.26995
-0.42465
0.22017
1.32875
1.05219
0.41242
0.15862
1.24574
0.78223
0.40916
0.14732
1.37934
1.08422
0.48606
0.1553
0.67583
0.50637
0.35586
1.2343
0.91854
0.00024414
0.56757
0.45271
0.3553
1.30344
1.10208
Failed
nik-fedorov
4h 18m 38s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
criterion.tri_loss.margin += 0.005
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.75439
-
0.79294
-
0.92769
0.94333
1.1151
0.6764
-0.52949
0.32137
1.41859
1.21224
0.21952
0.24785
1.40547
0.53583
0.25803
0.20928
1.50524
1.22859
0.27025
0.24353
0.78905
0.56636
0.27762
1.39567
1.00927
0.00017263
0.34041
0.26624
0.21602
1.53511
1.35372
Failed
nik-fedorov
30m 11s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
criterion.tri_loss.margin += 0.05
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.81955
-
0.85061
-
0.87295
0.89784
1.09241
0.35883
-0.99014
0.33377
1.39869
1.10765
0.33914
0.23282
1.43716
0.74882
0.23593
0.23327
1.47716
1.15164
0.38298
0.21988
0.73038
0.5231
0.26867
1.34396
1.02311
0.00024414
0.64027
0.40669
0.26816
1.4536
1.22759
Failed
nik-fedorov
58m 55s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83913
-
0.86756
-
0.88915
0.91184
0.80701
0.26397
-0.68636
0.20958
1.24872
1.00674
0.47774
0.14536
1.18262
0.74276
0.40357
0.13135
1.30215
1.03219
0.47774
0.13876
0.63178
0.47547
0.31789
1.21235
0.86239
0.00024414
0.539
0.4404
0.34043
1.26072
1.03655
Failed
nik-fedorov
56m 49s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83474
-
0.86589
-
-
-
0.7415
0.28572
-0.37373
0.19872
1.25659
1.00594
0.55722
0.13501
1.16693
0.72022
0.39818
0.13178
1.30524
1.02122
0.55722
0.13614
0.62295
0.47719
0.34445
1.18297
0.84997
0.00024414
-
-
-
-
-
Failed
nik-fedorov
1h 10m 14s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83052
-
0.8601
-
-
-
0.67232
0.2725
-0.35348
0.18065
1.22459
0.94408
0.47917
0.12851
1.0709
0.67158
0.38743
0.11071
1.22947
0.96198
0.50338
0.12874
20.0225
11.72018
5.49468
1.14814
0.79449
0
-
-
-
-
-
Failed
nik-fedorov
1h 44m 48s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.82833
-
0.85589
-
-
-
0.57983
0.23888
-0.48477
0.15444
0.99655
0.81564
0.42294
0.10343
0.96309
0.57676
0.3188
0.10066
1.06052
0.82899
0.42294
0.10557
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
11m
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
16
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.8182
-
0.85157
-
-
-
0.36447
0.036978
-0.36162
0.13284
0.78633
0.5187
0.1328
0.12414
0.82754
0.48172
0.31698
0.070918
0.87148
0.60847
0.3154
0.10807
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
9m 36s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
16
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.82073
-
0.85447
-
-
-
0.47091
0.049124
-0.4418
0.16383
0.85284
0.55518
0.15068
0.15403
0.83517
0.50605
0.30902
0.081975
0.95355
0.65102
0.31101
0.10594
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
23m 15s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83964
-
0.87278
-
-
-
0.74772
0.25122
-0.44586
0.17332
1.23187
1.02408
0.57471
0.10403
1.17463
0.77286
0.37553
0.12186
1.32661
1.05128
0.57471
0.10884
-
-
-
-
-
-
-
-
-
-
-
Crashed
nik-fedorov
1h 6m 31s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83862
-
0.8682
-
-
-
0.78724
0.33842
-0.37841
0.17718
1.34657
1.06983
0.37267
0.12132
1.29868
0.73141
0.41148
0.12995
1.38995
1.09583
0.37267
0.12966
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
33m 28s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83288
-
0.8616
-
-
-
0.87939
0.31753
-0.6442
0.24199
1.33718
1.10361
0.51496
0.1638
1.25746
0.78607
0.35456
0.15651
1.43584
1.14018
0.54302
0.15532
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
42m 9s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84369
-
0.87029
-
-
-
0.78095
0.29432
-0.29732
0.18714
1.35487
1.02581
0.5145
0.12059
1.33367
0.73149
0.3413
0.13405
1.32492
1.05281
0.5145
0.12673
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
52m 58s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84014
-
0.86972
-
-
-
0.78969
0.35719
-0.14694
0.16681
1.32954
1.13056
0.71424
0.10684
1.30506
0.77337
0.4338
0.12907
1.39097
1.15204
0.76171
0.11822
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
1h 8m 5s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83879
-
0.86751
-
-
-
0.86789
0.34441
-0.56181
0.17639
1.32774
1.07957
0.47188
0.11579
1.31916
0.73516
0.31408
0.1343
1.38338
1.10337
0.50813
0.12378
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
28m 11s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84115
-
0.87228
-
-
-
0.66736
0.21227
-0.36313
0.17353
1.20042
0.97396
0.50288
0.11244
1.13532
0.76169
0.42065
0.11574
1.24548
0.99885
0.54249
0.11092
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
8m 43s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.842
-
0.87055
-
-
-
0.82146
0.30611
-0.46163
0.17021
1.3651
1.19468
0.63306
0.08529
1.27093
0.88857
0.46472
0.13528
1.43605
1.19774
0.60966
0.093669
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
12m 14s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84841
-
0.8752
-
-
-
0.78965
0.23553
-0.37343
0.17129
1.28689
1.11205
0.60566
0.098818
1.21687
0.87651
0.45618
0.13033
1.35977
1.13021
0.59968
0.10954
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
30m 26s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84318
-
0.87095
-
-
-
0.68514
0.2131
-0.34014
0.15966
1.21549
1.01991
0.63283
0.099828
1.14542
0.8068
0.46499
0.11128
1.28479
1.03986
0.62087
0.10848
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
31m 6s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83862
-
0.87012
-
-
-
0.26465
0.062058
-0.14947
0.07113
0.67288
0.47381
0.27738
0.051277
0.6774
0.41175
0.25965
0.048357
0.67288
0.49189
0.27738
0.052372
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
14m 53s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83457
-
0.86375
-
-
-
0.42147
0.072971
-0.30506
0.11366
1.06995
0.88489
0.61683
0.064549
1.07074
0.81192
0.53587
0.087731
1.11241
0.92522
0.61683
0.067702
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
34m 47s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84031
-
0.86919
-
-
-
0.45686
0.14331
-0.27502
0.11645
0.92158
0.73774
0.41674
0.078564
0.84547
0.59442
0.37616
0.073134
0.95453
0.76027
0.41674
0.076376
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
24m 35s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84504
-
0.87445
-
-
-
0.66737
0.185
-0.43615
0.17383
1.15882
0.95805
0.50182
0.11867
1.11185
0.77306
0.43263
0.12129
1.24307
0.98989
0.50182
0.11699
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
31m 59s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
# add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb_metrics_value.update(criterion.tri_loss.summary())
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.8366
-
0.86851
-
-
-
0.82042
0.24633
-0.47387
0.19631
1.25314
1.02588
0.44921
0.13701
1.2912
0.77955
0.35003
0.13407
1.31721
1.05646
0.53964
0.13224
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
1h 13m 58s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
print('Grad norm:', torch.nn.utils.clip_grad_norm_(model.parameters(), 3).item())
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
add_train_metrics(wandb_metrics_value, model, train_loader_metrics,
metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83812
-
0.86605
-
0.84789
0.87624
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
25m 4s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
print('Grad norm:', torch.nn.utils.clip_grad_norm_(model.parameters(), 3).item())
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.8366
-
0.8662
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
7m 14s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
print('Grad norm:', torch.nn.utils.clip_grad_norm_(model.parameters(), 3).item())
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.73768
-
0.78733
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
6m 13s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.83069
-
0.86205
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
26m 51s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.8447
-
0.87283
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
16m 32s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84402
-
0.87262
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
18m 18s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.001
)
4
20
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84926
-
0.87725
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
30m 56s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
20
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 5
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
5
0.84858
-
0.87712
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
33m 8s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
20
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.84537
-
0.87412
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
23m 16s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
16
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.84352
-
0.87423
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
16m 4s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.8447
-
0.87389
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
18m 56s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.83609
-
0.8672
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
28m 38s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.84099
-
0.86797
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
9m 57s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.82275
-
0.85717
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
25m 57s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.84301
-
0.87068
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
11m 59s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.82157
-
0.85556
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
36m 55s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.81381
-
0.84379
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
22m 41s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.71708
-
0.76256
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
35m 16s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.82444
-
0.85767
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
18m 25s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.82833
-
0.86008
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
30m 57s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.7213
-
0.76768
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
25m 48s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.72316
-
0.76976
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
30m 48s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0.66357
-
0.72259
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
5m 12s
-
2
CUB_200_2011
ViT
10000
AdamW (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
eps: 1e-08
foreach: None
lr: 1e-05
maximize: False
weight_decay: 0.01
)
4
8
balanced
torch.manual_seed(42)
n_epochs = 10000
valid_period = 10
metrics = {
'cmc_top_k': [1], # to calculate cmc@1
'map_top_k': [5], # to calculate map@5
'precision_top_k': [],
'fmr_vals': []
}
wandb_init_data = {
'project': 'TP3',
'name': 'run',
'save_code': True,
'config': {
'model': 'ViT',
'optimizer': optimizer,
'scheduler': scheduler,
'sampler': {
'name': 'balanced',
'n_labels': n_labels,
'n_instances': n_instances
},
'valid_period': valid_period,
'dataset': 'CUB_200_2011',
'num_epochs': n_epochs,
'dataloader_num_workers': num_workers,
'script': _ih[-1]
}
}
with wandb.init(**wandb_init_data) as run:
print('Evaluating pre-trained model before training')
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
best_cmc1 = wandb_metrics_value['cmc/1']
for epoch in range(n_epochs):
model.train()
for batch in train_loader:
optimizer.zero_grad()
embeddings = model(batch['input_tensors'].to(device))
loss = criterion(embeddings, batch['labels'].to(device))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 3) # gradient clipping
optimizer.step()
if (epoch + 1) % valid_period == 0:
print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
with torch.inference_mode():
wandb_metrics_value = validation(model, valid_loader, metrics, device)
wandb.log(wandb_metrics_value)
if wandb_metrics_value['cmc/1'] > best_cmc1:
best_cmc1 = wandb_metrics_value['cmc/1']
save_model('best.pt', epoch + 1, model, optimizer, scheduler)
wandb.save('best.pt')
print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0
-
0.0057323
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
16m 57s
-
2
CUB_200_2011
ViT
10000
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
differentiable: False
eps: 1e-08
foreach: None
fused: False
lr: 1e-05
maximize: False
weight_decay: 0
)
4
8
balanced
# Reproducibility: fix the global PyTorch RNG seed for this run.
torch.manual_seed(42)

# Training schedule: a very large epoch budget (run until stopped/crashed);
# validation happens every `valid_period` epochs.
n_epochs = 10000
valid_period = 10

# Retrieval metrics requested from the validation routine.
metrics = {
    'cmc_top_k': [1],  # to calculate cmc@1
    'map_top_k': [5],  # to calculate map@5
    'precision_top_k': [],  # precision@k not computed for this run
    'fmr_vals': []  # FNMR@FMR not computed for this run
}

# Arguments for wandb.init(): run identity plus the full experiment config
# that will appear in the W&B runs table.
wandb_init_data = {
    'project': 'TP3',
    'name': 'run',
    'save_code': True,
    'config': {
        'model': 'ViT',
        'optimizer': optimizer,  # logged via its repr (hyperparameters visible in W&B)
        'scheduler': scheduler,
        'sampler': {
            'name': 'balanced',
            'n_labels': n_labels,
            'n_instances': n_instances
        },
        'valid_period': valid_period,
        'dataset': 'CUB_200_2011',
        'num_epochs': n_epochs,
        'dataloader_num_workers': num_workers,
        # NOTE: _ih is IPython's input history, so this logs the source text
        # of the current notebook cell alongside the run.
        'script': _ih[-1]
    }
}
with wandb.init(**wandb_init_data) as run:
    # Log a baseline: metrics of the pre-trained model before any fine-tuning.
    # FIX: run this first validation under torch.inference_mode() too — the
    # original performed it with autograd enabled, inconsistent with the
    # in-loop validation below.
    # NOTE(review): model.eval() is never called before validation(); this
    # assumes validation() switches the model to eval mode itself — confirm.
    print('Evaluating pre-trained model before training')
    with torch.inference_mode():
        wandb_metrics_value = validation(model, valid_loader, metrics, device)
    wandb.log(wandb_metrics_value)
    best_cmc1 = wandb_metrics_value['cmc/1']

    for epoch in range(n_epochs):
        # One full training pass over the training set.
        model.train()
        for batch in train_loader:
            optimizer.zero_grad()
            embeddings = model(batch['input_tensors'].to(device))
            loss = criterion(embeddings, batch['labels'].to(device))
            loss.backward()
            optimizer.step()

        # Periodic validation + best-checkpoint tracking.
        # NOTE(review): `scheduler` is saved in checkpoints but scheduler.step()
        # is never called in this loop — confirm that is intentional.
        if (epoch + 1) % valid_period == 0:
            print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
            with torch.inference_mode():
                wandb_metrics_value = validation(model, valid_loader, metrics, device)
            wandb.log(wandb_metrics_value)
            if wandb_metrics_value['cmc/1'] > best_cmc1:
                best_cmc1 = wandb_metrics_value['cmc/1']
                save_model('best.pt', epoch + 1, model, optimizer, scheduler)
                wandb.save('best.pt')
                print(f'\nNew best CMC@1 {best_cmc1} at {epoch + 1} epoch\n')
-
10
0
-
0.0057323
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Finished
nik-fedorov
41s
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Failed
nik-fedorov
1h 11m 19s
-
2
CUB_200_2011
ViT
10000
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
capturable: False
differentiable: False
eps: 1e-08
foreach: None
fused: False
lr: 1e-05
maximize: False
weight_decay: 0
)
4
8
balanced
# Reproducibility: fix the global PyTorch RNG seed for this run.
torch.manual_seed(42)

# Training schedule: a very large epoch budget (run until stopped/crashed);
# validation happens every `valid_period` epochs.
n_epochs = 10000
valid_period = 10

# Retrieval metrics requested from compute_metrics() during validation.
metrics = {
    'cmc_top_k': [1],  # to calculate cmc@1
    'map_top_k': [5],  # to calculate map@5
    'precision_top_k': [],  # precision@k not computed for this run
    'fmr_vals': []  # FNMR@FMR not computed for this run
}

# Arguments for wandb.init(): run identity plus the full experiment config
# that will appear in the W&B runs table.
wandb_init_data = {
    'project': 'TP3',
    'name': 'run',
    'config': {
        'model': 'ViT',
        'optimizer': optimizer,  # logged via its repr (hyperparameters visible in W&B)
        'scheduler': scheduler,
        'sampler': {
            'name': 'balanced',
            'n_labels': n_labels,
            'n_instances': n_instances
        },
        'valid_period': valid_period,
        'dataset': 'CUB_200_2011',
        'num_epochs': n_epochs,
        'dataloader_num_workers': num_workers,
        # NOTE: _ih is IPython's input history, so this logs the source text
        # of the current notebook cell alongside the run.
        'script': _ih[-1]
    }
}

# Best validation CMC@1 seen so far; no pre-training baseline is evaluated
# in this variant, so start from zero.
best_cmc1 = 0.0
with wandb.init(**wandb_init_data) as run:
    # Fine-tuning loop: one training pass per epoch; every `valid_period`
    # epochs embed the validation set, compute retrieval metrics, log them
    # to W&B and checkpoint whenever CMC@1 improves.
    for epoch in range(n_epochs):
        model.train()
        for train_batch in train_loader:
            optimizer.zero_grad()
            batch_embeddings = model(train_batch['input_tensors'].to(device))
            batch_loss = criterion(batch_embeddings, train_batch['labels'].to(device))
            batch_loss.backward()
            optimizer.step()

        # Guard clause: skip straight to the next epoch unless this one
        # lands on the validation period.
        if (epoch + 1) % valid_period != 0:
            continue

        print(f'{epoch + 1} training epochs finished\nValidation started: {dt.datetime.now()}')
        with torch.inference_mode():
            model.eval()
            valid_embeddings = inference(model, valid_loader, device)
            print(f'Inference finished: {dt.datetime.now()}')
            valid_labels = df_valid['label'].values
            # All-pairs L2 distances over the validation embeddings; every
            # item acts as both query and gallery (all-ones masks).
            # NOTE(review): self-distances on the diagonal are zero — assumes
            # compute_metrics() excludes self-matches; confirm.
            pairwise_dists = torch.cdist(valid_embeddings, valid_embeddings, p=2)
            full_mask = torch.ones(len(valid_embeddings))
            raw_metrics = compute_metrics(pairwise_dists, valid_labels, full_mask, full_mask, **metrics)
            logged_metrics = transform_metrics_for_wandb_logging(raw_metrics)
            wandb.log(logged_metrics)
            print(logged_metrics, end='\n\n')
            # Checkpoint (and upload) on a new best CMC@1.
            if logged_metrics['cmc/1'] > best_cmc1:
                best_cmc1 = logged_metrics['cmc/1']
                save_model('best.pt', epoch + 1, model, optimizer, scheduler)
                wandb.save('best.pt')
-
10
0.65885
-
0.71049
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
1-77
of 77