Pierrotlc's workspace
Runs: 105
Run table columns: Name (10 visualized), State, Notes, User, Tags, Created, Runtime, Sweep
Config columns: batch_size, betas_d, betas_g, dataloader, device, dim_image, dim_z, dropout, epochs, gamma_d, gamma_g, gp_factor, iter_D, lr_d, lr_g, milestones_d, milestones_g, n_channels, n_first_channels, n_iter_d, n_iter_g, n_iter_log, n_layers_block, n_layers_d_block, n_layers_z, n_noise, netD, netG, optimD, optimG, penalyze_fake, running_avg_factor_D, running_avg_factor_G, seed, stepD, stepG, step_size_d, step_size_g, test_loader, train_loader, weight_GP, weight_avg_factor_d, weight_avg_factor_g, weight_decay_d
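The config columns above are the hyperparameters each run logs to its W&B config. A minimal sketch of how such a run might be registered is shown below; the project name and the exact wandb calls are assumptions for illustration, and the values are taken from the rows that follow:

```python
import wandb

# Hypothetical example: register a run whose config uses the column names above.
config = {
    "batch_size": 256,
    "device": "cuda",
    "dim_image": 64,          # images are 64x64
    "dim_z": 32,              # latent size fed to the mapping network
    "dropout": 0.3,
    "epochs": 40,
    "gamma_d": 0.1,
    "gamma_g": 0.1,
    "lr_d": 1e-4,
    "lr_g": 1e-4,
    "milestones_d": 15,
    "milestones_g": 15,
    "n_channels": 512,        # channels of the first synthesis block
    "n_first_channels": 12,   # channels after the discriminator's first conv
    "n_iter_log": 10,
    "n_layers_block": 3,
    "n_layers_d_block": 5,
    "n_layers_z": 4,
    "n_noise": 10,            # noise channels injected per synthesis block
    "seed": 0,
}

run = wandb.init(project="stylegan", config=config)  # project name is assumed
# ... training loop, logging metrics with run.log({...}) ...
run.finish()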
Row 1:
State: Finished
User: pierrotlc
Runtime: 5m 17s
Sweep: -
(no config values logged; every remaining column is "-")
Row 2:
State: Failed
User: pierrotlc
Runtime: 11mo 18d 1h 16m 7s
Sweep: -
batch_size: 256
betas_d: 0.745
betas_g: 0.5
dataloader: 4 torch DataLoader object references (memory addresses only, no settings recorded)
device: cuda
dim_image: 64
dim_z: 114
dropout: 0.3
epochs: 40
gamma_d: 0.1
gamma_g: 0.1
gp_factor: -
iter_D: -
lr_d: 0.0001
lr_g: 0.0001
milestones_d: 15
milestones_g: 15
n_channels: 512
n_first_channels: 12
n_iter_d: -
n_iter_g: -
n_iter_log: 10
n_layers_block: 3
n_layers_d_block: 5
n_layers_z: 4
n_noise: 10
netD:
Discriminator(
(first_conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(3, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((12, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((12, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((12, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((12, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((12, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((24, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((24, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((24, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((24, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((24, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((48, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((48, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((48, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((48, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((48, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((96, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((96, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((96, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((96, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((96, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((192, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((192, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((192, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((192, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((192, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(192, 384, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(5): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((384, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((384, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((384, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((384, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(4): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): LayerNorm((384, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(384, 768, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(768, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
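For readability, here is a minimal PyTorch sketch of the discriminator structure shown in the printed module tree above: each block repeats Dropout → Conv2d(3x3, bias=False) → LayerNorm over (C, H, W) → LeakyReLU n_layers_d_block times, then downsamples with a stride-2 conv that doubles the channels. The module layout is reconstructed from the repr; the forward pass (plain chaining, no residuals) is an assumption, since the export only records the module tree.

```python
import torch
from torch import nn

class DiscriminatorBlock(nn.Module):
    """Reconstructed from the module repr above; forward logic is assumed."""
    def __init__(self, channels, image_size, n_layers=5, dropout=0.3):
        super().__init__()
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Dropout(p=dropout),
                nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
                nn.LayerNorm((channels, image_size, image_size)),
                nn.LeakyReLU(),
            )
            for _ in range(n_layers)
        ])
        # Halves the spatial resolution and doubles the channel count.
        self.downsample = nn.Conv2d(channels, 2 * channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)  # assumed plain chaining (no residual connections appear in the repr)
        return self.downsample(x)

class Discriminator(nn.Module):
    """Matches the repr: first_conv, six DiscriminatorBlocks, 1-channel classify head."""
    def __init__(self, n_first_channels=12, dim_image=64, n_blocks=6, n_layers=5, dropout=0.3):
        super().__init__()
        self.first_conv = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Conv2d(3, n_first_channels, kernel_size=3, padding=1),
        )
        blocks, c, s = [], n_first_channels, dim_image
        for _ in range(n_blocks):
            blocks.append(DiscriminatorBlock(c, s, n_layers, dropout))
            c, s = 2 * c, s // 2  # 12 -> 24 -> ... -> 768 channels, 64 -> ... -> 1 pixels
        self.blocks = nn.ModuleList(blocks)
        self.classify = nn.Sequential(
            nn.Conv2d(c, 1, kernel_size=3, padding=1, bias=False),
            nn.Flatten(start_dim=1),
        )

    def forward(self, x):
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        return self.classify(x)
```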
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=196, out_features=196, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=1024, bias=True)\n (A2): Linear(in_features=196, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=512, bias=True)\n (A2): Linear(in_features=196, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), 
padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=256, bias=True)\n (A2): Linear(in_features=196, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=128, bias=True)\n (A2): Linear(in_features=196, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): 
Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=64, bias=True)\n (A2): Linear(in_features=196, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=32, bias=True)\n (A2): Linear(in_features=196, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 
512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=1024, bias=True)\n (A2): Linear(in_features=32, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=512, bias=True)\n (A2): Linear(in_features=32, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=256, bias=True)\n (A2): Linear(in_features=32, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, 
elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): 
LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n capturable: False\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: False\n initial_lr: 0.0001\n lr: 0.0001\n maximize: False\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n capturable: False\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: False\n initial_lr: 0.0001\n lr: 0.0001\n maximize: False\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)"]
-
0.9
0.9
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f71c1fb58d0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fa8c3b6e400>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb81d775400>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fd5f4fc6400>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f71b8338370>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fa89ee7bd00>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb7fcaa8d00>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fd5e8047d00>"]
-
-
-
-
-
0.5
0.5
-
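The optimD/optimG entries above record only the Adam hyperparameters, and stepD/stepG only opaque scheduler objects. A short sketch of how they line up with this row's config columns (lr_d, lr_g, gamma_d, gamma_g, milestones_d, milestones_g) is below; the placeholder modules, and the reading of the logged value 15 as a single milestone, are assumptions rather than the author's training script:

```python
import torch
from torch import nn

# Placeholder modules standing in for the logged netD / netG.
netD, netG = nn.Linear(2, 1), nn.Linear(2, 1)

# Values from this row: lr_d = lr_g = 1e-4; betas taken from the optimD/optimG entries.
optimD = torch.optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.99), weight_decay=0)
optimG = torch.optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.5), weight_decay=0)

# stepD / stepG are MultiStepLR schedulers; milestones_d = milestones_g = 15 and
# gamma_d = gamma_g = 0.1 in this row, assumed here to mean one decay at epoch 15.
stepD = torch.optim.lr_scheduler.MultiStepLR(optimD, milestones=[15], gamma=0.1)
stepG = torch.optim.lr_scheduler.MultiStepLR(optimG, milestones=[15], gamma=0.1)
```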
Row 3:
State: Finished
User: pierrotlc
Runtime: 4h 42m 16s
Sweep: -
batch_size: 66.8
betas_d: 0.7405
betas_g: 0.5985
dataloader: 10 torch DataLoader object references (memory addresses only)
device: cuda
dim_image: 64
dim_z: 32
dropout: 0.3
epochs: 76.5
gamma_d: 0.1
gamma_g: 0.1
gp_factor: -
iter_D: -
lr_d: 0.000046
lr_g: 0.000281
milestones_d: -
milestones_g: -
n_channels: 512
n_first_channels: 12
n_iter_d: -
n_iter_g: -
n_iter_log: 10
n_layers_block: 3
n_layers_d_block: 5
n_layers_z: 4
n_noise: 10
netD: Discriminator identical to the one printed for the previous row (Dropout 0.3, first conv 3→12 channels, six blocks of five Conv2d + LayerNorm + LeakyReLU layers doubling 12→768 channels, classify Conv2d(768, 1) + Flatten).
netG:
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(layers): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(layers): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((512, 2, 2), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(ada_in): AdaIN()
(A1): Linear(in_features=32, out_features=1024, bias=True)
(A2): Linear(in_features=32, out_features=1024, bias=True)
(B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(layers): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((256, 4, 4), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(ada_in): AdaIN()
(A1): Linear(in_features=32, out_features=512, bias=True)
(A2): Linear(in_features=32, out_features=512, bias=True)
(B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(layers): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((128, 8, 8), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(ada_in): AdaIN()
(A1): Linear(in_features=32, out_features=256, bias=True)
(A2): Linear(in_features=32, out_features=256, bias=True)
(B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(layers): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((64, 16, 16), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(ada_in): AdaIN()
(A1): Linear(in_features=32, out_features=128, bias=True)
(A2): Linear(in_features=32, out_features=128, bias=True)
(B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(4): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(layers): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((32, 32, 32), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(ada_in): AdaIN()
(A1): Linear(in_features=32, out_features=64, bias=True)
(A2): Linear(in_features=32, out_features=64, bias=True)
(B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(5): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(layers): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LayerNorm((16, 64, 64), eps=1e-05, elementwise_affine=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(ada_in): AdaIN()
(A1): Linear(in_features=32, out_features=32, bias=True)
(A2): Linear(in_features=32, out_features=32, bias=True)
(B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
)
(to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
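As with the discriminator, a compact PyTorch sketch of the SynthesisBlock structure shown in the StyleGAN repr above may help. The module layout mirrors the repr (latent width 32, 10 noise channels); the AdaIN implementation and the forward pass, i.e. how the A1/A2 style projections and B1/B2 noise projections are applied, are assumptions in the spirit of StyleGAN, since the export records only the module tree. The names dim_w, n_noise, and first are illustrative.

```python
import torch
from torch import nn
import torch.nn.functional as F

class AdaIN(nn.Module):
    """Assumed adaptive instance norm: normalize features, then apply a per-channel
    scale and bias computed from the mapped latent w (the repr shows no parameters)."""
    def forward(self, x, style):
        scale, bias = style.chunk(2, dim=1)                 # style: (N, 2*C)
        x = F.instance_norm(x)
        return x * scale[..., None, None] + bias[..., None, None]

class SynthesisBlock(nn.Module):
    """Layout copied from the printed repr (dim_w=32, n_noise=10); forward is assumed."""
    def __init__(self, in_channels, out_channels, size, dim_w=32, n_noise=10,
                 n_layers=3, dropout=0.3, first=False):
        super().__init__()
        def conv_block():
            return nn.Sequential(
                nn.Dropout(p=dropout),
                nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
                nn.LayerNorm((out_channels, size, size)),
                nn.LeakyReLU(),
            )
        if not first:
            # Blocks 1-5 start by doubling the resolution.
            self.upsample = nn.ConvTranspose2d(in_channels, out_channels, 4, stride=2, padding=1)
            self.conv = conv_block()
        self.layers = nn.ModuleList(conv_block() for _ in range(n_layers))
        self.ada_in = AdaIN()
        self.A1 = nn.Linear(dim_w, 2 * out_channels)   # latent -> (scale, bias)
        self.A2 = nn.Linear(dim_w, 2 * out_channels)
        self.B1 = nn.Conv2d(n_noise, out_channels, kernel_size=3, padding=1)
        self.B2 = nn.Conv2d(n_noise, out_channels, kernel_size=3, padding=1)

    def forward(self, x, w, noise):
        if hasattr(self, "upsample"):
            x = self.conv(self.upsample(x))
        x = self.ada_in(x + self.B1(noise), self.A1(w))  # assumed: add noise, then AdaIN
        for layer in self.layers:
            x = layer(x)
        return self.ada_in(x + self.B2(noise), self.A2(w))
```

A full generator would chain six such blocks (512→256→128→64→32→16 channels, resolutions 2→64) and finish with the to_rgb Conv2d(16, 3, kernel_size=1) shown at the end of the repr.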
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.9)\n eps: 1e-08\n initial_lr: 1e-05\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 1e-05\n lr: 1e-05\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0005\n lr: 0.0005\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 1e-05\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 5e-05\n lr: 5e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.7)\n eps: 1e-08\n initial_lr: 0.0005\n lr: 0.0005\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.8)\n eps: 1e-08\n initial_lr: 0.0005\n lr: 0.0005\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0005\n lr: 0.0005\n weight_decay: 0\n)"]
-
0.9
0.9
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f1ca695d0a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f44f65880a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f45d16580a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f45de1650a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f478a07f0a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f4c6214b0a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f57cad260a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f7e0def00a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb9f73740a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fc9e19e70a0>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f1c980e74c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f44cc5b64c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f45b41844c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f45c00d94c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f47700eb4c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f4c3c1834c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f57bc02e4c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f7de440b4c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb9e00a84c0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fc9d00984c0>"]
-
-
-
-
-
0.5
0.18
-
Row 4:
State: Finished
User: pierrotlc
Runtime: 45m 53s
Sweep: -
batch_size: 92
betas_d: 0.745
betas_g: 0.5
dataloader: 3 torch DataLoader object references (memory addresses only)
device: cuda
dim_image: 64
dim_z: 12
dropout: 0.1
epochs: 86.66667
gamma_d: 0.1
gamma_g: 0.1
gp_factor: -
iter_D: -
lr_d: 0.0001
lr_g: 0.00023333
milestones_d: -
milestones_g: -
n_channels: 170.66667
n_first_channels: 5.33333
n_iter_d: -
n_iter_g: -
n_iter_log: 10
n_layers_block: 3
n_layers_d_block: 3
n_layers_z: 4
n_noise: 10
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(3, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(4, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((32, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((32, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((32, 8, 8), 
eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (5): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((128, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((128, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((128, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (5): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((256, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): 
Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((256, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): LayerNorm((256, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(512, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
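Reading the netD reprs above: each DiscriminatorBlock stacks Dropout -> 3x3 Conv -> per-resolution LayerNorm -> LeakyReLU a few times, then halves the spatial resolution and doubles the channel count with a strided 4x4 conv, and the classify head is a 3x3 conv down to one channel followed by Flatten. A minimal sketch of such a block, assuming a plain sequential forward pass (the repr does not show forward()) and hypothetical argument names:

import torch.nn as nn

class DiscriminatorBlock(nn.Module):
    # Hypothetical reconstruction of one printed block.
    def __init__(self, channels, resolution, n_convs=3, dropout=0.1):
        super().__init__()
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Dropout(dropout),
                nn.Conv2d(channels, channels, 3, padding=1, bias=False),
                nn.LayerNorm((channels, resolution, resolution)),
                nn.LeakyReLU(),
            )
            for _ in range(n_convs)
        ])
        # Strided conv: halve the spatial size, double the channel count.
        self.downsample = nn.Conv2d(channels, 2 * channels, 4, stride=2, padding=1)

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)
        return self.downsample(x)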
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=12, out_features=12, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=256, bias=True)\n (A2): Linear(in_features=12, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=128, bias=True)\n (A2): Linear(in_features=12, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): 
Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=64, bias=True)\n (A2): Linear(in_features=12, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=32, bias=True)\n (A2): Linear(in_features=12, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=16, bias=True)\n (A2): Linear(in_features=12, out_features=16, bias=True)\n (B1): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(8, 4, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((4, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=8, bias=True)\n (A2): Linear(in_features=12, out_features=8, bias=True)\n (B1): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(4, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=12, out_features=12, bias=True)\n (1): LayerNorm((12,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=12, out_features=12, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 2, 2), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((256, 2, 2), eps=1e-05, 
elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=512, bias=True)\n (A2): Linear(in_features=12, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((128, 4, 4), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=256, bias=True)\n (A2): Linear(in_features=12, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((64, 8, 8), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=128, bias=True)\n (A2): Linear(in_features=12, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): 
Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((32, 16, 16), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=64, bias=True)\n (A2): Linear(in_features=12, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((16, 32, 32), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=32, bias=True)\n (A2): Linear(in_features=12, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.1, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LayerNorm((8, 64, 64), eps=1e-05, elementwise_affine=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=12, out_features=16, bias=True)\n (A2): 
Linear(in_features=12, out_features=16, bias=True)\n (B1): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
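The netG reprs above describe a StyleGAN-style generator: a MappingNetwork (a small LayerNorm/Linear/LeakyReLU MLP) turns the latent vector into a style code w, and each SynthesisBlock upsamples, convolves, and modulates its feature maps through AdaIN, with A1/A2 projecting w to a per-channel scale and bias (hence the 2x-channel output widths) and B1/B2 turning a 10-channel noise map into a feature-space perturbation. A simplified sketch of one block under those assumptions; the forward ordering and the instance-norm inside AdaIN are guesses, since the reprs do not show forward(), and the per-resolution LayerNorm layers are omitted for brevity:

import torch.nn as nn
import torch.nn.functional as F

class AdaIN(nn.Module):
    # Normalize each feature map, then apply a scale and bias predicted from w.
    def forward(self, x, scale, bias):
        x = F.instance_norm(x)
        return scale[..., None, None] * x + bias[..., None, None]

class SynthesisBlock(nn.Module):
    # Hypothetical reconstruction of one printed block.
    def __init__(self, channels, dim_z=12, n_noise=10, dropout=0.1):
        super().__init__()
        self.ada_in = AdaIN()
        self.A1 = nn.Linear(dim_z, 2 * channels)   # w -> (scale, bias)
        self.A2 = nn.Linear(dim_z, 2 * channels)
        self.B1 = nn.Conv2d(n_noise, channels, 3, padding=1)  # noise injection
        self.B2 = nn.Conv2d(n_noise, channels, 3, padding=1)
        self.conv = nn.Sequential(
            nn.Dropout(dropout),
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.LeakyReLU(),
        )

    def forward(self, x, w, noise):
        x = x + self.B1(noise)
        scale, bias = self.A1(w).chunk(2, dim=-1)
        x = self.conv(self.ada_in(x, scale, bias))
        x = x + self.B2(noise)
        scale, bias = self.A2(w).chunk(2, dim=-1)
        return self.ada_in(x, scale, bias)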
Adam (
Parameter Group 0
    amsgrad: False
    betas: (0.5, 0.99)
    eps: 1e-08
    initial_lr: 0.0001
    lr: 0.0001
    weight_decay: 0
)
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0005\n lr: 0.0005\n weight_decay: 0\n)"]
-
0.9
0.9
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f611cfa6580>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f688faeb580>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fefa0f49580>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f6112e0cb20>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f6885951b20>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fef964ebb20>"]
-
-
-
-
-
0.5
0.5
-
Failed
pierrotlc
1d 19h 53m 34s
-
256
0.745
0.5
["<torch.utils.data.dataloader.DataLoader object at 0x7f3bfd1f4c40>","<torch.utils.data.dataloader.DataLoader object at 0x7f3ce0780070>","<torch.utils.data.dataloader.DataLoader object at 0x7f3d719f5c40>","<torch.utils.data.dataloader.DataLoader object at 0x7f44a5293ca0>","<torch.utils.data.dataloader.DataLoader object at 0x7f8ec00dec40>","<torch.utils.data.dataloader.DataLoader object at 0x7fb0c1527610>","<torch.utils.data.dataloader.DataLoader object at 0x7fb96c105c40>","<torch.utils.data.dataloader.DataLoader object at 0x7fd208123be0>","<torch.utils.data.dataloader.DataLoader object at 0x7fddbd199070>","<torch.utils.data.dataloader.DataLoader object at 0x7fe0758590d0>"]
cuda
64
74.18182
0.3
59.09091
0.68636
0.64545
-
-
0.00034545
0.00038182
42.65909
42.65909
512
12
-
1.5
10
3.22222
4.45455
4
10
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): 
Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(192, 384, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (5): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(384, 768, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(768, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n 
(2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(192, 384, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (5): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(384, 768, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(768, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
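This run's discriminator differs from the previous one mainly in its normalization: BatchNorm2d replaces the per-resolution LayerNorm, and the blocks use fewer convolutions. The netD column simply records whichever variant was instantiated; a hypothetical helper for parameterizing that choice might look like:

import torch.nn as nn

def make_norm(kind, channels, resolution):
    # Hypothetical factory: the logged runs contain both LayerNorm- and
    # BatchNorm-based discriminators, so the norm layer is presumably configurable.
    if kind == "batch":
        return nn.BatchNorm2d(channels)
    return nn.LayerNorm((channels, resolution, resolution))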
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=196, out_features=196, bias=True)\n (1): LayerNorm((196,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=196, out_features=196, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=1024, bias=True)\n (A2): Linear(in_features=196, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=512, bias=True)\n (A2): Linear(in_features=196, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=256, bias=True)\n (A2): Linear(in_features=196, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=128, bias=True)\n (A2): Linear(in_features=196, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): 
BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=64, bias=True)\n (A2): Linear(in_features=196, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=196, out_features=32, bias=True)\n (A2): Linear(in_features=196, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=1024, bias=True)\n (A2): Linear(in_features=32, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=512, bias=True)\n (A2): Linear(in_features=32, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=256, bias=True)\n (A2): Linear(in_features=32, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): 
Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=1024, bias=True)\n (A2): Linear(in_features=32, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): 
Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=512, bias=True)\n (A2): Linear(in_features=32, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=256, bias=True)\n (A2): Linear(in_features=32, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): 
Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, 
out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=1024, bias=True)\n (A2): Linear(in_features=32, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=512, bias=True)\n (A2): Linear(in_features=32, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=256, bias=True)\n (A2): 
Linear(in_features=32, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 
1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=92, out_features=92, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=1024, bias=True)\n (A2): Linear(in_features=92, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=512, bias=True)\n (A2): Linear(in_features=92, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=256, bias=True)\n (A2): Linear(in_features=92, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=128, bias=True)\n (A2): Linear(in_features=92, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 
3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=64, bias=True)\n (A2): Linear(in_features=92, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=32, bias=True)\n (A2): Linear(in_features=92, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=92, out_features=92, bias=True)\n (1): LayerNorm((92,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=92, out_features=92, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): 
Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=1024, bias=True)\n (A2): Linear(in_features=92, out_features=1024, bias=True)\n (B1): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=512, bias=True)\n (A2): Linear(in_features=92, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=256, bias=True)\n (A2): Linear(in_features=92, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): 
Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=128, bias=True)\n (A2): Linear(in_features=92, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=64, bias=True)\n (A2): Linear(in_features=92, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (5): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=92, out_features=32, bias=True)\n (A2): Linear(in_features=92, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
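Note on the netG values above: the generator is logged as a StyleGAN-style model, i.e. a MappingNetwork (LayerNorm plus a Linear/LeakyReLU stack on the latent) feeding SynthesisBlocks that combine an optional ConvTranspose2d upsample, plain conv layers, a parameter-free AdaIN, two style projections A1/A2 (w -> 2*channels) and two noise convs B1/B2 (10 noise channels -> block channels), ending in a 1x1 to_rgb conv. As a reading aid, here is a minimal PyTorch sketch of how such a block could be wired; the module names and shapes follow the printed repr, but the AdaIN formulation, the placement of the noise additions and the forward order are assumptions, not the author's code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class AdaIN(nn.Module):
    """Parameter-free adaptive instance norm: normalize each channel,
    then apply a style-dependent scale and shift (assumed formulation)."""
    def forward(self, x, style):
        # style: (N, 2*C) -> scale (N, C, 1, 1) and shift (N, C, 1, 1)
        scale, shift = style.chunk(2, dim=1)
        x = F.instance_norm(x)
        return x * scale[..., None, None] + shift[..., None, None]

class SynthesisBlock(nn.Module):
    """Sketch of one logged block: a stack of Dropout/Conv/LeakyReLU layers at
    constant width, with style (A1/A2) and noise (B1/B2) applied around it."""
    def __init__(self, channels, w_dim=32, n_noise=10, n_layers=5):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.Sequential(
                nn.Dropout(0.3),
                nn.Conv2d(channels, channels, 3, padding=1),
                nn.LeakyReLU(0.01),
            )
            for _ in range(n_layers)
        ])
        self.ada_in = AdaIN()
        self.A1 = nn.Linear(w_dim, 2 * channels)              # style for the first AdaIN
        self.A2 = nn.Linear(w_dim, 2 * channels)              # style for the second AdaIN
        self.B1 = nn.Conv2d(n_noise, channels, 3, padding=1)  # noise map -> features
        self.B2 = nn.Conv2d(n_noise, channels, 3, padding=1)

    def forward(self, x, w, noise):
        # Assumed order: inject noise, apply style, run the conv stack, repeat style/noise.
        x = self.ada_in(x + self.B1(noise), self.A1(w))
        for layer in self.layers:
            x = layer(x)
        x = self.ada_in(x + self.B2(noise), self.A2(w))
        return x

# Illustrative shapes only (batch 2, 512 channels, arbitrary 4x4 resolution, w_dim 32).
block = SynthesisBlock(channels=512, w_dim=32)
x = torch.randn(2, 512, 4, 4)
w = torch.randn(2, 32)
noise = torch.randn(2, 10, 4, 4)
out = block(x, w, noise)  # -> (2, 512, 4, 4)
```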
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0005\n lr: 0.0005\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
-
0.9
0.9
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3c16904790>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3cedaa14f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3d7f129790>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f44ad2987f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f8ec8964790>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb0d0333820>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb97408a790>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fd2134224f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fddda4cf4f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fe086c1e550>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3bfd1f4910>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3ce0757d30>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3d719f5910>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f44a5293970>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f8ec00de910>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb0c15272e0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb96c105910>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fd2081238b0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fddbd170d30>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fe07582fd90>"]
-
-
-
-
-
0.81818
0.43727
0
Failed
pierrotlc
12h 59m 2s
-
208
0.71562
0.5
["<torch.utils.data.dataloader.DataLoader object at 0x7efeaab5ed30>","<torch.utils.data.dataloader.DataLoader object at 0x7f0c91aebd60>","<torch.utils.data.dataloader.DataLoader object at 0x7f3259bfc520>","<torch.utils.data.dataloader.DataLoader object at 0x7f3dc8857520>","<torch.utils.data.dataloader.DataLoader object at 0x7f60001a8100>","<torch.utils.data.dataloader.DataLoader object at 0x7f96b846a070>","<torch.utils.data.dataloader.DataLoader object at 0x7fb048cb8070>","<torch.utils.data.dataloader.DataLoader object at 0x7fe42009dd60>"]
cuda
32
32
0.3
87.5
0.8375
0.8
0.64
-
0.0029125
0.00042625
34
34
160
8.5
-
-
77.5
5.5
4.25
4
10
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): 
Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(192, 384, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(384, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(4, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, 
kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n 
(0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
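Note on the netD values just above: each variant is the same Discriminator template at a different width and depth: a 3 -> n_first_channels input conv, a stack of DiscriminatorBlocks (a few Dropout/Conv/BatchNorm/LeakyReLU layers at constant width, then a stride-2 conv that halves the resolution and doubles the channels), and a Conv2d(C, 1) + Flatten head. A hedged sketch of that template, using the channel widths of the first printed variant; the forward pass shown is an assumption (any skip connections would not appear in the repr):

```python
import torch
import torch.nn as nn

class DiscriminatorBlock(nn.Module):
    """Sketch of one logged block: n conv layers at constant width,
    then a strided conv that downsamples and doubles the channels."""
    def __init__(self, channels, n_layers=5, dropout=0.3):
        super().__init__()
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Dropout(dropout),
                nn.Conv2d(channels, channels, 3, padding=1, bias=False),
                nn.BatchNorm2d(channels),
                nn.LeakyReLU(0.01),
            )
            for _ in range(n_layers)
        ])
        self.downsample = nn.Conv2d(channels, 2 * channels, 4, stride=2, padding=1)

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)  # applied sequentially; skip connections, if any, are not visible in the repr
        return self.downsample(x)

class Discriminator(nn.Module):
    """Sketch matching the first printed variant: 3 -> 12 channels, five blocks, 384 -> 1 head."""
    def __init__(self, n_first_channels=12, n_blocks=5, n_layers=5):
        super().__init__()
        self.first_conv = nn.Sequential(
            nn.Dropout(0.3),
            nn.Conv2d(3, n_first_channels, 3, padding=1),
        )
        self.blocks = nn.ModuleList([
            DiscriminatorBlock(n_first_channels * 2 ** i, n_layers)
            for i in range(n_blocks)
        ])
        out_channels = n_first_channels * 2 ** n_blocks
        self.classify = nn.Sequential(
            nn.Conv2d(out_channels, 1, 3, padding=1, bias=False),
            nn.Flatten(),
        )

    def forward(self, x):
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        return self.classify(x)

# Illustrative usage on a 64x64 RGB batch; the output is one score map per image, flattened.
scores = Discriminator()(torch.randn(2, 3, 64, 64))
```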
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=16, bias=True)\n (A2): Linear(in_features=32, out_features=16, 
bias=True)\n (B1): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(8, 4, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=8, bias=True)\n (A2): Linear(in_features=32, out_features=8, bias=True)\n (B1): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(4, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (5): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (6): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (7): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=256, bias=True)\n (A2): Linear(in_features=32, out_features=256, bias=True)\n (B1): Conv2d(10, 128, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (5): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (6): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (7): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (5): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (6): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (7): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (5): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (6): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (7): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (4): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (5): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (6): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (7): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=16, bias=True)\n (A2): Linear(in_features=32, out_features=16, bias=True)\n (B1): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=512, bias=True)\n (A2): Linear(in_features=32, out_features=512, bias=True)\n (B1): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 
3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=256, bias=True)\n (A2): Linear(in_features=32, out_features=256, bias=True)\n (B1): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=128, bias=True)\n (A2): Linear(in_features=32, out_features=128, bias=True)\n (B1): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=64, bias=True)\n (A2): Linear(in_features=32, out_features=64, bias=True)\n (B1): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (layers): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (ada_in): AdaIN()\n (A1): Linear(in_features=32, out_features=32, bias=True)\n (A2): Linear(in_features=32, 
out_features=32, bias=True)\n (B1): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (B2): Conv2d(10, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
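Each SynthesisBlock in the netG values above carries an (ada_in): AdaIN() module together with per-block affine projections A1/A2 (Linear layers mapping the 32-dimensional mapped latent to twice the block's channel count) and noise convolutions B1/B2 (Conv2d layers mapping the 10-channel noise input onto the feature channels). The repr does not print AdaIN's internals, so the following is only a sketch of what such a parameter-free module could look like, assuming the usual StyleGAN-style adaptive instance normalization; the wiring example and the names w, noise and feat are illustrative, not taken from the run.

import torch
from torch import nn


class AdaIN(nn.Module):
    # Adaptive instance normalization: normalize each channel of x over its
    # spatial dimensions, then re-scale and shift with a style-dependent pair.
    # The logs only print "(ada_in): AdaIN()", so these internals are assumed.
    def forward(self, x: torch.Tensor, style: torch.Tensor) -> torch.Tensor:
        # x: [batch, C, H, W]; style: [batch, 2 * C] (scale and bias stacked)
        scale, bias = style.chunk(2, dim=1)
        mean = x.mean(dim=(2, 3), keepdim=True)
        std = x.std(dim=(2, 3), keepdim=True) + 1e-8
        x = (x - mean) / std
        return scale[..., None, None] * x + bias[..., None, None]


# Hypothetical wiring that matches the printed shapes of a 64-channel block:
# A1: Linear(32 -> 128) yields (scale, bias) for 64 channels, and
# B1: Conv2d(10 -> 64) maps the 10-channel noise input onto the feature maps.
w = torch.randn(4, 32)
noise = torch.randn(4, 10, 32, 32)
feat = torch.randn(4, 64, 32, 32)

A1 = nn.Linear(32, 2 * 64)
B1 = nn.Conv2d(10, 64, kernel_size=3, padding=1)

out = AdaIN()(feat + B1(noise), A1(w))   # [4, 64, 32, 32]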
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.7)\n eps: 1e-08\n initial_lr: 0.01\n lr: 0.01\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.9)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.9)\n eps: 1e-08\n initial_lr: 0.01\n lr: 0.01\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 1e-05\n lr: 1e-05\n weight_decay: 0\n)"]
-
0.945
0.7
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7efeb55bf5e0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f0c9c54d610>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f326047f7f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3dcf0da7f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f6006ae78b0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f96c26195e0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb04f5f5820>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fe429ebc610>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7efeaab5ea00>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f0c91aeba30>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3259bfc220>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3dc8857220>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f600017fd90>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f96b8446d00>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb048c92d00>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fe42009da30>"]
-
-
-
-
-
1.5
1.2125
0
Failed
pierrotlc
1h 11m 13s
-
32
0.745
0.5
<torch.utils.data.dataloader.DataLoader object at 0x7f8c5806b880>
cuda
64
32
0.3
50
0.65
0.5
-
-
0.001
0.001
43.5
43.5
512
12
1
-
1000
-
2
4
-
Discriminator(
(first_conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(3, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(192, 384, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(5): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(384, 768, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(768, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
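The Discriminator printed above is a stack of DiscriminatorBlocks: two Dropout/Conv/BatchNorm/LeakyReLU stages at constant width, followed by a strided 4x4 convolution that halves the resolution and doubles the channel count, with a 1-channel convolution plus Flatten as the classification head. A minimal sketch that reproduces this structure is given below; it is an assumption about the underlying code (the repr cannot show skip connections or the forward pass), and the constructor argument names are illustrative.

from torch import nn


class DiscriminatorBlock(nn.Module):
    def __init__(self, channels: int, n_layers: int = 2, dropout: float = 0.3):
        super().__init__()
        # n_layers conv stages at constant width, as in the printed repr.
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Dropout(p=dropout),
                nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(channels),
                nn.LeakyReLU(),
            )
            for _ in range(n_layers)
        ])
        # Strided conv: halve the spatial size, double the channel count.
        self.downsample = nn.Conv2d(channels, 2 * channels, kernel_size=4,
                                    stride=2, padding=1)

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)
        return self.downsample(x)


class Discriminator(nn.Module):
    # n_first_channels=12 and n_blocks=6 reproduce the repr above (12 -> 768).
    def __init__(self, n_first_channels: int = 12, n_blocks: int = 6,
                 n_layers_d_block: int = 2, dropout: float = 0.3):
        super().__init__()
        self.first_conv = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Conv2d(3, n_first_channels, kernel_size=3, padding=1),
        )
        channels = n_first_channels
        blocks = []
        for _ in range(n_blocks):
            blocks.append(DiscriminatorBlock(channels, n_layers_d_block, dropout))
            channels *= 2
        self.blocks = nn.ModuleList(blocks)
        self.classify = nn.Sequential(
            nn.Conv2d(channels, 1, kernel_size=3, padding=1, bias=False),
            nn.Flatten(),
        )

    def forward(self, x):
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        return self.classify(x)   # one logit per spatial position, flattened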
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(layers): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(4): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(5): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
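In this run's generator each SynthesisBlock is an optional ConvTranspose2d upsampling that halves the channel count, one or two Dropout/Conv/LeakyReLU stages (conv1/conv2), and an AdaIN, with a final 1x1 to_rgb convolution producing the image. The repr does not show how the mapped latent reaches the AdaIN (no A/B modules are printed for this variant), so the style projection in the sketch below is an assumption, as are the argument names; the instance norm plus style projection stands in for the printed AdaIN().

import torch
from torch import nn


class SynthesisBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, dim_z: int = 32,
                 dropout: float = 0.3, first_block: bool = False):
        super().__init__()
        self.first_block = first_block
        if not first_block:
            # Double the spatial resolution while halving the channel count.
            self.upsample = nn.ConvTranspose2d(in_channels, out_channels,
                                               kernel_size=4, stride=2, padding=1)
            self.conv1 = self._conv(out_channels, dropout)
        self.conv2 = self._conv(out_channels, dropout)
        # Stand-ins for the printed AdaIN(): plain instance norm plus an
        # assumed style projection from the mapped latent w.
        self.norm = nn.InstanceNorm2d(out_channels)
        self.style = nn.Linear(dim_z, 2 * out_channels)

    @staticmethod
    def _conv(channels: int, dropout: float) -> nn.Sequential:
        return nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.LeakyReLU(),
        )

    def forward(self, x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
        if not self.first_block:
            x = self.conv1(self.upsample(x))
        x = self.conv2(x)
        scale, bias = self.style(w).chunk(2, dim=1)
        return scale[..., None, None] * self.norm(x) + bias[..., None, None]


# Channel schedule of the repr above: 512 -> 256 -> 128 -> 64 -> 32 -> 16,
# followed by to_rgb = Conv2d(16, 3, kernel_size=1).
widths = [512, 256, 128, 64, 32, 16]
blocks = [SynthesisBlock(widths[0], widths[0], first_block=True)] + [
    SynthesisBlock(c_in, c_out) for c_in, c_out in zip(widths[:-1], widths[1:])
]
to_rgb = nn.Conv2d(widths[-1], 3, kernel_size=1)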
Adam (
Parameter Group 0
amsgrad: False
betas: (0.5, 0.99)
eps: 1e-08
initial_lr: 0.001
lr: 0.001
weight_decay: 0
)
Adam (
Parameter Group 0
amsgrad: False
betas: (0.5, 0.5)
eps: 1e-08
initial_lr: 0.001
lr: 0.001
weight_decay: 0
)
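The two Adam parameter groups above both use lr 1e-3; the first (presumably the discriminator's) uses betas (0.5, 0.99) and the second (presumably the generator's) betas (0.5, 0.5). The initial_lr entry in each group is what MultiStepLR adds when it wraps an optimizer, matching the scheduler objects logged a few cells below. The sketch below shows how such a setup is typically constructed; the milestone list and gamma are placeholders, since only the scheduler objects, not their arguments, appear in the log, and netD/netG are stand-ins for the networks printed above.

from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR

# Stand-ins for the Discriminator / StyleGAN modules printed above.
netD = nn.Conv2d(3, 1, kernel_size=3, padding=1)
netG = nn.ConvTranspose2d(32, 3, kernel_size=4, stride=2, padding=1)

optimD = Adam(netD.parameters(), lr=1e-3, betas=(0.5, 0.99))
optimG = Adam(netG.parameters(), lr=1e-3, betas=(0.5, 0.5))

# Placeholder schedule: the actual milestones/gamma are not legible from the log.
schedD = MultiStepLR(optimD, milestones=[25, 40], gamma=0.1)
schedG = MultiStepLR(optimG, milestones=[25, 40], gamma=0.1)

for epoch in range(50):        # this run's `epochs` cell reads 50
    ...                        # one epoch of alternating D / G updates
    schedD.step()
    schedG.step()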
-
0.99
0.99
0
<torch.optim.lr_scheduler.MultiStepLR object at 0x7f8c5f9f8910>
<torch.optim.lr_scheduler.MultiStepLR object at 0x7f8c5806b550>
-
-
-
-
-
5
5
0
Failed
pierrotlc
9h 52m 14s
-
224
0.745
0.5
["<torch.utils.data.dataloader.DataLoader object at 0x7f153563e160>","<torch.utils.data.dataloader.DataLoader object at 0x7f29c79ff160>","<torch.utils.data.dataloader.DataLoader object at 0x7f476d68d1c0>","<torch.utils.data.dataloader.DataLoader object at 0x7f6efbe251c0>","<torch.utils.data.dataloader.DataLoader object at 0x7f9f40071160>","<torch.utils.data.dataloader.DataLoader object at 0x7fb1ebb64160>","<torch.utils.data.dataloader.DataLoader object at 0x7fc7c0342160>"]
cuda
32
32
0.3
221.42857
0.7
0.55714
-
-
0.00074286
0.00074286
26
26
146.28571
6.85714
1
-
50
-
2
4
-
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(192, 384, kernel_size=(4, 
4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(384, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(6, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(6, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(6, 12, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(192, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): 
Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
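Every StyleGAN generator in these runs opens with the same MappingNetwork: a LayerNorm over the latent, a stack of Linear/LayerNorm/LeakyReLU blocks, and a final Linear, all at the latent width. A minimal sketch matching the printout in the netG cells above (32-dimensional latent, 4 hidden blocks) follows; the forward pass and argument names are assumptions, since only the module structure is logged.

from torch import nn


class MappingNetwork(nn.Module):
    # dim_z=32 and n_layers=4 match the repr printed in the netG cells above.
    def __init__(self, dim_z: int = 32, n_layers: int = 4):
        super().__init__()
        self.norm = nn.LayerNorm(dim_z)
        self.layers = nn.ModuleList([
            nn.Sequential(
                nn.Linear(dim_z, dim_z),
                nn.LayerNorm(dim_z),
                nn.LeakyReLU(),
            )
            for _ in range(n_layers)
        ])
        self.out = nn.Linear(dim_z, dim_z)

    def forward(self, z):
        # Maps the raw latent z to the intermediate latent w consumed by AdaIN.
        w = self.norm(z)
        for layer in self.layers:
            w = layer(w)
        return self.out(w)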
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
-
0.89714
0.89714
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f153f6c34f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f29d1a844f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f477435b910>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f6f026b2910>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f9f4a1754f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb1f5bec4f0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fc7ca43d4f0>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f1535614e20>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f29c79d5e20>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f476d663e80>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f6efbdfce80>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f9f40047e20>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb1ebb3ce20>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fc7c0319e20>"]
-
-
-
-
-
6.66667
6.66667
0
Failed
pierrotlc
23h 31m 2s
-
256
0.745
0.67091
["<torch.utils.data.dataloader.DataLoader object at 0x7f386ab40b80>","<torch.utils.data.dataloader.DataLoader object at 0x7f3acb539f40>","<torch.utils.data.dataloader.DataLoader object at 0x7f625f50cee0>","<torch.utils.data.dataloader.DataLoader object at 0x7f80d06cdfd0>","<torch.utils.data.dataloader.DataLoader object at 0x7f8950b50b50>","<torch.utils.data.dataloader.DataLoader object at 0x7f91881f9ee0>","<torch.utils.data.dataloader.DataLoader object at 0x7fa3acb6bee0>","<torch.utils.data.dataloader.DataLoader object at 0x7facbeb46ee0>","<torch.utils.data.dataloader.DataLoader object at 0x7fb96753cf40>","<torch.utils.data.dataloader.DataLoader object at 0x7fca6597afd0>"]
cuda
32
27.63636
0.3
92.72727
0.8
0.7
-
-
0.00034545
0.00034545
10.74194
13.42424
112
5.09091
2.54545
-
-
-
2
3.45455
-
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(2, 4, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(4, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 
1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(6, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(6, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(6, 12, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(12, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(24, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(48, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(96, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(96, 192, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(192, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): 
LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((16,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=16, out_features=16, bias=True)\n (1): LayerNorm((16,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=16, out_features=16, bias=True)\n (1): LayerNorm((16,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=16, out_features=16, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(48, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(24, 12, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(12, 6, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(6, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(6, 6, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(6, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n 
(1): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(192, 96, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n 
(1): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(96, 48, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(48, 24, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (4): SynthesisBlock(\n (upsample): ConvTranspose2d(24, 12, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(12, 12, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(12, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.5)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.8)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.0001\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n initial_lr: 0.001\n lr: 0.001\n weight_decay: 0\n)"]
-
-
-
0
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3878ed9580>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3ad55e6310>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f62695b9280>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f80da77b3a0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f895f986fd0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f91922a6280>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fa3b6d5d280>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7facc8bf2280>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb9715e9310>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fca6fa283a0>"]
["<torch.optim.lr_scheduler.MultiStepLR object at 0x7f386ab40850>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f3acb539c40>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f625f50cbe0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f80d06cdcd0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f8950b50820>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7f91881f9be0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fa3acb6bbe0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7facbeb46be0>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fb96753cc40>","<torch.optim.lr_scheduler.MultiStepLR object at 0x7fca6597acd0>"]
-
-
-
-
-
-
-
0
Failed
pierrotlc
55m 6s
-
64
0.745
0.745
["<torch.utils.data.dataloader.DataLoader object at 0x7f3eaacd7b20>","<torch.utils.data.dataloader.DataLoader object at 0x7f6677a71b20>"]
cuda
32
64
0.3
50
0.8
0.8
-
-
0.001
0.0001
-
-
64
4
-
-
-
-
2
2
-
Discriminator(
(first_conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(3, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(4, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
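For reference, a minimal PyTorch sketch of how a Discriminator with the module tree printed above could be assembled. The class and attribute names mirror the repr; the forward wiring (plain sequential stacking of the convs, no residual path) is an assumption, since only the module tree is logged.

import torch
from torch import nn

class DiscriminatorBlock(nn.Module):
    """Stack of dropout+conv+BatchNorm+LeakyReLU layers followed by a strided
    conv that halves the resolution and doubles the channels. Reconstructed
    from the printed module tree; the forward wiring is an assumption."""
    def __init__(self, channels: int, n_convs: int = 2, dropout: float = 0.3):
        super().__init__()
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Dropout(dropout),
                nn.Conv2d(channels, channels, 3, padding=1, bias=False),
                nn.BatchNorm2d(channels),
                nn.LeakyReLU(),
            )
            for _ in range(n_convs)
        ])
        self.downsample = nn.Conv2d(channels, 2 * channels, 4, stride=2, padding=1)

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)
        return self.downsample(x)

class Discriminator(nn.Module):
    def __init__(self, n_first_channels: int = 4, n_blocks: int = 5,
                 n_convs: int = 2, dropout: float = 0.3):
        super().__init__()
        self.first_conv = nn.Sequential(
            nn.Dropout(dropout),
            nn.Conv2d(3, n_first_channels, 3, padding=1),
        )
        self.blocks = nn.ModuleList([
            DiscriminatorBlock(n_first_channels * 2 ** i, n_convs, dropout)
            for i in range(n_blocks)
        ])
        self.classify = nn.Sequential(
            nn.Conv2d(n_first_channels * 2 ** n_blocks, 1, 3, padding=1, bias=False),
            nn.Flatten(),
        )

    def forward(self, x):
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        return self.classify(x)

With n_first_channels=4 and five blocks, printing this module reproduces the 3 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128 channel progression shown above.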
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
(layers): ModuleList(
(0): Sequential(
(0): Linear(in_features=64, out_features=64, bias=True)
(1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=64, out_features=64, bias=True)
(1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out): Linear(in_features=64, out_features=64, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(2): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
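A sketch of one upsampling SynthesisBlock from the generator printed above (blocks 1 to 3; the first block has no upsample and a single conv). The printed AdaIN() has no parameters, so how the mapped latent w is turned into per-channel scales and shifts is not recoverable from the repr; the to_style projection below is a hypothetical stand-in for wherever that affine lives in the original code.

import torch
from torch import nn
import torch.nn.functional as F

class AdaIN(nn.Module):
    """Adaptive instance normalisation: normalise each feature map, then
    rescale and shift it per channel. Parameter-free, matching the repr."""
    def forward(self, x, scale, shift):
        x = F.instance_norm(x)  # zero mean / unit variance per channel and sample
        return scale[..., None, None] * x + shift[..., None, None]

class SynthesisBlock(nn.Module):
    """2x transposed-conv upsample, two dropout+conv+LeakyReLU layers, then
    AdaIN driven by the style vector w. A sketch under assumptions."""
    def __init__(self, in_channels, out_channels, dim_w, dropout=0.3):
        super().__init__()
        self.upsample = nn.ConvTranspose2d(in_channels, out_channels, 4, stride=2, padding=1)
        self.conv1 = nn.Sequential(
            nn.Dropout(dropout),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.LeakyReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Dropout(dropout),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.LeakyReLU(),
        )
        self.ada_in = AdaIN()
        # Hypothetical style projection; not shown in the logged module tree.
        self.to_style = nn.Linear(dim_w, 2 * out_channels)

    def forward(self, x, w):
        x = self.upsample(x)
        x = self.conv2(self.conv1(x))
        scale, shift = self.to_style(w).chunk(2, dim=-1)
        return self.ada_in(x, scale, shift)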
Adam (
Parameter Group 0
amsgrad: False
betas: (0.5, 0.99)
eps: 1e-08
initial_lr: 0.001
lr: 0.001
weight_decay: 0
)
Adam (
Parameter Group 0
amsgrad: False
betas: (0.5, 0.99)
eps: 1e-08
initial_lr: 0.0001
lr: 0.0001
weight_decay: 0
)
7.5
-
-
0
["<torch.optim.lr_scheduler.StepLR object at 0x7f3eb9b0ef70>","<torch.optim.lr_scheduler.StepLR object at 0x7f66868a9f70>"]
["<torch.optim.lr_scheduler.StepLR object at 0x7f3eaacd77f0>","<torch.optim.lr_scheduler.StepLR object at 0x7f6677a717f0>"]
50
26
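The two Adam configurations printed above, together with the StepLR schedulers and the step sizes just listed, could be set up as below. The pairing of the 50 / 26 step sizes and the 0.8 decay factors logged earlier in this row with the discriminator and generator respectively is an assumption.

from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR

# Stand-in networks; the real run uses the Discriminator / StyleGAN above.
netD, netG = nn.Linear(8, 1), nn.Linear(8, 8)

# Optimisers as printed: Adam with betas=(0.5, 0.99), lr 1e-3 (D) and 1e-4 (G).
optim_d = Adam(netD.parameters(), lr=1e-3, betas=(0.5, 0.99))
optim_g = Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.99))

# StepLR schedulers; step sizes and gamma taken from the surrounding cells.
sched_d = StepLR(optim_d, step_size=50, gamma=0.8)
sched_g = StepLR(optim_g, step_size=26, gamma=0.8)

for epoch in range(50):
    ...  # one training epoch over the dataloaders
    sched_d.step()
    sched_g.step()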
-
-
-
-
-
0
Failed
pierrotlc
2d 1h 12m 10s
-
242.28571
0.75214
0.79857
["<torch.utils.data.dataloader.DataLoader object at 0x7f13f672ca90>","<torch.utils.data.dataloader.DataLoader object at 0x7f2a8f7c1af0>","<torch.utils.data.dataloader.DataLoader object at 0x7f7df5169ac0>","<torch.utils.data.dataloader.DataLoader object at 0x7f816104fac0>","<torch.utils.data.dataloader.DataLoader object at 0x7f8b8c4daaf0>","<torch.utils.data.dataloader.DataLoader object at 0x7f9871e8fac0>","<torch.utils.data.dataloader.DataLoader object at 0x7f99600afa60>","<torch.utils.data.dataloader.DataLoader object at 0x7faa4a00eac0>","<torch.utils.data.dataloader.DataLoader object at 0x7fdad3577a90>","<torch.utils.data.dataloader.DataLoader object at 0x7ff7b8aa0ac0>"]
cuda
32
36.57143
0.3
92.85714
-
-
-
-
0.00038571
0.000093571
-
-
50.28571
3.14286
-
-
-
-
2
2
-
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(2, 4, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(4, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 
1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(4, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(128, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(8, 4, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(4, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=32, out_features=32, bias=True)\n (1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=32, out_features=32, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, 
kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (layers): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=64, out_features=64, bias=True)\n (1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=64, out_features=64, bias=True)\n (1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out): Linear(in_features=64, out_features=64, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(16, 8, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (2): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(8, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.0005\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.001\n weight_decay: 0.001\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.001\n weight_decay: 0.01\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.7, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.7, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.7, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0.001\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.7, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0.01\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.7, 0.99)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.8, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
-
-
-
-
-
0.001
Finished
pierrotlc
1h 16m 42s
-
64
0.745
0.745
-
cuda
32
100
0.3
15
-
-
-
5
0.00065
0.0000425
-
-
192
12
-
-
-
-
2
4
-
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(256, 512, 
kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(512, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Dropout(p=0.3, inplace=False)\n (1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1), bias=False)\n (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (3): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
["StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (fully_connected): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out_layer): Linear(in_features=100, out_features=100, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)","StyleGAN(\n (mapping): MappingNetwork(\n (norm): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (fully_connected): ModuleList(\n (0): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Linear(in_features=100, out_features=100, bias=True)\n (1): LayerNorm((100,), eps=1e-05, elementwise_affine=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (out_layer): Linear(in_features=100, out_features=100, bias=True)\n )\n (synthesis): SynthesisNetwork(\n (blocks): ModuleList(\n (0): SynthesisBlock(\n (conv2): Sequential(\n (0): 
Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (1): SynthesisBlock(\n (upsample): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (2): SynthesisBlock(\n (upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n (3): SynthesisBlock(\n (upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (conv1): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (conv2): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n )\n (ada_in): AdaIN()\n )\n )\n (to_rgb): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1))\n )\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.0005\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.001\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 5e-05\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
["<torch.utils.data.dataloader.DataLoader object at 0x7f0dea770c40>","<torch.utils.data.dataloader.DataLoader object at 0x7f12fb5a0cd0>","<torch.utils.data.dataloader.DataLoader object at 0x7f95abaf8c40>","<torch.utils.data.dataloader.DataLoader object at 0x7fc0d8196c40>"]
["<torch.utils.data.dataloader.DataLoader object at 0x7f0dea770b50>","<torch.utils.data.dataloader.DataLoader object at 0x7f12fb5a0be0>","<torch.utils.data.dataloader.DataLoader object at 0x7f95abaf8b50>","<torch.utils.data.dataloader.DataLoader object at 0x7fc0d8196b50>"]
5
-
-
-
Failed
pierrotlc
16h 14m 13s
-
128
0.745
0.81167
-
cuda
32
32
0.3
55
-
-
-
3
0.00001
0.00003
-
-
128
8
-
-
-
-
2
3
-
Discriminator(
(first_conv): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
)
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Dropout(p=0.3, inplace=False)
(1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
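A quick shape check on this discriminator: assuming the 32 logged earlier in this row is the image size, the five stride-2 downsample convs (kernel 4, padding 1) take 32x32 down to 1x1, so the classify conv plus Flatten emit a single logit per image.

# Spatial size after the five stride-2 downsample convs (kernel 4, padding 1).
size = 32
for _ in range(5):
    size = (size + 2 * 1 - 4) // 2 + 1  # standard conv output-size formula
print(size)  # 1 -> the 256->1 classify conv followed by Flatten gives one logit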
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(conv2): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
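The mapping network printed above (LayerNorm on the 32-dim latent, three Linear/LayerNorm/LeakyReLU layers, and an output Linear) can be sketched as follows. The sampling comment is an assumption: the repr does not show what the synthesis network starts from; a learned 4x4 constant, as in StyleGAN, would be grown by the three upsampling blocks to the 32x32 output.

import torch
from torch import nn

class MappingNetwork(nn.Module):
    """z -> w, mirroring the printed norm + fully_connected + out_layer tree."""
    def __init__(self, dim_z: int = 32, n_layers: int = 3):
        super().__init__()
        self.norm = nn.LayerNorm(dim_z)
        self.fully_connected = nn.ModuleList([
            nn.Sequential(nn.Linear(dim_z, dim_z), nn.LayerNorm(dim_z), nn.LeakyReLU())
            for _ in range(n_layers)
        ])
        self.out_layer = nn.Linear(dim_z, dim_z)

    def forward(self, z):
        w = self.norm(z)
        for layer in self.fully_connected:
            w = layer(w)
        return self.out_layer(w)

# Hypothetical sampling: map a batch of latents to style vectors that the
# synthesis blocks' AdaIN layers would consume at every resolution.
mapping = MappingNetwork(dim_z=32)
z = torch.randn(16, 32)
w = mapping(z)
print(w.shape)  # torch.Size([16, 32])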
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)","SGD (\nParameter Group 0\n dampening: 0\n lr: 1e-05\n momentum: 0\n nesterov: False\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.5, 0.99)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.7, 0.99)\n eps: 1e-08\n lr: 5e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
["<torch.utils.data.dataloader.DataLoader object at 0x7f3b7c30ecd0>","<torch.utils.data.dataloader.DataLoader object at 0x7f65d24dac10>","<torch.utils.data.dataloader.DataLoader object at 0x7f734e257c10>","<torch.utils.data.dataloader.DataLoader object at 0x7f8e97469c10>"]
["<torch.utils.data.dataloader.DataLoader object at 0x7f3b7c30ebe0>","<torch.utils.data.dataloader.DataLoader object at 0x7f65d24dab20>","<torch.utils.data.dataloader.DataLoader object at 0x7f734e257b20>","<torch.utils.data.dataloader.DataLoader object at 0x7f8e97469b20>"]
3
-
-
-
Finished
pierrotlc
9h 40m 23s
-
128
-
-
-
cuda
32
32
0.2
300
-
-
-
-
0.00055
0.0000325
-
-
128
8
-
-
-
-
3.5
3
-
["Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.2, inplace=False)\n (1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): 
BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Sequential(\n (0): Dropout(p=0.2, inplace=False)\n (1): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n )\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): 
DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(conv2): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
["SGD (\nParameter Group 0\n dampening: 0\n lr: 0.0001\n momentum: 0\n nesterov: False\n weight_decay: 0\n)","SGD (\nParameter Group 0\n dampening: 0\n lr: 0.001\n momentum: 0\n nesterov: False\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
["<torch.utils.data.dataloader.DataLoader object at 0x7f0203ae8bb0>","<torch.utils.data.dataloader.DataLoader object at 0x7f6e12388bb0>","<torch.utils.data.dataloader.DataLoader object at 0x7f9fc01c8880>","<torch.utils.data.dataloader.DataLoader object at 0x7ff3243f1bb0>"]
["<torch.utils.data.dataloader.DataLoader object at 0x7f0203ae8ac0>","<torch.utils.data.dataloader.DataLoader object at 0x7f6e12388ac0>","<torch.utils.data.dataloader.DataLoader object at 0x7f9fc01c8790>","<torch.utils.data.dataloader.DataLoader object at 0x7ff3243f1ac0>"]
-
-
-
-
Finished
pierrotlc
4h 13m 8s
-
128
-
-
-
cuda
32
32
-
500
-
-
-
-
0.001
0.0001
-
-
128
8
-
-
-
-
4
3
-
Discriminator(
(first_conv): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(conv2): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(conv2): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): LeakyReLU(negative_slope=0.01)
)
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
SGD (
Parameter Group 0
dampening: 0
lr: 0.001
momentum: 0
nesterov: False
weight_decay: 0
)
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
eps: 1e-08
lr: 0.0001
weight_decay: 0
)
-
-
-
0
-
-
-
-
<torch.utils.data.dataloader.DataLoader object at 0x7f42b3754af0>
<torch.utils.data.dataloader.DataLoader object at 0x7f42b3754a00>
-
-
-
-
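The DiscriminatorBlock printouts above follow one pattern: several 3x3 Conv2d / BatchNorm2d / LeakyReLU stages at constant width, then a strided 4x4 conv that halves the resolution and doubles the channel count. The module below is a minimal sketch, assuming standard PyTorch, that reproduces that layout; only the layer structure is taken from the printout, the forward pass and the argument names (in_channels, n_layers) are assumptions.

import torch
import torch.nn as nn

class DiscriminatorBlock(nn.Module):
    # Sketch of one block from the printout above; not the author's exact code.
    def __init__(self, in_channels: int, n_layers: int = 4):
        super().__init__()
        # n_layers conv stages that keep channel count and spatial size unchanged.
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(in_channels),
                nn.LeakyReLU(),
            )
            for _ in range(n_layers)
        ])
        # Strided 4x4 conv: halves the resolution, doubles the channels.
        self.downsample = nn.Conv2d(in_channels, 2 * in_channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for conv in self.convs:
            x = conv(x)
        return self.downsample(x)

print(DiscriminatorBlock(8)) yields a module tree of the same shape as block (0) above; with a (1, 8, 32, 32) input the output is (1, 16, 16, 16).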
Finished
pierrotlc
25m 43s
-
128
-
-
-
cuda
32
32
-
50
-
-
-
-
0.0001
0.00001
-
-
128
8
-
-
-
-
4
3
-
Discriminator(
(first_conv): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): ConvTranspose2d(32, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
(conv1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(16, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
SGD (
Parameter Group 0
dampening: 0
lr: 0.0001
momentum: 0
nesterov: False
weight_decay: 0
)
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
eps: 1e-08
lr: 1e-05
weight_decay: 0
)
-
-
-
0
-
-
-
-
<torch.utils.data.dataloader.DataLoader object at 0x7f57367168b0>
<torch.utils.data.dataloader.DataLoader object at 0x7f57367167c0>
-
-
-
-
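The MappingNetwork printed in each run above is a small z-to-w MLP: an input LayerNorm, three Linear(32, 32) + LayerNorm + LeakyReLU stages, and a final Linear(32, 32). Below is a minimal sketch assuming standard PyTorch; the forward pass is a guess consistent with that layout, and dim_z = 32 matches the dim_z column.

import torch
import torch.nn as nn

class MappingNetwork(nn.Module):
    # Sketch of the mapping network from the printout; only the module layout is taken from it.
    def __init__(self, dim_z: int = 32, n_layers: int = 3):
        super().__init__()
        self.norm = nn.LayerNorm(dim_z)
        self.fully_connected = nn.ModuleList([
            nn.Sequential(
                nn.Linear(dim_z, dim_z),
                nn.LayerNorm(dim_z),
                nn.LeakyReLU(),
            )
            for _ in range(n_layers)
        ])
        self.out_layer = nn.Linear(dim_z, dim_z)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        w = self.norm(z)
        for layer in self.fully_connected:
            w = layer(w)
        return self.out_layer(w)

MappingNetwork(32)(torch.randn(16, 32)) returns a (16, 32) batch of style vectors w.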
Failed
pierrotlc
5h 34m 29s
-
128
-
-
-
cuda
32
32
-
26.66667
-
-
-
-
0.000025
0.0000058333
-
-
128
8
-
-
-
-
4
3
-
Discriminator(
(first_conv): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): ModuleList(
(0): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
(3): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(128, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 5e-06\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
["<torch.utils.data.dataloader.DataLoader object at 0x7f14d969e790>","<torch.utils.data.dataloader.DataLoader object at 0x7f23158d17c0>","<torch.utils.data.dataloader.DataLoader object at 0x7f7f83f317f0>","<torch.utils.data.dataloader.DataLoader object at 0x7f84079827f0>","<torch.utils.data.dataloader.DataLoader object at 0x7fabf5974790>","<torch.utils.data.dataloader.DataLoader object at 0x7fafb1e05790>"]
["<torch.utils.data.dataloader.DataLoader object at 0x7f14d969e6a0>","<torch.utils.data.dataloader.DataLoader object at 0x7f23158d16d0>","<torch.utils.data.dataloader.DataLoader object at 0x7f7f83f31700>","<torch.utils.data.dataloader.DataLoader object at 0x7f8407982700>","<torch.utils.data.dataloader.DataLoader object at 0x7fabf59746a0>","<torch.utils.data.dataloader.DataLoader object at 0x7fafb1e056a0>"]
-
-
-
-
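In the StyleGAN printed just above, the SynthesisBlocks keep a constant 128 channels and use nearest-neighbour Upsample instead of transposed convolutions. The AdaIN() entries print without parameters, so how the style statistics reach them is not visible from the table; the sketch below assumes the scale and bias are handed in from outside. Minimal sketch only, assuming standard PyTorch.

import torch
import torch.nn as nn
import torch.nn.functional as F

class AdaIN(nn.Module):
    # Assumed adaptive instance norm; the printout only shows a parameter-free AdaIN().
    def forward(self, x: torch.Tensor, scale: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
        x = F.instance_norm(x)  # per-sample, per-channel normalisation
        return scale[..., None, None] * x + bias[..., None, None]

class SynthesisBlock(nn.Module):
    # Sketch matching the all-128-channel synthesis blocks in the printout above.
    def __init__(self, channels: int = 128):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=2.0, mode="nearest")
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.ada_in = AdaIN()

    def forward(self, x: torch.Tensor, scale: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
        x = self.upsample(x)
        x = self.conv1(x)
        x = self.conv2(x)
        return self.ada_in(x, scale, bias)

Four such blocks multiply the spatial resolution by 16 before the 1x1 to_rgb conv maps the 128 channels to RGB.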
Failed
pierrotlc
18h 18m 13s
-
74.66667
-
-
-
["cpu","cuda"]
32
32
-
21.66667
-
-
-
-
0.000085
0.000036667
-
-
128
8
-
-
-
-
4
3
-
["Discriminator(\n (first_conv): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): ModuleList(\n (0): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (1): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (2): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n (3): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n )\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)","Discriminator(\n (first_conv): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (blocks): ModuleList(\n (0): DiscriminatorBlock(\n (convs): Sequential(\n (0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n (3): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (5): LeakyReLU(negative_slope=0.01)\n )\n (downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (1): DiscriminatorBlock(\n (convs): Sequential(\n (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n (3): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (5): LeakyReLU(negative_slope=0.01)\n )\n (downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (2): DiscriminatorBlock(\n (convs): Sequential(\n (0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n (3): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (5): LeakyReLU(negative_slope=0.01)\n )\n (downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (3): DiscriminatorBlock(\n (convs): Sequential(\n (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (5): LeakyReLU(negative_slope=0.01)\n )\n (downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n (4): DiscriminatorBlock(\n (convs): Sequential(\n (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): LeakyReLU(negative_slope=0.01)\n (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (5): LeakyReLU(negative_slope=0.01)\n )\n (downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n )\n )\n (classify): Sequential(\n (0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): Flatten(start_dim=1, end_dim=-1)\n )\n)"]
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(128, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 1e-05\n weight_decay: 0\n)","Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 5e-05\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
["<torch.utils.data.dataloader.DataLoader object at 0x7f493453a730>","<torch.utils.data.dataloader.DataLoader object at 0x7f551be82700>","<torch.utils.data.dataloader.DataLoader object at 0x7f72ec0a3700>","<torch.utils.data.dataloader.DataLoader object at 0x7f7be29c76d0>","<torch.utils.data.dataloader.DataLoader object at 0x7fa71bd22100>","<torch.utils.data.dataloader.DataLoader object at 0x7faa85567700>"]
["<torch.utils.data.dataloader.DataLoader object at 0x7f493453a640>","<torch.utils.data.dataloader.DataLoader object at 0x7f551be82610>","<torch.utils.data.dataloader.DataLoader object at 0x7f72ec0a3610>","<torch.utils.data.dataloader.DataLoader object at 0x7f7be29c75e0>","<torch.utils.data.dataloader.DataLoader object at 0x7fa71c215fd0>","<torch.utils.data.dataloader.DataLoader object at 0x7faa85567610>"]
-
-
-
-
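The optimD / optimG cells are ordinary torch.optim reprs, so the learning rates and betas can be read off directly. The snippet below is a sketch of how one such pair could be constructed; netD and netG stand in for a run's discriminator and generator, and the tiny placeholder modules exist only so the snippet runs on its own.

import torch
import torch.nn as nn

# Placeholders standing in for the real Discriminator / StyleGAN modules of a run.
netD = nn.Linear(4, 1)
netG = nn.Linear(4, 4)

# Learning rates taken from the Adam reprs shown in the optimD / optimG cells above.
optimD = torch.optim.Adam(netD.parameters(), lr=1e-4, betas=(0.9, 0.999))
optimG = torch.optim.Adam(netG.parameters(), lr=1e-5, betas=(0.9, 0.999))

print(optimD)  # "Adam ( Parameter Group 0 ... lr: 0.0001 ... )", matching the cell format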
Failed
pierrotlc
4h 1m 28s
-
128
-
-
-
cuda
32
32
-
6.42857
-
-
-
-
0.00067143
0.00005
-
-
128
8
-
-
-
-
-
3
-
Discriminator(
(first_conv): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(blocks): ModuleList(
(0): DiscriminatorBlock(
(convs): Sequential(
(0): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
(3): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(4): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): LeakyReLU(negative_slope=0.01)
)
(downsample): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(1): DiscriminatorBlock(
(convs): Sequential(
(0): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
(3): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(4): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): LeakyReLU(negative_slope=0.01)
)
(downsample): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(2): DiscriminatorBlock(
(convs): Sequential(
(0): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
(3): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(4): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): LeakyReLU(negative_slope=0.01)
)
(downsample): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(3): DiscriminatorBlock(
(convs): Sequential(
(0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
(3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): LeakyReLU(negative_slope=0.01)
)
(downsample): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
(4): DiscriminatorBlock(
(convs): Sequential(
(0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): LeakyReLU(negative_slope=0.01)
(3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): LeakyReLU(negative_slope=0.01)
)
(downsample): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
)
)
(classify): Sequential(
(0): Conv2d(256, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): Flatten(start_dim=1, end_dim=-1)
)
)
StyleGAN(
(mapping): MappingNetwork(
(norm): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(fully_connected): ModuleList(
(0): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(1): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
(2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): LayerNorm((32,), eps=1e-05, elementwise_affine=True)
(2): LeakyReLU(negative_slope=0.01)
)
)
(out_layer): Linear(in_features=32, out_features=32, bias=True)
)
(synthesis): SynthesisNetwork(
(blocks): ModuleList(
(0): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(1): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(2): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
(3): SynthesisBlock(
(upsample): Upsample(scale_factor=2.0, mode=nearest)
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(ada_in): AdaIN()
)
)
(to_rgb): Conv2d(128, 3, kernel_size=(1, 1), stride=(1, 1))
)
)
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 0.0001\n weight_decay: 0\n)","RMSprop (\nParameter Group 0\n alpha: 0.99\n centered: False\n eps: 1e-08\n lr: 0.0001\n momentum: 0\n weight_decay: 0\n)","RMSprop (\nParameter Group 0\n alpha: 0.99\n centered: False\n eps: 1e-08\n lr: 0.0005\n momentum: 0\n weight_decay: 0\n)","RMSprop (\nParameter Group 0\n alpha: 0.99\n centered: False\n eps: 1e-08\n lr: 0.001\n momentum: 0\n weight_decay: 0\n)"]
["Adam (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n eps: 1e-08\n lr: 5e-05\n weight_decay: 0\n)","RMSprop (\nParameter Group 0\n alpha: 0.99\n centered: False\n eps: 1e-08\n lr: 5e-05\n momentum: 0\n weight_decay: 0\n)"]
-
-
-
0
-
-
-
-
["<torch.utils.data.dataloader.DataLoader object at 0x7f380ea146d0>","<torch.utils.data.dataloader.DataLoader object at 0x7f52d219b700>","<torch.utils.data.dataloader.DataLoader object at 0x7f6314a17700>","<torch.utils.data.dataloader.DataLoader object at 0x7f960a9566d0>","<torch.utils.data.dataloader.DataLoader object at 0x7fadadda66d0>","<torch.utils.data.dataloader.DataLoader object at 0x7fca7b2016d0>","<torch.utils.data.dataloader.DataLoader object at 0x7feac370e6d0>"]
["<torch.utils.data.dataloader.DataLoader object at 0x7f380ea145e0>","<torch.utils.data.dataloader.DataLoader object at 0x7f52d219b610>","<torch.utils.data.dataloader.DataLoader object at 0x7f6314a17610>","<torch.utils.data.dataloader.DataLoader object at 0x7f960a9565e0>","<torch.utils.data.dataloader.DataLoader object at 0x7fadadda65e0>","<torch.utils.data.dataloader.DataLoader object at 0x7fca7b2015e0>","<torch.utils.data.dataloader.DataLoader object at 0x7feac370e5e0>"]
-
-
-
-
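In this last run the blocks keep their two conv stages in a single Sequential rather than a ModuleList. Below is a sketch of the surrounding Discriminator, wiring first_conv, the block list, and the classify head together as in the printout; the forward pass and constructor arguments are assumptions.

import torch
import torch.nn as nn

class DiscriminatorBlock(nn.Module):
    # Two conv / BatchNorm / LeakyReLU stages in one Sequential, then a strided downsample.
    def __init__(self, channels: int):
        super().__init__()
        self.convs = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.LeakyReLU(),
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.LeakyReLU(),
        )
        self.downsample = nn.Conv2d(channels, 2 * channels, 4, stride=2, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.downsample(self.convs(x))

class Discriminator(nn.Module):
    # Assumed assembly: first_conv -> five blocks (8 -> 256 channels) -> 1-channel conv -> flatten.
    def __init__(self, n_first_channels: int = 8, n_blocks: int = 5):
        super().__init__()
        self.first_conv = nn.Conv2d(3, n_first_channels, 3, padding=1)
        self.blocks = nn.ModuleList(
            [DiscriminatorBlock(n_first_channels * 2 ** i) for i in range(n_blocks)]
        )
        self.classify = nn.Sequential(
            nn.Conv2d(n_first_channels * 2 ** n_blocks, 1, 3, padding=1, bias=False),
            nn.Flatten(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        return self.classify(x)

With n_first_channels = 8 and five blocks the channel count grows 8 -> 16 -> 32 -> 64 -> 128 -> 256 as in the printout, and a 32x32 input leaves classify as a single logit per image.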