    (0): Conv2D(3 -> 8, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
    (1): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=8)
    (2): Activation(relu)
    (3): Conv2D(1 -> 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=8, bias=False)
    (4): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=8)
    (5): Activation(relu)
    (6): Conv2D(8 -> 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (7): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=16)
    (8): Activation(relu)
    (9): Conv2D(1 -> 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=16, bias=False)
    (10): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=16)
    (11): Activation(relu)
    (12): Conv2D(16 -> 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (13): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=32)
    (14): Activation(relu)
    (15): Conv2D(1 -> 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
    (16): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=32)
    (17): Activation(relu)
    (18): Conv2D(32 -> 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (19): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=32)
    (20): Activation(relu)
    (21): Conv2D(1 -> 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
    (22): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=32)
    (23): Activation(relu)
    (24): Conv2D(32 -> 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (25): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=64)
    (26): Activation(relu)
    (27): Conv2D(1 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
    (28): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=64)
    (29): Activation(relu)
    (30): Conv2D(64 -> 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (31): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=64)
    (32): Activation(relu)
    (33): Conv2D(1 -> 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
    (34): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=64)
    (35): Activation(relu)
    (36): Conv2D(64 -> 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (37): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (38): Activation(relu)
    (39): Conv2D(1 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
    (40): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (41): Activation(relu)
    (42): Conv2D(128 -> 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (43): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (44): Activation(relu)
    (45): Conv2D(1 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
    (46): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (47): Activation(relu)
    (48): Conv2D(128 -> 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (49): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (50): Activation(relu)
    (51): Conv2D(1 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
    (52): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (53): Activation(relu)
    (54): Conv2D(128 -> 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (55): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (56): Activation(relu)
    (57): Conv2D(1 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
    (58): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (59): Activation(relu)
    (60): Conv2D(128 -> 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (61): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (62): Activation(relu)
    (63): Conv2D(1 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
    (64): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (65): Activation(relu)
    (66): Conv2D(128 -> 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (67): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (68): Activation(relu)
    (69): Conv2D(1 -> 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
    (70): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=128)
    (71): Activation(relu)
    (72): Conv2D(128 -> 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (73): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=256)
    (74): Activation(relu)
    (75): Conv2D(1 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
    (76): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=256)
    (77): Activation(relu)
    (78): Conv2D(256 -> 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (79): BatchNorm(axis=1, use_global_stats=False, momentum=0.9, eps=1e-05, fix_gamma=False, in_channels=256)
    (80): Activation(relu)
    (81): GlobalAvgPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True)
    (82): Flatten
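
The listing above appears to be the feature block of a width-multiplier 0.25 MobileNet as printed by MXNet Gluon: a regular 3x3 stem convolution followed by repeated depthwise 3x3 / pointwise 1x1 convolution pairs, each with BatchNorm and ReLU, and a global average pool plus flatten at the end. As a minimal sketch (assuming MXNet is installed and that the model is indeed the Gluon model-zoo `mobilenet0_25`), a summary like this can be reproduced as follows:

```python
import mxnet as mx
from mxnet.gluon.model_zoo import vision

# Width-multiplier 0.25 MobileNet from the Gluon model zoo (assumed to be the
# model behind the printout above); pretrained=True would also fetch weights.
net = vision.mobilenet0_25(pretrained=False)
net.initialize()

# Printing the feature block yields a layer listing in the same format:
# depthwise 3x3 Conv2D -> BatchNorm -> ReLU, pointwise 1x1 Conv2D -> BatchNorm -> ReLU, ...
print(net.features)

# Sanity check: a 224x224 RGB batch is reduced to a 256-dim vector by the
# final GlobalAvgPool2D + Flatten (256 = 1024 * 0.25 width multiplier).
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))
print(net.features(x).shape)  # expected: (1, 256)
```

Note that a depthwise convolution shows up as `Conv2D(1 -> N, ..., groups=N)`: Gluon prints `in_channels // groups` as the left-hand number, which is 1 whenever `groups` equals the channel count, so these layers are not taking 1-channel inputs.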