UNet PyTorch to Keras

How do I convert this PyTorch model to Keras? The Conv2D part is confusing me. I need to define the densely connected decoder nodes of the nested UNet (UNet++) in Keras, and it requires changing the input shape. I'm attaching the PyTorch code below; please show a sample of how to do this. I mainly need help converting the decoder part to Keras.

  1. `in_ch` is defined in PyTorch; how do I give it in Keras?
  2. I'm not sure what size to give for upsampling in Keras. (See the sketch right after this list for both points.)
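
Here is my current understanding of the mapping for these two points (a minimal sketch, assuming channels-last Keras defaults; the 256x256 size is just a placeholder):

```python
import tensorflow as tf
from tensorflow.keras import layers

# in_ch has no direct Keras equivalent: the channel count is the last
# dimension of the Input shape, and every Conv2D infers its input
# channels from the tensor it receives.
inputs = tf.keras.Input(shape=(256, 256, 1))  # (H, W, in_ch)

# nn.Upsample(scale_factor=2, mode='bilinear') also takes a scale
# factor in Keras, via size=(2, 2), not an absolute output size.
x = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(inputs)
```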

```python
import torch
import torch.nn as nn


class UNet(nn.Module):
    def __init__(self, in_ch=1, out_ch=1):
        super(UNet, self).__init__()

        n1 = 64
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        # A single shared pool/upsample replaces the four per-level
        # MaxPool layers that were commented out in an earlier version.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        # Encoder backbone; conv_block is defined elsewhere
        # (a double 3x3 conv + BN + ReLU in the reference UNet++ code)
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # Nested decoder nodes: input channels = all concatenated skips
        # at this depth plus the upsampled features from one level deeper
        self.conv0_1 = conv_block(filters[0] + filters[1], filters[0])
        self.conv1_1 = conv_block(filters[1] + filters[2], filters[1])
        self.conv2_1 = conv_block(filters[2] + filters[3], filters[2])
        self.conv3_1 = conv_block(filters[3] + filters[4], filters[3])

        self.conv0_2 = conv_block(filters[0] * 2 + filters[1], filters[0])
        self.conv1_2 = conv_block(filters[1] * 2 + filters[2], filters[1])
        self.conv2_2 = conv_block(filters[2] * 2 + filters[3], filters[2])

        self.conv0_3 = conv_block(filters[0] * 3 + filters[1], filters[0])
        self.conv1_3 = conv_block(filters[1] * 3 + filters[2], filters[1])

        self.conv0_4 = conv_block(filters[0] * 4 + filters[1], filters[0])

        self.final = nn.Conv2d(filters[0], out_ch, kernel_size=1)

    def encoder(self, x):
        print("Shape after convolution and max pooling [<Count, Filters, Height, Width>]")
        print("Original shape:", x.shape)
        x0_0 = self.Conv1(x)
        print("e1: Convolution 1:\t", x0_0.shape)
        e2 = self.pool(x0_0)
        print("After max pool 1:\t", e2.shape)
        x1_0 = self.Conv2(e2)
        print("e2: Convolution 2:\t", x1_0.shape)
        e3 = self.pool(x1_0)
        print("After max pool 2:\t", e3.shape)
        x2_0 = self.Conv3(e3)
        print("e3: Convolution 3:\t", x2_0.shape)
        e4 = self.pool(x2_0)
        print("After max pool 3:\t", e4.shape)
        x3_0 = self.Conv4(e4)
        print("e4: Convolution 4:\t", x3_0.shape)
        e5 = self.pool(x3_0)
        print("After max pool 4:\t", e5.shape)
        x4_0 = self.Conv5(e5)
        print("e5: Convolution 5:\t", x4_0.shape)
        return x0_0, x1_0, x2_0, x3_0, x4_0

    def decoder(self, x0_0, x1_0, x2_0, x3_0, x4_0):
        # Each node x{i}_{j} concatenates every earlier node at depth i with
        # the upsampled node from depth i+1; dim 1 is the channel axis.
        x0_1 = self.conv0_1(torch.cat([x0_0, self.Up(x1_0)], 1))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.Up(x2_0)], 1))
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.Up(x1_1)], 1))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.Up(x3_0)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.Up(x2_1)], 1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.Up(x1_2)], 1))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.Up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.Up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.Up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.Up(x1_3)], 1))
        print("Decoder:\t", x0_4.shape)
        output_dec = self.final(x0_4)
        print("Final:\t", output_dec.shape)
        return output_dec

    def forward(self, input):
        x1, x2, x3, x4, x5 = self.encoder(input)
        d1 = self.decoder(x1, x2, x3, x4, x5)
        return d1
```
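
This is the kind of functional-API translation I'm aiming for (a rough sketch under my assumptions: channels-last inputs, `conv_block` rebuilt as a double 3x3 conv + BN + ReLU helper, and `build_nested_unet` as an illustrative name). The channel arithmetic like `filters[0]*2 + filters[1]` disappears because Keras layers infer their input channels:

```python
import tensorflow as tf
from tensorflow.keras import layers

def conv_block(x, filters):
    # Stand-in for the PyTorch conv_block: two 3x3 Conv2D + BN + ReLU
    # (assumed structure; adjust to match the actual conv_block).
    for _ in range(2):
        x = layers.Conv2D(filters, 3, padding='same', use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)
    return x

def build_nested_unet(input_shape=(256, 256, 1), out_ch=1):
    n1 = 64
    filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

    up = lambda t: layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(t)
    pool = lambda t: layers.MaxPooling2D(pool_size=2, strides=2)(t)
    cat = lambda ts: layers.Concatenate(axis=-1)(ts)  # channels-last, so axis=-1

    inputs = tf.keras.Input(shape=input_shape)  # in_ch is the last dimension

    # Encoder
    x0_0 = conv_block(inputs, filters[0])
    x1_0 = conv_block(pool(x0_0), filters[1])
    x2_0 = conv_block(pool(x1_0), filters[2])
    x3_0 = conv_block(pool(x2_0), filters[3])
    x4_0 = conv_block(pool(x3_0), filters[4])

    # Nested decoder: same wiring as the PyTorch version
    x0_1 = conv_block(cat([x0_0, up(x1_0)]), filters[0])
    x1_1 = conv_block(cat([x1_0, up(x2_0)]), filters[1])
    x0_2 = conv_block(cat([x0_0, x0_1, up(x1_1)]), filters[0])
    x2_1 = conv_block(cat([x2_0, up(x3_0)]), filters[2])
    x1_2 = conv_block(cat([x1_0, x1_1, up(x2_1)]), filters[1])
    x0_3 = conv_block(cat([x0_0, x0_1, x0_2, up(x1_2)]), filters[0])
    x3_1 = conv_block(cat([x3_0, up(x4_0)]), filters[3])
    x2_2 = conv_block(cat([x2_0, x2_1, up(x3_1)]), filters[2])
    x1_3 = conv_block(cat([x1_0, x1_1, x1_2, up(x2_2)]), filters[1])
    x0_4 = conv_block(cat([x0_0, x0_1, x0_2, x0_3, up(x1_3)]), filters[0])

    # Matches self.final (no activation); add sigmoid/softmax if needed
    outputs = layers.Conv2D(out_ch, kernel_size=1)(x0_4)
    return tf.keras.Model(inputs, outputs)
```

If this is on the right track, `build_nested_unet().summary()` should reproduce the shapes that the PyTorch prints report (with channels last instead of first).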

Hey @innat, if I remember correctly you had a UNet++ Keras prototype, right?


The author of UNet++ (aka XNet) provided implementations in both Keras and PyTorch.

Officially, they provide many ImageNet-pretrained models as encoder backbones, but an EfficientNet backbone was missing, so I made an extension for that.
