"""Siamese / metric-embedding CNN architectures.

Four weight-sharing networks used for pairwise similarity training:

* ``DIQA``              -- 8-layer conv branch producing an L2-normalized 64-d embedding.
* ``SiameseNetwork``    -- small 3-stage conv branch producing an L2-normalized 256-d embedding.
* ``FaceModel``         -- resnet18 trunk producing a scaled, L2-normalized face embedding
                           plus an optional classifier head.
* ``SignaturesNetwork`` -- AlexNet-style branch producing a 128-d signature embedding.

All except ``FaceModel`` expose ``forward(input1, input2)`` and return the two
branch embeddings; ``FaceModel.forward`` takes a single input.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18


class DIQA(nn.Module):
    """Deep-IQA style Siamese branch.

    Eight 3x3 convolutions (conv2 and conv4 downsample with stride 2),
    global average pooling to a 128-d vector, then two fully connected
    layers.  The final 64-d embedding is L2-normalized.
    """

    def __init__(self):
        super(DIQA, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=48, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=48, out_channels=48, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(in_channels=48, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1)
        self.conv5 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.conv7 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.conv8 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(128, 128)
        self.fc2 = nn.Linear(128, 64)

    def forward_once(self, input):
        """Run one branch: ``input`` -> L2-normalized (N, 64) embedding.

        The leading dimensions of ``input`` (e.g. batch x patches) are
        collapsed into a single batch axis; the trailing three dims are
        taken as (C, H, W).
        """
        x = input.view(-1, input[0].size(-3), input[0].size(-2), input[0].size(-1))
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = F.relu(self.conv7(x))
        conv8 = F.relu(self.conv8(x))
        # Global average pooling collapses the spatial map to (N, 128, 1, 1);
        # the two squeezes leave a flat (N, 128) feature vector.
        q = F.adaptive_avg_pool2d(conv8, (1, 1))
        q = q.squeeze(3).squeeze(2)
        q = self.fc1(q)
        q = self.fc2(q)
        # Unit-length embeddings so pairwise distances are angle-based.
        q = F.normalize(q, p=2, dim=1)
        return q

    def forward(self, input1, input2):
        """Return the pair of branch embeddings for a Siamese loss."""
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)
        return output1, output2


class SiameseNetwork(nn.Module):
    """Small Siamese branch: three conv/pool stages plus two dense layers.

    Expects single-channel input whose conv-stack output flattens to
    20736 features (64 channels x 18 x 18, e.g. a 160x160 input);
    produces an L2-normalized 256-d embedding.
    """

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        self.cnn1 = nn.Sequential(
            nn.Conv2d(1, 32, 5),
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3),
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 64, 3),
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),
        )
        self.fc1 = nn.Sequential(
            nn.Linear(20736, 4096),
            nn.Linear(4096, 256),
        )

    def forward_once(self, x):
        """Run one branch: ``x`` -> L2-normalized (N, 256) embedding."""
        output = self.cnn1(x)
        # Flatten the (64, 18, 18) feature map for the dense layers.
        output = output.view(-1, 20736)
        output = self.fc1(output)
        output = F.normalize(output, p=2, dim=1)
        return output

    def forward(self, input1, input2):
        """Return the pair of branch embeddings for a Siamese loss."""
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)
        return output1, output2


class FaceModel(nn.Module):
    """resnet18-based face embedding network with an optional classifier.

    Args:
        embedding_size: dimension of the face embedding.
        num_classes: number of identities for ``forward_classifier``.
        pretrained: passed through to ``torchvision`` ``resnet18``.
    """

    def __init__(self, embedding_size, num_classes, pretrained=False):
        super(FaceModel, self).__init__()
        self.model = resnet18(pretrained)
        self.embedding_size = embedding_size
        # forward_once skips resnet's average pool and flattens layer4's
        # feature map directly, so fc sees 512*8*8 inputs (assumes the
        # input resolution yields an 8x8 map -- e.g. 256x256 images).
        self.model.fc = nn.Linear(512 * 8 * 8, self.embedding_size)
        self.model.classifier = nn.Linear(self.embedding_size, num_classes)

    def l2_norm(self, input):
        """L2-normalize ``input`` along dim 1 (small epsilon for stability)."""
        input_size = input.size()
        buffer = torch.pow(input, 2)
        normp = torch.sum(buffer, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output

    def forward_once(self, x):
        """Run the resnet trunk and return the scaled, normalized embedding.

        Also caches the embedding on ``self.features`` as a side effect.
        """
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = x.view(x.size(0), -1)
        x = self.model.fc(x)
        self.features = self.l2_norm(x)
        # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
        alpha = 10
        self.features = self.features * alpha
        return self.features

    def forward(self, input):
        """Return the embedding for a single input batch."""
        output = self.forward_once(input)
        return output

    def forward_classifier(self, x):
        """Return identity logits computed from the embedding."""
        features = self.forward(x)
        res = self.model.classifier(features)
        return res


class SignaturesNetwork(nn.Module):
    """AlexNet-style Siamese branch for signature verification.

    Four conv stages with local response normalization and dropout,
    followed by three dense layers; outputs a 128-d embedding (not
    normalized).  The first dense layer expects the conv output to
    flatten to 952576 features, which fixes the expected input size.
    """

    def __init__(self):
        super(SignaturesNetwork, self).__init__()
        # Convolutional feature extractor.
        self.cnn1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=1),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(3, stride=2),
            nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(3, stride=2),
            nn.Dropout2d(p=0.3),
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2),
            nn.Dropout2d(p=0.3),
        )
        # Fully connected head on the flattened conv features.
        self.fc1 = nn.Sequential(
            nn.Linear(952576, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5),
            nn.Linear(1024, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 128),
        )

    def forward_once(self, x):
        """Run one branch: ``x`` -> (N, 128) embedding."""
        output = self.cnn1(x)
        output = output.view(output.size()[0], -1)
        output = self.fc1(output)
        return output

    def forward(self, input1, input2):
        """Return the pair of branch embeddings for a Siamese loss."""
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)
        return output1, output2