ValueError: Using a target size (torch.Size([64])) that is different to the input size (torch.Size([64, 1])) is deprecated. Please ensure they have the same size.
ValueError Traceback (most recent call last) ~\AppData\Local\Temp/ipykernel_19156/279535578.py in <module> 54 #计算损失函数 55 label.data.fill_(1) ---> 56 error_real=criterion(output, label)
·
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_19156/279535578.py in <module>
54 #计算损失函数
55 label.data.fill_(1)
---> 56 error_real=criterion(output, label)
57 error_real.backward() #辨别器的反向误差传播
58 D_x=output.data.mean()
c:\users\25566\miniconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
报错信息如上。报错信息说明传给损失函数的输入尺寸(output)和目标尺寸(label)不一致,导致报错。解决方法是找到之前代码中 output 和 label 的尺寸赋值处,使二者的尺寸保持一致。
# 构造判别器
class ModelD(nn.Module):
    """DCGAN-style discriminator: two strided convolutions followed by two
    fully-connected layers and a sigmoid, producing one real/fake score per
    input image.

    NOTE(review): relies on module-level globals `num_channels` and
    `num_features` defined elsewhere in the file.
    """

    def __init__(self):
        super(ModelD, self).__init__()
        # Build the network as a *named* nn.Sequential so forward() can
        # locate the first linear layer by name and flatten just before it.
        self.model = nn.Sequential()
        # First conv stage: num_channels -> num_features, 5x5 kernel,
        # stride 2, no padding.
        self.model.add_module('conv1', nn.Conv2d(num_channels, num_features, 5, 2, 0, bias=False))
        self.model.add_module('relu1', nn.ReLU())
        # (A LeakyReLU(0.2, inplace=True) here would avoid the dead-ReLU
        # problem, as in the original DCGAN recipe.)
        # Second conv stage: num_features -> num_features * 2, then batch norm.
        self.model.add_module('conv2', nn.Conv2d(num_features, num_features * 2, 5, 2, 0, bias=False))
        self.model.add_module('bnorm2', nn.BatchNorm2d(num_features * 2))
        # Fully-connected head operating on the flattened feature map
        # (assumes the conv output is num_features*2 x 4 x 4 — depends on
        # the input image size; confirm against the data pipeline).
        self.model.add_module('linear1', nn.Linear(num_features * 2 * 4 * 4, num_features))
        self.model.add_module('linear2', nn.Linear(num_features, 1))
        self.model.add_module('sigmoid', nn.Sigmoid())

    def forward(self, input):
        """Run the discriminator.

        Returns a 1-D tensor of probabilities, one per sample: the trailing
        singleton dimension left by the final Linear(…, 1) is squeezed so
        the output shape matches a target of shape (batch,).
        """
        h = input
        for layer_name, layer in self.model.named_children():
            # Flatten the conv feature map right before the first
            # fully-connected layer.
            if layer_name == 'linear1':
                h = h.view(-1, num_features * 2 * 4 * 4)
            h = layer(h)
        return h.squeeze(-1)
源代码里面是没有 output = output.squeeze(-1) 的,添加这行代码即可解决问题:squeeze(-1) 会把判别器输出的尺寸从 (64, 1) 压缩为 (64),与 label 的尺寸一致,损失函数即可正常计算。
更多推荐
已为社区贡献1条内容
所有评论(0)