stack expects each tensor to be equal size, but got [3, 224, 224] at entry 0 and [1, 224, 224] at entry ...
First, it is clear that the problem happens during data loading and preprocessing. It can be boiled down to the following two causes:
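For context, the error is raised by the DataLoader's default collate function, which calls torch.stack on the per-sample tensors; stacking only succeeds when every sample has the same shape. A minimal standalone reproduction (not from the original post, just an illustration):

import torch

a = torch.zeros(3, 224, 224)   # RGB image tensor: 3 channels
b = torch.zeros(1, 224, 224)   # grayscale image tensor: 1 channel

# Raises: RuntimeError: stack expects each tensor to be equal size,
# but got [3, 224, 224] at entry 0 and [1, 224, 224] at entry 1
batch = torch.stack([a, b])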
1. Calling Image.open() without .convert('RGB')
Incorrect code example:
class Mydataset(data.Dataset):
    def __init__(self, root, labels, transform):
        super(Mydataset, self).__init__()
        self.imgs_path = root
        self.labels = labels
        self.transform = transform

    def __getitem__(self, index):
        ig_path = self.imgs_path[index]
        label = self.labels[index]
        ######################################
        pil_image = Image.open(ig_path)   # grayscale/palette images keep a single channel here
        #########################################
        data = self.transform(pil_image)
        return data, label

    def __len__(self):
        return len(self.imgs_path)
Correct code example:
class Mydataset(data.Dataset):
    def __init__(self, root, labels, transform):
        super(Mydataset, self).__init__()
        self.imgs_path = root
        self.labels = labels
        self.transform = transform

    def __getitem__(self, index):
        ig_path = self.imgs_path[index]
        label = self.labels[index]
        ######################################
        pil_image = Image.open(ig_path).convert('RGB')   # force 3 channels for every image
        #########################################
        data = self.transform(pil_image)
        return data, label

    def __len__(self):
        return len(self.imgs_path)
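The reason this matters: PIL opens grayscale or palette images in mode 'L' or 'P', which ToTensor turns into a [1, H, W] tensor, while RGB images become [3, H, W]. Calling .convert('RGB') normalizes every image to three channels. To locate the offending files in a folder, a quick scan sketch (the directory pattern below is only a placeholder, replace it with your own path):

import glob
from PIL import Image

# Hypothetical directory; substitute your dataset path
for img_path in glob.glob('dataset/*.jpg'):
    with Image.open(img_path) as im:
        if im.mode != 'RGB':
            print(img_path, im.mode)   # e.g. 'L' (grayscale) or 'P' (palette)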
2. Misusing Resize in the transform pipeline
Incorrect example:
transform = transforms.Compose([
    transforms.Resize((224)),   # (224) is just the int 224: only the shorter side is resized
    transforms.ToTensor()
])
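Passing a single int (Resize((224)) is the same as Resize(224)) scales only the shorter side to 224 and keeps the aspect ratio, so images with different aspect ratios end up with different heights and widths and cannot be stacked into one batch. A tuple forces a fixed output size. A small demonstration sketch (the image sizes here are made up):

from PIL import Image
from torchvision import transforms

wide = Image.new('RGB', (400, 300))   # hypothetical landscape image
tall = Image.new('RGB', (300, 400))   # hypothetical portrait image

resize_int = transforms.Resize(224)            # shorter side -> 224, aspect ratio kept
resize_tuple = transforms.Resize((224, 224))   # exact 224 x 224 output

print(resize_int(wide).size, resize_int(tall).size)      # different sizes, e.g. (298, 224) vs (224, 298)
print(resize_tuple(wide).size, resize_tuple(tall).size)  # (224, 224) and (224, 224)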
Correct code:
train_dataset = datasets.ImageFolder(
    train_data,
    transforms.Compose([
        transforms.Resize((224, 224)),   # tuple: every image is resized to exactly 224 x 224
        transforms.ToTensor()
    ]))
Verification:
import glob
import matplotlib.pyplot as plt
from PIL import Image
from torch.utils import data
from torchvision import transforms

path_load = glob.glob(r'D:\BaiduNetdiskDownload\pytorch_learning\dataset\dataset2\*.jpg')   # image paths
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])
all_labels = []
species = ['cloudy', 'rain', 'shine', 'sunrise']   # image classes
for img in path_load:
    for i, c in enumerate(species):
        if c in img:
            all_labels.append(i)   # image labels, derived from the file name
species_to_idx = dict((c, i) for i, c in enumerate(species))     # e.g. {'cloudy': 0, 'rain': 1, ...}
label_to_class = dict((v, k) for k, v in species_to_idx.items())
class Mydataset(data.Dataset):
    def __init__(self, root, labels, transform):
        super(Mydataset, self).__init__()
        self.imgs_path = root
        self.labels = labels
        self.transform = transform

    def __getitem__(self, index):
        ig_path = self.imgs_path[index]
        label = self.labels[index]
        pil_image = Image.open(ig_path).convert('RGB')
        data = self.transform(pil_image)
        return data, label

    def __len__(self):
        return len(self.imgs_path)
wheather_dataset = Mydataset(path_load, all_labels, transform)
wheather_dl = data.DataLoader(wheather_dataset,
                              batch_size=16,
                              shuffle=True,
                              drop_last=True)
plt.figure(figsize=(12, 8))
imgs_batch, labels_batch = next(iter(wheather_dl))
for i, (img, label) in enumerate(zip(imgs_batch, labels_batch)):
    img = img.permute(1, 2, 0).numpy()   # CHW tensor -> HWC array for imshow
    plt.subplot(4, 4, i + 1)
    plt.title(label_to_class.get(label.item()))
    plt.imshow(img)
plt.show()
Result: the script displays a 4x4 grid of images from one batch, each titled with its class name, confirming that the batch stacks without errors.
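As an extra sanity check (a small addition using the variables defined above), you can also print the batch shape; with both fixes in place every batch stacks to a uniform size:

print(imgs_batch.shape)   # torch.Size([16, 3, 224, 224]) with batch_size=16
print(labels_batch[:4])   # a few integer class indices, e.g. tensor([2, 0, 3, 1])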