Notes | (Bilibili) 刘二大人: PyTorch Deep Learning Practice (detailed code notes, beginner-friendly)
PyTorch Deep Learning Practice
The code in these notes follows 刘二大人's course on Bilibili. Every line is commented to make it easier to follow, and the notes are meant to be read alongside the videos.
Linear Regression with PyTorch
# 1. Compute the predictions
# 2. Compute the loss
# 3. Zero the gradients and backpropagate
# 4. Update the parameters with the gradients
import torch
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
# Build the linear model; the same template is used from here on
# A Module must implement at least two methods: the __init__ constructor and forward()
# backward() is built automatically from the computational graph
# You can also subclass torch.autograd.Function to build your own compute blocks (a minimal sketch appears at the end of this example)
class LinerModel(torch.nn.Module):
    def __init__(self):
        # Call the parent constructor
        super(LinerModel, self).__init__()
        # Construct a Linear object that applies a linear transformation to the input
        # class torch.nn.Linear(in_features, out_features, bias=True)
        # in_features  - size of each input sample
        # out_features - size of each output sample
        # bias         - if set to False, the layer will not learn a bias. Default: True
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
model = LinerModel()
# MSE (mean squared error) loss; reduction='sum' sums the errors instead of averaging
# (replaces the deprecated size_average=False argument)
criterion = torch.nn.MSELoss(reduction='sum')
# SGD from the optim module; the first argument passes the weights to optimize,
# model.parameters() returns all of the model's parameters
# Optimizer object
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    # loss is an object, but print() calls its __str__() automatically, so this is fine
    print(epoch, loss)
    # Zero the gradients
    optimizer.zero_grad()
    # Backpropagate
    loss.backward()
    # Update with the gradients and the preset learning rate
    optimizer.step()
# Print the weight and bias; weight holds a single value but is stored as a 1x1 matrix, so .item() extracts the scalar
print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())
# Test
x_test = torch.Tensor([4.0])
y_test = model(x_test)
print('y_pred=', y_test.data)
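The comment above mentions that you can subclass Function to build your own compute blocks. A minimal sketch of that idea, a hand-written ReLU that is not part of the course code:

import torch

class MyReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        # Save the input so backward() can reuse it
        ctx.save_for_backward(x)
        return x.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        # ReLU gradient: pass gradients through only where the input was positive
        x, = ctx.saved_tensors
        return grad_output * (x > 0).float()

# A Function is applied through .apply, not called directly
y = MyReLU.apply(torch.randn(3, requires_grad=True))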
Logistic Regression
# Logistic regression
import torch
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])
class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # Apply the sigmoid function to the linear output
        # (torch.sigmoid replaces the deprecated F.sigmoid)
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred
model = LogisticRegressionModel()
# BCE (binary cross-entropy) loss, not MSE; reduction='sum' sums instead of averaging
criterion = torch.nn.BCELoss(reduction='sum')
# SGD from the optim module; model.parameters() passes all of the model's parameters
# Optimizer object
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    # loss is an object, but print() calls its __str__() automatically, so this is fine
    print(epoch, loss)
    # Zero the gradients
    optimizer.zero_grad()
    # Backpropagate
    loss.backward()
    # Update with the gradients and the preset learning rate
    optimizer.step()
# Print the weight and bias; weight holds a single value but is stored as a 1x1 matrix, so .item() extracts the scalar
print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())
# Test
x_test = torch.Tensor([4.0])
y_test = model(x_test)
print('y_pred=', y_test.data)
Handling Multi-Dimensional Feature Inputs
import numpy as np
import torch
xy = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, :-1])
# [-1] in brackets keeps a 2-D matrix; without the brackets you get a 1-D vector (see the quick check after this example)
y_data = torch.from_numpy(xy[:, [-1]])
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # nn.Sigmoid is a module with no parameters; the sigmoid in torch.nn.functional is a plain function
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x
model = Model()
criterion = torch.nn.BCELoss(reduction='mean')  # loss function (mean reduction replaces the deprecated size_average=True)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # optimizer: stochastic gradient descent
for epoch in range(100):
    # Forward
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())
    # Backward
    optimizer.zero_grad()
    loss.backward()
    # Update
    optimizer.step()
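A quick check of the slicing note above, with a dummy array standing in for the real dataset:

import numpy as np
xy = np.zeros((10, 9), dtype=np.float32)  # dummy data: 10 rows, 8 features + 1 label
print(xy[:, -1].shape)    # (10,)   -> 1-D vector
print(xy[:, [-1]].shape)  # (10, 1) -> 2-D matrix, matching the (N, 1) shape of the predictions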
Loading Datasets
import numpy as np
import torch
from torch.utils.data import Dataset  # Dataset is an abstract class: it can only be subclassed, not instantiated
from torch.utils.data import DataLoader  # DataLoader can be instantiated directly
'''
Four steps: prepare the dataset - design the model - construct the loss and optimizer - run the training cycle
'''
class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):  # makes instances support indexing, returning one sample by index
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len

dataset = DiabetesDataset('diabetes.csv.gz')
# dataset: the dataset; batch_size: mini-batch size; shuffle: whether to shuffle; num_workers: number of parallel worker processes for loading
# Iterating over the DataLoader with num_workers > 0 should be wrapped in a function (or a __main__ guard),
# because multiprocessing works differently on Windows and Linux (see the sketch after this example)
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # nn.Sigmoid is a module with no parameters; the sigmoid in torch.nn.functional is a plain function
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x

model = Model()
criterion = torch.nn.BCELoss(reduction='mean')  # loss function
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # optimizer: stochastic gradient descent
# A nested loop implements Mini-Batch training
for epoch in range(100):
    # Iterate over the dataset, starting the index at 0
    # Can be shortened to: for i, (inputs, labels) in enumerate(train_loader, 0):
    for i, data in enumerate(train_loader, 0):
        # Prepare the data
        inputs, labels = data
        # Forward
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, i, loss.item())
        # Backward
        optimizer.zero_grad()
        loss.backward()
        # Update
        optimizer.step()
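Because of the num_workers note above: on Windows, which uses spawn-based multiprocessing, iterating over a DataLoader with num_workers > 0 has to happen under an if __name__ == '__main__': guard. A minimal sketch of that pattern, reusing the names defined above:

if __name__ == '__main__':
    for epoch in range(100):
        for i, (inputs, labels) in enumerate(train_loader, 0):
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            print(epoch, i, loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()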
Multi-Class Classification
import torch
from torchvision import transforms  # tools for processing images
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F  # for the relu() activation function
import torch.optim as optim  # optimizers
batch_size = 64
# Preprocessing applied to every image
transform = transforms.Compose([
    # Convert the PIL image (H x W x C, values in [0, 255]) to a tensor (C x H x W, values in [0.0, 1.0])
    transforms.ToTensor(),
    # Normalize the tensor image with the dataset mean and standard deviation
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        # Change the shape, like numpy's reshape
        # Setting one view() argument to -1 lets PyTorch infer that dimension so the total number of elements stays the same
        x = x.view(-1, 784)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)

model = Net()
# Cross-entropy loss
criterion = torch.nn.CrossEntropyLoss()
# model.parameters() passes all of the model's parameters directly
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum term
def train(epoch):
    running_loss = 0.0
    # enumerate returns the batch index and the data
    for batch_idx, data in enumerate(train_loader, 0):
        # Two tensors are unpacked: one holds the features of 64 images, the other the corresponding digit labels
        inputs, target = data
        # Zero the gradients
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        # Compute the loss with the cross-entropy criterion
        loss = criterion(outputs, target)
        # Backpropagate
        loss.backward()
        # Update with stochastic gradient descent
        optimizer.step()
        # Print once every 300 batches
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss:%.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    # Do not compute gradients
    with torch.no_grad():
        for data in test_loader:  # fetch the data
            images, labels = data
            outputs = model(images)  # predict
            # outputs.data is a matrix with 10 values per row; the index of the largest value is the prediction
            _, predicted = torch.max(outputs.data, dim=1)  # max along dim=1 (the class dimension) returns the max values and their indices
            total += labels.size(0)  # labels.size(0) is the batch size (64), so this accumulates the total number of samples
            # (predicted == labels).sum() is a tensor; .item() turns it into a plain number, the count of correct predictions
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set:%d %%' % (100 * correct / total))  # correct count divided by total

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
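The view(-1, 784) reshape and the torch.max call from the comments above, as a standalone sketch with dummy tensors (not part of the training script):

import torch
x = torch.randn(64, 1, 28, 28)            # a dummy batch of MNIST-sized images
print(x.view(-1, 784).shape)               # torch.Size([64, 784]); -1 lets PyTorch infer the batch dimension
logits = torch.randn(64, 10)               # dummy network outputs, 10 scores per sample
values, predicted = torch.max(logits, dim=1)
print(predicted.shape)                     # torch.Size([64]); one predicted class index per sample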
Convolutional Neural Networks
A simple construction
import torch
# n input channels and m output channels
in_channels, out_channels = 5, 10
width, height = 100, 100  # image size
kernel_size = 3  # convolution kernel size
batch_size = 1  # batch size
# Randomly generate a 5 x 100 x 100 tensor with a mini-batch size of 1
input = torch.randn(batch_size, in_channels, width, height)
# Conv2d applies a 2-D convolution over an input made of several input planes
conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
output = conv_layer(input)
# print(input)
print(input.shape)
print(output.shape)
print(conv_layer.weight.shape)
Output:
torch.Size([1, 5, 100, 100])
torch.Size([1, 10, 98, 98])
torch.Size([10, 5, 3, 3])
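The 98 follows from the usual convolution output-size formula (no padding, stride 1): output = (input - kernel + 2*padding) / stride + 1 = (100 - 3 + 0) / 1 + 1 = 98, and the weight tensor has shape (out_channels, in_channels, kernel_size, kernel_size) = (10, 5, 3, 3).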
Padding
import torch
input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)
# bias=False: do not add a bias term
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
# Assign kernel to the convolution layer's weight as its initialization
conv_layer.weight.data = kernel.data
output = conv_layer(input)
print(output)
Output:
tensor([[[[ 91., 168., 224., 215., 127.],
[114., 211., 295., 262., 149.],
[192., 259., 282., 214., 122.],
[194., 251., 253., 169., 86.],
[ 96., 112., 110., 68., 31.]]]], grad_fn=<ThnnConv2DBackward>)
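With padding=1 the output keeps the input's 5 x 5 size: (5 - 3 + 2*1) / 1 + 1 = 5. Each value is the 3 x 3 window around that position multiplied elementwise by the kernel and summed; for example, the centre value is 1*4 + 2*6 + 3*8 + 4*6 + 5*7 + 6*8 + 7*7 + 8*4 + 9*6 = 282.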
Stride
import torch
input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)
# bias=False: do not add a bias term
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False)
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
# Assign kernel to the convolution layer's weight as its initialization
conv_layer.weight.data = kernel.data
output = conv_layer(input)
print(output)
Output:
tensor([[[[211., 262.],
[251., 169.]]]], grad_fn=<ThnnConv2DBackward>)
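With stride=2 the kernel moves two cells at a time, so the output shrinks to 2 x 2: (5 - 3) / 2 + 1 = 2; the four values are the window sums at the top-left, top-right, bottom-left and bottom-right kernel positions.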
Max Pooling Layer
A max pooling layer has no weights.
import torch
input = [3, 9, 6, 5,
2, 4, 6, 8,
1, 6, 2, 1,
3, 7, 4, 6]
input = torch.Tensor(input).view(1, 1, 4, 4)
maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)
output = maxpooling_layer(input)
print(output)
Output:
tensor([[[[9., 8.],
[7., 6.]]]])
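MaxPool2d(kernel_size=2) uses a stride equal to the kernel size by default, so the 4 x 4 input is split into four non-overlapping 2 x 2 blocks and each block is replaced by its maximum: max(3,9,2,4)=9, max(6,5,6,8)=8, max(1,6,3,7)=7, max(2,1,4,6)=6.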
A CNN for MNIST
import torch
from torchvision import transforms  # tools for processing images
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F  # for the relu() activation function
import torch.optim as optim  # optimizers
batch_size = 64
# Preprocessing applied to every image
transform = transforms.Compose([
    # Convert the PIL image (H x W x C, values in [0, 255]) to a tensor (C x H x W, values in [0.0, 1.0])
    transforms.ToTensor(),
    # Normalize the tensor image with the dataset mean and standard deviation
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Two convolution layers
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # One pooling layer
        self.pooling = torch.nn.MaxPool2d(2)
        # One fully connected linear layer
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # x.size(0) is n, the batch size; the data is later flattened from (n, 20, 4, 4) to (n, 320)
        batch_size = x.size(0)
        # relu is the non-linear activation
        # Convolution first, then pooling, then relu
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        # view reshapes the data into the input the fully connected layer expects
        x = x.view(batch_size, -1)
        # The last layer is not activated here because the cross-entropy loss applies log-softmax itself
        return self.fc(x)

model = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Cross-entropy loss
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum term
def train(epoch):
    running_loss = 0.0
    # enumerate returns the batch index and the data
    for batch_idx, data in enumerate(train_loader, 0):
        # Two tensors are unpacked: one holds the features of 64 images, the other the corresponding digit labels
        inputs, target = data
        # Move inputs and targets to the GPU
        inputs, target = inputs.to(device), target.to(device)
        # Zero the gradients
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        # Compute the loss with the cross-entropy criterion
        loss = criterion(outputs, target)
        # Backpropagate
        loss.backward()
        # Update with stochastic gradient descent
        optimizer.step()
        # Print once every 300 batches
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss:%.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    # Do not compute gradients
    with torch.no_grad():
        for data in test_loader:  # fetch the data
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)  # predict
            # outputs.data is a matrix with 10 values per row; the index of the largest value is the prediction
            _, predicted = torch.max(outputs.data, dim=1)  # max along dim=1 (the class dimension) returns the max values and their indices
            total += labels.size(0)  # labels.size(0) is the batch size (64), so this accumulates the total number of samples
            # (predicted == labels).sum() is a tensor; .item() turns it into a plain number, the count of correct predictions
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set:%d %%' % (100 * correct / total))  # correct count divided by total

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
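The 320 in nn.Linear(320, 10) comes from tracking the feature-map shape: a 1 x 28 x 28 image goes through conv1 (5 x 5, no padding) to 10 x 24 x 24, pooling halves it to 10 x 12 x 12, conv2 gives 20 x 8 x 8, pooling gives 20 x 4 x 4, and flattening yields 20 * 4 * 4 = 320 features.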
Convolutional Neural Networks (Advanced)
import torch
import torch.nn as nn
from torchvision import transforms  # tools for processing images
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F  # for the relu() activation function
import torch.optim as optim  # optimizers
batch_size = 64
# Preprocessing applied to every image
transform = transforms.Compose([
    # Convert the PIL image (H x W x C, values in [0, 255]) to a tensor (C x H x W, values in [0.0, 1.0])
    transforms.ToTensor(),
    # Normalize the tensor image with the dataset mean and standard deviation
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        # First branch: a 1x1 convolution from in_channels to 16 channels
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        # Second branch
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        # Third branch
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
        # Fourth branch
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        # Concatenate the branches along the channel dimension
        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)
        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)
        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(1408, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x

model = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Cross-entropy loss
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)  # momentum term
def train(epoch):
    running_loss = 0.0
    # enumerate returns the batch index and the data
    for batch_idx, data in enumerate(train_loader, 0):
        # Two tensors are unpacked: one holds the features of 64 images, the other the corresponding digit labels
        inputs, target = data
        # Move inputs and targets to the GPU
        inputs, target = inputs.to(device), target.to(device)
        # Zero the gradients
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        # Compute the loss with the cross-entropy criterion
        loss = criterion(outputs, target)
        # Backpropagate
        loss.backward()
        # Update with stochastic gradient descent
        optimizer.step()
        # Print once every 300 batches
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss:%.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    # Do not compute gradients
    with torch.no_grad():
        for data in test_loader:  # fetch the data
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)  # predict
            # outputs.data is a matrix with 10 values per row; the index of the largest value is the prediction
            _, predicted = torch.max(outputs.data, dim=1)  # max along dim=1 (the class dimension) returns the max values and their indices
            total += labels.size(0)  # labels.size(0) is the batch size (64), so this accumulates the total number of samples
            # (predicted == labels).sum() is a tensor; .item() turns it into a plain number, the count of correct predictions
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set:%d %%' % (100 * correct / total))  # correct count divided by total

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()
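Where the 88 and 1408 come from: each InceptionA block concatenates branches with 16 + 24 + 24 + 24 = 88 output channels, which is why conv2 takes 88 input channels. Spatially, 28 x 28 becomes 24 x 24 after conv1 and 12 x 12 after pooling (InceptionA keeps the size), then 8 x 8 after conv2 and 4 x 4 after pooling, so the flattened feature vector has 88 * 4 * 4 = 1408 elements, matching nn.Linear(1408, 10).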
Residual Block (ResNet)
The block is defined so that its input and output have the same shape, which the skip connection x + y requires.
import torch.nn as nn
import torch.nn.functional as F

class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # Two 3x3 convolutions with padding=1 keep both the channel count and the spatial size unchanged
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        # Skip connection: add the input before the final ReLU
        return F.relu(x + y)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x
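The 512 in nn.Linear(512, 10) again follows from the shapes: 1 x 28 x 28 becomes 16 x 24 x 24 after conv1 and 16 x 12 x 12 after pooling, the residual block keeps that shape, conv2 gives 32 x 8 x 8, pooling gives 32 x 4 x 4, and flattening yields 32 * 4 * 4 = 512 features.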
RNNCell
import torch
batch_size = 1  # batch size
seq_len = 3  # sequence length: number of inputs x1, x2, x3
input_size = 4  # dimension of each input vector
hidden_size = 2  # dimension of each hidden state vector
cell = torch.nn.RNNCell(input_size=input_size, hidden_size=hidden_size)
dataset = torch.randn(seq_len, batch_size, input_size)
hidden = torch.zeros(batch_size, hidden_size)
for idx, input in enumerate(dataset):
    print('=' * 20, idx, '=' * 20)
    print('Input size:', input.shape)
    hidden = cell(input, hidden)
    print('Outputs size:', hidden.shape)
    print(hidden)
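Each call to the cell computes hidden = tanh(input @ W_ih.T + b_ih + hidden @ W_hh.T + b_hh) (tanh is the default nonlinearity of nn.RNNCell), so the hidden state it returns has shape (batch_size, hidden_size), here (1, 2).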
RNN
import torch
batch_size = 1
seq_len = 3
input_size = 4
hidden_size = 2
num_layers = 1
cell = torch.nn.RNN(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers)
# (seqLen, batchSize, inputSize)
inputs = torch.randn(seq_len, batch_size, input_size)
hidden = torch.zeros(num_layers, batch_size, hidden_size)
out, hidden = cell(inputs, hidden)
print('Output size:', out.shape)
print('Output:', out)
print('Hidden size: ', hidden.shape)
print('Hidden: ', hidden)
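Here out stacks the hidden state at every time step, shape (seq_len, batch_size, hidden_size) = (3, 1, 2), while hidden is the final hidden state of each layer, shape (num_layers, batch_size, hidden_size) = (1, 1, 2). Passing batch_first=True to torch.nn.RNN makes it expect and return tensors with the batch dimension first instead.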