2 回答
TA贡献1836条经验 获得超13个赞
如果我没记错的话,可以这样做:
import torch
import torch.nn as nn
import torch.nn.functional as F
class RnnWith2HiddenSizesModel(nn.Module):
    """Two stacked LSTMs with different hidden sizes.

    A 2-layer LSTM (input 10 -> hidden 20) feeds its per-step outputs
    into a second LSTM (input 20 -> hidden 2). Only the final sequence
    output of the second LSTM is returned.
    """

    def __init__(self):
        super(RnnWith2HiddenSizesModel, self).__init__()
        # First stage: 2-layer LSTM, 10 -> 20.
        self.rnn = nn.LSTM(input_size=10, hidden_size=20, num_layers=2)
        # Second stage: single-layer LSTM, 20 -> 2.
        self.rnn_two = nn.LSTM(input_size=20, hidden_size=2)

    def forward(self, inp, hc):
        """Run both LSTMs in sequence.

        Args:
            inp: input sequence, shape (seq_len, batch, 10).
            hc: (h0, c0) initial state for the first LSTM only.

        Returns:
            Output of the second LSTM, shape (seq_len, batch, 2).
        """
        stage_one_out, _ = self.rnn(inp, hc)
        # The second LSTM starts from its default (zero) hidden state.
        final_out, _ = self.rnn_two(stage_one_out)
        return final_out
# Demo: run the stacked-LSTM model on a random batch.
inp = torch.randn(5, 3, 10)   # (seq_len, batch, input_size)
h0 = torch.randn(2, 3, 20)    # initial hidden state for the 2-layer first LSTM
c0 = torch.randn(2, 3, 20)    # initial cell state for the 2-layer first LSTM
rnn = RnnWith2HiddenSizesModel()
# Fix: call the instance created above instead of constructing a second,
# throwaway model (the original `rnn` was never used).
output = rnn(inp, (h0, c0))
tensor([[[-0.0305, 0.0327],
[-0.1534, -0.1193],
[-0.1393, 0.0474]],
[[-0.0370, 0.0519],
[-0.2081, -0.0693],
[-0.1809, 0.0826]],
[[-0.0561, 0.0731],
[-0.2307, -0.0229],
[-0.1780, 0.0901]],
[[-0.0612, 0.0891],
[-0.2378, 0.0164],
[-0.1760, 0.0929]],
[[-0.0660, 0.1023],
[-0.2176, 0.0508],
[-0.1611, 0.1053]]], grad_fn=<CatBackward>)
TA贡献1860条经验 获得超8个赞
尽管 @Mikhail Berlinkov 的回答可以满足题目的需求,但它并没有推广到一般情况(问题中也并未要求这一点)。为此我想提出第二个、更通用的解决方案:
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import reduce
class RNNModel(nn.Module):
    """Chain an arbitrary number of RNN modules end to end.

    Each module's sequence output is fed as the input of the next one;
    every stage starts from its default (zero) hidden state.
    """

    def __init__(self, *models):
        super(RNNModel, self).__init__()
        # Fix: a plain tuple does NOT register the submodules with
        # nn.Module, so .parameters(), .to(device) and .state_dict()
        # would silently skip them. nn.ModuleList registers each one
        # while still iterating exactly like the original tuple.
        self.models = nn.ModuleList(models)

    def forward(self, inp):
        """Pipe `inp` through every model in order.

        Args:
            inp: input sequence for the first model.

        Returns:
            The sequence output of the last model in the chain.
        """
        # model(arg, None)[0] drops the (h_n, c_n) state and keeps
        # only the per-step outputs, which become the next input.
        return reduce(lambda arg, model: model(arg, None)[0], self.models, inp)
并且可以称为:
# Demo: compose two independently created LSTMs with the generic wrapper.
inp = torch.randn(5, 3, 10)  # (seq_len, batch, input_size)
rnn = nn.LSTM(input_size=10, hidden_size=20, num_layers=2)
rnn_two = nn.LSTM(input_size=20, hidden_size=2)
rnn_model = RNNModel(rnn, rnn_two)
output = rnn_model(inp)
此时 output.shape 等于预期的形状,即 (5, 3, 2)。
添加回答
举报