What kind of activation function is used in the second hidden layer of the following network?
import torch
import torch.nn as nn

class NetTanh(nn.Module):
    # Constructor: three linear layers sized D_in -> H1 -> H2 -> D_out
    def __init__(self, D_in, H1, H2, D_out):
        super(NetTanh, self).__init__()
        self.linear1 = nn.Linear(D_in, H1)
        self.linear2 = nn.Linear(H1, H2)
        self.linear3 = nn.Linear(H2, D_out)

    # Prediction: sigmoid after the first hidden layer, tanh after the second,
    # no activation on the output layer
    def forward(self, x):
        x = torch.sigmoid(self.linear1(x))
        x = torch.tanh(self.linear2(x))
        x = self.linear3(x)
        return x
Answer: tanh. The second hidden layer is self.linear2, and in forward() its output is passed through torch.tanh, so the activation applied to the second hidden layer is the hyperbolic tangent.
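For concreteness, a minimal usage sketch; the layer sizes and batch size below are hypothetical, chosen only to show a forward pass through the model:

import torch

# Hypothetical dimensions: 4 input features, hidden layers of 6 and 3 units, 2 outputs
model = NetTanh(D_in=4, H1=6, H2=3, D_out=2)
x = torch.randn(5, 4)     # a batch of 5 samples
y_hat = model(x)          # forward pass: sigmoid -> tanh -> linear output
print(y_hat.shape)        # torch.Size([5, 2])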