import torch
import torch.nn as nn
import torch.nn.init as init

# Device the paste referred to as `d` (assumed: CUDA if available, else CPU)
d = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class Tanh200(nn.Module):
    # Scaled tanh: squashes inputs on the order of a few hundred into (-1, 1)
    def forward(self, x):
        return torch.tanh(x / 200)
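For intuition, the /200 divisor stretches tanh so that raw scores of a few hundred map gradually into (-1, 1) instead of saturating at once. A quick illustrative check (the sample values below are exact tanh outputs, not part of the original paste):

import torch
print(torch.tanh(torch.tensor([0., 100., 200., 400.]) / 200))
# tensor([0.0000, 0.4621, 0.7616, 0.9640])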
class Agent(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(833, 2048)
        self.bn1 = nn.BatchNorm1d(2048)
        self.dropout1 = nn.Dropout(p=0.45)
        self.relu = nn.LeakyReLU(0.05)
        self.layer2 = nn.Linear(2048, 1)
        # Note: dropout on the single-unit output zeroes the entire
        # prediction 45% of the time while training
        self.dropout2 = nn.Dropout(p=0.45)
        self.tanh200 = Tanh200()
        self.hidden_layers = nn.ModuleList()  # unused placeholder
        # Initialize weights of the Linear layers with Xavier initialization
        # (works in place, so no .to(d) on the returned tensor is needed)
        init.xavier_uniform_(self.fc1.weight)
        init.xavier_uniform_(self.layer2.weight)
        self.loss = nn.MSELoss()

    def forward(self, x):
        # No per-op .to(d): moving the whole module once with .to(d)
        # puts every parameter on the device; x is expected there too
        x = self.fc1(x)
        x = self.bn1(x)
        x = self.dropout1(x)
        x = self.relu(x)
        x = self.layer2(x)
        x = self.dropout2(x)
        x = self.tanh200(x)
        return x
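A minimal smoke test, assuming the definitions above; the input width of 833 and the device `d` come from the paste, everything else here is illustrative. BatchNorm1d rejects batches of size 1 in training mode, so single inputs need .eval():

model = Agent().to(d)
model.eval()  # BatchNorm1d needs batch size >= 2 in training mode
with torch.no_grad():
    dummy = torch.randn(1, 833, device=d)
    score = model(dummy)  # shape (1, 1), values in (-1, 1)
print(score)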