import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
# Define the neural network that approximates the solution y(x)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # A simple network with one hidden layer of 10 units
        self.hidden = nn.Linear(1, 10)
        self.output = nn.Linear(10, 1)

    def forward(self, x):
        # Apply a non-linearity (ReLU here; a smooth activation such as tanh
        # is often preferred when the loss differentiates the network output)
        x = torch.relu(self.hidden(x))
        # Output the approximation for y(x)
        return self.output(x)
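
# Optional sanity check (not in the original paste): a minimal sketch
# confirming the untrained network maps a batch of shape (N, 1) to
# predictions of shape (N, 1), matching what the loss below expects.
_check = Net()(torch.zeros(4, 1))
assert _check.shape == (4, 1)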
# Define the loss for the differential equation dy/dx = -y with y(0) = 1
def ode_loss(x, model):
    y = model(x)  # Network prediction for y(x)
    # dy/dx via autograd; create_graph=True keeps the loss differentiable
    dy_dx = torch.autograd.grad(y, x, grad_outputs=torch.ones_like(y),
                                create_graph=True)[0]
    # Residual of the equation: dy/dx + y = 0
    residual = torch.mean((dy_dx + y) ** 2)
    # Initial-condition penalty: without y(0) = 1 the residual alone is
    # minimised by any C * exp(-x), including the trivial solution y = 0,
    # so the comparison against exp(-x) below would be meaningless
    x0 = torch.zeros(1, 1)
    ic = torch.mean((model(x0) - 1.0) ** 2)
    return residual + ic
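
# Optional sanity check (not in the original paste): the same autograd
# pattern recovers a known derivative, e.g. d(x^2)/dx = 2x.
_x = torch.linspace(0.0, 1.0, 5).view(-1, 1).requires_grad_(True)
_y = _x ** 2
_g = torch.autograd.grad(_y, _x, grad_outputs=torch.ones_like(_y))[0]
assert torch.allclose(_g, 2 * _x)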
# Generate training points: 100 values of x in [0, 2]
x_train = torch.linspace(0, 2, 100).view(-1, 1)
x_train.requires_grad = True  # autograd needs gradients with respect to x

# Initialize the neural network and the optimizer
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Training loop
epochs = 2000
for epoch in range(epochs):
    optimizer.zero_grad()            # Zero the accumulated gradients
    loss = ode_loss(x_train, model)  # Compute the loss
    loss.backward()                  # Backpropagate
    optimizer.step()                 # Update the weights
    if epoch % 100 == 0:
        print(f'Epoch {epoch}, Loss: {loss.item():.6f}')
# Evaluate the trained model on a fresh grid
with torch.no_grad():
    x_test = torch.linspace(0, 2, 100).view(-1, 1)
    y_pred = model(x_test)
# Plot the network's solution against the exact solution y = exp(-x)
plt.plot(x_test.numpy(), y_pred.numpy(), label="NN solution")
plt.plot(x_test.numpy(), torch.exp(-x_test).numpy(), label="Exact solution")
plt.legend()
plt.show()
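
# Optional check (not in the original paste): a minimal sketch reporting
# how far the network is from the exact solution on the test grid.
max_err = (y_pred - torch.exp(-x_test)).abs().max().item()
print(f'Max abs error vs exp(-x): {max_err:.4f}')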