import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
# Select the GPU when available so training and inference share one device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# Synthetic regression data: y = 2x + 3 plus Gaussian noise (sigma = 2),
# with x drawn uniformly from [0, 10).
np.random.seed(42)  # fixed seed so the dataset is reproducible
x_train = np.random.rand(100, 1) * 10
y_train = 2 * x_train + 3 + np.random.randn(100, 1) * 2

# Visualize the raw training data before fitting.
plt.scatter(x_train, y_train, color='blue', label='Original data')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()

# Convert to float32 tensors on the chosen device for the PyTorch model.
x_train_tensor = torch.from_numpy(x_train).float().to(device)
y_train_tensor = torch.from_numpy(y_train).float().to(device)
class LinearRegressionModel(nn.Module):
    """Single-layer linear model computing y = Wx + b.

    Args:
        input_size: number of input features per sample.
        output_size: number of output features per sample.
    """

    def __init__(self, input_size: int, output_size: int) -> None:
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the affine map to a (batch, input_size) tensor."""
        return self.linear(x)
# Model dimensions: one scalar feature in, one scalar prediction out.
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim).to(device)

criterion = nn.MSELoss()  # mean-squared error, standard for regression
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

num_epochs = 1000
loss_history = []  # per-epoch loss, recorded for the training curve

for epoch in range(num_epochs):
    # Full-batch forward pass and loss.
    outputs = model(x_train_tensor)
    loss = criterion(outputs, y_train_tensor)

    # Reset gradients, backpropagate, and take one SGD step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    loss_history.append(loss.item())

    # Progress report every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Plot the training-loss curve.
plt.plot(loss_history, label='loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss')
plt.show()

# Evaluate the fitted line over the input range [0, 10] without
# tracking gradients.
model.eval()
with torch.no_grad():
    x_test = np.linspace(0, 10, 100).reshape(-1, 1)
    x_test_tensor = torch.from_numpy(x_test).float().to(device)
    y_pred_tensor = model(x_test_tensor)

# Move predictions back to the CPU for plotting with matplotlib.
y_pred = y_pred_tensor.cpu().numpy()

# Overlay the fitted line on the original scatter data.
plt.scatter(x_train, y_train, color='blue', label='Original data')
plt.plot(x_test, y_pred, color='red', label='Fitted line')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()

# Print the learned weight and bias.
print("学习到的参数:")
for name, param in model.named_parameters():
    if param.requires_grad:
        print(f"{name}: {param.data}")