`model.eval()` or `model.train()`
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision
import torchvision.transforms as transforms
import pytorch_lightning as pl
將 `nn.Module` 改成 `pl.LightningModule`,這樣的改寫不會影響任何舊的程式碼,原因是 `pl.LightningModule` 提供了 `nn.Module` 中的所有 function,並且多提供了 Lightning 會用到的 function。
class FeedForwardNeuralNet(pl.LightningModule):
    """A simple two-layer fully-connected network.

    Maps ``input_size`` features through one hidden ReLU layer to
    ``num_classes`` output logits.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(FeedForwardNeuralNet, self).__init__()
        # layer 1: input -> hidden; layer 2: hidden -> class logits
        self.l1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # hidden activation first, then the output logits
        hidden = self.relu(self.l1(x))
        return self.l2(hidden)
Lightning 需要 `configure_optimizers`,所以我們會多定義一個:
def configure_optimizers(self, learning_rate=1e-3):
    """Return the Adam optimizer Lightning should use for training.

    Lightning calls this hook with no extra arguments, so ``learning_rate``
    now has a default value — the original made it required, which would
    raise a ``TypeError`` under ``Trainer.fit``. Explicit callers may still
    pass their own value, so the change is backward-compatible.
    """
    optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
    return optimizer
def training_step(self, batch, batch_idx):
    """Compute the cross-entropy training loss for one batch.

    ``batch`` is an ``(inputs, labels)`` pair from the DataLoader; image
    batches are flattened so the first ``nn.Linear`` layer accepts them.
    """
    datas, labels = batch
    # Flatten (B, 1, 28, 28) MNIST images to (B, 784); no-op for 2-D input.
    datas = datas.view(datas.size(0), -1)
    outputs = self(datas)
    # F is torch.nn.functional — it was used here but missing from the
    # file's imports (fixed in the import block).
    loss = F.cross_entropy(outputs, labels)
    return loss
torch.nn.functional
套件的 functionsclass FeedForwardNeuralNet(pl.LightningModule):
def __init__(self, input_size, hidden_size, num_classes):
super(FeedForwardNeuralNet, self).__init__()
# define first layer
self.l1 = nn.Linear(input_size, hidden_size)
# activation function
self.relu = nn.ReLU()
# define second layer
self.l2 = nn.Linear(hidden_size, num_classes)
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
return out
def training_step(self, batch, batch_idx):
datas, labels = batch
outputs = self(datas)
loss = F.cross_entropy(outputs, labels)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
return optimizer
Trainer
from pytorch_lightning import Trainer

# Hyperparameters for MNIST: 28*28 flattened pixels in, 10 digit classes out.
# The original script used these names without ever defining them.
input_size = 784
hidden_size = 500
num_classes = 10
batch_size = 100

# NOTE(review): download=False assumes ./data already holds MNIST;
# set download=True on the first run.
train_dataset = torchvision.datasets.MNIST(root='./data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=False)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

model = FeedForwardNeuralNet(input_size, hidden_size, num_classes)
# A single Trainer is enough — the original built one, discarded it,
# then built another.
trainer = Trainer()
trainer.fit(model, train_loader)