https://roboflow.com/models/semantic-segmentation
I tried running my work on Colab with Roboflow, but I ran into some errors. The errors are shown below.
Code 1:
import torch
import torch.nn as nn
import pytorch_lightning as pl

# Define a subclass of pl.LightningModule
class LightningSegformerForSemanticSegmentation(pl.LightningModule):
    def __init__(self, segformer):
        super().__init__()
        self.segformer = segformer  # store the Segformer model at init time
        self.criterion = nn.CrossEntropyLoss()  # use cross-entropy as the loss function

    def forward(self, x):
        return self.segformer(x)  # the forward pass delegates directly to Segformer's forward

    def training_step(self, batch, batch_idx):
        x, y = batch  # read images and labels from the batch
        out = self.segformer(x)  # run the Segformer model to get predictions
        loss = self.criterion(out, y)  # compute the loss between predictions and labels
        self.log('train_loss', loss)  # log the training loss
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch  # read images and labels from the batch
        out = self.segformer(x)  # run the Segformer model to get predictions
        loss = self.criterion(out, y)  # compute the loss between predictions and labels
        self.log('val_loss', loss)  # log the validation loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)  # use Adam with learning rate 1e-3
        return optimizer
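For context, my understanding was that segformer_finetuner (used in Code 2 below) should be an instance of this wrapper class, constructed from a pretrained Hugging Face Segformer roughly like this. The checkpoint name and num_labels here are only example placeholders for illustration, not necessarily what my notebook actually uses:

from transformers import SegformerForSemanticSegmentation

# placeholder checkpoint and label count, only to show how Code 1 is meant to be instantiated
segformer = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/mit-b0",
    num_labels=2,
    ignore_mismatched_sizes=True,
)
segformer_finetuner = LightningSegformerForSemanticSegmentation(segformer)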
Code 2:
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

early_stop_callback = EarlyStopping(
    monitor="val_loss",
    min_delta=0.00,
    patience=10,
    verbose=False,
    mode="min",
)
checkpoint_callback = ModelCheckpoint(save_top_k=1, monitor="val_loss")
trainer = pl.Trainer(
    # gpus='1',
    accelerator='auto',
    callbacks=[early_stop_callback, checkpoint_callback],
    max_epochs=500,
    val_check_interval=len(train_dataloader),
)
trainer.fit(segformer_finetuner)
The error message is as follows:
INFO:pytorch_lightning.utilities.rank_zero:GPU available: True (cuda), used: True
INFO:pytorch_lightning.utilities.rank_zero:TPU available: False, using: 0 TPU cores
INFO:pytorch_lightning.utilities.rank_zero:IPU available: False, using: 0 IPUs
INFO:pytorch_lightning.utilities.rank_zero:HPU available: False, using: 0 HPUs
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-f576c25d9a20> in <cell line: 18>()
16 val_check_interval=len(train_dataloader),
17 )
---> 18 trainer.fit(segformer_finetuner)
1 frames
/usr/local/lib/python3.9/dist-packages/pytorch_lightning/utilities/compile.py in _maybe_unwrap_optimized(model)
123 if isinstance(model, pl.LightningModule):
124 return model
--> 125 raise TypeError(
126 f"`model` must be a `LightningModule` or `torch._dynamo.OptimizedModule`, got `{type(model).__qualname__}`"
127 )
TypeError: `model` must be a `LightningModule` or `torch._dynamo.OptimizedModule`, got `SegformerForSemanticSegmentation`
Checking the type of the object I passed to trainer.fit:
type(segformer_finetuner)
transformers.models.segformer.modeling_segformer.SegformerForSemanticSegmentation
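From the traceback, _maybe_unwrap_optimized in compile.py only accepts a pl.LightningModule (or a torch._dynamo.OptimizedModule), and the type() output above shows that what I actually passed to trainer.fit is the raw Hugging Face SegformerForSemanticSegmentation. Below is a minimal sketch of the check that is failing, just to illustrate my reading of the error, not a confirmed fix:

import pytorch_lightning as pl

# The raw Hugging Face model fails the isinstance check that raises the TypeError.
print(isinstance(segformer_finetuner, pl.LightningModule))  # False in my notebook

# An instance of the Code 1 wrapper would pass the same check.
wrapped = LightningSegformerForSemanticSegmentation(segformer_finetuner)
print(isinstance(wrapped, pl.LightningModule))  # True

Does this mean I should be passing the wrapped module to trainer.fit instead, or is something else going wrong?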
I've been stuck on this for a long time while trying to get it to work. Could someone please help me out? QQ