An error occurs when using the semantic segmentation code provided by Roboflow.


https://roboflow.com/models/semantic-segmentation
I used Roboflow's code to run my project on Colab and ran into some errors, shown below.
Code 1

import torch
import torch.nn as nn
import pytorch_lightning as pl

# Define a subclass that inherits from pl.LightningModule
class LightningSegformerForSemanticSegmentation(pl.LightningModule):
    def __init__(self, segformer):
        super().__init__()
        self.segformer = segformer  # store the Segformer model passed in at construction
        self.criterion = nn.CrossEntropyLoss()  # use cross-entropy as the loss function

    def forward(self, x):
        return self.segformer(x)  # the forward pass simply delegates to the Segformer model

    def training_step(self, batch, batch_idx):
        x, y = batch  # unpack images and labels from the batch
        out = self.segformer(x)  # run the Segformer model on the images
        loss = self.criterion(out, y)  # compute the loss between predictions and labels
        self.log('train_loss', loss)  # log the training loss
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch  # unpack images and labels from the batch
        out = self.segformer(x)  # run the Segformer model on the images
        loss = self.criterion(out, y)  # compute the loss between predictions and labels
        self.log('val_loss', loss)  # log the validation loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)  # Adam optimizer, learning rate 1e-3
        return optimizer
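
One thing worth flagging in this wrapper: the Hugging Face `SegformerForSemanticSegmentation` forward returns a `SemanticSegmenterOutput` object rather than a raw tensor, so the loss computation would most likely need the `.logits` field. A minimal sketch of an adjusted `training_step`, assuming the labels `y` are full-resolution class-index masks (Segformer emits logits at 1/4 of the input resolution, hence the upsampling step):

import torch.nn.functional as F

def training_step(self, batch, batch_idx):
    x, y = batch
    out = self.segformer(x).logits  # SemanticSegmenterOutput.logits: [B, C, H/4, W/4]
    out = F.interpolate(out, size=y.shape[-2:], mode="bilinear", align_corners=False)  # upsample to label size
    loss = self.criterion(out, y)
    self.log('train_loss', loss)
    return loss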

Code 2

from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

early_stop_callback = EarlyStopping(
    monitor="val_loss",
    min_delta=0.00,
    patience=10,
    verbose=False,
    mode="min",
)

checkpoint_callback = ModelCheckpoint(save_top_k=1, monitor="val_loss")

trainer = pl.Trainer(
    #gpus='1',  
    accelerator='auto',
    callbacks=[early_stop_callback, checkpoint_callback],
    max_epochs=500,
    val_check_interval=len(train_dataloader),
)

trainer.fit(segformer_finetuner)
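
A side note on this call: no dataloaders are passed to `fit` here, so this relies on the module itself implementing the `train_dataloader`/`val_dataloader` hooks. If it does not, they would need to be supplied explicitly. A sketch, assuming `train_dataloader` and `val_dataloader` are the loaders built earlier:

trainer.fit(
    segformer_finetuner,
    train_dataloaders=train_dataloader,  # keyword names as in Lightning 2.x; sketch only
    val_dataloaders=val_dataloader,
)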

The error message is as follows:

INFO:pytorch_lightning.utilities.rank_zero:GPU available: True (cuda), used: True
INFO:pytorch_lightning.utilities.rank_zero:TPU available: False, using: 0 TPU cores
INFO:pytorch_lightning.utilities.rank_zero:IPU available: False, using: 0 IPUs
INFO:pytorch_lightning.utilities.rank_zero:HPU available: False, using: 0 HPUs
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-15-f576c25d9a20> in <cell line: 18>()
     16     val_check_interval=len(train_dataloader),
     17 )
---> 18 trainer.fit(segformer_finetuner)

1 frames
/usr/local/lib/python3.9/dist-packages/pytorch_lightning/utilities/compile.py in _maybe_unwrap_optimized(model)
    123     if isinstance(model, pl.LightningModule):
    124         return model
--> 125     raise TypeError(
    126         f"`model` must be a `LightningModule` or `torch._dynamo.OptimizedModule`, got `{type(model).__qualname__}`"
    127     )

TypeError: `model` must be a `LightningModule` or `torch._dynamo.OptimizedModule`, got `SegformerForSemanticSegmentation`
Checking the type:

type(segformer_finetuner)
transformers.models.segformer.modeling_segformer.SegformerForSemanticSegmentation
I wanted to give it a try but have been stuck for ages; could some expert please save me? QQ
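
The traceback points directly at the cause: `Trainer.fit` only accepts objects that pass `isinstance(model, pl.LightningModule)`, and the `segformer_finetuner` being passed in is the raw Hugging Face model, not an instance of the wrapper defined in Code 1. A minimal check that reproduces the distinction (the checkpoint name is illustrative):

import pytorch_lightning as pl
from transformers import SegformerForSemanticSegmentation

raw_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/mit-b0")  # illustrative checkpoint
print(isinstance(raw_model, pl.LightningModule))  # False: trainer.fit raises TypeError

wrapped = LightningSegformerForSemanticSegmentation(raw_model)
print(isinstance(wrapped, pl.LightningModule))  # True: trainer.fit accepts it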

貓虎皮 (iT邦新手 3 級) ‧ 2023-04-23 07:13:45
Please provide the code that instantiates the variable `segformer_finetuner`, thanks.
Is this what you're asking for? type(segformer_finetuner) is shown above, and here is dir(segformer_finetuner):
'T_destination',
'__annotations__',
'__call__',
'__class__',
'__delattr__',
'__dict__',
'__dir__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattr__',
'__getattribute__',
'__gt__',
'__hash__',
'__init__',
'__init_subclass__',
'__le__',
'__lt__',
'__module__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__setstate__',
'__sizeof__',
'__str__',
'__subclasshook__',
'__weakref__',
'_apply',
'_auto_class',
'_backward_compatibility_gradient_checkpointing',
'_backward_hooks',
'_backward_pre_hooks',
'_buffers',
'_call_impl',
'_convert_head_mask_to_5d',
'_create_repo',
'_expand_inputs_for_generation',
'_extract_past_from_model_output',
'_forward_hooks',
'_forward_hooks_with_kwargs',
'_forward_pre_hooks',
'_forward_pre_hooks_with_kwargs',
'_from_config',
'_get_backward_hooks',
'_get_backward_pre_hooks',
'_get_decoder_start_token_id',
'_get_files_timestamps',
'_get_logits_processor',
'_get_logits_warper',
'_get_name',
'_get_resized_embeddings',
'_get_resized_lm_head',
'_get_stopping_criteria',
'_hook_rss_memory_post_forward',
'_hook_rss_memory_pre_forward',
'_init_weights',
'_initialize_weights',
'_is_full_backward_hook',
'_is_hf_initialized',
'_keep_in_fp32_modules',
'_keys_to_ignore_on_load_missing',
'_keys_to_ignore_on_load_unexpected',
'_keys_to_ignore_on_save',
'_load_from_state_dict',
'_load_pretrained_model',
'_load_pretrained_model_low_mem',
'_load_state_dict_post_hooks',
'_load_state_dict_pre_hooks',
'_maybe_initialize_input_ids_for_generation',
'_maybe_warn_non_full_backward_hook',
'_merge_criteria_processor_list',
'_modules',
'_named_members',
'_no_split_modules',
'_non_persistent_buffers_set',
'_parameters',
'_prepare_attention_mask_for_generation',
'_prepare_decoder_input_ids_for_generation',
'_prepare_encoder_decoder_kwargs_for_generation',
'_prepare_model_inputs',
'_register_load_state_dict_pre_hook',
'_register_state_dict_hook',
'_reorder_cache',
'_replicate_for_data_parallel',
'_resize_token_embeddings',
'_save_to_state_dict',
'_set_default_torch_dtype',
'_slow_forward',
'_state_dict_hooks',
'_state_dict_pre_hooks',
'_tie_encoder_decoder_weights',
'_tie_or_clone_weights',
'_update_model_kwargs_for_generation',
'_upload_modified_files',
'_validate_model_class',
'_validate_model_kwargs',
'_version',
'add_memory_hooks',
'add_module',
'adjust_logits_during_generation',
'apply',
'base_model',
'base_model_prefix',
'beam_sample',
'beam_search',
'bfloat16',
'buffers',
'call_super_init',
'can_generate',
'children',
'compute_transition_scores',
'config',
'config_class',
'constrained_beam_search',
'contrastive_search',
'cpu',
'create_extended_attention_mask_for_decoder',
'cuda',
'decode_head',
'device',
'disable_input_require_grads',
'double',
'dtype',
'dummy_inputs',
'dump_patches',
'enable_input_require_grads',
'estimate_tokens',
'eval',
'extra_repr',
'float',
'floating_point_ops',
'forward',
'framework',
'from_pretrained',
'generate',
'generation_config',
'get_buffer',
'get_extended_attention_mask',
'get_extra_state',
'get_head_mask',
'get_input_embeddings',
'get_memory_footprint',
'get_output_embeddings',
'get_parameter',
'get_position_embeddings',
'get_submodule',
'gradient_checkpointing_disable',
'gradient_checkpointing_enable',
'greedy_search',
'group_beam_search',
'half',
'init_weights',
'invert_attention_mask',
'ipu',
'is_gradient_checkpointing',
'is_loaded_in_8bit',
'is_parallelizable',
'load_state_dict',
'main_input_name',
'modules',
'name_or_path',
'named_buffers',
'named_children',
'named_modules',
'named_parameters',
'num_parameters',
'parameters',
'post_init',
'prepare_inputs_for_generation',
'prune_heads',
'push_to_hub',
'register_backward_hook',
'register_buffer',
'register_for_auto_class',
'register_forward_hook',
'register_forward_pre_hook',
'register_full_backward_hook',
'register_full_backward_pre_hook',
'register_load_state_dict_post_hook',
'register_module',
'register_parameter',
'register_state_dict_pre_hook',
'requires_grad_',
'reset_memory_hooks_state',
'resize_position_embeddings',
'resize_token_embeddings',
'retrieve_modules_from_names',
'sample',
'save_pretrained',
'segformer',
'set_extra_state',
'set_input_embeddings',
'share_memory',
'state_dict',
'supports_gradient_checkpointing',
'test_dataloader',
'tie_weights',
'to',
'to_empty',
'train',
'train_dataloader',
'training',
'type',
'val_dataloader',
'warnings_issued',
'xpu',
'zero_grad'
print(issubclass(LightningSegformerForSemanticSegmentation, pl.LightningModule))  # True
re.Zero (iT邦研究生 5 級) ‧ 2023-04-24 22:04:48
@topshow950071: What 貓虎皮 is asking for is probably something along the lines of `segformer_finetuner = LightningSegformerForSemanticSegmentation()`.
PS: https://docs.python.org/zh-tw/3/tutorial/classes.html?highlight=%E5%AF%A6%E4%BE%8B#class-objects
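
Following that hint, here is a minimal sketch of the likely fix: build the wrapper around the pretrained model before handing it to the trainer. The checkpoint name and `num_labels` value are illustrative placeholders, not from the original post:

from transformers import SegformerForSemanticSegmentation

backbone = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/mit-b0",  # placeholder checkpoint; substitute the one used in the project
    num_labels=2,  # placeholder label count
    ignore_mismatched_sizes=True,
)
segformer_finetuner = LightningSegformerForSemanticSegmentation(backbone)
trainer.fit(segformer_finetuner)  # now a LightningModule, so the isinstance check passes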

No one has answered yet.
