
Error when adding BoundaryLoss to loss_decode

Open Foolssss opened this issue 1 year ago • 9 comments

crop_size = (512, 512)
data_preprocessor = dict(size=crop_size)

model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384]),
        loss_decode=[
            dict(
                type='CrossEntropyLoss',
                loss_name='loss_ce',
                loss_weight=3.0,
                use_sigmoid=False),
            dict(type='BoundaryLoss', loss_weight=1.0)]))

Traceback (most recent call last):
  File "train.py", line 109, in <module>
    main()
  File "train.py", line 105, in main
    runner.train()
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/mmengine/runner/runner.py", line 1721, in train
    model = self.train_loop.run()  # type: ignore
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/mmengine/runner/loops.py", line 278, in run
    self.run_iter(data_batch)
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/mmengine/runner/loops.py", line 301, in run_iter
    outputs = self.runner.model.train_step(
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/mmengine/model/base_model/base_model.py", line 114, in train_step
    losses = self._run_forward(data, mode='loss')  # type: ignore
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/mmengine/model/base_model/base_model.py", line 340, in _run_forward
    results = self(**data, mode=mode)
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "/data/fyh/projects/newseg/mmseg/models/segmentors/base.py", line 94, in forward
    return self.loss(inputs, data_samples)
  File "/data/fyh/projects/newseg/mmseg/models/segmentors/encoder_decoder.py", line 176, in loss
    loss_decode = self._decode_head_forward_train(x, data_samples)
  File "/data/fyh/projects/newseg/mmseg/models/segmentors/encoder_decoder.py", line 137, in _decode_head_forward_train
    loss_decode = self.decode_head.loss(inputs, data_samples,
  File "/data/fyh/projects/newseg/mmseg/models/decode_heads/decode_head.py", line 262, in loss
    losses = self.loss_by_feat(seg_logits, batch_data_samples)
  File "/data/fyh/projects/newseg/mmseg/models/decode_heads/decode_head.py", line 324, in loss_by_feat
    loss[loss_decode.loss_name] = loss_decode(
  File "/home/fyh/anaconda3/envs/mmseg/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
TypeError: forward() got an unexpected keyword argument 'weight'
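For context, the last mmseg frame in the traceback is the loop in BaseDecodeHead.loss_by_feat that calls every configured loss with the same keyword arguments, roughly like this (a simplified sketch of the call site, not the exact source):

# Simplified sketch of the call in
# mmseg/models/decode_heads/decode_head.py::loss_by_feat (see traceback);
# every entry of loss_decode is invoked with these keyword arguments.
loss[loss_decode.loss_name] = loss_decode(
    seg_logits,
    seg_label,
    weight=seg_weight,               # BoundaryLoss.forward() has no 'weight'
    ignore_index=self.ignore_index)  # parameter, hence the TypeError

BoundaryLoss in mmseg appears to be written for the STDC boundary head, and its forward() only takes the boundary prediction and target tensors, so the extra weight keyword raises the TypeError.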

Foolssss avatar Jun 01 '23 18:06 Foolssss

I have the same issue. It works when I use a single loss function, but when I use more than one, I get the same error. My environment: mmcv 2.0.0, mmdet 3.0.0, mmengine 0.7.4, mmsegmentation dev-1.x, mmpretrain 1.0.0rc8.

The solution from issue #2237 does not work for me.

stone-cloud avatar Jul 04 '23 06:07 stone-cloud

Same problem here

olegsij avatar Jul 24 '23 11:07 olegsij

Hello, I have the same problem. How can I solve it?

zclyaya avatar Dec 01 '23 12:12 zclyaya

Same problem, did you solve it?

Kingsley530 avatar Dec 23 '23 07:12 Kingsley530

Same problem, did you solve it?

Darren759 avatar Jan 02 '24 08:01 Darren759


I'm very sorry, the problem still happens when I use other losses. When I used an improved Dice loss from another author, the IoU result was 0, and I don't know why. Every other loss type fails with a different problem, so there seems to be a bug in the program. If you manage to solve it, please let me know, and if I find a solution, I will contact you at once. Thank you so much.


Kingsley530 avatar Jan 06 '24 07:01 Kingsley530

As a workaround I used the following. It's a different implementation of the boundary loss, but at least it works; note that it requires a low loss weight (or a low learning rate). Based on https://github.com/yiskw713/boundary_loss_for_remote_sensing/blob/master/boundary_loss.py:

# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmseg.registry import MODELS

def one_hot(label, n_classes, requires_grad=True):
    """Return a one-hot label map of shape (N, C, H, W).

    Kept from the reference implementation; the loss class below uses its
    own ``to_one_hot`` instead.
    """
    device = label.device
    one_hot_label = torch.eye(
        n_classes, device=device, requires_grad=requires_grad)[label]
    one_hot_label = one_hot_label.transpose(1, 3).transpose(2, 3)

    return one_hot_label

@MODELS.register_module()
class MyBoundaryLoss(nn.Module):

    def __init__(self,
                 theta0: int = 3,
                 theta: int = 5,
                 loss_weight: float = 1.0,
                 loss_name: str = 'loss_boundary'):
        super().__init__()
        self.theta0 = theta0
        self.theta = theta
        self.loss_weight = loss_weight
        self.loss_name_ = loss_name

    def crop(self, w, h, target):
        """Center-crop the target to (h, w) if it is larger than the logits."""
        nt, ht, wt = target.size()
        offset_w, offset_h = (wt - w) // 2, (ht - h) // 2
        if offset_w > 0 and offset_h > 0:
            target = target[:, offset_h:-offset_h, offset_w:-offset_w]

        return target

    def to_one_hot(self, target, size):
        """Scatter an integer label map (N, H, W) into a one-hot map of the
        given size (N, C, H, W)."""
        n, c, h, w = size
        ymask = torch.zeros(size, device=target.device)
        new_target = torch.clamp(target.detach(), 0, c - 1).reshape(n, 1, h, w)
        ymask.scatter_(1, new_target, 1.0)

        return ymask

    def forward(self, pred, gt, **kwargs):
        """
        Input:
            - pred: the output from the model (before softmax),
                    shape (N, C, H, W)
            - gt: ground truth map,
                    shape (N, H, W)
        Return:
            - boundary loss, averaged over the mini-batch
        """
        n, c, h, w = pred.shape

        # softmax so that the predicted map is distributed in [0, 1]
        pred = torch.softmax(pred, dim=1)

        # one-hot vector of ground truth
        gt = self.crop(w, h, gt)
        one_hot_gt = self.to_one_hot(gt, pred.size())

        # boundary map
        gt_b = F.max_pool2d(
            1 - one_hot_gt, kernel_size=self.theta0, stride=1, padding=(self.theta0 - 1) // 2)
        gt_b -= 1 - one_hot_gt

        pred_b = F.max_pool2d(
            1 - pred, kernel_size=self.theta0, stride=1, padding=(self.theta0 - 1) // 2)
        pred_b -= 1 - pred

        # extended boundary map
        gt_b_ext = F.max_pool2d(
            gt_b, kernel_size=self.theta, stride=1, padding=(self.theta - 1) // 2)

        pred_b_ext = F.max_pool2d(
            pred_b, kernel_size=self.theta, stride=1, padding=(self.theta - 1) // 2)

        # reshape
        gt_b = gt_b.view(n, c, -1)
        pred_b = pred_b.view(n, c, -1)
        gt_b_ext = gt_b_ext.view(n, c, -1)
        pred_b_ext = pred_b_ext.view(n, c, -1)

        # Precision, Recall
        P = torch.sum(pred_b * gt_b_ext, dim=2) / (torch.sum(pred_b, dim=2) + 1e-7)
        R = torch.sum(pred_b_ext * gt_b, dim=2) / (torch.sum(gt_b, dim=2) + 1e-7)

        # Boundary F1 Score
        BF1 = 2 * P * R / (P + R + 1e-7)

        # average (1 - BF1) over classes and the mini-batch, scaled by the
        # configured loss weight (the original snippet never applied it)
        loss = self.loss_weight * torch.mean(1 - BF1)

        return loss

    @property
    def loss_name(self):
        return self.loss_name_
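With the class above saved in a module that mmseg imports, a config fragment using it might look like this (a sketch; the module name my_boundary_loss is just an example):

# Hypothetical usage: import the module so @MODELS.register_module() runs,
# then reference the loss in loss_decode. 'my_boundary_loss' is an example
# module name, not part of mmseg.
custom_imports = dict(imports=['my_boundary_loss'], allow_failed_imports=False)

model = dict(
    decode_head=dict(
        loss_decode=[
            dict(type='CrossEntropyLoss', loss_name='loss_ce',
                 use_sigmoid=False, loss_weight=1.0),
            # keep this weight low, as noted above
            dict(type='MyBoundaryLoss', loss_name='loss_boundary',
                 loss_weight=0.1)]))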

olegsij avatar Jan 11 '24 13:01 olegsij

Hi, I think I found a possible solution: add **kwargs to the __init__() and forward() interfaces of the loss class (the screenshot showing the change didn't survive; see the sketch below).

After I added this, the problem disappeared.
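Since the screenshot was lost, the change presumably looks roughly like this (a sketch with a placeholder class, not the actual mmseg code):

import torch.nn as nn

class SomeLoss(nn.Module):  # placeholder name for the failing loss class

    def __init__(self, loss_weight=1.0, **kwargs):  # **kwargs added
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, pred, target, **kwargs):  # **kwargs swallows weight=...,
        ...                                     # ignore_index=... from the head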

ChunmingLi-SJTU avatar May 08 '24 09:05 ChunmingLi-SJTU