
RuntimeError: Boolean value of Tensor with more than one value is ambiguous

wq247726404 opened this issue on Feb 24, 2024 · 0 comments

Hello, when I run train_dual.py for object detection, I hit the error below. How can I solve it? Thank you very much for your contribution.

    File "D:\Study\yolov9-main\models\yolo.py", line 577, in forward
        if augment:
    RuntimeError: Boolean value of Tensor with more than one value is ambiguous
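
As I understand it, PyTorch raises this message whenever a tensor with more than one element is used where a single bool is expected, so the traceback suggests that `augment` holds a Tensor rather than a bool by the time `forward` runs. A minimal sketch that reproduces the same message:

    import torch

    t = torch.zeros(2)  # any tensor with more than one element
    if t:  # RuntimeError: Boolean value of Tensor with more than one value is ambiguous
        print('never reached')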

    class DetectionModel(BaseModel):
        # YOLO detection model
        def __init__(self, cfg='yolo.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
            super().__init__()
            if isinstance(cfg, dict):
                self.yaml = cfg  # model dict
            else:  # is *.yaml
                import yaml  # for torch hub
                self.yaml_file = Path(cfg).name
                with open(cfg, encoding='ascii', errors='ignore') as f:
                    self.yaml = yaml.safe_load(f)  # model dict

            # Define model
            ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
            if nc and nc != self.yaml['nc']:
                LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
                self.yaml['nc'] = nc  # override yaml value
            if anchors:
                LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
                self.yaml['anchors'] = round(anchors)  # override yaml value
            self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
            self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
            self.inplace = self.yaml.get('inplace', True)

            # Build strides, anchors
            m = self.model[-1]  # Detect()
            if isinstance(m, (Detect, DDetect, Segment)):
                s = 256  # 2x min stride
                m.inplace = self.inplace
                forward = lambda x: self.forward(x)[0] if isinstance(m, (Segment)) else self.forward(x)
                m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
                # check_anchor_order(m)
                # m.anchors /= m.stride.view(-1, 1, 1)
                self.stride = m.stride
                m.bias_init()  # only run once
            if isinstance(m, (DualDetect, TripleDetect, DualDDetect, TripleDDetect)):
                s = 256  # 2x min stride
                m.inplace = self.inplace
                # forward = lambda x: self.forward(x)[0][0] if isinstance(m, (DualSegment)) else self.forward(x)[0]
                forward = lambda x: self.forward(x)[0]
                m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
                # check_anchor_order(m)
                # m.anchors /= m.stride.view(-1, 1, 1)
                self.stride = m.stride
                m.bias_init()  # only run once

            # Init weights, biases
            initialize_weights(self)
            self.info()
            LOGGER.info('')

        def forward(self, x, augment=False, profile=False, visualize=False):
            if augment:
                return self._forward_augment(x)  # augmented inference, None
            return self._forward_once(x, profile, visualize)  # single-scale inference, train

        def _forward_augment(self, x):
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
                yi = self._forward_once(xi)[0]  # forward
                # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi = self._descale_pred(yi, fi, si, img_size)
                y.append(yi)
            y = self._clip_augmented(y)  # clip augmented tails
            return torch.cat(y, 1), None  # augmented inference, train
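
For what it's worth, my working assumption (not confirmed) is that something in my setup calls the model with a second positional tensor, e.g. `model(imgs, targets)`, which would bind that tensor to `augment`. A small guard I added locally to surface the offending caller (the `assert` is my own addition, not part of yolov9):

    def forward(self, x, augment=False, profile=False, visualize=False):
        # Fail fast with a readable message if a tensor was passed positionally
        # into `augment`, e.g. via `model(imgs, targets)` instead of `model(imgs)`.
        assert isinstance(augment, bool), f'augment must be a bool, got {type(augment)}'
        if augment:
            return self._forward_augment(x)  # augmented inference, None
        return self._forward_once(x, profile, visualize)  # single-scale inference, train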
