what.models.detection.ssd.ssd.multibox_loss

import torch
import torch.nn as nn
import torch.nn.functional as F

from ..utils import box_utils


class MultiboxLoss(nn.Module):
    def __init__(self, priors, iou_threshold, neg_pos_ratio,
                 center_variance, size_variance, device):
        """Implement SSD MultiBox loss.

        MultiBox loss combines a cross-entropy classification loss over
        hard-mined priors with a Smooth L1 regression loss over the
        positive priors.
        """
        super(MultiboxLoss, self).__init__()
        self.iou_threshold = iou_threshold
        self.neg_pos_ratio = neg_pos_ratio
        self.center_variance = center_variance
        self.size_variance = size_variance
        # Tensor.to() is not in-place: keep the returned tensor,
        # otherwise the priors never actually move to `device`.
        self.priors = priors.to(device)

    def forward(self, confidence, predicted_locations, labels, gt_locations):
        """Compute the classification loss and the Smooth L1 loss.

        Args:
            confidence (batch_size, num_priors, num_classes): class predictions.
            predicted_locations (batch_size, num_priors, 4): predicted locations.
            labels (batch_size, num_priors): real labels of all the priors.
            gt_locations (batch_size, num_priors, 4): real boxes corresponding to all the priors.
        """
        num_classes = confidence.size(2)
        with torch.no_grad():
            # Background loss -log(softmax(confidence))[..., 0]; a large value
            # means the prior is a hard negative worth keeping for training.
            loss = -F.log_softmax(confidence, dim=2)[:, :, 0]
            mask = box_utils.hard_negative_mining(loss, labels, self.neg_pos_ratio)

        confidence = confidence[mask, :]
        classification_loss = F.cross_entropy(confidence.reshape(-1, num_classes),
                                              labels[mask], reduction='sum')
        pos_mask = labels > 0
        predicted_locations = predicted_locations[pos_mask, :].reshape(-1, 4)
        gt_locations = gt_locations[pos_mask, :].reshape(-1, 4)
        smooth_l1_loss = F.smooth_l1_loss(predicted_locations, gt_locations, reduction='sum')
        num_pos = gt_locations.size(0)
        # Both terms are normalized by the number of positive priors.
        return smooth_l1_loss / num_pos, classification_loss / num_pos
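To make the interface concrete, here is a minimal usage sketch with random tensors (not taken from this repository). The shapes follow the forward() docstring; the class count, prior boxes, and hyperparameter values (0.5 / 3 / 0.1 / 0.2 are common SSD defaults) are illustrative placeholders:

import torch

from what.models.detection.ssd.ssd.multibox_loss import MultiboxLoss

# Illustrative sizes: 3 classes (background + 2 objects), 100 priors.
batch_size, num_priors, num_classes = 2, 100, 3
priors = torch.rand(num_priors, 4)  # placeholder prior boxes

criterion = MultiboxLoss(priors, iou_threshold=0.5, neg_pos_ratio=3,
                         center_variance=0.1, size_variance=0.2,
                         device=torch.device("cpu"))

confidence = torch.randn(batch_size, num_priors, num_classes)
predicted_locations = torch.randn(batch_size, num_priors, 4)
gt_locations = torch.randn(batch_size, num_priors, 4)
labels = torch.zeros(batch_size, num_priors, dtype=torch.long)
labels[:, :10] = 1  # pretend the first 10 priors matched an object

regression_loss, classification_loss = criterion(
    confidence, predicted_locations, labels, gt_locations)
loss = regression_loss + classification_loss  # caller combines the two terms

Note that both returned terms are divided by the number of positive priors; if a batch contains no positive labels at all, that division yields NaN, so callers should ensure matched priors exist.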
class MultiboxLoss(torch.nn.modules.module.Module):

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

As per the example above, an __init__() call to the parent class must be made before assignment on the child.

training (bool): whether this module is in training or evaluation mode.
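
As a quick, hypothetical check (reusing the Model class defined above, not part of this module), attribute assignment is what registers conv1 and conv2, so they appear in the module tree and are converted together with the parent:

import torch

model = Model()
# conv1 and conv2 were registered by plain attribute assignment:
print([name for name, _ in model.named_children()])  # ['conv1', 'conv2']

model.to(torch.float64)  # registered submodules are converted too
print(next(model.conv1.parameters()).dtype)  # torch.float64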

MultiboxLoss(priors, iou_threshold, neg_pos_ratio, center_variance, size_variance, device)

Implement SSD Multibox Loss.

MultiBox loss combines a cross-entropy classification loss over hard-mined priors with a Smooth L1 regression loss over the positive priors.
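
For reference, the two returned terms line up with the objective from the SSD paper, where N is the number of matched (positive) priors and the conf/loc weighting α is effectively 1 here:

L(x, c, l, g) = \frac{1}{N}\left( L_{conf}(x, c) + \alpha \, L_{loc}(x, l, g) \right)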

def forward(self, confidence, predicted_locations, labels, gt_locations):

Compute the classification loss and the Smooth L1 loss.

Args:
    confidence (batch_size, num_priors, num_classes): class predictions.
    predicted_locations (batch_size, num_priors, 4): predicted locations.
    labels (batch_size, num_priors): real labels of all the priors.
    gt_locations (batch_size, num_priors, 4): real boxes corresponding to all the priors.
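
The box_utils.hard_negative_mining call is not reproduced on this page. As a rough sketch of the usual technique (keep every positive prior, plus the neg_pos_ratio hardest negatives per image, ranked by the background loss computed in forward()), the function likely resembles:

import math
import torch

def hard_negative_mining(loss, labels, neg_pos_ratio):
    # Hypothetical sketch; the real implementation lives in box_utils.
    # loss:   (batch_size, num_priors) background loss, float tensor.
    # labels: (batch_size, num_priors) prior labels, 0 = background.
    pos_mask = labels > 0
    num_pos = pos_mask.long().sum(dim=1, keepdim=True)
    num_neg = num_pos * neg_pos_ratio

    loss[pos_mask] = -math.inf            # exclude positives from the ranking
    _, indexes = loss.sort(dim=1, descending=True)
    _, orders = indexes.sort(dim=1)       # rank of each prior by loss
    neg_mask = orders < num_neg           # keep only the hardest negatives
    return pos_mask | neg_mask

The double argsort converts sorted positions back into per-prior ranks, so the mask keeps exactly the num_neg highest-loss negatives per image alongside all positives.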

Inherited Members
torch.nn.modules.module.Module
dump_patches
register_buffer
register_parameter
add_module
register_module
get_submodule
get_parameter
get_buffer
get_extra_state
set_extra_state
apply
cuda
xpu
cpu
type
float
double
half
bfloat16
to_empty
to
register_backward_hook
register_full_backward_hook
register_forward_pre_hook
register_forward_hook
state_dict
load_state_dict
parameters
named_parameters
buffers
named_buffers
children
named_children
modules
named_modules
train
eval
requires_grad_
zero_grad
share_memory
extra_repr