Source code for mmedit.models.losses.perceptual_loss

import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
from mmcv.runner import load_checkpoint

from mmedit.utils import get_root_logger
from ..registry import LOSSES


class PerceptualVGG(nn.Module):
    """VGG network used in calculating perceptual loss.

    In this implementation, we allow users to choose whether to normalize
    the input features and which type of vgg network to use. Note that the
    pretrained path must match the vgg type.

    Args:
        layer_name_list (list[str]): The forward function returns the
            features corresponding to the names in this list. The names are
            layer indices in `vgg.features`, e.g. ['4', '10'].
        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image.
            Importantly, the input must be in the range [0, 1].
            Default: True.
        pretrained (str): Path for pretrained weights.
            Default: 'torchvision://vgg19'.
    """

    def __init__(self,
                 layer_name_list,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 pretrained='torchvision://vgg19'):
        super(PerceptualVGG, self).__init__()
        if pretrained.startswith('torchvision://'):
            assert vgg_type in pretrained
        self.layer_name_list = layer_name_list
        self.use_input_norm = use_input_norm

        # get vgg model and load pretrained vgg weights
        # remove _vgg from attributes to avoid `find_unused_parameters` bug
        _vgg = getattr(vgg, vgg_type)()
        self.init_weights(_vgg, pretrained)
        num_layers = max(map(int, layer_name_list)) + 1
        assert len(_vgg.features) >= num_layers
        # only borrow layers that will be used from _vgg to avoid unused
        # params
        self.vgg_layers = _vgg.features[:num_layers]

        if self.use_input_norm:
            # the mean is for images in range [0, 1]
            self.register_buffer(
                'mean',
                torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for images in range [0, 1]
            self.register_buffer(
                'std',
                torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        for v in self.vgg_layers.parameters():
            v.requires_grad = False
    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            dict[str, Tensor]: Features of the layers named in
                `layer_name_list`, keyed by layer name.
        """
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        output = {}

        for name, module in self.vgg_layers.named_children():
            x = module(x)
            if name in self.layer_name_list:
                output[name] = x.clone()
        return output
    def init_weights(self, model, pretrained):
        """Init weights.

        Args:
            model (nn.Module): Model to be initialized.
            pretrained (str): Path for pretrained weights.
        """
        logger = get_root_logger()
        load_checkpoint(model, pretrained, logger=logger)
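
# Illustrative usage sketch (not part of the original module). Assuming the
# default torchvision vgg19 weights can be downloaded, PerceptualVGG can be
# used standalone as a feature extractor; the layer indices ['4', '9'] below
# are arbitrary examples:
#
#     extractor = PerceptualVGG(layer_name_list=['4', '9'])
#     feats = extractor(torch.rand(2, 3, 64, 64))  # images in [0, 1]
#     assert set(feats.keys()) == {'4', '9'}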
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
    """Perceptual loss with the commonly used style loss.

    Args:
        layer_weights (dict): The weight for each layer of vgg features.
            Here is an example: {'4': 1., '9': 1., '18': 1.}, which means
            the 5th, 10th and 19th feature layers will be extracted with
            weight 1.0 in calculating losses.
        vgg_type (str): The type of vgg network used as feature extractor.
            Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image in vgg.
            Default: True.
        perceptual_weight (float): If `perceptual_weight > 0`, the
            perceptual loss will be calculated and the loss will be
            multiplied by the weight. Default: 1.0.
        style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will be multiplied by the weight.
            Default: 1.0.
        norm_img (bool): If True, the image will be normalized to [0, 1].
            Note that this is different from `use_input_norm`, which
            normalizes the input in the forward function of vgg according
            to the statistics of the dataset. Importantly, the input image
            must be in the range [-1, 1]. Default: True.
        pretrained (str): Path for pretrained weights.
            Default: 'torchvision://vgg19'.
        criterion (str): Criterion type used in the perceptual and style
            losses. Only 'l1' is supported. Default: 'l1'.
    """

    def __init__(self,
                 layer_weights,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 perceptual_weight=1.0,
                 style_weight=1.0,
                 norm_img=True,
                 pretrained='torchvision://vgg19',
                 criterion='l1'):
        super(PerceptualLoss, self).__init__()
        self.norm_img = norm_img
        self.perceptual_weight = perceptual_weight
        self.style_weight = style_weight
        self.layer_weights = layer_weights
        self.vgg = PerceptualVGG(
            layer_name_list=list(layer_weights.keys()),
            vgg_type=vgg_type,
            use_input_norm=use_input_norm,
            pretrained=pretrained)

        if criterion == 'l1':
            self.criterion = torch.nn.L1Loss()
        else:
            raise NotImplementedError(
                f'{criterion} criterion has not been supported in'
                ' this version.')
    def forward(self, x, gt):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).

        Returns:
            tuple[Tensor]: Perceptual loss and style loss; each is None
                when its corresponding weight is not positive.
        """
        if self.norm_img:
            x = (x + 1.) * 0.5
            gt = (gt + 1.) * 0.5
        # extract vgg features
        x_features = self.vgg(x)
        gt_features = self.vgg(gt.detach())

        # calculate perceptual loss
        if self.perceptual_weight > 0:
            percep_loss = 0
            for k in x_features.keys():
                percep_loss += self.criterion(
                    x_features[k], gt_features[k]) * self.layer_weights[k]
            percep_loss *= self.perceptual_weight
        else:
            percep_loss = None

        # calculate style loss
        if self.style_weight > 0:
            style_loss = 0
            for k in x_features.keys():
                style_loss += self.criterion(
                    self._gram_mat(x_features[k]),
                    self._gram_mat(gt_features[k])) * self.layer_weights[k]
            style_loss *= self.style_weight
        else:
            style_loss = None

        return percep_loss, style_loss
    def _gram_mat(self, x):
        """Calculate Gram matrix.

        Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Gram matrix.
        """
        (n, c, h, w) = x.size()
        features = x.view(n, c, w * h)
        features_t = features.transpose(1, 2)
        gram = features.bmm(features_t) / (c * h * w)
        return gram
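
if __name__ == '__main__':
    # Illustrative smoke test, not part of the original module. It assumes
    # the default 'torchvision://vgg19' weights can be downloaded. With
    # norm_img=True, inputs are expected in [-1, 1] and are rescaled to
    # [0, 1] before vgg normalization.
    loss_fn = PerceptualLoss(layer_weights={'4': 1., '9': 1., '18': 1.})
    pred = torch.rand(1, 3, 64, 64) * 2 - 1  # fake prediction in [-1, 1]
    target = torch.rand(1, 3, 64, 64) * 2 - 1  # fake target in [-1, 1]
    percep, style = loss_fn(pred, target)
    print('perceptual:', percep.item(), 'style:', style.item())

    # The Gram matrix of an (n, c, h, w) tensor has shape (n, c, c).
    gram = loss_fn._gram_mat(torch.rand(1, 8, 4, 4))
    assert gram.shape == (1, 8, 8)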