
CBAM module (tensorflow2), CBAM network


https://github.com/fuyongXu/deeplearning_models_classifier_tutorial/blob/master/models/bam.py
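The link above points to fuyongXu's deep-learning classifier tutorial repository. The file below is a PyTorch implementation (PyTorch, not TensorFlow 2, despite the title) of CBAM (Convolutional Block Attention Module) wired into a ResNeXt-29 backbone. CBAM refines a feature map in two sequential steps: a channel gate, which pools the map globally (average and max by default), passes each pooled descriptor through a shared bottleneck MLP, and rescales channels with a sigmoid mask; and a spatial gate, which concatenates the per-pixel max and mean over channels and runs a 7x7 convolution to produce a per-location sigmoid mask.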

# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F

__all__ = ['cbam_resnext29_8x64d', 'cbam_resnext29_16x64d']


class BasicConv(nn.Module):
    # Conv2d -> optional BatchNorm2d -> optional ReLU.
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01,
                                 affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x


class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)


class ChannelGate(nn.Module):
    # Channel attention: global pooling descriptors -> shared MLP -> sigmoid scale.
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
        )
        self.pool_types = pool_types

    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)),
                                        stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == 'max':
                max_pool = F.max_pool2d(x, (x.size(2), x.size(3)),
                                        stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == 'lp':
                lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)),
                                      stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == 'lse':
                # LSE pool only
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale


def logsumexp_2d(tensor):
    # Numerically stable log-sum-exp over the spatial dimensions.
    tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
    s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
    outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
    return outputs


class ChannelPool(nn.Module):
    # Stack per-pixel max and mean over channels into a 2-channel map.
    def forward(self, x):
        return torch.cat((torch.max(x, 1)[0].unsqueeze(1),
                          torch.mean(x, 1).unsqueeze(1)), dim=1)


class SpatialGate(nn.Module):
    # Spatial attention: 7x7 conv over the channel-pooled map -> sigmoid scale.
    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1,
                                 padding=(kernel_size - 1) // 2, relu=False)

    def forward(self, x):
        x_compress = self.compress(x)
        x_out = self.spatial(x_compress)
        scale = torch.sigmoid(x_out)  # broadcasting
        return x * scale


class CBAM(nn.Module):
    # Convolutional Block Attention Module: channel gate, then (optionally) spatial gate.
    def __init__(self, gate_channels, reduction_ratio=16,
                 pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()

    def forward(self, x):
        x_out = self.ChannelGate(x)
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out


class Bottleneck(nn.Module):
    # ResNeXt bottleneck (1x1 reduce -> 3x3 grouped conv -> 1x1 expand)
    # with CBAM applied to the residual branch before the skip addition.
    def __init__(self, in_channels, out_channels, stride,
                 cardinality, base_width, expansion):
        super(Bottleneck, self).__init__()
        width_ratio = out_channels / (expansion * 64.)
        D = cardinality * int(base_width * width_ratio)
        self.relu = nn.ReLU(inplace=True)
        self.cbam_module = CBAM(out_channels)
        self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1,
                                     stride=1, padding=0, bias=False)
        self.bn_reduce = nn.BatchNorm2d(D)
        self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,
                                   padding=1, groups=cardinality, bias=False)
        self.bn = nn.BatchNorm2d(D)
        self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1,
                                     stride=1, padding=0, bias=False)
        self.bn_expand = nn.BatchNorm2d(out_channels)
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module(
                'shortcut_conv',
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                          stride=stride, padding=0, bias=False))
            self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))

    def forward(self, x):
        out = self.conv_reduce(x)
        out = self.relu(self.bn_reduce(out))
        out = self.conv_conv(out)
        out = self.relu(self.bn(out))
        out = self.conv_expand(out)
        out = self.bn_expand(out)
        residual = self.shortcut(x)
        out = self.cbam_module(out) + residual
        out = self.relu(out)
        return out


class SeResNeXt(nn.Module):
    # ResNeXt-29-style backbone for 32x32 inputs, with CBAM in every bottleneck.
    def __init__(self, cardinality, depth, num_classes, base_width, expansion=4):
        super(SeResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        self.block_depth = (self.depth - 2) // 9
        self.base_width = base_width
        self.expansion = expansion
        self.num_classes = num_classes
        self.output_size = 64
        self.stages = [64, 64 * self.expansion,
                       128 * self.expansion, 256 * self.expansion]
        self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        self.fc = nn.Linear(self.stages[3], num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def block(self, name, in_channels, out_channels, pool_stride=2):
        block = nn.Sequential()
        for bottleneck in range(self.block_depth):
            name_ = '%s_bottleneck_%d' % (name, bottleneck)
            if bottleneck == 0:
                block.add_module(name_, Bottleneck(
                    in_channels, out_channels, pool_stride,
                    self.cardinality, self.base_width, self.expansion))
            else:
                block.add_module(name_, Bottleneck(
                    out_channels, out_channels, 1,
                    self.cardinality, self.base_width, self.expansion))
        return block

    def forward(self, x):
        x = self.conv_1_3x3(x)
        x = F.relu(self.bn_1(x), inplace=True)
        x = self.stage_1(x)
        x = self.stage_2(x)
        x = self.stage_3(x)
        x = F.avg_pool2d(x, 8, 1)
        x = x.view(-1, self.stages[3])
        return self.fc(x)


def cbam_resnext29_8x64d(num_classes):
    return SeResNeXt(cardinality=8, depth=29,
                     num_classes=num_classes, base_width=64)


def cbam_resnext29_16x64d(num_classes):
    return SeResNeXt(cardinality=16, depth=29,
                     num_classes=num_classes, base_width=64)
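A minimal smoke test of the code above, assuming it is run in the same module. The 32x32 input size is an inference from the fixed 8x8 average pool at the end of forward (which matches CIFAR-style images), not something the original article states:

import torch

# Standalone CBAM block: refine a feature map with channel then spatial attention.
cbam = CBAM(gate_channels=64)
feat = torch.randn(2, 64, 32, 32)   # (batch, channels, H, W)
refined = cbam(feat)
print(refined.shape)                # torch.Size([2, 64, 32, 32]) -- shape is preserved

# Full network: ResNeXt-29 (8x64d) with CBAM in every bottleneck.
# F.avg_pool2d(x, 8, 1) in forward assumes an 8x8 final map, i.e. 32x32 inputs.
model = cbam_resnext29_8x64d(num_classes=10)
images = torch.randn(4, 3, 32, 32)
logits = model(images)
print(logits.shape)                 # torch.Size([4, 10])

Note the ordering: the channel gate runs before the spatial gate. The CBAM paper (Woo et al., 2018) reports in its ablations that this sequential channel-first arrangement works slightly better than the reverse order or applying both gates in parallel.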
