Introduction to MobileNetV3
The MobileNet family targets networks that run on mobile phones with high performance and low resource use. MobileNetV3 is an architecture found largely through network architecture search (NAS). Compared with V2 it raises ImageNet classification accuracy; the paper reports MobileNetV3-Small as 6.6% more accurate than a MobileNetV2 model of comparable latency.
MobileNetV3 has the following main advantages:
1. It makes heavy use of 1x1 and 3x3 convolutions in place of 5x5 convolutions, reducing the parameter count. In V2 the 3x3 convolution comes before the 1x1 convolution, while V3 applies the 1x1 first and then the 3x3; the paper argues this ordering preserves the high-dimensional feature space while reducing latency.
2. It introduces inverted residual blocks and a lightweight squeeze-and-excitation attention module into the bottleneck structure, extracting features more effectively.
3. It adopts h-swish, which is faster to compute and improves the network's accuracy on mobile devices; a quick numeric sketch follows.
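The h-swish activation is defined as h-swish(x) = x * ReLU6(x + 3) / 6; it replaces the sigmoid inside swish (x * sigmoid(x)) with a piecewise-linear ReLU6 term that is cheap on mobile hardware. As a sanity check, here is a minimal numpy sketch (not part of the original code) comparing it against exact swish:

import numpy as np

def hard_swish_np(x):
    # h-swish(x) = x * ReLU6(x + 3) / 6
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0

def swish_np(x):
    # exact swish for comparison: x * sigmoid(x)
    return x / (1.0 + np.exp(-x))

x = np.linspace(-6.0, 6.0, 7)
print(hard_swish_np(x))  # equals 0 for x <= -3 and exactly x for x >= 3
print(swish_np(x))       # h-swish tracks this curve closely in between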
Network reproduction code: MobileNetV3-Large
from keras.layers import Conv2D, DepthwiseConv2D, Dense, GlobalAveragePooling2D,Input
from keras.layers import Activation, BatchNormalization, Add, Multiply, Reshape
from keras.models import Model
from keras import backend as K
alpha = 1  # width multiplier for the channel counts
def relu6(x):
    # ReLU capped at 6
    return K.relu(x, max_value=6.0)
def hard_swish(x):
    # x * ReLU6(x + 3) / 6, a piecewise-linear approximation of swish (x * sigmoid(x))
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0
def return_activation(x, nl):
    # select the activation: 'HS' for hard-swish, 'RE' for ReLU6
    if nl == 'HS':
        x = Activation(hard_swish)(x)
    if nl == 'RE':
        x = Activation(relu6)(x)
    return x
def conv_block(inputs, filters, kernel, strides, nl):
    # one convolution unit: Conv2D + BatchNormalization + activation
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)
    x = BatchNormalization(axis=channel_axis)(x)
    return return_activation(x, nl)
def squeeze(inputs):
    # squeeze-and-excitation attention unit
    input_channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D()(inputs)
    x = Dense(int(input_channels / 4))(x)
    x = Activation(relu6)(x)
    x = Dense(input_channels)(x)
    # note: the paper gates with hard-sigmoid; this reproduction uses hard-swish
    x = Activation(hard_swish)(x)
    x = Reshape((1, 1, input_channels))(x)
    x = Multiply()([inputs, x])
    return x
def bottleneck(inputs, filters, kernel, up_dim, stride, sq, nl):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    input_shape = K.int_shape(inputs)
    tchannel = int(up_dim)
    cchannel = int(alpha * filters)
    # residual connection only when stride is 1 and input/output channels match (assumes channels_last)
    r = stride == 1 and input_shape[3] == filters
    # 1x1 convolution to expand the channel count
    x = conv_block(inputs, tchannel, (1, 1), (1, 1), nl)
    # depthwise convolution (kernel is 3x3 or 5x5)
    x = DepthwiseConv2D(kernel, strides=(stride, stride), depth_multiplier=1, padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = return_activation(x, nl)
    # optional squeeze-and-excitation attention
    if sq:
        x = squeeze(x)
    # 1x1 convolution to project the channel count back down
    x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    if r:
        x = Add()([x, inputs])
    return x
def MobileNetv3_large(shape=(224, 224, 3), n_class=1000):
    inputs = Input(shape)
    # 224,224,3 -> 112,112,16
    x = conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')
    x = bottleneck(x, 16, (3, 3), up_dim=16, stride=1, sq=False, nl='RE')
    # 112,112,16 -> 56,56,24
    x = bottleneck(x, 24, (3, 3), up_dim=64, stride=2, sq=False, nl='RE')
    x = bottleneck(x, 24, (3, 3), up_dim=72, stride=1, sq=False, nl='RE')
    # 56,56,24 -> 28,28,40
    x = bottleneck(x, 40, (5, 5), up_dim=72, stride=2, sq=True, nl='RE')
    x = bottleneck(x, 40, (5, 5), up_dim=120, stride=1, sq=True, nl='RE')
    x = bottleneck(x, 40, (5, 5), up_dim=120, stride=1, sq=True, nl='RE')
    # 28,28,40 -> 14,14,80
    x = bottleneck(x, 80, (3, 3), up_dim=240, stride=2, sq=False, nl='HS')
    x = bottleneck(x, 80, (3, 3), up_dim=200, stride=1, sq=False, nl='HS')
    x = bottleneck(x, 80, (3, 3), up_dim=184, stride=1, sq=False, nl='HS')
    x = bottleneck(x, 80, (3, 3), up_dim=184, stride=1, sq=False, nl='HS')
    # 14,14,80 -> 14,14,112
    x = bottleneck(x, 112, (3, 3), up_dim=480, stride=1, sq=True, nl='HS')
    x = bottleneck(x, 112, (3, 3), up_dim=672, stride=1, sq=True, nl='HS')
    # 14,14,112 -> 7,7,160
    x = bottleneck(x, 160, (5, 5), up_dim=672, stride=2, sq=True, nl='HS')
    x = bottleneck(x, 160, (5, 5), up_dim=960, stride=1, sq=True, nl='HS')
    x = bottleneck(x, 160, (5, 5), up_dim=960, stride=1, sq=True, nl='HS')
    # 7,7,160 -> 7,7,960
    x = conv_block(x, 960, (1, 1), strides=(1, 1), nl='HS')
    # classifier head: global pooling, then two 1x1 convolutions acting as fully connected layers
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 960))(x)
    x = Conv2D(1280, (1, 1), padding='same')(x)
    x = return_activation(x, 'HS')
    x = Conv2D(n_class, (1, 1), padding='same', activation='softmax')(x)
    x = Reshape((n_class,))(x)
    model = Model(inputs, x)
    return model
if __name__ == "__main__":
    model = MobileNetv3_large()
    model.summary()
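Beyond model.summary(), a quick smoke test (a minimal sketch, assuming a working Keras installation with a TensorFlow backend) confirms that the graph builds and the softmax head outputs a valid distribution:

import numpy as np

model = MobileNetv3_large()
dummy = np.random.rand(1, 224, 224, 3).astype('float32')
probs = model.predict(dummy)
print(probs.shape)  # (1, 1000)
print(probs.sum())  # ~1.0, since the head ends in softmax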
Network reproduction code: MobileNetV3-Small
The small variant mainly reduces the channel counts, lowering the parameter count.
from keras.layers import Conv2D, DepthwiseConv2D, Dense, GlobalAveragePooling2D,Input
from keras.layers import Activation, BatchNormalization, Add, Multiply, Reshape
from keras.models import Model
from keras import backend as K
alpha = 1  # width multiplier for the channel counts
def relu6(x):
    # ReLU capped at 6
    return K.relu(x, max_value=6.0)
def hard_swish(x):
    # x * ReLU6(x + 3) / 6, a piecewise-linear approximation of swish (x * sigmoid(x))
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0
def return_activation(x, nl):
    # select the activation: 'HS' for hard-swish, 'RE' for ReLU6
    if nl == 'HS':
        x = Activation(hard_swish)(x)
    if nl == 'RE':
        x = Activation(relu6)(x)
    return x
def conv_block(inputs, filters, kernel, strides, nl):
    # one convolution unit: Conv2D + BatchNormalization + activation
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)
    x = BatchNormalization(axis=channel_axis)(x)
    return return_activation(x, nl)
def squeeze(inputs):
    # squeeze-and-excitation attention unit
    input_channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D()(inputs)
    x = Dense(int(input_channels / 4))(x)
    x = Activation(relu6)(x)
    x = Dense(input_channels)(x)
    # note: the paper gates with hard-sigmoid; this reproduction uses hard-swish
    x = Activation(hard_swish)(x)
    x = Reshape((1, 1, input_channels))(x)
    x = Multiply()([inputs, x])
    return x
def bottleneck(inputs, filters, kernel, up_dim, stride, sq, nl):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    input_shape = K.int_shape(inputs)
    tchannel = int(up_dim)
    cchannel = int(alpha * filters)
    # residual connection only when stride is 1 and input/output channels match (assumes channels_last)
    r = stride == 1 and input_shape[3] == filters
    # 1x1 convolution to expand the channel count
    x = conv_block(inputs, tchannel, (1, 1), (1, 1), nl)
    # depthwise convolution (kernel is 3x3 or 5x5)
    x = DepthwiseConv2D(kernel, strides=(stride, stride), depth_multiplier=1, padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = return_activation(x, nl)
    # optional squeeze-and-excitation attention
    if sq:
        x = squeeze(x)
    # 1x1 convolution to project the channel count back down
    x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    if r:
        x = Add()([x, inputs])
    return x
def MobileNetv3_small(shape=(224, 224, 3), n_class=1000):
    inputs = Input(shape)
    # 224,224,3 -> 112,112,16
    x = conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')
    # 112,112,16 -> 56,56,16
    x = bottleneck(x, 16, (3, 3), up_dim=16, stride=2, sq=True, nl='RE')
    # 56,56,16 -> 28,28,24
    x = bottleneck(x, 24, (3, 3), up_dim=72, stride=2, sq=False, nl='RE')
    x = bottleneck(x, 24, (3, 3), up_dim=88, stride=1, sq=False, nl='RE')
    # 28,28,24 -> 14,14,40
    x = bottleneck(x, 40, (5, 5), up_dim=96, stride=2, sq=True, nl='HS')
    x = bottleneck(x, 40, (5, 5), up_dim=240, stride=1, sq=True, nl='HS')
    x = bottleneck(x, 40, (5, 5), up_dim=240, stride=1, sq=True, nl='HS')
    # 14,14,40 -> 14,14,48
    x = bottleneck(x, 48, (5, 5), up_dim=120, stride=1, sq=True, nl='HS')
    x = bottleneck(x, 48, (5, 5), up_dim=144, stride=1, sq=True, nl='HS')
    # 14,14,48 -> 7,7,96
    x = bottleneck(x, 96, (5, 5), up_dim=288, stride=2, sq=True, nl='HS')
    x = bottleneck(x, 96, (5, 5), up_dim=576, stride=1, sq=True, nl='HS')
    x = bottleneck(x, 96, (5, 5), up_dim=576, stride=1, sq=True, nl='HS')
    # 7,7,96 -> 7,7,576
    x = conv_block(x, 576, (1, 1), strides=(1, 1), nl='HS')
    # classifier head: global pooling, then two 1x1 convolutions acting as fully connected layers
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 576))(x)
    x = Conv2D(1024, (1, 1), padding='same')(x)
    x = return_activation(x, 'HS')
    x = Conv2D(n_class, (1, 1), padding='same', activation='softmax')(x)
    x = Reshape((n_class,))(x)
    model = Model(inputs, x)
    return model
if __name__ == "__main__":
    model = MobileNetv3_small()
    model.summary()
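To quantify the saving, one can build both variants in the same session (a sketch, assuming both model definitions have been loaded) and compare their parameter counts; the small model should come out substantially smaller:

large_model = MobileNetv3_large()
small_model = MobileNetv3_small()
print('large parameters:', large_model.count_params())
print('small parameters:', small_model.count_params())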
Source: blog.csdn.net. Author: 快了的程序猿小可哥. Copyright belongs to the original author; please contact the author before reposting.
Original link: blog.csdn.net/qq_35914625/article/details/108015789