利用Inception-V3训练的权重微调实现猫狗的分类，其中权重的下载在我的博客下载资源处：https://download.csdn.net/download/fanzonghao/10566634
第一种：权重不改变，直接用mixed7层（mixed7待会把打印结果一放就知道了）进行特征提取。
第一种：权重不改变，直接用mixed7层进行特征提取，然后再拉平，连上两层神经网络。
def define_model():
    """Build a cat/dog binary classifier on top of pretrained Inception-V3.

    Loads the no-top ImageNet weights from a local .h5 file, freezes every
    pretrained layer (pure feature extraction), and attaches a small dense
    head on the `mixed7` feature map.

    Returns:
        keras.models.Model: uncompiled model with one sigmoid output
        (probability of the positive class).
    """
    inception_v3_weight_path = (
        './model_weight/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
    )
    pre_trained_model = InceptionV3(
        input_shape=(150, 150, 3),
        include_top=False,  # drop the fully connected classification head
        weights=None,       # weights are loaded explicitly from the file below
    )
    pre_trained_model.load_weights(inception_v3_weight_path)

    # Use exactly one of the two options below.
    # Option 1: feature extraction only -- no pretrained weight is updated.
    for layer in pre_trained_model.layers:
        print(layer.name)
        layer.trainable = False

    # Option 2 (alternative): fine-tune -- unfreeze layers after 'mixed6'.
    # unfreeze = False
    # for layer in pre_trained_model.layers:
    #     if unfreeze:
    #         layer.trainable = True
    #     if layer.name == 'mixed6':
    #         unfreeze = True

    last_layer = pre_trained_model.get_layer('mixed7')
    print(last_layer.output_shape)  # (None, 7, 7, 768)
    last_output = last_layer.output

    # New classification head stacked on top of the pretrained base.
    x = layers.Flatten()(last_output)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = Model(inputs=pre_trained_model.input, outputs=x)
    return model
第一种：完全利用Inception-V3训练的权重，代码如下：
import os

import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
from keras import layers
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
import data_read


def data_deal_overfit():
    """Build train/validation image generators, with augmentation on train.

    Reads the directory paths from ``data_read.read_data()``; image
    augmentation (rotation, shifts, shear, zoom, horizontal flip) is applied
    to the training set only, to reduce overfitting. Both generators rescale
    pixel values to [0, 1] and yield 150x150 images in batches of 20 with
    binary labels.

    Returns:
        tuple: (train_generator, test_generator) Keras directory iterators.
    """
    # data_read.read_data() also returns sample picture lists; only the
    # directories are needed here.
    train_dir, validation_dir, next_cat_pix, next_dog_pix = data_read.read_data()

    # Augmentation pipeline -- training data only.
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
    )
    # Validation data is only rescaled, never augmented.
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary',
    )
    test_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary',
    )
    return train_generator, test_generator


def define_model():
    """Build a cat/dog binary classifier on top of pretrained Inception-V3.

    Loads the no-top ImageNet weights from a local .h5 file, freezes every
    pretrained layer (pure feature extraction), and attaches a small dense
    head with dropout on the `mixed7` feature map.

    Returns:
        keras.models.Model: uncompiled model with one sigmoid output.
    """
    inception_v3_weight_path = (
        './model_weight/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
    )
    pre_trained_model = InceptionV3(
        input_shape=(150, 150, 3),
        include_top=False,  # drop the fully connected classification head
        weights=None,       # weights are loaded explicitly from the file below
    )
    pre_trained_model.load_weights(inception_v3_weight_path)

    # Use exactly one of the two options below.
    # Option 1: feature extraction only -- no pretrained weight is updated.
    for layer in pre_trained_model.layers:
        print(layer.name)
        layer.trainable = False

    # Option 2 (alternative): fine-tune -- unfreeze layers after 'mixed6'.
    # unfreeze = False
    # for layer in pre_trained_model.layers:
    #     if unfreeze:
    #         layer.trainable = True
    #     if layer.name == 'mixed6':
    #         unfreeze = True

    last_layer = pre_trained_model.get_layer('mixed7')
    print(last_layer.output_shape)  # (None, 7, 7, 768)
    last_output = last_layer.output

    # New classification head stacked on top of the pretrained base.
    x = layers.Flatten()(last_output)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = Model(inputs=pre_trained_model.input, outputs=x)
    return model
def train_model():
    """Compile the model, train on the generators, and plot the curves.

    Trains for 50 epochs with RMSprop + binary cross-entropy, then plots
    training/validation accuracy and loss over the epochs.
    """
    model = define_model()
    model.compile(
        optimizer=RMSprop(lr=0.001),
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    train_generator, test_generator = data_deal_overfit()

    # verbose: 0 = silent, 1 = progress bar, 2 = one line per epoch.
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=100,   # 2000 images = batch_size(20) * steps(100)
        epochs=50,
        validation_data=test_generator,
        validation_steps=50,   # 1000 images = 20 * 50
        verbose=2,
    )

    # Accuracy and loss recorded by Keras for each epoch.
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))

    plt.plot(epochs, acc)
    plt.plot(epochs, val_acc)
    plt.title('training and validation accuracy')

    plt.figure()
    plt.plot(epochs, loss)
    plt.plot(epochs, val_loss)
    plt.title('training and validation loss')
    plt.show()


if __name__ == '__main__':
    train_model()
打印结果：其中这些代表每一层的名字。直接利用mixed7的特征，(None, 7, 7, 768)就是该层的shape，直接拉平后添加两层神经网络进行分类。打印结果中，这是每一层的名字，mixed7层的shape是(None, 7, 7, 768)。第一种做法就是直接利用该层及之前层的权重进行训练分类的。第二种：进行微调，并不是需要对整个权重都进行重新赋值，因为前面层数学习到的特征是一些简单的特征，只是随着层数加深才更加具有针对性，故把mixed7层之前、mixed6层之后的卷积层权重重新训练。代码：
unfreeze = False
for layer in pre_trained_model.layers:
    if unfreeze:
        layer.trainable = True
    if layer.name == 'mixed6':
        unfreeze = True
也就是把我上段完整代码中被注释掉的那一段替换进去即可。