Getting Started with TensorFlow, Part 7: Fine-Tuning VGG16 for Cat/Dog Classification via Transfer Learning (pretrained 1000-class weight file on Baidu Cloud, ready for further training; thread-based input pipeline)
Previous post: Getting Started with TensorFlow, Part 6: Image Processing in Python (DeepDream, 1×1 convolution kernels, shortcut connections) https://blog.csdn.net/qq_36187544/article/details/89742830
Next post: Getting Started with TensorFlow, Part 8: Generative Adversarial Networks (GAN) (source code and data on Baidu Cloud) https://blog.csdn.net/qq_36187544/article/details/89919656
Resource Notes
This is reusable image-classification source code with all resources included!
Note: if the code does not run as-is, check the import paths. My whole project sits under an aTensorflow directory (hence imports like aTensorflow.vgg16.VGG16_UP below); correct the paths to match your layout and it will run directly.
Baidu Cloud link: https://pan.baidu.com/s/1Xd7C7rUr6-g9CFf9puXPhQ
Extraction code: a5bv
The file structure is as follows:
| File | Description |
| --- | --- |
| data | The dataset. train is the Kaggle cats-vs-dogs dataset, bundled directly with the source to avoid format mismatches between downloads (some versions label by folder name, others by file names like dog.1.jpg). About 900 MB. |
| model | Checkpoints saved from a training run, used for prediction. Only 80 iterations were run here, so accuracy is modest, but without these files prediction is impossible. About 1 GB, since weights from the whole run are recorded. |
| __init__.py | Empty. |
| imagenet_classes.py | Class-name file for transfer learning; a third-party list of the original 1000 ImageNet classes. |
| tf009.py | Main training script; run it to train. Training is slow: about 40 minutes for 80 iterations on my (underpowered) machine. |
| tf009_prediction.py | Prediction script; classifies the test images under /data/test1/. Sample predictions appear later in the post. |
| VGG16_RAW.py | A plain VGG16 model, kept because it is highly reusable and can be adapted repeatedly. |
| VGG16_UP.py | VGG16_RAW.py fixes neither the number of output classes nor which parts of the network may be retrained; this file is the fine-tuned version adapted to binary cat/dog classification. |
| vgg16_weights.npz | Third-party pretrained weights for the original 1000-class model. Do not modify; the file must stay complete. About 500 MB. |
| vgg_preprocess.py | Standard image-preprocessing file. It targets Python 2, so it emits warnings, but they do not affect use. |
In short: to run predictions, execute tf009_prediction.py; to train, edit tf009.py and VGG16_UP.py.
These files can be adapted to other classification tasks with minor changes, so I am leaving this note here in case I come back to reuse them.
Transfer Learning
Transfer learning means initializing a new model from one that has already been trained. In the diagram from the original post, the top half is someone else's model trained on a 1000-class problem; the bottom half reuses part of that model and adds a part we train ourselves.
Advantages: fewer training samples are needed and convergence is faster.
When to use: when the new dataset is small and similar to the original one, and compute is limited.
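In TensorFlow 1.x this boils down to one flag: reused variables are created with trainable=False so gradient descent never touches them, while the new head stays trainable. Below is a minimal sketch of the idiom (hypothetical variable names, not the post's actual code):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 224, 224, 3])

# Reused feature extractor: its value will be overwritten with pretrained
# weights and, because trainable=False, never updated during training.
frozen_w = tf.get_variable("frozen_w", [3, 3, 3, 64], trainable=False)
features = tf.nn.relu(tf.nn.conv2d(x, frozen_w, [1, 1, 1, 1], "SAME"))

# New classification head: the only variables the optimizer may change.
head_w = tf.get_variable("head_w", [64, 2], trainable=True)
logits = tf.matmul(tf.reduce_mean(features, axis=[1, 2]), head_w)
```

This is exactly the pattern VGG16_UP.py uses: every layer takes a trainable argument, and only the final fc8 layer is created with trainable=True.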
Core Source Code and Results
VGG16_UP.py, obtained by fine-tuning the VGG16 network:
```python
import tensorflow as tf
import numpy as np

'''
Fine-tuned, reusable VGG16 model.
For transfer learning: most of the network is frozen and only the
layers that need retraining are marked trainable.
'''

class vgg16:
    def __init__(self, imgs):
        self.parameters = []  # global list collecting the parameters to load from the pretrained weights
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc8)  # per-class probabilities

    def saver(self):
        return tf.train.Saver()

    def maxpool(self, name, input_data):
        out = tf.nn.max_pool(input_data, [1, 2, 2, 1], [1, 2, 2, 1], padding="SAME", name=name)
        return out

    def conv(self, name, input_data, out_channel, trainable=False):  # trainable flag added for fine-tuning
        in_channel = input_data.get_shape()[-1]
        with tf.variable_scope(name):
            kernel = tf.get_variable("weights", [3, 3, in_channel, out_channel], dtype=tf.float32, trainable=trainable)
            biases = tf.get_variable("biases", [out_channel], dtype=tf.float32, trainable=trainable)
            conv_res = tf.nn.conv2d(input_data, kernel, [1, 1, 1, 1], padding="SAME")
            res = tf.nn.bias_add(conv_res, biases)
            out = tf.nn.relu(res, name=name)
        self.parameters += [kernel, biases]  # register the conv-layer parameters
        return out

    def fc(self, name, input_data, out_channel, trainable=False):  # trainable flag added for fine-tuning
        shape = input_data.get_shape().as_list()
        if len(shape) == 4:
            size = shape[-1] * shape[-2] * shape[-3]
        else:
            size = shape[1]
        input_data_flat = tf.reshape(input_data, [-1, size])
        with tf.variable_scope(name):
            weights = tf.get_variable(name="weight", shape=[size, out_channel], dtype=tf.float32, trainable=trainable)
            biases = tf.get_variable(name="biases", shape=[out_channel], dtype=tf.float32, trainable=trainable)
            res = tf.matmul(input_data_flat, weights)
            out = tf.nn.relu(tf.nn.bias_add(res, biases))
        self.parameters += [weights, biases]  # register the fc-layer parameters
        return out

    def convlayers(self):
        # All convolutional layers are frozen (trainable=False): their
        # weights come from the pretrained 1000-class model.
        self.conv1_1 = self.conv("conv1re_1", self.imgs, 64, trainable=False)
        self.conv1_2 = self.conv("conv1_2", self.conv1_1, 64, trainable=False)
        self.pool1 = self.maxpool("poolre1", self.conv1_2)
        self.conv2_1 = self.conv("conv2_1", self.pool1, 128, trainable=False)
        self.conv2_2 = self.conv("convwe2_2", self.conv2_1, 128, trainable=False)
        self.pool2 = self.maxpool("pool2", self.conv2_2)
        self.conv3_1 = self.conv("conv3_1", self.pool2, 256, trainable=False)
        self.conv3_2 = self.conv("convrwe3_2", self.conv3_1, 256, trainable=False)
        self.conv3_3 = self.conv("convrew3_3", self.conv3_2, 256, trainable=False)
        self.pool3 = self.maxpool("poolre3", self.conv3_3)
        self.conv4_1 = self.conv("conv4_1", self.pool3, 512, trainable=False)
        self.conv4_2 = self.conv("convrwe4_2", self.conv4_1, 512, trainable=False)
        self.conv4_3 = self.conv("convrwe4_3", self.conv4_2, 512, trainable=False)
        self.pool4 = self.maxpool("pool4", self.conv4_3)
        self.conv5_1 = self.conv("conv5_1", self.pool4, 512, trainable=False)
        self.conv5_2 = self.conv("convrew5_2", self.conv5_1, 512, trainable=False)
        self.conv5_3 = self.conv("conv5_3", self.conv5_2, 512, trainable=False)
        self.pool5 = self.maxpool("poolwel5", self.conv5_3)

    # fully connected layers
    def fc_layers(self):
        self.fc6 = self.fc("fc1", self.pool5, 4096, trainable=False)  # frozen
        self.fc7 = self.fc("fc2", self.fc6, 4096, trainable=False)    # frozen
        self.fc8 = self.fc("fc3", self.fc7, 2, trainable=True)        # binary classification, so 2 output units

    # load the pretrained weights into the VGG model
    def load_weights(self, weight_file, sess):
        weights = np.load(weight_file)  # e.g. './vgg16/vgg16_weights.npz'
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            if i not in [30, 31]:  # skip fc8's weight/bias: they belong to the 1000-class head
                sess.run(self.parameters[i].assign(weights[k]))
        print("-----weights loaded")
```
Main training script tf009.py:
```python
'''
Main training script for VGG16 transfer learning.
'''
import tensorflow as tf
import os
import numpy as np
from time import time
import aTensorflow.vgg16.VGG16_UP as model
from aTensorflow.vgg16.vgg_preprocess import preprocess_for_train

img_width = 224
img_height = 224

def get_batch(image_list, label_list, img_width, img_height, batch_size, capacity):
    # Load batches of images and labels from the file/label lists.
    image = tf.cast(image_list, tf.string)
    label = tf.cast(label_list, tf.int32)
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    image = preprocess_for_train(image, 224, 224)
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=64, capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])
    return image_batch, label_batch

'''
# Original course version, replaced below because the downloaded dataset
# uses a different layout (class subfolders vs. dog.1.jpg-style file names).
def get_file(file_dir):
    images = []
    temp = []
    for root, sub_folders, files in os.walk(file_dir):
        for name in files:
            images.append(os.path.join(root, name))
        for name in sub_folders:
            temp.append(os.path.join(root, name))
    labels = []
    for one_folder in temp:
        n_img = len(os.listdir(one_folder))
        letter = one_folder.split("/")[-1]
        if letter == "cat":
            labels = np.append(labels, n_img * [0])
        else:
            labels = np.append(labels, n_img * [1])
    # shuffle
    temp = np.array([images, labels])
    temp = temp.transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(float(i)) for i in label_list]
    return image_list, label_list
'''

def get_file(file_dir):
    images = []
    for root, sub_folders, files in os.walk(file_dir):
        for name in files:
            images.append(os.path.join(root, name))
    labels = []
    for label_name in images:
        letter = label_name.split("\\")[-1].split('.')[0]  # file names look like dog.1.jpg
        if letter == "cat":
            labels.append(0)
        else:
            labels.append(1)
    # shuffle
    temp = np.array([images, labels])
    temp = temp.transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(float(i)) for i in label_list]
    return image_list, label_list

# rebuild labels as one-hot vectors
def onehot(labels):
    n_sample = len(labels)
    n_class = max(labels) + 1
    onehot_labels = np.zeros((n_sample, n_class))
    onehot_labels[np.arange(n_sample), labels] = 1
    return onehot_labels

startTime = time()
batch_size = 32
capacity = 256  # maximum number of elements held in the queue
means = [123.68, 116.779, 103.939]  # per-channel means subtracted during VGG preprocessing
xs, ys = get_file('./data/train')  # image list and label list
image_batch, label_batch = get_batch(xs, ys, 224, 224, batch_size, capacity)

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
y = tf.placeholder(tf.int32, [None, 2])  # two classes

vgg = model.vgg16(x)
fc8_finetuning = vgg.probs  # i.e. softmax(fc8)
# Note: feeding softmax output in as "logits" applies softmax twice;
# it still trains, but passing vgg.fc8 here would be cleaner.
loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=fc8_finetuning, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss_function)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
vgg.load_weights('vgg16_weights.npz', sess)
saver = tf.train.Saver()

# start the input threads, managed by a coordinator
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)

epoch_start_time = time()
for i in range(90):
    images, labels = sess.run([image_batch, label_batch])
    labels = onehot(labels)
    sess.run(optimizer, feed_dict={x: images, y: labels})
    loss = sess.run(loss_function, feed_dict={x: images, y: labels})
    print("Now loss is %f" % loss)
    epoch_end_time = time()
    print("current epoch takes:", (epoch_end_time - epoch_start_time))
    epoch_start_time = epoch_end_time
    if (i + 1) % 30 == 0:
        saver.save(sess, os.path.join("./model/", 'epoch{:06d}.ckpt'.format(i)))
    print("-------------Epoch %d is finished" % i)

saver.save(sess, "./model/")
print("optimization finished")
duration = time() - startTime
print("train takes:", "{:.2f}".format(duration))

coord.request_stop()  # ask the input threads to stop
coord.join(threads)   # return only once all threads have shut down
```
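A note on the threading machinery: slice_input_producer, tf.train.batch, and queue runners were deprecated in later TensorFlow 1.x releases in favor of tf.data, so recent versions print deprecation warnings. The sketch below shows a roughly equivalent tf.data input pipeline (my own substitution, not from the original post; tf.image.resize_images stands in for preprocess_for_train):

```python
import tensorflow as tf

def get_batch_dataset(image_list, label_list, batch_size):
    # tf.data replacement for the queue-based get_batch above (sketch).
    def _parse(path, label):
        img = tf.image.decode_jpeg(tf.read_file(path), channels=3)
        img = tf.image.resize_images(img, [224, 224])  # crude stand-in for preprocess_for_train
        return img, tf.cast(label, tf.int32)

    ds = tf.data.Dataset.from_tensor_slices((image_list, label_list))
    ds = ds.shuffle(buffer_size=len(image_list)).map(_parse).batch(batch_size).repeat()
    return ds.make_one_shot_iterator().get_next()

# With tf.data, sess.run on the returned tensors pulls the next batch
# directly; the Coordinator/start_queue_runners boilerplate disappears.
```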
Prediction script tf009_prediction.py:
```python
import tensorflow as tf
import numpy as np
from scipy.misc import imread, imresize
import aTensorflow.vgg16.VGG16_UP as model

means = [123.68, 116.779, 103.939]  # per-channel means subtracted during VGG preprocessing

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
sess = tf.Session()
vgg = model.vgg16(x)
fc8_finetuning = vgg.probs
saver = tf.train.Saver()
print("model restoring")
saver.restore(sess, "./model/")

# ground-truth labels for the 20 test images, used to eyeball accuracy
real_answer = ["dog", "dog", "dog", "dog", "cat",
               "cat", "cat", "cat", "cat", "cat",
               "cat", "dog", "cat", "cat", "cat",
               "cat", "dog", "dog", "cat", "cat"]

for i in range(1, 21):
    filepath = './data/test1/' + str(i) + '.JPG'
    img = imread(filepath, mode="RGB")
    img = imresize(img, (224, 224))
    img = img.astype(np.float32)
    for c in range(3):
        img[:, :, c] -= means[c]  # subtract the per-channel mean
    prob = sess.run(fc8_finetuning, feed_dict={x: [img]})
    max_index = np.argmax(prob)
    if max_index == 0:
        max_index = "cat"
    elif max_index == 1:
        max_index = "dog"
    print("this picture ", i, "prediction is ", max_index, "correct is :", real_answer[i - 1])
```
Prediction results (see the screenshots in the original post):