This article walks through how to build your own image dataset as TFRecords in TensorFlow. It is a practical write-up, shared here as a reference; hopefully you will take something useful away from it.
The official TensorFlow documentation (including its Chinese translation) is rather terse and sticks to the MNIST binary dataset throughout; it says very little about how to build your own image dataset as tfrecords.
The workflow is: make the dataset -> read the dataset -> feed it into a queue.
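For the first example below, the code assumes that each class has its own subfolder of images directly under the current working directory, roughly like this (the image file names are just for illustration; the folder names match the classes list in the code):
<current working directory>/
    test/    1.jpg  2.jpg  ...
    test1/   1.jpg  2.jpg  ...
    test2/   1.jpg  2.jpg  ...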
The complete code first:
# encoding=utf-8
import os
import tensorflow as tf
from PIL import Image

cwd = os.getcwd()
# Use a list (not a set) so that each class folder gets a deterministic label index
classes = ['test', 'test1', 'test2']

# Write the binary data
def create_record():
    writer = tf.python_io.TFRecordWriter("train.tfrecords")
    for index, name in enumerate(classes):
        class_path = cwd + "/" + name + "/"
        for img_name in os.listdir(class_path):
            img_path = class_path + img_name
            img = Image.open(img_path)
            img = img.resize((64, 64))
            img_raw = img.tobytes()  # convert the image to raw bytes
            print index, img_raw
            example = tf.train.Example(
                features=tf.train.Features(feature={
                    "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
                    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                }))
            writer.write(example.SerializeToString())
    writer.close()

data = create_record()
# Read the binary data back
def read_and_decode(filename):
    # Create a filename queue; no limit on how many times it can be read
    filename_queue = tf.train.string_input_producer([filename])
    # Create a reader that pulls from the file queue
    reader = tf.TFRecordReader()
    # The reader reads one serialized example from the file queue
    _, serialized_example = reader.read(filename_queue)
    # Parse the serialized example back into tensors
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string)
        }
    )
    label = features['label']
    img = features['img_raw']
    img = tf.decode_raw(img, tf.uint8)
    img = tf.reshape(img, [64, 64, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(label, tf.int32)
    return img, label
if __name__ == '__main__':
    if 0:
        data = create_record()
    else:
        img, label = read_and_decode("train.tfrecords")
        print "tengxing", img, label
        # shuffle_batch randomly shuffles the input; next_batch just takes examples in order.
        # shuffle_batch keeps [img, label] in sync, i.e. features and labels stay matched;
        # otherwise an input image could be paired with the wrong label.
        # Used this way, each dequeued image comes with its corresponding label.
        # What shuffle_batch returns is the result of RandomShuffleQueue.dequeue_many();
        # it builds a RandomShuffleQueue and keeps pushing single [img, label] pairs into it.
        img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                        batch_size=4, capacity=2000,
                                                        min_after_dequeue=1000)
        # Initialize all variables
        init = tf.initialize_all_variables()

        with tf.Session() as sess:
            sess.run(init)
            # Start the queue runners
            threads = tf.train.start_queue_runners(sess=sess)
            for i in range(5):
                print img_batch.shape, label_batch
                val, l = sess.run([img_batch, label_batch])
                # l = to_categorical(l, 12)
                print(val.shape, l)

Making the dataset
# Write the binary data
def create_record():
    cwd = os.getcwd()
    classes = ['1', '2', '3']
    writer = tf.python_io.TFRecordWriter("train.tfrecords")
    for index, name in enumerate(classes):
        class_path = cwd + "/" + name + "/"
        for img_name in os.listdir(class_path):
            img_path = class_path + img_name
            img = Image.open(img_path)
            img = img.resize((28, 28))
            img_raw = img.tobytes()  # convert the image to raw bytes
            # print index, img_raw
            example = tf.train.Example(
                features=tf.train.Features(
                    feature={
                        "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
                        'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                    }
                )
            )
            writer.write(example.SerializeToString())
    writer.close()

A TFRecords file holds tf.train.Example protocol buffers (each of which contains a Features field). To build one, you write some code that loads your data, fills it into an Example protocol buffer, serializes the protocol buffer to a string, and writes that string to the TFRecords file with tf.python_io.TFRecordWriter.
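If you want to sanity-check what was written, the serialized Examples can be parsed back without building any graph, using the record iterator from the same tf.python_io module. A minimal sketch, assuming the train.tfrecords produced by the function above:

# Sketch: inspect the first few records of train.tfrecords directly via protobuf
import tensorflow as tf

for i, record in enumerate(tf.python_io.tf_record_iterator("train.tfrecords")):
    example = tf.train.Example()
    example.ParseFromString(record)  # fill the protocol buffer from the raw string
    label = example.features.feature['label'].int64_list.value[0]
    img_raw = example.features.feature['img_raw'].bytes_list.value[0]
    # 2352 = 28*28*3 raw bytes for an RGB image resized to 28x28
    print("record %d: label=%d, %d raw bytes" % (i, label, len(img_raw)))
    if i >= 4:
        break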
Reading the dataset
# Read the binary data back
def read_and_decode(filename):
    # Create a filename queue; no limit on how many times it can be read
    filename_queue = tf.train.string_input_producer([filename])
    # Create a reader that pulls from the file queue
    reader = tf.TFRecordReader()
    # The reader reads one serialized example from the file queue
    _, serialized_example = reader.read(filename_queue)
    # Parse the serialized example back into tensors
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string)
        }
    )
    label = features['label']
    img = features['img_raw']
    img = tf.decode_raw(img, tf.uint8)
    img = tf.reshape(img, [64, 64, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(label, tf.int32)
    return img, label

An Example contains a Features message; Features holds a map of Feature entries (note: no trailing s); and each Feature in turn holds a FloatList, a BytesList, or an Int64List.
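In code, that nesting looks like the following sketch. The float-valued feature is only there to illustrate FloatList; the examples in this article use just Int64List and BytesList:

# Sketch: the Example -> Features -> Feature -> {Int64List, BytesList, FloatList} nesting
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    'label':   tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'\x00\x01\x02'])),
    'score':   tf.train.Feature(float_list=tf.train.FloatList(value=[0.5])),  # illustrative only
}))
print(example.features.feature['label'].int64_list.value)  # [3]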
Adding to the queue
with tf.Session() as sess:
    sess.run(init)
    # Start the queue runners
    threads = tf.train.start_queue_runners(sess=sess)
    for i in range(5):
        print img_batch.shape, label_batch
        val, l = sess.run([img_batch, label_batch])
        # l = to_categorical(l, 12)
        print(val.shape, l)
With that you end up with a binary dataset just like TensorFlow's official one.
Notes:
Do not forget the line that starts the queue runners, otherwise the session hangs.
When feeding the batch into a model, remember to use the evaluated val and l rather than the tensors themselves, or you will get a type error: TypeError: The value of a feed cannot be a tf.Tensor object. Acceptable feed values include Python scalars, strings, lists, or numpy ndarrays.
For the loss, cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits) computes the cross entropy directly from integer labels.
For the final evaluation, tf.nn.in_top_k(logits, labels, 1) takes the index of the largest logit and compares it with the label.
If instead you compute cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv)), the labels must first be converted to one-hot vectors (a short sketch of both loss forms follows below).
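A minimal sketch of the two loss variants and the in_top_k evaluation mentioned above. The logits, labels and num_classes here are stand-ins for your model output, the label_batch from the pipeline and your actual number of classes:

# Sketch: integer labels vs. one-hot labels for the cross entropy, plus in_top_k evaluation
import tensorflow as tf

num_classes = 3                              # illustrative: one per class folder
logits = tf.random_normal([4, num_classes])  # stand-in for the model output on a batch of 4
labels = tf.constant([0, 2, 1, 0])           # stand-in for label_batch (integer class ids)

# Variant 1: integer labels go in directly
sparse_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

# Variant 2: the hand-written -sum(y_ * log(y_conv)) form needs one-hot labels
y_ = tf.one_hot(labels, num_classes)
y_conv = tf.nn.softmax(logits)
manual_loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=1))

# Evaluation: is the arg-max of the logits equal to the label?
correct = tf.nn.in_top_k(logits, labels, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    print(sess.run([sparse_loss, manual_loss, accuracy]))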
Example 2: converting the images under an image folder into a tfrecords dataset.
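This script reads its input list from train_file (train.txt). Judging from load_file below (np.genfromtxt with a space delimiter, a string column and an integer column), each line is expected to hold an image path and an integer label separated by a single space, for example (these paths are made up for illustration):
./images/cat_001.jpg 0
./images/dog_042.jpg 1
./images/bird_007.jpg 2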
############################################################################################
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#Author : zhaoqinghui
#Date : 2016.5.10
#Function: image convert to tfrecords
#############################################################################################
import tensorflow as tf
import numpy as np
import cv2
import os
import os.path
from PIL import Image
#Parameter settings
###############################################################################################
train_file = 'train.txt'         # list of training images (path and label per line)
name = 'train'                   # produces train.tfrecords
output_directory = './tfrecords'
resize_height = 32               # height the images are stored at
resize_width = 32                # width the images are stored at
###############################################################################################
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def load_file(examples_list_file):
    lines = np.genfromtxt(examples_list_file, delimiter=" ", dtype=[('col1', 'S120'), ('col2', 'i8')])
    examples = []
    labels = []
    for example, label in lines:
        examples.append(example)
        labels.append(label)
    return np.asarray(examples), np.asarray(labels), len(lines)

def extract_image(filename, resize_height, resize_width):
    image = cv2.imread(filename)
    # cv2.resize expects the target size as (width, height)
    image = cv2.resize(image, (resize_width, resize_height))
    b, g, r = cv2.split(image)
    rgb_image = cv2.merge([r, g, b])
    return rgb_image
def transform2tfrecord(train_file, name, output_directory, resize_height, resize_width):
    if not os.path.exists(output_directory) or os.path.isfile(output_directory):
        os.makedirs(output_directory)
    _examples, _labels, examples_num = load_file(train_file)
    filename = output_directory + "/" + name + '.tfrecords'
    writer = tf.python_io.TFRecordWriter(filename)
    for i, [example, label] in enumerate(zip(_examples, _labels)):
        print('No.%d' % (i))
        image = extract_image(example, resize_height, resize_width)
        print('shape: %d, %d, %d, label: %d' % (image.shape[0], image.shape[1], image.shape[2], label))
        image_raw = image.tostring()
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': _bytes_feature(image_raw),
            'height': _int64_feature(image.shape[0]),
            'width': _int64_feature(image.shape[1]),
            'depth': _int64_feature(image.shape[2]),
            'label': _int64_feature(label)
        }))
        writer.write(example.SerializeToString())
    writer.close()
def disp_tfrecords(tfrecord_list_file):
    filename_queue = tf.train.string_input_producer([tfrecord_list_file])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'label': tf.FixedLenFeature([], tf.int64)
        }
    )
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    # print(repr(image))
    height = features['height']
    width = features['width']
    depth = features['depth']
    label = tf.cast(features['label'], tf.int32)
    init_op = tf.initialize_all_variables()
    resultImg = []
    resultLabel = []
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(21):
            # Note: every .eval() below re-runs the read op and dequeues a fresh record,
            # so image, label, height, width and depth may come from different examples.
            image_eval = image.eval()
            resultLabel.append(label.eval())
            image_eval_reshape = image_eval.reshape([height.eval(), width.eval(), depth.eval()])
            resultImg.append(image_eval_reshape)
            pilimg = Image.fromarray(np.asarray(image_eval_reshape))
            pilimg.show()
        coord.request_stop()
        coord.join(threads)
        sess.close()
    return resultImg, resultLabel
def read_tfrecord(filename_queuetemp):
    filename_queue = tf.train.string_input_producer([filename_queuetemp])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'label': tf.FixedLenFeature([], tf.int64)
        }
    )
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    # image: reshape to the size the images were stored at (32x32x3 here)
    image = tf.reshape(image, [resize_height, resize_width, 3])
    # normalize
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    # label
    label = tf.cast(features['label'], tf.int32)
    return image, label
def test():
    transform2tfrecord(train_file, name, output_directory, resize_height, resize_width)  # conversion
    img, label = disp_tfrecords(output_directory + '/' + name + '.tfrecords')  # display
    img, label = read_tfrecord(output_directory + '/' + name + '.tfrecords')   # read
    print label

if __name__ == '__main__':
    test()

That gives you your own .tfrecords dataset, which can be fed straight into a TensorFlow input pipeline.
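To actually consume it for training, the tensors returned by read_tfrecord can be batched just as in the first example. A minimal sketch, meant to be appended to the script above; the batch size and the Coordinator-based shutdown are illustrative choices:

# Sketch: batching the second dataset with shuffle_batch, as in the first example
image, label = read_tfrecord(output_directory + '/' + name + '.tfrecords')
image_batch, label_batch = tf.train.shuffle_batch(
    [image, label], batch_size=4, capacity=2000, min_after_dequeue=1000)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    imgs, lbls = sess.run([image_batch, label_batch])  # imgs: (4, 32, 32, 3), lbls: (4,)
    print(imgs.shape, lbls)
    coord.request_stop()
    coord.join(threads)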
關于“Tensorflow之如何構建自己的圖片數據集TFrecords”這篇文章就分享到這里了,希望以上內容可以對大家有一定的幫助,使各位可以學到更多知識,如果覺得文章不錯,請把它分享出去讓更多的人看到。