Converting a VOC dataset (XML) to YOLOv5 format (TXT) to train on your own data
阿新 • Published 2022-05-11
(Note: these are mostly notes for my own reference, so they are fairly verbose.)
1. Dataset split (the code is a file taken from someone else's shared project; it runs inside that project, but I have not tested it as a standalone file):
import os
import random
import xml.etree.ElementTree as ET

import numpy as np
from PIL import Image

# from utils.utils import get_classes
# To make this file runnable on its own, get_classes from the utils module is copied in directly.
#---------------------------------------------------#
#   Read the class list
#---------------------------------------------------#
def get_classes(classes_path):
    with open(classes_path, encoding='utf-8') as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    return class_names, len(class_names)

#----------------------------------------------------------------------------------------------------------------#
#   annotation_mode selects what this script does when it runs:
#   0 = the full labelling pipeline: the txt files in VOCdevkit/VOC2007/ImageSets plus 2007_train.txt / 2007_val.txt for training
#   1 = only the txt files in VOCdevkit/VOC2007/ImageSets
#   2 = only 2007_train.txt / 2007_val.txt for training
#----------------------------------------------------------------------------------------------------------------#
annotation_mode = 0
#-------------------------------------------------------------------#
#   Must be modified: used to write the object information into
#   2007_train.txt / 2007_val.txt. Keep it identical to the
#   classes_path used for training and prediction. If the generated
#   2007_train.txt contains no objects, the classes were not set
#   correctly. Only used when annotation_mode is 0 or 2.
#-------------------------------------------------------------------#
classes_path = 'model_data/cls_classes.txt'
#----------------------------------------------------------------------------------------------------------------#
#   trainval_percent sets the (train+val) : test ratio; by default (train+val):test = 9:1.
#   train_percent sets the train : val ratio inside (train+val); by default train:val = 9:1.
#   Only used when annotation_mode is 0 or 1.
#----------------------------------------------------------------------------------------------------------------#
trainval_percent = 0.9
train_percent = 0.9
#-------------------------------------------------------#
#   Folder containing the VOC dataset.
#   Defaults to the VOC dataset in the project root.
#-------------------------------------------------------#
VOCdevkit_path = 'VOCdevkit'

VOCdevkit_sets = [('2007', 'train'), ('2007', 'val')]
classes, _ = get_classes(classes_path)

#-------------------------------------------------------#
#   Counters for images per split and objects per class
#-------------------------------------------------------#
photo_nums = np.zeros(len(VOCdevkit_sets))
nums = np.zeros(len(classes))

def convert_annotation(year, image_id, list_file):
    in_file = open(os.path.join(VOCdevkit_path, 'VOC%s/Annotations/%s.xml' % (year, image_id)), encoding='utf-8')
    tree = ET.parse(in_file)
    root = tree.getroot()

    for obj in root.iter('object'):
        difficult = 0
        if obj.find('difficult') != None:
            difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
             int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
        list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))

        nums[classes.index(cls)] = nums[classes.index(cls)] + 1

if __name__ == "__main__":
    random.seed(0)
    if " " in os.path.abspath(VOCdevkit_path):
        raise ValueError("The dataset folder path and the image file names must not contain spaces, otherwise training will break. Please rename them.")

    if annotation_mode == 0 or annotation_mode == 1:
        print("Generate txt in ImageSets.")
        xmlfilepath = os.path.join(VOCdevkit_path, 'VOC2007/Annotations')
        saveBasePath = os.path.join(VOCdevkit_path, 'VOC2007/ImageSets/Main')
        temp_xml = os.listdir(xmlfilepath)
        total_xml = []
        for xml in temp_xml:
            if xml.endswith(".xml"):
                total_xml.append(xml)

        num = len(total_xml)
        list = range(num)
        tv = int(num * trainval_percent)
        tr = int(tv * train_percent)
        trainval = random.sample(list, tv)
        train = random.sample(trainval, tr)

        print("train and val size", tv)
        print("train size", tr)
        ftrainval = open(os.path.join(saveBasePath, 'trainval.txt'), 'w')
        ftest = open(os.path.join(saveBasePath, 'test.txt'), 'w')
        ftrain = open(os.path.join(saveBasePath, 'train.txt'), 'w')
        fval = open(os.path.join(saveBasePath, 'val.txt'), 'w')

        for i in list:
            name = total_xml[i][:-4] + '\n'
            if i in trainval:
                ftrainval.write(name)
                if i in train:
                    ftrain.write(name)
                else:
                    fval.write(name)
            else:
                ftest.write(name)

        ftrainval.close()
        ftrain.close()
        fval.close()
        ftest.close()
        print("Generate txt in ImageSets done.")

    if annotation_mode == 0 or annotation_mode == 2:
        print("Generate 2007_train.txt and 2007_val.txt for train.")
        type_index = 0
        for year, image_set in VOCdevkit_sets:
            image_ids = open(os.path.join(VOCdevkit_path, 'VOC%s/ImageSets/Main/%s.txt' % (year, image_set)), encoding='utf-8').read().strip().split()
            list_file = open('%s_%s.txt' % (year, image_set), 'w', encoding='utf-8')
            for image_id in image_ids:
                list_file.write('%s/VOC%s/JPEGImages/%s.jpg' % (os.path.abspath(VOCdevkit_path), year, image_id))
                convert_annotation(year, image_id, list_file)
                list_file.write('\n')
            photo_nums[type_index] = len(image_ids)
            type_index += 1
            list_file.close()
        print("Generate 2007_train.txt and 2007_val.txt for train done.")

        def printTable(List1, List2):
            for i in range(len(List1[0])):
                print("|", end=' ')
                for j in range(len(List1)):
                    print(List1[j][i].rjust(int(List2[j])), end=' ')
                    print("|", end=' ')
                print()

        str_nums = [str(int(x)) for x in nums]
        tableData = [classes, str_nums]
        colWidths = [0] * len(tableData)
        for i in range(len(tableData)):
            for j in range(len(tableData[i])):
                if len(tableData[i][j]) > colWidths[i]:
                    colWidths[i] = len(tableData[i][j])
        printTable(tableData, colWidths)

        if photo_nums[0] <= 500:
            print("The training set has fewer than 500 images, which is quite small. Use a larger number of epochs so there are enough gradient descent steps.")

        if np.sum(nums) == 0:
            print("No objects were found in the dataset. Make sure classes_path matches your own dataset and that the label names are correct, otherwise training will have no effect!")
            print("No objects were found in the dataset. Make sure classes_path matches your own dataset and that the label names are correct, otherwise training will have no effect!")
            print("No objects were found in the dataset. Make sure classes_path matches your own dataset and that the label names are correct, otherwise training will have no effect!")
            print("(Repeated three times because it matters.)")
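Each line this script writes to 2007_train.txt / 2007_val.txt is the absolute image path followed by space-separated boxes in xmin,ymin,xmax,ymax,class_id form. This is the annotation format of the original shared project, not yet the YOLOv5 txt format, which is produced in step 2. A minimal sketch to eyeball the first few parsed entries, assuming 2007_train.txt sits in the current directory:

# Sanity check for the file produced above (sketch, not part of the original script).
with open('2007_train.txt', encoding='utf-8') as f:
    for line in list(f)[:3]:
        parts = line.strip().split()
        image_path, boxes = parts[0], parts[1:]
        print(image_path)
        for box in boxes:
            xmin, ymin, xmax, ymax, cls_id = map(int, box.split(','))
            print('  class', cls_id, 'box', (xmin, ymin, xmax, ymax))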
2. Using the train and test txt files, look up the corresponding XML annotations in the Annotations folder and convert them to txt labels:
import os
import xml.etree.ElementTree as ET
from os import getcwd

sets = ['train', 'test']
classes = ["fire", "smoke"]  # put your own dataset classes here

def convert(size, box):
    # Convert a VOC box (xmin, xmax, ymin, ymax) into normalized YOLO xywh.
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)

def convert_annotation(image_id):
    in_file = open('Annotations/%s.xml' % (image_id), encoding='utf-8')        # folder the xml annotations are read from
    out_file = open('Annotations/%s.txt' % (image_id), 'w', encoding='utf-8')  # folder the txt labels are written to
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    for obj in root.iter('object'):
        difficult = 0
        if obj.find('difficult') is not None:  # some xml files have no <difficult> tag
            difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

wd = getcwd()

for image_set in sets:
    # if not os.path.exists('labels/'):
    #     os.makedirs('labels/')
    # train.txt / test.txt contain only the file names (no paths); they tell us which xml files to convert.
    image_ids = open('ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
    # list_file = open('%s.txt' % (image_set), 'w')
    for image_id in image_ids:
        # list_file.write('/root/object-detection/yolov5-master/data/police_obj/images/%s.jpg\n' % (image_id))
        # (optionally rewrite train.txt / test.txt with absolute image paths so the images are easy to load)
        convert_annotation(image_id)
    # list_file.close()
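The convert() function above turns VOC corner coordinates into the class x_center y_center width height format that YOLOv5 expects, with all four values normalized by the image size. A small worked example with made-up numbers:

# Worked example of the normalization done by convert() above (values are illustrative only).
w_img, h_img = 640, 480                             # image width, height from <size>
xmin, xmax, ymin, ymax = 100.0, 300.0, 50.0, 250.0  # box corners from <bndbox>

x_center = (xmin + xmax) / 2.0 / w_img              # 0.3125
y_center = (ymin + ymax) / 2.0 / h_img              # 0.3125
width    = (xmax - xmin) / w_img                    # 0.3125
height   = (ymax - ymin) / h_img                    # ~0.4167

# The line written to the txt label for class id 0 would be:
print(0, x_center, y_center, width, height)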
3. Using the train and test txt files, sort the images into separate folders:
# -*- coding: UTF-8 -*-
# !/usr/bin/env python
import os
import re

from PIL import Image

# Leftover snippet from an earlier experiment, kept for reference:
# f1 = open('E:\CODE\TX\dir.txt', 'r')
# f2 = open('E:\CODE\TX\dir.txt', 'w+')
# for line in f1.readlines():
#     if re.findall(' 1', line):   # keep only lines containing " 1"; each line looks like "000005 -1"
#         f2.write(line)           # write the matching lines to f2
# f1.close()
# f2.close()

# Read test.txt line by line; each line is an image id without extension.
data = []
for line in open("ImageSets/Main/test.txt", "r"):
    data.append(line)
# print(data)

os.makedirs('fenli_image', exist_ok=True)  # make sure the output folder exists
for a in data:
    image_id = a.strip()  # strip the trailing newline (safer than a[:-1], which breaks if the last line has no newline)
    im = Image.open('JPEGImages/{}.jpg'.format(image_id))  # open the image named on this line
    im.save('fenli_image/{}.jpg'.format(image_id))         # save a copy of it into fenli_image/
    im.close()
# Run the same loop with ImageSets/Main/train.txt to separate the training images.
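One note on this copy step: opening and re-saving every image with PIL re-encodes the JPEGs. If the goal is only to sort files into per-split folders, a plain file copy does the same job without touching the pixels. A minimal sketch with shutil, reusing the folder names from the script above:

import os
import shutil

# Copy the images listed in test.txt without re-encoding them (sketch; adjust folder names to your layout).
os.makedirs('fenli_image', exist_ok=True)
with open('ImageSets/Main/test.txt') as f:
    for image_id in f.read().split():
        shutil.copy(os.path.join('JPEGImages', image_id + '.jpg'),
                    os.path.join('fenli_image', image_id + '.jpg'))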
4. Finally, arrange everything into the directory layout YOLOv5 expects:
mydata
|-- images
|   |-- train
|   `-- test
`-- labels
    |-- train
    `-- test
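With this layout, label files pair with images by name: labels/train/xxx.txt describes images/train/xxx.jpg, and YOLOv5 finds the labels by replacing "images" with "labels" in each image path, which is why the two trees must mirror each other. Training also needs a small dataset yaml pointing at the image folders and listing the class names. A sketch for the two-class fire/smoke example used above (the file name mydata.yaml and the relative paths are assumptions; adjust them to where mydata actually lives):

# mydata.yaml (example, adjust paths to your setup)
train: ../mydata/images/train   # training images
val: ../mydata/images/test      # validation images

nc: 2                           # number of classes
names: ['fire', 'smoke']        # class names, in class-id order

Pass it to training with something like python train.py --data mydata.yaml plus your usual model and hyperparameter options.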