|
前言

最近实习需要搞的工程涉及到姿态估计,对这 2 天的学习做一下总结归纳。第一步就是准备数据集,查资料后发现 Windows 环境下使用的标注软件就是 labelme(coco-annotator 好像就可以直接输出 coco 格式的 JSON 文件,后期继续学习)。labelme 产出的 JSON 文件需要转换到 coco 格式的 JSON 文件,然后将 JSON 文件转成 Yolo 需要的 txt 文件,才能送进工程进行训练,转来转去的个人感觉非常麻烦。本博文主要内容是分享关键点的 labelme2coco——JSON 文件转换,cocoJSON2cocotxt 的转换,以及 coco_kpts 文件夹的内容布局。

一、labelme 标注关键点的使用

用 labelme 标注关键点与后续的 labelme2coco 转换代码息息相关。假设标注对象是人体,关键点有 17 个:"nose"、"left_eye"、"right_eye"、"left_ear"、"right_ear"、"left_shoulder"、"right_shoulder"、"left_elbow"、"right_elbow"、"left_wrist"、"right_wrist"、"left_hip"、"right_hip"、"left_knee"、"right_knee"、"left_ankle"、"right_ankle",label 编号从 1 依次排到 17;然后再把人用 rectangle 框出来,label 编号为 bbox(Yolopose 是需要画框的,并且 num_class 已经固定好了为 person)。所有的 Group id 都不用写,JSON 文件会自动补上 null,但不影响我们后续操作。填写 label 及 Group id 的界面如下图所示。

选取一张图片,把 17 个关键点都标注出来,再把人框出来;如果对象只显示半个身体,不满 17 个关键点也无所谓,没有哪个部位就把那个部位的标注数值空出来就可以了。一张图片操作结束后,就如下图所示。coco 图片的命名格式是 12 位数字,标注完后的 JSON 文件用相同的名称命名即可。

二、labelme2coco_keypoints.py

CSDN 上的其他转换代码,我都没法直接跑,也没有相关的讲解,于是我就拿了现有的代码修改了一些,并记录了我的学习过程。先介绍一下哪些部分需要修改,然后再附上整个代码,以 17 个人体关键点标注好的 JSON 文件为例。

    "keypoints": ["nose","left_eye","right_eye","left_ear","right_ear","left_shoulder","right_shoulder","left_elbow","right_elbow","left_wrist","right_wrist","left_hip","right_hip","left_knee","right_knee","left_ankle","right_ankle"],  # 这是固定的,要是其他的关键点标注,则需要修改内容并且与前面的 label 编号一一对应
    "skeleton": [[16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13],[6,7],[6,8],[7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]]

    json_file[-17:-5]  # 这是为了取文件的名称,比如 000000000001;如果文件名称变了,则需要修改
    json_file[-17:-4]  # 这是为了取文件的名称,比如 000000000001.(多一个 .)

keypoints 里面放的是 (x, y, v):x、y 是坐标值,v 一般取 2;0 代表没有该点,1 代表该点存在但是被遮挡了,2 代表该点存在且没有被遮挡(具体可以参考 coco 数据集的格式,本文主要介绍如何应用)。

    keypoints = [0] * 3 * 17  # 这里是我们标注的关节点个数,如有改动需要修改
    json_path = r'E:\val2017\Json'  # 存放需要转换的 json 文件夹,变动需要修改
    c = tococo(json_path, save_path=r'E:\val2017\annotations\val.json', a=1)  # 输出的地址及名称,变动需要修改

整个代码如下,可以做到一个文件夹下所有的 JSON 文件一起转换并输出一个 coco 格式的 JSON 文件:

import numpy as np
import json
import glob
import codecs
import os


class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)


class tococo(object):
    def __init__(self, jsonfile, save_path, a):
        self.images = []
        self.categories = [{"supercategory": "person", "id": 1, "name": "person",
                            "keypoints": ["nose", "left_eye", "right_eye", "left_ear", "right_ear",
                                          "left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
                                          "left_wrist", "right_wrist", "left_hip", "right_hip",
                                          "left_knee", "right_knee", "left_ankle", "right_ankle"],
                            "skeleton": [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13],
                                         [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3],
                                         [2, 4], [3, 5], [4, 6], [5, 7]]}]
        self.annotations = []
        self.jsonfile = os.listdir(jsonfile)
        self.save_path = save_path  # 保存 json 的路径
        self.class_id = a  # class 我们的类别只有一个 person
        self.coco = {}
        self.path = jsonfile

    def labelme_to_coco(self):
        for num, json_file in enumerate(self.jsonfile):
            json_file = os.path.join(self.path, json_file)
            data = codecs.open(json_file, 'r')
            data = json.load(data)
            self.images.append(self.get_images(json_file[-17:-4] + 'jpg', data["imageHeight"], data["imageWidth"]))
            shapes = data["shapes"]
            annotation = {}  # 一个 annotation 代表一张图片中的所有 samples
            num_keypoints = 0
            keypoints = [0] * 3 * 17  # 这里是我们标注的关节点个数,如有改动需要修改
            flag = 0
            for shape in shapes:
                if shape['shape_type'] == 'rectangle' or shape["label"] == 'bbox':
                    bbox = []
                    temp = shape["points"]
                    try:
                        x_min = min(temp[0][0], temp[1][0])
                    except IndexError as e:
                        print('class: {}, image: {}'.format(self.class_id, int(json_file[-17:-5])))
                    x_max = max(temp[0][0], temp[1][0])
                    y_min = min(temp[0][1], temp[1][1])
                    y_max = max(temp[0][1], temp[1][1])
                    bbox.append(x_min)
                    bbox.append(y_min)
                    w = x_max - x_min + 1
                    h = y_max - y_min + 1
                    bbox.append(w)
                    bbox.append(h)
                    annotation['bbox'] = bbox
                    flag = flag + 1
                else:
                    idx = int(shape['label'])
                    try:
                        keypoints[(idx - 1) * 3 + 0] = shape['points'][0][0]
                        keypoints[(idx - 1) * 3 + 1] = shape['points'][0][1]
                        keypoints[(idx - 1) * 3 + 2] = 2
                        num_keypoints = num_keypoints + 1
                    except IndexError as e:
                        print('class: {}, image: {}'.format(self.class_id, int(json_file[-17:-5])))
            if flag == 0:
                print('{}\\{} does not contain bbox\n'.format(self.class_id, json_file))
            annotation['segmentation'] = [[]]
            annotation['num_keypoints'] = num_keypoints
            annotation['iscrowd'] = 0
            annotation['keypoints'] = keypoints
            annotation['image_id'] = int(json_file[-17:-5])  # 对应的图片 ID
            if 'bbox' not in annotation:
                annotation['bbox'] = [0, 0, data['imageWidth'], data['imageHeight']]
                annotation['area'] = 0
            else:
                annotation['area'] = int(bbox[2] * bbox[3])
            annotation['category_id'] = 1
            annotation['id'] = int(json_file[-17:-5])  # 对象 id
            self.annotations.append(annotation)
            self.image_id = int(json_file[-17:-5])
        self.coco["images"] = self.images
        self.coco["categories"] = self.categories
        self.coco["annotations"] = self.annotations

    def get_images(self, filename, height, width):
        image = {}
        image["height"] = height
        image['width'] = width
        image["id"] = int(filename[-16:-4])
        image["file_name"] = filename
        return image

    def get_categories(self, name, class_id):
        category = {}
        category["supercategory"] = "person"
        category['id'] = class_id
        category['name'] = name
        return category

    def save_json(self):
        self.labelme_to_coco()
        coco_data = self.coco
        # 保存 json 文件
        json.dump(coco_data, open(self.save_path, 'w'), indent=4, cls=MyEncoder)  # indent=4 更加美观显示
        return self.image_id


json_path = r'E:\val2017\Json_old'  # 保存 json 的文件夹路径
c = tococo(json_path, save_path=r'E:\val2017\annotations_old\val.json', a=1)  # 我们将所有 json 文件合成为一个 json 文件,这是最后 json 文件的名称
image_id = c.save_json()

本人将 5 个 JSON 文件放在同一个文件夹下运行转换代码,最后 val.json 通过 VS2022(其他软件也可以)打开后内容如下所示。

三、用 cocoapi 来检查 coco 格式的 JSON 文件是否有问题

#!/usr/bin/python3
# encoding: utf-8 -*-
# @author: liangjian
# @software: PyCharm
# @file: 0408.py
# @Time: 2021/4/8 21:51
import skimage.io as io
import pylab
import time as time
import json
import numpy as np
from collections import defaultdict
import itertools
import matplotlib.pyplot as plt
from matplotlib.c
ollections import PatchCollection


def _isArrayLike(obj):
    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')


class COCO:
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if not annotation_file == None:
            print('loading annotations into memory...')
            tic = time.time()
            dataset = json.load(open(annotation_file, 'r'))
            assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
            print('Done (t={:0.2f}s)'.format(time.time() - tic))
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        # create index
        print('creating index...')
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann
        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img
        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])
        print('index created!')
        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array): get cats for given cat names
        :param supNms (str array): get cats for given supercategory names
        :param catIds (int array): get cats for given cat ids
        :return: ids (int array): integer array of cat ids
        """
        catNms = catNms if _isArrayLike(catNms) else [catNms]
        supNms = supNms if _isArrayLike(supNms) else [supNms]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            cats = self.dataset['categories']
            # print('')
            # print('keypoints 的 cat 就只有人 1 种')
            # print(cats)
            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
        # print(cats)
        ids = [cat['id'] for cat in cats]
        return ids

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array): integer ids specifying cats
        :return: cats (object array): loaded cat objects
        """
        if _isArrayLike(ids):
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array): get imgs for given ids
        :param catIds (int array): get imgs with all given cats
        :return: ids (int array): integer array of img ids
        '''
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def loadImgs(self, ids=[]):
        """
        Load imgs with the specified ids.
        :param ids (int array): integer ids specifying img
        :return: imgs (object array): loaded img objects
        """
        if _isArrayLike(ids):
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds (int array): get anns for given imgs
               catIds (int array): get anns for given cats
               areaRng (float array): get anns for given area range (e.g. [0 inf])
               iscrowd (boolean): get anns for given crowd label (False or True)
        :return: ids (int array): integer array of ann ids
        """
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            # 根据 imgIds 找到所有的 ann
            if not len(imgIds) == 0:
                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            # 通过各类条件如 catIds 对 anns 进行筛选
            anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        # NOTE(review): 原文从此处到 showAnns 的骨架绘制之间有一段在网页抓取时丢失
        # (getAnnIds 的结尾、loadAnns 全部、showAnns 的前半部分),以下按 pycocotools
        # 官方 coco.py 的标准实现补全,请对照原文/官方源码核对。
        if not iscrowd == None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array): integer ids specifying anns
        :return: anns (object array): loaded ann objects
        """
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk] > 0):
                            # 画点之间的连接线
                            plt.plot(x[sk], y[sk], linewidth=1, color=c)
                    # 画点
                    plt.plot(x[v > 0], y[v > 0], 'o', markersize=4, markerfacecolor=c,
                             markeredgecolor='k', markeredgewidth=1)
                    plt.plot(x[v > 1], y[v > 1], 'o', markersize=4, markerfacecolor=c,
                             markeredgecolor=c, markeredgewidth=1)
            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])


pylab.rcParams['figure.figsize'] = (8.0, 10.0)
annFile = r'E:\val2017\annotations_old\val.json'  # 转换之后的 json 文件路径
img_prefix = r'E:\val2017'  # 图片文件夹路径
# initialize COCO api for instance annotations
coco = COCO(annFile)
# getCatIds(catNms=[], supNms=[], catIds=[])
# 通过输入类别的名字、大类的名字或是种类的 id,来筛选得到图片所属类别的 id
catIds = coco.getCatIds(catNms=['person'])
# getImgIds(imgIds=[], catIds=[])
# 通过图片的 id 或是所属种类的 id 得到图片的 id
imgIds = coco.getImgIds(catIds=catIds)
# imgIds = coco.getImgIds(imgIds=[1407])
# loadImgs(ids=[])
# 得到图片的 id 信息后,就可以用 loadImgs 得到图片的信息了
# 在这里我们随机选取之前 list 中的一张图片
img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
I = io.imread('%s/%s' % (img_prefix, img['file_name']))
plt.imshow(I)
plt.axis('off')
ax = plt.gca()
# getAnnIds(imgIds=[], catIds=[], areaRng=[], iscrowd=None)
# 通过输入图片的 id、类别的 id、实例的面积、是否是人群来得到图片的注释 id
annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
# loadAnns(ids=[])
# 通过注释的 id,得到注释的信息
anns = coco.loadAnns(annIds)
print('anns:', anns)
coco.showAnns(anns)
plt.imshow(I)
plt.axis('off')
plt.show()

需要修改的地方为:

    annFile = r'E:\val2017\annotations\val.json'  # 转换之后的 coco 格式 json 文件路径
    img_prefix = r'E:\val2017'  # 图片文件夹路径

如果不出意外的话,就不会出现意外了,可以看到你标注的效果如下图所示。

总结

至此,labelme 下的 JSON 转 coco 格式的 JSON 就完成了。由于篇幅问题,coco 格式的 JSON 转 yolo 需要的 txt 相关介绍,在《Yolopose 关键点检测:自己标注数据集,制作数据集(二)》中有相关讲解。
|
|