以python讀取yolo的輸出-darknet.py

在安裝完yolo後,也實際操作過yolo後,就會想要把yolo辨識完的座標再加以應用。照理說要先找到yolo資料夾裡的c語言原始檔(yolo是用c寫的),做完更改後,還要在yolo的根目錄(~/darknet)裡,把yolo重新編譯一次,才算完成修改。
不過在yolo資料夾裡,藏著一個python的資料夾,裡面有個darknet.py,這個檔案可以把yolo辨識完的結果,使用python 輸出。
一開始打開這個文件,不是直接執行就可以啟動的,還要做些修改。
詳情請參考這篇文章:
https://blog.csdn.net/weixin_39198406/article/details/83931727

主要就是yolo辨識完會回傳一個r陣列,裡面有辨識出的類別名稱與座標(注意不是邊界框的角點座標,而是中心點座標,加上方框的寬和高)

之後就能應用darknet.py來實現你的作品了

那如何用這個darknet.py實現影片的目標檢測呢?有辦法,只是需要更改程式碼,之後再說

程式碼如下:
應該在darknet.py複製貼上就可以了

from ctypes import *
import math
import random
import os
import cv2
import numpy as np
import random


def sample(probs):
    """Draw a random index from *probs*, treating entries as unnormalized weights.

    The weights are normalized by their sum, a uniform draw in [0, 1) is
    made, and the first index whose cumulative mass reaches the draw is
    returned.  Falls back to the last index on floating-point underflow.
    """
    total = sum(probs)
    remaining = random.uniform(0, 1)
    for idx, weight in enumerate(probs):
        remaining -= weight / total
        if remaining <= 0:
            return idx
    # Rounding error can leave a sliver of mass unaccounted for.
    return len(probs) - 1

def c_array(ctype, values):
    """Build and return a ctypes array of *ctype* holding *values*."""
    array_type = ctype * len(values)
    return array_type(*values)

class BOX(Structure):
    """ctypes mirror of darknet's C `box` struct.

    (x, y) is the box *center* and (w, h) the full width/height — these
    are not corner coordinates.  Field order and types must match the C
    definition exactly.
    """
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]

class DETECTION(Structure):
    """ctypes mirror of darknet's C `detection` struct (one candidate box).

    `prob` points to a per-class probability array (length = number of
    classes); `mask` may be a null pointer depending on the model.
    Field order and types must match the C definition exactly.
    """
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    """ctypes mirror of darknet's C `image` struct.

    Width, height, channel count, and a raw C float pointer to the pixel
    data.  Field order and types must match the C definition exactly.
    """
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]

class METADATA(Structure):
    """ctypes mirror of darknet's C `metadata` struct: the class count and
    an array of C strings with the class names (e.g. from coco.data)."""
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]

    

# ---------------------------------------------------------------------------
# Load the compiled darknet shared library and declare the argument/return
# types of every C function used below.  Declaring argtypes/restype is
# essential: without them ctypes defaults to int-sized arguments and return
# values, which corrupts pointers and floats on 64-bit systems.
# ---------------------------------------------------------------------------
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
#lib = CDLL(os.path.join(os.getcwd(), "libdarknet.so"), RTLD_GLOBAL)
lib = CDLL("/home/sfgx880/darknet/libdarknet.so", RTLD_GLOBAL) # absolute path to your own darknet build
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

# Raw forward pass: network handle + input buffer -> output buffer.
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

# Select which CUDA device darknet runs on.
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

# Allocate an empty darknet IMAGE of the given width/height/channels.
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

# Extract detection boxes after a forward pass; writes the detection
# count through the final int pointer.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

# Paired deallocators for detections and generic pointer arrays — must be
# called from Python since the memory is owned by the C side.
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

# Build a network from a .cfg file and a .weights file; returns an
# opaque handle passed to the other calls.
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

# Two flavours of non-maximum suppression provided by darknet.
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

# Parse a .data file (class count + names list) into a METADATA struct.
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

# Load an image file from disk into a darknet IMAGE (0, 0 keeps its size).
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

# Forward pass taking a darknet IMAGE directly (used by detect/classify).
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)

def classify(net, meta, im):
    """Classify darknet IMAGE *im* with *net*.

    Returns a list of (class_name, score) pairs covering every class in
    *meta*, sorted by descending score.
    """
    scores = predict_image(net, im)
    pairs = [(meta.names[idx], scores[idx]) for idx in range(meta.classes)]
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    return pairs

def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    """Run darknet object detection on an image file.

    Args:
        net: opaque network handle returned by load_net.
        meta: METADATA with the class count and names.
        image: path to the image file, as bytes.
        thresh: minimum detection confidence.
        hier_thresh: hierarchical threshold (YOLO9000-style trees).
        nms: IoU threshold for non-maximum suppression; falsy disables it.

    Returns:
        List of (class_name, probability, (cx, cy, w, h)) tuples sorted by
        descending probability.  Boxes are center-based (center x/y plus
        full width/height), not corner coordinates.
    """
    im = load_image(image, 0, 0)
    try:
        num = c_int(0)
        pnum = pointer(num)
        predict_image(net, im)
        dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
        # try/finally so the C-side allocations are released even if a
        # call below raises (the original leaked on any exception here).
        try:
            num = pnum[0]
            if nms:
                do_nms_obj(dets, num, meta.classes, nms)

            res = []
            for j in range(num):
                for i in range(meta.classes):
                    if dets[j].prob[i] > 0:
                        b = dets[j].bbox
                        res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
            res = sorted(res, key=lambda det: -det[1])
        finally:
            free_detections(dets, num)
    finally:
        free_image(im)
    return res
            
            
def do_mosaic(img, x, y, w, h, neighbor=9):
    """Pixelate the w*h region whose top-left corner is (x, y), in place.

    The original script called do_mosaic without defining it anywhere,
    which raised NameError at runtime; this supplies the implementation.
    Each *neighbor*-sized tile is flattened to its top-left pixel value.
    Works on any numpy image array (e.g. from cv2.imread).
    """
    for dy in range(0, h, neighbor):
        for dx in range(0, w, neighbor):
            img[y + dy:y + dy + neighbor, x + dx:x + dx + neighbor] = img[y + dy, x + dx]


if __name__ == "__main__":
    # darknet's C API expects byte strings for all paths.
    net = load_net("cfg/yolov3.cfg".encode('utf-8'), "cfg/yolov3.weights".encode('utf-8'), 0)
    meta = load_meta("cfg/coco.data".encode('utf-8'))
    r = detect(net, meta, "data/car.jpg".encode('utf-8'))
    img = cv2.imread("data/car.jpg")  # the image to annotate (same file that was detected)
    img_h, img_w = img.shape[:2]
    print("偵測到"+str(len(r))+"種物體")
    for k in range(len(r)):
        # r[k] = (name, probability, (center_x, center_y, width, height))
        width =  r[k][2][2]
        height = r[k][2][3]
        center_x = r[k][2][0]
        center_y = r[k][2][1]
        # Convert the center-based box to corner coordinates.
        x1=center_x-(width/2)
        x2=center_x+(width/2)
        y1=center_y-(height/2)
        y2=center_y+(height/2)
        print(str(width)+" "+str(height)+" "+str(center_x)+" "+str(center_y))
        print("第"+str(int(k+1))+"種物體是"+str(r[k][0])+"x1座標=",str(x1)[0:6]+" y1座標=",str(y1)[0:6]+" x2座標=",str(x2)[0:6]+" y2座標=",str(y2)[0:6])
        # Clamp to the image bounds so drawing/mosaicing never indexes
        # outside the array (boxes near the border can spill over).
        ix1 = max(0, min(int(x1), img_w))
        iy1 = max(0, min(int(y1), img_h))
        ix2 = max(0, min(int(x2), img_w))
        iy2 = max(0, min(int(y2), img_h))
        cv2.rectangle(img, (ix1, iy1), (ix2, iy2), (random.randint(100,255),random.randint(100,255),random.randint(100,255)), 2)
        do_mosaic(img, ix1, iy1, ix2 - ix1, iy2 - iy1)
    cv2.imshow('python',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()  # release the window once a key is pressed

留言

這個網誌中的熱門文章

以dlib實現人臉辨識打卡系統

使用Python達成影像形態學處理(不使用Opencv函式庫)

使用DLIB函式庫達成即時人臉辨識功能