iT邦幫忙

0

我用Raspberry Pi 4 + OpenCV + yolo v4 作了Smart 拼裝 cam ,如何改善效率呢?

  • 分享至 

  • xImage

用 Raspberry Pi 4 作了個智慧監視系統,東抄西剪,拼裝了還能用的程式:

  1. 先裝個 PIR ,有人時就會在 ThingSpeak 作記錄
  2. 後來裝了個 CAM ,有人時會 take cam 再 IFTTT + Line
  3. 因為太頻繁了,所以要清空一小時都沒人,再有人時再傳
  4. 後來發現yolo可在 Raspberry Pi上跑,參照 AIOT OpenCV書上的程式,加入yolo 的判斷,有人時延長 10 分鐘,拍照,結合,再傳 Line

因為都是書上,網路上亂拼裝,有什麼地方可以作精簡的嗎?
尤其是合併yolo 圖片那段,有時拍不到5 張就跳掉了,但傳照時就會合併到硬碟中的舊照片。

        if i==6 or (time.time()- begin_time > 300):
            cap.release()
            
            # COMBINE VERTICALLY            
#             for i in range (1,6):
#                 locals()['im' + str(i)] = cv2.imread ('/home/pi/Pictures/yolo' + str(i) + '.jpeg')
#                 print ( 'im' + str(i) )
            im1=cv2.imread('/home/pi/Pictures/yolo1.jpeg')
            im2=cv2.imread('/home/pi/Pictures/yolo2.jpeg')
            im3=cv2.imread('/home/pi/Pictures/yolo3.jpeg')          
            im4=cv2.imread('/home/pi/Pictures/yolo4.jpeg')        
            im5=cv2.imread('/home/pi/Pictures/yolo5.jpeg')      
            
            im_v = cv2.vconcat([im1,im2,im3, im4, im5]) 
            cv2.imwrite('/home/pi/Pictures/yolopic'  + '.jpeg', im_v)
            yolocamline()

完整的程式如下:


# Import Python header files
from gpiozero import MotionSensor
import time
import os

import sys
import http.client as httplib  # for Python 3 http.client as Python 2 httplib
import urllib
# import httplib, urllib
# for IFTTT n Line
import requests

key = 'D8622'  # ThingSpeak channel write API key used by tothingspeak()

# PIR motion sensor wired to GPIO pin 22
pir = MotionSensor(22)
print("Waiting for PIR to settle")
pir.wait_for_no_motion()
print("PIR Module Test (CTRL-C to exit)")
# Running count of motion detections in the current reporting window
pircounter = 0

# Minute-of-hour when the last ThingSpeak upload happened
lastmin = int(time.strftime("%M"))

#Report Raspberry Pi internal temperature to Thingspeak Channel
#MUST put ahead of try

def tothingspeak():
    """POST the current PIR motion count (module global ``pircounter``)
    to the ThingSpeak channel as ``field3``.

    Performs one blocking HTTP request and prints the response status.
    """
    params = urllib.parse.urlencode({'field3': str(int(pircounter)), 'key': key})
    # BUG FIX: the header name was misspelled "Content-typZZe", so the
    # form encoding was never actually declared to the server.
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = httplib.HTTPConnection("api.thingspeak.com:80")
    try:
        conn.request("POST", "/update", params, headers)
        response = conn.getresponse()
        print(response.status, response.reason)
        response.read()  # drain the body before closing
    finally:
        conn.close()  # always release the socket, even if the request raised

# Rolling history of the last six reporting windows' motion counts
# (index 0 = most recent); all zeros means nobody seen for a long stretch.
historylist =[0,0,0,0,0,0]

# Small in-memory record keeper backed by a plain Python list.
def torecords(latest):
    """Shift the rolling history one slot toward the tail and store
    *latest* at index 0.

    Mutates the module-level ``historylist`` in place; the oldest entry
    (the last element) drops off.
    """
    # Shift every element one position to the right in a single slice move.
    historylist[1:] = historylist[:-1]
    print (historylist)
    historylist[0] = latest
    print (historylist)

# take picture and sent to line
def takecamline():
    """Grab one webcam frame, save it to disk, and push it to LINE Notify.

    Uses the module-level ``cap`` capture object and the WIDTH/HEIGHT
    globals; releases the camera before returning.
    """
    # Re-open the camera if a previous session released it.
    if not cap.isOpened():
        cap.open(0)

    result, frame = cap.read()
    if not result:
        # Camera gave no frame; skip the notification instead of crashing
        # inside cv2.resize on a None image.
        cap.release()
        print ('camera read failed, no line msg')
        return
    frame = cv2.resize(frame, (WIDTH, HEIGHT))
    cv2.imwrite( '/home/pi/Pictures/yolo' + '.jpeg', frame)

    # LINE Notify access token
    token = 'CCCCCCCCCCCCCCCC'

    # Message to send
    message = 'Sb at Study' + ' ' + str(time.strftime("%Y_%m_%d-%H_%M_%S"))

    # HTTP headers and form data
    headers = { "Authorization": "Bearer " + token }
    data = { 'message': message }

    # BUG FIX: the image file was opened but never closed (handle leak,
    # and it shadowed the captured frame variable); a context manager
    # closes it deterministically after the upload.
    with open('/home/pi/Pictures/yolo.jpeg', 'rb') as image_file:
        files = { 'imageFile': image_file }
        # Send the POST request via requests
        requests.post("https://notify-api.line.me/api/notify",
            headers = headers, data = data, files = files)
    cap.release()
    print ('send line msg')


def yolocamline():
    """Send the combined YOLO snapshot (/home/pi/Pictures/yolopic.jpeg)
    to LINE Notify with a timestamped message."""
    # LINE Notify access token
    token = 'djjdjfdjfjaeafdjsf'

    # Message to send
    message = 'Sb at Study yolo model' + ' ' + str(time.strftime("%Y_%m_%d-%H_%M_%S"))

    # HTTP headers and form data
    headers = { "Authorization": "Bearer " + token }
    data = { 'message': message }

    # BUG FIX: the picture file was opened but never closed (handle leak);
    # the context manager guarantees it is closed after the upload.
    with open('/home/pi/Pictures/yolopic.jpeg', 'rb') as image_file:
        files = { 'imageFile': image_file }
        # Send the POST request via requests
        requests.post("https://notify-api.line.me/api/notify",
            headers = headers, data = data, files = files)
    print ('yolo send line msg')



import cv2
import numpy as np
import time

def initNet():
    """Load the YOLOv4-tiny detection model from local config/weight files.

    Returns a tuple ``(model, names, colors)``: the OpenCV DNN detection
    model, the class-name list from coco.names, and one random colour per
    class for drawing boxes.
    """
    config_path = 'yolov4-tiny.cfg'
    weights_path = 'yolov4-tiny.weights'
    names_path = 'coco.names'

    # Read the object class names and pick a random box colour per class.
    with open(names_path, 'r') as name_file:
        names = [entry.strip() for entry in name_file]
    colors = np.random.uniform(0, 255, size=(len(names), 3))

    # Build the detection network.
    net = cv2.dnn.readNet(config_path, weights_path)
    model = cv2.dnn_DetectionModel(net)
    model.setInputParams(size=(416, 416), scale=1/255.0)

    # Camera frames arrive as BGR; have the model swap to RGB on input.
    model.setInputSwapRB(True)

    return model, names, colors
    
def nnProcess(image, model):
    """Run the detector on *image* and return ``(classes, confs, boxes)``.

    Uses a 0.6 confidence threshold and a 0.3 NMS threshold.
    """
    detections = model.detect(image, 0.6, 0.3)
    return detections

def drawBox(image, classes, confs, boxes, names, colors):
    """Return a copy of *image* with a labelled rectangle per detection.

    Side effect: stores the LAST drawn label in the module-level global
    ``whatsthis`` (reset to '' on entry, so it is empty when nothing was
    detected) — the caller inspects it to decide whether a person was seen.
    """
    global whatsthis
    whatsthis = ''

    annotated = image.copy()
    for classid, conf, box in zip(classes, confs, boxes):
        x, y, w, h = box
        label = '{}: {:.2f}'.format(names[int(classid)], float(conf))

        whatsthis = str(label)
        print(label)
        print('whatsthis ' + whatsthis)

        color = colors[int(classid)]
        cv2.rectangle(annotated, (x, y), (x + w, y + h), color, 2)
        cv2.putText(annotated, label, (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2
        )
    return annotated

# Build the detection model once at import time and open the default webcam.
model, names, colors = initNet()
cap = cv2.VideoCapture(0)
# Work at a reduced width while preserving the camera's aspect ratio.
ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
WIDTH = 180
HEIGHT = int(WIDTH / ratio)


def yoloactive():
    """Watch the webcam with YOLO, save up to five frames that contain a
    person, stack them vertically and send the combined picture to LINE.

    Stops after five captures or after 300 seconds overall, whichever
    comes first; releases the camera on exit.
    """
    if not cap.isOpened():
        cap.open(0)

    captured = []  # frames captured during THIS session only
    # BUG FIX: the overall 300 s timeout must be measured from a fixed
    # session start; the original reset begin_time every iteration (it was
    # shared with the fps measurement), so the timeout never fired.
    session_start = time.time()
    while True:
        frame_start = time.time()
        ret, frame = cap.read()
        frame = cv2.resize(frame, (WIDTH, HEIGHT))

        classes, confs, boxes = nnProcess(frame, model)
        frame = drawBox(frame, classes, confs, boxes, names, colors)

        fps = 'fps: {:.2f}'.format(1 / (time.time() - frame_start))
        cv2.putText(frame, fps, (10, 30), 
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 204, 255), 2
        )

        #cv2.imshow('video', frame)

        time.sleep(1)

        # drawBox left the last detection label in the global `whatsthis`.
        if whatsthis.startswith('person'):
            print ( whatsthis + '抓到了 ' + str(len(captured) + 1) +'th')
            cv2.imwrite( "/home/pi/Pictures/yolo" + str(len(captured) + 1) + '.jpeg', frame)
            captured.append(frame)
            time.sleep(1)
        else:
            print ('沒')

        if len(captured) == 5 or (time.time() - session_start > 300):
            cap.release()

            # BUG FIX: only stack the frames captured this session instead
            # of re-reading yolo1..yolo5.jpeg from disk — the old code
            # merged stale photos (or crashed on a missing file, since
            # cv2.imread returns None) when fewer than 5 were taken.
            if captured:
                im_v = cv2.vconcat(captured)
                cv2.imwrite('/home/pi/Pictures/yolopic'  + '.jpeg', im_v)
                yolocamline()

            cv2.destroyAllWindows()
            break
    




# Main surveillance loop (CTRL-C to quit):
#  - poll the PIR sensor roughly once per second and log its state
#  - when the wall-clock minute has advanced more than 10 past `lastmin`
#    (or wrapped past the hour), push `pircounter` to ThingSpeak and into
#    the rolling `historylist`, then reset the counter
#  - when the history shows a long quiet window and motion just started,
#    send a webcam photo to LINE, run the YOLO capture session, and fire
#    an IFTTT webhook to Telegram
try:
    # Loop until the user quits with CTRL-C
    while True:
        cap.release()
        if pir.motion_detected == True: 
            print(" Motion detected!" + " " +str(time.strftime("%Y_%m_%d-%H_%M_%S")))
            time.sleep(1)
        elif pir.motion_detected == False: 
            print(" No Motion" + " " + str(time.strftime("%M")) + "-" + str(lastmin))
            print (historylist)
            time.sleep(1)

        latestpir = int(pir.motion_detected)
        # The minute difference goes negative when the hour wraps, so the
        # two conditions together mean "10+ minutes have passed".
        if (int(time.strftime("%M")) - lastmin > 10) or (int(time.strftime("%M")) - lastmin < 0):
            print ("Starting ThingSpeak!" + time.strftime("%S"))
            tothingspeak()
            torecords(pircounter)
            print ("Ended ThingSpeak!" + time.strftime("%S"))
            time.sleep(1)
            pircounter = 0
            lastmin = int(time.strftime("%M"))
        else: 
            pircounter = pircounter +latestpir
            print(str(pircounter))     


        # If nobody was around for the whole history window and somebody
        # just showed up: photo to LINE, then YOLO session, then IFTTT.
        if (historylist == [0,0,0,0,0,0]) and (pircounter == 1):

            #2. trans pic to line
            takecamline()
            time.sleep(2)
            #1. yolo model active 
            yoloactive()
            time.sleep(1)

            #3. IFTTT to telegram
            requests.post('https://maker.ifttt.com/trigger/sbAtHome/with/key/dfffffXhHfDbM?value1=study')
            pircounter = 2 # prevent from stay at 1 and keep taking pic and line

except KeyboardInterrupt: print(" Quit")

圖片
  直播研討會
圖片
{{ item.channelVendor }} {{ item.webinarstarted }} |
{{ formatDate(item.duration) }}
直播中

尚未有邦友回答

立即登入回答