Mirror of https://github.com/blakeblackshear/frigate.git
use a different method for blur and contrast to reduce CPU (#6940)
* use a different method for blur and contrast to reduce CPU
* blur with radius instead
* use faster interpolation for motion
* improve contrast based on averages
* increase default threshold to 30
* ensure mask is applied after contrast improvement
* update opencv
* update benchmark script
parent d2a2643cd6
commit d51197eaa2
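In outline: CLAHE and a fixed-kernel cv2.GaussianBlur are replaced by a percentile-based contrast stretch averaged over recent frames plus scipy's gaussian_filter with an explicit radius, and the motion-frame resize switches to a cheaper interpolation. A minimal standalone sketch of the contrast-and-blur idea (function and variable names here are illustrative, and the masking step from the real code is omitted):

import numpy as np
from scipy.ndimage import gaussian_filter

def stretch_and_blur(gray, history, idx):
    """Contrast-stretch `gray` using (min, max) percentiles averaged over
    a rolling history, then apply a small gaussian blur."""
    minval = np.percentile(gray, 4).astype(np.uint8)
    maxval = np.percentile(gray, 96).astype(np.uint8)
    if minval < maxval:  # skip single-color frames
        history[idx] = [minval, maxval]
        idx = (idx + 1) % len(history)
        avg_min, avg_max = np.mean(history, axis=0)
        gray = np.clip(gray, avg_min, avg_max)
        gray = (((gray - avg_min) / (avg_max - avg_min)) * 255).astype(np.uint8)
    # radius=1 caps the kernel at 3x3; the radius argument needs a recent SciPy
    return gaussian_filter(gray, sigma=1, radius=1), idx

Here `history` would be an (N, 2) uint8 buffer with its max column initialized to 255, exactly as the constructor in the diff below does.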
@@ -12,16 +12,32 @@ from frigate.util import create_mask
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
 # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
-cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
+cap = cv2.VideoCapture("debug/motion_test_clips/lawn_mower_night_1.mp4")
 # cap = cv2.VideoCapture("airport.mp4")
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)
 frame_shape = (height, width, 3)
+# Nick back:
+# "1280,0,1280,316,1170,216,1146,126,1016,127,979,82,839,0",
+# "310,350,300,402,224,405,241,354",
+# "378,0,375,26,0,23,0,0",
+# Front door:
+# "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+# "336,833,438,1024,346,1093,103,1052,24,814",
+# Back
+# "1855,0,1851,100,1289,96,1105,161,1045,119,890,121,890,0",
+# "505,95,506,138,388,153,384,114",
+# "689,72,689,122,549,134,547,89",
+# "261,134,264,176,169,195,167,158",
+# "145,159,146,202,70,220,65,183",
 
 mask = create_mask(
     (height, width),
-    [],
+    [
+        "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+        "336,833,438,1024,346,1093,103,1052,24,814",
+    ],
 )
 
 # create the motion config
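The polygon masks above are flat "x1,y1,x2,y2,…" strings. frigate's create_mask turns such strings into an image mask; the following is only a rough sketch of that idea with OpenCV, not the actual implementation:

import cv2
import numpy as np

def polygon_strings_to_mask(frame_shape, polygons):
    """Rasterize 'x1,y1,x2,y2,...' polygon strings into a mask where
    masked-out regions are 0 and everything else stays 255."""
    mask = np.full(frame_shape, 255, dtype=np.uint8)
    for poly in polygons:
        coords = list(map(int, poly.split(",")))
        points = np.array(coords, dtype=np.int32).reshape(-1, 2)
        cv2.fillPoly(mask, [points], 0)  # zero out the masked polygon
    return mask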
@@ -29,7 +45,7 @@ motion_config_1 = MotionConfig()
 motion_config_1.mask = np.zeros((height, width), np.uint8)
 motion_config_1.mask[:] = mask
 # motion_config_1.improve_contrast = 1
-# motion_config_1.frame_height = 150
+motion_config_1.frame_height = 150
 # motion_config_1.frame_alpha = 0.02
 # motion_config_1.threshold = 30
 # motion_config_1.contour_area = 10
@@ -38,10 +54,11 @@ motion_config_2 = MotionConfig()
 motion_config_2.mask = np.zeros((height, width), np.uint8)
 motion_config_2.mask[:] = mask
 # motion_config_2.improve_contrast = 1
-# motion_config_2.frame_height = 150
+motion_config_2.frame_height = 150
 # motion_config_2.frame_alpha = 0.01
-# motion_config_2.threshold = 20
+motion_config_2.threshold = 20
 # motion_config.contour_area = 10
+
 save_images = True
 
 improved_motion_detector_1 = ImprovedMotionDetector(
@@ -52,8 +69,6 @@ improved_motion_detector_1 = ImprovedMotionDetector(
     threshold=mp.Value("i", motion_config_1.threshold),
     contour_area=mp.Value("i", motion_config_1.contour_area),
     name="default",
-    clipLimit=2.0,
-    tileGridSize=(8, 8),
 )
 improved_motion_detector_1.save_images = save_images
 
@@ -280,7 +280,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 20
+  threshold: 30
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
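The threshold is compared against the per-pixel difference between the current frame and the running average, so raising the default from 20 to 30 means a pixel must now differ by more than 30 grey levels to count as motion. A hedged sketch of that comparison (array shapes and values here are made up):

import cv2
import numpy as np

# made-up stand-ins for the downscaled frame and the running average
resized_frame = np.random.randint(0, 255, (150, 266), dtype=np.uint8)
avg_frame = np.random.randint(0, 255, (150, 266), dtype=np.uint8)

frame_delta = cv2.absdiff(resized_frame, avg_frame)
# only deltas above the threshold (default now 30) survive as motion pixels
_, thresh = cv2.threshold(frame_delta, 30, 255, cv2.THRESH_BINARY)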
@@ -187,7 +187,7 @@ class RecordConfig(FrigateBaseModel):
 
 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=20,
+        default=30,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
@@ -1,6 +1,7 @@
 import cv2
 import imutils
 import numpy as np
+from scipy.ndimage import gaussian_filter
 
 from frigate.config import MotionConfig
 from frigate.motion import MotionDetector
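One dependency note: scipy.ndimage.gaussian_filter truncates its kernel, and the radius keyword that pins the kernel size explicitly is a fairly recent addition (SciPy 1.10, as far as I can tell), so the environment needs a recent SciPy. A quick check:

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.randint(0, 255, (150, 266), dtype=np.uint8)
# radius=1 limits the kernel to 3x3 in each axis, keeping the blur cheap
blurred = gaussian_filter(img, sigma=1, radius=1)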
@@ -15,9 +16,10 @@ class ImprovedMotionDetector(MotionDetector):
         improve_contrast,
         threshold,
         contour_area,
-        clipLimit=2.0,
-        tileGridSize=(2, 2),
         name="improved",
+        blur_radius=1,
+        interpolation=cv2.INTER_NEAREST,
+        contrast_frame_history=50,
     ):
         self.name = name
         self.config = config
@@ -28,13 +30,12 @@ class ImprovedMotionDetector(MotionDetector):
             config.frame_height * frame_shape[1] // frame_shape[0],
         )
         self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
-        self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
         self.motion_frame_count = 0
         self.frame_counter = 0
         resized_mask = cv2.resize(
             config.mask,
             dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
-            interpolation=cv2.INTER_LINEAR,
+            interpolation=cv2.INTER_AREA,
         )
         self.mask = np.where(resized_mask == [0])
         self.save_images = False
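cv2.INTER_AREA resamples using pixel-area relation, which OpenCV's documentation recommends for shrinking images, so it is a natural fit for downscaling the mask once at startup; the per-frame motion resize instead defaults to the cheaper cv2.INTER_NEAREST. For instance:

import cv2
import numpy as np

# downscale a hypothetical 1080p mask to a 150-pixel-high motion frame
mask = np.zeros((1080, 1920), np.uint8)
small = cv2.resize(mask, dsize=(266, 150), interpolation=cv2.INTER_AREA)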
@@ -42,7 +43,11 @@ class ImprovedMotionDetector(MotionDetector):
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
-        self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
+        self.blur_radius = blur_radius
+        self.interpolation = interpolation
+        self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
+        self.contrast_values[:, 1:2] = 255
+        self.contrast_values_index = 0
 
     def detect(self, frame):
         motion_boxes = []
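Initializing the max column to 255 keeps the averaged range wide until real measurements accumulate, so the stretch starts out as nearly a no-op instead of over-amplifying the first frames. A quick illustration:

import numpy as np

contrast_values = np.zeros((50, 2), np.uint8)
contrast_values[:, 1:2] = 255  # column 1 holds the per-frame max

contrast_values[0] = [30, 200]  # first real (min, max) measurement
avg_min, avg_max = np.mean(contrast_values, axis=0)
print(avg_min, avg_max)  # 0.6 253.9 -- still close to the full 0-255 range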
@@ -53,27 +58,44 @@ class ImprovedMotionDetector(MotionDetector):
         resized_frame = cv2.resize(
             gray,
             dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
-            interpolation=cv2.INTER_LINEAR,
+            interpolation=self.interpolation,
         )
 
         if self.save_images:
             resized_saved = resized_frame.copy()
 
-        resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
-
-        if self.save_images:
-            blurred_saved = resized_frame.copy()
-
         # Improve contrast
         if self.improve_contrast.value:
-            resized_frame = self.clahe.apply(resized_frame)
+            # TODO tracking moving average of min/max to avoid sudden contrast changes
+            minval = np.percentile(resized_frame, 4).astype(np.uint8)
+            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
+            # skip contrast calcs if the image is a single color
+            if minval < maxval:
+                # keep track of the last 50 contrast values
+                self.contrast_values[self.contrast_values_index] = [minval, maxval]
+                self.contrast_values_index += 1
+                if self.contrast_values_index == len(self.contrast_values):
+                    self.contrast_values_index = 0
+
+                avg_min, avg_max = np.mean(self.contrast_values, axis=0)
+
+                resized_frame = np.clip(resized_frame, avg_min, avg_max)
+                resized_frame = (
+                    ((resized_frame - avg_min) / (avg_max - avg_min)) * 255
+                ).astype(np.uint8)
 
         if self.save_images:
             contrasted_saved = resized_frame.copy()
 
         # mask frame
+        # this has to come after contrast improvement
         resized_frame[self.mask] = [255]
 
+        resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)
+
+        if self.save_images:
+            blurred_saved = resized_frame.copy()
+
         if self.save_images or self.calibrating:
             self.frame_counter += 1
         # compare to average
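Concretely, with averaged bounds of 30 and 200, a mid-grey pixel at 115 maps to (115 - 30) / (200 - 30) * 255 ≈ 127, while anything at or below 30 clips to 0 and at or above 200 clips to 255:

import numpy as np

avg_min, avg_max = 30.0, 200.0
pixels = np.array([10, 30, 115, 200, 240], dtype=np.uint8)

clipped = np.clip(pixels, avg_min, avg_max)
stretched = (((clipped - avg_min) / (avg_max - avg_min)) * 255).astype(np.uint8)
print(stretched)  # [  0   0 127 255 255]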
@@ -134,8 +156,8 @@ class ImprovedMotionDetector(MotionDetector):
             )
             frames = [
                 cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
-                cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
                 cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
                 cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
                 cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
                 thresh_dilated,
@@ -6,7 +6,7 @@ matplotlib == 3.7.*
 mypy == 0.942
 numpy == 1.23.*
 onvif_zeep == 0.2.12
-opencv-python-headless == 4.5.5.*
+opencv-python-headless == 4.7.0.*
 paho-mqtt == 1.6.*
 peewee == 3.16.*
 peewee_migrate == 1.10.*