Custom classes for Process and Metrics (#13950)

* Subclass Process for audio_process

* Introduce custom mp.Process subclass

In preparation for switching the multiprocessing start method away from
"fork", we can no longer rely on os.fork cloning the logging state at fork
time. Instead, logging has to be set up before the business logic of each
process runs (a minimal sketch of this pattern follows the change list below).

* Make camera_metrics into a class

* Make ptz_metrics into a class

* Fix PtzMotionEstimator.ptz_metrics type annotation

* Remove pointless variables

* Do not start audio processor when no audio cameras are configured
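
Below is a minimal sketch of the pattern described above, using only the standard library. It is not the commit's util.Process (that implementation appears in frigate/util/process.py further down), and LoggedProcess is a hypothetical name: the parent owns a QueueListener, and each child replaces whatever handlers it inherited with a QueueHandler before doing any work, so log records still reach the parent under the "spawn" start method.

import logging
import multiprocessing as mp
from logging.handlers import QueueHandler, QueueListener

class LoggedProcess(mp.Process):
    def __init__(self, log_queue: mp.Queue, **kwargs):
        super().__init__(**kwargs)
        # Stored before start(), so it is pickled into the child under "spawn".
        self._log_queue = log_queue

    def run(self) -> None:
        # Runs in the child: drop any inherited handlers and forward all
        # records to the parent's listener through the shared queue.
        logging.basicConfig(level=logging.INFO, handlers=[], force=True)
        logging.getLogger().addHandler(QueueHandler(self._log_queue))
        logging.getLogger(self.name).info("child logging is wired up")

if __name__ == "__main__":
    mp.set_start_method("spawn")
    log_queue: mp.Queue = mp.Queue()
    listener = QueueListener(log_queue, logging.StreamHandler())
    listener.start()
    worker = LoggedProcess(log_queue, name="worker")
    worker.start()
    worker.join()
    listener.stop()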
gtsiam 2024-09-27 15:53:23 +03:00 committed by GitHub
parent 1f328be1bd
commit c0bd3b362c
16 changed files with 471 additions and 448 deletions

View File

@ -4,6 +4,7 @@ from statistics import mean
import numpy as np
import frigate.util as util
from frigate.config import DetectorTypeEnum
from frigate.object_detection import (
ObjectDetectProcess,
@ -90,7 +91,7 @@ edgetpu_process_2 = ObjectDetectProcess(
)
for x in range(0, 10):
camera_process = mp.Process(
camera_process = util.Process(
target=start, args=(x, 300, detection_queue, events[str(x)])
)
camera_process.daemon = True

View File

@ -1,6 +1,5 @@
import argparse
import faulthandler
import logging
import signal
import sys
import threading
@ -9,29 +8,20 @@ from pydantic import ValidationError
from frigate.app import FrigateApp
from frigate.config import FrigateConfig
from frigate.log import log_thread
from frigate.log import setup_logging
def main() -> None:
faulthandler.enable()
# Clear all existing handlers.
logging.basicConfig(
level=logging.INFO,
handlers=[],
force=True,
)
# Setup the logging thread
setup_logging()
threading.current_thread().name = "frigate"
# Make sure we exit cleanly on SIGTERM.
signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit())
run()
@log_thread()
def run() -> None:
# Parse the cli arguments.
parser = argparse.ArgumentParser(
prog="Frigate",

View File

@ -6,7 +6,7 @@ import secrets
import shutil
from multiprocessing import Queue
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
from typing import Any, Optional
import psutil
import uvicorn
@ -14,8 +14,10 @@ from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
import frigate.util as util
from frigate.api.auth import hash_password
from frigate.api.fastapi_app import create_fastapi_app
from frigate.camera import CameraMetrics, PTZMetrics
from frigate.comms.config_updater import ConfigPublisher
from frigate.comms.dispatcher import Communicator, Dispatcher
from frigate.comms.event_metadata_updater import (
@ -37,7 +39,7 @@ from frigate.const import (
RECORD_DIR,
)
from frigate.embeddings import EmbeddingsContext, manage_embeddings
from frigate.events.audio import listen_to_audio
from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup
from frigate.events.external import ExternalEventProcessor
from frigate.events.maintainer import EventProcessor
@ -65,7 +67,6 @@ from frigate.stats.emitter import StatsEmitter
from frigate.stats.util import stats_init
from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor
from frigate.types import CameraMetricsTypes, PTZMetricsTypes
from frigate.util.builtin import empty_and_close_queue
from frigate.util.object import get_camera_regions_grid
from frigate.version import VERSION
@ -76,6 +77,8 @@ logger = logging.getLogger(__name__)
class FrigateApp:
audio_process: Optional[mp.Process] = None
# TODO: Fix FrigateConfig usage, so we can properly annotate it here without mypy erroring out.
def __init__(self, config: Any) -> None:
self.stop_event: MpEvent = mp.Event()
@ -84,8 +87,8 @@ class FrigateApp:
self.detection_out_events: dict[str, MpEvent] = {}
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.camera_metrics: dict[str, CameraMetricsTypes] = {}
self.ptz_metrics: dict[str, PTZMetricsTypes] = {}
self.camera_metrics: dict[str, CameraMetrics] = {}
self.ptz_metrics: dict[str, PTZMetrics] = {}
self.processes: dict[str, int] = {}
self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
self.config = config
@ -106,64 +109,14 @@ class FrigateApp:
logger.debug(f"Skipping directory: {d}")
def init_camera_metrics(self) -> None:
# create camera_metrics
for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
"camera_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"skipped_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"process_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"detection_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"detection_frame": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"read_start": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ffmpeg_pid": mp.Value("i", 0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"frame_queue": mp.Queue(maxsize=2),
"capture_process": None,
"process": None,
"audio_rms": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"audio_dBFS": mp.Value("d", 0.0), # type: ignore[typeddict-item]
}
self.ptz_metrics[camera_name] = {
"ptz_autotracker_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].onvif.autotracking.enabled,
),
"ptz_tracking_active": mp.Event(),
"ptz_motor_stopped": mp.Event(),
"ptz_reset": mp.Event(),
"ptz_start_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_stop_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_frame_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_zoom_level": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_max_zoom": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_min_zoom": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
}
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set()
self.camera_metrics[camera_name] = CameraMetrics()
self.ptz_metrics[camera_name] = PTZMetrics(
autotracker_enabled=self.config.cameras[
camera_name
].onvif.autotracking.enabled
)
def init_queues(self) -> None:
# Queue for cameras to push tracked objects to
@ -251,7 +204,7 @@ class FrigateApp:
self.processes["go2rtc"] = proc.info["pid"]
def init_recording_manager(self) -> None:
recording_process = mp.Process(
recording_process = util.Process(
target=manage_recordings,
name="recording_manager",
args=(self.config,),
@ -263,7 +216,7 @@ class FrigateApp:
logger.info(f"Recording process started: {recording_process.pid}")
def init_review_segment_manager(self) -> None:
review_segment_process = mp.Process(
review_segment_process = util.Process(
target=manage_review_segments,
name="review_segment_manager",
args=(self.config,),
@ -281,7 +234,7 @@ class FrigateApp:
# Create a client for other processes to use
self.embeddings = EmbeddingsContext()
embedding_process = mp.Process(
embedding_process = util.Process(
target=manage_embeddings,
name="embeddings_manager",
args=(self.config,),
@ -422,7 +375,7 @@ class FrigateApp:
self.detected_frames_processor.start()
def start_video_output_processor(self) -> None:
output_processor = mp.Process(
output_processor = util.Process(
target=output_frames,
name="output_processor",
args=(self.config,),
@ -451,7 +404,7 @@ class FrigateApp:
logger.info(f"Camera processor not started for disabled camera {name}")
continue
camera_process = mp.Process(
camera_process = util.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
@ -466,9 +419,9 @@ class FrigateApp:
self.ptz_metrics[name],
self.region_grids[name],
),
daemon=True,
)
camera_process.daemon = True
self.camera_metrics[name]["process"] = camera_process
self.camera_metrics[name].process = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
@ -478,31 +431,27 @@ class FrigateApp:
logger.info(f"Capture process not started for disabled camera {name}")
continue
capture_process = mp.Process(
capture_process = util.Process(
target=capture_camera,
name=f"camera_capture:{name}",
args=(name, config, self.shm_frame_count(), self.camera_metrics[name]),
)
capture_process.daemon = True
self.camera_metrics[name]["capture_process"] = capture_process
self.camera_metrics[name].capture_process = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def start_audio_processors(self) -> None:
self.audio_process = None
if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0:
self.audio_process = mp.Process(
target=listen_to_audio,
name="audio_capture",
args=(
self.config,
self.camera_metrics,
),
)
self.audio_process.daemon = True
def start_audio_processor(self) -> None:
audio_cameras = [
c
for c in self.config.cameras.values()
if c.enabled and c.audio.enabled_in_config
]
if audio_cameras:
self.audio_process = AudioProcessor(audio_cameras, self.camera_metrics)
self.audio_process.start()
self.processes["audio_detector"] = self.audio_process.pid or 0
logger.info(f"Audio process started: {self.audio_process.pid}")
def start_timeline_processor(self) -> None:
self.timeline_processor = TimelineProcessor(
@ -640,7 +589,7 @@ class FrigateApp:
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
self.start_audio_processors()
self.start_audio_processor()
self.start_storage_maintainer()
self.init_external_event_processor()
self.start_stats_emitter()
@ -686,28 +635,27 @@ class FrigateApp:
).execute()
# stop the audio process
if self.audio_process is not None:
if self.audio_process:
self.audio_process.terminate()
self.audio_process.join()
# ensure the capture processes are done
for camera in self.camera_metrics.keys():
capture_process = self.camera_metrics[camera]["capture_process"]
for camera, metrics in self.camera_metrics.items():
capture_process = metrics.capture_process
if capture_process is not None:
logger.info(f"Waiting for capture process for {camera} to stop")
capture_process.terminate()
capture_process.join()
# ensure the camera processors are done
for camera in self.camera_metrics.keys():
camera_process = self.camera_metrics[camera]["process"]
for camera, metrics in self.camera_metrics.items():
camera_process = metrics.process
if camera_process is not None:
logger.info(f"Waiting for process for {camera} to stop")
camera_process.terminate()
camera_process.join()
logger.info(f"Closing frame queue for {camera}")
frame_queue = self.camera_metrics[camera]["frame_queue"]
empty_and_close_queue(frame_queue)
empty_and_close_queue(metrics.frame_queue)
# ensure the detectors are done
for detector in self.detectors.values():

View File

@ -0,0 +1,68 @@
import multiprocessing as mp
from multiprocessing.sharedctypes import Synchronized
from multiprocessing.synchronize import Event
from typing import Optional
class CameraMetrics:
camera_fps: Synchronized
detection_fps: Synchronized
detection_frame: Synchronized
process_fps: Synchronized
skipped_fps: Synchronized
read_start: Synchronized
audio_rms: Synchronized
audio_dBFS: Synchronized
frame_queue: mp.Queue
process: Optional[mp.Process]
capture_process: Optional[mp.Process]
ffmpeg_pid: Synchronized
def __init__(self):
self.camera_fps = mp.Value("d", 0)
self.detection_fps = mp.Value("d", 0)
self.detection_frame = mp.Value("d", 0)
self.process_fps = mp.Value("d", 0)
self.skipped_fps = mp.Value("d", 0)
self.read_start = mp.Value("d", 0)
self.audio_rms = mp.Value("d", 0)
self.audio_dBFS = mp.Value("d", 0)
self.frame_queue = mp.Queue(maxsize=2)
self.process = None
self.capture_process = None
self.ffmpeg_pid = mp.Value("i", 0)
class PTZMetrics:
autotracker_enabled: Synchronized
start_time: Synchronized
stop_time: Synchronized
frame_time: Synchronized
zoom_level: Synchronized
max_zoom: Synchronized
min_zoom: Synchronized
tracking_active: Event
motor_stopped: Event
reset: Event
def __init__(self, *, autotracker_enabled: bool):
self.autotracker_enabled = mp.Value("i", autotracker_enabled)
self.start_time = mp.Value("d", 0)
self.stop_time = mp.Value("d", 0)
self.frame_time = mp.Value("d", 0)
self.zoom_level = mp.Value("d", 0)
self.max_zoom = mp.Value("d", 0)
self.min_zoom = mp.Value("d", 0)
self.tracking_active = mp.Event()
self.motor_stopped = mp.Event()
self.reset = mp.Event()
self.motor_stopped.set()
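
A minimal usage sketch of the classes above, assuming the frigate package from this commit is importable; fake_capture is a hypothetical stand-in for Frigate's real capture/track processes. Because every field is an mp.Value, mp.Event, or mp.Queue, an instance can be handed to a child process at creation time and both sides observe the same state.

import multiprocessing as mp

from frigate.camera import CameraMetrics, PTZMetrics

def fake_capture(metrics: CameraMetrics) -> None:
    # Writes go to shared memory, so the parent sees them after join().
    metrics.camera_fps.value = 5.0
    metrics.ffmpeg_pid.value = 1234

if __name__ == "__main__":
    metrics = CameraMetrics()
    child = mp.Process(target=fake_capture, args=(metrics,))
    child.start()
    child.join()
    print(metrics.camera_fps.value, metrics.ffmpeg_pid.value)  # 5.0 1234

    ptz = PTZMetrics(autotracker_enabled=False)
    assert ptz.motor_stopped.is_set()  # the constructor starts motors "stopped"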

View File

@ -6,6 +6,7 @@ import logging
from abc import ABC, abstractmethod
from typing import Any, Callable, Optional
from frigate.camera import PTZMetrics
from frigate.comms.config_updater import ConfigPublisher
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import (
@ -19,7 +20,6 @@ from frigate.const import (
)
from frigate.models import Event, Previews, Recordings, ReviewSegment
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
from frigate.types import PTZMetricsTypes
from frigate.util.object import get_camera_regions_grid
from frigate.util.services import restart_frigate
@ -53,7 +53,7 @@ class Dispatcher:
config: FrigateConfig,
config_updater: ConfigPublisher,
onvif: OnvifController,
ptz_metrics: dict[str, PTZMetricsTypes],
ptz_metrics: dict[str, PTZMetrics],
communicators: list[Communicator],
) -> None:
self.config = config
@ -251,16 +251,16 @@ class Dispatcher:
"Autotracking must be enabled in the config to be turned on via MQTT."
)
return
if not self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
if not self.ptz_metrics[camera_name].autotracker_enabled.value:
logger.info(f"Turning on ptz autotracker for {camera_name}")
self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = True
self.ptz_metrics[camera_name]["ptz_start_time"].value = 0
self.ptz_metrics[camera_name].autotracker_enabled.value = True
self.ptz_metrics[camera_name].start_time.value = 0
ptz_autotracker_settings.enabled = True
elif payload == "OFF":
if self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
if self.ptz_metrics[camera_name].autotracker_enabled.value:
logger.info(f"Turning off ptz autotracker for {camera_name}")
self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = False
self.ptz_metrics[camera_name]["ptz_start_time"].value = 0
self.ptz_metrics[camera_name].autotracker_enabled.value = False
self.ptz_metrics[camera_name].start_time.value = 0
ptz_autotracker_settings.enabled = False
self.publish(f"{camera_name}/ptz_autotracker/state", payload, retain=True)

View File

@ -2,21 +2,21 @@
import datetime
import logging
import multiprocessing as mp
import signal
import sys
import threading
import time
from types import FrameType
from typing import Optional, Tuple
from typing import Tuple
import numpy as np
import requests
from setproctitle import setproctitle
import frigate.util as util
from frigate.camera import CameraMetrics
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig
from frigate.config import CameraConfig, CameraInput, FfmpegConfig
from frigate.const import (
AUDIO_DURATION,
AUDIO_FORMAT,
@ -28,9 +28,7 @@ from frigate.const import (
from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe
from frigate.object_detection import load_labels
from frigate.types import CameraMetricsTypes
from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.services import listen
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
try:
@ -38,8 +36,6 @@ try:
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__)
def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0]
@ -69,108 +65,65 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
)
def listen_to_audio(
config: FrigateConfig,
camera_metrics: dict[str, CameraMetricsTypes],
) -> None:
stop_event = mp.Event()
audio_threads: list[threading.Thread] = []
class AudioProcessor(util.Process):
def __init__(
self,
cameras: list[CameraConfig],
camera_metrics: dict[str, CameraMetrics],
):
super().__init__(name="frigate.audio_manager", daemon=True)
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
stop_event.set()
self.logger = logging.getLogger(self.name)
self.camera_metrics = camera_metrics
self.cameras = cameras
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
def run(self) -> None:
stop_event = threading.Event()
audio_threads: list[AudioEventMaintainer] = []
threading.current_thread().name = "process:audio_manager"
setproctitle("frigate.audio_manager")
listen()
threading.current_thread().name = "process:audio_manager"
signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit())
for camera in config.cameras.values():
if camera.enabled and camera.audio.enabled_in_config:
audio = AudioEventMaintainer(
camera,
camera_metrics,
stop_event,
)
audio_threads.append(audio)
audio.start()
if len(self.cameras) == 0:
return
for thread in audio_threads:
thread.join()
try:
for camera in self.cameras:
audio_thread = AudioEventMaintainer(
camera,
self.camera_metrics,
stop_event,
)
audio_threads.append(audio_thread)
audio_thread.start()
logger.info("Exiting audio detector...")
self.logger.info(f"Audio processor started (pid: {self.pid})")
while True:
signal.pause()
finally:
stop_event.set()
for thread in audio_threads:
thread.join(1)
if thread.is_alive():
self.logger.info(f"Waiting for thread {thread.name:s} to exit")
thread.join(10)
class AudioTfl:
def __init__(self, stop_event: mp.Event, num_threads=2):
self.stop_event = stop_event
self.num_threads = num_threads
self.labels = load_labels("/audio-labelmap.txt", prefill=521)
self.interpreter = Interpreter(
model_path="/cpu_audio_model.tflite",
num_threads=self.num_threads,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def _detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
detections = np.zeros((20, 6), np.float32)
res = self.interpreter.get_tensor(self.tensor_output_details[0]["index"])[0]
non_zero_indices = res > 0
class_ids = np.argpartition(-res, 20)[:20]
class_ids = class_ids[np.argsort(-res[class_ids])]
class_ids = class_ids[non_zero_indices[class_ids]]
scores = res[class_ids]
boxes = np.full((scores.shape[0], 4), -1, np.float32)
count = len(scores)
for i in range(count):
if scores[i] < AUDIO_MIN_CONFIDENCE or i == 20:
break
detections[i] = [
class_ids[i],
float(scores[i]),
boxes[i][0],
boxes[i][1],
boxes[i][2],
boxes[i][3],
]
return detections
def detect(self, tensor_input, threshold=AUDIO_MIN_CONFIDENCE):
detections = []
if self.stop_event.is_set():
return detections
raw_detections = self._detect_raw(tensor_input)
for d in raw_detections:
if d[1] < threshold:
break
detections.append(
(self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
)
return detections
for thread in audio_threads:
if thread.is_alive():
self.logger.warning(f"Thread {thread.name} is still alive")
self.logger.info("Exiting audio processor")
class AudioEventMaintainer(threading.Thread):
def __init__(
self,
camera: CameraConfig,
camera_metrics: dict[str, CameraMetricsTypes],
stop_event: mp.Event,
camera_metrics: dict[str, CameraMetrics],
stop_event: threading.Event,
) -> None:
threading.Thread.__init__(self)
self.name = f"{camera.name}_audio_event_processor"
super().__init__(name=f"{camera.name}_audio_event_processor")
self.config = camera
self.camera_metrics = camera_metrics
self.detections: dict[dict[str, any]] = {}
@ -195,8 +148,8 @@ class AudioEventMaintainer(threading.Thread):
audio_as_float = audio.astype(np.float32)
rms, dBFS = self.calculate_audio_levels(audio_as_float)
self.camera_metrics[self.config.name]["audio_rms"].value = rms
self.camera_metrics[self.config.name]["audio_dBFS"].value = dBFS
self.camera_metrics[self.config.name].audio_rms.value = rms
self.camera_metrics[self.config.name].audio_dBFS.value = dBFS
# only run audio detection when volume is above min_volume
if rms >= self.config.audio.min_volume:
@ -206,7 +159,7 @@ class AudioEventMaintainer(threading.Thread):
audio_detections = []
for label, score, _ in model_detections:
logger.debug(
self.logger.debug(
f"{self.config.name} heard {label} with a score of {score}"
)
@ -291,7 +244,7 @@ class AudioEventMaintainer(threading.Thread):
if resp.status_code == 200:
self.detections[detection["label"]] = None
else:
self.logger.warn(
self.logger.warning(
f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
)
@ -350,3 +303,63 @@ class AudioEventMaintainer(threading.Thread):
self.requestor.stop()
self.config_subscriber.stop()
self.detection_publisher.stop()
class AudioTfl:
def __init__(self, stop_event: threading.Event, num_threads=2):
self.stop_event = stop_event
self.num_threads = num_threads
self.labels = load_labels("/audio-labelmap.txt", prefill=521)
self.interpreter = Interpreter(
model_path="/cpu_audio_model.tflite",
num_threads=self.num_threads,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def _detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
detections = np.zeros((20, 6), np.float32)
res = self.interpreter.get_tensor(self.tensor_output_details[0]["index"])[0]
non_zero_indices = res > 0
class_ids = np.argpartition(-res, 20)[:20]
class_ids = class_ids[np.argsort(-res[class_ids])]
class_ids = class_ids[non_zero_indices[class_ids]]
scores = res[class_ids]
boxes = np.full((scores.shape[0], 4), -1, np.float32)
count = len(scores)
for i in range(count):
if scores[i] < AUDIO_MIN_CONFIDENCE or i == 20:
break
detections[i] = [
class_ids[i],
float(scores[i]),
boxes[i][0],
boxes[i][1],
boxes[i][2],
boxes[i][3],
]
return detections
def detect(self, tensor_input, threshold=AUDIO_MIN_CONFIDENCE):
detections = []
if self.stop_event.is_set():
return detections
raw_detections = self._detect_raw(tensor_input)
for d in raw_detections:
if d[1] < threshold:
break
detections.append(
(self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
)
return detections

View File

@ -5,13 +5,9 @@ import os
import sys
import threading
from collections import deque
from contextlib import AbstractContextManager, ContextDecorator
from logging.handlers import QueueHandler, QueueListener
from types import TracebackType
from typing import Deque, Optional
from typing_extensions import Self
from frigate.util.builtin import clean_camera_user_pass
LOG_HANDLER = logging.StreamHandler()
@ -28,45 +24,33 @@ LOG_HANDLER.addFilter(
)
)
log_listener: Optional[QueueListener] = None
class log_thread(AbstractContextManager, ContextDecorator):
def __init__(self, *, handler: logging.Handler = LOG_HANDLER):
super().__init__()
self._handler = handler
def setup_logging() -> None:
global log_listener
log_queue: mp.Queue = mp.Queue()
self._queue_handler = QueueHandler(log_queue)
log_queue: mp.Queue = mp.Queue()
log_listener = QueueListener(log_queue, LOG_HANDLER, respect_handler_level=True)
self._log_listener = QueueListener(
log_queue, self._handler, respect_handler_level=True
)
atexit.register(_stop_logging)
log_listener.start()
@property
def handler(self) -> logging.Handler:
return self._handler
logging.basicConfig(
level=logging.INFO,
handlers=[],
force=True,
)
def _stop_thread(self) -> None:
self._log_listener.stop()
logging.getLogger().addHandler(QueueHandler(log_listener.queue))
def __enter__(self) -> Self:
logging.getLogger().addHandler(self._queue_handler)
atexit.register(self._stop_thread)
self._log_listener.start()
def _stop_logging() -> None:
global log_listener
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_info: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
logging.getLogger().removeHandler(self._queue_handler)
atexit.unregister(self._stop_thread)
self._stop_thread()
if log_listener is not None:
log_listener.stop()
log_listener = None
# When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the

View File

@ -10,6 +10,7 @@ from abc import ABC, abstractmethod
import numpy as np
from setproctitle import setproctitle
import frigate.util as util
from frigate.detectors import create_detector
from frigate.detectors.detector_config import InputTensorEnum
from frigate.util.builtin import EventsPerSecond, load_labels
@ -168,7 +169,7 @@ class ObjectDetectProcess:
self.detection_start.value = 0.0
if (self.detect_process is not None) and self.detect_process.is_alive():
self.stop()
self.detect_process = mp.Process(
self.detect_process = util.Process(
target=run_detector,
name=f"detector:{self.name}",
args=(

View File

@ -18,6 +18,7 @@ from norfair.camera_motion import (
TranslationTransformationGetter,
)
from frigate.camera import PTZMetrics
from frigate.comms.dispatcher import Dispatcher
from frigate.config import CameraConfig, FrigateConfig, ZoomingModeEnum
from frigate.const import (
@ -31,7 +32,6 @@ from frigate.const import (
CONFIG_DIR,
)
from frigate.ptz.onvif import OnvifController
from frigate.types import PTZMetricsTypes
from frigate.util.builtin import update_yaml_file
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union
@ -49,24 +49,19 @@ def ptz_moving_at_frame_time(frame_time, ptz_start_time, ptz_stop_time):
class PtzMotionEstimator:
def __init__(
self, config: CameraConfig, ptz_metrics: dict[str, PTZMetricsTypes]
) -> None:
def __init__(self, config: CameraConfig, ptz_metrics: PTZMetrics) -> None:
self.frame_manager = SharedMemoryFrameManager()
self.norfair_motion_estimator = None
self.camera_config = config
self.coord_transformations = None
self.ptz_metrics = ptz_metrics
self.ptz_start_time = self.ptz_metrics["ptz_start_time"]
self.ptz_stop_time = self.ptz_metrics["ptz_stop_time"]
self.ptz_metrics["ptz_reset"].set()
self.ptz_metrics.reset.set()
logger.debug(f"{config.name}: Motion estimator init")
def motion_estimator(self, detections, frame_time, camera):
# If we've just started up or returned to our preset, reset motion estimator for new tracking session
if self.ptz_metrics["ptz_reset"].is_set():
self.ptz_metrics["ptz_reset"].clear()
if self.ptz_metrics.reset.is_set():
self.ptz_metrics.reset.clear()
# homography is nice (zooming) but slow, translation is pan/tilt only but fast.
if (
@ -88,7 +83,9 @@ class PtzMotionEstimator:
self.coord_transformations = None
if ptz_moving_at_frame_time(
frame_time, self.ptz_start_time.value, self.ptz_stop_time.value
frame_time,
self.ptz_metrics.start_time.value,
self.ptz_metrics.stop_time.value,
):
logger.debug(
f"{camera}: Motion estimator running - frame time: {frame_time}"
@ -148,7 +145,7 @@ class PtzAutoTrackerThread(threading.Thread):
self,
config: FrigateConfig,
onvif: OnvifController,
ptz_metrics: dict[str, PTZMetricsTypes],
ptz_metrics: dict[str, PTZMetrics],
dispatcher: Dispatcher,
stop_event: MpEvent,
) -> None:
@ -182,7 +179,7 @@ class PtzAutoTracker:
self,
config: FrigateConfig,
onvif: OnvifController,
ptz_metrics: PTZMetricsTypes,
ptz_metrics: PTZMetrics,
dispatcher: Dispatcher,
stop_event: MpEvent,
) -> None:
@ -248,7 +245,7 @@ class PtzAutoTracker:
f"Disabling autotracking for {camera}: onvif connection failed"
)
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
self.ptz_metrics[camera].autotracker_enabled.value = False
return
if not self.onvif.cams[camera]["init"]:
@ -257,7 +254,7 @@ class PtzAutoTracker:
f"Disabling autotracking for {camera}: Unable to initialize onvif"
)
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
self.ptz_metrics[camera].autotracker_enabled.value = False
return
if "pt-r-fov" not in self.onvif.cams[camera]["features"]:
@ -265,7 +262,7 @@ class PtzAutoTracker:
f"Disabling autotracking for {camera}: FOV relative movement not supported"
)
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
self.ptz_metrics[camera].autotracker_enabled.value = False
return
move_status_supported = self.onvif.get_service_capabilities(camera)
@ -275,7 +272,7 @@ class PtzAutoTracker:
f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
)
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
self.ptz_metrics[camera].autotracker_enabled.value = False
return
if self.onvif.cams[camera]["init"]:
@ -295,12 +292,16 @@ class PtzAutoTracker:
float(val)
for val in camera_config.onvif.autotracking.movement_weights
]
self.ptz_metrics[camera][
"ptz_min_zoom"
].value = camera_config.onvif.autotracking.movement_weights[0]
self.ptz_metrics[camera][
"ptz_max_zoom"
].value = camera_config.onvif.autotracking.movement_weights[1]
self.ptz_metrics[
camera
].min_zoom.value = (
camera_config.onvif.autotracking.movement_weights[0]
)
self.ptz_metrics[
camera
].max_zoom.value = (
camera_config.onvif.autotracking.movement_weights[1]
)
self.intercept[camera] = (
camera_config.onvif.autotracking.movement_weights[2]
)
@ -309,7 +310,7 @@ class PtzAutoTracker:
)
else:
camera_config.onvif.autotracking.enabled = False
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
self.ptz_metrics[camera].autotracker_enabled.value = False
logger.warning(
f"Autotracker recalibration is required for {camera}. Disabling autotracking."
)
@ -317,7 +318,7 @@ class PtzAutoTracker:
if camera_config.onvif.autotracking.calibrate_on_startup:
self._calibrate_camera(camera)
self.ptz_metrics[camera]["ptz_tracking_active"].clear()
self.ptz_metrics[camera].tracking_active.clear()
self.dispatcher.publish(f"{camera}/ptz_autotracker/active", "OFF", retain=False)
self.autotracker_init[camera] = True
@ -363,10 +364,10 @@ class PtzAutoTracker:
1,
)
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
zoom_out_values.append(self.ptz_metrics[camera]["ptz_zoom_level"].value)
zoom_out_values.append(self.ptz_metrics[camera].zoom_level.value)
self.onvif._zoom_absolute(
camera,
@ -374,10 +375,10 @@ class PtzAutoTracker:
1,
)
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
zoom_in_values.append(self.ptz_metrics[camera]["ptz_zoom_level"].value)
zoom_in_values.append(self.ptz_metrics[camera].zoom_level.value)
if (
self.config.cameras[camera].onvif.autotracking.zooming
@ -392,12 +393,10 @@ class PtzAutoTracker:
1,
)
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
zoom_out_values.append(
self.ptz_metrics[camera]["ptz_zoom_level"].value
)
zoom_out_values.append(self.ptz_metrics[camera].zoom_level.value)
# relative move to 0.01
self.onvif._move_relative(
@ -408,33 +407,31 @@ class PtzAutoTracker:
1,
)
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
zoom_in_values.append(
self.ptz_metrics[camera]["ptz_zoom_level"].value
)
zoom_in_values.append(self.ptz_metrics[camera].zoom_level.value)
self.ptz_metrics[camera]["ptz_max_zoom"].value = max(zoom_in_values)
self.ptz_metrics[camera]["ptz_min_zoom"].value = min(zoom_out_values)
self.ptz_metrics[camera].max_zoom.value = max(zoom_in_values)
self.ptz_metrics[camera].min_zoom.value = min(zoom_out_values)
logger.debug(
f'{camera}: Calibration values: max zoom: {self.ptz_metrics[camera]["ptz_max_zoom"].value}, min zoom: {self.ptz_metrics[camera]["ptz_min_zoom"].value}'
f"{camera}: Calibration values: max zoom: {self.ptz_metrics[camera].max_zoom.value}, min zoom: {self.ptz_metrics[camera].min_zoom.value}"
)
else:
self.ptz_metrics[camera]["ptz_max_zoom"].value = 1
self.ptz_metrics[camera]["ptz_min_zoom"].value = 0
self.ptz_metrics[camera].max_zoom.value = 1
self.ptz_metrics[camera].min_zoom.value = 0
self.onvif._move_to_preset(
camera,
self.config.cameras[camera].onvif.autotracking.return_preset.lower(),
)
self.ptz_metrics[camera]["ptz_reset"].set()
self.ptz_metrics[camera]["ptz_motor_stopped"].clear()
self.ptz_metrics[camera].reset.set()
self.ptz_metrics[camera].motor_stopped.clear()
# Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
for step in range(num_steps):
@ -445,7 +442,7 @@ class PtzAutoTracker:
self.onvif._move_relative(camera, pan, tilt, 0, 1)
# Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
stop_time = time.time()
@ -462,11 +459,11 @@ class PtzAutoTracker:
camera,
self.config.cameras[camera].onvif.autotracking.return_preset.lower(),
)
self.ptz_metrics[camera]["ptz_reset"].set()
self.ptz_metrics[camera]["ptz_motor_stopped"].clear()
self.ptz_metrics[camera].reset.set()
self.ptz_metrics[camera].motor_stopped.clear()
# Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
logger.info(
@ -512,8 +509,8 @@ class PtzAutoTracker:
self.config.cameras[camera].onvif.autotracking.movement_weights = ", ".join(
str(v)
for v in [
self.ptz_metrics[camera]["ptz_min_zoom"].value,
self.ptz_metrics[camera]["ptz_max_zoom"].value,
self.ptz_metrics[camera].min_zoom.value,
self.ptz_metrics[camera].max_zoom.value,
self.intercept[camera],
*self.move_coefficients[camera],
]
@ -649,8 +646,8 @@ class PtzAutoTracker:
# if we're receiving move requests during a PTZ move, ignore them
if ptz_moving_at_frame_time(
frame_time,
self.ptz_metrics[camera]["ptz_start_time"].value,
self.ptz_metrics[camera]["ptz_stop_time"].value,
self.ptz_metrics[camera].start_time.value,
self.ptz_metrics[camera].stop_time.value,
):
# instead of dequeueing this might be a good place to preemptively move based
# on an estimate - for fast moving objects, etc.
@ -671,19 +668,17 @@ class PtzAutoTracker:
self.onvif._move_relative(camera, pan, tilt, 0, 1)
# Wait until the camera finishes moving
while not self.ptz_metrics[camera][
"ptz_motor_stopped"
].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
if (
zoom > 0
and self.ptz_metrics[camera]["ptz_zoom_level"].value != zoom
and self.ptz_metrics[camera].zoom_level.value != zoom
):
self.onvif._zoom_absolute(camera, zoom, 1)
# Wait until the camera finishes moving
while not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
while not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
if self.config.cameras[camera].onvif.autotracking.movement_weights:
@ -691,7 +686,7 @@ class PtzAutoTracker:
f"{camera}: Predicted movement time: {self._predict_movement_time(camera, pan, tilt)}"
)
logger.debug(
f'{camera}: Actual movement time: {self.ptz_metrics[camera]["ptz_stop_time"].value-self.ptz_metrics[camera]["ptz_start_time"].value}'
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value-self.ptz_metrics[camera].start_time.value}"
)
# save metrics for better estimate calculations
@ -709,12 +704,12 @@ class PtzAutoTracker:
{
"pan": pan,
"tilt": tilt,
"start_timestamp": self.ptz_metrics[camera][
"ptz_start_time"
].value,
"end_timestamp": self.ptz_metrics[camera][
"ptz_stop_time"
].value,
"start_timestamp": self.ptz_metrics[
camera
].start_time.value,
"end_timestamp": self.ptz_metrics[
camera
].stop_time.value,
}
)
@ -734,8 +729,8 @@ class PtzAutoTracker:
return clipped, diff
if (
frame_time > self.ptz_metrics[camera]["ptz_start_time"].value
and frame_time > self.ptz_metrics[camera]["ptz_stop_time"].value
frame_time > self.ptz_metrics[camera].start_time.value
and frame_time > self.ptz_metrics[camera].stop_time.value
and not self.move_queue_locks[camera].locked()
):
# we can split up any large moves caused by velocity estimated movements if necessary
@ -954,12 +949,12 @@ class PtzAutoTracker:
)
at_max_zoom = (
self.ptz_metrics[camera]["ptz_zoom_level"].value
== self.ptz_metrics[camera]["ptz_max_zoom"].value
self.ptz_metrics[camera].zoom_level.value
== self.ptz_metrics[camera].max_zoom.value
)
at_min_zoom = (
self.ptz_metrics[camera]["ptz_zoom_level"].value
== self.ptz_metrics[camera]["ptz_min_zoom"].value
self.ptz_metrics[camera].zoom_level.value
== self.ptz_metrics[camera].min_zoom.value
)
# debug zooming
@ -1100,7 +1095,7 @@ class PtzAutoTracker:
zoom = 0
result = None
current_zoom_level = self.ptz_metrics[camera]["ptz_zoom_level"].value
current_zoom_level = self.ptz_metrics[camera].zoom_level.value
target_box = max(
obj.obj_data["box"][2] - obj.obj_data["box"][0],
obj.obj_data["box"][3] - obj.obj_data["box"][1],
@ -1123,8 +1118,8 @@ class PtzAutoTracker:
) is not None:
# divide zoom in 10 increments and always zoom out more than in
level = (
self.ptz_metrics[camera]["ptz_max_zoom"].value
- self.ptz_metrics[camera]["ptz_min_zoom"].value
self.ptz_metrics[camera].max_zoom.value
- self.ptz_metrics[camera].min_zoom.value
) / 20
if result:
zoom = min(1.0, current_zoom_level + level)
@ -1219,7 +1214,7 @@ class PtzAutoTracker:
logger.debug(
f"{camera}: New object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
self.ptz_metrics[camera]["ptz_tracking_active"].set()
self.ptz_metrics[camera].tracking_active.set()
self.dispatcher.publish(
f"{camera}/ptz_autotracker/active", "ON", retain=False
)
@ -1243,8 +1238,8 @@ class PtzAutoTracker:
if not ptz_moving_at_frame_time(
obj.obj_data["frame_time"],
self.ptz_metrics[camera]["ptz_start_time"].value,
self.ptz_metrics[camera]["ptz_stop_time"].value,
self.ptz_metrics[camera].start_time.value,
self.ptz_metrics[camera].stop_time.value,
):
if self.tracked_object_metrics[camera]["below_distance_threshold"]:
logger.debug(
@ -1325,7 +1320,7 @@ class PtzAutoTracker:
if not self.autotracker_init[camera]:
self._autotracker_setup(self.config.cameras[camera], camera)
# regularly update camera status
if not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
if not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
# return to preset if tracking is over
@ -1334,7 +1329,7 @@ class PtzAutoTracker:
and self.tracked_object_history[camera]
and (
# might want to use a different timestamp here?
self.ptz_metrics[camera]["ptz_frame_time"].value
self.ptz_metrics[camera].frame_time.value
- self.tracked_object_history[camera][-1]["frame_time"]
>= autotracker_config.timeout
)
@ -1348,9 +1343,9 @@ class PtzAutoTracker:
while not self.move_queues[camera].empty():
self.move_queues[camera].get()
self.ptz_metrics[camera]["ptz_motor_stopped"].wait()
self.ptz_metrics[camera].motor_stopped.wait()
logger.debug(
f"{camera}: Time is {self.ptz_metrics[camera]['ptz_frame_time'].value}, returning to preset: {autotracker_config.return_preset}"
f"{camera}: Time is {self.ptz_metrics[camera].frame_time.value}, returning to preset: {autotracker_config.return_preset}"
)
self.onvif._move_to_preset(
camera,
@ -1358,11 +1353,11 @@ class PtzAutoTracker:
)
# update stored zoom level from preset
if not self.ptz_metrics[camera]["ptz_motor_stopped"].is_set():
if not self.ptz_metrics[camera].motor_stopped.is_set():
self.onvif.get_camera_status(camera)
self.ptz_metrics[camera]["ptz_tracking_active"].clear()
self.ptz_metrics[camera].tracking_active.clear()
self.dispatcher.publish(
f"{camera}/ptz_autotracker/active", "OFF", retain=False
)
self.ptz_metrics[camera]["ptz_reset"].set()
self.ptz_metrics[camera].reset.set()

View File

@ -10,8 +10,8 @@ from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from zeep.transports import Transport
from frigate.camera import PTZMetrics
from frigate.config import FrigateConfig, ZoomingModeEnum
from frigate.types import PTZMetricsTypes
from frigate.util.builtin import find_by_key
logger = logging.getLogger(__name__)
@ -33,8 +33,10 @@ class OnvifCommandEnum(str, Enum):
class OnvifController:
ptz_metrics: dict[str, PTZMetrics]
def __init__(
self, config: FrigateConfig, ptz_metrics: dict[str, PTZMetricsTypes]
self, config: FrigateConfig, ptz_metrics: dict[str, PTZMetrics]
) -> None:
self.cams: dict[str, ONVIFCamera] = {}
self.config = config
@ -389,14 +391,14 @@ class OnvifController:
return
self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear()
self.ptz_metrics[camera_name].motor_stopped.clear()
logger.debug(
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name].frame_time.value}"
)
self.ptz_metrics[camera_name]["ptz_start_time"].value = self.ptz_metrics[
self.ptz_metrics[camera_name].start_time.value = self.ptz_metrics[
camera_name
]["ptz_frame_time"].value
self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0
].frame_time.value
self.ptz_metrics[camera_name].stop_time.value = 0
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["relative_move_request"]
@ -464,9 +466,9 @@ class OnvifController:
return
self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear()
self.ptz_metrics[camera_name]["ptz_start_time"].value = 0
self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0
self.ptz_metrics[camera_name].motor_stopped.clear()
self.ptz_metrics[camera_name].start_time.value = 0
self.ptz_metrics[camera_name].stop_time.value = 0
move_request = self.cams[camera_name]["move_request"]
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
preset_token = self.cams[camera_name]["presets"][preset]
@ -515,14 +517,14 @@ class OnvifController:
return
self.cams[camera_name]["active"] = True
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear()
self.ptz_metrics[camera_name].motor_stopped.clear()
logger.debug(
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name].frame_time.value}"
)
self.ptz_metrics[camera_name]["ptz_start_time"].value = self.ptz_metrics[
self.ptz_metrics[camera_name].start_time.value = self.ptz_metrics[
camera_name
]["ptz_frame_time"].value
self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0
].frame_time.value
self.ptz_metrics[camera_name].stop_time.value = 0
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["absolute_move_request"]
@ -653,36 +655,36 @@ class OnvifController:
if pan_tilt_status == "IDLE" and (zoom_status is None or zoom_status == "IDLE"):
self.cams[camera_name]["active"] = False
if not self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set():
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set()
if not self.ptz_metrics[camera_name].motor_stopped.is_set():
self.ptz_metrics[camera_name].motor_stopped.set()
logger.debug(
f"{camera_name}: PTZ stop time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
f"{camera_name}: PTZ stop time: {self.ptz_metrics[camera_name].frame_time.value}"
)
self.ptz_metrics[camera_name]["ptz_stop_time"].value = self.ptz_metrics[
self.ptz_metrics[camera_name].stop_time.value = self.ptz_metrics[
camera_name
]["ptz_frame_time"].value
].frame_time.value
else:
self.cams[camera_name]["active"] = True
if self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set():
self.ptz_metrics[camera_name]["ptz_motor_stopped"].clear()
if self.ptz_metrics[camera_name].motor_stopped.is_set():
self.ptz_metrics[camera_name].motor_stopped.clear()
logger.debug(
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name]['ptz_frame_time'].value}"
f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name].frame_time.value}"
)
self.ptz_metrics[camera_name][
"ptz_start_time"
].value = self.ptz_metrics[camera_name]["ptz_frame_time"].value
self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0
self.ptz_metrics[camera_name].start_time.value = self.ptz_metrics[
camera_name
].frame_time.value
self.ptz_metrics[camera_name].stop_time.value = 0
if (
self.config.cameras[camera_name].onvif.autotracking.zooming
!= ZoomingModeEnum.disabled
):
# store absolute zoom level as 0 to 1 interpolated from the values of the camera
self.ptz_metrics[camera_name]["ptz_zoom_level"].value = numpy.interp(
self.ptz_metrics[camera_name].zoom_level.value = numpy.interp(
round(status.Position.Zoom.x, 2),
[0, 1],
[
@ -691,23 +693,23 @@ class OnvifController:
],
)
logger.debug(
f'{camera_name}: Camera zoom level: {self.ptz_metrics[camera_name]["ptz_zoom_level"].value}'
f"{camera_name}: Camera zoom level: {self.ptz_metrics[camera_name].zoom_level.value}"
)
# some hikvision cams won't update MoveStatus, so warn if it hasn't changed
if (
not self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set()
and not self.ptz_metrics[camera_name]["ptz_reset"].is_set()
and self.ptz_metrics[camera_name]["ptz_start_time"].value != 0
and self.ptz_metrics[camera_name]["ptz_frame_time"].value
> (self.ptz_metrics[camera_name]["ptz_start_time"].value + 10)
and self.ptz_metrics[camera_name]["ptz_stop_time"].value == 0
not self.ptz_metrics[camera_name].motor_stopped.is_set()
and not self.ptz_metrics[camera_name].reset.is_set()
and self.ptz_metrics[camera_name].start_time.value != 0
and self.ptz_metrics[camera_name].frame_time.value
> (self.ptz_metrics[camera_name].start_time.value + 10)
and self.ptz_metrics[camera_name].stop_time.value == 0
):
logger.debug(
f'Start time: {self.ptz_metrics[camera_name]["ptz_start_time"].value}, Stop time: {self.ptz_metrics[camera_name]["ptz_stop_time"].value}, Frame time: {self.ptz_metrics[camera_name]["ptz_frame_time"].value}'
f"Start time: {self.ptz_metrics[camera_name].start_time.value}, Stop time: {self.ptz_metrics[camera_name].stop_time.value}, Frame time: {self.ptz_metrics[camera_name].frame_time.value}"
)
# set the stop time so we don't come back into this again and spam the logs
self.ptz_metrics[camera_name]["ptz_stop_time"].value = self.ptz_metrics[
self.ptz_metrics[camera_name].stop_time.value = self.ptz_metrics[
camera_name
]["ptz_frame_time"].value
].frame_time.value
logger.warning(f"Camera {camera_name} is still in ONVIF 'MOVING' status.")

View File

@ -11,10 +11,11 @@ import psutil
import requests
from requests.exceptions import RequestException
from frigate.camera import CameraMetrics
from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.object_detection import ObjectDetectProcess
from frigate.types import CameraMetricsTypes, StatsTrackingTypes
from frigate.types import StatsTrackingTypes
from frigate.util.services import (
get_amd_gpu_stats,
get_bandwidth_stats,
@ -49,7 +50,7 @@ def get_latest_version(config: FrigateConfig) -> str:
def stats_init(
config: FrigateConfig,
camera_metrics: dict[str, CameraMetricsTypes],
camera_metrics: dict[str, CameraMetrics],
detectors: dict[str, ObjectDetectProcess],
processes: dict[str, int],
) -> StatsTrackingTypes:
@ -245,27 +246,23 @@ def stats_snapshot(
stats["cameras"] = {}
for name, camera_stats in camera_metrics.items():
total_detection_fps += camera_stats["detection_fps"].value
pid = camera_stats["process"].pid if camera_stats["process"] else None
ffmpeg_pid = (
camera_stats["ffmpeg_pid"].value if camera_stats["ffmpeg_pid"] else None
)
total_detection_fps += camera_stats.detection_fps.value
pid = camera_stats.process.pid if camera_stats.process else None
ffmpeg_pid = camera_stats.ffmpeg_pid.value if camera_stats.ffmpeg_pid else None
capture_pid = (
camera_stats["capture_process"].pid
if camera_stats["capture_process"]
else None
camera_stats.capture_process.pid if camera_stats.capture_process else None
)
stats["cameras"][name] = {
"camera_fps": round(camera_stats["camera_fps"].value, 2),
"process_fps": round(camera_stats["process_fps"].value, 2),
"skipped_fps": round(camera_stats["skipped_fps"].value, 2),
"detection_fps": round(camera_stats["detection_fps"].value, 2),
"camera_fps": round(camera_stats.camera_fps.value, 2),
"process_fps": round(camera_stats.process_fps.value, 2),
"skipped_fps": round(camera_stats.skipped_fps.value, 2),
"detection_fps": round(camera_stats.detection_fps.value, 2),
"detection_enabled": config.cameras[name].detect.enabled,
"pid": pid,
"capture_pid": capture_pid,
"ffmpeg_pid": ffmpeg_pid,
"audio_rms": round(camera_stats["audio_rms"].value, 4),
"audio_dBFS": round(camera_stats["audio_dBFS"].value, 4),
"audio_rms": round(camera_stats.audio_rms.value, 4),
"audio_dBFS": round(camera_stats.audio_dBFS.value, 4),
}
stats["detectors"] = {}

View File

@ -12,10 +12,10 @@ from norfair import (
)
from norfair.drawing.drawer import Drawer
from frigate.camera import PTZMetrics
from frigate.config import CameraConfig
from frigate.ptz.autotrack import PtzMotionEstimator
from frigate.track import ObjectTracker
from frigate.types import PTZMetricsTypes
from frigate.util.image import intersection_over_union
from frigate.util.object import average_boxes, median_of_boxes
@ -75,7 +75,7 @@ class NorfairTracker(ObjectTracker):
def __init__(
self,
config: CameraConfig,
ptz_metrics: PTZMetricsTypes,
ptz_metrics: PTZMetrics,
):
self.tracked_objects = {}
self.untracked_object_boxes: list[list[int]] = []
@ -85,7 +85,6 @@ class NorfairTracker(ObjectTracker):
self.camera_config = config
self.detect_config = config.detect
self.ptz_metrics = ptz_metrics
self.ptz_autotracker_enabled = ptz_metrics["ptz_autotracker_enabled"]
self.ptz_motion_estimator = {}
self.camera_name = config.name
self.track_id_map = {}
@ -104,7 +103,7 @@ class NorfairTracker(ObjectTracker):
# the different tracker per object class
filter_factory=OptimizedKalmanFilterFactory(R=3.4),
)
if self.ptz_autotracker_enabled.value:
if self.ptz_metrics.autotracker_enabled.value:
self.ptz_motion_estimator = PtzMotionEstimator(
self.camera_config, self.ptz_metrics
)
@ -315,7 +314,7 @@ class NorfairTracker(ObjectTracker):
coord_transformations = None
if self.ptz_autotracker_enabled.value:
if self.ptz_metrics.autotracker_enabled.value:
# we must have been enabled by mqtt, so set up the estimator
if not self.ptz_motion_estimator:
self.ptz_motion_estimator = PtzMotionEstimator(

View File

@ -1,42 +1,11 @@
from multiprocessing import Queue
from multiprocessing.context import Process
from multiprocessing.sharedctypes import Synchronized
from multiprocessing.synchronize import Event
from typing import Optional, TypedDict
from typing import TypedDict
from frigate.camera import CameraMetrics
from frigate.object_detection import ObjectDetectProcess
class CameraMetricsTypes(TypedDict):
camera_fps: Synchronized
capture_process: Optional[Process]
detection_fps: Synchronized
detection_frame: Synchronized
ffmpeg_pid: Synchronized
frame_queue: Queue
process: Optional[Process]
process_fps: Synchronized
read_start: Synchronized
skipped_fps: Synchronized
audio_rms: Synchronized
audio_dBFS: Synchronized
class PTZMetricsTypes(TypedDict):
ptz_autotracker_enabled: Synchronized
ptz_tracking_active: Event
ptz_motor_stopped: Event
ptz_reset: Event
ptz_start_time: Synchronized
ptz_stop_time: Synchronized
ptz_frame_time: Synchronized
ptz_zoom_level: Synchronized
ptz_max_zoom: Synchronized
ptz_min_zoom: Synchronized
class StatsTrackingTypes(TypedDict):
camera_metrics: dict[str, CameraMetricsTypes]
camera_metrics: dict[str, CameraMetrics]
detectors: dict[str, ObjectDetectProcess]
started: int
latest_frigate_version: str

frigate/util/__init__.py (new file, 3 lines)
View File

@ -0,0 +1,3 @@
from .process import Process
__all__ = ["Process"]

frigate/util/process.py (new file, 55 lines)
View File

@ -0,0 +1,55 @@
import logging
import multiprocessing as mp
from functools import wraps
from logging.handlers import QueueHandler
from typing import Any
import frigate.log
class BaseProcess(mp.Process):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def start(self, *args, **kwargs):
self.before_start()
super().start(*args, **kwargs)
self.after_start()
def __getattribute__(self, name: str) -> Any:
if name == "run":
run = super().__getattribute__("run")
@wraps(run)
def run_wrapper(*args, **kwargs):
try:
self.before_run()
return run(*args, **kwargs)
finally:
self.after_run()
return run_wrapper
return super().__getattribute__(name)
def before_start(self) -> None:
pass
def after_start(self) -> None:
pass
def before_run(self) -> None:
pass
def after_run(self) -> None:
pass
class Process(BaseProcess):
def before_start(self) -> None:
self.__log_queue = frigate.log.log_listener.queue
def before_run(self) -> None:
if self.__log_queue:
logging.basicConfig(handlers=[], force=True)
logging.getLogger().addHandler(QueueHandler(self.__log_queue))
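
A hypothetical end-to-end sketch of how this class is meant to be used, assuming the frigate package from this commit; Heartbeat and its log message are illustrative only. BaseProcess wraps run() via __getattribute__ so that before_run()/after_run() bracket it inside the child, and setup_logging() must run in the parent before start() because before_start() captures log_listener.queue.

import logging
import os

import frigate.util as util
from frigate.log import setup_logging

class Heartbeat(util.Process):
    def run(self) -> None:
        # By the time run() executes, the before_run() hook has already routed
        # the child's root logger through the parent's QueueListener. Raise the
        # level explicitly so INFO records pass under any start method.
        logging.getLogger().setLevel(logging.INFO)
        logging.getLogger(self.name).info("alive in pid %s", os.getpid())

if __name__ == "__main__":
    setup_logging()  # parent side: starts the QueueListener, registers atexit cleanup
    worker = Heartbeat(name="frigate.heartbeat", daemon=True)
    worker.start()   # before_start() grabs log_listener.queue here
    worker.join()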

View File

@ -11,6 +11,7 @@ import time
import cv2
from setproctitle import setproctitle
from frigate.camera import CameraMetrics, PTZMetrics
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, DetectConfig, ModelConfig
@ -28,7 +29,6 @@ from frigate.object_detection import RemoteObjectDetector
from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.types import PTZMetricsTypes
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
from frigate.util.image import (
FrameManager,
@ -386,7 +386,9 @@ class CameraCapture(threading.Thread):
)
def capture_camera(name, config: CameraConfig, shm_frame_count: int, process_info):
def capture_camera(
name, config: CameraConfig, shm_frame_count: int, camera_metrics: CameraMetrics
):
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
@ -398,15 +400,14 @@ def capture_camera(name, config: CameraConfig, shm_frame_count: int, process_inf
threading.current_thread().name = f"capture:{name}"
setproctitle(f"frigate.capture:{name}")
frame_queue = process_info["frame_queue"]
camera_watchdog = CameraWatchdog(
name,
config,
shm_frame_count,
frame_queue,
process_info["camera_fps"],
process_info["skipped_fps"],
process_info["ffmpeg_pid"],
camera_metrics.frame_queue,
camera_metrics.camera_fps,
camera_metrics.skipped_fps,
camera_metrics.ffmpeg_pid,
stop_event,
)
camera_watchdog.start()
@ -421,8 +422,8 @@ def track_camera(
detection_queue,
result_connection,
detected_objects_queue,
process_info,
ptz_metrics,
camera_metrics: CameraMetrics,
ptz_metrics: PTZMetrics,
region_grid,
):
stop_event = mp.Event()
@ -437,7 +438,7 @@ def track_camera(
setproctitle(f"frigate.process:{name}")
listen()
frame_queue = process_info["frame_queue"]
frame_queue = camera_metrics.frame_queue
frame_shape = config.frame_shape
objects_to_track = config.objects.track
@ -469,7 +470,7 @@ def track_camera(
object_detector,
object_tracker,
detected_objects_queue,
process_info,
camera_metrics,
objects_to_track,
object_filters,
stop_event,
@ -542,17 +543,14 @@ def process_frames(
object_detector: RemoteObjectDetector,
object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue,
process_info: dict,
camera_metrics: CameraMetrics,
objects_to_track: list[str],
object_filters,
stop_event,
ptz_metrics: PTZMetricsTypes,
ptz_metrics: PTZMetrics,
region_grid,
exit_on_empty: bool = False,
):
fps = process_info["process_fps"]
detection_fps = process_info["detection_fps"]
current_frame_time = process_info["detection_frame"]
next_region_update = get_tomorrow_at_time(2)
config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}")
@ -589,8 +587,8 @@ def process_frames(
break
continue
current_frame_time.value = frame_time
ptz_metrics["ptz_frame_time"].value = frame_time
camera_metrics.detection_frame.value = frame_time
ptz_metrics.frame_time.value = frame_time
frame = frame_manager.get(
f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
@ -660,8 +658,8 @@ def process_frames(
# ptz_moving_at_frame_time() always returns False for non-autotracking cameras
if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time(
frame_time,
ptz_metrics["ptz_start_time"].value,
ptz_metrics["ptz_stop_time"].value,
ptz_metrics.start_time.value,
ptz_metrics.stop_time.value,
):
# find motion boxes that are not inside tracked object regions
standalone_motion_boxes = [
@ -839,7 +837,7 @@ def process_frames(
continue
else:
fps_tracker.update()
fps.value = fps_tracker.eps()
camera_metrics.process_fps.value = fps_tracker.eps()
detected_objects_queue.put(
(
camera_name,
@ -849,7 +847,7 @@ def process_frames(
regions,
)
)
detection_fps.value = object_detector.fps.eps()
camera_metrics.detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")
motion_detector.stop()