# Configuration file for object detection
# NOTE: ALL parameters here can be overridden
# on a per monitor basis if you want. Just
# duplicate it inside the correct [monitor-<num>] section
# REMEMBER: Most of these are just variables. The place where all
# of this comes together is the ml_sequence and stream_sequence
# structures.
[general]
# Please don't change this. It is used by the config upgrade script
version=1.2
# You can now limit the # of detection processes
# per target processor. If not specified, the default is 1
# Other detection processes will wait to acquire the lock
cpu_max_processes=3
tpu_max_processes=1
gpu_max_processes=1
# Time to wait (in seconds) for a processor to be free before
# erroring out. Default is 120 (2 mins)
cpu_max_lock_wait=100
tpu_max_lock_wait=100
gpu_max_lock_wait=100
#pyzm_overrides={'conf_path':'/etc/zm','log_level_debug':0}
pyzm_overrides={'log_level_debug':5}
# This is an optional file
# If specified, you can define tokens with secret values in that file
# and only refer to the tokens in your main config file
secrets = /etc/zm/secrets.ini
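# A minimal sketch of what that secrets file might contain
# (all values below are hypothetical):
#   [secrets]
#   ZM_PORTAL=https://myserver/zm
#   ZM_USER=admin
#   ZM_PASSWORD=mypassword
#   ZM_API_PORTAL=https://myserver/zm/api
#   ML_USER=mluser
#   ML_PASSWORD=mlpassword
#   PLATEREC_ALPR_KEY=yourkeyhere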
# portal/user/password are needed if you plan on using ZM's legacy
# auth mechanism to get images
portal=!ZM_PORTAL
user=!ZM_USER
password=!ZM_PASSWORD
# api portal is needed if you plan to use tokens to get images
# requires ZM 1.33 or above
api_portal=!ZM_API_PORTAL
allow_self_signed=yes
# if yes, the last detection will be stored per monitor,
# and new detections whose bounding boxes and labels match
# the stored ones will be discarded. This may be helpful
# in getting rid of static objects that get detected
# due to some motion.
match_past_detections=no
# The max difference in area between objects if match_past_detections is on.
# It can also be specified in px like 300px. Default is 5%. Basically, bounding boxes of the same
# object can differ ever so slightly between detections. Contributor @neillbell put in a PR
# to calculate the difference in areas and, based on his tests, 5% worked well. YMMV. Change it if needed.
# Note: You can specify label/object specific max_diff_areas as well. If present, they override this value
# example:
# person_past_det_max_diff_area=5%
# car_past_det_max_diff_area=5000px
past_det_max_diff_area=5%
# this is the maximum size a detected object can have. You can specify it in px or % just like past_det_max_diff_area
# This is pretty useful to eliminate bogus detections. In my case, depending on shadows and other lighting conditions,
# I sometimes see a "car" or "person" detected that covers most of my driveway view. That is practically impossible,
# so I cap mine because I know any valid detected object cannot be larger than that area
max_detection_size=90%
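# example: the same cap expressed as an absolute pixel area
# instead of a percentage (hypothetical value)
# max_detection_size=60000px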
# sequence of models to run for detection
my_model_sequence=object,face,alpr
# if all, we will loop through all models
# if first, the first successful detection will break out
detection_mode=all
# If you need basic auth to access ZM
#basic_user=user
#basic_password=password
# base data path for various files the ES+OD needs
# we support config variable substitution as well
base_data_path=/var/lib/zmeventnotification
# global frame selection:
# bestmatch, alarm, snapshot OR a specific frame ID
frame_id=bestmatch
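# example: analyze one specific frame instead (hypothetical frame ID)
# frame_id=52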
# this resizes the image before analysis is done
resize=800
# set to yes, if you want to remove images after analysis
# setting to yes is recommended to avoid filling up space
# keep to no while debugging/inspecting masks
# Note this does NOT delete debug images later
delete_after_analyze=yes
# If yes, will write an image called <filename>-bbox.jpg as well
# which contains the bounding boxes. This has NO relation to
# write_image_to_zm
# Typically, if you enable delete_after_analyze you may
# also want to set write_debug_image to no.
write_debug_image=no
# if yes, will write an image with bounding boxes
# this needs to be yes to be able to write a bounding box
# image to ZoneMinder that is visible from its console
write_image_to_zm=yes
# Adds the confidence percentage to detections
# hog/face always shows 100%
show_percent=yes
# color to be used to draw the polygons you specified
poly_color=(255,255,255)
poly_thickness=2
#import_zm_zones=yes
only_triggered_zm_zones=no
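# example: import your ZM zones as detection polygons and only keep
# objects inside zones that actually triggered the event (the docs
# suggest only_triggered_zm_zones=yes implies import_zm_zones=yes)
# import_zm_zones=yes
# only_triggered_zm_zones=yes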
# This section gives you an option to get brief animations
# of the event, delivered as part of the push notification to mobile devices
# Animations are created only if an object is detected
#
# NOTE: This will DELAY the time taken to send you push notifications
# It will first try to create the animation, which may take up to a minute
# depending on how soon it gets access to frames. See notes below
[animation]
# If yes, object detection will attempt to create
# a short GIF file around the object detection frame
# that can be sent via push notifications for instant playback
# Note this requires additional software support. Default: no
create_animation=no
# Format of animation burst
# valid options are "mp4", "gif", "mp4,gif"
# Note that gifs will be of a shorter duration
# as they take up much more disk space than mp4
animation_types='mp4,gif'
# default width of the animation image. Be cautious when you increase this:
# most mobile platforms give a very brief amount of time (in seconds)
# to download the image.
# Since your ZM instance will be serving the image, it will be slow anyway;
# making the total animation size bigger resulted in the notification not
# getting an image at all (timed out)
animation_width=640
# When an event is detected, ZM writes frames a little late
# On top of that, it looks like with caching enabled, the API layer doesn't
# get access to DB records for much longer (around 30 seconds), at least on my
# system. animation_retry_sleep refers to how long to wait before trying to grab
# frame information if it failed. animation_max_tries defines how many times it
# will try and retrieve frames before it gives up
animation_retry_sleep=15
animation_max_tries=4
# if animation_types includes gif, we can generate a fast preview gif:
# every second frame is skipped and the frame rate doubled
# to give a quick preview. Default: no
fast_gif=no
[remote]
# You can now run the machine learning code on a different server
# This frees up your ZM server for other things
# To do this, you need to set up https://github.com/pliablepixels/mlapi
# on your desired server and configure it with a user. See its instructions
# Once set up, you can choose to do object/face recognition via that
# external server
# URL that will be used
#ml_gateway=http://192.168.1.183:5000/api/v1
#ml_gateway=http://10.6.1.13:5000/api/v1
#ml_gateway=http://192.168.1.21:5000/api/v1
#ml_gateway=http://10.9.0.2:5000/api/v1
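# if yes, detection falls back to running locally
# when the remote gateway cannot be reached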
#ml_fallback_local=yes
# API user/password for the remote gateway
ml_user=!ML_USER
ml_password=!ML_PASSWORD
# config for object
[object]
# Updated note: Don't use use_sequence=no. No longer supported reliably
# If you are using legacy format (use_sequence=no) then these parameters will
# be used during ML inferencing
object_detection_pattern=(person|car|motorbike|bus|truck|boat)
object_min_confidence=0.3
object_framework=coral_edgetpu
object_processor=tpu
object_weights={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names
# If you are using the new ml_sequence format (use_sequence=yes) then
# you can fiddle with these parameters and look at ml_sequence later
# Note that these can be named anything. You can add custom variables, ad infinitum
# Google Coral
# The mobiledet model came out in Nov 2020 and is supposed to be faster and more accurate but YMMV
tpu_object_weights_mobiledet={{base_data_path}}/models/coral_edgetpu/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite
tpu_object_weights_mobilenet={{base_data_path}}/models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
tpu_object_labels={{base_data_path}}/models/coral_edgetpu/coco_indexed.names
tpu_object_framework=coral_edgetpu
tpu_object_processor=tpu
tpu_min_confidence=0.6
# Yolo v4 on GPU (falls back to CPU if no GPU)
yolo4_object_weights={{base_data_path}}/models/yolov4/yolov4.weights
yolo4_object_labels={{base_data_path}}/models/yolov4/coco.names
yolo4_object_config={{base_data_path}}/models/yolov4/yolov4.cfg
yolo4_object_framework=opencv
yolo4_object_processor=gpu
# Yolo v3 on GPU (falls back to CPU if no GPU)
yolo3_object_weights={{base_data_path}}/models/yolov3/yolov3.weights
yolo3_object_labels={{base_data_path}}/models/yolov3/coco.names
yolo3_object_config={{base_data_path}}/models/yolov3/yolov3.cfg
yolo3_object_framework=opencv
yolo3_object_processor=gpu
# Tiny Yolo V4 on GPU (falls back to CPU if no GPU)
tinyyolo_object_config={{base_data_path}}/models/tinyyolov4/yolov4-tiny.cfg
tinyyolo_object_weights={{base_data_path}}/models/tinyyolov4/yolov4-tiny.weights
tinyyolo_object_labels={{base_data_path}}/models/tinyyolov4/coco.names
tinyyolo_object_framework=opencv
tinyyolo_object_processor=gpu
[face]
face_detection_pattern=.*
known_images_path={{base_data_path}}/known_faces
unknown_images_path={{base_data_path}}/unknown_faces
save_unknown_faces=yes
save_unknown_faces_leeway_pixels=100
face_detection_framework=dlib
# read https://github.com/ageitgey/face_recognition/wiki/Face-Recognition-Accuracy-Problems
# read https://github.com/ageitgey/face_recognition#automatically-find-all-the-faces-in-an-image
# and play around
# quick overview:
# num_jitters is how many times to distort images
# upsample_times is how many times to upsample input images (for small faces, for example)
# model can be hog or cnn. cnn may be more accurate, but I haven't found it to be
face_num_jitters=1
face_model=cnn
face_upsample_times=1
# This is the maximum distance of the face under test to the closest matched
# face cluster. The larger this distance, the larger the chances of misclassification.
#
face_recog_dist_threshold=0.6
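# example: a stricter threshold cuts down false matches at the cost
# of more 'unknown face' results (hypothetical value)
# face_recog_dist_threshold=0.5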
# When we are first training the face recognition model with known faces,
# by default we use hog because we assume you will supply well-lit, front-facing faces
# However, if you are planning to train with profile photos or hard to see faces, you
# may want to change this to cnn. Note that this increases training time, but training only
# happens once, unless you retrain again by removing the training model
face_train_model=cnn
#if a face doesn't match known names, we will detect it as 'unknown face'
# you can change that to something that suits your personality better ;-)
#unknown_face_name=invader
[alpr]
alpr_detection_pattern=.*
alpr_use_after_detection_only=yes
# Many of the ALPR providers offer both a cloud version
# and local SDK version. Sometimes local SDK format differs from
# the cloud instance. Set this to local or cloud. Default cloud
alpr_api_type=cloud
# -----| If you are using plate recognizer | ------
alpr_service=plate_recognizer
#alpr_service=open_alpr_cmdline
# If you want to host a local SDK https://app.platerecognizer.com/sdk/
#alpr_url=http://192.168.1.21:8080/alpr
# Plate Recognizer: replace with your API key
alpr_key=!PLATEREC_ALPR_KEY
# if yes, then it will log usage statistics of the ALPR service
platerec_stats=yes
# If you want to specify regions. See http://docs.platerecognizer.com/#regions-supported
#platerec_regions=['us','cn','kr']
# minimum confidence for actually detecting a plate
platerec_min_dscore=0.1
# minimum confidence for the translated text
platerec_min_score=0.2
# ----| If you are using openALPR |-----
#alpr_service=open_alpr
#alpr_key=!OPENALPR_ALPR_KEY
# For an explanation of params, see http://doc.openalpr.com/api/?api=cloudapi
#openalpr_recognize_vehicle=1
#openalpr_country=us
#openalpr_state=ca
# openalpr returns percentages, but we convert them to between 0 and 1
#openalpr_min_confidence=0.3
# ----| If you are using openALPR command line |-----
openalpr_cmdline_binary=alpr
# Run alpr -help to see options, then plug them in here,
# e.g. '-j -p ca -c US' etc.
# keep the -j because the JSON output is needed
# Note that alpr_pattern is honored
# For the rest, just put them in the command line options
openalpr_cmdline_params=-j -d
openalpr_cmdline_min_confidence=0.3
## Monitor specific settings
# Examples:
# Let's assume your monitor ID is 999
[monitor-999]
# my driveway
match_past_detections=no
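# (assumption: 'wait' is the number of seconds to sleep before detection
# starts, giving ZM time to write usable frames)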
wait=5
object_detection_pattern=(person)
# Advanced example - here we want anything except potted plant
# exclusion in regular expressions is not
# as straightforward as you may think, so
# follow this pattern
# object_detection_pattern = ^(?!object1|object2|objectN)
# the characters in front implement what is
# called a negative lookahead
# object_detection_pattern=^(?!potted plant|pottedplant|bench|broccoli)
#alpr_detection_pattern=^(.*x11)
#delete_after_analyze=no
#detection_pattern=.*
#import_zm_zones=yes
# polygon areas where object detection will be done.
# You can name them anything except the keywords defined in the optional
# params below. You can put as many polygons as you want per [monitor-<mid>]
# (see examples).
my_driveway=306,356 1003,341 1074,683 154,715
# You are now allowed to specify a detection pattern per zone
# the format is <polygonname>_zone_detection_pattern=<regexp>
# So if your polygon is called my_driveway, its associated
# detection pattern will be my_driveway_zone_detection_pattern
# If none is specified, the value in object_detection_pattern
# will be used
# This also applies to ZM zones. Let's assume you have
# import_zm_zones=yes and let's suppose you have a zone in ZM
# called Front_Door. In that case, all you need to do is put in a
# front_door_zone_detection_pattern=(person|car) here
#
# NOTE: ZM Zones are converted to lowercase, and spaces are replaced
# with underscores
my_driveway_zone_detection_pattern=(person)
some_other_area=0,0 200,300 700,900
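# example: a per-zone pattern for the polygon above, following the
# <polygonname>_zone_detection_pattern convention (hypothetical pattern)
# some_other_area_zone_detection_pattern=(car|truck)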
# use license plate recognition for my driveway
# see the [alpr] section above for the additional settings it needs
resize=no
my_model_sequence=object,alpr
[ml]
# When enabled, you can specify complex ML inferencing logic in ml_sequence
# Anything specified in ml_sequence will override any other ml attributes
# Also, when enabled, stream_sequence will override any other frame related
# attributes
# Updated note: Don't set use_sequence=no. No longer supported reliably
use_sequence = yes
# if enabled, will not grab exclusive locks before running inferencing
# locking seems to cause issues on some unique file systems
disable_locks= no
# Chain of frames
# See https://zmeventnotification.readthedocs.io/en/latest/guides/hooks.html#understanding-detection-configuration
# Also see https://pyzm.readthedocs.io/en/latest/source/pyzm.html#pyzm.ml.detect_sequence.DetectSequence.detect_stream
# Very important: Make sure the final closing brace is indented
stream_sequence = {
'frame_strategy': 'most_models',
'frame_set': 'snapshot,alarm',
'contig_frames_before_error': 5,
'max_attempts': 3,
'sleep_between_attempts': 4,
'resize':800
}
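# A hedged alternative sketch: walk a fixed list of frames and stop at
# the first frame where any model matches (frame IDs are hypothetical;
# numeric IDs alongside 'alarm'/'snapshot' follow the docs linked above)
# stream_sequence = {
#     'frame_strategy': 'first',
#     'frame_set': 'alarm,snapshot,40,50',
#     'max_attempts': 3,
#     'sleep_between_attempts': 4,
#     'resize':800
# }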
# Chain of ML models to use
# See https://zmeventnotification.readthedocs.io/en/latest/guides/hooks.html#understanding-detection-configuration
# Also see https://pyzm.readthedocs.io/en/latest/source/pyzm.html#pyzm.ml.detect_sequence.DetectSequence
# Very important: Make sure the final closing brace is indented
ml_sequence= {
'general': {
'model_sequence': '{{my_model_sequence}}',
'disable_locks': '{{disable_locks}}',
'match_past_detections': '{{match_past_detections}}',
'past_det_max_diff_area': '5%',
'car_past_det_max_diff_area': '10%',
#'ignore_past_detection_labels': ['dog', 'cat'],
# when matching past detections, names in a group are treated the same
'aliases': [['car','bus','truck','boat'], ['broccoli', 'pottedplant']]
},
'object': {
'general':{
'pattern':'{{object_detection_pattern}}',
'same_model_sequence_strategy': 'first' # also 'most', 'most_unique'
},
'sequence': [{
# First run on TPU with higher confidence
'name': 'TPU object detection',
'enabled': 'no',
'object_weights':'{{tpu_object_weights_mobiledet}}',
'object_labels': '{{tpu_object_labels}}',
'object_min_confidence': {{tpu_min_confidence}},
'object_framework':'{{tpu_object_framework}}',
'tpu_max_processes': {{tpu_max_processes}},
'tpu_max_lock_wait': {{tpu_max_lock_wait}},
'max_detection_size':'{{max_detection_size}}'
},
{
# YoloV4 on GPU if TPU fails (because sequence strategy is 'first')
'name': 'YoloV4 GPU/CPU',
'enabled': 'yes', # don't really need to say this explicitly
'object_config':'{{yolo4_object_config}}',
'object_weights':'{{yolo4_object_weights}}',
'object_labels': '{{yolo4_object_labels}}',
'object_min_confidence': {{object_min_confidence}},
'object_framework':'{{yolo4_object_framework}}',
'object_processor': '{{yolo4_object_processor}}',
'gpu_max_processes': {{gpu_max_processes}},
'gpu_max_lock_wait': {{gpu_max_lock_wait}},
'cpu_max_processes': {{cpu_max_processes}},
'cpu_max_lock_wait': {{cpu_max_lock_wait}},
'max_detection_size':'{{max_detection_size}}'
}]
},
'face': {
'general':{
'pattern': '{{face_detection_pattern}}',
#'pre_existing_labels': ['person'], # when put in general section, it will check if a previous detection type (like object) found this label
'same_model_sequence_strategy': 'union' # combines all outputs of this sequence
},
'sequence': [
{
'name': 'TPU face detection',
'enabled': 'no',
'face_detection_framework': 'tpu',
'face_weights':'/var/lib/zmeventnotification/models/coral_edgetpu/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite',
'face_min_confidence': 0.3,
},
{
'name': 'DLIB based face recognition',
'enabled': 'yes',
#'pre_existing_labels': ['face'], # If you use TPU detection first, we can run this ONLY if TPU detects a face first
'save_unknown_faces':'{{save_unknown_faces}}',
'save_unknown_faces_leeway_pixels':{{save_unknown_faces_leeway_pixels}},
'face_detection_framework': '{{face_detection_framework}}',
'known_images_path': '{{known_images_path}}',
'unknown_images_path': '{{unknown_images_path}}',
'face_model': '{{face_model}}',
'face_train_model': '{{face_train_model}}',
'face_recog_dist_threshold': '{{face_recog_dist_threshold}}',
'face_num_jitters': '{{face_num_jitters}}',
'face_upsample_times':'{{face_upsample_times}}',
'gpu_max_processes': {{gpu_max_processes}},
'gpu_max_lock_wait': {{gpu_max_lock_wait}},
'cpu_max_processes': {{cpu_max_processes}},
'cpu_max_lock_wait': {{cpu_max_lock_wait}},
'max_size':800
}]
},
'alpr': {
'general':{
'same_model_sequence_strategy': 'first',
'pre_existing_labels':['car', 'motorbike', 'bus', 'truck', 'boat'],
'pattern': '{{alpr_detection_pattern}}'
},
'sequence': [{
'name': 'Platerecognizer cloud',
'enabled': 'yes',
'alpr_api_type': '{{alpr_api_type}}',
'alpr_service': '{{alpr_service}}',
'alpr_key': '{{alpr_key}}',
'platerec_stats': '{{platerec_stats}}',
'platerec_min_dscore': {{platerec_min_dscore}},
'platerec_min_score': {{platerec_min_score}},
'max_size':1600,
#'platerec_payload': {
#'regions':['us'],
#'camera_id':12,
#},
#'platerec_config': {
# 'region':'strict',
# 'mode': 'fast'
#}
}]
}
}