demo.yml
# module name, required
name: conditional_video_processing
# base module parameters
parameters:
# pipeline processing frame parameters
frame:
width: 1280
height: 720
output_frame:
codec: ${oc.env:CODEC, 'h264'}
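    # ${oc.env:CODEC, 'h264'} is an OmegaConf resolver: the codec is taken from the
    # CODEC environment variable and defaults to 'h264' when the variable is unset;
    # e.g. CODEC=hevc would switch the encoder (hevc is only an example value and
    # assumes the codec is supported by the build/hardware)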
# Encode only frames with tag "encode"
condition:
tag: encode
# PyFunc for drawing on frames
draw_func:
# Draw only on frames with tag "draw"
condition:
tag: draw
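  # frames that carry neither tag are passed through without drawing or encoding;
  # the "encode" and "draw" tags are set at runtime by the ConditionalVideoProcessing
  # pyfunc configured in the pipeline elements below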
# Etcd storage to manage processing sources
etcd:
# Etcd hosts to connect to
hosts: [etcd:2379]
    # Path in Etcd to watch for changes
watch_path: savant
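    # a source is enabled/disabled at runtime by writing a key for it under this path,
    # e.g. `etcdctl put savant/<source-id> <state>`; the exact key/value layout is an
    # assumption here and is defined by the ConditionalSkipProcessing pyfunc below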
batch_size: 1
# pipeline definition
pipeline:
  # the source definition is omitted; the ZeroMQ source is used by default to connect with source adapters
# define pipeline's main elements
elements:
    # check that the source is enabled for processing in Etcd; skip processing otherwise
- element: pyfunc
module: samples.conditional_video_processing.conditional_video_processing
class_name: ConditionalSkipProcessing
    # primary detector element; inference is performed by the DeepStream nvinfer element
    # the model type is detector (other available types are classifier and custom)
- element: nvinfer@detector
# Model's name in the pipeline, mandatory
name: peoplenet
# model definition
model:
# format of the provided model file
format: etlt
# remote storage where the model files can be found
# skip if providing model files locally
remote:
url: s3://savant-data/models/peoplenet/peoplenet_pruned_v2.0.zip
checksum_url: s3://savant-data/models/peoplenet/peoplenet_pruned_v2.0.md5
parameters:
endpoint: https://eu-central-1.linodeobjects.com
# model file name, without location
model_file: resnet34_peoplenet_pruned.etlt # v2.0 Accuracy: 84.3 Size 20.9 MB
batch_size: ${parameters.batch_size}
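        # OmegaConf interpolation that reuses the top-level parameters.batch_size value (1 in this config)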
# configuration of input data and custom preprocessing methods
input:
# model input layer name
layer_name: input_1
# model input layer shape
shape: [3, 544, 960]
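          # channels, height, width (CHW ordering)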
# pixel scaling/normalization factor
scale_factor: 0.0039215697906911373
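          # ≈ 1/255: maps 8-bit pixel values into the [0, 1] range expected by the model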
# configuration of model output
output:
# model output layer names
layer_names: [output_bbox/BiasAdd, output_cov/Sigmoid]
# number of detected classes for detector model
num_detected_classes: 3
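          # PeopleNet detects 3 classes (person, bag, face); only class 0 (person)
          # is selected in the objects list below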
# specify which detected objects are included in output
objects:
# object class id
- class_id: 0
# label assigned to objects of this class id
label: person
selector:
kwargs:
                  # minimum width of objects of this class to be included in the output
min_width: 32
                  # minimum height of objects of this class to be included in the output
min_height: 32
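                  # no custom selector module/class is specified, so these kwargs are assumed
                  # to configure the default bbox selector; detections smaller than 32x32 px
                  # are excluded from the output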
# analytics element realized in custom pyfunc
- element: pyfunc
# specify the pyfunc's python module
module: samples.conditional_video_processing.conditional_video_processing
# specify the pyfunc's python class from the module
class_name: ConditionalVideoProcessing
      # the pyfunc class's init keyword arguments;
      # they will be available as attributes of the pyfunc object
kwargs:
detections:
          # Tag frames if there are "person" detections from the "peoplenet" model
- element_name: peoplenet
labels:
- person
set_tags:
          # Set these tags on matching frames
- encode
- draw
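          # these tag names must match the condition.tag values set for
          # output_frame and draw_func in the parameters section above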
        # minimum period (ms) between switches from one state to the other
protection_interval_ms: 1000
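        # assumed behavior: once set, the tags are kept for at least 1000 ms after
        # detections disappear, preventing rapid on/off switching; the exact semantics
        # are implemented in ConditionalVideoProcessing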
    # the sink definition is omitted; the ZeroMQ sink is used by default to connect with sink adapters