From f70dcc966d3a7db5389425d725f056a9a3899b84 Mon Sep 17 00:00:00 2001 From: Rahool Paliwal <93539796+nv-rpaliwal@users.noreply.github.com> Date: Tue, 30 Aug 2022 08:57:40 -0700 Subject: [PATCH] Update to 1.1.4 release This release is compatible with DeepStream SDK 6.1.1 Ubuntu 20.04 Python 3.8 DeepStream SDK 6.1.1 Features: - New app deepstream-demux-multi-in-multi-out added - Updated deepstream_test_4.ipynb notebook - Change binding gst_nvevent_new_stream_reset() to gst_element_send_nvevent_new_stream_reset() - Deprecation: member "dims" of "NvDsInferLayerInfo" deprecated in favor of "inferDims" - Deprecation: member "atttributeIndex" of "NvDsInferAttribute" deprecated in favor of "attributeIndex" - Update to PeopleNet v2.6 for deepstream-test3 app --- HOWTO.md | 6 +- README.md | 9 +- apps/README | 6 +- .../README | 63 +++ .../deepstream_demux_multi_in_multi_out.py | 450 +++++++++++++++++ .../ds_demux_pgie_config.txt | 77 +++ .../README | 2 +- apps/deepstream-imagedata-multistream/README | 2 +- apps/deepstream-nvdsanalytics/README | 2 +- apps/deepstream-opticalflow/README | 2 +- apps/deepstream-preprocess-test/README | 2 +- apps/deepstream-rtsp-in-rtsp-out/README | 2 +- apps/deepstream-segmentation/README | 2 +- apps/deepstream-ssd-parser/README | 2 +- apps/deepstream-ssd-parser/ssd_parser.py | 2 +- apps/deepstream-test1-rtsp-out/README | 2 +- apps/deepstream-test1-usbcam/README | 2 +- apps/deepstream-test1/README | 2 +- apps/deepstream-test2/README | 2 +- apps/deepstream-test3/README | 14 +- .../config_infer_primary_peoplenet.txt | 8 +- ...ig_triton_grpc_infer_primary_peoplenet.txt | 2 +- .../config_triton_infer_primary_peoplenet.txt | 2 +- apps/deepstream-test4/README | 2 +- apps/runtime_source_add_delete/README | 2 +- bindings/CMakeLists.txt | 2 +- bindings/README.md | 24 +- bindings/docstrings/pydocumentation.h | 19 +- bindings/packaging/setup.py | 2 +- .../ubuntu-cross-aarch64.Dockerfile | 4 +- bindings/src/bindfunctions.cpp | 16 +- 
bindings/src/bindnvdsinfer.cpp | 3 + bindings/src/pyds.cpp | 2 +- docs/PYTHON_API/Methods/methodsdoc.rst | 4 +- docs/conf.py | 2 +- notebooks/deepstream_test_4.ipynb | 454 ++++++++++-------- tests/integration/README.md | 2 +- 37 files changed, 934 insertions(+), 267 deletions(-) create mode 100644 apps/deepstream-demux-multi-in-multi-out/README create mode 100644 apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py create mode 100644 apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt diff --git a/HOWTO.md b/HOWTO.md index 5ccf2a8..492891b 100644 --- a/HOWTO.md +++ b/HOWTO.md @@ -16,7 +16,7 @@ This guide provides resources for DeepStream application development in Python. ## Prerequisites * Ubuntu 20.04 -* [DeepStream SDK 6.1](https://developer.nvidia.com/deepstream-download) or later +* [DeepStream SDK 6.1.1](https://developer.nvidia.com/deepstream-download) or later * Python 3.8 * [Gst Python](https://gstreamer.freedesktop.org/modules/gst-python.html) v1.16.2 @@ -46,11 +46,11 @@ Note: Compiling bindings now also generates a pip installable python wheel for t ## Running Sample Applications -Clone the deepstream_python_apps repo under /sources: +Clone the deepstream_python_apps repo under /sources: git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps This will create the following directory: -```/sources/deepstream_python_apps``` +```/sources/deepstream_python_apps``` The Python apps are under the "apps" directory. Go into each app directory and follow instructions in the README. diff --git a/README.md b/README.md index ef8a28e..eb0e77c 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,11 @@ This repository contains Python bindings and sample applications for the [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk). -SDK version supported: 6.1 +SDK version supported: 6.1.1 The bindings sources along with build instructions are now available under [bindings](bindings)! 
-This release comes with Operating System upgrades (from Ubuntu 18.04 to Ubuntu 20.04) for DeepStreamSDK 6.1 support. This translates to upgrade in Python version to 3.8 and [gst-python](3rdparty/gst-python/) version has also been upgraded to 1.16.2 ! +This release comes with Operating System upgrades (from Ubuntu 18.04 to Ubuntu 20.04) for DeepStreamSDK 6.1.1 support. This translates to upgrade in Python version to 3.8 and [gst-python](3rdparty/gst-python/) version has also been upgraded to 1.16.2 ! Download the latest release package complete with bindings and sample applications from the [release section](../../releases). @@ -43,7 +43,7 @@ To run the sample applications or write your own, please consult the [HOW-TO Gui We currently provide the following sample applications: * [deepstream-test1](apps/deepstream-test1) -- 4-class object detection pipeline * [deepstream-test2](apps/deepstream-test2) -- 4-class object detection, tracking and attribute classification pipeline -* UPDATE [deepstream-test3](apps/deepstream-test3) -- multi-stream pipeline performing 4-class object detection - now also supports triton inference server, no-display mode, file-loop and silent mode +* [deepstream-test3](apps/deepstream-test3) -- multi-stream pipeline performing 4-class object detection - now also supports triton inference server, no-display mode, file-loop and silent mode * [deepstream-test4](apps/deepstream-test4) -- msgbroker for sending analytics results to the cloud * [deepstream-imagedata-multistream](apps/deepstream-imagedata-multistream) -- multi-stream pipeline with access to image buffers * [deepstream-ssd-parser](apps/deepstream-ssd-parser) -- SSD model inference via Triton server with output parsing in Python @@ -55,7 +55,8 @@ We currently provide the following sample applications: * [runtime_source_add_delete](apps/runtime_source_add_delete) -- add/delete source streams at runtime * 
[deepstream-imagedata-multistream-redaction](apps/deepstream-imagedata-multistream-redaction) -- multi-stream pipeline with face detection and redaction * [deepstream-rtsp-in-rtsp-out](apps/deepstream-rtsp-in-rtsp-out) -- multi-stream pipeline with RTSP input/output -* NEW [deepstream-preprocess-test](apps/deepstream-preprocess-test) -- multi-stream pipeline using nvdspreprocess plugin with custom ROIs +* [deepstream-preprocess-test](apps/deepstream-preprocess-test) -- multi-stream pipeline using nvdspreprocess plugin with custom ROIs +* NEW [deepstream-demux-multi-in-multi-out](apps/deepstream-demux-multi-in-multi-out) -- multi-stream pipeline using nvstreamdemux plugin to generated separate buffer outputs Detailed application information is provided in each application's subdirectory under [apps](apps). diff --git a/apps/README b/apps/README index 27c17fe..7ae5483 100644 --- a/apps/README +++ b/apps/README @@ -20,7 +20,7 @@ DeepStream SDK Python Bindings ================================================================================ Setup pre-requisites: - Ubuntu 20.04 -- NVIDIA DeepStream SDK 6.1 +- NVIDIA DeepStream SDK 6.1.1 - Python 3.8 - Gst-python @@ -52,7 +52,7 @@ Package Contents Installing Pre-requisites: -------------------------------------------------------------------------------- -DeepStream SDK 6.1 +DeepStream SDK 6.1.1 -------------------- Download and install from https://developer.nvidia.com/deepstream-download @@ -70,7 +70,7 @@ $ sudo apt install python3-gi python3-dev python3-gst-1.0 -y -------------------------------------------------------------------------------- Running the samples -------------------------------------------------------------------------------- -The apps are configured to work from inside the DeepStream SDK 6.1 installation. +The apps are configured to work from inside the DeepStream SDK 6.1.1 installation. 
Clone the deepstream_python_apps repo under /sources: $ git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps diff --git a/apps/deepstream-demux-multi-in-multi-out/README b/apps/deepstream-demux-multi-in-multi-out/README new file mode 100644 index 0000000..208acda --- /dev/null +++ b/apps/deepstream-demux-multi-in-multi-out/README @@ -0,0 +1,63 @@ +################################################################################ +# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +Prerequisites: +- DeepStreamSDK 6.1.1 +- Python 3.8 +- Gst-python + +To run: + $ python3 deepstream_demux_multi_in_multi_out.py -i [uri2] ... [uriN] +e.g. + $ python3 deepstream_demux_multi_in_multi_out.py -i file:///home/ubuntu/video1.mp4 file:///home/ubuntu/video2.mp4 + $ python3 deepstream_demux_multi_in_multi_out.py -i rtsp://127.0.0.1/video1 rtsp://127.0.0.1/video2 + +This document describes the sample deepstream_demux_multi_in_multi_out application. + +This sample builds on top of the deepstream-test3 sample to demonstrate how to: + +* Uses multiple sources in the pipeline. +* The pipeline uses `nvstreamdemux` to split batches and output separate buffer/streams. +* `nvstreamdemux` helps when separate output is required for each input stream. 
+ +Refer to the deepstream-test1 sample documentation for an example of simple +single-stream inference, bounding-box overlay, and rendering. + +Nvstreamdemux reference - https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_plugin_gst-nvstreamdemux.html + +This sample accepts one or more H.264/H.265 video streams as input. It creates +a source bin for each input and connects the bins to an instance of the +"nvstreammux" element, which forms the batch of frames. The batch of +frames is fed to "nvinfer" for batched inferencing. "nvstreamdemux" demuxes batched frames into individual buffers. +It creates a separate Gst Buffer for each frame in the batch. For each input separate branch is created with the following elements in series +`nvstreamdemux -> queue -> nvvidconv -> nvosd -> nveglglessink` +So for two inputs, 2 separate output windows are created, likewise for N input N outputs are created. + +The "width" and "height" properties must be set on the stream-muxer to set the +output resolution. If the input frame resolution is different from +stream-muxer's "width" and "height", the input frame will be scaled to muxer's +output resolution. + +The stream-muxer waits for a user-defined timeout before forming the batch. The +timeout is set using the "batched-push-timeout" property. If the complete batch +is formed before the timeout is reached, the batch is pushed to the downstream +element. If the timeout is reached before the complete batch can be formed +(which can happen in case of rtsp sources), the batch is formed from the +available input buffers and pushed. Ideally, the timeout of the stream-muxer +should be set based on the framerate of the fastest source. It can also be set +to -1 to make the stream-muxer wait infinitely. 
+ diff --git a/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py b/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py new file mode 100644 index 0000000..5b8bcb4 --- /dev/null +++ b/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python3 + +################################################################################ +# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ +import sys + +sys.path.append("../") +import gi +import configparser +import argparse + +gi.require_version("Gst", "1.0") +from gi.repository import Gst +from gi.repository import GLib +from ctypes import * +import time +import sys +import os +import math +import platform +from common.is_aarch_64 import is_aarch64 +from common.bus_call import bus_call +from common.FPS import PERF_DATA + +import pyds + +no_display = False +silent = False +file_loop = False +perf_data = None + +MAX_DISPLAY_LEN = 64 +PGIE_CLASS_ID_VEHICLE = 0 +PGIE_CLASS_ID_BICYCLE = 1 +PGIE_CLASS_ID_PERSON = 2 +PGIE_CLASS_ID_ROADSIGN = 3 +MUXER_OUTPUT_WIDTH = 540 +MUXER_OUTPUT_HEIGHT = 540 # 1080 +MUXER_BATCH_TIMEOUT_USEC = 4000000 +TILED_OUTPUT_WIDTH = 640 # 1280 +TILED_OUTPUT_HEIGHT = 360 # 720 +GST_CAPS_FEATURES_NVMM = "memory:NVMM" +OSD_PROCESS_MODE = 0 +OSD_DISPLAY_TEXT = 1 +pgie_classes_str = ["Vehicle", "TwoWheeler", "Person", "RoadSign"] + + +def pgie_src_pad_buffer_probe(pad, info, u_data): + """ + The function pgie_src_pad_buffer_probe() is a callback function that is called every time a buffer + is received on the source pad of the pgie element. + The function calculate the batch metadata from the buffer and iterates through the list of frame + metadata in the batch. + For each frame, it iterates through the list of object metadata and prints the frame number, number + of objects detected, and the number of vehicles, persons, bicycles, and road signs detected in the + frame. + The function also retrieves the frame rate of the stream from the frame metadata + :param pad: The pad on which the probe is attached + :param info: The Gst.PadProbeInfo object that contains the buffer + :param u_data: User data passed to the probe + :return: The return value is a Gst.PadProbeReturn.OK. 
+ """ + frame_number = 0 + num_rects = 0 + gst_buffer = info.get_buffer() + if not gst_buffer: + print("Unable to get GstBuffer ") + return + # Retrieve batch metadata from the gst_buffer + # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the + # C address of gst_buffer as input, which is obtained with hash(gst_buffer) + batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) + l_frame = batch_meta.frame_meta_list + while l_frame is not None: + try: + # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta + # The casting is done by pyds.NvDsFrameMeta.cast() + # The casting also keeps ownership of the underlying memory + # in the C code, so the Python garbage collector will leave + # it alone. + frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data) + except StopIteration: + break + + frame_number = frame_meta.frame_num + l_obj = frame_meta.obj_meta_list + num_rects = frame_meta.num_obj_meta + obj_counter = { + PGIE_CLASS_ID_VEHICLE: 0, + PGIE_CLASS_ID_PERSON: 0, + PGIE_CLASS_ID_BICYCLE: 0, + PGIE_CLASS_ID_ROADSIGN: 0, + } + while l_obj is not None: + try: + # Casting l_obj.data to pyds.NvDsObjectMeta + obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data) + except StopIteration: + break + obj_counter[obj_meta.class_id] += 1 + try: + l_obj = l_obj.next + except StopIteration: + break + print( + "Frame Number=", + frame_number, + "Number of Objects=", + num_rects, + "Vehicle_count=", + obj_counter[PGIE_CLASS_ID_VEHICLE], + "Person_count=", + obj_counter[PGIE_CLASS_ID_PERSON], + ) + + # Update frame rate through this probe + stream_index = "stream{0}".format(frame_meta.pad_index) + global perf_data + perf_data.update_fps(stream_index) + try: + l_frame = l_frame.next + except StopIteration: + break + + return Gst.PadProbeReturn.OK + + +def cb_newpad(decodebin, decoder_src_pad, data): + """ + The function is called when a new pad is created by the decodebin. + The function checks if the new pad is for video and not audio. 
+ If the new pad is for video, the function checks if the pad caps contain NVMM memory features. + If the pad caps contain NVMM memory features, the function links the decodebin pad to the source bin + ghost pad. + If the pad caps do not contain NVMM memory features, the function prints an error message. + :param decodebin: The decodebin element that is creating the new pad + :param decoder_src_pad: The source pad created by the decodebin element + :param data: This is the data that was passed to the callback function. In this case, it is the + source_bin + """ + print("In cb_newpad\n") + caps = decoder_src_pad.get_current_caps() + gststruct = caps.get_structure(0) + gstname = gststruct.get_name() + source_bin = data + features = caps.get_features(0) + + # Need to check if the pad created by the decodebin is for video and not + # audio. + print("gstname=", gstname) + if gstname.find("video") != -1: + # Link the decodebin pad only if decodebin has picked nvidia + # decoder plugin nvdec_*. We do this by checking if the pad caps contain + # NVMM memory features. + print("features=", features) + if features.contains("memory:NVMM"): + # Get the source bin ghost pad + bin_ghost_pad = source_bin.get_static_pad("src") + if not bin_ghost_pad.set_target(decoder_src_pad): + sys.stderr.write( + "Failed to link decoder src pad to source bin ghost pad\n" + ) + else: + sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n") + + +def decodebin_child_added(child_proxy, Object, name, user_data): + """ + If the child added to the decodebin is another decodebin, connect to its child-added signal. If the + child added is a source, set its drop-on-latency property to True. 
+ + :param child_proxy: The child element that was added to the decodebin + :param Object: The object that emitted the signal + :param name: The name of the element that was added + :param user_data: This is a pointer to the data that you want to pass to the callback function + """ + print("Decodebin child added:", name, "\n") + if name.find("decodebin") != -1: + Object.connect("child-added", decodebin_child_added, user_data) + + if "source" in name: + source_element = child_proxy.get_by_name("source") + if source_element.find_property("drop-on-latency") != None: + Object.set_property("drop-on-latency", True) + + +def create_source_bin(index, uri): + """ + It creates a GstBin, adds a uridecodebin to it, and connects the uridecodebin's pad-added signal to + a callback function + + :param index: The index of the source bin + :param uri: The URI of the video file to be played + :return: A bin with a uri decode bin and a ghost pad. + """ + print("Creating source bin") + + # Create a source GstBin to abstract this bin's content from the rest of the + # pipeline + bin_name = "source-bin-%02d" % index + print(bin_name) + nbin = Gst.Bin.new(bin_name) + if not nbin: + sys.stderr.write(" Unable to create source bin \n") + + # Source element for reading from the uri. + # We will use decodebin and let it figure out the container format of the + # stream and the codec and plug the appropriate demux and decode plugins. 
+ uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin") + if not uri_decode_bin: + sys.stderr.write(" Unable to create uri decode bin \n") + # We set the input uri to the source element + uri_decode_bin.set_property("uri", uri) + # Connect to the "pad-added" signal of the decodebin which generates a + # callback once a new pad for raw data has beed created by the decodebin + uri_decode_bin.connect("pad-added", cb_newpad, nbin) + uri_decode_bin.connect("child-added", decodebin_child_added, nbin) + + # We need to create a ghost pad for the source bin which will act as a proxy + # for the video decoder src pad. The ghost pad will not have a target right + # now. Once the decode bin creates the video decoder and generates the + # cb_newpad callback, we will set the ghost pad target to the video decoder + # src pad. + Gst.Bin.add(nbin, uri_decode_bin) + bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC)) + if not bin_pad: + sys.stderr.write(" Failed to add ghost pad in source bin \n") + return None + return nbin + + +def make_element(element_name, i): + """ + Creates a Gstreamer element with unique name + Unique name is created by adding element type and index e.g. `element_name-i` + Unique name is essential for all the element in pipeline otherwise gstreamer will throw exception. 
+ :param element_name: The name of the element to create + :param i: the index of the element in the pipeline + :return: A Gst.Element object + """ + element = Gst.ElementFactory.make(element_name, element_name) + if not element: + sys.stderr.write(" Unable to create {0}".format(element_name)) + element.set_property("name", "{0}-{1}".format(element_name, str(i))) + return element + + +def main(args, requested_pgie=None, config=None, disable_probe=False): + input_sources = args + number_sources = len(input_sources) + global perf_data + perf_data = PERF_DATA(number_sources) + + # Standard GStreamer initialization + Gst.init(None) + + # Create gstreamer elements */ + # Create Pipeline element that will form a connection of other elements + print("Creating Pipeline \n ") + pipeline = Gst.Pipeline() + is_live = False + + if not pipeline: + sys.stderr.write(" Unable to create Pipeline \n") + print("Creating streamux \n ") + + # Create nvstreammux instance to form batches from one or more sources. 
+ streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer") + if not streammux: + sys.stderr.write(" Unable to create NvStreamMux \n") + + pipeline.add(streammux) + for i in range(number_sources): + print("Creating source_bin ", i, " \n ") + uri_name = input_sources[i] + if uri_name.find("rtsp://") == 0: + is_live = True + source_bin = create_source_bin(i, uri_name) + if not source_bin: + sys.stderr.write("Unable to create source bin \n") + pipeline.add(source_bin) + padname = "sink_%u" % i + sinkpad = streammux.get_request_pad(padname) + if not sinkpad: + sys.stderr.write("Unable to create sink pad bin \n") + srcpad = source_bin.get_static_pad("src") + if not srcpad: + sys.stderr.write("Unable to create src pad bin \n") + srcpad.link(sinkpad) + + queue1 = Gst.ElementFactory.make("queue", "queue1") + pipeline.add(queue1) + print("Creating Pgie \n ") + pgie = Gst.ElementFactory.make("nvinfer", "primary-inference") + if not pgie: + sys.stderr.write(" Unable to create pgie \n") + + print("Creating nvstreamdemux \n ") + nvstreamdemux = Gst.ElementFactory.make("nvstreamdemux", "nvstreamdemux") + if not nvstreamdemux: + sys.stderr.write(" Unable to create nvstreamdemux \n") + + if is_live: + print("Atleast one of the sources is live") + streammux.set_property("live-source", 1) + + streammux.set_property("width", 960) + streammux.set_property("height", 540) + streammux.set_property("batch-size", number_sources) + streammux.set_property("batched-push-timeout", 4000000) + pgie.set_property("config-file-path", "ds_demux_pgie_config.txt") + pgie_batch_size = pgie.get_property("batch-size") + if pgie_batch_size != number_sources: + print( + "WARNING: Overriding infer-config batch-size", + pgie_batch_size, + " with number of sources ", + number_sources, + " \n", + ) + pgie.set_property("batch-size", number_sources) + + print("Adding elements to Pipeline \n") + pipeline.add(pgie) + pipeline.add(nvstreamdemux) + + # linking + streammux.link(queue1) + queue1.link(pgie) + 
pgie.link(nvstreamdemux) + ##creating demux src + + for i in range(number_sources): + # pipeline nvstreamdemux -> queue -> nvvidconv -> nvosd -> (if Jetson) nvegltransform -> nveglgl + # Creating EGLsink + print("Creating EGLSink \n") + sink = make_element("nveglglessink", i) + pipeline.add(sink) + + # creating queue + queue = make_element("queue", i) + pipeline.add(queue) + + # creating nvvidconv + nvvideoconvert = make_element("nvvideoconvert", i) + pipeline.add(nvvideoconvert) + + # creating nvosd + nvdsosd = make_element("nvdsosd", i) + pipeline.add(nvdsosd) + nvdsosd.set_property("process-mode", OSD_PROCESS_MODE) + nvdsosd.set_property("display-text", OSD_DISPLAY_TEXT) + + # connect nvstreamdemux -> queue + padname = "src_%u" % i + demuxsrcpad = nvstreamdemux.get_request_pad(padname) + if not demuxsrcpad: + sys.stderr.write("Unable to create demux src pad \n") + + queuesinkpad = queue.get_static_pad("sink") + if not queuesinkpad: + sys.stderr.write("Unable to create queue sink pad \n") + demuxsrcpad.link(queuesinkpad) + + if (is_aarch64()): + print("Creating transform \n ") + transform = make_element("nvegltransform", i) + pipeline.add(transform) + if not transform: + sys.stderr.write(" Unable to create transform \n") + + # connect queue -> nvvidconv -> nvosd -> nveglgl + queue.link(nvvideoconvert) + nvvideoconvert.link(nvdsosd) + if (is_aarch64()): + nvdsosd.link(transform) + transform.link(sink) + else: + nvdsosd.link(sink) + + sink.set_property("qos", 0) + + print("Linking elements in the Pipeline \n") + # create an event loop and feed gstreamer bus mesages to it + loop = GLib.MainLoop() + bus = pipeline.get_bus() + bus.add_signal_watch() + bus.connect("message", bus_call, loop) + pgie_src_pad = pgie.get_static_pad("src") + if not pgie_src_pad: + sys.stderr.write(" Unable to get src pad \n") + else: + pgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0) + # perf callback function to print fps every 5 sec + GLib.timeout_add(5000, 
perf_data.perf_print_callback) + + # List the sources + print("Now playing...") + for i, source in enumerate(input_sources): + print(i, ": ", source) + + print("Starting pipeline \n") + # start play back and listed to events + pipeline.set_state(Gst.State.PLAYING) + + try: + loop.run() + except: + pass + # cleanup + print("Exiting app\n") + pipeline.set_state(Gst.State.NULL) + + +def parse_args(): + parser = argparse.ArgumentParser(prog="deepstream_demux_multi_in_multi_out.py", + description="deepstream-demux-multi-in-multi-out takes multiple URI streams as input" \ + "and uses `nvstreamdemux` to split batches and output separate buffer/streams") + parser.add_argument( + "-i", + "--input", + help="Path to input streams", + nargs="+", + metavar="URIs", + default=["a"], + required=True, + ) + + args = parser.parse_args() + stream_paths = args.input + return stream_paths + + +if __name__ == "__main__": + stream_paths = parse_args() + sys.exit(main(stream_paths)) diff --git a/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt b/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt new file mode 100644 index 0000000..48ea51a --- /dev/null +++ b/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt @@ -0,0 +1,77 @@ +################################################################################ +# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +# Following properties are mandatory when engine files are not specified: +# int8-calib-file(Only in INT8) +# Caffemodel mandatory properties: model-file, proto-file, output-blob-names +# UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names +# ONNX: onnx-file +# +# Mandatory properties for detectors: +# num-detected-classes +# +# Optional properties for detectors: +# cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0) +# custom-lib-path +# parse-bbox-func-name +# +# Mandatory properties for classifiers: +# classifier-threshold, is-classifier +# +# Optional properties for classifiers: +# classifier-async-mode(Secondary mode only, Default=false) +# +# Optional properties in secondary mode: +# operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes), +# input-object-min-width, input-object-min-height, input-object-max-width, +# input-object-max-height +# +# Following properties are always recommended: +# batch-size(Default=1) +# +# Other optional properties: +# net-scale-factor(Default=1), network-mode(Default=0 i.e FP32), +# model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path, +# mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary), +# custom-lib-path, network-mode(Default=0 i.e FP32) +# +# The values in the config file are overridden by values set through GObject +# properties. 
+ +[property] +gpu-id=0 +net-scale-factor=0.0039215697906911373 +model-file=../../../../samples/models/Primary_Detector/resnet10.caffemodel +proto-file=../../../../samples/models/Primary_Detector/resnet10.prototxt +model-engine-file=../../../../samples/models/Primary_Detector/resnet10.caffemodel_b1_gpu0_int8.engine +labelfile-path=../../../../samples/models/Primary_Detector/labels.txt +int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin +force-implicit-batch-dim=1 +batch-size=1 +process-mode=1 +model-color-format=0 +network-mode=1 +num-detected-classes=4 +interval=0 +gie-unique-id=1 +output-blob-names=conv2d_bbox;conv2d_cov/Sigmoid +cluster-mode=2 + +[class-attrs-all] +pre-cluster-threshold=0.2 +topk=20 +nms-iou-threshold=0.5 diff --git a/apps/deepstream-imagedata-multistream-redaction/README b/apps/deepstream-imagedata-multistream-redaction/README index 08335a2..39181dd 100755 --- a/apps/deepstream-imagedata-multistream-redaction/README +++ b/apps/deepstream-imagedata-multistream-redaction/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - NumPy package diff --git a/apps/deepstream-imagedata-multistream/README b/apps/deepstream-imagedata-multistream/README index 8c07500..1dcc08f 100755 --- a/apps/deepstream-imagedata-multistream/README +++ b/apps/deepstream-imagedata-multistream/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - NumPy package diff --git a/apps/deepstream-nvdsanalytics/README b/apps/deepstream-nvdsanalytics/README index fc83057..90093ea 100755 --- a/apps/deepstream-nvdsanalytics/README +++ b/apps/deepstream-nvdsanalytics/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- 
DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python diff --git a/apps/deepstream-opticalflow/README b/apps/deepstream-opticalflow/README index 32dbb8c..ab9013e 100755 --- a/apps/deepstream-opticalflow/README +++ b/apps/deepstream-opticalflow/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - NumPy package diff --git a/apps/deepstream-preprocess-test/README b/apps/deepstream-preprocess-test/README index 425a372..e2006bd 100644 --- a/apps/deepstream-preprocess-test/README +++ b/apps/deepstream-preprocess-test/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - GstRtspServer diff --git a/apps/deepstream-rtsp-in-rtsp-out/README b/apps/deepstream-rtsp-in-rtsp-out/README index 376bf34..390768b 100755 --- a/apps/deepstream-rtsp-in-rtsp-out/README +++ b/apps/deepstream-rtsp-in-rtsp-out/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - GstRtspServer diff --git a/apps/deepstream-segmentation/README b/apps/deepstream-segmentation/README index a3068b9..701a079 100644 --- a/apps/deepstream-segmentation/README +++ b/apps/deepstream-segmentation/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - NumPy package diff --git a/apps/deepstream-ssd-parser/README b/apps/deepstream-ssd-parser/README index a05631b..812b0f1 100644 --- a/apps/deepstream-ssd-parser/README +++ b/apps/deepstream-ssd-parser/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: 
-- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - NVIDIA Triton Inference Server - Python 3.8 - Gst-python diff --git a/apps/deepstream-ssd-parser/ssd_parser.py b/apps/deepstream-ssd-parser/ssd_parser.py index e3b9dcf..5fb8702 100644 --- a/apps/deepstream-ssd-parser/ssd_parser.py +++ b/apps/deepstream-ssd-parser/ssd_parser.py @@ -146,7 +146,7 @@ def nvds_infer_parse_custom_tf_ssd(output_layer_info, detection_param, box_size_ if num_detection_layer.buffer: num_detection = int(pyds.get_detections(num_detection_layer.buffer, 0)) - num_detection = clip(num_detection, 0, class_layer.dims.d[0]) + num_detection = clip(num_detection, 0, class_layer.inferDims.d[0]) x3_layers = score_layer, class_layer, box_layer object_list = [] diff --git a/apps/deepstream-test1-rtsp-out/README b/apps/deepstream-test1-rtsp-out/README index 37fa464..445fe71 100644 --- a/apps/deepstream-test1-rtsp-out/README +++ b/apps/deepstream-test1-rtsp-out/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python - GstRtspServer diff --git a/apps/deepstream-test1-usbcam/README b/apps/deepstream-test1-usbcam/README index 01aa793..b72f1ea 100644 --- a/apps/deepstream-test1-usbcam/README +++ b/apps/deepstream-test1-usbcam/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python diff --git a/apps/deepstream-test1/README b/apps/deepstream-test1/README index f6bf17b..e4893f6 100644 --- a/apps/deepstream-test1/README +++ b/apps/deepstream-test1/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python diff --git a/apps/deepstream-test2/README b/apps/deepstream-test2/README index 8055b7e..5a3780a 100644 --- 
a/apps/deepstream-test2/README +++ b/apps/deepstream-test2/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python diff --git a/apps/deepstream-test3/README b/apps/deepstream-test3/README index 14ac8c4..9a55dd6 100755 --- a/apps/deepstream-test3/README +++ b/apps/deepstream-test3/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - NVIDIA Triton Inference Server (optional) - Python 3.8 - Gst-python @@ -62,7 +62,7 @@ Also follow these instructions for multi-stream Triton support (optional): " enable=1 plugin-type=0 - model-engine-file=../../models/tao_pretrained_models/peopleNet/V2.5/<.engine file> + model-engine-file=../../models/tao_pretrained_models/peopleNet/V2.6/<.engine file> batch-size= config-file=config_infer_primary_peoplenet.txt " @@ -70,10 +70,10 @@ Also follow these instructions for multi-stream Triton support (optional): For ex. " tlt-model-key=tlt_encode - tlt-encoded-model=../../models/tao_pretrained_models/peopleNet/V2.5/resnet34_peoplenet_int8.etlt - labelfile-path=../../models/tao_pretrained_models/peopleNet/V2.5/labels.txt - model-engine-file=../../models/tao_pretrained_models/peopleNet/V2.5/<.engine file> - int8-calib-file=../../models/tao_pretrained_models/peopleNet/V2.5/resnet34_peoplenet_int8.txt + tlt-encoded-model=../../models/tao_pretrained_models/peopleNet/V2.6/resnet34_peoplenet_int8.etlt + labelfile-path=/opt/nvidia/deepstream/deepstream/samples/configs/tao_pretrained_models/labels_peoplenet.txt + model-engine-file=../../models/tao_pretrained_models/peopleNet/V2.6/<.engine file> + int8-calib-file=../../models/tao_pretrained_models/peopleNet/V2.6/resnet34_peoplenet_int8.txt batch-size=16 " d. 
While inside the dir /opt/nvidia/deepstream/deepstream/samples/configs/tao_pretrained_models/ , run the deepstream-app @@ -85,7 +85,7 @@ Also follow these instructions for multi-stream Triton support (optional): e. Create the following dir if not present: sudo mkdir -p /opt/nvidia/deepstream/deepstream/samples/triton_model_repo/peoplenet/1/ - f. Copy engine file from dir /opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleNet/V2.5/ + f. Copy engine file from dir /opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleNet/V2.6/ to /opt/nvidia/deepstream/deepstream/samples/triton_model_repo/peoplenet/1/ diff --git a/apps/deepstream-test3/config_infer_primary_peoplenet.txt b/apps/deepstream-test3/config_infer_primary_peoplenet.txt index 67bc007..2d7da9f 100644 --- a/apps/deepstream-test3/config_infer_primary_peoplenet.txt +++ b/apps/deepstream-test3/config_infer_primary_peoplenet.txt @@ -19,10 +19,10 @@ gpu-id=0 net-scale-factor=0.0039215697906911373 tlt-model-key=tlt_encode -tlt-encoded-model=../../../../samples/models/tao_pretrained_models/peopleNet/V2.5/resnet34_peoplenet_int8.etlt -labelfile-path=../../../../samples/models/tao_pretrained_models/peopleNet/V2.5/labels.txt -model-engine-file=../../../../samples/models/tao_pretrained_models/peopleNet/V2.5/resnet34_peoplenet_int8.etlt_b1_gpu0_int8.engine -int8-calib-file=../../../../samples/models/tao_pretrained_models/peopleNet/V2.5/resnet34_peoplenet_int8.txt +tlt-encoded-model=../../../../samples/models/tao_pretrained_models/peopleNet/V2.6/resnet34_peoplenet_int8.etlt +labelfile-path=../../../../samples/configs/tao_pretrained_models/labels_peoplenet.txt +model-engine-file=../../../../samples/models/tao_pretrained_models/peopleNet/V2.6/resnet34_peoplenet_int8.etlt_b1_gpu0_int8.engine +int8-calib-file=../../../../samples/models/tao_pretrained_models/peopleNet/V2.6/resnet34_peoplenet_int8.txt input-dims=3;544;960;0 uff-input-blob-name=input_1 batch-size=1 diff --git 
a/apps/deepstream-test3/config_triton_grpc_infer_primary_peoplenet.txt b/apps/deepstream-test3/config_triton_grpc_infer_primary_peoplenet.txt index 1eeb183..bd0fa06 100644 --- a/apps/deepstream-test3/config_triton_grpc_infer_primary_peoplenet.txt +++ b/apps/deepstream-test3/config_triton_grpc_infer_primary_peoplenet.txt @@ -43,7 +43,7 @@ infer_config { } postprocess { - labelfile_path: "/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleNet/V2.5/labels.txt" + labelfile_path: "/opt/nvidia/deepstream/deepstream/samples/configs/tao_pretrained_models/labels_peoplenet.txt" detection { num_detected_classes: 3 per_class_params { diff --git a/apps/deepstream-test3/config_triton_infer_primary_peoplenet.txt b/apps/deepstream-test3/config_triton_infer_primary_peoplenet.txt index 5cb2a3f..f3410f5 100644 --- a/apps/deepstream-test3/config_triton_infer_primary_peoplenet.txt +++ b/apps/deepstream-test3/config_triton_infer_primary_peoplenet.txt @@ -44,7 +44,7 @@ infer_config { } postprocess { - labelfile_path: "/opt/nvidia/deepstream/deepstream/samples/models/tao_pretrained_models/peopleNet/V2.5/labels.txt" + labelfile_path: "/opt/nvidia/deepstream/deepstream/samples/configs/tao_pretrained_models/labels_peoplenet.txt" detection { num_detected_classes: 3 per_class_params { diff --git a/apps/deepstream-test4/README b/apps/deepstream-test4/README index 76c3586..fdf91a5 100755 --- a/apps/deepstream-test4/README +++ b/apps/deepstream-test4/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python diff --git a/apps/runtime_source_add_delete/README b/apps/runtime_source_add_delete/README index fd27327..e98bf82 100644 --- a/apps/runtime_source_add_delete/README +++ b/apps/runtime_source_add_delete/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 
6.1 +- DeepStreamSDK 6.1.1 - Python 3.8 - Gst-python diff --git a/bindings/CMakeLists.txt b/bindings/CMakeLists.txt index ab366d5..be61a51 100644 --- a/bindings/CMakeLists.txt +++ b/bindings/CMakeLists.txt @@ -50,7 +50,7 @@ set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--no-undefined") # Setting python build versions set(PYTHON_VERSION ${PYTHON_MAJOR_VERSION}.${PYTHON_MINOR_VERSION}) -set(PIP_WHEEL pyds-1.1.3-py3-none-${PIP_PLATFORM}.whl) +set(PIP_WHEEL pyds-1.1.4-py3-none-${PIP_PLATFORM}.whl) # Describing pyds build project(pyds DESCRIPTION "Python bindings for Deepstream") diff --git a/bindings/README.md b/bindings/README.md index 199e497..865fa79 100644 --- a/bindings/README.md +++ b/bindings/README.md @@ -1,6 +1,6 @@ # DeepStream python bindings -SDK version supported: 6.1 +SDK version supported: 6.1.1 The latest prebuilt release package complete with python bindings and sample applications can be downloaded from the [release section](../../../releases) for both x86 and Jetson platforms. @@ -16,14 +16,14 @@ The readme is divided into three main parts: - [1.4 Installing Gst-python](#14-installing-gst-python) - [2 - Compiling the bindings](#2-compiling-the-bindings) - [2.1 Quick build (x86-ubuntu-18.04 | python 3.6 | Deepstream 6.0.1)](#21-quick-build-x86-ubuntu-1804--python-36--deepstream-601) - - [2.1.1 Quick build (x86-ubuntu-20.04 | python 3.8 | Deepstream 6.1)](#211-quick-build-x86-ubuntu-2004--python-38--deepstream-61) + - [2.1.1 Quick build (x86-ubuntu-20.04 | python 3.8 | Deepstream 6.1.1)](#211-quick-build-x86-ubuntu-2004--python-38--deepstream-61) - [2.2 Advanced build](#22-advanced-build) - [2.2.1 Using Cmake options](#221-using-cmake-options) - [2.2.2 Available cmake options](#222-available-cmake-options) - [2.2.3 Example](#223-example) - [2.3 Cross-Compilation for aarch64 on x86](#23-cross-compilation-for-aarch64-on-x86) - [2.3.1 Build Pre-requisites](#231-build-pre-requisites) - - [2.3.2 Download the JetPack SDK 5.0.1 DP](#232-download-the-jetpack-sdk-501-dp) 
+ - [2.3.2 Download the JetPack SDK 5.0.2](#232-download-the-jetpack-sdk-502) - [2.3.3 Generate the cross-compile build container](#233-generate-the-cross-compile-build-container) - [2.3.4 Launch the cross-compile build container](#234-launch-the-cross-compile-build-container) - [2.3.5 Build DeepStreamSDK python bindings](#235-build-deepstreamsdk-python-bindings) @@ -58,12 +58,12 @@ apt install python3-gi python3-dev python3-gst-1.0 python-gi-dev git python-dev ### 1.3 Initialization of submodules -Make sure you clone the deepstream_python_apps repo under /sources: +Make sure you clone the deepstream_python_apps repo under /sources: git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps This will create the following directory: ``` -/sources/deepstream_python_apps +/sources/deepstream_python_apps ``` The repository utilizes gst-python and pybind11 submodules. @@ -104,7 +104,7 @@ cmake .. -DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=6 make ``` -### 2.1.1 Quick build (x86-ubuntu-20.04 | python 3.8 | Deepstream 6.1) +### 2.1.1 Quick build (x86-ubuntu-20.04 | python 3.8 | Deepstream 6.1.1) ```bash cd deepstream_python_apps/bindings mkdir build @@ -164,17 +164,17 @@ sudo apt-get install qemu binfmt-support qemu-user-static docker run --rm --privileged dockerhub.nvidia.com/multiarch/qemu-user-static --reset -p yes # Verify qemu installation -docker run --rm -t nvcr.io/nvidia/deepstream-l4t:6.1-samples uname -m +docker run --rm -t nvcr.io/nvidia/deepstream-l4t:6.1.1-samples uname -m #aarch64 ``` -#### 2.3.2 Download the JetPack SDK 5.0.1 DP +#### 2.3.2 Download the JetPack SDK 5.0.2 Cross-compilation for Jetson on x86 host requires some low level libraries which can be downloaded using SDK Manager. Follow these steps to obtain these libraries, which are utilized by the docker build later. 1. Download and install the [NVIDIA SDK manager](https://developer.nvidia.com/nvidia-sdk-manager) 2. Launch the SDK Manager and login with your NVIDIA developer account. 
-3. Select the platform and target OS (example: Jetson AGX Xavier, `Linux Jetpack 5.0.1 DP`) and click Continue. +3. Select the platform and target OS (example: Jetson AGX Xavier, `Linux Jetpack 5.0.2`) and click Continue. 4. Under `Download & Install Options` change the download folder and select `Download now, Install later`. Agree to the license terms and click Continue. 5. Go to the download folder, and run: @@ -195,7 +195,7 @@ Below command generates the build container cd deepstream_python_apps/bindings # Make sure you are in deepstream_python_apps/bindings directory # This command builds the cross-compile docker and adds the mentioned tag -docker build --tag=deepstream-6.1-ubuntu20.04-python-l4t -f qemu_docker/ubuntu-cross-aarch64.Dockerfile . +docker build --tag=deepstream-6.1.1-ubuntu20.04-python-l4t -f qemu_docker/ubuntu-cross-aarch64.Dockerfile . ``` #### 2.3.4 Launch the cross-compile build container @@ -205,7 +205,7 @@ docker build --tag=deepstream-6.1-ubuntu20.04-python-l4t -f qemu_docker/ubuntu-c mkdir export_pyds # Run the container. Make sure the tag matches the one from Generate step above -docker run -it -v $PWD/export_pyds:/export_pyds deepstream-6.1-ubuntu20.04-python-l4t bash +docker run -it -v $PWD/export_pyds:/export_pyds deepstream-6.1.1-ubuntu20.04-python-l4t bash ``` #### 2.3.5 Build DeepStreamSDK python bindings @@ -246,7 +246,7 @@ Following commands can be used to install the generated pip wheel. ### 3.1 Installing the pip wheel ```bash -pip3 install ./pyds-1.1.3-py3-none*.whl +pip3 install ./pyds-1.1.4-py3-none*.whl ``` #### 3.1.1 pip wheel troubleshooting diff --git a/bindings/docstrings/pydocumentation.h b/bindings/docstrings/pydocumentation.h index c0c069e..2343071 100644 --- a/bindings/docstrings/pydocumentation.h +++ b/bindings/docstrings/pydocumentation.h @@ -1376,7 +1376,8 @@ namespace pydsdoc Holds information about one layer in the model. :ivar dataType: :class:`NvDsInferDataType`, Data type of the layer. 
- :ivar dims: :class:`NvDsInferDims`, Dimensions of the layer. + :ivar dims: :class:`NvDsInferDims`, Dimensions of the layer. WARNING: to be deprecated, please change all usage to "inferDims". + :ivar inferDims: :class:`NvDsInferDims`, Dimensions of the layer. :ivar bindingIndex: *int*, TensorRT binding index of the layer. :ivar layerName: *str*, Name of the layer. :ivar buffer: Buffer for the layer data. @@ -1417,7 +1418,8 @@ namespace pydsdoc constexpr const char* descr = R"pyds( Holds information about one classified attribute. - :ivar atttributeIndex: *int*, Index of the label. This index corresponds to the order of output layers specified in the outputCoverageLayerNames vector during initialization. + :ivar atttributeIndex: *int*, Index of the label. This index corresponds to the order of output layers specified in the outputCoverageLayerNames vector during initialization. WARNING: misspelling to be deprecated, please change all usage to "attributeIndex". + :ivar attributeIndex: *int*, Index of the label. This index corresponds to the order of output layers specified in the outputCoverageLayerNames vector during initialization. :ivar attributeValue: *int*, Output for the label. :ivar attributeConfidence: *float*, Confidence level for the classified attribute. :ivar attributeLabel: *str*, String label for the attribute. Memory for the string should not be freed.)pyds"; @@ -2058,13 +2060,16 @@ namespace pydsdoc :returns: 0 for success, -1 for failure.)pyds"; - constexpr const char* gst_nvevent_new_stream_reset=R"pyds( - Creates a "custom reset" event for the specified source. + constexpr const char* gst_element_send_nvevent_new_stream_reset=R"pyds( + Sends a "custom reset" event on the given element for the specified source. + This nvevent_new_stream_reset event is propagated downstream. - This function can be used to reset the source in case RTSP reconnection - is required. 
+ This function, along with other reset events, can be used to reset the source + in case RTSP reconnection is required. - :arg source_id: source id for which this event needs to be generated.)pyds"; + :arg gst_element: element to which the generated event needs to be sent. + :arg source_id: source id for which this event needs to be generated + :returns: True for success.)pyds"; } namespace nvoptical diff --git a/bindings/packaging/setup.py b/bindings/packaging/setup.py index 512dfe9..df48a50 100644 --- a/bindings/packaging/setup.py +++ b/bindings/packaging/setup.py @@ -17,7 +17,7 @@ setuptools.setup( name="pyds", - version="1.1.3", + version="1.1.4", author="NVIDIA", description="Install precompiled DeepStream Python bindings extension", url="nvidia.com", diff --git a/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile b/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile index 1b19839..edfa4c5 100644 --- a/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile +++ b/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM nvcr.io/nvidia/deepstream-l4t:6.1-samples +FROM nvcr.io/nvidia/deepstream-l4t:6.1.1-samples LABEL maintainer="NVIDIA CORPORATION" # Set timezone. 
@@ -74,7 +74,7 @@ COPY docker/jetpack_files/Jetson*Linux_R*aarch64.tbz2 /bsp_files/ # Copy libs from BSP RUN cd /bsp_files \ - && tar -jxpf Jetson*Linux_R*aarch64.tbz2 \ + && tar -jxpf Jetson*Linux_R35*aarch64.tbz2 \ && cd Linux_for_Tegra/nv_tegra \ && tar -jxpf nvidia_drivers.tbz2 \ && cp -aprf usr/lib/aarch64-linux-gnu/tegra/libnvbuf*.so.1.0.0 /opt/nvidia/deepstream/deepstream/lib/ \ diff --git a/bindings/src/bindfunctions.cpp b/bindings/src/bindfunctions.cpp index 0d0e2b8..da8832e 100644 --- a/bindings/src/bindfunctions.cpp +++ b/bindings/src/bindfunctions.cpp @@ -752,10 +752,18 @@ namespace pydeepstream { pydsdoc::methodsDoc::get_segmentation_masks); /* Start binding for /sources/includes/gst-nvevent.h */ - m.def("gst_nvevent_new_stream_reset", - [](uint32_t source_id) { - return gst_nvevent_new_stream_reset(source_id); + /** + * Sends the custom nvevent_new_stream_reset + * for the element it is called upon. This event + * is propagated downstream. The function returns + * True if the event was handled. 
+ */ + m.def("gst_element_send_nvevent_new_stream_reset", + [](size_t gst_element, int source_id) { + auto *element = reinterpret_cast<GstElement *>(gst_element); + return gst_element_send_event(element, gst_nvevent_new_stream_reset(source_id)); }, - pydsdoc::methodsDoc::gst_nvevent_new_stream_reset); + pydsdoc::methodsDoc::gst_element_send_nvevent_new_stream_reset); + } } diff --git a/bindings/src/bindnvdsinfer.cpp b/bindings/src/bindnvdsinfer.cpp index 23ae69d..17c3330 100644 --- a/bindings/src/bindnvdsinfer.cpp +++ b/bindings/src/bindnvdsinfer.cpp @@ -99,6 +99,7 @@ namespace pydeepstream { .def(py::init<>()) .def_readonly("dataType", &NvDsInferLayerInfo::dataType) .def_readonly("dims", &NvDsInferLayerInfo::inferDims) + .def_readonly("inferDims", &NvDsInferLayerInfo::inferDims) .def_readonly("bindingIndex", &NvDsInferLayerInfo::bindingIndex) .def_readonly("layerName", &NvDsInferLayerInfo::layerName) .def_readonly("buffer", &NvDsInferLayerInfo::buffer) @@ -171,6 +172,8 @@ namespace pydeepstream { py::class_<NvDsInferAttribute>(m, "NvDsInferAttribute", pydsdoc::NvInferDoc::NvDsInferAttributeDoc::descr) .def(py::init<>()) + .def_readonly("attributeIndex", + &NvDsInferAttribute::attributeIndex) .def_readonly("atttributeIndex", &NvDsInferAttribute::attributeIndex) .def_readonly("attributeValue", diff --git a/bindings/src/pyds.cpp b/bindings/src/pyds.cpp index db662c3..2fcbab2 100644 --- a/bindings/src/pyds.cpp +++ b/bindings/src/pyds.cpp @@ -34,7 +34,7 @@ #include */ -#define PYDS_VERSION "1.1.3" +#define PYDS_VERSION "1.1.4" using namespace std; namespace py = pybind11; diff --git a/docs/PYTHON_API/Methods/methodsdoc.rst b/docs/PYTHON_API/Methods/methodsdoc.rst index 1c2cc02..b3a5564 100644 --- a/docs/PYTHON_API/Methods/methodsdoc.rst +++ b/docs/PYTHON_API/Methods/methodsdoc.rst @@ -1,8 +1,8 @@ ====================== -gst_nvevent_new_stream_reset +gst_element_send_nvevent_new_stream_reset ====================== -.. 
autofunction:: pyds.gst_element_send_nvevent_new_stream_reset ====================== get_segmentation_masks diff --git a/docs/conf.py b/docs/conf.py index 9208c40..344dee8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,7 +38,7 @@ project = 'Deepstream' copyright = '2019-2022, NVIDIA.' author = 'NVIDIA' -version = 'Deepstream Version: 6.1' +version = 'Deepstream Version: 6.1.1' release = version diff --git a/notebooks/deepstream_test_4.ipynb b/notebooks/deepstream_test_4.ipynb index ff652f2..43663d6 100644 --- a/notebooks/deepstream_test_4.ipynb +++ b/notebooks/deepstream_test_4.ipynb @@ -36,51 +36,76 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Installing necessary libraries (kafka)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!sudo apt-get install libglib2.0 libglib2.0-dev\n", - "!sudo apt-get install libjansson4 libjansson-dev\n", - "!sudo apt-get install librdkafka1=0.11.3-1build1" + "# Prerequisites" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Preparing the necessary arguments \n", + " * **DeepStream SDK 6.1.1**\n", "\n", - "cfg_file = Path to adaptor config file\n", + "To setup and install DeepStream 6.1.1, please follow the steps at https://developer.nvidia.com/deepstream-getting-started\n", + " \n", + " * **DeepStream Python Apps**\n", "\n", - "input_file = Path to input x264 stream\n", + "To install DeepStream Python Apps, follow the instructions in this repo: https://github.com/NVIDIA-AI-IOT/deepstream_python_apps\n", "\n", - "proto_lib = Absolute path to adaptor library\n", + " * [**Jupyter Notebook**](https://jupyter.org/install)\n", "\n", - "conn_str = Connection string of backend server. Optional if it is part of config file.\n", + "To install jupyter notebook, run the following command:\n", "\n", - "topic = Name of message topic. 
Optional if it is part of connection string or config file.\n", + "```\n", + "pip3 install notebook\n", + "``` \n", + "
\n", + "* **Build dependencies with installation instructions:**\n", "\n", - "no_display = To disable display. Default False\n", - "schema_type = Type of minimal schema. 0= Full. 1= Minimal. Default =0" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "cfg_file = \"cfg_kafka.txt\"\n", - "input_file = \"\"\n", - "proto_lib = \"/home/nvidia/Downloads/deepstream/sources/libs/kafka_protocol_adaptor/\"\n", - "conn_str = \"host;port;topic\"\n", - "topic = \"\"\n", - "no_display = False\n", - "schema_type = 0 " + "    __librdkafka__\n", + "\n", + " Note that for using TLS/SSL security, make sure to build librdkafka with\n", + " SSL suport enabled by using the enable_ssl option while running\n", + " 'configure', as shown below. Also note that these are the same steps as setup in [Deepstream Quickstart Guide](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Quickstart.html#id1). If already previously completed, please skip this step. \n", + "```\n", + " git clone https://github.com/edenhill/librdkafka.git\n", + " cd librdkafka\n", + " git reset --hard 063a9ae7a65cebdf1cc128da9815c05f91a2a996\n", + " ./configure --enable-ssl\n", + " make\n", + " sudo make install\n", + " sudo cp /usr/local/lib/librdkafka* /opt/nvidia/deepstream/deepstream/lib/\n", + " sudo ldconfig\n", + "```\n", + "NOTE: To compile the sources, run make with \"sudo\" or root permission.\n", + "\n", + "    __glib 2.0__\n", + " \n", + "```\n", + " apt-get install libglib2.0 libglib2.0-dev \n", + "```\n", + "
\n", + " \n", + "    __jansson__\n", + " \n", + "```\n", + " apt-get install libjansson4 libjansson-dev\n", + "``` \n", + "
\n", + " \n", + "    __ssl__\n", + " \n", + "```\n", + " apt-get install libssl-dev\n", + " ```\n", + "
\n", + "\n", + "* (Optional): Install & setup kafka broker on your machine & create topic(s). See instructions here: https://kafka.apache.org/quickstart (These are the same steps as deepstream-test4)\n", + "\n", + " 1. Install kafka server\n", + " 2. In a terminal tab, navigate to where the kafka server is installed. Run\n", + "```bin/zookeeper-server-start.sh config/zookeeper.properties```\n", + " 3. Open another tab in terminal. Run ``bin/kafka-server-start.sh config/server.properties`` from the same directory where kafka is installed.\n", + " 4. Open another tab in terminal. Run ``bin/kafka-topics.sh --create --topic topic --bootstrap-server localhost:9092``. You may need to delete an old topic (``./bin/kafka-topics.sh --delete --topic topic --bootstrap-server localhost:9092``) and recreate if the topic is already created. Again, run in same directory where kafka is installed." ] }, { @@ -97,14 +122,14 @@ "outputs": [], "source": [ "import sys\n", + "\n", "sys.path.append('../')\n", - "sys.path.append('/usr/lib/x86_64-linux-gnu/gstreamer-1.0/deepstream/')\n", + "sys.path.append('../apps/')\n", "import gi\n", + "\n", "gi.require_version('Gst', '1.0')\n", - "from gi.repository import GObject, Gst\n", - "from gi.repository import GLib\n", + "from gi.repository import GLib, Gst\n", "import sys\n", - "import platform\n", "from optparse import OptionParser\n", "from common.is_aarch_64 import is_aarch64\n", "from common.bus_call import bus_call\n", @@ -119,29 +144,62 @@ "# Declaring class label ids and other meta data requirements" ] }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "## Preparing the necessary arguments \n", + "\n", + "cfg_file = Path to adaptor config file\n", + "\n", + "input_file = Path to input x264 stream\n", + "\n", + "proto_lib = Absolute path to kafka proto file\n", + "\n", + "conn_str = Connection string of backend server. Optional if it is part of config file.\n", + "\n", + "topic = Name of message topic. 
Optional if it is part of connection string or config file.\n", + "\n", + "no_display = To disable display. Default False\n", + "schema_type = Type of minimal schema. 0= Full. 1= Minimal. Default =0" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: \n", + "cfg_file = \"../apps/deepstream-test4/cfg_kafka.txt\"\n", + "input_file = \"/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264\"\n", + "proto_lib = \"/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so\"\n", + "conn_str = \"localhost;2181;testTopic\"\n", + "topic = \"topic\"\n", + "no_display = False\n", + "schema_type = 0 " + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "MAX_DISPLAY_LEN=64\n", - "MAX_TIME_STAMP_LEN=32\n", + "MAX_DISPLAY_LEN = 64\n", + "MAX_TIME_STAMP_LEN = 32\n", "PGIE_CLASS_ID_VEHICLE = 0\n", "PGIE_CLASS_ID_BICYCLE = 1\n", "PGIE_CLASS_ID_PERSON = 2\n", "PGIE_CLASS_ID_ROADSIGN = 3\n", - "MUXER_OUTPUT_WIDTH=1920\n", - "MUXER_OUTPUT_HEIGHT=1080\n", - "MUXER_BATCH_TIMEOUT_USEC=4000000\n", - "input_file = \"/home/nvidia/Downloads/deepstream/sources/python/apps/sample_720p.h264\"\n", - "schema_type = 0\n", - "frame_number = 0\n", - "proto_lib = \"/home/nvidia/Downloads/deepstream/sources/libs/kafka_protocol_adaptor/\"\n", - "conn_str=\"localhost;2181;testTopic\"\n", - "cfg_file = \"cfg_kafka.txt\"\n", - "topic = None\n", - "no_display = False" + "MUXER_OUTPUT_WIDTH = 1920\n", + "MUXER_OUTPUT_HEIGHT = 1080\n", + "MUXER_BATCH_TIMEOUT_USEC = 4000000\n", + "cfg_file = \"../apps/deepstream-test4/cfg_kafka.txt\"\n", + "input_file = \"/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264\"\n", + "proto_lib = \"/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so\"\n", + "conn_str = \"localhost;2181;testTopic\" # make sure to change to your own connection string\n", + "topic = \"topic\"\n", + "no_display = False\n", + "schema_type = 0 " ] }, { @@ -157,11 +215,9 @@ 
"metadata": {}, "outputs": [], "source": [ - "PGIE_CONFIG_FILE=\"dstest4_pgie_config.txt\"\n", - "MSCONV_CONFIG_FILE=\"dstest4_msgconv_config.txt\"\n", - "\n", - "\n", - "pgie_classes_str=[\"Vehicle\", \"TwoWheeler\", \"Person\",\"Roadsign\"]" + "PGIE_CONFIG_FILE = \"../apps/deepstream-test4/dstest4_pgie_config.txt\"\n", + "MSCONV_CONFIG_FILE = \"../apps/deepstream-test4/dstest4_msgconv_config.txt\"\n", + "pgie_classes_str = [\"Vehicle\", \"TwoWheeler\", \"Person\", \"Roadsign\"]" ] }, { @@ -177,59 +233,60 @@ "metadata": {}, "outputs": [], "source": [ - "def meta_copy_func(data,user_data):\n", + "def meta_copy_func(data, user_data):\n", " # Cast data to pyds.NvDsUserMeta\n", - " user_meta=pyds.glist_get_nvds_user_meta(data)\n", - " src_meta_data=user_meta.user_meta_data\n", + " user_meta = pyds.NvDsUserMeta.cast(data)\n", + " src_meta_data = user_meta.user_meta_data\n", " # Cast src_meta_data to pyds.NvDsEventMsgMeta\n", - " srcmeta=pyds.glist_get_nvds_event_msg_meta(src_meta_data)\n", + " srcmeta = pyds.NvDsEventMsgMeta.cast(src_meta_data)\n", " # Duplicate the memory contents of srcmeta to dstmeta\n", " # First use pyds.get_ptr() to get the C address of srcmeta, then\n", " # use pyds.memdup() to allocate dstmeta and copy srcmeta into it.\n", " # pyds.memdup returns C address of the allocated duplicate.\n", - " dstmeta_ptr=pyds.memdup(pyds.get_ptr(srcmeta), sys.getsizeof(pyds.NvDsEventMsgMeta))\n", + " dstmeta_ptr = pyds.memdup(pyds.get_ptr(srcmeta),\n", + " sys.getsizeof(pyds.NvDsEventMsgMeta))\n", " # Cast the duplicated memory to pyds.NvDsEventMsgMeta\n", - " dstmeta=pyds.glist_get_nvds_event_msg_meta(dstmeta_ptr)\n", + " dstmeta = pyds.NvDsEventMsgMeta.cast(dstmeta_ptr)\n", "\n", " # Duplicate contents of ts field. Note that reading srcmeat.ts\n", " # returns its C address. This allows to memory operations to be\n", " # performed on it.\n", - " dstmeta.ts=pyds.memdup(srcmeta.ts, MAX_TIME_STAMP_LEN+1)\n", - "\n", - " # Copy the sensorStr. 
This field is a string property.\n", - " # The getter (read) returns its C address. The setter (write)\n", - " # takes string as input, allocates a string buffer and copies\n", - " # the input string into it.\n", - " # pyds.get_string() takes C address of a string and returns\n", - " # the reference to a string object and the assignment inside the binder copies content.\n", - " dstmeta.sensorStr=pyds.get_string(srcmeta.sensorStr)\n", - "\n", - " if(srcmeta.objSignature.size>0):\n", - " dstmeta.objSignature.signature=pyds.memdup(srcmeta.objSignature.signature,srcMeta.objSignature.size)\n", - " dstmeta.objSignature.size = srcmeta.objSignature.size;\n", - "\n", - " if(srcmeta.extMsgSize>0):\n", - " if(srcmeta.objType==pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE):\n", - " srcobj = pyds.glist_get_nvds_vehicle_object(srcmeta.extMsg);\n", - " obj = pyds.alloc_nvds_vehicle_object();\n", - " obj.type=pyds.get_string(srcobj.type)\n", - " obj.make=pyds.get_string(srcobj.make)\n", - " obj.model=pyds.get_string(srcobj.model)\n", - " obj.color=pyds.get_string(srcobj.color)\n", + " dstmeta.ts = pyds.memdup(srcmeta.ts, MAX_TIME_STAMP_LEN + 1)\n", + "\n", + " # Copy the sensorStr. This field is a string property. The getter (read)\n", + " # returns its C address. 
The setter (write) takes string as input,\n", + " # allocates a string buffer and copies the input string into it.\n", + " # pyds.get_string() takes C address of a string and returns the reference\n", + " # to a string object and the assignment inside the binder copies content.\n", + " dstmeta.sensorStr = pyds.get_string(srcmeta.sensorStr)\n", + "\n", + " if srcmeta.objSignature.size > 0:\n", + " dstmeta.objSignature.signature = pyds.memdup(\n", + " srcmeta.objSignature.signature, srcmeta.objSignature.size)\n", + " dstmeta.objSignature.size = srcmeta.objSignature.size\n", + "\n", + " if srcmeta.extMsgSize > 0:\n", + " if srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE:\n", + " srcobj = pyds.NvDsVehicleObject.cast(srcmeta.extMsg)\n", + " obj = pyds.alloc_nvds_vehicle_object()\n", + " obj.type = pyds.get_string(srcobj.type)\n", + " obj.make = pyds.get_string(srcobj.make)\n", + " obj.model = pyds.get_string(srcobj.model)\n", + " obj.color = pyds.get_string(srcobj.color)\n", " obj.license = pyds.get_string(srcobj.license)\n", " obj.region = pyds.get_string(srcobj.region)\n", - " dstmeta.extMsg = obj;\n", + " dstmeta.extMsg = obj\n", " dstmeta.extMsgSize = sys.getsizeof(pyds.NvDsVehicleObject)\n", - " if(srcmeta.objType==pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON):\n", - " srcobj = pyds.glist_get_nvds_person_object(srcmeta.extMsg);\n", + " if srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON:\n", + " srcobj = pyds.NvDsPersonObject.cast(srcmeta.extMsg)\n", " obj = pyds.alloc_nvds_person_object()\n", " obj.age = srcobj.age\n", - " obj.gender = pyds.get_string(srcobj.gender);\n", + " obj.gender = pyds.get_string(srcobj.gender)\n", " obj.cap = pyds.get_string(srcobj.cap)\n", " obj.hair = pyds.get_string(srcobj.hair)\n", - " obj.apparel = pyds.get_string(srcobj.apparel);\n", - " dstmeta.extMsg = obj;\n", - " dstmeta.extMsgSize = sys.getsizeof(pyds.NvDsVehicleObject);\n", + " obj.apparel = pyds.get_string(srcobj.apparel)\n", + " dstmeta.extMsg = 
obj\n", + " dstmeta.extMsgSize = sys.getsizeof(pyds.NvDsPersonObject)\n", "\n", " return dstmeta" ] @@ -243,40 +300,40 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "def meta_free_func(data,user_data):\n", - " user_meta=pyds.glist_get_nvds_user_meta(data)\n", - " srcmeta=pyds.glist_get_nvds_event_msg_meta(user_meta.user_meta_data)\n", + "def meta_free_func(data, user_data):\n", + " user_meta = pyds.NvDsUserMeta.cast(data)\n", + " srcmeta = pyds.NvDsEventMsgMeta.cast(user_meta.user_meta_data)\n", "\n", " # pyds.free_buffer takes C address of a buffer and frees the memory\n", " # It's a NOP if the address is NULL\n", " pyds.free_buffer(srcmeta.ts)\n", " pyds.free_buffer(srcmeta.sensorStr)\n", "\n", - " if(srcmeta.objSignature.size > 0):\n", - " pyds.free_buffer(srcmeta.objSignature.signature);\n", + " if srcmeta.objSignature.size > 0:\n", + " pyds.free_buffer(srcmeta.objSignature.signature)\n", " srcmeta.objSignature.size = 0\n", "\n", - " if(srcmeta.extMsgSize > 0):\n", - " if(srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE):\n", - " obj =pyds.glist_get_nvds_vehicle_object(srcmeta.extMsg)\n", - " pyds.free_buffer(obj.type);\n", - " pyds.free_buffer(obj.color);\n", - " pyds.free_buffer(obj.make);\n", - " pyds.free_buffer(obj.model);\n", - " pyds.free_buffer(obj.license);\n", - " pyds.free_buffer(obj.region);\n", - " if(srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON):\n", - " obj = pyds.glist_get_nvds_person_object(srcmeta.extMsg);\n", - " pyds.free_buffer(obj.gender);\n", - " pyds.free_buffer(obj.cap);\n", - " pyds.free_buffer(obj.hair);\n", - " pyds.free_buffer(obj.apparel);\n", - " pyds.free_gbuffer(srcmeta.extMsg);\n", - " srcmeta.extMsgSize = 0;" + " if srcmeta.extMsgSize > 0:\n", + " if srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE:\n", + " obj = pyds.NvDsVehicleObject.cast(srcmeta.extMsg)\n", + " pyds.free_buffer(obj.type)\n", + " 
pyds.free_buffer(obj.color)\n", + " pyds.free_buffer(obj.make)\n", + " pyds.free_buffer(obj.model)\n", + " pyds.free_buffer(obj.license)\n", + " pyds.free_buffer(obj.region)\n", + " if srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON:\n", + " obj = pyds.NvDsPersonObject.cast(srcmeta.extMsg)\n", + " pyds.free_buffer(obj.gender)\n", + " pyds.free_buffer(obj.cap)\n", + " pyds.free_buffer(obj.hair)\n", + " pyds.free_buffer(obj.apparel)\n", + " pyds.free_gbuffer(srcmeta.extMsg)\n", + " srcmeta.extMsgSize = 0" ] }, { @@ -293,22 +350,23 @@ "outputs": [], "source": [ "def generate_vehicle_meta(data):\n", - " obj = pyds.glist_get_nvds_vehicle_object(data);\n", - " obj.type =\"sedan\"\n", - " obj.color=\"blue\"\n", - " obj.make =\"Bugatti\"\n", + " obj = pyds.NvDsVehicleObject.cast(data)\n", + " obj.type = \"sedan\"\n", + " obj.color = \"blue\"\n", + " obj.make = \"Bugatti\"\n", " obj.model = \"M\"\n", - " obj.license =\"XX1234\"\n", - " obj.region =\"CA\"\n", + " obj.license = \"XX1234\"\n", + " obj.region = \"CA\"\n", " return obj\n", "\n", + "\n", "def generate_person_meta(data):\n", - " obj = pyds.glist_get_nvds_person_object(data)\n", + " obj = pyds.NvDsPersonObject.cast(data)\n", " obj.age = 45\n", " obj.cap = \"none\"\n", " obj.hair = \"black\"\n", " obj.gender = \"male\"\n", - " obj.apparel= \"formal\"\n", + " obj.apparel = \"formal\"\n", " return obj" ] }, @@ -326,7 +384,7 @@ "outputs": [], "source": [ "def generate_event_msg_meta(data, class_id):\n", - " meta =pyds.glist_get_nvds_event_msg_meta(data)\n", + " meta = pyds.NvDsEventMsgMeta.cast(data)\n", " meta.sensorId = 0\n", " meta.placeId = 0\n", " meta.moduleId = 0\n", @@ -338,20 +396,20 @@ " # Any custom object as per requirement can be generated and attached\n", " # like NvDsVehicleObject / NvDsPersonObject. 
Then that object should\n", " # be handled in payload generator library (nvmsgconv.cpp) accordingly.\n", - " if(class_id==PGIE_CLASS_ID_VEHICLE):\n", + " if class_id == PGIE_CLASS_ID_VEHICLE:\n", " meta.type = pyds.NvDsEventType.NVDS_EVENT_MOVING\n", " meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE\n", " meta.objClassId = PGIE_CLASS_ID_VEHICLE\n", " obj = pyds.alloc_nvds_vehicle_object()\n", " obj = generate_vehicle_meta(obj)\n", " meta.extMsg = obj\n", - " meta.extMsgSize = sys.getsizeof(pyds.NvDsVehicleObject);\n", - " if(class_id == PGIE_CLASS_ID_PERSON):\n", - " meta.type =pyds.NvDsEventType.NVDS_EVENT_ENTRY\n", - " meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON;\n", + " meta.extMsgSize = sys.getsizeof(pyds.NvDsVehicleObject)\n", + " if class_id == PGIE_CLASS_ID_PERSON:\n", + " meta.type = pyds.NvDsEventType.NVDS_EVENT_ENTRY\n", + " meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON\n", " meta.objClassId = PGIE_CLASS_ID_PERSON\n", " obj = pyds.alloc_nvds_person_object()\n", - " obj=generate_person_meta(obj)\n", + " obj = generate_person_meta(obj)\n", " meta.extMsg = obj\n", " meta.extMsgSize = sys.getsizeof(pyds.NvDsPersonObject)\n", " return meta" @@ -384,16 +442,15 @@ "metadata": {}, "outputs": [], "source": [ - "def osd_sink_pad_buffer_probe(pad,info,u_data):\n", - " frame_number=0\n", - " #Intiallizing object counter with 0.\n", + "def osd_sink_pad_buffer_probe(pad, info, u_data):\n", + " frame_number = 0\n", + " # Initializing object counter with 0.\n", " obj_counter = {\n", - " PGIE_CLASS_ID_VEHICLE:0,\n", - " PGIE_CLASS_ID_PERSON:0,\n", - " PGIE_CLASS_ID_BICYCLE:0,\n", - " PGIE_CLASS_ID_ROADSIGN:0\n", + " PGIE_CLASS_ID_VEHICLE: 0,\n", + " PGIE_CLASS_ID_PERSON: 0,\n", + " PGIE_CLASS_ID_BICYCLE: 0,\n", + " PGIE_CLASS_ID_ROADSIGN: 0\n", " }\n", - " is_first_object=True\n", " gst_buffer = info.get_buffer()\n", " if not gst_buffer:\n", " print(\"Unable to get GstBuffer \")\n", @@ -409,36 +466,36 @@ " while l_frame is not 
None:\n", " try:\n", " # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n", - " # The casting is done by pyds.glist_get_nvds_frame_meta()\n", + " # The casting is done by pyds.NvDsFrameMeta.cast()\n", " # The casting also keeps ownership of the underlying memory\n", " # in the C code, so the Python garbage collector will leave\n", " # it alone.\n", - " frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)\n", + " frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n", " except StopIteration:\n", " continue\n", - " is_first_object = True;\n", - "\n", - " '''\n", - " print(\"Frame Number is \", frame_meta.frame_num)\n", - " print(\"Source id is \", frame_meta.source_id)\n", - " print(\"Batch id is \", frame_meta.batch_id)\n", - " print(\"Source Frame Width \", frame_meta.source_frame_width)\n", - " print(\"Source Frame Height \", frame_meta.source_frame_height)\n", - " print(\"Num object meta \", frame_meta.num_obj_meta)\n", - " '''\n", - " frame_number=frame_meta.frame_num\n", - " l_obj=frame_meta.obj_meta_list\n", + " is_first_object = True\n", + "\n", + " # Short example of attribute access for frame_meta:\n", + " # print(\"Frame Number is \", frame_meta.frame_num)\n", + " # print(\"Source id is \", frame_meta.source_id)\n", + " # print(\"Batch id is \", frame_meta.batch_id)\n", + " # print(\"Source Frame Width \", frame_meta.source_frame_width)\n", + " # print(\"Source Frame Height \", frame_meta.source_frame_height)\n", + " # print(\"Num object meta \", frame_meta.num_obj_meta)\n", + "\n", + " frame_number = frame_meta.frame_num\n", + " l_obj = frame_meta.obj_meta_list\n", " while l_obj is not None:\n", " try:\n", - " obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)\n", + " obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)\n", " except StopIteration:\n", " continue\n", "\n", " # Update the object text display\n", - " txt_params=obj_meta.text_params\n", - " if(txt_params.display_text):\n", - " pyds.free_buffer(txt_params.display_text)\n", + " 
txt_params = obj_meta.text_params\n", "\n", + " # Set display_text. Any existing display_text string will be\n", + " # freed by the bindings module.\n", " txt_params.display_text = pgie_classes_str[obj_meta.class_id]\n", "\n", " obj_counter[obj_meta.class_id] += 1\n", @@ -447,57 +504,61 @@ " txt_params.font_params.font_name = \"Serif\"\n", " txt_params.font_params.font_size = 10\n", " # set(red, green, blue, alpha); set to White\n", - " txt_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0);\n", + " txt_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)\n", "\n", " # Text background color\n", " txt_params.set_bg_clr = 1\n", " # set(red, green, blue, alpha); set to Black\n", - " txt_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0);\n", + " txt_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)\n", "\n", " # Ideally NVDS_EVENT_MSG_META should be attached to buffer by the\n", " # component implementing detection / recognition logic.\n", " # Here it demonstrates how to use / attach that meta data.\n", - " if(is_first_object and not (frame_number%30)):\n", + " if is_first_object and (frame_number % 30) == 0:\n", " # Frequency of messages to be send will be based on use case.\n", " # Here message is being sent for first object every 30 frames.\n", "\n", - " # Allocating an NvDsEventMsgMeta instance and getting reference\n", - " # to it. The underlying memory is not manged by Python so that\n", - " # downstream plugins can access it. Otherwise the garbage collector\n", - " # will free it when this probe exits.\n", - " msg_meta=pyds.alloc_nvds_event_msg_meta()\n", - " msg_meta.bbox.top = obj_meta.rect_params.top\n", - " msg_meta.bbox.left = obj_meta.rect_params.left\n", + " # Allocating an NvDsEventMsgMeta instance and getting\n", + " # reference to it. The underlying memory is not managed by\n", + " # Python so that downstream plugins can access it. 
Otherwise\n", + " # the garbage collector will free it when this probe exits.\n", + " msg_meta = pyds.alloc_nvds_event_msg_meta()\n", + " msg_meta.bbox.top = obj_meta.rect_params.top\n", + " msg_meta.bbox.left = obj_meta.rect_params.left\n", " msg_meta.bbox.width = obj_meta.rect_params.width\n", " msg_meta.bbox.height = obj_meta.rect_params.height\n", " msg_meta.frameId = frame_number\n", " msg_meta.trackingId = long_to_uint64(obj_meta.object_id)\n", " msg_meta.confidence = obj_meta.confidence\n", " msg_meta = generate_event_msg_meta(msg_meta, obj_meta.class_id)\n", - " user_event_meta = pyds.nvds_acquire_user_meta_from_pool(batch_meta)\n", - " if(user_event_meta):\n", - " user_event_meta.user_meta_data = msg_meta;\n", + " user_event_meta = pyds.nvds_acquire_user_meta_from_pool(\n", + " batch_meta)\n", + " if user_event_meta:\n", + " user_event_meta.user_meta_data = msg_meta\n", " user_event_meta.base_meta.meta_type = pyds.NvDsMetaType.NVDS_EVENT_MSG_META\n", - " # Setting callbacks in the event msg meta. The bindings layer\n", - " # will wrap these callables in C functions. Currently only one\n", - " # set of callbacks is supported.\n", - " pyds.set_user_copyfunc(user_event_meta, meta_copy_func)\n", - " pyds.set_user_releasefunc(user_event_meta, meta_free_func)\n", - " pyds.nvds_add_user_meta_to_frame(frame_meta, user_event_meta)\n", + " # Setting callbacks in the event msg meta. 
The bindings\n", + " # layer will wrap these callables in C functions.\n", + " # Currently only one set of callbacks is supported.\n", + " pyds.user_copyfunc(user_event_meta, meta_copy_func)\n", + " pyds.user_releasefunc(user_event_meta, meta_free_func)\n", + " pyds.nvds_add_user_meta_to_frame(frame_meta,\n", + " user_event_meta)\n", " else:\n", " print(\"Error in attaching event meta to buffer\\n\")\n", "\n", " is_first_object = False\n", " try:\n", - " l_obj=l_obj.next\n", + " l_obj = l_obj.next\n", " except StopIteration:\n", " break\n", " try:\n", - " l_frame=l_frame.next\n", + " l_frame = l_frame.next\n", " except StopIteration:\n", " break\n", "\n", - " print(\"Frame Number =\",frame_number,\"Vehicle Count =\",obj_counter[PGIE_CLASS_ID_VEHICLE],\"Person Count =\",obj_counter[PGIE_CLASS_ID_PERSON])\n", + " print(\"Frame Number =\", frame_number, \"Vehicle Count =\",\n", + " obj_counter[PGIE_CLASS_ID_VEHICLE], \"Person Count =\",\n", + " obj_counter[PGIE_CLASS_ID_PERSON])\n", " return Gst.PadProbeReturn.OK" ] }, @@ -514,9 +575,12 @@ "metadata": {}, "outputs": [], "source": [ - "GObject.threads_init()\n", "Gst.init(None)\n", "\n", + "# registering callbacks\n", + "pyds.register_user_copyfunc(meta_copy_func)\n", + "pyds.register_user_releasefunc(meta_free_func)\n", + "\n", "print(\"Creating Pipeline \\n \")\n", "\n", "pipeline = Gst.Pipeline()\n", @@ -787,18 +851,19 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"Playing file %s \" %input_file)\n", + "print(\"Playing file %s \" % input_file)\n", "source.set_property('location', input_file)\n", "streammux.set_property('width', 1920)\n", "streammux.set_property('height', 1080)\n", "streammux.set_property('batch-size', 1)\n", "streammux.set_property('batched-push-timeout', 4000000)\n", "pgie.set_property('config-file-path', PGIE_CONFIG_FILE)\n", - "msgconv.set_property('config',MSCONV_CONFIG_FILE)\n", + "msgconv.set_property('config', MSCONV_CONFIG_FILE)\n", "msgconv.set_property('payload-type', 
schema_type)\n", "msgbroker.set_property('proto-lib', proto_lib)\n", "msgbroker.set_property('conn-str', conn_str)\n", - "msgbroker.set_property('config', cfg_file)\n", + "if cfg_file is not None:\n", + " msgbroker.set_property('config', cfg_file)\n", "if topic is not None:\n", " msgbroker.set_property('topic', topic)\n", "msgbroker.set_property('sync', False)" ] }, @@ -871,13 +936,13 @@ " transform.link(sink)\n", "else:\n", " queue2.link(sink)\n", - "sink_pad=queue1.get_static_pad(\"sink\")\n", - "tee_msg_pad=tee.get_request_pad('src_%u')\n", - "tee_render_pad=tee.get_request_pad(\"src_%u\")\n", + "sink_pad = queue1.get_static_pad(\"sink\")\n", + "tee_msg_pad = tee.get_request_pad('src_%u')\n", + "tee_render_pad = tee.get_request_pad(\"src_%u\")\n", "if not tee_msg_pad or not tee_render_pad:\n", " sys.stderr.write(\"Unable to get request pads\\n\")\n", "tee_msg_pad.link(sink_pad)\n", - "sink_pad=queue2.get_static_pad(\"sink\")\n", + "sink_pad = queue2.get_static_pad(\"sink\")\n", "tee_render_pad.link(sink_pad)" ] }, @@ -894,15 +959,15 @@ "metadata": {}, "outputs": [], "source": [ - "loop = GObject.MainLoop()\n", + "loop = GLib.MainLoop()\n", "bus = pipeline.get_bus()\n", "bus.add_signal_watch()\n", - "bus.connect (\"message\", bus_call, loop)\n", + "bus.connect(\"message\", bus_call, loop)\n", "\n", "osdsinkpad = nvosd.get_static_pad(\"sink\")\n", "if not osdsinkpad:\n", " sys.stderr.write(\" Unable to get sink pad of nvosd \\n\")\n", - "\n", + "\n", "osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)" ] }, @@ -916,7 +981,9 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ "print(\"Starting pipeline \\n\")\n", @@ -945,18 +1012,11 @@ "pyds.unset_callback_funcs()\n", "pipeline.set_state(Gst.State.NULL)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - 
"display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -970,7 +1030,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.9" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/tests/integration/README.md b/tests/integration/README.md index 3147b6f..93afd04 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -55,7 +55,7 @@ python3.8 -m venv env ### step3 ``` . env/bin/activate -pip install pyds-1.1.3-py3-none-*.whl +pip install pyds-1.1.4-py3-none-*.whl pip install pytest cd ../../tests/integration pytest test.py