diff --git a/.github/workflows/object-detection-cv25.yml b/.github/workflows/object-detection-cv25.yml
index ade30d7..79838f4 100644
--- a/.github/workflows/object-detection-cv25.yml
+++ b/.github/workflows/object-detection-cv25.yml
@@ -26,7 +26,7 @@ jobs:
      - name: Build ${{ env.example }} application
        env:
          example: ${{ env.EXNAME }}
-          imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}-${{ matrix.chip }}:${{ matrix.arch }}
+          imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}_${{ matrix.chip }}:${{ matrix.arch }}
        run: |
          docker image rm -f $imagetag
          cd $EXNAME
diff --git a/.github/workflows/object-detection.yml b/.github/workflows/object-detection.yml
index 12349ec..24617a3 100644
--- a/.github/workflows/object-detection.yml
+++ b/.github/workflows/object-detection.yml
@@ -15,13 +15,16 @@ jobs:
        include:
          - arch: armv7hf
            chip: cpu
-            axis-os: 12.1.60
+            axis-os: 12.2.36
          - arch: armv7hf
            chip: edgetpu
-            axis-os: 12.1.60
+            axis-os: 12.2.36
          - arch: aarch64
            chip: artpec8
-            axis-os: 12.1.60
+            axis-os: 12.2.36
+          - arch: aarch64
+            chip: artpec9
+            axis-os: 12.2.36
    env:
      EXREPO: acap-native-examples
      EXNAME: object-detection
@@ -32,7 +35,7 @@ jobs:
      - name: Build ${{ env.example }} application
        env:
          example: ${{ env.EXNAME }}-${{ matrix.chip }}
-          imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}-${{ matrix.chip }}:${{ matrix.arch }}
+          imagetag: ${{ env.EXREPO }}_${{ env.EXNAME }}_${{ matrix.chip }}:${{ matrix.arch }}
        run: |
          docker image rm -f $imagetag
          cd $EXNAME
diff --git a/.github/workflows/tensorflow-to-larod-artpec8.yml b/.github/workflows/tensorflow-to-larod-artpec8.yml
index 51b5d11..58767d0 100644
--- a/.github/workflows/tensorflow-to-larod-artpec8.yml
+++ b/.github/workflows/tensorflow-to-larod-artpec8.yml
@@ -14,8 +14,6 @@ jobs:
      matrix:
        include:
          - arch: aarch64
-            chip: artpec8
-            axis-os: 12.1.60
    env:
      EXREPO: acap-native-examples
      EXNAME: tensorflow-to-larod-artpec8
diff --git a/.github/workflows/tensorflow-to-larod-artpec9.yml b/.github/workflows/tensorflow-to-larod-artpec9.yml
new file mode 100644
index 0000000..0c314bd
--- /dev/null
+++ b/.github/workflows/tensorflow-to-larod-artpec9.yml
@@ -0,0 +1,14 @@
+# This is a dummy file for linters, the real test is run in
+# tensorflow-to-larod-artpec8
+name: Build tensorflow-to-larod-artpec9 application
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - 'dummy-branch-that-does-not-exist'
+
+jobs:
+  dummy:
+    runs-on: ubuntu-latest
+    steps:
+      - run: echo "This is a dummy workflow"
diff --git a/.github/workflows/tensorflow-to-larod-cv25.yml b/.github/workflows/tensorflow-to-larod-cv25.yml
index a15e256..5e77113 100644
--- a/.github/workflows/tensorflow-to-larod-cv25.yml
+++ b/.github/workflows/tensorflow-to-larod-cv25.yml
@@ -14,8 +14,6 @@ jobs:
      matrix:
        include:
          - arch: aarch64
-            chip: cv25
-            axis-os: 12.1.60
    env:
      EXREPO: acap-native-examples
      EXNAME: tensorflow-to-larod-cv25
diff --git a/.github/workflows/tensorflow-to-larod.yml b/.github/workflows/tensorflow-to-larod.yml
index 8eea149..08ee24f 100644
--- a/.github/workflows/tensorflow-to-larod.yml
+++ b/.github/workflows/tensorflow-to-larod.yml
@@ -14,8 +14,6 @@ jobs:
      matrix:
        include:
          - arch: armv7hf
-            chip: edgetpu
-            axis-os: 12.1.60
    env:
      EXREPO: acap-native-examples
      EXNAME: tensorflow-to-larod
diff --git a/.github/workflows/vdo-larod.yml b/.github/workflows/vdo-larod.yml
index 4128013..35e2bc4 100644
--- a/.github/workflows/vdo-larod.yml
+++ b/.github/workflows/vdo-larod.yml
@@ -15,16 +15,19 @@ jobs:
        include:
          - arch: armv7hf
            chip: cpu
-            axis-os: 12.1.60
+            axis-os: 12.2.36
          - arch: armv7hf
            chip: edgetpu
-            axis-os: 12.1.60
+            axis-os: 12.2.36
          - arch: aarch64
            chip: artpec8
-            axis-os: 12.1.60
+            axis-os: 12.2.36
+          - arch: aarch64
+            chip: artpec9
+            axis-os: 12.2.36
          - arch: aarch64
            chip: cv25
-            axis-os: 12.1.60
+            axis-os: 12.2.36
    env:
      EXREPO: acap-native-examples
      EXNAME: vdo-larod
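A quick way to sanity-check the renamed image tags and the new `artpec9` matrix entry is to approximate the CI job locally. This is a hedged sketch, not the workflow's exact step: the diff shows `docker image rm -f $imagetag` and `cd $EXNAME`, while the `docker build` arguments are assumed from the example READMEs.

```sh
# Hypothetical local equivalent of the object-detection job's new matrix entry
# (arch=aarch64, chip=artpec9); the tag follows the EXREPO_EXNAME_chip:arch pattern.
imagetag=acap-native-examples_object-detection_artpec9:aarch64
docker image rm -f "$imagetag"
cd object-detection
docker build --build-arg ARCH=aarch64 --build-arg CHIP=artpec9 --tag "$imagetag" .
```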
diff --git a/README.md b/README.md
index 71ad7df..5dec4bf 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,8 @@ Below is the list of examples available in the repository.
  - An example that shows model conversion, model quantization, image formats and custom models.
- [tensorflow-to-larod-artpec8](./tensorflow-to-larod-artpec8/)
  - An example that shows model conversion, model quantization, image formats and custom models on AXIS ARTPEC-8 devices.
+- [tensorflow-to-larod-artpec9](./tensorflow-to-larod-artpec9/)
+  - An example that shows model conversion, model quantization, image formats and custom models on AXIS ARTPEC-9 devices. Note that this example points to [tensorflow-to-larod-artpec8](./tensorflow-to-larod-artpec8).
- [tensorflow-to-larod-cv25](./tensorflow-to-larod-cv25/)
  - An example that shows model conversion, model quantization, image formats and custom models on AXIS CV25 devices.
- [using-opencv](./using-opencv/)
diff --git a/object-detection-cv25/README.md b/object-detection-cv25/README.md
index 7102ab2..2643bf2 100644
--- a/object-detection-cv25/README.md
+++ b/object-detection-cv25/README.md
@@ -273,12 +273,13 @@ Depending on selected chip, different output is received. The label file is used
In the system log the chip is sometimes only mentioned as a string, they are mapped as follows:

-| Chips | Larod 1 (int) | Larod 3 (string) |
+| Chips | Larod 1 (int) | Larod 3 |
|-------|--------------|------------------|
| CPU with TensorFlow Lite | 2 | cpu-tflite |
| Google TPU | 4 | google-edge-tpu-tflite |
| Ambarella CVFlow (NN) | 6 | ambarella-cvflow |
| ARTPEC-8 DLPU | 12 | axis-a8-dlpu-tflite |
+| ARTPEC-9 DLPU | - | a9-dlpu-tflite |

There are four outputs from MobileNet SSD v2 (COCO) model. The number of detections, cLasses, scores, and locations are shown as below. The four location numbers stand for \[top, left, bottom, right\]. By the way, currently the saved images will be overwritten continuously, so those saved images might not all from the detections of the last frame, if the number of detections is less than previous detection numbers.
diff --git a/object-detection/Dockerfile b/object-detection/Dockerfile
index 26030f4..4d87bbb 100644
--- a/object-detection/Dockerfile
+++ b/object-detection/Dockerfile
@@ -53,7 +53,7 @@ RUN . /opt/axis/acapsdk/environment-setup* && \
# Download pretrained models
WORKDIR /opt/app/model
ARG CHIP=
-RUN if [ "$CHIP" = cpu ] || [ "$CHIP" = artpec8 ]; then \
+RUN if [ "$CHIP" = artpec8 ] || [ "$CHIP" = artpec9 ] || [ "$CHIP" = cpu ] ; then \
        curl -L -o converted_model.tflite \
            https://github.com/google-coral/test_data/raw/master/ssd_mobilenet_v2_coco_quant_postprocess.tflite ; \
    elif [ "$CHIP" = edgetpu ]; then \
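The reworked condition above means `artpec9` shares the CPU/ARTPEC-8 model. A minimal standalone sketch of that branch, using only the test and URL already present in the Dockerfile:

```sh
# Same branch as in object-detection/Dockerfile: artpec8, artpec9 and cpu all
# download the quantized MobileNet SSD v2 (COCO) model.
CHIP=artpec9
if [ "$CHIP" = artpec8 ] || [ "$CHIP" = artpec9 ] || [ "$CHIP" = cpu ]; then
    curl -L -o converted_model.tflite \
        https://github.com/google-coral/test_data/raw/master/ssd_mobilenet_v2_coco_quant_postprocess.tflite
fi
```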
diff --git a/object-detection/README.md b/object-detection/README.md
index 4bfeb24..7390c74 100644
--- a/object-detection/README.md
+++ b/object-detection/README.md
@@ -8,8 +8,7 @@ This example focuses on the application of object detection on an Axis camera eq

## Prerequisites

-- Axis camera equipped with CPU, an [Edge TPU](https://coral.ai/docs/edgetpu/faq/) or DLPU (for ARTPEC-8)
-- [Docker](https://docs.docker.com/get-docker/)
+- Axis camera equipped with CPU or DLPU

## Quickstart

@@ -23,7 +22,7 @@ The following instructions can be executed to simply run the example.
   ```

   where the values are found:
-   - \<CHIP\> is the chip type. Supported values are `artpec8`, `cpu` and `edgetpu`.
+   - \<CHIP\> is the chip type. Supported values are `artpec9`, `artpec8`, `cpu` and `edgetpu`.
   - \<ARCH\> is the architecture. Supported values are `armv7hf` (default) and `aarch64`.

2. Find the ACAP application `.eap` file
@@ -225,7 +224,7 @@ docker cp $(docker create obj_detect:1.0):/opt/app ./build

where the parameters are:

-- \<CHIP\> is the chip type. Supported values are `artpec8`, `cpu` and `edgetpu`.
+- \<CHIP\> is the chip type. Supported values are `artpec9`, `artpec8`, `cpu` and `edgetpu`.
- \<ARCH\> is the architecture. Supported values are `armv7hf` (default) and `aarch64`.

> N.b. The selected architecture and chip must match the targeted device.
@@ -271,12 +270,13 @@ Depending on selected chip, different output is received. The label file is used
In the system log the chip is sometimes only mentioned as a string, they are mapped as follows:

-| Chips | Larod 1 (int) | Larod 3 (string) |
+| Chips | Larod 1 (int) | Larod 3 |
|-------|--------------|------------------|
| CPU with TensorFlow Lite | 2 | cpu-tflite |
| Google TPU | 4 | google-edge-tpu-tflite |
| Ambarella CVFlow (NN) | 6 | ambarella-cvflow |
| ARTPEC-8 DLPU | 12 | axis-a8-dlpu-tflite |
+| ARTPEC-9 DLPU | - | a9-dlpu-tflite |

There are four outputs from MobileNet SSD v2 (COCO) model. The number of detections, cLasses, scores, and locations are shown as below. The four location numbers stand for \[top, left, bottom, right\]. By the way, currently the saved images will be overwritten continuously, so those saved images might not all from the detections of the last frame, if the number of detections is less than previous detection numbers.
diff --git a/object-detection/app/manifest.json.artpec9 b/object-detection/app/manifest.json.artpec9
new file mode 100644
index 0000000..5ab2646
--- /dev/null
+++ b/object-detection/app/manifest.json.artpec9
@@ -0,0 +1,15 @@
+{
+    "schemaVersion": "1.7.0",
+    "acapPackageConf": {
+        "setup": {
+            "friendlyName": "object_detection_artpec9",
+            "appName": "object_detection",
+            "vendor": "Axis Communications",
+            "embeddedSdkVersion": "3.0",
+            "runOptions": "/usr/local/packages/object_detection/model/converted_model.tflite 300 300 80 1920 1080 50 /usr/local/packages/object_detection/label/labels.txt -c a9-dlpu-tflite",
+            "vendorUrl": "https://www.axis.com",
+            "runMode": "never",
+            "version": "1.0.0"
+        }
+    }
+}
\ No newline at end of file
diff --git a/tensorflow-to-larod-artpec9/README.md b/tensorflow-to-larod-artpec9/README.md
new file mode 100644
index 0000000..cf16f07
--- /dev/null
+++ b/tensorflow-to-larod-artpec9/README.md
@@ -0,0 +1,4 @@
+*Copyright (C) 2024, Axis Communications AB, Lund, Sweden. All Rights Reserved.*
+
+This tutorial on training a model with TensorFlow for ARTPEC-9 follows the same steps as the ARTPEC-8 version.
+For detailed instructions and up-to-date information, please follow the [tensorflow-to-larod-artpec8](../tensorflow-to-larod-artpec8) tutorial.
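For reference, the `runOptions` string in the new `manifest.json.artpec9` is the argument list handed to the application when it starts. A hedged illustration of the resulting command line on target; the binary path is assumed from `appName` and is not part of this diff:

```sh
# Assumed expansion of runOptions for object_detection on ARTPEC-9:
# the model path, the six numeric arguments, the label file, and the
# chip string a9-dlpu-tflite, exactly as listed in the manifest.
/usr/local/packages/object_detection/object_detection \
    /usr/local/packages/object_detection/model/converted_model.tflite \
    300 300 80 1920 1080 50 \
    /usr/local/packages/object_detection/label/labels.txt \
    -c a9-dlpu-tflite
```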
diff --git a/vdo-larod/Dockerfile b/vdo-larod/Dockerfile
index d9585a9..8139644 100644
--- a/vdo-larod/Dockerfile
+++ b/vdo-larod/Dockerfile
@@ -15,9 +15,7 @@ ARG CHIP

# Download the pretrained model
ARG MODEL_BUCKET=https://acap-artifacts.s3.eu-north-1.amazonaws.com/models
-RUN if [ "$CHIP" = artpec8 ]; then \
-        curl -o model.zip $MODEL_BUCKET/models.aarch64.artpec8.zip ; \
-    elif [ "$CHIP" = cpu ]; then \
+RUN if [ "$CHIP" = artpec8 ] || [ "$CHIP" = artpec9 ] || [ "$CHIP" = cpu ] ; then \
        curl -o model.zip $MODEL_BUCKET/models.aarch64.artpec8.zip ; \
    elif [ "$CHIP" = edgetpu ]; then \
        curl -o model.zip $MODEL_BUCKET/models.armv7hf.edgetpu.zip ; \
@@ -36,7 +34,7 @@ COPY ./app .
# Build the ACAP application
RUN cp /opt/app/manifest.json.${CHIP} /opt/app/manifest.json && \
    . /opt/axis/acapsdk/environment-setup* && \
-    if [ "$CHIP" = artpec8 ] || [ "$CHIP" = cpu ]; then \
+    if [ "$CHIP" = artpec8 ] || [ "$CHIP" = artpec9 ] || [ "$CHIP" = cpu ] ; then \
        acap-build . -a 'models/converted_model.tflite'; \
    elif [ "$CHIP" = edgetpu ]; then \
        acap-build . -a 'models/converted_model_edgetpu.tflite'; \
diff --git a/vdo-larod/README.md b/vdo-larod/README.md
index 0fbb0e4..f67d710 100644
--- a/vdo-larod/README.md
+++ b/vdo-larod/README.md
@@ -11,6 +11,11 @@ It is achieved by using the containerized API and toolchain images.

Together with this README file you should be able to find a directory called app. That directory contains the "vdo_larod" application source code, which can easily be compiled and run with the help of the tools and step by step below.

+## Prerequisites
+
+- Axis camera equipped with CPU or DLPU
+- [Docker](https://docs.docker.com/get-docker/)
+
## Detailed outline of example application

This application opens a client to VDO and starts fetching frames (in a new thread) in the YUV format. It tries to find the smallest VDO stream resolution that fits the width and height required by the neural network. The thread fetching frames is written so that it always tries to provide a frame as new as possible even if not all previous frames have been processed by larod.
@@ -45,6 +50,7 @@ vdo-larod
│   ├── LICENSE
│   ├── Makefile
│   ├── manifest.json.artpec8
+│   ├── manifest.json.artpec9
│   ├── manifest.json.cpu
│   ├── manifest.json.cv25
│   ├── manifest.json.edgetpu
@@ -58,7 +64,8 @@ vdo-larod
- **app/LICENSE** - Text file which lists all open source licensed source code distributed with the application.
- **app/Makefile** - Makefile containing the build and link instructions for building the ACAP application.
-- **app/manifest.json.artpec8** - Defines the application and its configuration when building for DLPU with TensorFlow Lite.
+- **app/manifest.json.artpec8** - Defines the application and its configuration when building for artpec8 DLPU with TensorFlow Lite.
+- **app/manifest.json.artpec9** - Defines the application and its configuration when building for artpec9 DLPU with TensorFlow Lite.
- **app/manifest.json.cpu** - Defines the application and its configuration when building for CPU with TensorFlow Lite.
- **app/manifest.json.cv25** - Defines the application and its configuration when building chip and model for cv25 DLPU.
@@ -112,7 +119,7 @@ docker cp $(docker create <APP_IMAGE>):/opt/app ./build
```

- \<APP_IMAGE\> is the name to tag the image with, e.g., `vdo_larod:1.0`.
-- \<CHIP\> is the chip type. Supported values are `artpec8`, `cpu`, `cv25` and `edgetpu`.
+- \<CHIP\> is the chip type. Supported values are `artpec9`, `artpec8`, `cpu`, `cv25` and `edgetpu`.
- \<ARCH\> is the architecture. Supported values are `armv7hf` (default) and `aarch64`.

See the following sections for build commands for each chip.
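With `artpec9` added to the supported `\<CHIP\>` values above, the Dockerfile hunks earlier in this patch decide what that value maps to: ARTPEC-9 reuses the ARTPEC-8 model archive and the same `acap-build` invocation. A minimal sketch of that merged branch, using only values visible in the diff:

```sh
# Mirrors the merged condition in vdo-larod/Dockerfile.
MODEL_BUCKET=https://acap-artifacts.s3.eu-north-1.amazonaws.com/models
CHIP=artpec9
if [ "$CHIP" = artpec8 ] || [ "$CHIP" = artpec9 ] || [ "$CHIP" = cpu ]; then
    curl -o model.zip "$MODEL_BUCKET/models.aarch64.artpec8.zip"
    # inside the SDK container this branch then runs:
    # acap-build . -a 'models/converted_model.tflite'
fi
```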
@@ -126,6 +133,15 @@ docker build --build-arg ARCH=aarch64 --build-arg CHIP=artpec8 --tag <APP_IMAGE> .
docker cp $(docker create <APP_IMAGE>):/opt/app ./build
```

+#### Build for ARTPEC-9 with Tensorflow Lite
+
+To build a package for ARTPEC-9 with Tensorflow Lite, run the following commands standing in your working directory:
+
+```sh
+docker build --build-arg ARCH=aarch64 --build-arg CHIP=artpec9 --tag <APP_IMAGE> .
+docker cp $(docker create <APP_IMAGE>):/opt/app ./build
+```
+
#### Build for CPU with Tensorflow Lite

To build a package for CPU with Tensorflow Lite, run the following commands standing in your working directory:
@@ -169,6 +185,7 @@ vdo-larod
│   ├── Makefile
│   ├── manifest.json
│   ├── manifest.json.artpec8
+│   ├── manifest.json.artpec9
│   ├── manifest.json.cpu
│   ├── manifest.json.edgetpu
│   ├── manifest.json.cv25
@@ -178,8 +195,8 @@ vdo-larod
│   ├── package.conf.orig
│   ├── param.conf
│   ├── vdo_larod*
-│   ├── vdo_larod_{cpu,edgetpu}_1_0_0_armv7hf.eap / vdo_larod_{cv25,artpec8}_1_0_0_aarch64.eap
-│   ├── vdo_larod_{cpu,edgetpu}_1_0_0_LICENSE.txt / vdo_larod_{cv25,artpec8}_1_0_0_LICENSE.txt
+│   ├── vdo_larod_{cpu,edgetpu}_1_0_0_armv7hf.eap / vdo_larod_{cv25,artpec8,artpec9}_1_0_0_aarch64.eap
+│   ├── vdo_larod_{cpu,edgetpu}_1_0_0_LICENSE.txt / vdo_larod_{cv25,artpec8,artpec9}_1_0_0_LICENSE.txt
│   └── vdo_larod.c
```
@@ -197,6 +214,10 @@ vdo-larod
- **build/vdo_larod_artpec8_1_0_0_aarch64.eap** - Application package .eap file.
- **build/vdo_larod_artpec8_1_0_0_LICENSE.txt** - Copy of LICENSE file.
+
+  If chip `artpec9` has been built.
+- **build/vdo_larod_artpec9_1_0_0_aarch64.eap** - Application package .eap file.
+- **build/vdo_larod_artpec9_1_0_0_LICENSE.txt** - Copy of LICENSE file.

  If chip `cpu` has been built.
- **build/vdo_larod_cpu_1_0_0_armv7hf.eap** - Application package .eap file.
- **build/vdo_larod_cpu_1_0_0_LICENSE.txt** - Copy of LICENSE file.
@@ -223,6 +244,7 @@ http://<AXIS_DEVICE_IP>/index.html#apps
- Browse to the newly built ACAP application, depending on architecture:
  - `vdo_larod_cv25_1_0_0_aarch64.eap`
  - `vdo_larod_artpec8_1_0_0_aarch64.eap`
+  - `vdo_larod_artpec9_1_0_0_aarch64.eap`
  - `vdo_larod_cpu_1_0_0_armv7hf.eap`
  - `vdo_larod_edgetpu_1_0_0_armv7hf.eap`
- Click `Install`
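Besides the web-interface steps above, the result can be checked from a workstation. This is a hedged sketch that assumes the standard VAPIX Application API endpoints, which are not part of this patch; credentials and device IP are placeholders:

```sh
# Hypothetical post-install check (VAPIX Application API assumed).
curl --anyauth -u root:<PASSWORD> "http://<AXIS_DEVICE_IP>/axis-cgi/applications/list.cgi"
# Start the application; with manifest.json.artpec9 it should log that it uses a9-dlpu-tflite.
curl --anyauth -u root:<PASSWORD> \
    "http://<AXIS_DEVICE_IP>/axis-cgi/applications/control.cgi?action=start&package=vdo_larod"
```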
@@ -242,12 +264,13 @@ Depending on the selected chip, different output is received.

In previous larod versions, the chip was referred to as a number instead of a string. See the table below to understand the mapping:

-| Chips | Larod 1/2 (int) | Larod 3 (string) |
+| Chips | Larod 1 (int) | Larod 3 |
|-------|--------------|------------------|
| CPU with TensorFlow Lite | 2 | cpu-tflite |
| Google TPU | 4 | google-edge-tpu-tflite |
| Ambarella CVFlow (NN) | 6 | ambarella-cvflow |
| ARTPEC-8 DLPU | 12 | axis-a8-dlpu-tflite |
+| ARTPEC-9 DLPU | - | a9-dlpu-tflite |

#### Output - ARTPEC-8 with TensorFlow Lite
@@ -302,6 +325,54 @@ vdo_larod[4165]: Stop streaming video from VDO
vdo_larod[4165]: Exit /usr/local/packages/vdo_larod/vdo_larod
```

+#### Output - ARTPEC-9 with TensorFlow Lite
+
+```sh
+----- Contents of SYSTEM_LOG for 'vdo_larod' -----
+
+
+vdo_larod[4165]: Starting /usr/local/packages/vdo_larod/vdo_larod
+vdo_larod[4165]: 'buffer.strategy':
+vdo_larod[4165]: 'channel':
+vdo_larod[4165]: 'format':
+vdo_larod[4165]: 'height':
+vdo_larod[4165]: 'width':
+vdo_larod[4165]: Creating VDO image provider and creating stream 480 x 270
+vdo_larod[4165]: Dump of vdo stream settings map =====
+vdo_larod[4165]: chooseStreamResolution: We select stream w/h=480 x 270 based on VDO channel info.
+vdo_larod[4165]: Calculate crop image
+vdo_larod[4165]: Create larod models
+vdo_larod[4165]: Create preprocessing maps
+vdo_larod[4165]: Crop VDO image X=40 Y=0 (480 x 270)
+vdo_larod[4165]: Setting up larod connection with chip a9-dlpu-tflite and model /usr/local/packages/vdo_larod/models/converted_model.tflite
+vdo_larod[4165]: Available chip ids:
+vdo_larod[4165]: Chip: a9-dlpu-tflite
+vdo_larod[4165]: Chip: cpu-tflite
+vdo_larod[4165]: Chip: cpu-proc
+vdo_larod[4165]: Allocate memory for input/output buffers
+vdo_larod[4165]: Connect tensors to file descriptors
+vdo_larod[4165]: Create input/output tensors
+vdo_larod[4165]: Create job requests
+vdo_larod[4165]: Determine tensor buffer sizes
+vdo_larod[4165]: Start fetching video frames from VDO
+vdo_larod[4165]: Converted image in 14 ms
+vdo_larod[4165]: Person detected: 5.49% - Car detected: 80.00%
+vdo_larod[4165]: Ran inference for 17 ms
+vdo_larod[4165]: Converted image in 4 ms
+vdo_larod[4165]: Person detected: 4.31% - Car detected: 88.63%
+vdo_larod[4165]: Ran inference for 16 ms
+vdo_larod[4165]: Converted image in 4 ms
+vdo_larod[4165]: Person detected: 2.75% - Car detected: 75.29%
+vdo_larod[4165]: Ran inference for 16 ms
+vdo_larod[4165]: Converted image in 3 ms
+vdo_larod[4165]: Person detected: 3.14% - Car detected: 88.63%
+vdo_larod[4165]: Ran inference for 16 ms
+vdo_larod[4165]: Converted image in 3 ms
+vdo_larod[4165]: Person detected: 1.57% - Car detected: 74.51%
+vdo_larod[4165]: Ran inference for 16 ms
+vdo_larod[4165]: Stop streaming video from VDO
+vdo_larod[4165]: Exit /usr/local/packages/vdo_larod/vdo_larod
+```
+
#### Output - CPU with TensorFlow Lite

```sh
diff --git a/vdo-larod/app/manifest.json.artpec9 b/vdo-larod/app/manifest.json.artpec9
new file mode 100644
index 0000000..852c951
--- /dev/null
+++ b/vdo-larod/app/manifest.json.artpec9
@@ -0,0 +1,15 @@
+{
+    "schemaVersion": "1.7.0",
+    "acapPackageConf": {
+        "setup": {
+            "friendlyName": "vdo_larod_artpec9",
+            "appName": "vdo_larod",
+            "vendor": "Axis Communications",
+            "embeddedSdkVersion": "3.0",
+            "runOptions": "a9-dlpu-tflite /usr/local/packages/vdo_larod/models/converted_model.tflite 480 270 5",
+            "vendorUrl": "https://www.axis.com",
+            "runMode": "never",
+            "version": "1.0.0"
+        }
+    }
+}
\ No newline at end of file
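A small sanity check of the new manifest ties it back to the chip-string table above: the first token of `runOptions` is the chip the application passes to larod and should read `a9-dlpu-tflite`. `jq` is not used anywhere in this repository; the snippet is only an illustration:

```sh
# Hypothetical check that the ARTPEC-9 manifest selects the expected chip string.
jq -r '.acapPackageConf.setup.runOptions' vdo-larod/app/manifest.json.artpec9 | awk '{print $1}'
# expected output: a9-dlpu-tflite
```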