Python OpenVINO

Alexander Popov 2024-08-18 19:56:51 +03:00
parent 12c97ad421
commit 0d8e5d905d
Signed by: iiiypuk
GPG Key ID: E47FE0AB36CD5ED6
6 changed files with 165 additions and 0 deletions

projects/OpenVINO/Python/.gitignore

@@ -0,0 +1,3 @@
# models
intel/
*.onnx


@@ -0,0 +1,16 @@
# ...
## Exporting YOLOv8 to OpenVINO
Documentation is available on the [Ultralytics](https://docs.ultralytics.com/ru/integrations/openvino/) site.
```sh
yolo export model=yolov8n.pt format=openvino
```
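The exported IR can then be loaded straight into the OpenVINO runtime. A minimal sketch, assuming the default Ultralytics output directory `yolov8n_openvino_model/`:
```python
import openvino as ov

# Paths are the Ultralytics export defaults and may differ in your setup
core = ov.Core()
model = core.read_model('yolov8n_openvino_model/yolov8n.xml')
compiled = core.compile_model(model, 'CPU')  # pick the device available on your machine

# Inspect inputs/outputs before wiring up pre-/post-processing
print(compiled.inputs)
print(compiled.outputs)
```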
## Downloading a model from the Open Model Zoo
```sh
pip3 install openvino-dev
omz_downloader --name person-detection-0106
```
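`omz_downloader` saves the model files under `intel/<model_name>/<precision>/` in the current directory, which is why `intel/` is listed in the `.gitignore`. The expected layout (available precision folders may vary):
```sh
intel/person-detection-0106/FP32/person-detection-0106.xml
intel/person-detection-0106/FP32/person-detection-0106.bin
```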


@@ -0,0 +1 @@
../../../assets/bus.jpg


@@ -0,0 +1 @@
../../../assets/girl_faces.png


@@ -0,0 +1,121 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# https://docs.openvino.ai/2023.3/openvino_sample_hello_reshape_ssd.html
import logging as log
import os
import sys

import cv2
import numpy as np
import openvino as ov


def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)

    # Parsing and validation of input arguments
    if len(sys.argv) != 4:
        log.info(f'Usage: {sys.argv[0]} <path_to_model> <path_to_image> <device_name>')
        return 1

    model_path = sys.argv[1]
    image_path = sys.argv[2]
    device_name = sys.argv[3]

# --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = ov.Core()

# --------------------------- Step 2. Read a model --------------------------------------------------------------------
    log.info(f'Reading the model: {model_path}')
    # (.xml and .bin files) or (.onnx file)
    model = core.read_model(model_path)

    if len(model.inputs) != 1:
        log.error('Sample supports only single input topologies')
        return -1

    if len(model.outputs) != 1:
        log.error('Sample supports only single output topologies')
        return -1

# --------------------------- Step 3. Set up input --------------------------------------------------------------------
    # Read input image
    image = cv2.imread(image_path)
    # Add N dimension
    input_tensor = np.expand_dims(image, 0)

    log.info('Reshaping the model to the height and width of the input image')
    n, h, w, c = input_tensor.shape
    model.reshape({model.input().get_any_name(): ov.PartialShape((n, c, h, w))})

# --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
    ppp = ov.preprocess.PrePostProcessor(model)

    # 1) Set input tensor information:
    # - input() provides information about a single model input
    # - precision of tensor is supposed to be 'u8'
    # - layout of data is 'NHWC'
    ppp.input().tensor() \
        .set_element_type(ov.Type.u8) \
        .set_layout(ov.Layout('NHWC'))  # noqa: N400

    # 2) Here we suppose model has 'NCHW' layout for input
    ppp.input().model().set_layout(ov.Layout('NCHW'))

    # 3) Set output tensor information:
    # - precision of tensor is supposed to be 'f32'
    ppp.output().tensor().set_element_type(ov.Type.f32)

    # 4) Apply preprocessing modifying the original 'model'
    model = ppp.build()

# --------------------------- Step 5. Loading model to the device -----------------------------------------------------
    log.info('Loading the model to the plugin')
    compiled_model = core.compile_model(model, device_name)

# --------------------------- Step 6. Create infer request and do inference synchronously -----------------------------
    log.info('Starting inference in synchronous mode')
    results = compiled_model.infer_new_request({0: input_tensor})

# --------------------------- Step 7. Process output -------------------------------------------------------------------
    predictions = next(iter(results.values()))

    # Change a shape of a numpy.ndarray with results ([1, 1, N, 7]) to get another one ([N, 7]),
    # where N is the number of detected bounding boxes
    detections = predictions.reshape(-1, 7)

    for detection in detections:
        confidence = detection[2]
        if confidence > 0.5:
            class_id = int(detection[1])
            xmin = int(detection[3] * w)
            ymin = int(detection[4] * h)
            xmax = int(detection[5] * w)
            ymax = int(detection[6] * h)

            log.info(f'Found: class_id = {class_id}, confidence = {confidence:.2f}, '
                     f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')

            # Draw a bounding box on the output image
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)

    cv2.imwrite('out.bmp', image)
    if os.path.exists('out.bmp'):
        log.info('Image out.bmp was created!')
    else:
        log.error('Image out.bmp was not created. Check your permissions.')

# ----------------------------------------------------------------------------------------------------------------------
    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
    return 0


if __name__ == '__main__':
    sys.exit(main())
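A typical invocation of the sample, assuming it is saved as `hello_reshape_ssd.py` (the name used in the OpenVINO documentation linked above; the actual file name in this commit may differ) and with `model.xml` as a placeholder path to an IR file:

```sh
# arguments: <path_to_model> <path_to_image> <device_name>
python3 hello_reshape_ssd.py model.xml bus.jpg CPU
```

The script only handles single-input, single-output SSD-style detectors whose output reshapes to [N, 7]; models with a different output layout need different post-processing.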


@@ -0,0 +1,23 @@
black==24.4.2
certifi==2024.7.4
charset-normalizer==3.3.2
click==8.1.7
defusedxml==0.7.1
idna==3.7
mypy-extensions==1.0.0
networkx==3.1
numpy==1.24.4
opencv-python==4.10.0.84
openvino==2024.3.0
openvino-dev==2024.3.0
openvino-telemetry==2024.1.0
packaging==24.1
pathspec==0.12.1
pillow==10.4.0
pkg_resources==0.0.0
platformdirs==4.2.2
PyYAML==6.0.2
requests==2.32.3
tomli==2.0.1
typing_extensions==4.12.2
urllib3==2.2.2