Compare commits

...

16 Commits

59 changed files with 1208 additions and 11 deletions

BIN
assets/bus.jpg Normal file (binary image, 134 KiB; content not shown)

7
code/Bash/string_array.sh Executable file

@ -0,0 +1,7 @@
#!/bin/bash
myArray=("cat" "dog" "mouse" "frog")
for str in "${myArray[@]}"; do
echo "$str"
done

26
code/Bash/zenity/progress.sh Executable file

@ -0,0 +1,26 @@
#!/bin/sh
# https://help.gnome.org/users/zenity/stable/progress.html.en
(
echo "0" ; sleep 1
echo "# Scanning mail logs..." ; sleep 1
echo "10" ; sleep 1
echo "# Updating mail logs" ; sleep 1
echo "20" ; sleep 1
echo "# Resetting cron jobs" ; sleep 1
echo "50" ; sleep 1
echo "This line will just be ignored" ; sleep 1
echo "75" ; sleep 1
echo "# Rebooting system" ; sleep 1
echo "100" ; sleep 1
) |
zenity --progress \
--title="Update System Logs" \
--text="..." \
--percentage=0
if [ "$?" = -1 ] ; then
zenity --error \
--text="Update canceled."
fi
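
The script drives zenity entirely through stdout: bare numbers set the percentage and lines starting with `#` set the label, while anything else is ignored. For jobs of unknown duration there is also `--pulsate`; a minimal sketch, with `du` standing in for any long-running command:

```sh
#!/bin/sh
# Pulse until stdin reports 100; --auto-close then dismisses the dialog.
(
du -sh /usr >/dev/null 2>&1
echo 100
) | zenity --progress --pulsate --auto-close --title="Scanning" --text="Scanning /usr..."
```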

8
code/Bash/zenity/table.sh Executable file

@ -0,0 +1,8 @@
#!/bin/bash
for i in $(seq 1 10)
do
echo "bla bla"
echo "xxx"
echo "$i"
done | zenity --list --title="title" --text="text" --column="X" --column="Y" --column="Z"

5
code/C++/.clang-format Normal file

@ -0,0 +1,5 @@
Language: Cpp
BasedOnStyle: LLVM
IndentWidth: 2
ColumnLimit: 132
SortIncludes: CaseSensitive
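
With this `.clang-format` at the project root, sources are formatted in place via `--style=file`; the file names below are just the ones from this directory:

```sh
clang-format -i --style=file __cplusplus.cpp benchmark.cpp
```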

1
code/C++/.gitignore vendored Normal file

@ -0,0 +1 @@
run


@ -1,3 +1,8 @@
# C++
## std
- [`__cplusplus`](__cplusplus.cpp) - Prints the C++ language version
- [`std::filesystem`](list_files_in_folder.cpp) - Listing the files in a directory
- [`benchmark.cpp`](benchmark.cpp) - Measuring an operation's execution time in seconds
- []() -

11
code/C++/__cplusplus.cpp Normal file

@ -0,0 +1,11 @@
#include <iostream>
/**
* Prints the C++ version
*/
int main() {
std::cout << "C++ version: " << __cplusplus << std::endl;
return EXIT_SUCCESS;
}
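
The printed value depends on the `-std` flag used at compile time (201103, 201402, 201703, 202002, ...). A quick check; the `run` binary name matches the `.gitignore` entry below:

```sh
g++ -std=c++17 __cplusplus.cpp -o run && ./run   # C++ version: 201703
g++ -std=c++20 __cplusplus.cpp -o run && ./run   # C++ version: 202002
```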

24
code/C++/benchmark.cpp Normal file

@ -0,0 +1,24 @@
/**
* Operation execution time in seconds
*/
#include <cstdlib>
#include <ctime>
#include <iostream>
int main(int argc, char const *argv[]) {
time_t start, end;
time(&start);
for (int i = 0; i < 20000; i++) {
for (int j = 0; j < 20000; j++)
;
}
time(&end);
std::cout << "Total " << difftime(end, start) << " seconds" << std::endl;
return EXIT_SUCCESS;
}
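
`time()` has one-second resolution, and an optimizer may remove the empty loops entirely, so compiling without optimization is assumed here; the shell's `time` builtin gives a cross-check:

```sh
g++ -O0 benchmark.cpp -o run && time ./run
```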

5
code/C++/thread/Makefile Normal file

@ -0,0 +1,5 @@
# CC=g++ # Small output file
CC=clang++-18 # Big output file
all:
$(CC) -O3 -o app thread.cpp


@ -0,0 +1 @@
# thread — Threads in C++


@ -0,0 +1,54 @@
/**
* Original code: https://stackoverflow.com/a/61960363
*/
#include <iostream>
#include <thread>
#include <optional>
#include <atomic>
// (1) The variable must be atomic to avoid a data race
std::atomic<bool> app_finished{false};
using namespace std::literals::chrono_literals;
void SendData(int id) {
std::cout << "Рабочий поток: " << id << std::endl;
std::cout << "Идентификатор потока: " << std::this_thread::get_id() << std::endl;
while (!app_finished) {
std::cout << "Работа..." << std::endl;
std::this_thread::sleep_for(1s);
}
}
std::thread startRecording(std::optional<int> t) {
std::thread th1(SendData, 1);
std::cout << "[startRecording] Другая задача" << std::endl;
// (2) Возвращаем поток, чтобы присоединиться к нему в main()
return th1;
}
void stopRecording() {
app_finished = true;
std::cout << "[stopRecording] Другая задача" << std::endl;
}
int main() {
std::cout << "Запуска программы..." << std::endl;
// (3) Сохраняем поток в переменной с именем "worker"
// что можно было присоединиться к нему позже.
std::thread worker = startRecording(std::optional<int>{1});
std::this_thread::sleep_for(5s);
stopRecording();
// (4) Join the thread here, at the end
worker.join();
return EXIT_SUCCESS;
}

29
code/Doxygen/Doxyfile Normal file

@ -0,0 +1,29 @@
# Doxyfile 1.8.17
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = "..."
PROJECT_BRIEF = "..."
PROJECT_LOGO =
OUTPUT_DIRECTORY = doxygen
ALLOW_UNICODE_NAMES = YES
EXTRACT_PRIVATE = YES
VERBATIM_HEADERS = NO
SOURCE_BROWSER = NO
OUTPUT_LANGUAGE = Russian
TAB_SIZE = 2
INPUT = include src
INPUT_ENCODING = UTF-8
FILE_PATTERNS = *.c \
*.cpp \
*.h \
*.hpp \
*.md
GENERATE_LATEX = NO
GENERATE_HTML = YES
HTML_OUTPUT = html
HTML_FILE_EXTENSION = .html
HTML_EXTRA_STYLESHEET = 3rdparty/doxygen-awesome-css/doxygen-awesome.css
HTML_TIMESTAMP = YES
DISABLE_INDEX = NO
FULL_SIDEBAR = NO # for doxygen-awesome-css
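
Assuming the `doxygen-awesome-css` checkout exists at the path named in `HTML_EXTRA_STYLESHEET`, the documentation is generated and opened like this (output paths follow `OUTPUT_DIRECTORY` and `HTML_OUTPUT` above):

```sh
doxygen Doxyfile
xdg-open doxygen/html/index.html
```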


@ -0,0 +1 @@
*.md


@ -0,0 +1,10 @@
{
"printWidth": 100,
"bracketSpacing": true,
"bracketSameLine": true,
"semi": true,
"singleQuote": true,
"arrowParens": "always",
"htmlWhitespaceSensitivity": "strict",
"endOfLine": "lf"
}
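
Prettier picks this `.prettierrc` up automatically from the project root; a typical invocation:

```sh
npx prettier --write .
```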


@ -6,16 +6,10 @@ def connect(essid='ESP', clients=3) -> bool:
print('Starting AP: {0}...'.format(essid))
ap = network.WLAN(network.AP_IF)
ap.active(True)
- ap.config(essid=essid)
- ap.config(max_clients=clients)
+ ap.config(essid=essid, max_clients=clients)
time.sleep(3)
while ap.active() == False:
pass
- if ap.isconnected():
- print('AP "{0}" started'.format(essid))
- return True
- else:
- print('Starting AP failed!')
- return False
+ print('AP "{0}" started'.format(essid))
+ return True

27
code/Python/loguru.py Normal file

@ -0,0 +1,27 @@
import sys
from loguru import logger
def logger_export() -> None:
logger.add("file_1.log", rotation="500 MB") # Automatically rotate too big file
logger.add("file_2.log", rotation="12:00") # New file is created each day at noon
logger.add("file_3.log", rotation="1 week") # Once the file is too old, it's rotated
logger.add("file_X.log", retention="10 days") # Cleanup after some time
logger.add("file_Y.log", compression="zip") # Save some loved space
def main():
logger.add(sys.stderr, format="{time} {level} {message}", filter="my_module", level="INFO")
logger.trace("trace")
logger.debug("debug")
logger.info("info")
logger.success("success")
logger.warning("warning")
logger.error("error")
logger.critical("critical")
if __name__ == '__main__':
main()
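
`loguru` is a third-party package, so installing it first is assumed:

```sh
pip3 install loguru
```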


@ -0,0 +1,6 @@
# xmake
.xmake/
build/
# 3rd party
third/


@ -0,0 +1 @@
https://github.com/AndrewBelt/osdialog.git
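
This file records only the upstream URL; the sources themselves are expected under `third/` (ignored via `.gitignore`) so that the `include/` and `src/` symlinks resolve. A plausible fetch step:

```sh
git clone https://github.com/AndrewBelt/osdialog.git third/osdialog
```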


@ -0,0 +1 @@
../third/osdialog/osdialog.h


@ -0,0 +1,18 @@
#include <stdio.h>
#include "osdialog.h"
int main(int argc, char const *argv[]) {
int res;
res = osdialog_message(OSDIALOG_INFO, OSDIALOG_OK, "Info こんにちは");
printf("Result: %d\n", res);
res = osdialog_message(OSDIALOG_WARNING, OSDIALOG_OK_CANCEL, "Warning こんにちは");
printf("Result: %d\n", res);
res = osdialog_message(OSDIALOG_ERROR, OSDIALOG_YES_NO, "Error こんにちは");
printf("Result: %d\n", res);
return 0;
}


@ -0,0 +1 @@
../third/osdialog/osdialog.c


@ -0,0 +1 @@
../third/osdialog/osdialog_zenity.c


@ -0,0 +1,9 @@
set_project("osdialog-example")
set_languages("c99")
add_rules("mode.release")
add_includedirs("include")
target("osdialog-example")
set_kind("binary")
add_files("src/*.c")


@ -0,0 +1,5 @@
<div class="card">
<div class="circle"></div>
<div class="circle"></div>
<div class="card-inner"></div>
</div>

(binary image, 18 KiB; file not shown)


@ -0,0 +1,52 @@
.card {
width: 190px;
height: 254px;
transition: all 0.2s;
position: relative;
cursor: pointer;
}
.card-inner {
width: inherit;
height: inherit;
background: rgba(255,255,255,.05);
box-shadow: 0 0 10px rgba(0,0,0,0.25);
backdrop-filter: blur(10px);
border-radius: 8px;
}
.card:hover {
transform: scale(1.04) rotate(1deg);
}
.circle {
width: 100px;
height: 100px;
background: radial-gradient(#b0e633, #53ef7d);
border-radius: 50%;
position: absolute;
animation: move-up6 2s ease-in infinite alternate-reverse;
}
.circle:nth-child(1) {
top: -25px;
left: -25px;
}
.circle:nth-child(2) {
bottom: -25px;
right: -25px;
animation-name: move-down1;
}
@keyframes move-up6 {
to {
transform: translateY(-10px);
}
}
@keyframes move-down1 {
to {
transform: translateY(10px);
}
}


@ -0,0 +1 @@
<div class="card"></div>

(binary image, 1.1 KiB; file not shown)


@ -0,0 +1,6 @@
.card {
width: 190px;
height: 254px;
background: rgb(236, 236, 236);
box-shadow: rgba(0, 0, 0, 0.4) 0px 2px 4px, rgba(0, 0, 0, 0.3) 0px 7px 13px -3px, rgba(0, 0, 0, 0.2) 0px -3px 0px inset;
}


@ -0,0 +1,7 @@
## Card by G4b413l
![Card by G4b413l](Card%20by%20G4b413l/screenshot.png)
## Card by adamgiebl
![Card by adamgiebl](Card%20by%20adamgiebl/screenshot.png)

10
projects/OpenVINO/.gitignore vendored Normal file

@ -0,0 +1,10 @@
# models
Models/
*.onnx
*.pt
# junk
trash/
11.jpg
12.jpg
Python/


@ -0,0 +1 @@
../../../code/C++/.clang-format

10
projects/OpenVINO/C++/.gitignore vendored Normal file

@ -0,0 +1,10 @@
# xmake
.xmake/
build/
# binary
a.out
# other
infer.cpp
convert.py

10
projects/OpenVINO/C++/build.sh Executable file

@ -0,0 +1,10 @@
#!/bin/sh
clear
CV_INCLUDE=/opt/opencv-4.8.0/include/opencv4/
CV_LIB=/opt/opencv-4.8.0/lib/
export LD_LIBRARY_PATH=${CV_LIB}:${LD_LIBRARY_PATH}
g++ -I${CV_INCLUDE} -L${CV_LIB} -o inference detect.cpp -lopenvino -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_imgcodecs -lopencv_dnn &&
./inference


@ -0,0 +1,47 @@
#include <iostream>
#include <string>
#include <vector>
#include "infer.hpp"
int main(int argc, char *argv[]) {
// Check the argument count
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
return EXIT_FAILURE;
}
// Get the model and image paths from the program arguments
const std::string model_path = argv[1];
const std::string image_path = argv[2];
// Check that OpenVINO is available by trying to print its version
try {
std::cout << ov::get_openvino_version() << std::endl;
} catch (const std::exception &ex) {
std::cerr << ex.what() << std::endl;
return EXIT_FAILURE;
}
// Read the image from a file
cv::Mat image = cv::imread(image_path);
if (image.empty()) {
std::cerr << "ERROR: Failed to load the image" << std::endl;
return EXIT_FAILURE;
}
// Detection thresholds
const float probability = 0.5;
const float NMS = 0.5;
// Create the detection object
Inf detector(model_path, cv::Size(640, 640), probability, NMS);
// Run object detection
detector.inference(image);
// Write the result to a file
cv::imwrite("/tmp/cpp_openvino_result.bmp", image);
return EXIT_SUCCESS;
}


@ -0,0 +1,136 @@
#include "infer.hpp"
Inf::Inf(const std::string &model_path, const float &model_probability, const float &model_NMS) {
input_shape = cv::Size(640, 640);
probability = model_probability;
NMS = model_NMS;
init(model_path);
};
Inf::Inf(const std::string &model_path, const cv::Size model_input_shape, const float &model_probability, const float &model_NMS) {
input_shape = model_input_shape;
probability = model_probability;
NMS = model_NMS;
init(model_path);
};
void Inf::init(const std::string &model_path) {
ov::Core core;
std::shared_ptr<ov::Model> model = core.read_model(model_path);
// If the model has dynamic shapes,
// reshape it to the specified input shape
if (model->is_dynamic()) {
model->reshape({1, 3, static_cast<long int>(input_shape.height), static_cast<long int>(input_shape.width)});
}
// Configure preprocessing for the model
ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::BGR);
ppp.input()
.preprocess()
.convert_element_type(ov::element::f32)
.convert_color(ov::preprocess::ColorFormat::RGB)
.scale({255, 255, 255});
ppp.input().model().set_layout("NCHW");
ppp.output().tensor().set_element_type(ov::element::f32);
model = ppp.build();
compiled_model = core.compile_model(model, "AUTO");
inference_request = compiled_model.create_infer_request();
const std::vector<ov::Output<ov::Node>> inputs = model->inputs();
const ov::Shape in_shape = inputs[0].get_shape();
input_shape = cv::Size2f(in_shape[2], in_shape[1]);
const std::vector<ov::Output<ov::Node>> outputs = model->outputs();
const ov::Shape out_shape = outputs[0].get_shape();
output_shape = cv::Size(out_shape[2], out_shape[1]);
};
void Inf::pre(const cv::Mat &frame) {
cv::Mat resized_frame;
cv::resize(frame, resized_frame, input_shape, 0, 0, cv::INTER_AREA); // Resize the frame to match the model input shape
// Calculate scaling factor
scale_factor.x = static_cast<float>(frame.cols / input_shape.width);
scale_factor.y = static_cast<float>(frame.rows / input_shape.height);
float *input_data = (float *)resized_frame.data; // Get pointer to resized frame data
const ov::Tensor input_tensor =
ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
inference_request.set_input_tensor(input_tensor); // Set input tensor for inference
};
void Inf::post(cv::Mat &frame) {
std::vector<int> class_list;
std::vector<float> confidence_list;
std::vector<cv::Rect> box_list;
const float *detections = inference_request.get_output_tensor().data<const float>();
const cv::Mat detection_outputs(output_shape, CV_32F, (float *)detections);
for (int i = 0; i < detection_outputs.cols; ++i) {
const cv::Mat classes_scores = detection_outputs.col(i).rowRange(4, detection_outputs.rows);
cv::Point class_id;
double score;
cv::minMaxLoc(classes_scores, nullptr, &score, nullptr, &class_id);
if (score > probability) {
class_list.push_back(class_id.y);
confidence_list.push_back(score);
const float x = detection_outputs.at<float>(0, i);
const float y = detection_outputs.at<float>(1, i);
const float w = detection_outputs.at<float>(2, i);
const float h = detection_outputs.at<float>(3, i);
cv::Rect box;
box.x = static_cast<int>(x);
box.y = static_cast<int>(y);
box.width = static_cast<int>(w);
box.height = static_cast<int>(h);
box_list.push_back(box);
}
}
std::vector<int> NMS_result;
cv::dnn::NMSBoxes(box_list, confidence_list, probability, NMS, NMS_result);
for (int i = 0; i < NMS_result.size(); ++i) {
Detection result;
const unsigned short id = NMS_result[i];
result.class_id = class_list[id];
result.probability = confidence_list[id];
result.box = GetBoundingBox(box_list[id]);
DrawDetectedObject(frame, result);
}
};
void Inf::inference(cv::Mat &frame) {
pre(frame);
inference_request.infer();
post(frame);
};
cv::Rect Inf::GetBoundingBox(const cv::Rect &src) const {
cv::Rect box = src;
box.x = (box.x - box.width / 2) * scale_factor.x;
box.y = (box.y - box.height / 2) * scale_factor.y;
box.width *= scale_factor.x;
box.height *= scale_factor.y;
return box;
}
void Inf::DrawDetectedObject(cv::Mat &frame, const Detection &detection) const {
const cv::Rect &box = detection.box;
const float &confidence = detection.probability;
const int &class_id = detection.class_id;
const cv::Scalar &color = cv::Scalar(0, 0, 180);
cv::rectangle(frame, cv::Point(box.x, box.y), cv::Point(box.x + box.width, box.y + box.height), color, 3);
}


@ -0,0 +1,46 @@
#ifndef INFER_HPP_
#define INFER_HPP_
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <openvino/openvino.hpp>
// Detection result structure
struct Detection {
short class_id; // Class identifier
float probability; // Detection confidence
cv::Rect box; // Object bounding box
};
// Inference class
class Inf {
public:
Inf() {};
Inf(const std::string &model_path, const float &model_probability, const float &model_NMS);
Inf(const std::string &model_path, const cv::Size model_input_shape, const float &model_probability, const float &model_NMS);
~Inf() {};
void inference(cv::Mat &frame);
private:
void init(const std::string &model_path);
void pre(const cv::Mat &frame);
void post(cv::Mat &frame);
cv::Rect GetBoundingBox(const cv::Rect &src) const;
void DrawDetectedObject(cv::Mat &frame, const Detection &detections) const;
cv::Point2f scale_factor;
cv::Size2f input_shape;
cv::Size output_shape;
ov::InferRequest inference_request;
ov::CompiledModel compiled_model;
float probability;
float NMS;
};
#endif // INFER_HPP_


@ -0,0 +1 @@
/tmp/cpp_openvino_result.bmp


@ -0,0 +1,26 @@
set_project("inf")
set_languages("cxx17")
add_rules("mode.debug", "mode.release")
if is_mode("debug") then
set_symbols("debug")
set_optimize("none")
end
add_includedirs(
"/opt/opencv-4.8.0/include/opencv4/" -- OpenCV
)
add_linkdirs(
"/opt/opencv-4.8.0/lib/" -- OpenCV
)
target("inf")
set_kind("binary")
add_syslinks(
"openvino",
"opencv_core", "opencv_imgproc", "opencv_highgui",
"opencv_imgcodecs", "opencv_dnn"
)
add_files("i.cpp", "infer.cc")
add_runenvs("LD_LIBRARY_PATH", "/opt/opencv-4.8.0/lib/")
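
To build and run, `i.cpp` expects the model and image paths as `argv[1]` and `argv[2]`; the paths below are assumptions borrowed from the neighboring Python example:

```sh
xmake
xmake run inf ../Models/yolov8n_openvino_model/yolov8n.xml ../../../assets/bus.jpg
```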

0
projects/OpenVINO/Python/.gitignore vendored Normal file


@ -0,0 +1,16 @@
# ...
## Exporting YOLOv8 to OpenVINO
Documentation on the [Ultralytics](https://docs.ultralytics.com/ru/integrations/openvino/) site.
```sh
yolo export model=yolov8n.pt format=openvino
```
## Downloading a model from Open Model Zoo
```sh
pip3 install openvino-dev
omz_downloader --name person-detection-0106
```


@ -0,0 +1,58 @@
#!/usr/bin/env python3
import cv2
import numpy as np
import openvino as ov
model_path = '../Models/yolov8n_openvino_model/yolov8n.xml'
image_path = '../../../assets/bus.jpg'
device_name = 'CPU'
def main():
# Load OpenVINO
core = ov.Core()
model = core.read_model(model_path)
# Load the image
image = cv2.imread(image_path)
# Add the N dimension
input_tensor = np.expand_dims(image, 0)
# Reshape the model to the image height and width
n, h, w, c = input_tensor.shape
model.reshape({model.input().get_any_name(): ov.PartialShape((n, c, h, w))})
# Preprocessing
ppp = ov.preprocess.PrePostProcessor(model)
ppp.input().tensor().set_element_type(ov.Type.u8).set_layout(ov.Layout('NHWC'))
ppp.input().model().set_layout(ov.Layout('NCHW'))
ppp.output().tensor().set_element_type(ov.Type.f32)
model = ppp.build()
compiled_model = core.compile_model(model, device_name)
results = compiled_model.infer_new_request({0: input_tensor})
# Output
predictions = next(iter(results.values()))
detections = predictions.reshape(-1, 7)
for detection in detections:
confidence = detection[2]
if confidence > 0.25:
class_id = int(detection[1])
xmin = int(detection[3] * w)
ymin = int(detection[4] * h)
xmax = int(detection[5] * w)
ymax = int(detection[6] * h)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
cv2.imwrite('/tmp/py_openvino_result.bmp', image)
if __name__ == '__main__':
main()


@ -0,0 +1,121 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# https://docs.openvino.ai/2023.3/openvino_sample_hello_reshape_ssd.html
import logging as log
import os
import sys
import cv2
import numpy as np
import openvino as ov
def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
# Parsing and validation of input arguments
if len(sys.argv) != 4:
log.info(f'Usage: {sys.argv[0]} <path_to_model> <path_to_image> <device_name>')
return 1
model_path = sys.argv[1]
image_path = sys.argv[2]
device_name = sys.argv[3]
# --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
log.info('Creating OpenVINO Runtime Core')
core = ov.Core()
# --------------------------- Step 2. Read a model --------------------------------------------------------------------
log.info(f'Reading the model: {model_path}')
# (.xml and .bin files) or (.onnx file)
model = core.read_model(model_path)
if len(model.inputs) != 1:
log.error('Sample supports only single input topologies')
return -1
if len(model.outputs) != 1:
log.error('Sample supports only single output topologies')
return -1
# --------------------------- Step 3. Set up input --------------------------------------------------------------------
# Read input image
image = cv2.imread(image_path)
# Add N dimension
input_tensor = np.expand_dims(image, 0)
log.info('Reshaping the model to the height and width of the input image')
n, h, w, c = input_tensor.shape
model.reshape({model.input().get_any_name(): ov.PartialShape((n, c, h, w))})
# --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
ppp = ov.preprocess.PrePostProcessor(model)
# 1) Set input tensor information:
# - input() provides information about a single model input
# - precision of tensor is supposed to be 'u8'
# - layout of data is 'NHWC'
ppp.input().tensor() \
.set_element_type(ov.Type.u8) \
.set_layout(ov.Layout('NHWC')) # noqa: N400
# 2) Here we suppose model has 'NCHW' layout for input
ppp.input().model().set_layout(ov.Layout('NCHW'))
# 3) Set output tensor information:
# - precision of tensor is supposed to be 'f32'
ppp.output().tensor().set_element_type(ov.Type.f32)
# 4) Apply preprocessing modifying the original 'model'
model = ppp.build()
# ---------------------------Step 5. Loading model to the device-------------------------------------------------------
log.info('Loading the model to the plugin')
compiled_model = core.compile_model(model, device_name)
# --------------------------- Step 6. Create infer request and do inference synchronously -----------------------------
log.info('Starting inference in synchronous mode')
results = compiled_model.infer_new_request({0: input_tensor})
# ---------------------------Step 7. Process output--------------------------------------------------------------------
predictions = next(iter(results.values()))
# Change a shape of a numpy.ndarray with results ([1, 1, N, 7]) to get another one ([N, 7]),
# where N is the number of detected bounding boxes
detections = predictions.reshape(-1, 7)
for detection in detections:
confidence = detection[2]
if confidence > 0.5:
class_id = int(detection[1])
xmin = int(detection[3] * w)
ymin = int(detection[4] * h)
xmax = int(detection[5] * w)
ymax = int(detection[6] * h)
log.info(f'Found: class_id = {class_id}, confidence = {confidence:.2f}, ' f'coords = ({xmin}, {ymin}), ({xmax}, {ymax})')
# Draw a bounding box on the output image
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
cv2.imwrite('out.bmp', image)
if os.path.exists('out.bmp'):
log.info('Image out.bmp was created!')
else:
log.error('Image out.bmp was not created. Check your permissions.')
# ----------------------------------------------------------------------------------------------------------------------
log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
return 0
if __name__ == '__main__':
sys.exit(main())


@ -0,0 +1,23 @@
black==24.4.2
certifi==2024.7.4
charset-normalizer==3.3.2
click==8.1.7
defusedxml==0.7.1
idna==3.7
mypy-extensions==1.0.0
networkx==3.1
numpy==1.24.4
opencv-python==4.10.0.84
openvino==2024.3.0
openvino-dev==2024.3.0
openvino-telemetry==2024.1.0
packaging==24.1
pathspec==0.12.1
pillow==10.4.0
platformdirs==4.2.2
PyYAML==6.0.2
requests==2.32.3
tomli==2.0.1
typing_extensions==4.12.2
urllib3==2.2.2
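
A sketch of installing the pinned dependencies into an isolated virtual environment:

```sh
python3 -m venv .venv
. .venv/bin/activate
pip3 install -r requirements.txt
```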


@ -0,0 +1 @@
/tmp/py_openvino_result.bmp


@ -0,0 +1 @@
../../../code/C++/.clang-format

11
projects/Qt/example/.gitignore vendored Normal file

@ -0,0 +1,11 @@
# xmake
.xmake/
build/
# qmake
.qmake.stash
*.o
moc_*.cpp
moc_*.j
moc_predefs.h
Makefile


@ -0,0 +1,50 @@
## Themes
### qt5ct
```sh
apt install qt5ct qt5-style-plugins
```
```sh
QT_QPA_PLATFORMTHEME=qt5ct xmake run
```
Settings are configured in the `qt5ct` application.
### Adwaita
Install the `adwaita-qt` and `adwaita-qt6` packages.
```sh
apt install adwaita-qt adwaita-qt6
```
Light theme:
```sh
# Light theme
QT_STYLE_OVERRIDE=adwaita xmake run
QT_STYLE_OVERRIDE=Adwaita xmake run
```
Dark theme:
```sh
# Dark theme
QT_STYLE_OVERRIDE=adwaita-dark xmake run
QT_STYLE_OVERRIDE=Adwaita-dark xmake run
```
## Other
```cpp
/**
* Available platform plugins are:
* eglfs, linuxfb, minimal, minimalegl, offscreen, vnc,
* wayland-egl, wayland, wayland-xcomposite-egl, wayland-xcomposite-glx, xcb
*/
qputenv("QT_QPA_PLATFORM", "minimal");
```


@ -0,0 +1,13 @@
QT += core gui widgets
CONFIG += c++17
INCLUDEPATH += include/
SOURCES += src/main.cpp \
src/mainwindow.cpp
HEADERS += \
include/mainwindow.hpp
DESTDIR = build
TARGET = example
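
Building with qmake; `DESTDIR` and `TARGET` above place the binary at `build/example`. The `.pro` file name is assumed here:

```sh
qmake example.pro && make
./build/example
```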


@ -0,0 +1,29 @@
#ifndef MAINWINDOW_HPP
#define MAINWINDOW_HPP
#include <QProcess>
#include <QWidget>
class QPushButton;
class QTextBrowser;
class MainWindow : public QWidget {
Q_OBJECT
Q_CLASSINFO("Author", "Alexander Popov")
Q_CLASSINFO("Status", "Active")
private slots:
void onButtonReleased();
void onCaptureProcessOutput();
private:
QPushButton *button;
QTextBrowser *textBrowser;
QProcess process;
public:
explicit MainWindow(QWidget *parent = nullptr);
~MainWindow();
};
#endif // MAINWINDOW_HPP


@ -0,0 +1,18 @@
#include <QApplication>
#include <QDebug>
#include <QtWidgets>
#include "mainwindow.hpp"
int main(int argc, char *argv[]) {
// qputenv("QT_STYLE_OVERRIDE", "adwaita-dark");
QApplication app(argc, argv);
MainWindow window;
window.show();
// qDebug() << "Hello World";
return app.exec();
}


@ -0,0 +1,47 @@
#include <QtWidgets>
#include "mainwindow.hpp"
MainWindow::MainWindow(QWidget *parent) : QWidget(parent) {
setWindowTitle(tr("ololo"));
setGeometry(100, 100, 300, 200);
// widgets
button = new QPushButton(tr("Push Me!"));
textBrowser = new QTextBrowser();
// layout
QGridLayout *mainLayout = new QGridLayout;
mainLayout->addWidget(button, 0, 0);
mainLayout->addWidget(textBrowser, 1, 0);
setLayout(mainLayout);
// signals
connect(button, SIGNAL(released()), this, SLOT(onButtonReleased()));
connect(&process, SIGNAL(readyReadStandardOutput()), this, SLOT(onCaptureProcessOutput()));
}
MainWindow::~MainWindow() {
delete button;
delete textBrowser;
}
void MainWindow::onButtonReleased() {
textBrowser->clear();
textBrowser->append(tr("Running command:\n"));
process.setReadChannel(QProcess::StandardOutput); // read the child's stdout
process.start("ls", QStringList() << "-lh" << qgetenv("HOME"));
// if (!process.waitForStarted() || !process.waitForFinished()) {
// return;
// }
}
void MainWindow::onCaptureProcessOutput() {
QProcess *proc = qobject_cast<QProcess *>(sender());
if (proc) {
textBrowser->append(proc->readAllStandardOutput());
}
}


@ -0,0 +1,21 @@
set_project("example")
add_rules("mode.debug", "mode.release")
if is_mode("debug") then
set_symbols("debug")
set_optimize("none")
end
add_includedirs(
"include" -- APPLICATION
)
add_linkdirs(
)
target("example")
-- set_kind("binary")
add_rules("qt.widgetapp")
add_headerfiles("include/*.hpp")
add_files("src/*.cpp")
-- add files with Q_OBJECT meta (only for qt.moc)
add_files("include/*.hpp")

2
projects/nanomq/.gitignore vendored Normal file

@ -0,0 +1,2 @@
jwtRS256.*
mqtt_client.*

26
projects/nanomq/README.md Normal file

@ -0,0 +1,26 @@
# ...
## Generating a **jwtRS256** key
```sh
ssh-keygen -t rsa -b 4096 -m PEM -f jwtRS256.key # Don't add passphrase
openssl rsa -in jwtRS256.key -pubout -outform PEM -out jwtRS256.key.pub
```
The commands below convert the keys into one-liners:
```sh
awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' jwtRS256.key
awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' jwtRS256.key.pub
```
## Debugging
Use the [mqttx](https://mqttx.app/) application for debugging.
## Links
* https://gist.github.com/ygotthilf/baa58da5c3dd1f69fae9
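
For a quick smoke test against the broker (port 1883; `nanomq.conf` allows anonymous access), the MQTTX CLI can subscribe in one terminal and publish from another; the topic name is arbitrary:

```sh
mqttx sub -h 127.0.0.1 -p 1883 -t 'test/topic'
mqttx pub -h 127.0.0.1 -p 1883 -t 'test/topic' -m 'hello'
```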


@ -0,0 +1,96 @@
bridges.mqtt.emqx1 {
server = "mqtt-tcp://127.0.0.1:1883"
proto_ver = 5
keepalive = 60s
backoff_max = 60s
clean_start = false
username = username
password = passwd
conn_properties = {
maximum_packet_size = 1024
receive_maximum = 65535
topic_alias_maximum = 0
request_problem_infomation = 1
request_response_infomation = 0
session_expiry_interval = 0
user_property = {
key1 = value1
key2 = value2
}
}
will {
topic = "will_topic"
qos = 1
retain = false
payload = "will_message"
properties = {
payload_format_indicator = 0
message_expiry_interval = 0
content_type = ""
response_topic = ""
correlation_data = ""
will_delay_interval = 0
user_property = {
key1 = value1
key2 = value2
}
}
}
forwards = [
{
remote_topic = "fwd/topic1"
local_topic = "topic1"
}
{
remote_topic = "fwd/topic2"
local_topic = "topic2"
}
]
quic_keepalive = 120s
quic_idle_timeout = 120s
quic_discon_timeout = 20s
quic_handshake_timeout = 60s
quic_send_idle_timeout = 2s
quic_initial_rtt_ms = 800ms
quic_max_ack_delay_ms = 100ms
quic_multi_stream = false
quic_qos_priority = true
quic_0rtt = true
subscription = [
{
remote_topic = "cmd/topic3"
local_topic = "topic3"
qos = 1
}
{
remote_topic = "cmd/topic4"
local_topic = "topic4"
qos = 2
}
]
sub_properties {
identifier = 1
user_property = {
key1 = value1
key2 = value2
}
}
hybrid_bridging = false
hybrid_servers = ["mqtt-quic://127.1:14567", "mqtt-tcp://127.1:1883"]
max_parallel_processes = 2
max_send_queue_len = 32
max_recv_queue_len = 128
}
bridges.mqtt.cache {
disk_cache_size = 102400
flush_mem_threshold = 100
resend_interval = 5000
}


@ -0,0 +1,58 @@
mqtt {
property_size = 32
max_packet_size = 260MB
max_mqueue_len = 2048
retry_interval = 10s
keepalive_multiplier = 1.25
# The three settings below are currently unsupported
max_inflight_window = 2048
max_awaiting_rel = 10s
await_rel_timeout = 10s
}
listeners.tcp {
bind = "0.0.0.0:1883"
}
listeners.ws {
bind = "0.0.0.0:8083/mqtt"
}
http_server {
port = 8081
limit_conn = 2
username = admin
password = public
auth_type = basic
jwt {
public.keyfile = "/home/user/Develop/snipplets.dev/projects/nanomq/jwtRS256.key.pub"
}
}
log {
to = [file, console]
level = warn
dir = "/tmp"
file = "nanomq.log"
rotation {
size = 10MB
count = 5
}
}
auth {
allow_anonymous = true
no_match = allow
deny_action = ignore
cache = {
max_size = 32
ttl = 1m
}
# password = {include "/etc/nanomq_pwd.conf"}
# acl = {include "/etc/nanomq_acl.conf"}
}
include "/home/user/Develop/snipplets.dev/projects/nanomq/bridge.conf"


@ -0,0 +1,3 @@
#!/bin/sh
nanomq start --conf "$(pwd)/nanomq.conf"