[JS OV] Node.js API (#19848)

Co-authored-by: almilosz <alicja.miloszewska@intel.com>
Co-authored-by: Andrei Kashchikhin <andrey.kashchikhin@intel.com>
Co-authored-by: yatarkan <yaroslav.tarkan@intel.com>
Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
Vishniakov Nikolai 2023-12-20 01:19:34 +01:00 committed by GitHub
parent 2779df5800
commit 8d704f6400
99 changed files with 12824 additions and 2 deletions


@@ -5,6 +5,11 @@ allow-licenses:
- 'BSD-2-Clause AND BSD-3-Clause'
- 'MIT'
- 'Apache-2.0'
- 'ISC'
- 'Apache-2.0 AND MIT'
- 'BlueOak-1.0.0'
- '0BSD'
- 'Python-2.0'
fail-on-scopes:
- 'runtime'
- 'development'


@@ -85,6 +85,8 @@ macro(ov_define_component_include_rules)
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
unset(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL)
unset(OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL)
# nodejs
set(OV_CPACK_COMP_NPM_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# tools
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
unset(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL)


@@ -101,6 +101,8 @@ macro(ov_define_component_include_rules)
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# we don't need requirements.txt in the package, because dependencies are installed by package managers like conda
set(OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# nodejs
set(OV_CPACK_COMP_NPM_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# tools
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)


@@ -106,6 +106,8 @@ macro(ov_define_component_include_rules)
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# because numpy is installed by apt
set(OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# nodejs
set(OV_CPACK_COMP_NPM_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# tools
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)


@@ -0,0 +1,105 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
include(GNUInstallDirs)
# We have to specify RPATH since all runtime libs are in one directory
set(CMAKE_SKIP_INSTALL_RPATH OFF)
#
# ov_npm_cpack_set_dirs()
#
# Set directories for cpack
#
macro(ov_npm_cpack_set_dirs)
# npm package uses a flat layout: everything is installed into the package root
set(OV_CPACK_INCLUDEDIR .)
set(OV_CPACK_IE_CMAKEDIR .)
set(OV_CPACK_NGRAPH_CMAKEDIR .)
set(OV_CPACK_OPENVINO_CMAKEDIR .)
set(OV_CPACK_DOCDIR .)
set(OV_CPACK_SAMPLESDIR .)
set(OV_CPACK_WHEELSDIR .)
set(OV_CPACK_TOOLSDIR .)
set(OV_CPACK_DEVREQDIR .)
set(OV_CPACK_PYTHONDIR .)
set(OV_CPACK_LIBRARYDIR .)
set(OV_CPACK_RUNTIMEDIR .)
set(OV_CPACK_ARCHIVEDIR .)
set(OV_CPACK_PLUGINSDIR .)
set(OV_CPACK_LICENSESDIR licenses)
unset(OV_CPACK_SHAREDIR)
endmacro()
ov_npm_cpack_set_dirs()
#
# Override include / exclude rules for components
# This is required to exclude some files from installation
# (e.g. npm package requires only C++ Core component)
#
macro(ov_define_component_include_rules)
# core components
unset(OV_CPACK_COMP_CORE_EXCLUDE_ALL)
set(OV_CPACK_COMP_CORE_C_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# tbb
unset(OV_CPACK_COMP_TBB_EXCLUDE_ALL)
set(OV_CPACK_COMP_TBB_DEV_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# licensing
unset(OV_CPACK_COMP_LICENSING_EXCLUDE_ALL)
# samples
set(OV_CPACK_COMP_CPP_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_C_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# python
set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_OVC_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# nodejs
unset(OV_CPACK_COMP_NPM_EXCLUDE_ALL)
# tools
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# scripts
set(OV_CPACK_COMP_INSTALL_DEPENDENCIES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_SETUPVARS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
endmacro()
ov_define_component_include_rules()
# CPACK_ARCHIVE_THREADS is new in CMake 3.18
set(CPACK_ARCHIVE_THREADS 8)


@@ -117,6 +117,8 @@ macro(ov_define_component_include_rules)
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL)
unset(OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL)
# nodejs
set(OV_CPACK_COMP_NPM_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# tools
unset(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL)
unset(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL)


@@ -41,10 +41,16 @@ endmacro()
#
function(ov_set_install_rpath TARGET_NAME lib_install_path)
if(APPLE AND CPACK_GENERATOR MATCHES "^(7Z|TBZ2|TGZ|TXZ|TZ|TZST|ZIP)$" OR CPACK_GENERATOR STREQUAL "NPM")
if (APPLE)
set(RPATH_PREFIX "@loader_path")
else()
set(RPATH_PREFIX "$ORIGIN")
endif()
unset(rpath_list)
foreach(dependency_install_path IN LISTS ARGN)
file(RELATIVE_PATH dependency_rpath "/${lib_install_path}" "/${dependency_install_path}")
set(dependency_rpath "@loader_path/${dependency_rpath}")
set(dependency_rpath "${RPATH_PREFIX}/${dependency_rpath}")
list(APPEND rpath_list "${dependency_rpath}")
endforeach()
@@ -139,6 +145,8 @@ macro(ov_define_component_names)
set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE "pyopenvino_package")
set(OV_CPACK_COMP_PYTHON_WHEELS "python_wheels")
set(OV_CPACK_COMP_OPENVINO_REQ_FILES "openvino_req_files")
# nodejs
set(OV_CPACK_COMP_NPM "ov_node_addon")
# tools
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES "openvino_dev_req_files")
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER "deployment_manager")
@@ -179,6 +187,8 @@ elseif(CPACK_GENERATOR STREQUAL "RPM")
include(packaging/rpm/rpm)
elseif(CPACK_GENERATOR STREQUAL "NSIS")
include(packaging/nsis)
elseif(CPACK_GENERATOR STREQUAL "NPM")
include(packaging/npm)
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN|VCPKG)$")
include(packaging/common-libraries)
elseif(CPACK_GENERATOR MATCHES "^(7Z|TBZ2|TGZ|TXZ|TZ|TZST|ZIP)$")


@@ -97,6 +97,8 @@ macro(ov_define_component_include_rules)
set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# because numpy is installed by rpm
set(OV_CPACK_COMP_OPENVINO_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# nodejs
set(OV_CPACK_COMP_NPM_EXCLUDE_ALL EXCLUDE_FROM_ALL)
# tools
set(OV_CPACK_COMP_OPENVINO_DEV_REQ_FILES_EXCLUDE_ALL EXCLUDE_FROM_ALL)
set(OV_CPACK_COMP_DEPLOYMENT_MANAGER_EXCLUDE_ALL EXCLUDE_FROM_ALL)

cmake/packaging/npm.cmake Normal file

@@ -0,0 +1,27 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
#
# OpenVINO npm binaries, includes openvino:runtime, frontends, plugins, tbb
#
macro(ov_cpack_settings)
# fill a list of components which are part of the npm package
set(cpack_components_all ${CPACK_COMPONENTS_ALL})
unset(CPACK_COMPONENTS_ALL)
foreach(item IN LISTS cpack_components_all)
string(TOUPPER ${item} UPPER_COMP)
# filter out components that should not be included in the npm package
if(NOT OV_CPACK_COMP_${UPPER_COMP}_EXCLUDE_ALL AND
# python is not required for the npm package
NOT item MATCHES "^${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE}_python.*")
list(APPEND CPACK_COMPONENTS_ALL ${item})
endif()
endforeach()
unset(cpack_components_all)
list(REMOVE_DUPLICATES CPACK_COMPONENTS_ALL)
# override generator
set(CPACK_GENERATOR "TGZ")
endmacro()


@@ -4,6 +4,8 @@
if(CPACK_GENERATOR STREQUAL "DEB")
include("${OpenVINO_SOURCE_DIR}/cmake/packaging/debian.cmake")
elseif(CPACK_GENERATOR STREQUAL "NPM")
include("${OpenVINO_SOURCE_DIR}/cmake/packaging/npm.cmake")
elseif(CPACK_GENERATOR STREQUAL "RPM")
include("${OpenVINO_SOURCE_DIR}/cmake/packaging/rpm.cmake")
elseif(CPACK_GENERATOR MATCHES "^(CONDA-FORGE|BREW|CONAN|VCPKG)$")

samples/js/.gitignore vendored Normal file

@@ -0,0 +1 @@
/assets


@@ -0,0 +1,15 @@
module.exports = {
parserOptions: {
ecmaVersion: 'latest'
},
env: {
node: true,
es6: true,
},
extends: [
'eslint:recommended',
'../../../src/bindings/js/.eslintrc-global.js',
],
ignorePatterns: ['node_modules/', '.eslintrc.js'],
root: true,
};

samples/js/node/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
node_modules
hello_reshape_ssd/out.jpg

samples/js/node/README.md Normal file

@@ -0,0 +1,26 @@
# OpenVINO™ JavaScript API usage examples
## Installation of the openvinojs-node package
From *openvino/src/bindings/js/node* run `npm i` to download the OpenVINO™ runtime, install the requirements, build the bindings, and compile the TypeScript code to JavaScript.
On *nix systems, run `source openvino/src/bindings/js/node/scripts/setupvars.sh` to add the path to the OpenVINO™ runtime libraries to the `LD_LIBRARY_PATH` variable.
Note: Perform these steps before running the notebooks as well.
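To verify the installation, a minimal check (a sketch; it assumes `openvinojs-node` resolves from your working directory):

```js
const { addon: ov } = require('openvinojs-node');

// Constructing a Core should not throw if the bindings
// and the runtime libraries are found.
const core = new ov.Core();
console.log(core);
```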
## Samples
- hello_classification
- hello_reshape_ssd
- classification_sample_async
## Notebooks
Use the [Node.js Notebooks (REPL)](https://marketplace.visualstudio.com/items?itemName=donjayamanne.typescript-notebook)
VSCode extension to run these notebook samples.
Make sure that the `LD_LIBRARY_PATH` variable contains the path to the OpenVINO runtime folder.
- ./notebooks
- 001-hello-world.nnb
- 003-hello-segmentation.nnb
- 004-hello-detection.nnb
- 213-question-answering.nnb


@@ -0,0 +1,8 @@
# Image Classification Async NodeJS Sample
Only models with a single input and a single output are supported.
Run:
`node classification_sample_async.js -m *path_to_model_file* -i *path_to_img1* -i *path_to_img2* -d AUTO`
For further details, see /samples/python/classification_sample_async/README.md


@@ -0,0 +1,107 @@
const { addon: ov } = require('openvinojs-node');
const args = require('args');
const { cv } = require('opencv-wasm');
const { getImageData } = require('../helpers.js');
args.options([{
name: 'img',
defaultValue: [],
}, {
name: 'model',
}, {
name: 'device',
}]);
const { model: modelPath, device: deviceName, img: images } =
args.parse(process.argv);
main(modelPath, images, deviceName);
function completionCallback(result, imagePath) {
const predictions = Array.from(result.data)
.map((prediction, classId) => ({ prediction, classId }))
.sort(({ prediction: predictionA }, { prediction: predictionB }) =>
predictionA === predictionB ? 0 : predictionA > predictionB ? -1 : 1);
console.log(`Image path: ${imagePath}`);
console.log('Top 10 results:');
console.log('class_id probability');
console.log('--------------------');
predictions.slice(0, 10).forEach(({ classId, prediction }) =>
console.log(`${classId}\t ${prediction.toFixed(7)}`),
);
console.log();
}
async function main(modelPath, images, deviceName) {
//----------- Step 1. Initialize OpenVINO Runtime Core -----------------------
console.log('Creating OpenVINO Runtime Core');
const core = new ov.Core();
//----------- Step 2. Read a model -------------------------------------------
console.log(`Reading the model: ${modelPath}`);
// (.xml and .bin files) or (.onnx file)
const model = await core.readModel(modelPath);
const [h, w] = model.inputs[0].shape.slice(-2);
const tensorShape = [1, h, w, 3];
if (model.inputs.length !== 1)
throw new Error('Sample supports only single input topologies');
if (model.outputs.length !== 1)
throw new Error('Sample supports only single output topologies');
//----------- Step 3. Set up input -------------------------------------------
// Read input image
const imagesData = [];
for (const imagePath of images)
imagesData.push(await getImageData(imagePath));
const preprocessedImages = imagesData.map((imgData) => {
// Use opencv-wasm to preprocess image.
const originalImage = cv.matFromImageData(imgData);
const image = new cv.Mat();
// The MobileNet model expects images in RGB format.
cv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);
cv.resize(image, image, new cv.Size(w, h));
return new Uint8Array(image.data);
});
//----------- Step 4. Apply preprocessing ------------------------------------
const _ppp = new ov.preprocess.PrePostProcessor(model);
_ppp.input().tensor().setLayout('NHWC').setElementType(ov.element.u8);
_ppp.input().model().setLayout('NCHW');
_ppp.output().tensor().setElementType(ov.element.f32);
_ppp.build();
//----------------- Step 5. Loading model to the device ----------------------
console.log('Loading the model to the plugin');
const compiledModel = await core.compileModel(model, deviceName);
const outputName = compiledModel.output(0).toString();
//----------- Step 6. Collecting promises to react when they resolve ---------
console.log('Starting inference in asynchronous mode');
// Create infer request
const inferRequest = compiledModel.createInferRequest();
const promises = preprocessedImages.map((tensorData, i) => {
const inferPromise = inferRequest.inferAsync([
new ov.Tensor(ov.element.u8, tensorShape, tensorData)
]);
inferPromise.then(result =>
completionCallback(result[outputName], images[i]));
return inferPromise;
});
//----------- Step 7. Do inference -------------------------------------------
await Promise.all(promises);
console.log('All inferences executed');
console.log('\nThis sample is an API example, for any performance '
+ 'measurements please use the dedicated benchmark_app tool');
}


@@ -0,0 +1,8 @@
# Hello Classification NodeJS Sample
Only models with a single input and a single output are supported.
Run:
`node hello_classification.js *path_to_model_file* *path_to_img* AUTO`
For further details, see /samples/python/hello_classification/README.md


@@ -0,0 +1,82 @@
const { addon: ov } = require('openvinojs-node');
const { cv } = require('opencv-wasm');
const { getImageData } = require('../helpers.js');
// Parsing and validation of input arguments
if (process.argv.length !== 5)
throw new Error(`Usage: ${process.argv[1]} <path_to_model> `
+ '<path_to_image> <device_name>');
const modelPath = process.argv[2];
const imagePath = process.argv[3];
const deviceName = process.argv[4];
main(modelPath, imagePath, deviceName);
async function main(modelPath, imagePath, deviceName) {
//----------------- Step 1. Initialize OpenVINO Runtime Core -----------------
console.log('Creating OpenVINO Runtime Core');
const core = new ov.Core();
//----------------- Step 2. Read a model -------------------------------------
console.log(`Reading the model: ${modelPath}`);
const model = await core.readModel(modelPath);
if (model.inputs.length !== 1)
throw new Error('Sample supports only single input topologies');
if (model.outputs.length !== 1)
throw new Error('Sample supports only single output topologies');
//----------------- Step 3. Set up input -------------------------------------
// Read input image
const imgData = await getImageData(imagePath);
// Use opencv-wasm to preprocess image.
const originalImage = cv.matFromImageData(imgData);
const image = new cv.Mat();
// The MobileNet model expects images in RGB format.
cv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);
const tensorData = new Float32Array(image.data);
const shape = [1, image.rows, image.cols, 3];
const inputTensor = new ov.Tensor(ov.element.f32, shape, tensorData);
//----------------- Step 4. Apply preprocessing ------------------------------
const _ppp = new ov.preprocess.PrePostProcessor(model);
_ppp.input().tensor().setShape(shape).setLayout('NHWC');
_ppp.input().preprocess().resize(ov.preprocess.resizeAlgorithm.RESIZE_LINEAR);
_ppp.input().model().setLayout('NCHW');
_ppp.output().tensor().setElementType(ov.element.f32);
_ppp.build();
//----------------- Step 5. Loading model to the device ----------------------
console.log('Loading the model to the plugin');
const compiledModel = await core.compileModel(model, deviceName);
//---------------- Step 6. Create infer request and do inference synchronously
console.log('Starting inference in synchronous mode');
const inferRequest = compiledModel.createInferRequest();
inferRequest.setInputTensor(inputTensor);
inferRequest.infer();
//----------------- Step 7. Process output -----------------------------------
const outputLayer = compiledModel.outputs[0];
const resultInfer = inferRequest.getTensor(outputLayer);
const predictions = Array.from(resultInfer.data)
.map((prediction, classId) => ({ prediction, classId }))
.sort(({ prediction: predictionA }, { prediction: predictionB }) =>
predictionA === predictionB ? 0 : predictionA > predictionB ? -1 : 1);
console.log(`Image path: ${imagePath}`);
console.log('Top 10 results:');
console.log('class_id probability');
console.log('--------------------');
predictions.slice(0, 10).forEach(({ classId, prediction }) =>
console.log(`${classId}\t ${prediction.toFixed(7)}`),
);
console.log('\nThis sample is an API example, for any performance '
+ 'measurements please use the dedicated benchmark_app tool');
}


@@ -0,0 +1,8 @@
# Hello Reshape SSD NodeJS Sample
Only models with a single input and a single output are supported.
Run:
`node hello_reshape_ssd.js *path_to_model_file* *path_to_img* AUTO`
For further details, see /samples/python/hello_reshape_ssd/README.md


@@ -0,0 +1,119 @@
const { addon: ov } = require('openvinojs-node');
const fs = require('node:fs/promises');
const { cv } = require('opencv-wasm');
const {
setShape,
getImageData,
getImageBuffer,
arrayToImageData,
} = require('../helpers.js');
// Parsing and validation of input arguments
if (process.argv.length !== 5)
throw new Error(`Usage: ${process.argv[1]} <path_to_model> `
+ '<path_to_image> <device_name>');
const modelPath = process.argv[2];
const imagePath = process.argv[3];
const deviceName = process.argv[4];
main(modelPath, imagePath, deviceName);
async function main(modelPath, imagePath, deviceName) {
//----------------- Step 1. Initialize OpenVINO Runtime Core -----------------
console.log('Creating OpenVINO Runtime Core');
const core = new ov.Core();
//----------------- Step 2. Read a model -------------------------------------
console.log(`Reading the model: ${modelPath}`);
// (.xml and .bin files) or (.onnx file)
const model = await core.readModel(modelPath);
if (model.inputs.length !== 1)
throw new Error('Sample supports only single input topologies');
if (model.outputs.length !== 1)
throw new Error('Sample supports only single output topologies');
//----------------- Step 3. Set up input -------------------------------------
// Read input image
const imgData = await getImageData(imagePath);
// Use opencv-wasm to preprocess image.
const originalImage = cv.matFromImageData(imgData);
const image = new cv.Mat();
// The MobileNet model expects images in RGB format.
cv.cvtColor(originalImage, image, cv.COLOR_RGBA2RGB);
const tensorData = new Uint8Array(image.data);
const shape = [1, image.rows, image.cols, 3];
const inputTensor = new ov.Tensor(ov.element.u8, shape, tensorData);
//----------------- Step 4. Apply preprocessing ------------------------------
const _ppp = new ov.preprocess.PrePostProcessor(model);
_ppp.input().preprocess().resize(ov.preprocess.resizeAlgorithm.RESIZE_LINEAR);
_ppp.input().tensor()
.setShape(shape)
.setElementType(ov.element.u8)
.setLayout('NHWC');
_ppp.input().model().setLayout('NCHW');
_ppp.output().tensor().setElementType(ov.element.f32);
_ppp.build();
//----------------- Step 5. Loading model to the device ----------------------
console.log('Loading the model to the plugin');
const compiledModel = await core.compileModel(model, deviceName);
//---------------- Step 6. Create infer request and do inference synchronously
console.log('Starting inference in synchronous mode');
const inferRequest = compiledModel.createInferRequest();
inferRequest.setInputTensor(inputTensor);
inferRequest.infer();
//----------------- Step 7. Process output -----------------------------------
const outputLayer = compiledModel.outputs[0];
const resultInfer = inferRequest.getTensor(outputLayer);
const predictions = Array.from(resultInfer.data);
const [height, width] = [originalImage.rows, originalImage.cols];
const detections = setShape(predictions, [100, 7]);
const color = [255, 0, 0, 255];
const THRESHOLD = 0.9; // minimum confidence to keep a detection
detections.forEach(detection => {
const [classId, confidence, xmin, ymin, xmax, ymax] = detection.slice(1);
if (confidence < THRESHOLD) return;
console.log(`Found: classId = ${classId}, `
+ `confidence = ${confidence.toFixed(2)}, `
+ `coords = (${xmin}, ${ymin}), (${xmax}, ${ymax})`,
);
// Draw a bounding box on the output image
cv.rectangle(originalImage,
new cv.Point(xmin*width, ymin*height),
new cv.Point(xmax*width, ymax*height),
color,
2,
);
});
const resultImgData = arrayToImageData(originalImage.data, width, height);
const filename = 'out.jpg';
await fs.writeFile(`./${filename}`, getImageBuffer(resultImgData));
try {
await fs.readFile(filename);
console.log('Image out.jpg was created!');
} catch(err) {
console.log(`Image ${filename} was not created. Check your permissions.`);
}
console.log('\nThis sample is an API example, for any performance '
+ 'measurements please use the dedicated benchmark_app tool');
}

samples/js/node/helpers.js Normal file

@@ -0,0 +1,332 @@
const path = require('node:path');
const { cv } = require('opencv-wasm');
const { createWriteStream } = require('node:fs');
const { mkdir, stat } = require('node:fs/promises');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
Image,
ImageData,
loadImage,
createCanvas,
createImageData,
} = require('canvas');
module.exports = {
exp,
sum,
triu,
tril,
argMax,
reshape,
getShape,
setShape,
transform,
downloadFile,
displayImage,
getImageData,
extractValues,
getImageBuffer,
arrayToImageData,
displayArrayAsImage,
matrixMultiplication,
};
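// Wrap a raw RGBA byte array into an ImageData object of the given size.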
function arrayToImageData(array, width, height) {
return createImageData(new Uint8ClampedArray(array), width, height);
}
function getImageBuffer(imageOrImageData) {
const canvas = createCanvas(imageOrImageData.width, imageOrImageData.height);
const ctx = canvas.getContext('2d');
if (imageOrImageData instanceof Image)
ctx.drawImage(imageOrImageData, 0, 0);
else if (imageOrImageData instanceof ImageData)
ctx.putImageData(imageOrImageData, 0, 0);
else
throw Error(`Passed parameter has type '${typeof imageOrImageData}'. `
+ 'It isn\'t supported.');
return canvas.toBuffer('image/jpeg');
}
function displayImage(imageOrImageData, display) {
const buffer = getImageBuffer(imageOrImageData);
display.image(buffer);
}
function displayArrayAsImage(arr, width, height, display) {
const alpha = 255;
const componentsPerPixel = arr.length / (width*height);
try {
switch (componentsPerPixel) {
case 1:
arr = arr.reduce((acc, val) => {
acc.push(val, val, val, alpha);
return acc;
}, []);
break;
case 3:
arr = arr.reduce((acc, val, index) => {
if (index && index%3 === 0) acc.push(alpha);
acc.push(val);
return acc;
}, []);
break;
}
} catch(e) {
console.log(e);
}
const imageData = arrayToImageData(arr, width, height);
displayImage(imageData, display);
}
async function getImageData(path) {
const image = await loadImage(path);
const { width, height } = image;
const canvas = createCanvas(width, height);
const ctx = canvas.getContext('2d');
ctx.drawImage(image, 0, 0);
return ctx.getImageData(0, 0, width, height);
}
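// Reorder interleaved RGB pixel data into planar channels using the given channel order.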
function transform(arr, { width, height }, order) {
const img = new cv.Mat(height, width, cv.CV_8UC3);
img.data.set(arr);
const channels = new cv.MatVector();
cv.split(img, channels);
const val = order.map(num => [...channels.get(num).data]);
return [].concat(...val);
}
async function downloadFile(url, filename, destination) {
const { env } = process;
const timeout = 5000;
await applyFolderPath(destination);
const fullPath = path.resolve(destination, filename);
const file = createWriteStream(fullPath);
const protocolString = new URL(url).protocol === 'https:' ? 'https' : 'http';
const module = require(`node:${protocolString}`);
const proxyUrl = env.http_proxy || env.HTTP_PROXY || env.npm_config_proxy;
let agent;
if (proxyUrl) {
agent = new HttpsProxyAgent(proxyUrl);
console.log(`Proxy agent configured using: '${proxyUrl}'`);
}
return new Promise((resolve, reject) => {
file.on('error', e => {
reject(`Error opening file stream: ${e}`);
});
const getRequest = module.get(url, { agent }, res => {
const { statusCode } = res;
if (statusCode !== 200)
return reject(`Server returns status code: ${statusCode}`);
res.pipe(file);
file.on('finish', () => {
file.close();
console.log(`File successfully stored at '${fullPath}'`);
resolve();
});
});
getRequest.on('error', e => {
reject(`Error sending request: ${e}`);
});
getRequest.setTimeout(timeout, () => {
getRequest.destroy();
reject(`Request timed out after ${timeout} ms`);
});
});
}
function sum(array) {
return array.reduce((acc, val) => acc+val, 0);
}
function mul(array) {
return array.reduce((acc, val) => acc*val, 1);
}
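// Nest a flat array into a multidimensional array of the given shape.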
function setShape(flatArray, shape) {
if (mul(shape) !== flatArray.length)
throw new Error('Shape doesn\'t match the array length');
return createMultidimensionArray(flatArray, shape, 0);
}
function createMultidimensionArray(flatArray, shape, offset) {
const currentDim = shape[0];
const remainingShape = shape.slice(1);
const currentArray = [];
if (remainingShape.length === 0) {
for (let i = 0; i < currentDim; i++)
currentArray.push(flatArray[offset + i]);
}
else {
const innerArrayLength = mul(shape) / currentDim;
for (let i = 0; i < currentDim; i++) {
const innerArray = createMultidimensionArray(flatArray, remainingShape,
offset + i*innerArrayLength);
currentArray.push(innerArray);
}
}
return currentArray;
}
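// Recursively flatten a nested array into a flat list of scalar values.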
function extractValues(arrOrVal, collector = []) {
if (isIterableArray(arrOrVal)) {
arrOrVal.map(v => extractValues(v, collector));
}
else {
collector.push(arrOrVal);
}
return collector;
}
function isIterableArray(arr) {
return arr[Symbol.iterator] && arr.map;
}
function eachInner(arrOrValue, fn) {
return isIterableArray(arrOrValue)
? arrOrValue.map(e => eachInner(e, fn))
: fn(arrOrValue);
}
function exp(arr) {
return eachInner(arr, Math.exp);
}
function reshape(arr, newShape) {
const flat = extractValues(arr);
return setShape(flat, newShape);
}
function getShape(arr, acc = []) {
if (isIterableArray(arr)) {
acc.push(arr.length);
getShape(arr[0], acc);
}
return acc;
}
function matrixMultiplication(matrix1, matrix2) {
const rows1 = matrix1.length;
const cols1 = matrix1[0].length;
const rows2 = matrix2.length;
const cols2 = matrix2[0].length;
if (cols1 !== rows2)
throw new Error('Number of columns in the first matrix must match the '
+ 'number of rows in the second matrix.');
const result = [];
for (let i = 0; i < rows1; i++) {
result[i] = [];
for (let j = 0; j < cols2; j++) {
let sum = 0;
for (let k = 0; k < cols1; k++)
sum += matrix1[i][k] * matrix2[k][j];
result[i][j] = sum;
}
}
return result;
}
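// Return the maximum value and its index (ties keep the last occurrence).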
function findMax(arr) {
let max = -Infinity;
let index = -1;
for (let i = 0; i < arr.length; i++) {
if (arr[i] < max) continue;
max = arr[i];
index = i;
}
return { value: max, index };
}
function argMax(arr) {
return findMax(arr).index;
}
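// Upper triangle of a matrix: zero out elements below the k-th diagonal.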
function triu(matrix, k = 0) {
const numRows = matrix.length;
const numCols = matrix[0].length;
const result = [];
for (let i = 0; i < numRows; i++) {
result[i] = [];
for (let j = 0; j < numCols; j++)
result[i][j] = i <= j - k ? matrix[i][j] : 0;
}
return result;
}
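// Lower triangle of a matrix: zero out elements above the k-th diagonal.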
function tril(matrix, k = 0) {
const numRows = matrix.length;
const numCols = matrix[0].length;
const result = [];
for (let i = 0; i < numRows; i++) {
result[i] = [];
for (let j = 0; j < numCols; j++)
result[i][j] = i >= j - k ? matrix[i][j] : 0;
}
return result;
}
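// Ensure the directory exists, creating it recursively if missing.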
async function applyFolderPath(dirPath) {
try {
await stat(dirPath);
return;
} catch(err) {
if (err.code !== 'ENOENT') throw err;
await mkdir(dirPath, { recursive: true });
}
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -0,0 +1,227 @@
{
"cells": [
{
"language": "markdown",
"source": [
"# Interactive question answering with OpenVINO™\n\nThis demo shows interactive question answering with OpenVINO, using [small BERT-large-like model](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/bert-small-uncased-whole-word-masking-squad-int8-0002) distilled and quantized to `INT8` on SQuAD v1.1 training set from larger BERT-large model. The model comes from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/). Final part of this notebook provides live inference results from your inputs."
],
"outputs": []
},
{
"language": "markdown",
"source": [
"## Imports"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"const {\n exp,\n sum,\n tril,\n triu,\n argMax,\n reshape,\n getShape,\n downloadFile,\n extractValues,\n matrixMultiplication,\n} = require('../helpers.js');\nconst tokens = require('./tokens_bert.js');\n\nconst { addon: ov } = require('openvinojs-node'); \n"
],
"outputs": []
},
{
"language": "markdown",
"source": [
"## Download the Model"
],
"outputs": []
},
{
"language": "typescript",
"source": [
"const baseArtifactsDir = '../../assets/models';\n\nconst modelName = 'bert-small-uncased-whole-word-masking-squad-int8-0002';\nconst modelXMLName = `${modelName}.xml`;\nconst modelBINName = `${modelName}.bin`;\n\nconst modelXMLPath = baseArtifactsDir + '/' + modelXMLName;\n\nconst baseURL = 'https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.3/models_bin/1/bert-small-uncased-whole-word-masking-squad-int8-0002/FP16-INT8/';\n\nawait downloadFile(baseURL + modelXMLName, modelXMLName, baseArtifactsDir);\nawait downloadFile(baseURL + modelBINName, modelBINName, baseArtifactsDir);\n"
],
"outputs": [
{
"items": [
{
"mime": "application/vnd.code.notebook.stdout",
"value": [
"Proxy agent configured using: 'http://proxy-mu.intel.com:911'",
"Proxy agent configured using: 'http://proxy-mu.intel.com:911'",
"File successfully stored at '/home/nvishnya/Code/wasm-openvino/samples/js/assets/models/bert-small-uncased-whole-word-masking-squad-int8-0002.bin'",
""
]
}
]
}
]
},
{
"language": "markdown",
"source": [
"## Download the Vocab"
],
"outputs": []
},
{
"language": "typescript",
"source": [
"const baseImagesDir = '../../assets/text';\nconst imgUrl = 'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/text/bert-uncased/vocab.txt';\n\nawait downloadFile(imgUrl, 'vocab.txt', baseImagesDir);\n"
],
"outputs": [
{
"items": [
{
"mime": "application/vnd.code.notebook.stdout",
"value": [
"Proxy agent configured using: 'http://proxy-mu.intel.com:911'",
"File successfully stored at '/home/nvishnya/Code/wasm-openvino/samples/js/assets/text/vocab.txt'",
""
]
}
]
}
]
},
{
"language": "markdown",
"source": [
"## Load the model\n\nDownloaded models are located in a fixed structure, which indicates a vendor, a model name and a precision. Only a few lines of code are required to run the model. First, create an OpenVINO Runtime object. Then, read the network architecture and model weights from the `.xml` and `.bin` files. Finally, compile the network for the desired device."
],
"outputs": []
},
{
"language": "typescript",
"source": [
"const core = new ov.Core();\nconst model = await core.readModel(modelXMLPath);\n\nconst _ppp = new ov.preprocess.PrePostProcessor(model);\n_ppp.input(0).tensor().setElementType(ov.element.f32);\n_ppp.input(1).tensor().setElementType(ov.element.f32);\n_ppp.input(2).tensor().setElementType(ov.element.f32);\n_ppp.input(3).tensor().setElementType(ov.element.f32);\n_ppp.build();\n\nconst compiledModel = await core.compileModel(model, 'CPU');\n\nconst inputs = compiledModel.inputs;\nconst outputs = compiledModel.outputs;\n\nconst inputSize = compiledModel.input(0).shape[1];\n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"console.log('=== Model Inputs:');\ninputs.forEach(i => console.log(`${i}`));\nconsole.log('=== Model Outputs:');\noutputs.forEach(o => console.log(`${o}`));\n"
],
"outputs": [
{
"items": [
{
"mime": "application/vnd.code.notebook.stdout",
"value": [
"=== Model Inputs:",
"input_ids",
"attention_mask",
"token_type_ids",
"position_ids",
"=== Model Outputs:",
"output_s",
"output_e",
""
]
}
]
}
]
},
{
"language": "markdown",
"source": [
"## Processing\n\nNLP models usually take a list of tokens as a standard input. A token is a single word converted to some integer. To provide the proper input, you need the vocabulary for such mapping. You also need to define some special tokens, such as separators or padding and a function to load the content from provided URLs."
],
"outputs": []
},
{
"language": "javascript",
"source": [
"// The path to the vocabulary file.\nconst vocabFilePath = \"../../assets/text/vocab.txt\";\n\n// Create a dictionary with words and their indices.\nconst vocab = await tokens.loadVocabFile(vocabFilePath);\n\n// Define special tokens.\nconst clsToken = vocab[\"[CLS]\"];\nconst padToken = vocab[\"[PAD]\"];\nconst sepToken = vocab[\"[SEP]\"];\n\n// A function to load text from given urls.\nfunction loadContext(sources) {\n const input_urls = [];\n const paragraphs = [];\n \n for (source of sources) {\n paragraphs.push(source);\n\n // Produce one big context string.\n return paragraphs.join('\\n');\n }\n}\n"
],
"outputs": []
},
{
"language": "markdown",
"source": [
"## Preprocessing\n\nThe input size in this case is 384 tokens long. The main input (`input_ids`) to used BERT model consists of two parts: question tokens and context tokens separated by some special tokens. \n\nIf `question + context` are shorter than 384 tokens, padding tokens are added. If `question + context` is longer than 384 tokens, the context must be split into parts and the question with different parts of context must be fed to the network many times. \n\nUse overlapping, so neighbor parts of the context are overlapped by half size of the context part (if the context part equals 300 tokens, neighbor context parts overlap with 150 tokens). You also need to provide the following sequences of integer values: \n\n- `attention_mask` - a sequence of integer values representing the mask of valid values in the input. \n- `token_type_ids` - a sequence of integer values representing the segmentation of `input_ids` into question and context. \n- `position_ids` - a sequence of integer values from 0 to 383 representing the position index for each input token. \n\nFor more information, refer to the **Input** section of [BERT model documentation](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/bert-small-uncased-whole-word-masking-squad-int8-0002#input)."
],
"outputs": []
},
{
"language": "javascript",
"source": [
"// Based on https://github.com/openvinotoolkit/open_model_zoo/blob/bf03f505a650bafe8da03d2747a8b55c5cb2ef16/demos/common/python/openvino/model_zoo/model_api/models/bert.py#L188\nfunction findBestAnswerWindow(startScore, endScore, contextStartIdx, contextEndIdx) {\n const contextLen = contextEndIdx - contextStartIdx;\n\n const mat1 = reshape(startScore.slice(contextStartIdx, contextEndIdx), [contextLen, 1]);\n const mat2 = reshape(endScore.slice(contextStartIdx, contextEndIdx), [1, contextLen]);\n\n let scoreMat = matrixMultiplication(mat1, mat2);\n\n // Reset candidates with end before start.\n scoreMat = triu(scoreMat);\n // Reset long candidates (>16 words).\n scoreMat = tril(scoreMat, 16);\n\n // Find the best start-end pair.\n const coef = argMax(extractValues(scoreMat));\n const secondShapeDim = getShape(scoreMat)[1];\n\n const maxS = parseInt(coef/secondShapeDim);\n const maxE = coef%secondShapeDim;\n\n const maxScore = scoreMat[maxS][maxE];\n\n return [maxScore, maxS, maxE];\n}\n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"function getScore(logits) {\n const out = exp(logits);\n const summedRows = sum(out);\n\n return out.map(i => i/summedRows);\n}\n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"// Based on https://github.com/openvinotoolkit/open_model_zoo/blob/bf03f505a650bafe8da03d2747a8b55c5cb2ef16/demos/common/python/openvino/model_zoo/model_api/models/bert.py#L163\nfunction postprocess(outputStart, outputEnd, questionTokens, contextTokensStartEnd, padding, startIdx) {\n // Get start-end scores for the context.\n const scoreStart = getScore(outputStart);\n const scoreEnd = getScore(outputEnd);\n\n // An index of the first context token in a tensor.\n const contextStartIdx = questionTokens.length + 2;\n // An index of the last+1 context token in a tensor.\n const contextEndIdx = inputSize - padding - 1;\n\n // Find product of all start-end combinations to find the best one.\n let [maxScore, maxStart, maxEnd] = findBestAnswerWindow(scoreStart,\n scoreEnd,\n contextStartIdx,\n contextEndIdx);\n\n // Convert to context text start-end index.\n maxStart = contextTokensStartEnd[maxStart + startIdx][0];\n maxEnd = contextTokensStartEnd[maxEnd + startIdx][1];\n\n return [maxScore, maxStart, maxEnd];\n}\n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"// A function to add padding.\nfunction pad({ inputIds, attentionMask, tokenTypeIds }) {\n // How many padding tokens.\n const diffInputSize = inputSize - inputIds.length;\n\n if (diffInputSize > 0) {\n // Add padding to all the inputs.\n inputIds = inputIds.concat(Array(diffInputSize).fill(padToken));\n attentionMask = attentionMask.concat(Array(diffInputSize).fill(0));\n tokenTypeIds = tokenTypeIds.concat(Array(diffInputSize).fill(0));\n }\n\n return [inputIds, attentionMask, tokenTypeIds, diffInputSize];\n}\n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"// A generator of a sequence of inputs.\nfunction* prepareInput(questionTokens, contextTokens) {\n // A length of question in tokens.\n const questionLen = questionTokens.length;\n // The context part size.\n const contextLen = inputSize - questionLen - 3;\n\n if (contextLen < 16)\n throw new Error('Question is too long in comparison to input size. No space for context');\n\n const inputLayerNames = inputs.map(i => i.toString());\n\n // Take parts of the context with overlapping by 0.5.\n const max = Math.max(1, contextTokens.length - contextLen);\n\n for (let start = 0; start < max; start += parseInt(contextLen / 2)) {\n // A part of the context.\n const partContextTokens = contextTokens.slice(start, start + contextLen);\n // The input: a question and the context separated by special tokens.\n let inputIds = [clsToken, ...questionTokens, sepToken, ...partContextTokens, sepToken];\n // 1 for any index if there is no padding token, 0 otherwise.\n let attentionMask = Array(inputIds.length).fill(1);\n // 0 for question tokens, 1 for context part.\n let tokenTypeIds = [...Array(questionLen + 2).fill(0), ...Array(partContextTokens.length + 1).fill(1)];\n\n let padNumber = 0;\n\n // Add padding at the end.\n [inputIds, attentionMask, tokenTypeIds, padNumber] = pad({ inputIds, attentionMask, tokenTypeIds });\n\n // Create an input to feed the model.\n const inputDict = {\n 'input_ids': new Float32Array(inputIds),\n 'attention_mask': new Float32Array(attentionMask),\n 'token_type_ids': new Float32Array(tokenTypeIds),\n };\n\n // Some models require additional position_ids.\n if (inputLayerNames.includes('position_ids')) {\n positionIds = inputIds.map((_, index) => index);\n inputDict['position_ids'] = new Float32Array(positionIds);\n }\n\n yield [inputDict, padNumber, start];\n }\n}\n"
],
"outputs": []
},
{
"language": "markdown",
"source": [
"## Postprocessing\n\nThe results from the network are raw (logits). Use the softmax function to get the probability distribution. Then, find the best answer in the current part of the context (the highest score) and return the score and the context range for the answer."
],
"outputs": []
},
{
"language": "markdown",
"source": [
"First, create a list of tokens from the context and the question. Then, find the best answer by trying different parts of the context. The best answer should come with the highest score."
],
"outputs": []
},
{
"language": "javascript",
"source": [
"function getBestAnswer(question, context) {\n // Convert the context string to tokens.\n const [contextTokens, contextTokensStartEnd] = tokens.textToTokens(context.toLowerCase(), vocab);\n // Convert the question string to tokens.\n const [questionTokens] = tokens.textToTokens(question.toLowerCase(), vocab);\n\n const results = [];\n // Iterate through different parts of the context.\n for ([networkInput, padding, startIdx] of prepareInput(questionTokens, contextTokens)) {\n // Get output layers.\n const outputStartKey = compiledModel.output('output_s');\n const outputEndKey = compiledModel.output('output_e');\n\n // OpenVINO inference.\n const inferRequest = compiledModel.createInferRequest();\n\n const transformedInput = {\n 'input_ids': new ov.Tensor(ov.element.f32, [1, 384], networkInput['input_ids']),\n 'attention_mask': new ov.Tensor(ov.element.f32, [1, 384], networkInput['attention_mask']),\n 'token_type_ids': new ov.Tensor(ov.element.f32, [1, 384], networkInput['token_type_ids']),\n 'position_ids': new ov.Tensor(ov.element.f32, [1, 384], networkInput['position_ids']),\n }\n\n inferRequest.infer(transformedInput);\n\n const resultStart = inferRequest.getTensor(outputStartKey).data;\n const resultEnd = inferRequest.getTensor(outputEndKey).data;\n\n // Postprocess the result, getting the score and context range for the answer.\n const scoreStartEnd = postprocess(resultStart,\n resultEnd,\n questionTokens,\n contextTokensStartEnd,\n padding,\n startIdx);\n results.push(scoreStartEnd);\n }\n\n // Find the highest score.\n const scores = results.map(r => r[0]);\n const maxIndex = scores.indexOf(Math.max(scores));\n\n const answer = results[maxIndex];\n // Return the part of the context, which is already an answer.\n return [context.slice(answer[1], answer[2]), answer[0]];\n}\n"
],
"outputs": []
},
{
"language": "markdown",
"source": [
"## Main Processing Function\n\nRun question answering on a specific knowledge base (websites) and iterate through the questions. \n"
],
"outputs": []
},
{
"language": "javascript",
"source": [
"function runQuestionAnswering(sources, exampleQuestion) {\n console.log(`Context: ${sources}`);\n const context = loadContext(sources);\n\n if (!context.length)\n return console.log('Error: Empty context or outside paragraphs');\n\n if (exampleQuestion) {\n const startTime = process.hrtime.bigint();\n const [answer, score] = getBestAnswer(exampleQuestion, context);\n const execTime = Number(process.hrtime.bigint() - startTime) / 1e9;\n\n console.log(`Question: ${exampleQuestion}`);\n console.log(`Answer: ${answer}`);\n console.log(`Score: ${score}`);\n console.log(`Time: ${execTime}s`);\n }\n}\n\nconst sources = [\"Computational complexity theory is a branch of the theory of computation in theoretical computer \" +\n \"science that focuses on classifying computational problems according to their inherent difficulty, \" +\n \"and relating those classes to each other. A computational problem is understood to be a task that \" +\n \"is in principle amenable to being solved by a computer, which is equivalent to stating that the \" +\n \"problem may be solved by mechanical application of mathematical steps, such as an algorithm.\"]\n\nrunQuestionAnswering(sources, 'What is the term for a task that generally lends itself to being solved by a computer?');\n"
],
"outputs": [
{
"items": [
{
"mime": "application/vnd.code.notebook.stdout",
"value": [
"Context: Computational complexity theory is a branch of the theory of computation in theoretical computer science that focuses on classifying computational problems according to their inherent difficulty, and relating those classes to each other. A computational problem is understood to be a task that is in principle amenable to being solved by a computer, which is equivalent to stating that the problem may be solved by mechanical application of mathematical steps, such as an algorithm.",
"Question: What is the term for a task that generally lends itself to being solved by a computer?",
"Answer: A computational problem",
"Score: 0.5286847737759395",
"Time: 0.045163961s",
""
]
}
]
}
]
}
]
}


@@ -0,0 +1,116 @@
const fs = require('node:fs/promises');
exports.cleanWord = cleanWord;
exports.encodeByVoc = encodeByVoc;
exports.textToTokens = textToTokens;
exports.splitToWords = splitToWords;
exports.loadVocabFile = loadVocabFile;
// Load vocabulary file for encoding
async function loadVocabFile(vocabFileName) {
const vocab = {};
const lines = (await fs.readFile(vocabFileName, 'utf-8')).split('\n');
lines.forEach((line, index) => {
const token = line.trim();
vocab[token] = index;
});
return vocab;
}
// Remove mark and control chars
function cleanWord(w) {
let wo = ''; // Accumulator for output word
for (const c of w.normalize('NFD')) {
const charCode = c.charCodeAt(0);
// Skip control characters
if (charCode < 32 || charCode == 127) continue;
wo += c;
}
return wo;
}
// Split a word into vocabulary items and collect token codes iteratively; returns the codes
function encodeByVoc(w, vocab) {
w = cleanWord(w);
const res = [];
const wordIndexes = splitToWords(w);
for (let el of wordIndexes) {
const [s0, e0] = el;
let s = s0;
let e = e0;
const tokens = [];
while (e > s) {
const subword = s == s0 ? w.slice(s, e) : '##' + w.slice(s, e);
if (vocab[subword]) {
tokens.push(vocab[subword]);
s = e;
e = e0;
}
else e -= 1;
}
if (s < e0) tokens.push(vocab['[UNK]']);
res.push(...tokens);
}
return res;
}
// Split big text into words by spaces
// Return start and end indexes of words
function splitToWords(text) {
let start;
let prevIsSep = true; // Mark initial prev as space to start word from 0 char
const result = [];
for (let i = 0; i < text.length + 1; i++) {
const c = text[i] || ' ';
const isPunc = /[!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~]/.test(c);
const curIsSep = c.trim() === '' || isPunc;
if (prevIsSep !== curIsSep) {
if (prevIsSep) {
start = i;
} else {
result.push([start, i]);
}
}
if (isPunc) result.push([i, i + 1]);
prevIsSep = curIsSep;
}
return result;
}
// Get big text and return list of token id and start-end positions
// for each id in original texts
function textToTokens(text, vocab) {
const tokensId = [];
const tokensSe = [];
const wordIndices = splitToWords(text);
for (const [start, end] of wordIndices) {
const word = text.slice(start, end);
const encodedTokens = encodeByVoc(word, vocab);
for (const token of encodedTokens) {
tokensId.push(token);
tokensSe.push([start, end]);
}
}
return [tokensId, tokensSe];
}
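// A usage sketch for these helpers (the vocabulary path is illustrative):
//
//   const tokens = require('./tokens_bert.js');
//   const vocab = await tokens.loadVocabFile('../../assets/text/vocab.txt');
//   const [ids, spans] = tokens.textToTokens('hello world!', vocab);
//   // ids: token ids from the vocabulary
//   // spans: [start, end] character positions for each token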

samples/js/node/package-lock.json generated Normal file

File diff suppressed because it is too large


@@ -0,0 +1,19 @@
{
"name": "openvinojs-node-demo",
"version": "1.0.0",
"license": "Apache-2.0",
"devDependencies": {
"args": "^5.0.3",
"canvas": "^2.11.2",
"eslint": "^8.39.0",
"https-proxy-agent": "^7.0.2",
"opencv-wasm": "^4.3.0-10",
"openvinojs-node": "file:../../../src/bindings/js/node/"
},
"scripts": {
"lint": "eslint ."
},
"engines": {
"node": ">=18.16.0"
}
}


@@ -3,4 +3,5 @@
#
add_subdirectory(c)
add_subdirectory(js)
add_subdirectory(python)


@@ -4,6 +4,8 @@ OpenVINO provides bindings for several languages:
* [c](./c)
* [python](./python)
* [javascript](./js)
* [nodejs](./js/nodejs)
## See also
* [OpenVINO™ README](../../README.md)


@@ -0,0 +1,27 @@
module.exports = {
rules: {
'semi': ['error'],
'no-var': ['error'],
'max-len': ['error'],
'eol-last': ['error'],
'indent': ['error', 2],
'camelcase': ['error'],
'semi-spacing': ['error'],
'arrow-spacing': ['error'],
'comma-spacing': ['error'],
'no-multi-spaces': ['error'],
'quotes': ['error', 'single'],
'no-trailing-spaces': ['error'],
'space-before-blocks': ['error'],
'newline-before-return': ['error'],
'comma-dangle': ['error', 'always-multiline'],
'space-before-function-paren': ['error', {
named: 'never',
anonymous: 'never',
asyncArrow: 'always'
}],
'key-spacing': ['error', { beforeColon: false }],
'no-multiple-empty-lines': ['error', { max: 1, maxBOF: 0, maxEOF: 0 }],
'keyword-spacing': ['error', { overrides: { catch: { after: false } } }],
}
};


@@ -0,0 +1,9 @@
module.exports = {
extends: [
'eslint:recommended',
'plugin:@typescript-eslint/recommended',
'./.eslintrc-global.js',
],
ignorePatterns: ['**/*.js', 'node_modules/', 'types/', 'dist/', 'bin/'],
root: true,
};


@@ -0,0 +1,7 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
project(OpenVINO_JS_API)
add_subdirectory(node)


@@ -0,0 +1,3 @@
# OpenVINO™ JavaScript API
- `./node` - openvinojs-node NPM package with Node.js bindings


@@ -0,0 +1,4 @@
# JavaScript bindings
- `./docs` - documentation
- `./node` - openvinojs-node NPM package with Node.js bindings


@@ -0,0 +1,5 @@
module.exports = {
extends: ['../.eslintrc.js'],
parser: '@typescript-eslint/parser',
plugins: ['@typescript-eslint'],
};

src/bindings/js/node/.gitignore vendored Normal file

@@ -0,0 +1,5 @@
node_modules
dist
build
types
ov_runtime


@@ -0,0 +1,12 @@
build
include
lib
src
tests
.eslintrc.js
CMakeLists.txt
tsconfig.json
**/*.tsbuildinfo
*.tgz


@@ -0,0 +1,95 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
if(WIN32)
return()
endif()
if(CMAKE_VERSION VERSION_LESS 3.14)
message(WARNING "JS API is not available with CMake version less than 3.14, skipping")
return()
endif()
if(CMAKE_COMPILER_IS_GNUCXX AND LINUX AND AARCH64 AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
message(WARNING "JS API is not support compiler version less than 7, skipping")
return()
endif()
cmake_minimum_required(VERSION 3.14)
project(ov_node_addon)
set(CMAKE_CXX_STANDARD 17)
add_definitions(-DNAPI_VERSION=6)
include(FetchContent)
FetchContent_Declare(
node-api-headers
URL https://github.com/nodejs/node-api-headers/archive/refs/tags/v1.1.0.tar.gz
URL_HASH SHA256=70608bc1e6dddce280285f3462f18a106f687c0720a4b90893e1ecd86e5a8bbf
)
FetchContent_MakeAvailable(node-api-headers)
FetchContent_Declare(
node-addon-api
URL https://github.com/nodejs/node-addon-api/archive/refs/tags/v7.0.0.tar.gz
URL_HASH SHA256=e0cf2ef9bed9965ac8f498b4a7007b9f0002fa022c1ff665f256ab54260d6305
)
FetchContent_MakeAvailable(node-addon-api)
add_library(${PROJECT_NAME} SHARED
${CMAKE_CURRENT_SOURCE_DIR}/src/node_output.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/async_reader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/preprocess.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/pre_post_process_wrap.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/preprocess_steps.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/input_info.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/output_info.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/input_tensor_info.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/output_tensor_info.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/input_model_info.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/preprocess/resize_algorithm.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/errors.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/helper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/tensor.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/infer_request.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/compiled_model.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/core_wrap.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/model_wrap.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/addon.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/element_type.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/partial_shape_wrap.cpp
)
target_include_directories(${PROJECT_NAME} PRIVATE
"${node-api-headers_SOURCE_DIR}/include"
"${node-addon-api_SOURCE_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}/include"
)
target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime)
if(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
ov_add_compiler_flags(-Wno-missing-declarations)
endif()
if(APPLE)
target_link_options(${PROJECT_NAME} PRIVATE -Wl,-undefined,suppress,-flat_namespace)
elseif(AARCH64 OR ARM)
target_link_options(${PROJECT_NAME} PRIVATE -Wl,--unresolved-symbols=ignore-all)
endif()
set_target_properties(${PROJECT_NAME} PROPERTIES
PREFIX ""
SUFFIX ".node"
)
ov_set_install_rpath(${PROJECT_NAME}
${OV_CPACK_RUNTIMEDIR} ${OV_CPACK_RUNTIMEDIR})
install(TARGETS ${PROJECT_NAME}
LIBRARY DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${PROJECT_NAME} ${OV_CPACK_COMP_NPM_EXCLUDE_ALL}
RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${PROJECT_NAME} ${OV_CPACK_COMP_NPM_EXCLUDE_ALL}
)


@@ -0,0 +1,34 @@
# OpenVINO Node.js API
## Components
- [include](./include/) - header files of the current API.
- [lib](./lib/) - TypeScript sources of the current API.
- [src](./src/) - C++ sources of the current API.
- [tests](./tests/) - tests for the current API.
## Build
- Make sure that all submodules are updated: `git submodule update --init --recursive`
- Create a build dir: `mkdir build && cd build`
- To get binaries for the openvinojs-node package, run:
`cmake -DCPACK_GENERATOR=NPM -DENABLE_SYSTEM_TBB=OFF -UTBB* -DCMAKE_INSTALL_PREFIX=../src/bindings/js/node/bin ..`
`make --jobs=$(nproc --all) install`
- Go to the npm package folder: `cd ../src/bindings/js/node`
- Now you can install the dependency packages and transpile the TypeScript code to JavaScript: run `npm install`
- Run the tests with `npm run test` to make sure that **openvinojs-node** was built successfully
## Usage
- Add the `openvinojs-node` package to your project by specifying it in **package.json**: `"openvinojs-node": "file:*path-to-current-directory*"`
- Require it: `const ov = require('openvinojs-node');`
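A minimal end-to-end sketch, following the convention used by the samples (the model path, device name, and tensor shape below are placeholders):

```js
const { addon: ov } = require('openvinojs-node');

async function run() {
  const core = new ov.Core();
  const model = await core.readModel('model.xml'); // placeholder path
  const compiledModel = await core.compileModel(model, 'CPU');

  // An all-zeros input tensor; the shape must match your model.
  const inputData = new Float32Array(1 * 3 * 224 * 224);
  const tensor = new ov.Tensor(ov.element.f32, [1, 3, 224, 224], inputData);

  const inferRequest = compiledModel.createInferRequest();
  inferRequest.setInputTensor(tensor);
  inferRequest.infer();

  const output = inferRequest.getTensor(compiledModel.outputs[0]);
  console.log(output.data);
}

run();
```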
## Samples
[Samples & notebooks of OpenVINO Node.js API](../../../../samples/js/node/README.md)
## See also
* [OpenVINO™ README](../../../../README.md)
* [OpenVINO™ Core Components](../../../README.md)
* [OpenVINO™ JavaScript API](../README.md)


@@ -0,0 +1,21 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
/** @brief A structure with data that will be associated with the instance of the ov.js node-addon. */
struct AddonData {
Napi::FunctionReference* compiled_model_prototype;
Napi::FunctionReference* core_prototype;
Napi::FunctionReference* const_output_prototype;
Napi::FunctionReference* infer_request_prototype;
Napi::FunctionReference* model_prototype;
Napi::FunctionReference* output_prototype;
Napi::FunctionReference* partial_shape_prototype;
Napi::FunctionReference* ppp_prototype;
Napi::FunctionReference* tensor_prototype;
};
Napi::Object init_all(Napi::Env env, Napi::Object exports);


@@ -0,0 +1,41 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "model_wrap.hpp"
#include "openvino/runtime/core.hpp"
#include "read_model_args.hpp"
class ReaderWorker : public Napi::AsyncWorker {
public:
/**
* @brief Constructs a ReaderWorker that is responsible for reading the model asynchronously.
* @param env The N-API environment.
* @param args Pointer to ReadModelArgs describing the model to read.
*/
ReaderWorker(const Napi::Env& env, ReadModelArgs* args)
: Napi::AsyncWorker{env, "ReaderWorker"},
_deferred{env},
_args{args},
_model{} {
OPENVINO_ASSERT(_args, "Invalid pointer to ReadModelArgs.");
}
Napi::Promise GetPromise();
protected:
/** @name AsyncWorkerMethods
* Methods to safely move data between the event loop and worker threads.
*/
///@{
void Execute() override;
void OnOK() override;
void OnError(const Napi::Error& err) override;
///@}
private:
Napi::Promise::Deferred _deferred;
ReadModelArgs* _args;
std::shared_ptr<ov::Model> _model;
};


@@ -0,0 +1,77 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/runtime/compiled_model.hpp"
class CompiledModelWrap : public Napi::ObjectWrap<CompiledModelWrap> {
public:
/**
* @brief Constructs CompiledModelWrap from the Napi::CallbackInfo.
* @param info contains passed arguments. Can be empty.
*/
CompiledModelWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript CompiledModel class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript CompiledModel class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of the OpenVINO native add-on.
* It exports JavaScript CompiledModel class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
/**
* @brief Creates JavaScript CompiledModel object and wraps inside of it ov::CompiledModel object.
* @param env The environment in which to construct a JavaScript object.
* @param compiled_model ov::CompiledModel to wrap.
* @return A Javascript CompiledModel as Napi::Object. (Not CompiledModelWrap object)
*/
static Napi::Object wrap(Napi::Env env, ov::CompiledModel compiled_model);
/** @brief Sets a _compiled_model property of a CompiledModelWrap object. Used e.g. when creating CompiledModelWrap
* object on node-addon side. */
void set_compiled_model(const ov::CompiledModel& compiled_model);
/** @return A Javascript InferRequest */
Napi::Value create_infer_request(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access the compiled model outputs as an attribute of JavaScript Compiled Model.
* @param info Contains information about the environment and passed arguments
* Empty info array => Gets a single output of a compiled model. If a model has more than one output, this method
* throws ov::Exception. info[0] of type string => Gets output of a compiled model identified by tensor_name.
* info[0] of type int => Gets output of a compiled model identified by index of output.
*/
Napi::Value get_output(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access the compiled model outputs.
* @param info Contains information about the environment and passed arguments
* @return A Javascript Array containing Outputs
*/
Napi::Value get_outputs(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access the compiled model inputs.
* @param info Contains information about the environment and passed arguments
* Empty info array => Gets a single input of a compiled model. If a model has more than one input, this method
* throws ov::Exception. info[0] of type string => Gets input of a compiled model identified by tensor_name. info[0]
* of type int => Gets input of a compiled model identified by index of input.
*/
Napi::Value get_input(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access the CompiledModel inputs.
* @param info Contains information about the environment and passed arguments.
* @return A Javascript Array containing Inputs
*/
Napi::Value get_inputs(const Napi::CallbackInfo& info);
private:
ov::CompiledModel _compiled_model;
};
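A sketch of how these accessors surface in JavaScript; `compiledModel` comes from `core.compileModelSync(...)` and the tensor name `'data'` is hypothetical:

```typescript
const firstOutput = compiledModel.output(0);    // by index
const namedInput = compiledModel.input('data'); // by tensor name (hypothetical)

// `inputs` / `outputs` are exposed as array properties of Output objects.
console.log(compiledModel.inputs.map((input) => input.getAnyName()));
console.log(firstOutput.getShape());
```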

View File

@ -0,0 +1,125 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include <thread>
#include "async_reader.hpp"
#include "errors.hpp"
#include "openvino/runtime/core.hpp"
class CoreWrap : public Napi::ObjectWrap<CoreWrap> {
public:
/**
* @brief Constructs CoreWrap from the Napi::CallbackInfo.
* @param info contains passed arguments. Can be empty.
*/
CoreWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript Core class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript Core class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript Core class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
/**
* @brief Reads a model synchronously.
* @param info contains passed arguments.
* One argument is passed:
* @param info[0] path to a model as string or Buffer<UInt8Array> with a model
* Two arguments are passed:
* @param info[0] path to a model. (model_path string or Buffer<UInt8Array>)
* @param info[1] path to a data file. (e.g. bin_path string or Buffer<UInt8Array>)
* @return A Javascript Model object.
*/
Napi::Value read_model_sync(const Napi::CallbackInfo& info);
/**
* @brief Asynchronously reads a model.
* @param info contains passed arguments.
* One argument is passed:
* @param info[0] path to a model. (model_path)
* Two arguments are passed:
* @param info[0] path to a model. (model_path)
* @param info[1] path to a data file. (e.g. bin_path)
* @return A Javascript Promise.
*/
Napi::Value read_model_async(const Napi::CallbackInfo& info);
/**
* @brief Creates and loads a compiled model from a source model.
* @param info contains two passed arguments.
* @param info[0] Javascript Model object acquired from CoreWrap::read_model
* @param info[1] string with properties, e.g. device
* @return A Javascript CompiledModel object.
*/
Napi::Value compile_model_sync_dispatch(const Napi::CallbackInfo& info);
/**
* @brief Asynchronously creates and loads a compiled model from a source model.
* @param info contains two passed arguments.
* @param info[0] Javascript Model object acquired from CoreWrap::read_model
* @param info[1] string with properties, e.g. device
* @return A Javascript CompiledModel object.
*/
Napi::Value compile_model_async(const Napi::CallbackInfo& info);
protected:
Napi::Value compile_model_sync(const Napi::CallbackInfo& info,
const Napi::Object& model,
const Napi::String& device);
Napi::Value compile_model_sync(const Napi::CallbackInfo& info,
const Napi::String& model_path,
const Napi::String& device);
Napi::Value compile_model_sync(const Napi::CallbackInfo& info,
const Napi::Object& model,
const Napi::String& device,
const std::map<std::string, ov::Any>& config);
Napi::Value compile_model_sync(const Napi::CallbackInfo& info,
const Napi::String& model_path,
const Napi::String& device,
const std::map<std::string, ov::Any>& config);
private:
ov::Core _core;
};
struct TsfnContextModel {
TsfnContextModel(Napi::Env env) : deferred(Napi::Promise::Deferred::New(env)){};
std::thread nativeThread;
Napi::Promise::Deferred deferred;
Napi::ThreadSafeFunction tsfn;
std::shared_ptr<ov::Model> _model;
std::string _device;
ov::CompiledModel _compiled_model;
std::map<std::string, ov::Any> _config = {};
};
struct TsfnContextPath {
TsfnContextPath(Napi::Env env) : deferred(Napi::Promise::Deferred::New(env)){};
std::thread nativeThread;
Napi::Promise::Deferred deferred;
Napi::ThreadSafeFunction tsfn;
std::string _model;
std::string _device;
ov::CompiledModel _compiled_model;
std::map<std::string, ov::Any> _config = {};
};
void FinalizerCallbackModel(Napi::Env env, void* finalizeData, TsfnContextModel* context);
void FinalizerCallbackPath(Napi::Env env, void* finalizeData, TsfnContextPath* context);
void compileModelThreadModel(TsfnContextModel* context);
void compileModelThreadPath(TsfnContextPath* context);
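The dispatch methods above map onto these JavaScript calls. A sketch, with `core` and `model` as in the earlier examples; `PERFORMANCE_HINT` is one example of an OpenVINO property passed through the config map:

```typescript
// Synchronous compilation, with and without a properties object.
const compiled = core.compileModelSync(model, 'CPU');
const tuned = core.compileModelSync(model, 'CPU', { PERFORMANCE_HINT: 'LATENCY' });

// The asynchronous variant runs compilation on a separate thread
// (TsfnContextModel / TsfnContextPath) and returns a Promise.
core.compileModel(model, 'CPU').then((cm) => {
  console.log(cm.outputs.length);
});
```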

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "helper.hpp"
namespace element {
/** @brief Exports JavaScript element enum. */
Napi::Object init(Napi::Env env, Napi::Object exports);
/** \brief Creates JS object to represent C++ enum class Type_t with element types supported in ov.js*/
Napi::Value add_element_namespace(const Napi::CallbackInfo& info);
}; // namespace element

View File

@ -0,0 +1,12 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
/**
* @brief Creates Napi::Error instance and throws the error as JavaScript exception.
* @param env The environment in which to construct the Napi::Error object.
* @param msg std::string that represent the message of the error
*/
void reportError(const Napi::Env& env, std::string msg);

View File

@ -0,0 +1,169 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include <unordered_set>
#include <variant>
#include "element_type.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/openvino.hpp"
typedef enum {
js_array,
} js_type;
const std::vector<std::string>& get_supported_types();
typedef std::variant<napi_valuetype, napi_typedarray_type, js_type> napi_types;
/**
* @brief Template function to convert Javascript data types into C++ data types
 * @tparam TargetType destination C++ data type
 * @param info Napi::CallbackInfo contains all arguments passed to a function or method
 * @param idx specifies the index of an argument inside info.
* @param acceptable_types specifies napi types from which TargetType can be created
* @return specified argument converted to a TargetType.
*/
template <typename TargetType>
TargetType js_to_cpp(const Napi::CallbackInfo& info, const size_t idx, const std::vector<napi_types>& acceptable_types);
template <typename TargetType>
TargetType js_to_cpp(const Napi::Value&, const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType int32_t */
template <>
int32_t js_to_cpp<int32_t>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType std::vector<size_t> */
template <>
std::vector<size_t> js_to_cpp<std::vector<size_t>>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType std::unordered_set<std::string> */
template <>
std::unordered_set<std::string> js_to_cpp<std::unordered_set<std::string>>(
const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType std::string */
template <>
std::string js_to_cpp<std::string>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType ov::element::Type_t */
template <>
ov::element::Type_t js_to_cpp<ov::element::Type_t>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/**
* @brief A template specialization for TargetType ov::Layout
* @param acceptable_types ov::Layout can be created from a napi_string
*/
template <>
ov::Layout js_to_cpp<ov::Layout>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType ov::Shape */
template <>
ov::Shape js_to_cpp<ov::Shape>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType ov::preprocess::ResizeAlgorithm */
template <>
ov::preprocess::ResizeAlgorithm js_to_cpp<ov::preprocess::ResizeAlgorithm>(
const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType ov::Any */
template <>
ov::Any js_to_cpp<ov::Any>(const Napi::Value&, const std::vector<napi_types>& acceptable_types);
/** @brief A template specialization for TargetType std::map<std::string, ov::Any> */
template <>
std::map<std::string, ov::Any> js_to_cpp<std::map<std::string, ov::Any>>(
const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types);
/**
* @brief Template function to convert C++ data types into Javascript data types
 * @tparam TargetType Destination JavaScript data type.
* @tparam SourceType C++ data type.
* @param info Contains the environment in which to construct a JavaScript object.
* @return SourceType converted to a TargetType.
*/
template <typename SourceType, typename TargetType>
TargetType cpp_to_js(const Napi::CallbackInfo& info, SourceType);
/** @brief A template specialization for TargetType ov::element::Type_t and SourceType ov::element::Type_t */
template <>
Napi::String cpp_to_js<ov::element::Type_t, Napi::String>(const Napi::CallbackInfo& info,
const ov::element::Type_t type);
template <>
Napi::Array cpp_to_js<ov::Shape, Napi::Array>(const Napi::CallbackInfo& info, const ov::Shape shape);
template <>
Napi::Array cpp_to_js<ov::PartialShape, Napi::Array>(const Napi::CallbackInfo& info, const ov::PartialShape shape);
template <>
Napi::Array cpp_to_js<ov::Dimension, Napi::Array>(const Napi::CallbackInfo& info, const ov::Dimension dim);
template <>
Napi::Boolean cpp_to_js<bool, Napi::Boolean>(const Napi::CallbackInfo& info, const bool value);
/** @brief Takes a Napi::Value and parses a Napi::Array or Napi::Object into an ov::TensorVector. */
ov::TensorVector parse_input_data(const Napi::Value& input);
/** @brief Gets an input/output tensor from InferRequest by key. */
ov::Tensor get_request_tensor(ov::InferRequest& infer_request, const std::string key);
/** @brief Gets an input tensor from InferRequest by index. */
ov::Tensor get_request_tensor(ov::InferRequest& infer_request, const size_t idx);
/** @brief Creates an ov::Tensor from a TensorWrap object. */
ov::Tensor cast_to_tensor(const Napi::Value& value);
/** @brief Creates an ov::Tensor from a TypedArray using the given shape and element type. */
ov::Tensor cast_to_tensor(const Napi::TypedArray& data, const ov::Shape& shape, const ov::element::Type_t& type);
/** @brief A helper function to create a ov::Tensor from Napi::Value.
* @param value a Napi::Value that can be either a TypedArray or a TensorWrap Object.
* @param infer_request The reference to InferRequest.
* @param key of the tensor to get from InferRequest.
* @return ov::Tensor
*/
template <typename KeyType>
ov::Tensor value_to_tensor(const Napi::Value& value, ov::InferRequest& infer_request, const KeyType key) {
if (value.IsTypedArray()) {
const auto input = get_request_tensor(infer_request, key);
const auto& shape = input.get_shape();
const auto& type = input.get_element_type();
const auto data = value.As<Napi::TypedArray>();
return cast_to_tensor(data, shape, type);
} else {
return cast_to_tensor(value.As<Napi::Value>());
}
}
napi_types napiType(const Napi::Value& val);
bool acceptableType(const Napi::Value& val, const std::vector<napi_types>& acceptable);
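`value_to_tensor` is what lets JavaScript callers pass either a wrapped Tensor or a bare TypedArray; in the latter case the shape and element type are taken from the request's corresponding tensor. A sketch (the shape is hypothetical):

```typescript
// Equivalent inputs to infer(): a Tensor, or a raw TypedArray whose shape
// and element type are inferred from the compiled model's input.
inferRequest.infer([tensor]);
inferRequest.infer([new Float32Array(1 * 3 * 224 * 224)]);
```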

View File

@ -0,0 +1,131 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include <thread>
#include "openvino/runtime/infer_request.hpp"
struct TsfnContext {
TsfnContext(Napi::Env env) : deferred(Napi::Promise::Deferred::New(env)){};
std::thread native_thread;
Napi::Promise::Deferred deferred;
Napi::ThreadSafeFunction tsfn;
ov::InferRequest* _ir;
std::vector<ov::Tensor> _inputs;
std::map<std::string, ov::Tensor> result;
};
class InferRequestWrap : public Napi::ObjectWrap<InferRequestWrap> {
public:
/**
* @brief Constructs InferRequestWrap from the Napi::CallbackInfo.
* @param info contains passed arguments. Can be empty.
*/
InferRequestWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript InferRequest class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript InferRequest class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript InferRequest class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
void set_infer_request(const ov::InferRequest& infer_request);
/**
* @brief Creates JavaScript InferRequest object and wraps inside of it ov::InferRequest object.
* @param env The environment in which to construct a JavaScript object.
* @param infer_request ov::InferRequest to wrap.
* @return Javascript InferRequest as Napi::Object. (Not InferRequestWrap object)
*/
static Napi::Object wrap(Napi::Env env, ov::InferRequest infer_request);
/**
* @brief Sets an input/output tensor to infer on.
* @param info contains passed arguments.
* @param info[0] Name of the input or output tensor as Napi::String.
* @param info[1] Javascript Tensor object.
*/
void set_tensor(const Napi::CallbackInfo& info);
/**
* @brief Sets an input tensor for inference.
* @param info contains passed arguments.
* The model needs to have a single input if only one argument is passed:
* @param info[0] Javascript Tensor object.
 * If the model has more than one input:
 * @param info[0] Index of the input tensor.
* @param info[1] Javascript Tensor object.
*/
void set_input_tensor(const Napi::CallbackInfo& info);
/**
* @brief Sets an output tensor for inference.
* @param info contains passed arguments.
 * The model needs to have a single output if only one argument is passed:
* @param info[0] Javascript Tensor object.
 * If the model has more than one output:
 * @param info[0] Index of the output tensor.
* @param info[1] Javascript Tensor object.
*/
void set_output_tensor(const Napi::CallbackInfo& info);
/**
* @brief Gets an input/output tensor for inference.
* @param info contains passed arguments.
* @param info[0] Javascript ov::Output<ov::Node> object or name of a tensor to get
* @return Tensor for the specified Node object
*/
Napi::Value get_tensor(const Napi::CallbackInfo& info);
/**
* @brief Gets an input tensor for inference.
* @note The model needs to have a single input if no argument is passed.
* @param idx Index of the tensor to get. (optional)
*/
Napi::Value get_input_tensor(const Napi::CallbackInfo& info);
/**
* @brief Gets an output tensor for inference.
 * @note The model needs to have a single output if no argument is passed.
* @param idx Index of the tensor to get. (optional)
*/
Napi::Value get_output_tensor(const Napi::CallbackInfo& info);
/** @return A Javascript object with model outputs. */
Napi::Value get_output_tensors(const Napi::CallbackInfo& info);
/** @brief Checks incoming Napi::Value and calls overloaded infer() method */
Napi::Value infer_dispatch(const Napi::CallbackInfo& info);
/** @brief Checks incoming Napi::Value and asynchronously returns the result of inference. */
Napi::Value infer_async(const Napi::CallbackInfo& info);
/** @brief Infers specified inputs in synchronous mode.
* @param inputs An object with a collection of pairs key (input_name) and a value (tensor, tensor's data)
*/
void infer(const Napi::Object& inputs);
/** @brief Infers specified inputs in synchronous mode.
* @param inputs An Array with values (tensors, tensors' data)
*/
void infer(const Napi::Array& inputs);
/** @return A Javascript CompiledModel. */
Napi::Value get_compiled_model(const Napi::CallbackInfo& info);
private:
ov::InferRequest _infer_request;
};
void FinalizerCallback(Napi::Env env, void* finalizeData, TsfnContext* context);
void performInferenceThread(TsfnContext* context);
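A sketch of the explicit tensor plumbing this class exposes, assuming `inferRequest` and `tensor` from the earlier examples:

```typescript
// Preset the input tensor, run inference, then read the output back.
inferRequest.setInputTensor(0, tensor);
inferRequest.infer();
const output = inferRequest.getOutputTensor(0);
console.log(output.getShape(), output.getData().length);

// The asynchronous variant resolves with a map of output tensors.
inferRequest.inferAsync([tensor]).then((outputs) => {
  console.log(Object.keys(outputs));
});
```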

View File

@ -0,0 +1,91 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "compiled_model.hpp"
#include "errors.hpp"
#include "openvino/core/model.hpp"
#include "openvino/runtime/core.hpp"
#include "tensor.hpp"
class ModelWrap : public Napi::ObjectWrap<ModelWrap> {
public:
/**
* @brief Constructs ModelWrap from the Napi::CallbackInfo.
* @param info contains passed arguments. Can be empty.
*/
ModelWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript Model class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript Model class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript Model class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
void set_model(const std::shared_ptr<ov::Model>& model);
/**
* @brief Creates JavaScript Model object and wraps inside of it ov::Model object.
* @param env The environment in which to construct a JavaScript object.
* @param model a pointer to ov::Model to wrap.
* @return Javascript Model as Napi::Object. (Not ModelWrap object)
*/
static Napi::Object wrap(Napi::Env env, std::shared_ptr<ov::Model> model);
/** @return Napi::String containing a model name. */
Napi::Value get_name(const Napi::CallbackInfo& info);
std::shared_ptr<ov::Model> get_model() const;
/**
* @brief Helper function to access model inputs.
* @param info contains passed arguments.
* Empty info array:
* @param info Gets a single input of a model. If a model has more than one input, this method
* throws ov::Exception.
* One param of type string:
* @param info[0] Gets input of a model identified by tensor_name.
* One param of type int:
* @param info[0] Gets input of a model identified by index of input.
*/
Napi::Value get_input(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access model outputs.
* @param info contains passed arguments.
* Empty info array:
* @param info Gets a single output of a model. If a model has more than one output, this method
* throws ov::Exception.
* One param of type string:
* @param info[0] Gets output of a model identified by tensor_name.
* One param of type int:
* @param info[0] Gets output of a model identified by index of output.
*/
Napi::Value get_output(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access model inputs
* @param info Contains information about the environment and passed arguments
* @return A Javascript Array containing Outputs
*/
Napi::Value get_inputs(const Napi::CallbackInfo& info);
/**
* @brief Helper function to access model outputs
* @param info Contains information about the environment and passed arguments
* @return A Javascript Array containing Outputs
*/
Napi::Value get_outputs(const Napi::CallbackInfo& info);
private:
std::shared_ptr<ov::Model> _model;
ov::Core _core;
ov::CompiledModel _compiled_model;
};
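From JavaScript, a Model can be inspected before compilation; a sketch, reusing `model` from the earlier examples:

```typescript
console.log(model.getName());
console.log(model.inputs.length, model.outputs.length);

// output() without arguments throws if the model has more than one output.
const out = model.output();
console.log(out.getAnyName(), out.getShape());
```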

View File

@ -0,0 +1,78 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "helper.hpp"
#include "openvino/core/node_output.hpp"
template <class NodeType>
class Output : public Napi::ObjectWrap<Output<NodeType>> {};
template <>
class Output<ov::Node> : public Napi::ObjectWrap<Output<ov::Node>> {
public:
Output(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript Output class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript Output class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript Output class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
ov::Output<ov::Node> get_output() const;
static Napi::Object wrap(Napi::Env env, ov::Output<ov::Node> output);
Napi::Value get_shape(const Napi::CallbackInfo& info);
Napi::Value get_partial_shape(const Napi::CallbackInfo& info);
Napi::Value get_shape_data(const Napi::CallbackInfo& info);
Napi::Value get_any_name(const Napi::CallbackInfo& info);
private:
ov::Output<ov::Node> _output;
};
template <>
class Output<const ov::Node> : public Napi::ObjectWrap<Output<const ov::Node>> {
public:
Output(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript Output class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript Output class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript Output class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
ov::Output<const ov::Node> get_output() const;
static Napi::Object wrap(Napi::Env env, ov::Output<const ov::Node> output);
Napi::Value get_shape(const Napi::CallbackInfo& info);
Napi::Value get_partial_shape(const Napi::CallbackInfo& info);
Napi::Value get_shape_data(const Napi::CallbackInfo& info);
Napi::Value get_any_name(const Napi::CallbackInfo& info);
private:
ov::Output<const ov::Node> _output;
};

View File

@ -0,0 +1,45 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "errors.hpp"
#include "helper.hpp"
#include "openvino/core/partial_shape.hpp"
class PartialShapeWrap : public Napi::ObjectWrap<PartialShapeWrap> {
public:
/**
* @brief Constructs PartialShapeWrap from the Napi::CallbackInfo.
* @param info contains passed arguments. Can be empty.
*/
PartialShapeWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript PartialShape class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript PartialShape class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO node-addon.
* It exports JavaScript PartialShape class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
/**
* @brief Creates JavaScript PartialShape object and wraps inside of it ov::PartialShape object.
* @param env The environment in which to construct a JavaScript object.
* @param partial_shape ov::PartialShape to wrap.
* @return Javascript PartialShape as Napi::Object. (Not PartialShapeWrap object)
*/
static Napi::Object wrap(Napi::Env env, ov::PartialShape partial_shape);
Napi::Value is_static(const Napi::CallbackInfo& info);
Napi::Value is_dynamic(const Napi::CallbackInfo& info);
Napi::Value to_string(const Napi::CallbackInfo& info);
Napi::Value get_dimensions(const Napi::CallbackInfo& info);
private:
ov::PartialShape _partial_shape;
};
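A sketch of the JavaScript side, assuming the constructor accepts an OpenVINO-style shape string where `?` marks a dynamic dimension (the exact string format is an assumption here):

```typescript
const staticShape = new ov.PartialShape('1, 3, 224, 224');
const dynamicShape = new ov.PartialShape('?, 3, 224, 224'); // hypothetical syntax

console.log(staticShape.isStatic());    // expected: true
console.log(dynamicShape.isDynamic());  // expected: true
console.log(dynamicShape.getDimensions());
```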

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/core/preprocess/input_info.hpp"
class InputInfo : public Napi::ObjectWrap<InputInfo> {
public:
InputInfo(const Napi::CallbackInfo& info);
static Napi::Function get_class_constructor(Napi::Env env);
Napi::Value tensor(const Napi::CallbackInfo& info);
Napi::Value preprocess(const Napi::CallbackInfo& info);
Napi::Value model(const Napi::CallbackInfo& info);
void set_input_info(ov::preprocess::InputInfo& input_info);
private:
ov::preprocess::InputInfo* _input_info;
};

View File

@ -0,0 +1,22 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/core/preprocess/input_model_info.hpp"
class InputModelInfo : public Napi::ObjectWrap<InputModelInfo> {
public:
InputModelInfo(const Napi::CallbackInfo& info);
static Napi::Function get_class_constructor(Napi::Env env);
Napi::Value set_layout(const Napi::CallbackInfo& info);
void set_input_model_info(ov::preprocess::InputModelInfo& info);
private:
ov::preprocess::InputModelInfo* _model_info;
};

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/core/preprocess/input_tensor_info.hpp"
class InputTensorInfo : public Napi::ObjectWrap<InputTensorInfo> {
public:
InputTensorInfo(const Napi::CallbackInfo& info);
static Napi::Function get_class_constructor(Napi::Env env);
Napi::Value set_element_type(const Napi::CallbackInfo& info);
Napi::Value set_layout(const Napi::CallbackInfo& info);
Napi::Value set_shape(const Napi::CallbackInfo& info);
void set_input_tensor_info(ov::preprocess::InputTensorInfo& tensor_info);
private:
ov::preprocess::InputTensorInfo* _tensor_info;
};

View File

@ -0,0 +1,21 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/core/preprocess/output_info.hpp"
class OutputInfo : public Napi::ObjectWrap<OutputInfo> {
public:
OutputInfo(const Napi::CallbackInfo& info);
static Napi::Function get_class_constructor(Napi::Env env);
Napi::Value tensor(const Napi::CallbackInfo& info);
void set_output_info(ov::preprocess::OutputInfo& info);
private:
ov::preprocess::OutputInfo* _output_info;
};

View File

@ -0,0 +1,24 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/core/preprocess/output_tensor_info.hpp"
class OutputTensorInfo : public Napi::ObjectWrap<OutputTensorInfo> {
public:
OutputTensorInfo(const Napi::CallbackInfo& info);
static Napi::Function get_class_constructor(Napi::Env env);
Napi::Value set_element_type(const Napi::CallbackInfo& info);
Napi::Value set_layout(const Napi::CallbackInfo& info);
void set_output_tensor_info(ov::preprocess::OutputTensorInfo& tensor_info);
private:
ov::preprocess::OutputTensorInfo* _tensor_info;
};

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "element_type.hpp"
#include "errors.hpp"
#include "helper.hpp"
#include "model_wrap.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "openvino/openvino.hpp"
#include "preprocess/input_info.hpp"
#include "preprocess/output_info.hpp"
class PrePostProcessorWrap : public Napi::ObjectWrap<PrePostProcessorWrap> {
public:
/**
* @brief Constructs PrePostProcessorWrap class from the Napi::CallbackInfo.
* @param info //TO DO
*/
PrePostProcessorWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript PrePostProcessor class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript PrePostProcessor class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript PrePostProcessor class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
Napi::Value input(const Napi::CallbackInfo& info);
Napi::Value output(const Napi::CallbackInfo& info);
void build(const Napi::CallbackInfo& info);
private:
std::unique_ptr<ov::preprocess::PrePostProcessor> _ppp;
};
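A sketch of the preprocessing chain as seen from JavaScript; the layouts and element type are chosen for illustration, with `model` as before:

```typescript
const ppp = new ov.preprocess.PrePostProcessor(model);

// Describe the incoming tensor, the preprocessing steps, and the model layout.
ppp.input().tensor().setElementType(ov.element.u8).setLayout('NHWC');
ppp.input().preprocess().resize(ov.preprocess.resizeAlgorithm.RESIZE_LINEAR);
ppp.input().model().setLayout('NCHW');

ppp.build(); // applies the steps to the wrapped model
```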

View File

@ -0,0 +1,20 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/openvino.hpp"
#include "preprocess/resize_algorithm.hpp"
#include "preprocess/pre_post_process_wrap.hpp"
namespace preprocess {
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript preprocess property.
*/
Napi::Object init(Napi::Env env, Napi::Object exports);
Napi::Value add_preprocess_namespace(const Napi::CallbackInfo& info);
};

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/core/preprocess/preprocess_steps.hpp"
class PreProcessSteps : public Napi::ObjectWrap<PreProcessSteps> {
public:
PreProcessSteps(const Napi::CallbackInfo& info);
static Napi::Function get_class_constructor(Napi::Env env);
Napi::Value resize(const Napi::CallbackInfo& info);
void set_preprocess_info(ov::preprocess::PreProcessSteps& info);
private:
ov::preprocess::PreProcessSteps* _preprocess_info;
};

View File

@ -0,0 +1,10 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "helper.hpp"
/** \brief Creates JS object to represent C++ enum class ResizeAlgorithm */
Napi::Value enumResizeAlgorithm(const Napi::CallbackInfo& info);

View File

@ -0,0 +1,55 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "openvino/runtime/core.hpp"
/**
* @brief This struct retrieves data from Napi::CallbackInfo.
*/
struct ReadModelArgs {
std::string model_path;
std::string bin_path;
std::string model_str;
ov::Tensor weight_tensor;
ReadModelArgs() {}
ReadModelArgs(const Napi::CallbackInfo& info) {
if (!is_valid_read_model_input(info))
throw std::runtime_error("Invalid arguments of read model function");
const size_t argsLength = info.Length();
std::shared_ptr<ov::Model> model;
if (info[0].IsBuffer()) {
Napi::Buffer<uint8_t> model_data = info[0].As<Napi::Buffer<uint8_t>>();
model_str = std::string(reinterpret_cast<char*>(model_data.Data()), model_data.Length());
if (argsLength == 2) {
Napi::Buffer<uint8_t> weights = info[1].As<Napi::Buffer<uint8_t>>();
const uint8_t* bin = reinterpret_cast<const uint8_t*>(weights.Data());
size_t bin_size = weights.Length();
weight_tensor = ov::Tensor(ov::element::Type_t::u8, {bin_size});
std::memcpy(weight_tensor.data(), bin, bin_size);
} else {
weight_tensor = ov::Tensor(ov::element::Type_t::u8, {0});
}
} else {
model_path = std::string(info[0].ToString());
if (argsLength == 2) bin_path = info[1].ToString();
}
}
bool is_valid_read_model_input(const Napi::CallbackInfo& info) {
const size_t argsLength = info.Length();
const size_t is_buffers_input = info[0].IsBuffer()
&& (argsLength == 1 || info[1].IsBuffer());
if (is_buffers_input) return true;
return info[0].IsString() && (argsLength == 1 || info[1].IsString());
}
};
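`ReadModelArgs` accepts either file paths or in-memory buffers, which corresponds to these two calling conventions (file names hypothetical):

```typescript
const { readFileSync } = require('node:fs');

// Variant 1: paths to the model and its weights.
const fromPaths = core.readModelSync('model.xml', 'model.bin');

// Variant 2: buffers; the weights are copied into a u8 ov::Tensor internally.
const fromBuffers = core.readModelSync(
  readFileSync('model.xml'),
  readFileSync('model.bin'),
);
```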

View File

@ -0,0 +1,68 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <napi.h>
#include "element_type.hpp"
#include "errors.hpp"
#include "helper.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/tensor.hpp"
class TensorWrap : public Napi::ObjectWrap<TensorWrap> {
public:
/**
* @brief Constructs TensorWrap class from the Napi::CallbackInfo.
* @param info contains passed arguments. Can be empty or contain more arguments.
* Two arguments are passed:
* @param info[0] ov::element::Type as string or exposed to JS enumElementType.
* @param info[1] ov::Shape as JS Array, Int32Array or Uint32Array
* Three arguments are passed:
* @param info[0] ov::element::Type as string or exposed to JS enumElementType.
* @param info[1] ov::Shape as JS Array, Int32Array or Uint32Array
* @param info[2] Tensor data as TypedArray
* @throw Exception if params are of invalid type.
*/
TensorWrap(const Napi::CallbackInfo& info);
/**
* @brief Defines a Javascript Tensor class with constructor, static and instance properties and methods.
* @param env The environment in which to construct a JavaScript class.
* @return Napi::Function representing the constructor function for the Javascript Tensor class.
*/
static Napi::Function get_class_constructor(Napi::Env env);
/** @brief This method is called during initialization of OpenVINO native add-on.
* It exports JavaScript Tensor class.
*/
static Napi::Object init(Napi::Env env, Napi::Object exports);
ov::Tensor get_tensor() const;
void set_tensor(const ov::Tensor& tensor);
/**
* @brief Creates JavaScript Tensor object and wraps inside of it ov::Tensor object.
* @param env The environment in which to construct a JavaScript object.
* @param tensor ov::Tensor to wrap.
* @return Javascript Tensor as Napi::Object. (Not TensorWrap object)
*/
static Napi::Object wrap(Napi::Env env, ov::Tensor tensor);
/**
* @brief Helper function to access the tensor data as an attribute of JavaScript Tensor.
* @param info Contains information about the environment in which to create the Napi::TypedArray instance.
* @return Napi::TypedArray containing the tensor data.
*/
Napi::Value get_data(const Napi::CallbackInfo& info);
/** @return Napi::Array containing a tensor shape. */
Napi::Value get_shape(const Napi::CallbackInfo& info);
/** @return Napi::String containing ov::element type. */
Napi::Value get_element_type(const Napi::CallbackInfo& info);
private:
ov::Tensor _tensor;
};
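A sketch of constructing tensors from JavaScript per the constructor overloads documented above; that `getElementType()` returns the `element` enum value is taken from the typings:

```typescript
// Two arguments: element type and shape only.
const empty = new ov.Tensor('f32', [1, 3, 2, 2]);

// Three arguments: the TypedArray supplies the tensor data.
const filled = new ov.Tensor('f32', [2, 2], new Float32Array([1, 2, 3, 4]));

console.log(filled.getShape());       // [2, 2]
console.log(filled.getElementType()); // element.f32
console.log(filled.getData());
```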

View File

@ -0,0 +1,178 @@
type SupportedTypedArray =
| Int8Array
| Uint8Array
| Int16Array
| Uint16Array
| Int32Array
| Uint32Array
| Float32Array
| Float64Array;
type elementTypeString =
| 'u8'
| 'u32'
| 'u16'
| 'u64'
| 'i8'
| 'i64'
| 'i32'
| 'i16'
| 'f64'
| 'f32';
interface Core {
compileModel(
model: Model,
device: string,
config?: { [option: string]: string }
): Promise<CompiledModel>;
compileModelSync(
model: Model,
device: string,
config?: { [option: string]: string }
): CompiledModel;
readModel(modelPath: string, weightsPath?: string): Promise<Model>;
readModel(
modelBuffer: Uint8Array, weightsBuffer?: Uint8Array): Promise<Model>;
readModelSync(modelPath: string, weightsPath?: string): Model;
readModelSync(modelBuffer: Uint8Array, weightsBuffer?: Uint8Array): Model;
}
interface CoreConstructor {
new(): Core;
}
interface Model {
outputs: Output[];
inputs: Output[];
output(nameOrId?: string | number): Output;
input(nameOrId?: string | number): Output;
getName(): string;
}
interface CompiledModel {
outputs: Output[];
inputs: Output[];
output(nameOrId?: string | number): Output;
input(nameOrId?: string | number): Output;
createInferRequest(): InferRequest;
}
interface Tensor {
data: number[];
getElementType(): element;
getShape(): number[];
getData(): number[];
}
interface TensorConstructor {
new(type: element | elementTypeString,
shape: number[],
tensorData?: number[] | SupportedTypedArray): Tensor;
}
interface InferRequest {
setTensor(name: string, tensor: Tensor): void;
setInputTensor(idxOrTensor: number | Tensor, tensor?: Tensor): void;
setOutputTensor(idxOrTensor: number | Tensor, tensor?: Tensor): void;
getTensor(nameOrOutput: string | Output): Tensor;
getInputTensor(idx?: number): Tensor;
getOutputTensor(idx?: number): Tensor;
infer(inputData?: { [inputName: string]: Tensor | SupportedTypedArray}
| Tensor[] | SupportedTypedArray[]): { [outputName: string] : Tensor};
inferAsync(inputData: { [inputName: string]: Tensor}
| Tensor[] ): Promise<{ [outputName: string] : Tensor}>;
getCompiledModel(): CompiledModel;
}
type Dimension = number | [number, number];
interface Output {
anyName: string;
shape: number[];
toString(): string;
getAnyName(): string;
getShape(): number[];
getPartialShape(): PartialShape;
}
interface InputTensorInfo {
setElementType(elementType: element | elementTypeString): InputTensorInfo;
setLayout(layout: string): InputTensorInfo;
setShape(shape: number[]): InputTensorInfo;
}
interface OutputTensorInfo {
setElementType(elementType: element | elementTypeString): OutputTensorInfo;
setLayout(layout: string): OutputTensorInfo;
}
interface PreProcessSteps {
resize(algorithm: resizeAlgorithm | string): PreProcessSteps;
}
interface InputModelInfo {
setLayout(layout: string): InputModelInfo;
}
interface InputInfo {
tensor(): InputTensorInfo;
preprocess(): PreProcessSteps;
model(): InputModelInfo;
}
interface OutputInfo {
tensor(): OutputTensorInfo;
}
interface PrePostProcessor {
build(): PrePostProcessor;
input(idxOrTensorName?: number | string): InputInfo;
output(idxOrTensorName?: number | string): OutputInfo;
}
interface PrePostProcessorConstructor {
new(model: Model): PrePostProcessor;
}
interface PartialShape {
isStatic(): boolean;
isDynamic(): boolean;
toString(): string;
getDimensions(): Dimension[];
}
interface PartialShapeConstructor {
new(shape: string): PartialShape;
}
declare enum element {
u8,
u32,
u16,
u64,
i8,
i16,
i32,
i64,
f32,
f64,
}
declare enum resizeAlgorithm {
RESIZE_NEAREST,
RESIZE_CUBIC,
RESIZE_LINEAR,
}
export interface NodeAddon {
Core: CoreConstructor,
Tensor: TensorConstructor,
PartialShape: PartialShapeConstructor,
preprocess: {
resizeAlgorithm: typeof resizeAlgorithm,
PrePostProcessor: PrePostProcessorConstructor,
},
element: typeof element,
}
export default
// eslint-disable-next-line @typescript-eslint/no-var-requires
require('../bin/ov_node_addon.node') as
NodeAddon;

View File

@ -0,0 +1,3 @@
import addon from './addon';
export { addon };

src/bindings/js/node/package-lock.json generated Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,26 @@
{
"name": "openvinojs-node",
"version": "2023.3-preview",
"description": "OpenVINO™ utils for using from Node.js environment",
"license": "Apache-2.0",
"main": "./dist/index.js",
"types": "./types/index.d.ts",
"scripts": {
"build": "npm run tsc",
"prepare": "npm run build",
"lint": "eslint .",
"test": "node --test ./tests/",
"tsc": "tsc"
},
"devDependencies": {
"@types/node": "^20.5.0",
"@typescript-eslint/eslint-plugin": "^6.7.0",
"@typescript-eslint/parser": "^6.7.0",
"eslint": "^8.49.0",
"random-bigint": "^0.0.1",
"typescript": "^5.0.4"
},
"engines": {
"node": ">=18.16.0"
}
}

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "addon.hpp"
#include <napi.h>
#include "compiled_model.hpp"
#include "core_wrap.hpp"
#include "element_type.hpp"
#include "infer_request.hpp"
#include "model_wrap.hpp"
#include "node_output.hpp"
#include "openvino/openvino.hpp"
#include "partial_shape_wrap.hpp"
#include "preprocess/preprocess.hpp"
#include "tensor.hpp"
/** @brief Initialize native add-on */
Napi::Object init_all(Napi::Env env, Napi::Object exports) {
auto addon_data = new AddonData();
env.SetInstanceData<AddonData>(addon_data);
ModelWrap::init(env, exports);
CoreWrap::init(env, exports);
CompiledModelWrap::init(env, exports);
InferRequestWrap::init(env, exports);
TensorWrap::init(env, exports);
Output<const ov::Node>::init(env, exports);
Output<ov::Node>::init(env, exports);
PartialShapeWrap::init(env, exports);
preprocess::init(env, exports);
element::init(env, exports);
return exports;
}
/** @brief Register and initialize native add-on */
NODE_API_MODULE(addon_openvino, init_all)

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "async_reader.hpp"
void ReaderWorker::Execute() {
ov::Core core;
if (_args->model_str.empty())
_model = core.read_model(_args->model_path, _args->bin_path);
else
_model = core.read_model(_args->model_str, _args->weight_tensor);
}
void ReaderWorker::OnOK() {
Napi::HandleScope scope(Env());
Napi::Object mw = ModelWrap::get_class_constructor(Env()).New({});
ModelWrap* m = Napi::ObjectWrap<ModelWrap>::Unwrap(mw);
m->set_model(_model);
delete _args;
_deferred.Resolve(mw);
}
void ReaderWorker::OnError(Napi::Error const& error) {
_deferred.Reject(error.Value());
}
Napi::Promise ReaderWorker::GetPromise() {
return _deferred.Promise();
}

View File

@ -0,0 +1,124 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "compiled_model.hpp"
#include "addon.hpp"
#include "errors.hpp"
#include "infer_request.hpp"
#include "node_output.hpp"
CompiledModelWrap::CompiledModelWrap(const Napi::CallbackInfo& info)
: Napi::ObjectWrap<CompiledModelWrap>(info),
_compiled_model{} {}
Napi::Function CompiledModelWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"CompiledModel",
{InstanceMethod("createInferRequest", &CompiledModelWrap::create_infer_request),
InstanceMethod("input", &CompiledModelWrap::get_input),
InstanceAccessor<&CompiledModelWrap::get_inputs>("inputs"),
InstanceMethod("output", &CompiledModelWrap::get_output),
InstanceAccessor<&CompiledModelWrap::get_outputs>("outputs")});
}
Napi::Object CompiledModelWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->compiled_model_prototype = ref;
exports.Set("CompiledModel", prototype);
return exports;
}
Napi::Object CompiledModelWrap::wrap(Napi::Env env, ov::CompiledModel compiled_model) {
Napi::HandleScope scope(env);
const auto prototype = env.GetInstanceData<AddonData>()->compiled_model_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to CompiledModel prototype.");
}
auto obj = prototype->New({});
const auto cm = Napi::ObjectWrap<CompiledModelWrap>::Unwrap(obj);
cm->_compiled_model = compiled_model;
return obj;
}
void CompiledModelWrap::set_compiled_model(const ov::CompiledModel& compiled_model) {
_compiled_model = compiled_model;
}
Napi::Value CompiledModelWrap::create_infer_request(const Napi::CallbackInfo& info) {
ov::InferRequest infer_request = _compiled_model.create_infer_request();
return InferRequestWrap::wrap(info.Env(), infer_request);
}
Napi::Value CompiledModelWrap::get_output(const Napi::CallbackInfo& info) {
if (info.Length() == 0) {
try {
return Output<const ov::Node>::wrap(info.Env(), _compiled_model.output());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return Napi::Value();
}
} else if (info.Length() != 1) {
reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length()));
return Napi::Value();
} else if (info[0].IsString()) {
auto tensor_name = info[0].ToString();
return Output<const ov::Node>::wrap(info.Env(), _compiled_model.output(tensor_name));
} else if (info[0].IsNumber()) {
auto idx = info[0].As<Napi::Number>().Int32Value();
return Output<const ov::Node>::wrap(info.Env(), _compiled_model.output(idx));
} else {
reportError(info.Env(), "Error while getting compiled model outputs.");
return Napi::Value();
}
}
Napi::Value CompiledModelWrap::get_outputs(const Napi::CallbackInfo& info) {
auto cm_outputs = _compiled_model.outputs(); // Output<Node>
Napi::Array js_outputs = Napi::Array::New(info.Env(), cm_outputs.size());
size_t i = 0;
for (auto& out : cm_outputs)
js_outputs[i++] = Output<const ov::Node>::wrap(info.Env(), out);
return js_outputs;
}
Napi::Value CompiledModelWrap::get_input(const Napi::CallbackInfo& info) {
if (info.Length() == 0) {
try {
return Output<const ov::Node>::wrap(info.Env(), _compiled_model.input());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return Napi::Value();
}
} else if (info.Length() != 1) {
reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length()));
return Napi::Value();
} else if (info[0].IsString()) {
auto tensor_name = info[0].ToString();
return Output<const ov::Node>::wrap(info.Env(), _compiled_model.input(tensor_name));
} else if (info[0].IsNumber()) {
auto idx = info[0].As<Napi::Number>().Int32Value();
return Output<const ov::Node>::wrap(info.Env(), _compiled_model.input(idx));
} else {
reportError(info.Env(), "Error while getting compiled model inputs.");
return Napi::Value();
}
}
Napi::Value CompiledModelWrap::get_inputs(const Napi::CallbackInfo& info) {
auto cm_inputs = _compiled_model.inputs(); // Output<Node>
Napi::Array js_inputs = Napi::Array::New(info.Env(), cm_inputs.size());
size_t i = 0;
for (auto& out : cm_inputs)
js_inputs[i++] = Output<const ov::Node>::wrap(info.Env(), out);
return js_inputs;
}

View File

@ -0,0 +1,231 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "core_wrap.hpp"
#include "addon.hpp"
#include "compiled_model.hpp"
#include "model_wrap.hpp"
#include "read_model_args.hpp"
CoreWrap::CoreWrap(const Napi::CallbackInfo& info) : Napi::ObjectWrap<CoreWrap>(info), _core{} {}
Napi::Function CoreWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"Core",
{
InstanceMethod("readModelSync", &CoreWrap::read_model_sync),
InstanceMethod("readModel", &CoreWrap::read_model_async),
InstanceMethod("compileModelSync", &CoreWrap::compile_model_sync_dispatch),
InstanceMethod("compileModel", &CoreWrap::compile_model_async),
});
}
Napi::Object CoreWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->core_prototype = ref;
exports.Set("Core", prototype);
return exports;
}
Napi::Value CoreWrap::read_model_sync(const Napi::CallbackInfo& info) {
try {
ReadModelArgs* args;
args = new ReadModelArgs(info);
auto model = args->model_str.empty() ? _core.read_model(args->model_path, args->bin_path)
: _core.read_model(args->model_str, args->weight_tensor);
delete args;
return ModelWrap::wrap(info.Env(), model);
} catch (std::runtime_error& err) {
reportError(info.Env(), err.what());
return info.Env().Undefined();
}
}
Napi::Value CoreWrap::read_model_async(const Napi::CallbackInfo& info) {
try {
ReadModelArgs* args = new ReadModelArgs(info);
ReaderWorker* _readerWorker = new ReaderWorker(info.Env(), args);
_readerWorker->Queue();
return _readerWorker->GetPromise();
} catch (std::runtime_error& err) {
reportError(info.Env(), err.what());
return info.Env().Undefined();
}
}
Napi::Value CoreWrap::compile_model_sync(const Napi::CallbackInfo& info,
const Napi::Object& model,
const Napi::String& device) {
const auto model_prototype = info.Env().GetInstanceData<AddonData>()->model_prototype;
if (model_prototype && model.InstanceOf(model_prototype->Value().As<Napi::Function>())) {
const auto m = Napi::ObjectWrap<ModelWrap>::Unwrap(model);
const auto& compiled_model = _core.compile_model(m->get_model(), device);
return CompiledModelWrap::wrap(info.Env(), compiled_model);
} else {
reportError(info.Env(), "Cannot create Model from Napi::Object.");
return info.Env().Undefined();
}
}
Napi::Value CoreWrap::compile_model_sync(const Napi::CallbackInfo& info,
const Napi::String& model_path,
const Napi::String& device) {
const auto& compiled_model = _core.compile_model(model_path, device);
return CompiledModelWrap::wrap(info.Env(), compiled_model);
}
Napi::Value CoreWrap::compile_model_sync(const Napi::CallbackInfo& info,
const Napi::Object& model_obj,
const Napi::String& device,
const std::map<std::string, ov::Any>& config) {
const auto& mw = Napi::ObjectWrap<ModelWrap>::Unwrap(model_obj);
const auto& compiled_model = _core.compile_model(mw->get_model(), info[1].ToString(), config);
return CompiledModelWrap::wrap(info.Env(), compiled_model);
}
Napi::Value CoreWrap::compile_model_sync(const Napi::CallbackInfo& info,
const Napi::String& model_path,
const Napi::String& device,
const std::map<std::string, ov::Any>& config) {
const auto& compiled_model = _core.compile_model(model_path, device, config);
return CompiledModelWrap::wrap(info.Env(), compiled_model);
}
Napi::Value CoreWrap::compile_model_sync_dispatch(const Napi::CallbackInfo& info) {
try {
if (info.Length() == 2 && info[0].IsString() && info[1].IsString()) {
return compile_model_sync(info, info[0].ToString(), info[1].ToString());
} else if (info.Length() == 2 && info[0].IsObject() && info[1].IsString()) {
return compile_model_sync(info, info[0].ToObject(), info[1].ToString());
} else if (info.Length() == 3 && info[0].IsString() && info[1].IsString()) {
const auto& config = js_to_cpp<std::map<std::string, ov::Any>>(info, 2, {napi_object});
return compile_model_sync(info, info[0].ToString(), info[1].ToString(), config);
} else if (info.Length() == 3 && info[0].IsObject() && info[1].IsString()) {
const auto& config = js_to_cpp<std::map<std::string, ov::Any>>(info, 2, {napi_object});
return compile_model_sync(info, info[0].ToObject(), info[1].ToString(), config);
} else if (info.Length() < 2 || info.Length() > 3) {
reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length()));
return info.Env().Undefined();
} else {
reportError(info.Env(), "Error while compiling model.");
return info.Env().Undefined();
}
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return info.Env().Undefined();
}
}
void FinalizerCallbackModel(Napi::Env env, void* finalizeData, TsfnContextModel* context) {
context->nativeThread.join();
delete context;
};
void FinalizerCallbackPath(Napi::Env env, void* finalizeData, TsfnContextPath* context) {
context->nativeThread.join();
delete context;
};
void compileModelThreadModel(TsfnContextModel* context) {
ov::Core core;
context->_compiled_model = core.compile_model(context->_model, context->_device, context->_config);
auto callback = [](Napi::Env env, Napi::Function, TsfnContextModel* context) {
Napi::HandleScope scope(env);
auto obj = CompiledModelWrap::get_class_constructor(env).New({});
auto cm = Napi::ObjectWrap<CompiledModelWrap>::Unwrap(obj);
cm->set_compiled_model(context->_compiled_model);
context->deferred.Resolve(obj);
};
context->tsfn.BlockingCall(context, callback);
context->tsfn.Release();
}
void compileModelThreadPath(TsfnContextPath* context) {
ov::Core core;
context->_compiled_model = core.compile_model(context->_model, context->_device, context->_config);
auto callback = [](Napi::Env env, Napi::Function, TsfnContextPath* context) {
Napi::HandleScope scope(env);
auto obj = CompiledModelWrap::get_class_constructor(env).New({});
auto cm = Napi::ObjectWrap<CompiledModelWrap>::Unwrap(obj);
cm->set_compiled_model(context->_compiled_model);
context->deferred.Resolve(obj);
};
context->tsfn.BlockingCall(context, callback);
context->tsfn.Release();
}
Napi::Value CoreWrap::compile_model_async(const Napi::CallbackInfo& info) {
auto env = info.Env();
if (info[0].IsObject() && info[1].IsString()) {
auto context_data = new TsfnContextModel(env);
auto m = Napi::ObjectWrap<ModelWrap>::Unwrap(info[0].ToObject());
context_data->_model = m->get_model()->clone();
context_data->_device = info[1].ToString();
if (info.Length() == 3) {
try {
context_data->_config = js_to_cpp<std::map<std::string, ov::Any>>(info, 2, {napi_object});
} catch (std::exception& e) {
reportError(env, e.what());
}
}
context_data->tsfn = Napi::ThreadSafeFunction::New(env,
Napi::Function(),
"TSFN",
0,
1,
context_data,
FinalizerCallbackModel,
(void*)nullptr);
context_data->nativeThread = std::thread(compileModelThreadModel, context_data);
return context_data->deferred.Promise();
} else if (info[0].IsString() && info[1].IsString()) {
auto context_data = new TsfnContextPath(env);
context_data->_model = info[0].ToString();
context_data->_device = info[1].ToString();
if (info.Length() == 3) {
try {
context_data->_config = js_to_cpp<std::map<std::string, ov::Any>>(info, 2, {napi_object});
} catch (std::exception& e) {
reportError(env, e.what());
}
}
context_data->tsfn = Napi::ThreadSafeFunction::New(env,
Napi::Function(),
"TSFN",
0,
1,
context_data,
FinalizerCallbackPath,
(void*)nullptr);
context_data->nativeThread = std::thread(compileModelThreadPath, context_data);
return context_data->deferred.Promise();
} else if (info.Length() < 2 || info.Length() > 3) {
reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length()));
return Napi::Value();
} else {
reportError(info.Env(), "Error while compiling model.");
return Napi::Value();
}
}

View File

@ -0,0 +1,29 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "element_type.hpp"
#include <iostream>
#include <typeinfo>
namespace element {
Napi::Object init(Napi::Env env, Napi::Object exports) {
auto element = Napi::PropertyDescriptor::Accessor<add_element_namespace>("element");
exports.DefineProperty(element);
return exports;
}
Napi::Value add_element_namespace(const Napi::CallbackInfo& info) {
auto element = Napi::Object::New(info.Env());
std::vector<Napi::PropertyDescriptor> pds;
for (const auto& et : get_supported_types())
pds.push_back(Napi::PropertyDescriptor::Value(et, Napi::String::New(info.Env(), et), napi_default));
element.DefineProperties(pds);
return element;
}
};

View File

@ -0,0 +1,8 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "errors.hpp"
void reportError(const Napi::Env& env, std::string msg) {
Napi::Error::New(env, msg).ThrowAsJavaScriptException();
}

View File

@ -0,0 +1,320 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "helper.hpp"
#include "tensor.hpp"
const std::vector<std::string>& get_supported_types() {
static const std::vector<std::string> supported_element_types =
{"i8", "u8", "i16", "u16", "i32", "u32", "f32", "f64", "i64", "u64"};
return supported_element_types;
}
napi_types napiType(const Napi::Value& val) {
if (val.IsTypedArray())
return val.As<Napi::TypedArray>().TypedArrayType();
else if (val.IsArray())
return js_array;
else
return val.Type();
}
bool acceptableType(const Napi::Value& val, const std::vector<napi_types>& acceptable) {
return std::any_of(acceptable.begin(), acceptable.end(), [val](napi_types t) {
return napiType(val) == t;
});
}
template <>
int32_t js_to_cpp<int32_t>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto elem = info[idx];
if (!acceptableType(elem, acceptable_types))
OPENVINO_THROW(std::string("Cannot convert argument" + std::to_string(idx)));
if (!elem.IsNumber()) {
OPENVINO_THROW(std::string("Passed argument must be a number."));
}
return elem.ToNumber().Int32Value();
}
template <>
std::string js_to_cpp<std::string>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto elem = info[idx];
if (!acceptableType(elem, acceptable_types))
OPENVINO_THROW(std::string("Cannot convert argument") + std::to_string(idx));
if (!elem.IsString()) {
OPENVINO_THROW(std::string("Passed argument must be a string."));
}
return elem.ToString();
}
template <>
std::vector<size_t> js_to_cpp<std::vector<size_t>>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto elem = info[idx];
if (!acceptableType(elem, acceptable_types))
OPENVINO_THROW(std::string("Cannot convert argument.") + std::to_string(idx));
if (!elem.IsArray() && !elem.IsTypedArray()) {
OPENVINO_THROW(std::string("Passed argument must be of type Array or TypedArray."));
} else if (elem.IsArray()) {
auto array = elem.As<Napi::Array>();
size_t arrayLength = array.Length();
std::vector<size_t> nativeArray;
for (size_t i = 0; i < arrayLength; ++i) {
Napi::Value arrayItem = array[i];
if (!arrayItem.IsNumber()) {
OPENVINO_THROW(std::string("Passed array must contain only numbers."));
}
Napi::Number num = arrayItem.As<Napi::Number>();
nativeArray.push_back(static_cast<size_t>(num.Int32Value()));
}
return nativeArray;
} else {
const auto type = elem.As<Napi::TypedArray>().TypedArrayType();
if ((type != napi_int32_array) && (type != napi_uint32_array)) {
OPENVINO_THROW(std::string("Passed argument must be an Int32Array or a Uint32Array."));
}
std::vector<size_t> vector;
if (type == napi_uint32_array) {
// TypedArrayOf::Data() already accounts for the view's byteOffset into the ArrayBuffer.
auto buf = elem.As<Napi::Uint32Array>();
vector.assign(buf.Data(), buf.Data() + buf.ElementLength());
} else {
auto buf = elem.As<Napi::Int32Array>();
vector.assign(buf.Data(), buf.Data() + buf.ElementLength());
}
return vector;
}
}
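This converter is what lets shape-like arguments arrive from JS as a plain number array or a 32-bit typed array; a sketch of equivalent calls (`data` stands for a Float32Array of matching size):
const shape = [1, 3, 32, 32];
new ov.Tensor(ov.element.f32, shape, data); // js_array
new ov.Tensor(ov.element.f32, new Int32Array(shape), data); // napi_int32_array
new ov.Tensor(ov.element.f32, new Uint32Array(shape), data); // napi_uint32_array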
template <>
std::unordered_set<std::string> js_to_cpp<std::unordered_set<std::string>>(
const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto elem = info[idx];
if (!elem.IsArray()) {
OPENVINO_THROW(std::string("Passed argument must be of type Array."));
} else {
auto array = elem.As<Napi::Array>();
size_t arrayLength = array.Length();
std::unordered_set<std::string> nativeArray;
for (size_t i = 0; i < arrayLength; ++i) {
Napi::Value arrayItem = array[i];
if (!arrayItem.IsString()) {
OPENVINO_THROW(std::string("Passed array must contain only strings."));
}
Napi::String str = arrayItem.As<Napi::String>();
nativeArray.insert(str.Utf8Value());
}
return nativeArray;
}
}
template <>
ov::element::Type_t js_to_cpp<ov::element::Type_t>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto elem = info[idx];
if (!acceptableType(elem, acceptable_types))
OPENVINO_THROW(std::string("Cannot convert Napi::Value to ov::element::Type_t"));
const std::string type = elem.ToString();
const auto& types = get_supported_types();
if (std::find(types.begin(), types.end(), type) == types.end())
OPENVINO_THROW(std::string("Cannot create ov::element::Type"));
return static_cast<ov::element::Type_t>(ov::element::Type(type));
}
template <>
ov::Layout js_to_cpp<ov::Layout>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
auto layout = js_to_cpp<std::string>(info, idx, acceptable_types);
return ov::Layout(layout);
}
template <>
ov::Shape js_to_cpp<ov::Shape>(const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
auto shape = js_to_cpp<std::vector<size_t>>(info, idx, acceptable_types);
return ov::Shape(shape);
}
template <>
ov::preprocess::ResizeAlgorithm js_to_cpp<ov::preprocess::ResizeAlgorithm>(
const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto& elem = info[idx];
if (!acceptableType(elem, acceptable_types))
OPENVINO_THROW(std::string("Cannot convert Napi::Value to resizeAlgorithm"));
const std::string& algorithm = elem.ToString();
if (algorithm == "RESIZE_CUBIC") {
return ov::preprocess::ResizeAlgorithm::RESIZE_CUBIC;
} else if (algorithm == "RESIZE_NEAREST") {
return ov::preprocess::ResizeAlgorithm::RESIZE_NEAREST;
} else if (algorithm == "RESIZE_LINEAR") {
return ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR;
} else {
OPENVINO_THROW(std::string("Not supported resizeAlgorithm."));
}
}
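On the JS side the three names are exposed as string constants on `preprocess.resizeAlgorithm` (see resize_algorithm.cpp further below), so a resize step looks like this sketch:
const ppp = new ov.preprocess.PrePostProcessor(model);
ppp.input().preprocess().resize(ov.preprocess.resizeAlgorithm.RESIZE_LINEAR);
ppp.build();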
template <>
ov::Any js_to_cpp<ov::Any>(const Napi::Value& value, const std::vector<napi_types>& acceptable_types) {
if (!acceptableType(value, acceptable_types)) {
OPENVINO_THROW(std::string("Cannot convert Napi::Value to ov::Any"));
}
if (value.IsString()) {
return value.ToString().Utf8Value();
} else if (value.IsNumber()) {
return value.ToNumber().Int32Value();
} else {
OPENVINO_THROW(std::string("The conversion is not supported yet."));
}
}
template <>
std::map<std::string, ov::Any> js_to_cpp<std::map<std::string, ov::Any>>(
const Napi::CallbackInfo& info,
const size_t idx,
const std::vector<napi_types>& acceptable_types) {
const auto elem = info[idx];
if (!acceptableType(elem, acceptable_types)) {
OPENVINO_THROW(std::string("Cannot convert Napi::Value to std::map<std::string, ov::Any>"));
}
std::map<std::string, ov::Any> properties_to_cpp;
const auto& config = elem.ToObject();
const auto& keys = config.GetPropertyNames();
for (size_t i = 0; i < keys.Length(); ++i) {
const std::string& option = static_cast<Napi::Value>(keys[i]).ToString();
properties_to_cpp[option] = js_to_cpp<ov::Any>(config.Get(option), {napi_string});
}
return properties_to_cpp;
}
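Since each value is converted with `{napi_string}` as the only acceptable type, config objects currently support string values only; a sketch:
core.compileModelSync(model, 'CPU', { 'PERFORMANCE_HINT': 'THROUGHPUT' }); // ok
// core.compileModelSync(model, 'CPU', { 'PERFORMANCE_HINT': 42 });
// throws: Cannot convert Napi::Value to ov::Any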
template <>
Napi::String cpp_to_js<ov::element::Type_t, Napi::String>(const Napi::CallbackInfo& info,
const ov::element::Type_t type) {
return Napi::String::New(info.Env(), ov::element::Type(type).to_string());
}
template <>
Napi::Array cpp_to_js<ov::Shape, Napi::Array>(const Napi::CallbackInfo& info, const ov::Shape shape) {
auto arr = Napi::Array::New(info.Env(), shape.size());
for (size_t i = 0; i < shape.size(); ++i)
arr[i] = shape[i];
return arr;
}
template <>
Napi::Array cpp_to_js<ov::PartialShape, Napi::Array>(const Napi::CallbackInfo& info, const ov::PartialShape shape) {
size_t size = shape.size();
Napi::Array dimensions = Napi::Array::New(info.Env(), size);
for (size_t i = 0; i < size; i++) {
ov::Dimension dim = shape[i];
if (dim.is_static()) {
dimensions[i] = dim.get_length();
continue;
}
auto min = dim.get_min_length();
auto max = dim.get_max_length();
if (min > max) {
dimensions[i] = -1;
continue;
}
dimensions[i] = cpp_to_js<ov::Dimension, Napi::Array>(info, dim);
}
return dimensions;
}
template <>
Napi::Array cpp_to_js<ov::Dimension, Napi::Array>(const Napi::CallbackInfo& info, const ov::Dimension dim) {
Napi::Array interval = Napi::Array::New(info.Env(), 2);
// Explicit size_t indexes avoid an overload ambiguity: with a literal,
// interval[0] could mean either a uint32_t index or a const char* key,
// so a plain assignment does not compile here.
size_t indexes[] = {0, 1};
interval[indexes[0]] = dim.get_min_length();
interval[indexes[1]] = dim.get_max_length();
return interval;
}
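Together, these two converters give a partial shape a compact JS form: a static dimension becomes a number, an unbounded one becomes -1, and a bounded interval becomes a `[min, max]` pair (this example is taken from the PartialShape tests below):
const shape = new ov.PartialShape('?, -1, 1..3, 224');
console.log(shape.getDimensions()); // [-1, -1, [1, 3], 224]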
template <>
Napi::Boolean cpp_to_js<bool, Napi::Boolean>(const Napi::CallbackInfo& info, const bool value) {
return Napi::Boolean::New(info.Env(), value);
}
ov::TensorVector parse_input_data(const Napi::Value& input) {
ov::TensorVector parsed_input;
if (input.IsArray()) {
auto inputs = input.As<Napi::Array>();
for (size_t i = 0; i < inputs.Length(); ++i) {
parsed_input.emplace_back(cast_to_tensor(static_cast<Napi::Value>(inputs[i])));
}
} else if (input.IsObject()) {
auto inputs = input.ToObject();
const auto& keys = inputs.GetPropertyNames();
for (size_t i = 0; i < keys.Length(); ++i) {
auto value = inputs.Get(static_cast<Napi::Value>(keys[i]).ToString().Utf8Value());
parsed_input.emplace_back(cast_to_tensor(static_cast<Napi::Value>(value)));
}
} else {
OPENVINO_THROW("parse_input_data(): wrong arg");
}
return parsed_input;
}
ov::Tensor get_request_tensor(ov::InferRequest& infer_request, const std::string key) {
return infer_request.get_tensor(key);
}
ov::Tensor get_request_tensor(ov::InferRequest& infer_request, const size_t idx) {
return infer_request.get_input_tensor(idx);
}
ov::Tensor cast_to_tensor(const Napi::Value& value) {
if (value.IsObject()) {
auto tensor_wrap = Napi::ObjectWrap<TensorWrap>::Unwrap(value.ToObject());
return tensor_wrap->get_tensor();
} else {
OPENVINO_THROW("Cannot create a tensor from the passed Napi::Value.");
}
}
ov::Tensor cast_to_tensor(const Napi::TypedArray& typed_array,
const ov::Shape& shape,
const ov::element::Type_t& type) {
/* ArrayBuffer::Data() points at the start of the underlying buffer, while a TypedArray
view may start at a non-zero ByteOffset() into it. The tensor wraps the raw buffer
directly, so only zero-offset views can be accepted. */
if (typed_array.ByteOffset() != 0) {
OPENVINO_THROW("TypedArray.byteOffset has to be equal to zero.");
}
auto array_buffer = typed_array.ArrayBuffer();
auto tensor = ov::Tensor(type, shape, array_buffer.Data());
if (tensor.get_byte_size() != array_buffer.ByteLength()) {
OPENVINO_THROW("Memory allocated using shape and element::type mismatch passed data's size");
}
return tensor;
}
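The zero-offset rule matters whenever a TypedArray is a view into a larger buffer; a sketch of what is and is not accepted:
const buf = new ArrayBuffer(64);
new ov.Tensor(ov.element.f32, [16], new Float32Array(buf)); // byteOffset 0: accepted
new ov.Tensor(ov.element.f32, [15], new Float32Array(buf, 4)); // byteOffset 4: throws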

View File

@ -0,0 +1,270 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "infer_request.hpp"
#include <mutex>
#include <random>
#include "addon.hpp"
#include "compiled_model.hpp"
#include "node_output.hpp"
#include "tensor.hpp"
namespace {
std::mutex infer_mutex;
}
InferRequestWrap::InferRequestWrap(const Napi::CallbackInfo& info)
: Napi::ObjectWrap<InferRequestWrap>(info),
_infer_request{} {}
Napi::Function InferRequestWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"InferRequest",
{
InstanceMethod("setTensor", &InferRequestWrap::set_tensor),
InstanceMethod("setInputTensor", &InferRequestWrap::set_input_tensor),
InstanceMethod("setOutputTensor", &InferRequestWrap::set_output_tensor),
InstanceMethod("getTensor", &InferRequestWrap::get_tensor),
InstanceMethod("getInputTensor", &InferRequestWrap::get_input_tensor),
InstanceMethod("getOutputTensor", &InferRequestWrap::get_output_tensor),
InstanceMethod("infer", &InferRequestWrap::infer_dispatch),
InstanceMethod("inferAsync", &InferRequestWrap::infer_async),
InstanceMethod("getCompiledModel", &InferRequestWrap::get_compiled_model),
});
}
Napi::Object InferRequestWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->infer_request_prototype = ref;
exports.Set("InferRequest", prototype);
return exports;
}
void InferRequestWrap::set_infer_request(const ov::InferRequest& infer_request) {
_infer_request = infer_request;
}
Napi::Object InferRequestWrap::wrap(Napi::Env env, ov::InferRequest infer_request) {
Napi::HandleScope scope(env);
const auto prototype = env.GetInstanceData<AddonData>()->infer_request_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to InferRequest prototype.");
}
auto obj = prototype->New({});
const auto ir = Napi::ObjectWrap<InferRequestWrap>::Unwrap(obj);
ir->set_infer_request(infer_request);
return obj;
}
void InferRequestWrap::set_tensor(const Napi::CallbackInfo& info) {
if (info.Length() != 2 || !info[0].IsString() || !info[1].IsObject()) {
reportError(info.Env(), "InferRequest.setTensor() invalid argument.");
} else {
std::string name = info[0].ToString();
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[1].ToObject());
_infer_request.set_tensor(name, tensorWrap->get_tensor());
}
}
void InferRequestWrap::set_input_tensor(const Napi::CallbackInfo& info) {
if (info.Length() == 1 && info[0].IsObject()) {
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[0].ToObject());
_infer_request.set_input_tensor(tensorWrap->get_tensor());
} else if (info.Length() == 2 && info[0].IsNumber() && info[1].IsObject()) {
auto idx = info[0].ToNumber().Int32Value();
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[1].ToObject());
_infer_request.set_input_tensor(idx, tensorWrap->get_tensor());
} else {
reportError(info.Env(), "InferRequest.setInputTensor() invalid argument.");
}
}
void InferRequestWrap::set_output_tensor(const Napi::CallbackInfo& info) {
if (info.Length() == 1 && info[0].IsObject()) {
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[0].ToObject());
auto t = tensorWrap->get_tensor();
_infer_request.set_output_tensor(t);
} else if (info.Length() == 2 && info[0].IsNumber() && info[1].IsObject()) {
auto idx = info[0].ToNumber().Int32Value();
auto tensorWrap = Napi::ObjectWrap<TensorWrap>::Unwrap(info[1].ToObject());
_infer_request.set_output_tensor(idx, tensorWrap->get_tensor());
} else {
reportError(info.Env(), "InferRequest.setOutputTensor() invalid argument.");
}
}
Napi::Value InferRequestWrap::get_tensor(const Napi::CallbackInfo& info) {
ov::Tensor tensor;
if (info.Length() != 1) {
reportError(info.Env(), "InferRequest.getTensor() invalid number of arguments.");
} else if (info[0].IsString()) {
std::string tensor_name = info[0].ToString();
tensor = _infer_request.get_tensor(tensor_name);
} else if (info[0].IsObject()) {
auto outputWrap = Napi::ObjectWrap<Output<const ov::Node>>::Unwrap(info[0].ToObject());
ov::Output<const ov::Node> output = outputWrap->get_output();
tensor = _infer_request.get_tensor(output);
} else {
reportError(info.Env(), "InferRequest.getTensor() invalid argument.");
}
return TensorWrap::wrap(info.Env(), tensor);
}
Napi::Value InferRequestWrap::get_input_tensor(const Napi::CallbackInfo& info) {
ov::Tensor tensor;
if (info.Length() == 0) {
tensor = _infer_request.get_input_tensor();
} else if (info.Length() == 1 && info[0].IsNumber()) {
auto idx = info[0].ToNumber().Int32Value();
tensor = _infer_request.get_input_tensor(idx);
} else {
reportError(info.Env(), "InferRequest.getInputTensor() invalid argument.");
}
return TensorWrap::wrap(info.Env(), tensor);
}
Napi::Value InferRequestWrap::get_output_tensor(const Napi::CallbackInfo& info) {
ov::Tensor tensor;
if (info.Length() == 0) {
tensor = _infer_request.get_output_tensor();
} else if (info.Length() == 1 && info[0].IsNumber()) {
auto idx = info[0].ToNumber().Int32Value();
tensor = _infer_request.get_output_tensor(idx);
} else {
reportError(info.Env(), "InferRequest.getInputTensor() invalid argument.");
}
return TensorWrap::wrap(info.Env(), tensor);
}
Napi::Value InferRequestWrap::get_output_tensors(const Napi::CallbackInfo& info) {
auto model_outputs = _infer_request.get_compiled_model().outputs();
auto outputs_obj = Napi::Object::New(info.Env());
for (const auto& node : model_outputs) {
auto tensor = _infer_request.get_tensor(node);
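// Copy the output so the returned JS tensor owns its memory independently of the request.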
auto new_tensor = ov::Tensor(tensor.get_element_type(), tensor.get_shape());
tensor.copy_to(new_tensor);
outputs_obj.Set(node.get_any_name(), TensorWrap::wrap(info.Env(), new_tensor));
}
return outputs_obj;
}
Napi::Value InferRequestWrap::infer_dispatch(const Napi::CallbackInfo& info) {
if (info.Length() == 0)
_infer_request.infer();
else if (info.Length() == 1 && info[0].IsTypedArray()) {
reportError(info.Env(), "TypedArray cannot be passed directly into infer() method.");
return info.Env().Null();
} else if (info.Length() == 1 && info[0].IsArray()) {
try {
infer(info[0].As<Napi::Array>());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return info.Env().Null();
}
} else if (info.Length() == 1 && info[0].IsObject()) {
try {
infer(info[0].As<Napi::Object>());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return info.Env().Null();
}
} else {
reportError(info.Env(), "Infer method takes as an argument an array or an object.");
}
return get_output_tensors(info);
}
void InferRequestWrap::infer(const Napi::Array& inputs) {
for (size_t i = 0; i < inputs.Length(); ++i) {
auto tensor = value_to_tensor(inputs[i], _infer_request, i);
_infer_request.set_input_tensor(i, tensor);
}
_infer_request.infer();
}
void InferRequestWrap::infer(const Napi::Object& inputs) {
const auto& keys = inputs.GetPropertyNames();
for (size_t i = 0; i < keys.Length(); ++i) {
auto input_name = static_cast<Napi::Value>(keys[i]).ToString().Utf8Value();
auto value = inputs.Get(input_name);
auto tensor = value_to_tensor(value, _infer_request, input_name);
_infer_request.set_tensor(input_name, tensor);
}
_infer_request.infer();
}
Napi::Value InferRequestWrap::get_compiled_model(const Napi::CallbackInfo& info) {
return CompiledModelWrap::wrap(info.Env(), _infer_request.get_compiled_model());
}
void FinalizerCallback(Napi::Env env, void* finalizeData, TsfnContext* context) {
context->native_thread.join();
delete context;
}
void performInferenceThread(TsfnContext* context) {
{
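// ov::InferRequest isn't thread-safe, so serialize inference across concurrent inferAsync() calls.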
const std::lock_guard<std::mutex> lock(infer_mutex);
for (size_t i = 0; i < context->_inputs.size(); ++i) {
context->_ir->set_input_tensor(i, context->_inputs[i]);
}
context->_ir->infer();
auto compiled_model = context->_ir->get_compiled_model().outputs();
std::map<std::string, ov::Tensor> outputs;
for (auto& node : compiled_model) {
const auto& tensor = context->_ir->get_tensor(node);
auto new_tensor = ov::Tensor(tensor.get_element_type(), tensor.get_shape());
tensor.copy_to(new_tensor);
outputs.insert({node.get_any_name(), new_tensor});
}
context->result = outputs;
}
auto callback = [](Napi::Env env, Napi::Function, TsfnContext* context) {
const auto& res = context->result;
auto outputs_obj = Napi::Object::New(env);
for (const auto& [key, tensor] : res) {
outputs_obj.Set(key, TensorWrap::wrap(env, tensor));
}
context->deferred.Resolve({outputs_obj});
};
context->tsfn.BlockingCall(context, callback);
context->tsfn.Release();
}
Napi::Value InferRequestWrap::infer_async(const Napi::CallbackInfo& info) {
if (info.Length() != 1) {
reportError(info.Env(), "InferAsync method takes as an argument an array or an object.");
return info.Env().Undefined();
}
Napi::Env env = info.Env();
auto context = new TsfnContext(env);
context->_ir = &_infer_request;
try {
context->_inputs = parse_input_data(info[0]);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
delete context;
return env.Undefined();
}
context->tsfn =
Napi::ThreadSafeFunction::New(env, Napi::Function(), "TSFN", 0, 1, context, FinalizerCallback, (void*)nullptr);
context->native_thread = std::thread(performInferenceThread, context);
return context->deferred.Promise();
}
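From JS, `inferAsync()` therefore returns a promise that resolves with the copied output tensors; a sketch using the input/output names of the bundled test model:
const ir = compiledModel.createInferRequest();
ir.inferAsync({ data: tensor }).then(outputs => {
console.log(outputs['fc_out'].getShape()); // e.g. [1, 10]
});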

View File

@ -0,0 +1,130 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "model_wrap.hpp"
#include "addon.hpp"
#include "node_output.hpp"
ModelWrap::ModelWrap(const Napi::CallbackInfo& info)
: Napi::ObjectWrap<ModelWrap>(info),
_model{},
_core{},
_compiled_model{} {}
Napi::Function ModelWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"ModelWrap",
{InstanceMethod("getName", &ModelWrap::get_name),
InstanceMethod("output", &ModelWrap::get_output),
InstanceMethod("input", &ModelWrap::get_input),
InstanceAccessor<&ModelWrap::get_inputs>("inputs"),
InstanceAccessor<&ModelWrap::get_outputs>("outputs")});
}
Napi::Object ModelWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->model_prototype = ref;
exports.Set("Model", prototype);
return exports;
}
void ModelWrap::set_model(const std::shared_ptr<ov::Model>& model) {
_model = model;
}
Napi::Object ModelWrap::wrap(Napi::Env env, std::shared_ptr<ov::Model> model) {
Napi::HandleScope scope(env);
const auto prototype = env.GetInstanceData<AddonData>()->model_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to model prototype.");
}
const auto& model_js = prototype->New({});
const auto mw = Napi::ObjectWrap<ModelWrap>::Unwrap(model_js);
mw->set_model(model);
return model_js;
}
Napi::Value ModelWrap::get_name(const Napi::CallbackInfo& info) {
if (_model->get_name() != "")
return Napi::String::New(info.Env(), _model->get_name());
else
return Napi::String::New(info.Env(), "unknown");
}
std::shared_ptr<ov::Model> ModelWrap::get_model() const {
return _model;
}
Napi::Value ModelWrap::get_input(const Napi::CallbackInfo& info) {
if (info.Length() == 0) {
try {
return Output<ov::Node>::wrap(info.Env(), _model->input());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return Napi::Value();
}
} else if (info.Length() != 1) {
reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length()));
return Napi::Value();
} else if (info[0].IsString()) {
const auto& tensor_name = info[0].ToString();
return Output<ov::Node>::wrap(info.Env(), _model->input(tensor_name));
} else if (info[0].IsNumber()) {
const auto& idx = info[0].As<Napi::Number>().Int32Value();
return Output<ov::Node>::wrap(info.Env(), _model->input(idx));
} else {
reportError(info.Env(), "Error while getting model outputs.");
return info.Env().Undefined();
}
}
Napi::Value ModelWrap::get_output(const Napi::CallbackInfo& info) {
if (info.Length() == 0) {
try {
return Output<ov::Node>::wrap(info.Env(), _model->output());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return Napi::Value();
}
} else if (info.Length() != 1) {
reportError(info.Env(), "Invalid number of arguments -> " + std::to_string(info.Length()));
return Napi::Value();
} else if (info[0].IsString()) {
auto tensor_name = info[0].ToString();
return Output<ov::Node>::wrap(info.Env(), _model->output(tensor_name));
} else if (info[0].IsNumber()) {
auto idx = info[0].As<Napi::Number>().Int32Value();
return Output<ov::Node>::wrap(info.Env(), _model->output(idx));
} else {
reportError(info.Env(), "Error while getting model outputs.");
return Napi::Value();
}
}
Napi::Value ModelWrap::get_inputs(const Napi::CallbackInfo& info) {
auto cm_inputs = _model->inputs(); // Output<Node>
Napi::Array js_inputs = Napi::Array::New(info.Env(), cm_inputs.size());
size_t i = 0;
for (auto& input : cm_inputs)
js_inputs[i++] = Output<ov::Node>::wrap(info.Env(), input);
return js_inputs;
}
Napi::Value ModelWrap::get_outputs(const Napi::CallbackInfo& info) {
auto cm_outputs = _model->outputs(); // Output<Node>
Napi::Array js_outputs = Napi::Array::New(info.Env(), cm_outputs.size());
size_t i = 0;
for (auto& out : cm_outputs)
js_outputs[i++] = Output<ov::Node>::wrap(info.Env(), out);
return js_outputs;
}

View File

@ -0,0 +1,116 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "node_output.hpp"
#include "addon.hpp"
#include "helper.hpp"
#include "partial_shape_wrap.hpp"
Output<ov::Node>::Output(const Napi::CallbackInfo& info) : Napi::ObjectWrap<Output<ov::Node>>(info), _output{} {}
Napi::Function Output<ov::Node>::get_class_constructor(Napi::Env env) {
return Output::DefineClass(
env,
"Output",
{Output<ov::Node>::InstanceMethod("getShape", &Output<ov::Node>::get_shape),
Output<ov::Node>::InstanceAccessor<&Output<ov::Node>::get_shape>("shape"),
Output<ov::Node>::InstanceMethod("getPartialShape", &Output<ov::Node>::get_partial_shape),
Output<ov::Node>::InstanceMethod("getAnyName", &Output<ov::Node>::get_any_name),
Output<ov::Node>::InstanceAccessor<&Output<ov::Node>::get_any_name>("anyName"),
Output<ov::Node>::InstanceMethod("toString", &Output<ov::Node>::get_any_name)});
}
Napi::Object Output<ov::Node>::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->output_prototype = ref;
exports.Set("Output", prototype);
return exports;
}
ov::Output<ov::Node> Output<ov::Node>::get_output() const {
return _output;
}
Napi::Object Output<ov::Node>::wrap(Napi::Env env, ov::Output<ov::Node> output) {
const auto prototype = env.GetInstanceData<AddonData>()->output_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to Output prototype.");
}
const auto& obj = prototype->New({});
Output* output_ptr = Napi::ObjectWrap<Output>::Unwrap(obj);
output_ptr->_output = output;
return obj;
}
Napi::Value Output<ov::Node>::get_shape(const Napi::CallbackInfo& info) {
return cpp_to_js<ov::Shape, Napi::Array>(info, _output.get_shape());
}
Napi::Value Output<ov::Node>::get_partial_shape(const Napi::CallbackInfo& info) {
return PartialShapeWrap::wrap(info.Env(), _output.get_partial_shape());
}
Napi::Value Output<ov::Node>::get_any_name(const Napi::CallbackInfo& info) {
return Napi::String::New(info.Env(), _output.get_any_name());
}
Output<const ov::Node>::Output(const Napi::CallbackInfo& info)
: Napi::ObjectWrap<Output<const ov::Node>>(info),
_output{} {}
Napi::Function Output<const ov::Node>::get_class_constructor(Napi::Env env) {
return Output::DefineClass(
env,
"ConstOutput",
{Output<const ov::Node>::InstanceMethod("getShape", &Output<const ov::Node>::get_shape),
Output<const ov::Node>::InstanceAccessor<&Output<const ov::Node>::get_shape>("shape"),
Output<const ov::Node>::InstanceMethod("getPartialShape", &Output<const ov::Node>::get_partial_shape),
Output<const ov::Node>::InstanceMethod("getAnyName", &Output<const ov::Node>::get_any_name),
Output<const ov::Node>::InstanceAccessor<&Output<const ov::Node>::get_any_name>("anyName"),
Output<const ov::Node>::InstanceMethod("toString", &Output<const ov::Node>::get_any_name)});
}
Napi::Object Output<const ov::Node>::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->const_output_prototype = ref;
exports.Set("ConstOutput", prototype);
return exports;
}
ov::Output<const ov::Node> Output<const ov::Node>::get_output() const {
return _output;
}
Napi::Object Output<const ov::Node>::wrap(Napi::Env env, ov::Output<const ov::Node> output) {
const auto prototype = env.GetInstanceData<AddonData>()->const_output_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to ConstOutput prototype.");
}
const auto& obj = prototype->New({});
Output* output_ptr = Napi::ObjectWrap<Output>::Unwrap(obj);
output_ptr->_output = output;
return obj;
}
Napi::Value Output<const ov::Node>::get_shape(const Napi::CallbackInfo& info) {
return cpp_to_js<ov::Shape, Napi::Array>(info, _output.get_shape());
}
Napi::Value Output<const ov::Node>::get_partial_shape(const Napi::CallbackInfo& info) {
return PartialShapeWrap::wrap(info.Env(), _output.get_partial_shape());
}
Napi::Value Output<const ov::Node>::get_any_name(const Napi::CallbackInfo& info) {
return Napi::String::New(info.Env(), _output.get_any_name());
}

View File

@ -0,0 +1,73 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "partial_shape_wrap.hpp"
#include "addon.hpp"
PartialShapeWrap::PartialShapeWrap(const Napi::CallbackInfo& info) : Napi::ObjectWrap<PartialShapeWrap>(info) {
const size_t attrs_length = info.Length();
if (attrs_length == 1 && info[0].IsString()) {
try {
const auto& shape = std::string(info[0].ToString());
_partial_shape = ov::PartialShape(shape);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
} else {
reportError(info.Env(), "Invalid parameters for PartialShape constructor.");
}
}
Napi::Function PartialShapeWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"PartialShapeWrap",
{
InstanceMethod("isStatic", &PartialShapeWrap::is_static),
InstanceMethod("isDynamic", &PartialShapeWrap::is_dynamic),
InstanceMethod("toString", &PartialShapeWrap::to_string),
InstanceMethod("getDimensions", &PartialShapeWrap::get_dimensions),
});
}
Napi::Object PartialShapeWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->partial_shape_prototype = ref;
exports.Set("PartialShape", prototype);
return exports;
}
Napi::Object PartialShapeWrap::wrap(Napi::Env env, ov::PartialShape partial_shape) {
const auto prototype = env.GetInstanceData<AddonData>()->partial_shape_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to PartialShape prototype.");
}
auto obj = prototype->New({});
const auto t = Napi::ObjectWrap<PartialShapeWrap>::Unwrap(obj);
t->_partial_shape = partial_shape;
return obj;
}
Napi::Value PartialShapeWrap::is_static(const Napi::CallbackInfo& info) {
return cpp_to_js<bool, Napi::Boolean>(info, _partial_shape.is_static());
}
Napi::Value PartialShapeWrap::is_dynamic(const Napi::CallbackInfo& info) {
return cpp_to_js<bool, Napi::Boolean>(info, _partial_shape.is_dynamic());
}
Napi::Value PartialShapeWrap::to_string(const Napi::CallbackInfo& info) {
return Napi::String::New(info.Env(), _partial_shape.to_string());
}
Napi::Value PartialShapeWrap::get_dimensions(const Napi::CallbackInfo& info) {
return cpp_to_js<ov::PartialShape, Napi::Array>(info, _partial_shape);
}

View File

@ -0,0 +1,59 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/input_info.hpp"
#include "errors.hpp"
#include "preprocess/input_model_info.hpp"
#include "preprocess/input_tensor_info.hpp"
#include "preprocess/preprocess_steps.hpp"
InputInfo::InputInfo(const Napi::CallbackInfo& info) : Napi::ObjectWrap<InputInfo>(info){};
Napi::Function InputInfo::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"InputInfo",
{InstanceMethod("tensor", &InputInfo::tensor),
InstanceMethod("preprocess", &InputInfo::preprocess),
InstanceMethod("model", &InputInfo::model)});
}
Napi::Value InputInfo::tensor(const Napi::CallbackInfo& info) {
if (info.Length() != 0) {
reportError(info.Env(), "Error in tensor(). Function does not take any parameters.");
return info.Env().Undefined();
} else {
Napi::Object obj = InputTensorInfo::get_class_constructor(info.Env()).New({});
auto tensor_info = Napi::ObjectWrap<InputTensorInfo>::Unwrap(obj);
tensor_info->set_input_tensor_info(_input_info->tensor());
return obj;
}
}
Napi::Value InputInfo::preprocess(const Napi::CallbackInfo& info) {
if (info.Length() != 0) {
reportError(info.Env(), "Error in preprocess(). Function does not take any parameters.");
return info.Env().Undefined();
} else {
Napi::Object obj = PreProcessSteps::get_class_constructor(info.Env()).New({});
auto preprocess_info = Napi::ObjectWrap<PreProcessSteps>::Unwrap(obj);
preprocess_info->set_preprocess_info(_input_info->preprocess());
return obj;
}
}
Napi::Value InputInfo::model(const Napi::CallbackInfo& info) {
if (info.Length() != 0) {
reportError(info.Env(), "Error in model(). Function does not take any parameters.");
return info.Env().Undefined();
} else {
Napi::Object obj = InputModelInfo::get_class_constructor(info.Env()).New({});
auto model_info = Napi::ObjectWrap<InputModelInfo>::Unwrap(obj);
model_info->set_input_model_info(_input_info->model());
return obj;
}
}
void InputInfo::set_input_info(ov::preprocess::InputInfo& info) {
_input_info = &info;
}

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/input_model_info.hpp"
#include <iostream>
#include "errors.hpp"
#include "helper.hpp"
InputModelInfo::InputModelInfo(const Napi::CallbackInfo& info) : Napi::ObjectWrap<InputModelInfo>(info){};
Napi::Function InputModelInfo::get_class_constructor(Napi::Env env) {
return DefineClass(env, "InputModelInfo", {InstanceMethod("setLayout", &InputModelInfo::set_layout)});
}
Napi::Value InputModelInfo::set_layout(const Napi::CallbackInfo& info) {
if (info.Length() == 1) {
try {
auto layout = js_to_cpp<ov::Layout>(info, 0, {napi_string});
_model_info->set_layout(layout);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
} else {
reportError(info.Env(), "Error in setLayout(). Wrong number of parameters.");
}
return info.This();
}
void InputModelInfo::set_input_model_info(ov::preprocess::InputModelInfo& info) {
_model_info = &info;
}

View File

@ -0,0 +1,63 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/input_tensor_info.hpp"
#include "errors.hpp"
#include "helper.hpp"
InputTensorInfo::InputTensorInfo(const Napi::CallbackInfo& info) : Napi::ObjectWrap<InputTensorInfo>(info){};
Napi::Function InputTensorInfo::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"InputTensorInfo",
{InstanceMethod("setElementType", &InputTensorInfo::set_element_type),
InstanceMethod("setLayout", &InputTensorInfo::set_layout),
InstanceMethod("setShape", &InputTensorInfo::set_shape)});
}
Napi::Value InputTensorInfo::set_layout(const Napi::CallbackInfo& info) {
if (info.Length() == 1) {
try {
auto layout = js_to_cpp<ov::Layout>(info, 0, {napi_string});
_tensor_info->set_layout(layout);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
} else {
reportError(info.Env(), "Error in setLayout(). Wrong number of parameters.");
}
return info.This();
}
Napi::Value InputTensorInfo::set_shape(const Napi::CallbackInfo& info) {
if (info.Length() == 1) {
try {
auto shape = js_to_cpp<ov::Shape>(info, 0, {napi_int32_array, js_array});
_tensor_info->set_shape(shape);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
} else {
reportError(info.Env(), "Error in setShape(). Wrong number of parameters.");
}
return info.This();
}
Napi::Value InputTensorInfo::set_element_type(const Napi::CallbackInfo& info) {
if (info.Length() == 1) {
try {
auto type = js_to_cpp<ov::element::Type_t>(info, 0, {napi_string});
_tensor_info->set_element_type(type);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
} else {
reportError(info.Env(), "Error in setElementType(). Wrong number of parameters.");
}
return info.This();
}
void InputTensorInfo::set_input_tensor_info(ov::preprocess::InputTensorInfo& info) {
_tensor_info = &info;
}

View File

@ -0,0 +1,29 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/output_info.hpp"
#include "errors.hpp"
#include "preprocess/output_tensor_info.hpp"
OutputInfo::OutputInfo(const Napi::CallbackInfo& info) : Napi::ObjectWrap<OutputInfo>(info){};
Napi::Function OutputInfo::get_class_constructor(Napi::Env env) {
return DefineClass(env, "OutputInfo", {InstanceMethod("tensor", &OutputInfo::tensor)});
}
Napi::Value OutputInfo::tensor(const Napi::CallbackInfo& info) {
if (info.Length() != 0) {
reportError(info.Env(), "Error in tensor(). Function does not take any parameters.");
return info.Env().Undefined();
} else {
Napi::Object obj = OutputTensorInfo::get_class_constructor(info.Env()).New({});
auto tensor_info = Napi::ObjectWrap<OutputTensorInfo>::Unwrap(obj);
tensor_info->set_output_tensor_info(_output_info->tensor());
return obj;
}
}
void OutputInfo::set_output_info(ov::preprocess::OutputInfo& info) {
_output_info = &info;
}

View File

@ -0,0 +1,49 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/output_tensor_info.hpp"
#include "errors.hpp"
#include "helper.hpp"
OutputTensorInfo::OutputTensorInfo(const Napi::CallbackInfo& info) : Napi::ObjectWrap<OutputTensorInfo>(info){};
Napi::Function OutputTensorInfo::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"OutputTensorInfo",
{InstanceMethod("setElementType", &OutputTensorInfo::set_element_type),
InstanceMethod("setLayout", &OutputTensorInfo::set_layout)});
}
Napi::Value OutputTensorInfo::set_layout(const Napi::CallbackInfo& info) {
if (info.Length() == 1) {
try {
auto layout = js_to_cpp<ov::Layout>(info, 0, {napi_string});
_tensor_info->set_layout(layout);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
} else {
reportError(info.Env(), "Error in setLayout(). Wrong number of parameters.");
}
return info.This();
}
Napi::Value OutputTensorInfo::set_element_type(const Napi::CallbackInfo& info) {
if (info.Length() != 1) {
reportError(info.Env(), "Error in setElementType(). Wrong number of parameters.");
return info.Env().Undefined();
}
try {
auto type = js_to_cpp<ov::element::Type_t>(info, 0, {napi_string});
_tensor_info->set_element_type(type);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
return info.Env().Undefined();
}
return info.This();
}
void OutputTensorInfo::set_output_tensor_info(ov::preprocess::OutputTensorInfo& info) {
_tensor_info = &info;
}

View File

@ -0,0 +1,81 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/pre_post_process_wrap.hpp"
#include "addon.hpp"
PrePostProcessorWrap::PrePostProcessorWrap(const Napi::CallbackInfo& info)
: Napi::ObjectWrap<PrePostProcessorWrap>(info) {
if (info.Length() != 1)
reportError(info.Env(), "Invalid number of arguments for PrePostProcessor constructor.");
else {
Napi::Object obj = info[0].ToObject();
auto m = Napi::ObjectWrap<ModelWrap>::Unwrap(obj);
_ppp = std::unique_ptr<ov::preprocess::PrePostProcessor>(new ov::preprocess::PrePostProcessor(m->get_model()));
}
}
Napi::Function PrePostProcessorWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"PrePostProcessorWrap",
{InstanceMethod("input", &PrePostProcessorWrap::input),
InstanceMethod("output", &PrePostProcessorWrap::output),
InstanceMethod("build", &PrePostProcessorWrap::build)});
}
Napi::Object PrePostProcessorWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->ppp_prototype = ref;
exports.Set("PrePostProcessor", prototype);
return exports;
}
Napi::Value PrePostProcessorWrap::input(const Napi::CallbackInfo& info) {
if (info.Length() != 0 && info.Length() != 1) {
reportError(info.Env(), "Wrong number of parameters.");
return info.Env().Undefined();
}
Napi::Object obj = InputInfo::get_class_constructor(info.Env()).New({});
auto input_info = Napi::ObjectWrap<InputInfo>::Unwrap(obj);
if (info.Length() == 0) {
input_info->set_input_info(_ppp->input());
} else if (info[0].IsNumber()) {
input_info->set_input_info(_ppp->input(info[0].ToNumber().Int32Value()));
} else if (info[0].IsString()) {
input_info->set_input_info(_ppp->input(info[0].ToString().Utf8Value()));
} else {
reportError(info.Env(), "Invalid parameter.");
return info.Env().Undefined();
}
return obj;
}
Napi::Value PrePostProcessorWrap::output(const Napi::CallbackInfo& info) {
if (info.Length() != 0 && info.Length() != 1) {
reportError(info.Env(), "Wrong number of parameters.");
return info.Env().Undefined();
}
Napi::Object obj = OutputInfo::get_class_constructor(info.Env()).New({});
auto output_info = Napi::ObjectWrap<OutputInfo>::Unwrap(obj);
if (info.Length() == 0) {
output_info->set_output_info(_ppp->output());
} else if (info[0].IsNumber()) {
output_info->set_output_info(_ppp->output(info[0].ToNumber().Int32Value()));
} else if (info[0].IsString()) {
output_info->set_output_info(_ppp->output(info[0].ToString().Utf8Value()));
} else {
reportError(info.Env(), "Invalid parameter.");
return info.Env().Undefined();
}
return obj;
}
void PrePostProcessorWrap::build(const Napi::CallbackInfo& info) {
_ppp->build();
}
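Each setter returns `this`, so a complete pre/post-processing description chains naturally; a sketch (the layouts and element types are illustrative), given a `model` obtained from `core.readModelSync()`:
const ppp = new ov.preprocess.PrePostProcessor(model);
ppp.input().tensor().setElementType(ov.element.u8).setLayout('NHWC');
ppp.input().model().setLayout('NCHW');
ppp.output().tensor().setElementType(ov.element.f32);
ppp.build(); // applies the steps to the wrapped model in place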

View File

@ -0,0 +1,27 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/preprocess.hpp"
namespace preprocess {
Napi::Object init(Napi::Env env, Napi::Object exports) {
auto preprocess = Napi::PropertyDescriptor::Accessor<add_preprocess_namespace>("preprocess");
exports.DefineProperty(preprocess);
return exports;
}
Napi::Value add_preprocess_namespace(const Napi::CallbackInfo& info) {
Napi::Env env = info.Env();
auto preprocess = Napi::Object::New(env);
auto resizeAlgorithm = Napi::PropertyDescriptor::Accessor<enumResizeAlgorithm>("resizeAlgorithm");
PrePostProcessorWrap::init(env, preprocess);
preprocess.DefineProperty(resizeAlgorithm);
return preprocess;
}
}  // namespace preprocess

View File

@ -0,0 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "errors.hpp"
#include "helper.hpp"
#include "preprocess/preprocess_steps.hpp"
PreProcessSteps::PreProcessSteps(const Napi::CallbackInfo& info) : Napi::ObjectWrap<PreProcessSteps>(info){};
Napi::Function PreProcessSteps::get_class_constructor(Napi::Env env) {
return DefineClass(env, "PreProcessSteps", {InstanceMethod("resize", &PreProcessSteps::resize)});
}
Napi::Value PreProcessSteps::resize(const Napi::CallbackInfo& info) {
if (info.Length() != 1 || !info[0].IsString()) {
reportError(info.Env(), "Error in resize(). Wrong number of parameters.");
return Napi::Value();
}
try {
const auto& algorithm = js_to_cpp<ov::preprocess::ResizeAlgorithm>(info, 0, {napi_string});
_preprocess_info->resize(algorithm);
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
return info.This();
}
void PreProcessSteps::set_preprocess_info(ov::preprocess::PreProcessSteps& info) {
_preprocess_info = &info;
}

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "preprocess/resize_algorithm.hpp"
#include <typeinfo>
#include "openvino/runtime/core.hpp"
Napi::Value enumResizeAlgorithm(const Napi::CallbackInfo& info) {
auto enumObj = Napi::Object::New(info.Env());
std::vector<Napi::PropertyDescriptor> pds;
static const std::array<std::string, 3> resizeAlgorithms = {"RESIZE_LINEAR", "RESIZE_CUBIC", "RESIZE_NEAREST"};
for (auto& algorithm : resizeAlgorithms) {
pds.push_back(
Napi::PropertyDescriptor::Value(algorithm, Napi::String::New(info.Env(), algorithm), napi_default));
}
enumObj.DefineProperties(pds);
return enumObj;
}

View File

@ -0,0 +1,148 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include "tensor.hpp"
#include "addon.hpp"
TensorWrap::TensorWrap(const Napi::CallbackInfo& info) : Napi::ObjectWrap<TensorWrap>(info) {
if (info.Length() == 0) {
return;
}
if (info.Length() == 1 || info.Length() > 3) {
reportError(info.Env(), "Invalid number of arguments for Tensor constructor.");
return;
}
try {
const auto type = js_to_cpp<ov::element::Type_t>(info, 0, {napi_string});
const auto shape_vec = js_to_cpp<std::vector<size_t>>(info, 1, {napi_int32_array, napi_uint32_array, js_array});
const auto& shape = ov::Shape(shape_vec);
if (info.Length() == 2) {
this->_tensor = ov::Tensor(type, shape);
} else if (info.Length() == 3) {
if (!info[2].IsTypedArray()) {
reportError(info.Env(), "Third argument of a tensor must be of type TypedArray.");
return;
}
const auto data = info[2].As<Napi::TypedArray>();
this->_tensor = cast_to_tensor(data, shape, type);
}
} catch (std::invalid_argument& e) {
reportError(info.Env(), std::string("Invalid tensor argument. ") + e.what());
} catch (std::exception& e) {
reportError(info.Env(), e.what());
}
}
Napi::Function TensorWrap::get_class_constructor(Napi::Env env) {
return DefineClass(env,
"TensorWrap",
{InstanceAccessor<&TensorWrap::get_data>("data"),
InstanceMethod("getData", &TensorWrap::get_data),
InstanceMethod("getShape", &TensorWrap::get_shape),
InstanceMethod("getElementType", &TensorWrap::get_element_type)});
}
Napi::Object TensorWrap::init(Napi::Env env, Napi::Object exports) {
const auto& prototype = get_class_constructor(env);
const auto ref = new Napi::FunctionReference();
*ref = Napi::Persistent(prototype);
const auto data = env.GetInstanceData<AddonData>();
data->tensor_prototype = ref;
exports.Set("Tensor", prototype);
return exports;
}
ov::Tensor TensorWrap::get_tensor() const {
return this->_tensor;
}
void TensorWrap::set_tensor(const ov::Tensor& tensor) {
_tensor = tensor;
}
Napi::Object TensorWrap::wrap(Napi::Env env, ov::Tensor tensor) {
const auto prototype = env.GetInstanceData<AddonData>()->tensor_prototype;
if (!prototype) {
OPENVINO_THROW("Invalid pointer to Tensor prototype.");
}
auto tensor_js = prototype->New({});
const auto t = Napi::ObjectWrap<TensorWrap>::Unwrap(tensor_js);
t->set_tensor(tensor);
return tensor_js;
}
Napi::Value TensorWrap::get_data(const Napi::CallbackInfo& info) {
auto type = _tensor.get_element_type();
switch (type) {
case ov::element::Type_t::i8: {
auto arr = Napi::Int8Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::u8: {
auto arr = Napi::Uint8Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::i16: {
auto arr = Napi::Int16Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::u16: {
auto arr = Napi::Uint16Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::i32: {
auto arr = Napi::Int32Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::u32: {
auto arr = Napi::Uint32Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::f32: {
auto arr = Napi::Float32Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::f64: {
auto arr = Napi::Float64Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::i64: {
auto arr = Napi::BigInt64Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
case ov::element::Type_t::u64: {
auto arr = Napi::BigUint64Array::New(info.Env(), _tensor.get_size());
std::memcpy(arr.Data(), _tensor.data(), _tensor.get_byte_size());
return arr;
}
default: {
reportError(info.Env(), "Failed to return tensor data.");
return info.Env().Null();
}
}
}
Napi::Value TensorWrap::get_shape(const Napi::CallbackInfo& info) {
return cpp_to_js<ov::Shape, Napi::Array>(info, _tensor.get_shape());
}
Napi::Value TensorWrap::get_element_type(const Napi::CallbackInfo& info) {
return cpp_to_js<ov::element::Type_t, Napi::String>(info, _tensor.get_element_type());
}
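Note that `data`/`getData()` copy the tensor's memory into a fresh TypedArray on every call, so mutating the result never touches the tensor; a sketch:
const t = new ov.Tensor(ov.element.f32, [2, 2], Float32Array.from([1, 2, 3, 4]));
const copy = t.getData(); // Float32Array [1, 2, 3, 4]
copy[0] = 42; // modifies the copy only
console.log(t.getData()[0]); // still 1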

View File

@ -0,0 +1,182 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const { addon: ov } = require('..');
const assert = require('assert');
const { describe, it } = require('node:test');
const { getModelPath } = require('./utils.js');
const testXml = getModelPath().xml;
const core = new ov.Core();
const model = core.readModelSync(testXml);
const compiledModel = core.compileModelSync(model, 'CPU');
const modelLike = [[model],
[compiledModel]];
it('CompiledModel type', () => {
assert.ok(compiledModel instanceof ov.CompiledModel);
});
it('compileModel.createInferRequest()', () => {
const ir = compiledModel.createInferRequest();
assert.ok(ir instanceof ov.InferRequest);
});
describe('Core.compileModelSync()', () => {
const tput = { 'PERFORMANCE_HINT': 'THROUGHPUT' };
it('compileModelSync(model:Model, deviceName: string, config: {}) ', () => {
const cm = core.compileModelSync(model, 'CPU', tput);
assert.deepStrictEqual(cm.output(0).shape, [1, 10]);
});
it('compileModelSync(model:model_path, deviceName: string, config: {}) ', () => {
const cm = core.compileModelSync(testXml, 'CPU', tput);
assert.equal(cm.inputs.length, 1);
});
it('compileModelSync(model:model_path, deviceName: string) ', () => {
const cm = core.compileModelSync(testXml, 'CPU');
assert.deepStrictEqual(cm.output(0).shape, [1, 10]);
});
it('compileModelSync(model, device, config) throws when config is a string', () => {
assert.throws(
() => core.compileModelSync(model, 'CPU', 'string'),
/Cannot convert Napi::Value to std::map<std::string, ov::Any>/
);
});
it('compileModelSync(model, device, config) throws when config value is not a string', () => {
assert.throws(
() => core.compileModelSync(model, 'CPU', { 'PERFORMANCE_HINT': tput }),
/Cannot convert Napi::Value to ov::Any/
);
});
it('compileModelSync(model) throws if the number of arguments is invalid', () => {
assert.throws(
() => core.compileModelSync(model),
/Invalid number of arguments/
);
});
});
describe('Core.compileModel()', () => {
const tput = { 'PERFORMANCE_HINT': 'THROUGHPUT' };
it('compileModel(model:Model, deviceName: string, config: {}) ', () => {
core.compileModel(model, 'CPU', tput).then(cm => {
assert.deepStrictEqual(cm.output(0).shape, [1, 10]);
});
});
it('compileModel(model:model_path, deviceName: string, config: {}) ', () => {
core.compileModel(testXml, 'CPU', tput).then(cm => {
assert.equal(cm.inputs.length, 1);
});
});
it('compileModel(model:model_path, deviceName: string) ', () => {
core.compileModel(testXml, 'CPU').then(cm => {
assert.deepStrictEqual(cm.output(0).shape, [1, 10]);
});
});
it('compileModel(model, device, config) throws when config isn\'t an object', () => {
assert.throws(
() => core.compileModel(model, 'CPU', 'string').then(),
/Cannot convert Napi::Value to std::map<std::string, ov::Any>/
);
});
it('compileModel(model, device, config) throws when config value is not a string', () => {
assert.throws(
() => core.compileModel(model, 'CPU', { 'PERFORMANCE_HINT': tput }).then(),
/Cannot convert Napi::Value to ov::Any/
);
});
it('compileModel(model) throws if the number of arguments is invalid', () => {
assert.throws(
() => core.compileModel(model).then(),
/Invalid number of arguments/
);
});
});
describe('Output class', () => {
it('Output type', () => {
assert.ok(model.output() instanceof ov.Output);
});
it('ConstOutput type', () => {
assert.ok(compiledModel.output() instanceof ov.ConstOutput);
});
modelLike.forEach(([obj]) => {
it('Output getters and properties', () => {
assert.strictEqual(obj.outputs.length, 1);
// tests for an obj with one output
assert.strictEqual(obj.output().toString(), 'fc_out');
assert.strictEqual(obj.output(0).toString(), 'fc_out');
assert.strictEqual(obj.output('fc_out').toString(), 'fc_out');
assert.deepStrictEqual(obj.output(0).shape, [1, 10]);
assert.deepStrictEqual(obj.output(0).getShape(), [1, 10]);
assert.strictEqual(obj.output().getAnyName(), 'fc_out');
assert.strictEqual(obj.output().anyName, 'fc_out');
});
});
});
describe('Input class for ov::Input<const ov::Node>', () => {
it('Output type', () => {
assert.ok(model.input() instanceof ov.Output);
});
it('ConstOutput type', () => {
assert.ok(compiledModel.input() instanceof ov.ConstOutput);
});
modelLike.forEach(([obj]) => {
it('input() is typeof object', () => {
assert.strictEqual(typeof obj.input(), 'object');
});
it('inputs property', () => {
assert.strictEqual(obj.inputs.length, 1);
});
it('input().toString()', () => {
assert.strictEqual(obj.input().toString(), 'data');
});
it('input(idx: number).ToString() method', () => {
assert.strictEqual(obj.input(0).toString(), 'data');
});
it('input(tensorName: string).ToString() method', () => {
assert.strictEqual(obj.input('data').toString(), 'data');
});
it('input().getAnyName() and anyName', () => {
assert.strictEqual(obj.input().getAnyName(), 'data');
assert.strictEqual(obj.input().anyName, 'data');
});
it('input(idx).shape property with dimensions', () => {
assert.deepStrictEqual(obj.input(0).shape, [1, 3, 32, 32]);
assert.deepStrictEqual(obj.input(0).getShape(), [1, 3, 32, 32]);
});
});
});

View File

@ -0,0 +1,224 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const { addon: ov } = require('..');
const assert = require('assert');
const { describe, it } = require('node:test');
const { getModelPath } = require('./utils.js');
const epsilon = 0.5; // To avoid very small numbers
const testXml = getModelPath().xml;
const core = new ov.Core();
const model = core.readModelSync(testXml);
const compiledModel = core.compileModelSync(model, 'CPU');
const inferRequest = compiledModel.createInferRequest();
const inferRequestAsync = compiledModel.createInferRequest();
const tensorData = Float32Array.from({ length: 3072 }, () => (Math.random() + epsilon));
const tensor = new ov.Tensor(
ov.element.f32,
[1, 3, 32, 32],
tensorData,
);
const resTensor = new ov.Tensor(
ov.element.f32,
[1, 10],
tensorData.slice(-10),
);
const tensorLike = [[tensor],
[tensorData]];
describe('InferRequest', () => {
tensorLike.forEach(([tl]) => {
const result = inferRequest.infer({ data: tl });
const label = tl instanceof Float32Array ? 'TypedArray[]' : 'Tensor[]';
it(`Test infer(inputData: { [inputName: string]: ${label} })`, () => {
assert.deepStrictEqual(Object.keys(result), ['fc_out']);
assert.deepStrictEqual(result['fc_out'].data.length, 10);
});
});
tensorLike.forEach(([tl]) => {
const result = inferRequest.infer([tl]);
const label = tl instanceof Float32Array ? 'TypedArray[]' : 'Tensor[]';
it(`Test infer(inputData: ${label})`, () => {
assert.deepStrictEqual(Object.keys(result), ['fc_out']);
assert.deepStrictEqual(result['fc_out'].data.length, 10);
});
});
it('Test infer(TypedArray) throws', () => {
assert.throws(
() => inferRequest.infer(tensorData),
{message: 'TypedArray cannot be passed directly into infer() method.'});
});
const buffer = new ArrayBuffer(tensorData.length);
const inputMessagePairs = [
['string', 'Cannot create a tensor from the passed Napi::Value.'],
[tensorData.slice(-10), 'Memory allocated using shape and element::type doesn\'t match the passed data\'s size'],
[new Float32Array(buffer, 4), 'TypedArray.byteOffset has to be equal to zero.'],
];
inputMessagePairs.forEach( ([tl, msg]) => {
it(`Test infer([data]) throws ${msg}`, () => {
assert.throws(
() => inferRequest.infer([tl]),
{message: new RegExp(msg)});
});
it(`Test infer({ data: tl}) throws ${msg}`, () => {
assert.throws(
() => inferRequest.infer({data: tl}),
{message: new RegExp(msg)});
});
});
it('Test inferAsync(inputData: { [inputName: string]: Tensor })', () => {
inferRequestAsync.inferAsync({ data: tensor }).then(result => {
assert.ok(result['fc_out'] instanceof ov.Tensor);
assert.deepStrictEqual(Object.keys(result), ['fc_out']);
assert.deepStrictEqual(result['fc_out'].data.length, 10);}
);
});
it('Test inferAsync(inputData: Tensor[])', () => {
inferRequestAsync.inferAsync([ tensor ]).then(result => {
assert.ok(result['fc_out'] instanceof ov.Tensor);
assert.deepStrictEqual(Object.keys(result), ['fc_out']);
assert.deepStrictEqual(result['fc_out'].data.length, 10);
});
});
it('Test inferAsync([data]) throws: Cannot create a tensor from the passed Napi::Value.', () => {
assert.throws(
() => inferRequestAsync.inferAsync(['string']).then(),
/Cannot create a tensor from the passed Napi::Value./
);
});
it('Test inferAsync({ data: "string"}) throws: Cannot create a tensor from the passed Napi::Value.', () => {
assert.throws(
() => inferRequestAsync.inferAsync({data: 'string'}).then(),
/Cannot create a tensor from the passed Napi::Value./
);
});
it('Test setInputTensor(tensor)', () => {
inferRequest.setInputTensor(tensor);
const t1 = inferRequest.getInputTensor();
assert.deepStrictEqual(tensor.data[0], t1.data[0]);
});
it('Test setInputTensor(idx, tensor)', () => {
inferRequest.setInputTensor(0, tensor);
const t1 = inferRequest.getInputTensor();
assert.deepStrictEqual(tensor.data[0], t1.data[0]);
});
it('Test setInputTensor() - pass two tensors', () => {
assert.throws(
() => inferRequest.setInputTensor(resTensor, tensor),
{message: 'InferRequest.setInputTensor() invalid argument.'});
});
it('Test setInputTensor() - pass number as a single arg', () => {
assert.throws(
() => inferRequest.setInputTensor(123),
{message: 'InferRequest.setInputTensor() invalid argument.'});
});
it('Test setOutputTensor(tensor)', () => {
inferRequest.setOutputTensor(resTensor);
const res2 = inferRequest.getOutputTensor();
assert.deepStrictEqual(resTensor.data[0], res2.data[0]);
});
it('Test setOutputTensor(idx, tensor)', () => {
inferRequest.setOutputTensor(0, resTensor);
const res2 = inferRequest.getOutputTensor();
assert.deepStrictEqual(resTensor.data[0], res2.data[0]);
});
it('Test setOutputTensor() - pass two tensors', () => {
assert.throws(
() => inferRequest.setOutputTensor(resTensor, tensor),
{message: 'InferRequest.setOutputTensor() invalid argument.'});
});
it('Test setTensor(string, tensor)', () => {
inferRequest.setTensor('fc_out', resTensor);
const res2 = inferRequest.getTensor('fc_out');
assert.ok(res2 instanceof ov.Tensor);
assert.deepStrictEqual(resTensor.data[0], res2.data[0]);
});
it('Test setTensor(string, tensor) - pass one arg', () => {
assert.throws(
() => inferRequest.setTensor('fc_out'),
{message: 'InferRequest.setTensor() invalid argument.'});
});
it('Test setTensor(string, tensor) - pass args in wrong order', () => {
assert.throws(
() => inferRequest.setTensor(resTensor, 'fc_out'),
{message: 'InferRequest.setTensor() invalid argument.'});
});
it('Test setTensor(string, tensor) - pass number as first arg', () => {
assert.throws(
() => inferRequest.setTensor(123, 'fc_out'),
{message: 'InferRequest.setTensor() invalid argument.'});
});
const irGetters = compiledModel.createInferRequest();
irGetters.setInputTensor(tensor);
irGetters.infer();
it('Test getTensor(tensorName)', () => {
const t1 = irGetters.getTensor('data');
assert.ok(t1 instanceof ov.Tensor);
assert.deepStrictEqual(tensor.data[0], t1.data[0]);
});
it('Test getTensor(Output)', () => {
const input = irGetters.getCompiledModel().input();
const t1 = irGetters.getTensor(input);
assert.ok(t1 instanceof ov.Tensor);
assert.deepStrictEqual(tensor.data[0], t1.data[0]);
});
it('Test getInputTensor()', () => {
const t1 = irGetters.getInputTensor();
assert.ok(t1 instanceof ov.Tensor);
assert.deepStrictEqual(tensor.data[0], t1.data[0]);
});
it('Test getInputTensor(idx)', () => {
const t1 = irGetters.getInputTensor(0);
assert.ok(t1 instanceof ov.Tensor);
assert.deepStrictEqual(tensor.data[0], t1.data[0]);
});
it('Test getOutputTensor(idx?)', () => {
const res1 = irGetters.getOutputTensor();
const res2 = irGetters.getOutputTensor(0);
assert.ok(res1 instanceof ov.Tensor);
assert.ok(res2 instanceof ov.Tensor);
assert.deepStrictEqual(res1.data[0], res2.data[0]);
});
it('Test getCompiledModel()', () => {
const ir = compiledModel.createInferRequest();
const cm = ir.getCompiledModel();
assert.ok(cm instanceof ov.CompiledModel);
const ir2 = cm.createInferRequest();
const res2 = ir2.infer([tensorData]);
const res1 = ir.infer([tensorData]);
assert.deepStrictEqual(res1['fc_out'].data[0], res2['fc_out'].data[0]);
});
});
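// Editorial sketch, not part of the committed test file: the async flow the
// tests above exercise, driven end to end. The model path, the 'CPU' device
// string and the async core.compileModel() call are assumptions here; only
// readModel(), createInferRequest(), inferAsync() and the Tensor constructor
// are confirmed by this suite.
async function exampleInferAsync() {
const core = new ov.Core();
const model = await core.readModel('tests/test_models/test_model_fp32.xml');
const compiled = await core.compileModel(model, 'CPU'); // assumed async variant
const ir = compiled.createInferRequest();
// The test model expects a [1, 3, 32, 32] f32 input; zeros stand in for real data.
const input = new ov.Tensor(ov.element.f32, [1, 3, 32, 32], new Float32Array(1 * 3 * 32 * 32));
const result = await ir.inferAsync({ data: input });
return result['fc_out'].data; // 10 softmax scores
}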

View File

@ -0,0 +1,48 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const { addon: ov } = require('..');
const assert = require('assert');
const { describe, it } = require('node:test');
const staticShape = '1, 3, 224, 224';
const dynamicShape = '?, -1, 1..3, 224';
describe('PartialShape', () => {
it('Should detect static shape', () => {
const partialShape = new ov.PartialShape(staticShape);
assert.ok(partialShape.isStatic());
});
it('Should detect dynamic shape', () => {
const partialShape = new ov.PartialShape(dynamicShape);
assert.strictEqual(partialShape.isStatic(), false);
});
it('Should return shape as string for static shape', () => {
const partialShape = new ov.PartialShape(staticShape);
assert.strictEqual(partialShape.toString(), '[1,3,224,224]');
});
it('Should return shape as string for dynamic shape', () => {
const partialShape = new ov.PartialShape(dynamicShape);
assert.strictEqual(partialShape.toString(), '[?,?,1..3,224]');
});
it('Should return array with dimensions for static shape', () => {
const partialShape = new ov.PartialShape(staticShape);
assert.deepStrictEqual(partialShape.getDimensions(), [1,3,224,224]);
});
it('Should return array with dimensions for dynamic shape', () => {
const partialShape = new ov.PartialShape(dynamicShape);
assert.deepStrictEqual(partialShape.getDimensions(), [-1,-1,[1,3],224]);
});
});
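// Editorial sketch, not part of the committed test file: the dimension
// encoding the assertions above pin down. '?' and '-1' both denote a fully
// dynamic dimension and come back as -1; a range like '1..3' comes back as
// a [min, max] pair from getDimensions().
const ps = new ov.PartialShape('?, -1, 1..3, 224');
console.log(ps.isStatic());       // false
console.log(ps.toString());       // '[?,?,1..3,224]'
console.log(ps.getDimensions());  // [-1, -1, [1, 3], 224]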

View File

@ -0,0 +1,201 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const { addon: ov } = require('..');
const assert = require('assert');
const { describe, it } = require('node:test');
const { getModelPath } = require('./utils.js');
const testXml = getModelPath().xml;
const core = new ov.Core();
const model = core.readModelSync(testXml);
describe('PrePostProcess', () => {
it('input() ', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input());
});
it('input(size_t input_index)', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0));
});
it('input(const std::string& tensor_name)', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input('data'));
});
it('output() ', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).output());
});
it('output(size_t output_index)', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).output(0));
});
it('output(const std::string& tensor_name)', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).output('fc_out'));
});
});
describe('InputInfo', () => {
it('tensor()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor());
});
it('preprocess()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).preprocess());
});
it('model()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).model());
});
it('tensor(param) throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor(0),
/Function does not take any parameters./);
});
it('preprocess(param) throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).preprocess(0),
/Function does not take any parameters./);
});
it('model(param) throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).model(0),
/Function does not take any parameters./);
});
it('tensor().setElementType()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setElementType(ov.element.u8));
});
it('tensor().setElementType() with no argument throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setElementType(),
/Wrong number of parameters./);
});
it('tensor().setElementType() with invalid type throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setElementType('invalidType'),
/Cannot create ov::element::Type/);
});
it('tensor().setShape()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setShape([1, 10]));
});
it('tensor().setShape() throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setShape(),
/Wrong number of parameters./);
});
it('tensor().setLayout()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setLayout('NHWC'));
});
it('tensor().setLayout() throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).tensor().setLayout(),
/Wrong number of parameters./);
});
it('preprocess().resize()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).preprocess().resize(
ov.preprocess.resizeAlgorithm.RESIZE_LINEAR,
));
});
it('preprocess().resize() throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).preprocess().resize(
ov.preprocess.resizeAlgorithm.RESIZE_LINEAR, 'extraArg',
),
/Wrong number of parameters./);
});
it('preprocess().resize() no arg throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).preprocess().resize(),
/Wrong number of parameters./);
});
it('model().setLayout()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).input(0).model().setLayout('NCHW'));
});
it('model().setLayout() with extra argument throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).model().setLayout('NCHW', 'extraArg'),
/Wrong number of parameters./);
});
it('model().setLayout() with invalid layout throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).input(0).model().setLayout('invalidLayout')
);
});
});
describe('OutputInfo', () => {
it('tensor()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor());
});
it('tensor(param) throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor(0),
/Function does not take any parameters./);
});
it('tensor().setElementType()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor().setElementType(ov.element.u8));
});
it('tensor().setElementType() with no argument throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor().setElementType(),
/Wrong number of parameters./);
});
it('tensor().setElementType() with invalid type throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor().setElementType('invalidType'),
/Cannot create ov::element::Type/);
});
it('tensor().setLayout()', () => {
assert.doesNotThrow(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor().setLayout('NHWC'));
});
it('tensor().setLayout() throws', () => {
assert.throws(() =>
new ov.preprocess.PrePostProcessor(model).output(0).tensor().setLayout(),
/Wrong number of parameters./);
});
});
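// Editorial sketch, not part of the committed test file: the calls covered
// above composed into one preprocessing pipeline. Reusing a single
// PrePostProcessor instance this way and the final build() step are
// assumptions — the tests only chain from fresh instances and never call
// build().
const ppp = new ov.preprocess.PrePostProcessor(model);
ppp.input(0).tensor().setElementType(ov.element.u8);
ppp.input(0).tensor().setLayout('NHWC');
ppp.input(0).preprocess().resize(ov.preprocess.resizeAlgorithm.RESIZE_LINEAR);
ppp.input(0).model().setLayout('NCHW');
// ppp.build(); // assumed to apply the steps to `model`, as in the C++ API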

View File

@ -0,0 +1,58 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const fs = require('node:fs');
const { addon: ov } = require('..');
const assert = require('assert');
const { describe, it } = require('node:test');
const { getModelPath } = require('./utils.js');
const { xml: modelPath, bin: weightsPath } = getModelPath();
const modelFile = fs.readFileSync(modelPath);
const weightsFile = fs.readFileSync(weightsPath);
const core = new ov.Core();
describe('Core.readModelSync', () => {
it('readModelSync(xmlPath)', () => {
const model = core.readModelSync(modelPath);
assert.ok(model instanceof ov.Model);
assert.equal(model.inputs.length, 1);
});
it('readModelSync(xmlPath, weightsPath)', () => {
const model = core.readModelSync(modelPath, weightsPath);
assert.ok(model instanceof ov.Model);
assert.equal(model.inputs.length, 1);
});
it('readModelSync(modelUint8ArrayBuffer, weightsUint8ArrayBuffer)', () => {
const model = core.readModelSync(
new Uint8Array(modelFile.buffer),
new Uint8Array(weightsFile.buffer),
);
assert.ok(model instanceof ov.Model);
assert.equal(model.inputs.length, 1);
});
});
describe('Core.readModel', () => {
it('readModel(xmlPath) ', async () => {
const model = await core.readModel(modelPath);
assert.equal(model.inputs.length, 1);
});
it('readModel(xmlPath, weightsPath) ', async () => {
const model = await core.readModel(modelPath, weightsPath);
assert.equal(model.inputs.length, 1);
});
it('readModel(modelUint8ArrayBuffer, weightsUint8ArrayBuffer)', async () => {
const model = await core.readModel(
new Uint8Array(modelFile.buffer),
new Uint8Array(weightsFile.buffer),
);
assert.equal(model.inputs.length, 1);
});
});
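// Editorial sketch, not part of the committed test file: reading an IR from
// in-memory buffers, as the last test does — useful when the model arrives
// from a network fetch rather than disk. The byteOffset/byteLength view is a
// defensive variant: Node.js may back small Buffers by a shared pool, so
// wrapping `buffer` alone can capture extra bytes.
const xmlBuf = fs.readFileSync(modelPath);
const binBuf = fs.readFileSync(weightsPath);
const xmlView = new Uint8Array(xmlBuf.buffer, xmlBuf.byteOffset, xmlBuf.byteLength);
const binView = new Uint8Array(binBuf.buffer, binBuf.byteOffset, binBuf.byteLength);
core.readModel(xmlView, binView).then((m) => console.log(m.inputs.length)); // 1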

View File

@ -0,0 +1,138 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const { addon: ov } = require('..');
const assert = require('assert');
const { test, describe, it } = require('node:test');
const getRandomBigInt = require('random-bigint');
const shape = [1, 3, 224, 224];
const elemNum = 1 * 3 * 224 * 224;
const data = Float32Array.from({ length: elemNum }, () => Math.random() );
const params = [
[ov.element.i8, 'i8', Int8Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.u8, 'u8', Uint8Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.i16, 'i16', Int16Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.u16, 'u16', Uint16Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.i32, 'i32', Int32Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.u32, 'u32', Uint32Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.f32, 'f32', Float32Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.f64, 'f64', Float64Array.from({ length: elemNum }, () => Math.random() )],
[ov.element.i64, 'i64', BigInt64Array.from({ length: elemNum }, () => getRandomBigInt(10) )],
[ov.element.u64, 'u64', BigUint64Array.from({ length: elemNum }, () => getRandomBigInt(10) )],
];
test('Test for number of arguments in tensor', () => {
assert.throws( () => new ov.Tensor(ov.element.f32),
{message: 'Invalid number of arguments for Tensor constructor.'});
});
describe('Tensor without data parameters', () => {
it('Tensor should have array with zeros and numbers of elements according to the shape', () => {
const tensor = new ov.Tensor(ov.element.f32, shape);
assert.strictEqual(tensor.data.length, elemNum);
});
});
describe('Tensor data', () => {
params.forEach(([type, stringType, data]) => {
it(`Set tensor data with ${stringType} element type`, () => {
const tensor = new ov.Tensor(type, shape, data);
assert.deepStrictEqual(tensor.data, data);
});
});
it('Test tensor getData()', () => {
const tensor = new ov.Tensor(ov.element.f32, shape, data);
assert.deepStrictEqual(tensor.getData(), data);
});
it('Set tensor data with Float32Array created from ArrayBuffer', () => {
const size = elemNum * 4;
const buffer = new ArrayBuffer(size);
const view = new Float32Array(buffer);
view.set(data);
const tensor = new ov.Tensor(ov.element.f32, shape, view);
assert.deepStrictEqual(tensor.data, data);
});
it('Set tensor data with too big Float32Array', () => {
const size = elemNum * 8;
const buffer = new ArrayBuffer(size);
const view = new Float32Array(buffer);
view.set(data);
assert.throws( () => new ov.Tensor(ov.element.f32, shape, view),
{message: /Memory allocated using shape and element::type mismatch/});
});
it('Third argument of a tensor cannot be an ArrayBuffer', () => {
assert.throws(
() => new ov.Tensor(ov.element.f32, shape, new ArrayBuffer(1234)),
{message: 'Third argument of a tensor must be of type TypedArray.'});
});
it('Third argument of a tensor cannot be an array object', () => {
assert.throws(
() => new ov.Tensor(ov.element.f32, shape, [1, 2, 3, 4]),
{message: 'Third argument of a tensor must be of type TypedArray.'});
});
});
describe('Tensor shape', () => {
it('ov::Shape from an array object', () => {
const tensor = new ov.Tensor(ov.element.f32, [1, 3, 224, 224], data);
assert.deepStrictEqual(tensor.getShape(), [1, 3, 224, 224]);
});
it('ov::Shape from an array object with floating point numbers', () => {
const tensor =
new ov.Tensor(ov.element.f32, [1, 3.0, 224.8, 224.4], data);
assert.deepStrictEqual(tensor.getShape(), [1, 3, 224, 224]);
});
it('Array argument to create ov::Shape can only contain numbers', () => {
assert.throws(
() => new ov.Tensor(ov.element.f32, ['1', 3, 224, 224], data),
{message: /Passed array must contain only numbers/});
});
it('ov::Shape from TypedArray -> Int32Array', () => {
const shp = Int32Array.from([1, 224, 224, 3]);
const tensor = new ov.Tensor(ov.element.f32, shp, data);
assert.deepStrictEqual(tensor.getShape(), [1, 224, 224, 3]);
});
it('Cannot create ov::Shape from Float32Array', () => {
const shape = Float32Array.from([1, 224, 224, 3]);
assert.throws(
() => new ov.Tensor(ov.element.f32, shape, data),
/Cannot convert argument./
);
});
it('Cannot create ov::Shape from ArrayBuffer', () => {
const shape = Int32Array.from([1, 224, 224, 3]);
assert.throws(
() => new ov.Tensor(ov.element.f32, shape.buffer, data),
/Cannot convert argument./
);
});
});
describe('Tensor element type', () => {
params.forEach(([elemType, val]) => {
it(`Comparison of ov.element.${elemType} to string ${val}`, () => {
assert.strictEqual(elemType, val);
});
});
params.forEach(([elemType, , data]) => {
it(`Comparison of ov.element.${elemType} retrieved from a Tensor object`, () => {
const tensor = new ov.Tensor(elemType, shape, data);
assert.strictEqual(tensor.getElementType(), elemType);
});
});
});
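// Editorial sketch, not part of the committed test file: the contract the
// suite above encodes — the TypedArray must match the element type, and its
// length must equal the product of the shape dimensions.
const n = 1 * 3 * 2 * 2;
const t = new ov.Tensor(ov.element.f32, [1, 3, 2, 2], Float32Array.from({ length: n }, (_, i) => i));
console.log(t.getShape());        // [1, 3, 2, 2]
console.log(t.getElementType());  // strictly equal to ov.element.f32 per the tests above
console.log(t.data.length === n); // true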

View File

@ -0,0 +1,467 @@
<?xml version="1.0" ?>
<net name="test_model" version="10">
<layers>
<layer id="0" name="data" type="Parameter" version="opset1">
<data element_type="f32" shape="1,3,32,32"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>32</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="1" name="20/mean/Fused_Mul_614616_const" type="Const" version="opset1">
<data element_type="f32" offset="0" shape="16,3,5,5" size="4800"/>
<output>
<port id="1" precision="FP32">
<dim>16</dim>
<dim>3</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="2" name="19/WithoutBiases" type="Convolution" version="opset1">
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
<input>
<port id="0">
<dim>1</dim>
<dim>3</dim>
<dim>32</dim>
<dim>32</dim>
</port>
<port id="1">
<dim>16</dim>
<dim>3</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>32</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="3" name="data_add_575/copy_const" type="Const" version="opset1">
<data element_type="f32" offset="4800" shape="1,16,1,1" size="64"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="4" name="19/Fused_Add_" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>16</dim>
<dim>32</dim>
<dim>32</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>16</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>32</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="5" name="21" type="ReLU" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>16</dim>
<dim>32</dim>
<dim>32</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>32</dim>
<dim>32</dim>
</port>
</output>
</layer>
<layer id="6" name="22" type="MaxPool" version="opset1">
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
<input>
<port id="0">
<dim>1</dim>
<dim>16</dim>
<dim>32</dim>
<dim>32</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>16</dim>
<dim>16</dim>
<dim>16</dim>
</port>
</output>
</layer>
<layer id="7" name="onnx_initializer_node_8/Output_0/Data__const" type="Const" version="opset1">
<data element_type="f32" offset="4864" shape="32,16,5,5" size="51200"/>
<output>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>16</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</output>
</layer>
<layer id="8" name="23/WithoutBiases" type="Convolution" version="opset1">
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
<input>
<port id="0">
<dim>1</dim>
<dim>16</dim>
<dim>16</dim>
<dim>16</dim>
</port>
<port id="1">
<dim>32</dim>
<dim>16</dim>
<dim>5</dim>
<dim>5</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>16</dim>
<dim>16</dim>
</port>
</output>
</layer>
<layer id="9" name="23/Dims357/copy_const" type="Const" version="opset1">
<data element_type="f32" offset="56064" shape="1,32,1,1" size="128"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="10" name="23" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>32</dim>
<dim>16</dim>
<dim>16</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>16</dim>
<dim>16</dim>
</port>
</output>
</layer>
<layer id="11" name="25/mean/Fused_Mul_618620_const" type="Const" version="opset1">
<data element_type="f32" offset="56192" shape="64,32,3,3" size="73728"/>
<output>
<port id="1" precision="FP32">
<dim>64</dim>
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="12" name="24/WithoutBiases" type="Convolution" version="opset1">
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
<input>
<port id="0">
<dim>1</dim>
<dim>32</dim>
<dim>16</dim>
<dim>16</dim>
</port>
<port id="1">
<dim>64</dim>
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>18</dim>
<dim>18</dim>
</port>
</output>
</layer>
<layer id="13" name="data_add_578583/copy_const" type="Const" version="opset1">
<data element_type="f32" offset="129920" shape="1,64,1,1" size="256"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="14" name="24/Fused_Add_" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>64</dim>
<dim>18</dim>
<dim>18</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>64</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>18</dim>
<dim>18</dim>
</port>
</output>
</layer>
<layer id="15" name="26" type="ReLU" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>64</dim>
<dim>18</dim>
<dim>18</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>18</dim>
<dim>18</dim>
</port>
</output>
</layer>
<layer id="16" name="27" type="MaxPool" version="opset1">
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
<input>
<port id="0">
<dim>1</dim>
<dim>64</dim>
<dim>18</dim>
<dim>18</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>9</dim>
<dim>9</dim>
</port>
</output>
</layer>
<layer id="17" name="28/Reshape/Cast_1955_const" type="Const" version="opset1">
<data element_type="i64" offset="130176" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="18" name="28/Reshape" type="Reshape" version="opset1">
<data special_zero="True"/>
<input>
<port id="0">
<dim>1</dim>
<dim>64</dim>
<dim>9</dim>
<dim>9</dim>
</port>
<port id="1">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>5184</dim>
</port>
</output>
</layer>
<layer id="19" name="onnx_initializer_node_17/Output_0/Data__const" type="Const" version="opset1">
<data element_type="f32" offset="130192" shape="10,5184" size="207360"/>
<output>
<port id="1" precision="FP32">
<dim>10</dim>
<dim>5184</dim>
</port>
</output>
</layer>
<layer id="20" name="29/WithoutBiases" type="MatMul" version="opset1">
<data transpose_a="0" transpose_b="1"/>
<input>
<port id="0">
<dim>1</dim>
<dim>5184</dim>
</port>
<port id="1">
<dim>10</dim>
<dim>5184</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>10</dim>
</port>
</output>
</layer>
<layer id="21" name="onnx_initializer_node_18/Output_0/Data_/copy_const" type="Const" version="opset1">
<data element_type="f32" offset="337552" shape="1,10" size="40"/>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
</port>
</output>
</layer>
<layer id="22" name="29" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>10</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>10</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>10</dim>
</port>
</output>
</layer>
<layer id="23" name="fc_out" type="SoftMax" version="opset1">
<data axis="1"/>
<input>
<port id="0">
<dim>1</dim>
<dim>10</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>10</dim>
</port>
</output>
</layer>
<layer id="24" name="fc_out/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>10</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
<edge from-layer="5" from-port="1" to-layer="6" to-port="0"/>
<edge from-layer="6" from-port="1" to-layer="8" to-port="0"/>
<edge from-layer="7" from-port="1" to-layer="8" to-port="1"/>
<edge from-layer="8" from-port="2" to-layer="10" to-port="0"/>
<edge from-layer="9" from-port="1" to-layer="10" to-port="1"/>
<edge from-layer="10" from-port="2" to-layer="12" to-port="0"/>
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
<edge from-layer="12" from-port="2" to-layer="14" to-port="0"/>
<edge from-layer="13" from-port="1" to-layer="14" to-port="1"/>
<edge from-layer="14" from-port="2" to-layer="15" to-port="0"/>
<edge from-layer="15" from-port="1" to-layer="16" to-port="0"/>
<edge from-layer="16" from-port="1" to-layer="18" to-port="0"/>
<edge from-layer="17" from-port="1" to-layer="18" to-port="1"/>
<edge from-layer="18" from-port="2" to-layer="20" to-port="0"/>
<edge from-layer="19" from-port="1" to-layer="20" to-port="1"/>
<edge from-layer="20" from-port="2" to-layer="22" to-port="0"/>
<edge from-layer="21" from-port="1" to-layer="22" to-port="1"/>
<edge from-layer="22" from-port="2" to-layer="23" to-port="0"/>
<edge from-layer="23" from-port="1" to-layer="24" to-port="0"/>
</edges>
<meta_data>
<MO_version value="unknown version"/>
<cli_parameters>
<blobs_as_inputs value="True"/>
<data_type value="FP32"/>
<disable_resnet_optimization value="False"/>
<disable_weights_compression value="False"/>
<enable_concat_optimization value="False"/>
<extensions value="DIR"/>
<framework value="onnx"/>
<freeze_placeholder_with_value value="{}"/>
<generate_deprecated_IR_V2 value="False"/>
<generate_deprecated_IR_V7 value="False"/>
<generate_experimental_IR_V10 value="True"/>
<input_model value="DIR/test_model.onnx"/>
<keep_quantize_ops_in_IR value="True"/>
<keep_shape_ops value="False"/>
<log_level value="ERROR"/>
<mean_scale_values value="{}"/>
<mean_values value="()"/>
<model_name value="test_model"/>
<move_to_preprocess value="False"/>
<output_dir value="DIR"/>
<placeholder_data_types value="{}"/>
<progress value="False"/>
<reverse_input_channels value="False"/>
<scale_values value="()"/>
<silent value="False"/>
<stream_output value="False"/>
<unset unset_cli_parameters="batch, disable_fusing, disable_gfusing, finegrain_fusing, input, input_shape, output, placeholder_shapes, scale, transformations_config"/>
</cli_parameters>
</meta_data>
</net>

View File

@ -0,0 +1,17 @@
// -*- coding: utf-8 -*-
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
const path = require('path');
module.exports = { getModelPath };
function getModelPath(isFP16=false) {
const basePath = 'tests/test_models/';
const modelName = `test_model_fp${isFP16 ? 16 : 32}`;
return {
xml: path.join(basePath, `${modelName}.xml`),
bin: path.join(basePath, `${modelName}.bin`),
};
}
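// Editorial sketch, not part of the committed helper: how the test files
// above consume it.
const { getModelPath } = require('./utils.js');
const { xml, bin } = getModelPath();         // FP32 model paths (default)
const { xml: xmlFp16 } = getModelPath(true); // FP16 variant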

View File

@ -0,0 +1,10 @@
{
"extends": "../tsconfig.json",
"include": ["lib"],
"compilerOptions": {
"module": "CommonJS",
"allowJs": true,
"outDir": "dist",
"declarationDir": "./types"
}
}

View File

@ -0,0 +1,21 @@
{
"compilerOptions": {
"module": "CommonJS",
"declaration": true,
"esModuleInterop": true,
"target": "ES2017",
"lib": ["ES2017", "dom"],
"sourceMap": true,
"noUnusedLocals": true,
"noImplicitAny": true,
"noImplicitReturns": true,
"noImplicitThis": true,
"noUnusedParameters": false,
"alwaysStrict": true,
"strictNullChecks": true,
"pretty": true,
"allowUnreachableCode": false,
"incremental": true,
"newLine": "LF"
}
}

View File

@ -200,8 +200,10 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND
set(OV_TBB_DIR_INSTALL ${OV_CPACK_RUNTIMEDIR})
# install content instead of whole directory
set(_ov_tbb_libs_path "${_ov_tbb_libs_path}/")
set(_lib_subfolder "")
else()
set(OV_TBB_DIR_INSTALL "runtime/3rdparty/tbb")
set(_lib_subfolder "lib")
endif()
install(DIRECTORY "${_ov_tbb_libs_path}"
@ -252,7 +254,8 @@ if(THREADING MATCHES "^(TBB|TBB_AUTO)$" AND
PATTERN "cmake" EXCLUDE)
endif()
set(TBB_LIB_INSTALL_DIR "${OV_TBB_DIR_INSTALL}/lib" CACHE PATH "TBB library install directory" FORCE)
set(TBB_LIB_INSTALL_DIR "${OV_TBB_DIR_INSTALL}/${lib_subfolder}" CACHE PATH "TBB library install directory" FORCE)
unset(_lib_folder)
else()
unset(TBB_LIB_INSTALL_DIR CACHE)
message(WARNING "TBB of unknown origin. TBB files are not installed")