diff --git a/docs/conf.py b/docs/conf.py index be9dc1caed1..97446abbd48 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -164,12 +164,18 @@ def autodoc_skip_member(app, what, name, obj, skip, options): return name in exclude_pyapi_methods +shutil.copy("../../../docs/home.rst",".") + +def replace_index_with_redirect(app,exception): + shutil.copy("../../../docs/index.html","../_build/index.html") + def setup(app): logger = logging.getLogger(__name__) app.add_config_value('doxygen_mapping_file', doxygen_mapping_file, rebuild=True) app.add_config_value('repositories', repositories, rebuild=True) app.connect('autodoc-skip-member', autodoc_skip_member) + app.connect('build-finished',replace_index_with_redirect) app.add_js_file('js/custom.js') app.add_js_file('js/graphs.js') app.add_js_file('js/graphs_ov_tf.js') diff --git a/docs/home.rst b/docs/home.rst new file mode 100644 index 00000000000..d25f9583203 --- /dev/null +++ b/docs/home.rst @@ -0,0 +1,125 @@ +.. OpenVINO Toolkit documentation master file, created by + sphinx-quickstart on Wed Jul 7 10:46:56 2021. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +.. meta:: + :google-site-verification: _YqumYQ98cmXUTwtzM_0WIIadtDc6r_TMYGbmGgNvrk + +OpenVINO™ Documentation +======================= + +.. raw:: html + +
+ OpenVINO™ is an open-source toolkit for optimizing and deploying AI inference. +
Check the full range of supported hardware in the
+ Supported Devices page and see how it stacks up in our
+ Performance Benchmarks page.
+ Supports deployment on Windows, Linux, and macOS.
+
OpenVINO Workflow
+Want to know more?
+Learn how to download, install, and configure OpenVINO.
+ + +Browse through over 200 publicly available neural networks and pick the right one for your solution.
+ + +Learn how to convert your model and optimize it for use with OpenVINO.
+ + +Learn how to use OpenVINO based on our training material.
+ + +Try OpenVINO using ready-made applications explaining various use cases.
+ + +Learn about the alternative, web-based version of OpenVINO. DL Workbench container installation required.
+ + +Learn about OpenVINO's inference mechanism, which executes IR, ONNX, and Paddle models on target devices.
+ + +Model-level (e.g. quantization) and runtime-level (i.e. application-level) optimizations to make your inference as fast as possible.
+ + +View performance benchmark results for various models on Intel platforms.
+ +- OpenVINO™ is an open-source toolkit for optimizing and deploying AI inference. -
Check the full range of supported hardware in the
- Supported Devices page and see how it stacks up in our
- Performance Benchmarks page.
- Supports deployment on Windows, Linux, and macOS.
-
OpenVINO Workflow
-Want to know more?
-Learn how to download, install, and configure OpenVINO.
- - -Browse through over 200 publicly available neural networks and pick the right one for your solution.
- - -Learn how to convert your model and optimize it for use with OpenVINO.
- - -Learn how to use OpenVINO based on our training material.
- - -Try OpenVINO using ready-made applications explaining various use cases.
- - -Learn about the alternative, web-based version of OpenVINO. DL Workbench container installation Required.
- - -Learn about OpenVINO's inference mechanism which executes the IR, ONNX, Paddle models on target devices.
- - -Model-level (e.g. quantization) and Runtime (i.e. application) -level optimizations to make your inference as fast as possible.
- - -View performance benchmark results for various models on Intel platforms.
- -