Merge remote-tracking branch 'upstream/master' into debian-packages
commit f461859cfe
@@ -1,24 +0,0 @@
-jobs:
-- job: Win_docker
-  timeoutInMinutes: 60
-
-  pool:
-    name: WIN_DOCKER_VMSS_VENV_F4S_WU2
-
-  variables:
-    system.debug: true
-
-  steps:
-  - script: |
-      powershell -command "Invoke-RestMethod -Headers @{\"Metadata\"=\"true\"} -Method GET -Uri http://169.254.169.254/metadata/instance/compute?api-version=2019-06-01 | format-custom"
-      where python3
-      python3 --version
-      where python
-      python --version
-      wmic computersystem get TotalPhysicalMemory
-      wmic cpu list
-      wmic logicaldisk get description,name
-      wmic VOLUME list
-      set
-    displayName: 'System info'
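Note: the deleted job probes the VM through the Azure Instance Metadata Service (IMDS), an HTTP GET against the link-local address 169.254.169.254 that must carry a "Metadata: true" header or it is rejected. A minimal standalone sketch of the same query, assuming libcurl is available (the library choice and error handling are illustrative, not part of the original pipeline):

// imds_probe.cpp: query the Azure IMDS the way the deleted step's
// Invoke-RestMethod call does. Build: g++ imds_probe.cpp -lcurl
#include <curl/curl.h>
#include <iostream>
#include <string>

static size_t collect(char* data, size_t size, size_t nmemb, void* out) {
    static_cast<std::string*>(out)->append(data, size * nmemb);
    return size * nmemb;
}

int main() {
    CURL* curl = curl_easy_init();
    if (!curl) return 1;

    std::string body;
    // IMDS refuses requests that lack the Metadata header.
    curl_slist* headers = curl_slist_append(nullptr, "Metadata: true");
    curl_easy_setopt(curl, CURLOPT_URL,
        "http://169.254.169.254/metadata/instance/compute?api-version=2019-06-01");
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, collect);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);

    const CURLcode rc = curl_easy_perform(curl);
    if (rc == CURLE_OK)
        std::cout << body << '\n';  // JSON description of the VM
    else
        std::cerr << curl_easy_strerror(rc) << '\n';

    curl_slist_free_all(headers);
    curl_easy_cleanup(curl);
    return rc == CURLE_OK ? 0 : 1;
}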
@@ -5,7 +5,6 @@
 #include "int_executable.hpp"
 
 #include <cstring>
-#include <openvino/op/util/variable_context.hpp>
 
 #include "evaluates_map.hpp"
 #include "ngraph/except.hpp"
@@ -88,10 +87,6 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         results_map[output] = output_count;
     }
 
-    EvaluationContext eval_context;
-    ov::op::util::VariableContext variable_context;
-    eval_context.emplace("VariableContext", variable_context);
-
     // for each ordered op in the graph
     for (const auto& op : m_nodes) {
         if (dynamic_pointer_cast<op::Parameter>(op) != nullptr) {
@@ -148,20 +143,8 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         if (m_performance_counters_enabled) {
             m_timer_map[op].start();
         }
 
-        if (auto var_extension = std::dynamic_pointer_cast<ov::op::util::VariableExtension>(cloned_node)) {
-            auto variable = var_extension->get_variable();
-            if (!variable_context.get_variable_value(variable)) {
-                auto h_tensor = std::make_shared<ngraph::HostTensor>(cloned_node->get_input_element_type(0),
-                                                                     cloned_node->get_input_shape(0));
-                std::vector<float> data(ov::shape_size(cloned_node->get_input_shape(0)), 0);
-                h_tensor->write(data.data(), data.size() * sizeof(float));
-                variable_context.set_variable_value(variable, std::make_shared<VariableValue>(h_tensor));
-            }
-        }
-
         // Call evaluate for cloned_node with static shapes
-        if (!cloned_node->evaluate(op_outputs, op_inputs, eval_context)) {
+        if (!cloned_node->evaluate(op_outputs, op_inputs)) {
             evaluate_node(cloned_node, op_outputs, op_inputs);
         }
         if (m_performance_counters_enabled) {
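Note: the surviving control flow keeps the interpreter's two-tier evaluation: try the node's own evaluate() first and, when it reports failure, fall back to a hand-written kernel looked up in evaluates_map. A standalone sketch of that dispatch pattern with toy types (the names and the map are hypothetical, not the OpenVINO API):

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Toy stand-in for an op: evaluate() returns false when the op has no
// built-in implementation, mirroring how Node::evaluate is used above.
struct Op {
    std::string type;
    bool evaluate() const { return type == "Add"; }  // only Add is built in
};

int main() {
    // Fallback table, analogous in spirit to the interpreter's evaluates_map.
    std::map<std::string, std::function<void(const Op&)>> fallback = {
        {"Mod", [](const Op&) { std::cout << "custom Mod kernel\n"; }},
    };

    for (const Op& op : {Op{"Add"}, Op{"Mod"}}) {
        if (!op.evaluate()) {            // built-in path failed...
            fallback.at(op.type)(op);    // ...dispatch to the custom kernel
        } else {
            std::cout << "built-in " << op.type << " kernel\n";
        }
    }
    return 0;
}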
@@ -85,7 +85,6 @@ NGRAPH_OP(NonMaxSuppression, op::v5)
 NGRAPH_OP(RNNSequence, op::v5)
 NGRAPH_OP(Round, op::v5)
 
-NGRAPH_OP(Assign, ngraph::op::v6)
 NGRAPH_OP(CTCGreedyDecoderSeqLen, op::v6)
 NGRAPH_OP(ExperimentalDetectronDetectionOutput, op::v6)
 NGRAPH_OP(ExperimentalDetectronGenerateProposalsSingleImage, op::v6)
@@ -94,7 +93,6 @@ NGRAPH_OP(ExperimentalDetectronROIFeatureExtractor, op::v6)
 NGRAPH_OP(ExperimentalDetectronTopKROIs, op::v6)
 NGRAPH_OP(GatherElements, op::v6)
 NGRAPH_OP(MVN, ngraph::op::v6)
-NGRAPH_OP(ReadValue, ngraph::op::v6)
 
 NGRAPH_OP(DFT, op::v7)
 NGRAPH_OP(Einsum, op::v7)
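Note: these NGRAPH_OP entries form an X-macro table: each including file defines NGRAPH_OP to whatever expansion it needs (dispatch cases, registrations, and so on) before including the list, so one table drives several pieces of generated code. A minimal standalone sketch of the idiom with toy op names (not the actual OpenVINO table):

#include <iostream>

// The table: one entry per op, normally kept in its own .inl file.
#define OP_TABLE \
    OP(Add)      \
    OP(Multiply) \
    OP(Round)

// First expansion of the table: declare a tag value per op.
enum class OpKind {
#define OP(name) name,
    OP_TABLE
#undef OP
};

// Second expansion of the same table: print every known op.
int main() {
#define OP(name) std::cout << #name << '\n';
    OP_TABLE
#undef OP
    return 0;
}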
inference-engine/thirdparty/CMakeLists.txt
@@ -16,7 +16,3 @@ endif()
 if(ENABLE_LTO)
     set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ON)
 endif()
-
-if (ENABLE_INTEL_GPU)
-    add_subdirectory(clDNN)
-endif()
inference-engine/thirdparty/clDNN/CMakeLists.txt
@@ -1,26 +0,0 @@
-# Copyright (C) 2018-2021 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
-
-if(ENABLE_GPU_DEBUG_CAPS)
-    add_definitions(-DGPU_DEBUG_CONFIG=1)
-endif()
-
-set(MAIN_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
-set(INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/api")
-
-find_package(PythonInterp 3 QUIET)
-if(NOT PYTHONINTERP_FOUND)
-    message(WARNING "[clDNN] Project requires Python 3.x interpreter to build (with python loader). CMake could not detect it correctly.
-        If you have installed this interpreter, please disregard this warning or specify PYTHON_EXECUTABLE in CMake command-line."
-    )
-endif()
-
-add_subdirectory(runtime)
-add_subdirectory(kernel_selector)
-add_subdirectory(src)
-add_subdirectory(utils)
-
-if(ENABLE_TESTS)
-    add_subdirectory(tests)
-endif()
@@ -1,27 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ngraph/ngraph.hpp>
-#include "layer_transformation.hpp"
-
-namespace ngraph {
-namespace pass {
-namespace low_precision {
-
-class LP_TRANSFORMATIONS_API AssignAndReadValueTransformation : public LayerTransformation {
-public:
-    NGRAPH_RTTI_DECLARATION;
-    AssignAndReadValueTransformation(const std::shared_ptr<ngraph::Function> function, const Params& params = Params());
-    bool transform(TransformationContext& context, ngraph::pattern::Matcher& m) override;
-    bool canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> op) const override;
-    bool isPrecisionPreserved(std::shared_ptr<Node> layer) const noexcept override;
-private:
-    std::shared_ptr<ngraph::Function> function;
-};
-
-} // namespace low_precision
-} // namespace pass
-} // namespace ngraph
@@ -33,7 +33,6 @@ public:
    bool multiplyHasZeroOrDenormal() const;
    bool isShared() const;
    bool isLowPrecision() const;
-   std::shared_ptr<Node> copyWithNewInput(const std::shared_ptr<Node>& input) const;
 
    static bool checkElementwise(const std::shared_ptr<ngraph::Node>& elementwise);
 
@@ -22,10 +22,11 @@ public:
 
    static bool checkElementwise(const std::shared_ptr<Node>& eltwise);
 
-   static std::shared_ptr<opset1::FakeQuantize> fuseElementwise(
+private:
+   std::shared_ptr<opset1::FakeQuantize> fuseElementwise(
        TransformationContext& context,
        MatcherPass* matcherPass,
-       const std::shared_ptr<opset1::FakeQuantize>& fakeQuantize);
+       const std::shared_ptr<opset1::FakeQuantize>& fakeQuantize) const;
 };
 
 } // namespace low_precision
@@ -41,12 +41,17 @@ public:
 
     static std::vector<Input<Node>> consumer_inputs(std::shared_ptr<Node> node);
 
+    // returns true if at least one child is not FQ
+    static bool notAllChildrensAreFQ(const NodeVector& layer);
+
     // Collect and return a vector with all nodes that consumes any of the `node` output
     static std::vector<std::shared_ptr<Node>> consumers(std::shared_ptr<Node> node);
 
     // return true if op is on a constant path
     static bool isConstantPath(const std::shared_ptr<Node>& op);
 
+    static Shape alignShapeForChannelDim(const Shape& shape, Rank rank);
+
     template <typename OperationType>
     static std::shared_ptr<Node> setOutDataPrecisionForTypeRelaxed(std::shared_ptr<OperationType> operation, const element::Type& precision);
 
@@ -210,6 +215,87 @@ public:
         const std::shared_ptr<Node>& dequantization,
         const std::shared_ptr<Node>& newNode);
 
+    static void replaceAttributeInNodes(
+        std::shared_ptr<ngraph::Function> f,
+        const std::string& name,
+        const ov::Any& newAttribute,
+        const ov::Any& oldAttribute,
+        const std::shared_ptr<ngraph::Node>& initialNode) {
+        std::set<std::shared_ptr<Node>> visited;
+        std::deque<std::shared_ptr<Node>> nodes;
+        nodes.emplace_back(initialNode);
+
+        while (!nodes.empty()) {
+            auto node = nodes.front();
+            nodes.pop_front();
+
+            if (visited.count(node) || ov::is_type<op::Constant>(node)) {
+                continue;
+            }
+
+            visited.insert(node);
+
+            bool handleConnectedNodes = false;
+            if (NetworkHelper::isPrecisionPreserved(node) || ov::is_type<opset1::FakeQuantize>(node)) {
+                auto& rt = node->get_rt_info();
+
+                if (node == initialNode) {
+                    rt[name] = newAttribute;
+                    handleConnectedNodes = true;
+                } else {
+                    auto it = rt.find(name);
+                    if (it != rt.end()) {
+                        const auto currentAttribute = it->second;
+                        if (oldAttribute == currentAttribute) {
+                            rt[name] = newAttribute;
+                        }
+                        handleConnectedNodes = true;
+                    }
+                }
+            }
+
+            if (!handleConnectedNodes) {
+                continue;
+            }
+
+            if (!ov::is_type<opset1::FakeQuantize>(node)) {
+                for (size_t index = 0ul; index < node->get_input_size(); ++index) {
+                    auto getInput = [](const std::shared_ptr<ngraph::Node>& node, const size_t index) {
+                        const auto dequantization = NetworkHelper::getDequantization(node, index);
+                        if (!dequantization.empty() &&
+                            (ov::is_type<opset1::Convert>(dequantization.data.get_node())) &&
+                            ov::is_type<opset1::FakeQuantize>(dequantization.data.get_node()->get_input_node_ptr(0))) {
+                            const auto input = dequantization.data.get_node()->input(0);
+                            return input;
+                        }
+                        return node->input(index);
+                    };
+
+                    const auto& input = getInput(node, index);
+                    const auto& input_node = input.get_source_output().get_node_shared_ptr();
+
+                    if (visited.count(input_node) || ov::is_type<op::Constant>(input_node)) {
+                        continue;
+                    }
+
+                    nodes.push_front(input_node);
+                }
+            }
+
+            for (auto& output : node->outputs()) {
+                for (auto& input_value : output.get_target_inputs()) {
+                    const auto& output_node = input_value.get_node()->shared_from_this();
+                    if (visited.count(output_node) || ov::is_type<op::Constant>(output_node)) {
+                        continue;
+                    }
+
+                    nodes.push_front(output_node);
+                }
+            }
+        }
+    }
+
     template <typename SharedAttribute>
     static void reassign(
         const std::shared_ptr<typename SharedAttribute::SharedValueAttribute::SharedValue>& sharedValue,
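Note: replaceAttributeInNodes is a worklist graph walk: a deque of pending nodes, a visited set, constants skipped, and propagation stopping at nodes that are not precision-preserving. A standalone sketch of the same traversal skeleton over a toy graph (the node type and attribute map here are hypothetical, not ngraph types):

#include <deque>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

struct Node {
    std::string name;
    std::map<std::string, int> rt_info;            // stand-in for node->get_rt_info()
    std::vector<std::shared_ptr<Node>> neighbours; // inputs and outputs, flattened
};

// Walk the graph from `initial`, rewriting attribute `name` to `newValue`
// wherever the old value matches: the same visited-set/deque pattern as above.
void replaceAttribute(const std::shared_ptr<Node>& initial, const std::string& name,
                      int oldValue, int newValue) {
    std::set<std::shared_ptr<Node>> visited;
    std::deque<std::shared_ptr<Node>> pending{initial};

    while (!pending.empty()) {
        auto node = pending.front();
        pending.pop_front();
        if (!visited.insert(node).second) continue;  // already handled

        auto it = node->rt_info.find(name);
        if (node == initial || (it != node->rt_info.end() && it->second == oldValue))
            node->rt_info[name] = newValue;

        for (auto& next : node->neighbours)
            if (!visited.count(next)) pending.push_front(next);
    }
}

int main() {
    auto a = std::make_shared<Node>(Node{"a", {{"prec", 1}}, {}});
    auto b = std::make_shared<Node>(Node{"b", {{"prec", 1}}, {}});
    a->neighbours.push_back(b);
    b->neighbours.push_back(a);
    replaceAttribute(a, "prec", 1, 8);
    std::cout << a->rt_info["prec"] << ' ' << b->rt_info["prec"] << '\n';  // 8 8
    return 0;
}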
@@ -284,6 +370,14 @@ std::shared_ptr<Node> make_op_pattern(const ngraph::NodeVector& args) {
    return std::make_shared<ngraph::pattern::op::Any>(element::undefined, PartialShape{}, [](std::shared_ptr<Node> n) {return !!ov::as_type_ptr<T>(n); }, args);
 }
 
+template <typename T>
+std::shared_ptr<Node> make_op_label() {
+    return std::make_shared<ngraph::pattern::op::Label>(
+            element::undefined,
+            PartialShape{},
+            [](std::shared_ptr<Node> n) {return !!ov::as_type_ptr<T>(n); });
+}
+
 template <typename T, typename... Args>
 std::shared_ptr<Node> fold(Args&&... args) {
     auto node = std::make_shared<T>(std::forward<Args>(args)...);
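Note: make_op_label just wraps a dynamic type test in a pattern predicate: the lambda returns true exactly when the node can be downcast to T. The same idiom with a toy hierarchy in place of ngraph (all names hypothetical):

#include <functional>
#include <iostream>
#include <memory>

struct Node { virtual ~Node() = default; };
struct Multiply : Node {};
struct Add : Node {};

// Build a predicate that accepts exactly the nodes of dynamic type T,
// the way make_op_label<T> parameterizes its pattern lambda.
template <typename T>
std::function<bool(const std::shared_ptr<Node>&)> make_type_predicate() {
    return [](const std::shared_ptr<Node>& n) {
        return static_cast<bool>(std::dynamic_pointer_cast<T>(n));
    };
}

int main() {
    auto is_multiply = make_type_predicate<Multiply>();
    std::shared_ptr<Node> m = std::make_shared<Multiply>();
    std::shared_ptr<Node> a = std::make_shared<Add>();
    std::cout << is_multiply(m) << ' ' << is_multiply(a) << '\n';  // prints: 1 0
    return 0;
}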
@@ -1,132 +0,0 @@
-// Copyright (C) 2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "low_precision/assign_and_read_value.hpp"
-#include <ngraph/ngraph.hpp>
-
-#include <ngraph/pattern/op/wrap_type.hpp>
-#include "low_precision/network_helper.hpp"
-#include <ngraph/opsets/opset6.hpp>
-#include <ngraph/pattern/op/or.hpp>
-#include <openvino/op/util/assign_base.hpp>
-#include "low_precision/fake_quantize.hpp"
-
-namespace ngraph {
-namespace pass {
-namespace low_precision {
-
-NGRAPH_RTTI_DEFINITION(ngraph::pass::low_precision::AssignAndReadValueTransformation, "AssignAndReadValueTransformation", 0);
-
-AssignAndReadValueTransformation::AssignAndReadValueTransformation(const std::shared_ptr<ngraph::Function> function, const Params& params) :
-    LayerTransformation(params), function(function) {
-    auto assign3 = pattern::wrap_type<opset3::Assign>({ pattern::wrap_type<opset1::Multiply>() });
-    auto assign6 = pattern::wrap_type<opset6::Assign>({ pattern::wrap_type<opset1::Multiply>() });
-
-    ngraph::graph_rewrite_callback callback = [=](pattern::Matcher& m) {
-        const auto& opsMap = m.get_pattern_value_map();
-        auto op = m.get_match_root();
-        auto assignIt = opsMap.find(assign3);
-        if (assignIt == opsMap.end()) {
-            assignIt = opsMap.find(assign6);
-        }
-        const auto assign = assignIt->second.get_node_shared_ptr();
-        // check that we have ReadValue as the first dependency
-        if (assign->get_control_dependencies().empty()) {
-            return false;
-        }
-
-        if (transformation_callback(op)) {
-            return false;
-        }
-        return transform(*context, m);
-    };
-
-    auto m = std::make_shared<ngraph::pattern::Matcher>(
-        std::make_shared<pattern::op::Or>(OutputVector{ assign3, assign6 }),
-        "AssignAndReadValueTransformation");
-    this->register_matcher(m, callback);
-}
-
-bool AssignAndReadValueTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) {
-    if (!canBeTransformed(context, m.get_match_root())) {
-        return false;
-    }
-
-    const auto oldAssign = m.get_match_root();
-    const auto readValue = oldAssign->get_control_dependencies()[0];
-    oldAssign->remove_control_dependency(readValue);
-
-    const auto assign = NetworkHelper::separateInStandaloneBranch(oldAssign);
-    const auto dequantization = NetworkHelper::getDequantization(assign);
-
-    auto oldVar = ov::as_type_ptr<op::ReadValueBase>(readValue)->get_variable();
-    auto variableInfo = oldVar->get_info();
-    // set new precision for oldVar to update precision in newReadValue
-    oldVar->update({variableInfo.data_shape, dequantization.data.get_element_type(), variableInfo.variable_id});
-    // transform ReadValue part
-    const auto newConstant = foldConvert(readValue->get_input_node_shared_ptr(0), dequantization.data.get_element_type());
-    const auto newReadValue = readValue->copy_with_new_inputs({newConstant});
-    const auto newDequantization = dequantization.copyWithNewInput(newReadValue);
-    replace_node(readValue, newDequantization);
-
-    // transform Assign part
-
-    const auto newAssign = assign->copy_with_new_inputs({dequantization.data});
-    function->remove_sink(as_type_ptr<op::Sink>(oldAssign));
-    function->add_sinks({as_type_ptr<op::Sink>(newAssign)});
-
-    NetworkHelper::copyInfo(assign, newAssign);
-    replace_node(assign, newAssign);
-    newAssign->add_control_dependency(newReadValue);
-
-    // fuse dequantization multiply with FQ after ReadValue if possible
-    const auto nextLayers = newDequantization->get_output_target_inputs(0);
-    if (nextLayers.size() > 1) {
-        return true;
-    }
-    const auto fakeQuantize = as_type_ptr<opset1::FakeQuantize>(nextLayers.begin()->get_node()->shared_from_this());
-
-    if (fakeQuantize == nullptr) {
-        return true;
-    }
-    auto fakeQuantizeInputs = fakeQuantize->input_values();
-
-    const auto inputLow = as_type_ptr<opset1::Constant>(fakeQuantizeInputs[1].get_node_shared_ptr());
-    const auto inputHigh = as_type_ptr<opset1::Constant>(fakeQuantizeInputs[2].get_node_shared_ptr());
-
-    if (inputLow == nullptr || inputHigh == nullptr) {
-        return true;
-    }
-
-    FakeQuantizeTransformation::fuseElementwise(context, this, fakeQuantize);
-
-    return true;
-}
-
-bool AssignAndReadValueTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr<Node> op) const {
-    if (!LayerTransformation::canBeTransformed(context, op)) {
-        return false;
-    }
-
-    const auto readValue = std::dynamic_pointer_cast<op::ReadValueBase>(op->get_control_dependencies()[0]);
-    if (!readValue) {
-        return false;
-    }
-
-    // TODO: remove this limitation and change the transformation when this constant will be accepted to be non-zero
-    if (!NetworkHelper::isZeroConst(readValue->get_input_node_shared_ptr(0))) {
-        return false;
-    }
-
-    const auto dequantization = NetworkHelper::getDequantization(op);
-    return dequantization.subtract == nullptr && dequantization.multiply != nullptr;
-}
-
-bool AssignAndReadValueTransformation::isPrecisionPreserved(std::shared_ptr<Node> layer) const noexcept {
-    return false;
-}
-
-} // namespace low_precision
-} // namespace pass
-} // namespace ngraph
@@ -129,15 +129,17 @@ bool FakeQuantizeTransformation::checkElementwise(const std::shared_ptr<Node>& e
 std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwise(
     TransformationContext& context,
     MatcherPass* matcherPass,
-    const std::shared_ptr<opset1::FakeQuantize>& fakeQuantize) {
+    const std::shared_ptr<opset1::FakeQuantize>& fakeQuantize) const {
     const std::shared_ptr<Node> eltwise = fakeQuantize->get_input_node_shared_ptr(0);
 
-    std::shared_ptr<Node> inputLowConst_f32 = foldConvert(fakeQuantize->input_value(1), element::f32);
-    std::shared_ptr<Node> inputHighConst_f32 = foldConvert(fakeQuantize->input_value(2), element::f32);
+    std::shared_ptr<Node> inputLowConst_f32 = foldConvert(fakeQuantize->input_value(1), deqPrecision);
+    std::shared_ptr<Node> inputHighConst_f32 = foldConvert(fakeQuantize->input_value(2), deqPrecision);
 
     std::shared_ptr<opset1::Constant> constant = fq::getConstant(eltwise);
     if (ov::is_type<opset1::Multiply>(eltwise) && checkElementwise(eltwise)) {
-        const auto value = foldConvert(constant, element::f32);
+        const auto value = constant->get_output_element_type(0) == deqPrecision ?
+            constant :
+            foldConvert(constant, deqPrecision);
 
         const auto valueVec = ov::as_type_ptr<opset1::Constant>(value)->cast_vector<float>();
 
@@ -157,7 +159,9 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis
         inputLowConst_f32 = fq::updateShape(inputLowConst_f32, fakeQuantize->get_output_partial_shape(0));
         inputHighConst_f32 = fq::updateShape(inputHighConst_f32, fakeQuantize->get_output_partial_shape(0));
     } else if (ov::is_type<opset1::Subtract>(eltwise) && checkElementwise(eltwise)) {
-        const auto value = foldConvert(constant, element::f32);
+        const auto value = constant->get_output_element_type(0) == deqPrecision ?
+            constant :
+            foldConvert(constant, deqPrecision);
 
         inputLowConst_f32 = fq::updateShape(fold<opset1::Add>(inputLowConst_f32, value), fakeQuantize->get_output_partial_shape(0));
         inputHighConst_f32 = fq::updateShape(fold<opset1::Add>(inputHighConst_f32, value), fakeQuantize->get_output_partial_shape(0));
@@ -169,7 +173,9 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis
             return nullptr;
         }
 
-        const auto value = foldConvert(constant, element::f32);
+        const auto value = constant->get_output_element_type(0) == deqPrecision ?
+            constant :
+            foldConvert(constant, deqPrecision);
 
         inputLowConst_f32 = fq::updateShape(fold<opset1::Subtract>(inputLowConst_f32, value), fakeQuantize->get_output_partial_shape(0));
         inputHighConst_f32 = fq::updateShape(fold<opset1::Subtract>(inputHighConst_f32, value), fakeQuantize->get_output_partial_shape(0));
@@ -190,8 +196,8 @@ std::shared_ptr<opset1::FakeQuantize> FakeQuantizeTransformation::fuseElementwis
         data->output(outputIdx),
         inputLowConst_f32,
         inputHighConst_f32,
-        foldConvert(fakeQuantize->input_value(3), element::f32),
-        foldConvert(fakeQuantize->input_value(4), element::f32) }));
+        foldConvert(fakeQuantize->input_value(3), deqPrecision),
+        foldConvert(fakeQuantize->input_value(4), deqPrecision) }));
 
     matcherPass->register_new_node(newFakeQuantize);
 
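Note: the Subtract branch relies on a range identity: FakeQuantize(x - c) with input range [lo, hi] equals FakeQuantize(x) with the range shifted to [lo + c, hi + c], which is exactly what fold<opset1::Add> computes on the bound constants. A standalone numeric check of that identity using a toy reference fake-quantize (not the OpenVINO kernel):

#include <cmath>
#include <iostream>

// Reference fake-quantize: clamp to [inLo, inHi], snap to `levels` steps,
// then map into the output range.
double fq(double x, double inLo, double inHi, double outLo, double outHi, int levels) {
    if (x <= inLo) return outLo;
    if (x >= inHi) return outHi;
    const double q = std::round((x - inLo) / (inHi - inLo) * (levels - 1));
    return q / (levels - 1) * (outHi - outLo) + outLo;
}

int main() {
    const double c = 0.25;  // the Subtract constant being fused
    for (double x = -1.0; x <= 1.0; x += 0.1) {
        const double fused    = fq(x, -0.5 + c, 0.5 + c, 0, 255, 256);  // shifted bounds
        const double original = fq(x - c, -0.5, 0.5, 0, 255, 256);      // explicit Subtract
        if (std::abs(fused - original) > 1e-6) {
            std::cout << "mismatch at " << x << '\n';
            return 1;
        }
    }
    std::cout << "FQ(x - c) == FQ'(x) with bounds shifted by +c\n";
    return 0;
}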
@@ -155,26 +155,6 @@ bool FakeQuantizeDequantization::checkElementwise(const std::shared_ptr<ngraph::
     return true;
 }
 
-std::shared_ptr<Node> FakeQuantizeDequantization::copyWithNewInput(const std::shared_ptr<Node>& input) const {
-    auto lastNode = input;
-    if (convert) {
-        lastNode = convert->copy_with_new_inputs({lastNode});
-    }
-    if (subtract) {
-        std::shared_ptr<Node> input1 = nullptr;
-        if (subtractConvert) {
-            input1 = subtractConvert;
-        } else {
-            input1 = subtractConstant;
-        }
-        lastNode = subtract->copy_with_new_inputs({lastNode, input1});
-    }
-    if (multiply) {
-        lastNode = multiply->copy_with_new_inputs({lastNode, multiplyConstant});
-    }
-    return lastNode;
-}
-
 int FakeQuantizeDequantization::fillDequantizationParams(
     const std::shared_ptr<ngraph::Node>& elementwise,
     std::shared_ptr<ngraph::opset1::Convert>& convert,
@@ -38,7 +38,6 @@
 
 // general transformations
 #include "low_precision/add.hpp"
-#include "low_precision/assign_and_read_value.hpp"
 #include "low_precision/avg_pool.hpp"
 #include "low_precision/clamp.hpp"
 #include "low_precision/convolution.hpp"
@@ -208,7 +207,6 @@ bool ngraph::pass::low_precision::LowPrecision::run_on_model(const std::shared_p
 
     std::shared_ptr<ngraph::pass::GraphRewrite> common = manager.register_pass<ngraph::pass::GraphRewrite>();
     common->add_matcher<ngraph::pass::low_precision::AddTransformation>(params);
-    common->add_matcher<ngraph::pass::low_precision::AssignAndReadValueTransformation>(f, params);
     common->add_matcher<ngraph::pass::low_precision::AvgPoolTransformation>(params);
     common->add_matcher<ngraph::pass::low_precision::ClampTransformation>(params);
     common->add_matcher<ngraph::pass::low_precision::ConcatTransformation>(params);
@@ -37,6 +37,17 @@ bool NetworkHelper::is_castable_to_one_of(NodeTypeInfo type, const std::unordere
     return false;
 }
 
+bool NetworkHelper::notAllChildrensAreFQ(const NodeVector& childrens) {
+    // NOTE: This check was added for models that don't have FQ after AvgPool.
+    // They will have transparent precision as it was in old LPT.
+    for (const auto& child : childrens) {
+        if (!ov::is_type<opset1::FakeQuantize>(child)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 // Collect and return a vector with all nodes that consumes any of the `node` output
 std::vector<Input<Node>> NetworkHelper::consumer_inputs(std::shared_ptr<Node> node) {
     std::vector<Input<Node>> result;
@@ -188,6 +199,15 @@ size_t NetworkHelper::getGroupsCount(std::shared_ptr<Node> layer) {
     }
 }
 
+// Assuming the tensor is in NC... layout, append the necessary number of 1s to the shape to align it to a given rank
+Shape NetworkHelper::alignShapeForChannelDim(const Shape& shape, Rank rank) {
+    assert(shape.size() == 1);
+    assert(rank.is_static());
+    Shape result = shape;
+    result.resize(rank.get_length() - 1, 1);
+    return result;
+}
+
 void NetworkHelper::removeLayer(std::shared_ptr<Node> layer) {
     ngraph::replace_output_update_name(layer->output(0), layer->input_value(0));
 }
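Note: alignShapeForChannelDim takes a one-element per-channel shape {C} and pads it with trailing 1s so it broadcasts against the channel dimension of an NC... tensor of the given rank; for rank 4 it yields {C, 1, 1}. A standalone re-creation with plain vectors (toy types standing in for ngraph::Shape and Rank):

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Mirror of the helper above: a per-channel shape {C} becomes
// {C, 1, ..., 1} of length rank - 1, which broadcasts against the
// C,H,W,... tail of an NC... tensor.
std::vector<std::size_t> alignShapeForChannelDim(std::vector<std::size_t> shape,
                                                 std::size_t rank) {
    assert(shape.size() == 1);
    shape.resize(rank - 1, 1);
    return shape;
}

int main() {
    for (std::size_t d : alignShapeForChannelDim({3}, 4))
        std::cout << d << ' ';  // prints: 3 1 1
    std::cout << '\n';
    return 0;
}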
@@ -1339,12 +1359,9 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
 
     const std::shared_ptr<opset1::Convert> convert = ov::as_type_ptr<opset1::Convert>(dataNode.get_node_shared_ptr());
     if (convert != nullptr) {
-        auto defaultPrecisions = LayerTransformation::getDefaultPrecisions();
-        auto el_type = convert->input(0).get_element_type();
-        auto foundIt = std::find(defaultPrecisions.begin(), defaultPrecisions.end(), el_type);
-        if (foundIt == defaultPrecisions.end() &&
-            el_type != element::i4 && el_type != element::u4 &&
-            el_type != element::f32 && el_type != element::f16) {
+        if ((convert->input(0).get_element_type() != element::i8) && (convert->input(0).get_element_type() != element::u8) &&
+            (convert->input(0).get_element_type() != element::i4) && (convert->input(0).get_element_type() != element::u4) &&
+            (convert->output(0).get_element_type() != element::f32)) {
             return FakeQuantizeDequantization(dataNode, nullptr, subtract, subtractConvert, subtractConstant, multiply, multiplyConstant);
         }
         dataNode = convert->get_input_source_output(0);
@@ -5,7 +5,7 @@
 set (TARGET_NAME "ov_intel_gpu_plugin")
 
 if(CMAKE_COMPILER_IS_GNUCXX)
-    ie_add_compiler_flags(-Wno-all)
+    ie_add_compiler_flags(-Wno-all -Wno-missing-declarations)
     ie_add_compiler_flags(-msse4.1 -msse4.2)
     if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
         ie_add_compiler_flags(-Wno-error=terminate)
@@ -16,6 +16,14 @@ if(ENABLE_GPU_DEBUG_CAPS)
     add_definitions(-DGPU_DEBUG_CONFIG=1)
 endif()
 
+set(MAIN_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+set(INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include")
+
+add_subdirectory(src/runtime)
+add_subdirectory(src/kernel_selector)
+add_subdirectory(src/graph)
+add_subdirectory(thirdparty)
+
 file(GLOB_RECURSE PLUGIN_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/plugin/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/include/intel_gpu/plugin/*.hpp)
 
 addVersionDefines(src/plugin/plugin.cpp CI_BUILD_NUMBER)
@@ -41,5 +49,9 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
     set_target_properties(${TARGET_NAME} PROPERTIES LINK_FLAGS_RELEASE "-Wno-error=maybe-uninitialized -Wno-maybe-uninitialized")
 endif()
 
+if(ENABLE_TESTS)
+    add_subdirectory(tests)
+endif()
+
 # Failed because of OpenCL
 # ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
@@ -1,21 +1,6 @@
 
-# Compute Library for Deep Neural Networks (clDNN)
-[](LICENSE)
-
-
-*Compute Library for Deep Neural Networks* (*clDNN*) is an open source performance
-library for Deep Learning (DL) applications intended for acceleration of
-DL Inference on Intel® Processor Graphics – including HD Graphics and
-Iris® Graphics.
-*clDNN* includes highly optimized building blocks for implementation of
-convolutional neural networks (CNN) with C++ interface.
-
-## License
-clDNN is licensed under
-[Apache License Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
-
 ### Attached licenses
-clDNN uses 3<sup>rd</sup>-party components licensed under following licenses:
+GPU plugin uses 3<sup>rd</sup>-party components licensed under following licenses:
 - *googletest* under [Google\* License](https://github.com/google/googletest/blob/master/googletest/LICENSE)
 - *OpenCL™ ICD and C++ Wrapper* under [Khronos™ License](https://github.com/KhronosGroup/OpenCL-CLHPP/blob/master/LICENSE.txt)
 - *RapidJSON* under [Tencent\* License](https://github.com/Tencent/rapidjson/blob/master/license.txt)
@@ -25,7 +10,7 @@ Please report issues and suggestions
 [GitHub issues](https://github.com/openvinotoolkit/openvino/issues).
 
 ## How to Contribute
-We welcome community contributions to clDNN. If you have an idea how to improve the library:
+We welcome community contributions to GPU plugin. If you have an idea how to improve the library:
 
 - Share your proposal via
   [GitHub issues](https://github.com/openvinotoolkit/openvino/issues)
@@ -38,11 +23,11 @@ are necessary, may provide feedback to guide you. When accepted, your pull
 request will be merged into our GitHub repository.
 
 ## System Requirements
-clDNN supports Intel® HD Graphics and Intel® Iris® Graphics and is optimized for Gen9-Gen12LP architectures
+GPU plugin supports Intel® HD Graphics and Intel® Iris® Graphics and is optimized for Gen9-Gen12LP architectures
 
-clDNN currently uses OpenCL™ with multiple Intel OpenCL™ extensions and requires Intel® Graphics Driver to run.
+GPU plugin currently uses OpenCL™ with multiple Intel OpenCL™ extensions and requires Intel® Graphics Driver to run.
 
-clDNN requires CPU with Intel® SSE/Intel® AVX support.
+GPU plugin requires CPU with Intel® SSE/Intel® AVX support.
 
 ---
Some files were not shown because too many files have changed in this diff.