Fix spelling errors in file names (#5776)

And similar errors in file contents.
This commit is contained in:
Roman Donchenko 2021-05-25 12:52:58 +03:00 committed by GitHub
parent 43bdf6b3c1
commit 68cadf1ff9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 24 additions and 24 deletions

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "execution_graph_tests/keep_assing.hpp"
#include "execution_graph_tests/keep_assign.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace ExecutionGraphTests;

View File

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "execution_graph_tests/keep_assing.hpp"
#include "execution_graph_tests/keep_assign.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include <ngraph/ngraph.hpp>

View File

@@ -53,7 +53,7 @@ JitConstants MVNKernelRef::GetJitConstants(const mvn_params& params, DispatchDat
std::string MVNKernelRef::GetKernelName(const mvn_params& params) const {
if (params.mvnMode == MVNMode::ACROSS_CHANNELS)
return kernelName + "_accross_channels";
return kernelName + "_across_channels";
else
return kernelName + "_within_channels";
}

View File

@@ -6,7 +6,7 @@
#include "include/data_types.cl"
KERNEL (mvn_gpu_ref_accross_channels)(
KERNEL (mvn_gpu_ref_across_channels)(
const __global INPUT0_TYPE* input,
__global OUTPUT_TYPE* restrict output
#if HAS_FUSED_OPS_DECLS

View File

@@ -3429,7 +3429,7 @@ struct mvn_test_params {
tensor elwise_size;
data_types input_type;
format input_format;
bool accross_channels;
bool across_channels;
bool normalize_variance;
data_types default_type;
format default_format;

View File

@@ -22,7 +22,7 @@ using namespace cldnn;
class mvn_gpu_test : public ::testing::TestWithParam<cldnn::format> {};
template <typename T>
void mvn_compute_mean_accross_channels(cldnn::memory& output, bool normalize_variance) {
void mvn_compute_mean_across_channels(cldnn::memory& output, bool normalize_variance) {
auto output_size = output.get_layout().size;
uint32_t batch_size = output_size.batch[0];
@@ -108,7 +108,7 @@ void mvn_compute_mean_within_channels(cldnn::memory& output, bool normalize_vari
}
TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx) {
// mvn accross channels fp32 test with normalize_variance set to false
// mvn across channels fp32 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;
@@ -131,11 +131,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx) {
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, false);
mvn_compute_mean_across_channels<float>(output, false);
}
TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx) {
// mvn accross channels fp32 test with normalize_variance set to false
// mvn across channels fp32 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;
@@ -158,11 +158,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx) {
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, false);
mvn_compute_mean_across_channels<float>(output, false);
}
TEST(mvn_gpu_test, mvn_test_across_channels_bfyx_outside_sqrt_fp16) {
// mvn accross channels fp16 test with normalize_variance set to false
// mvn across channels fp16 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;
@@ -185,11 +185,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_bfyx_outside_sqrt_fp16) {
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, false);
mvn_compute_mean_across_channels<FLOAT16>(output, false);
}
TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_fp16) {
// mvn accross channels fp16 test with normalize_variance set to false
// mvn across channels fp16 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;
@@ -212,11 +212,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_fp16) {
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, false);
mvn_compute_mean_across_channels<FLOAT16>(output, false);
}
TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance) {
// mvn accross channels fp32 test with normalize_variance set to true
// mvn across channels fp32 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;
@@ -239,11 +239,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, true);
mvn_compute_mean_across_channels<float>(output, true);
}
TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance) {
// mvn accross channels fp32 test with normalize_variance set to true
// mvn across channels fp32 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;
@@ -266,11 +266,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance)
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, true);
mvn_compute_mean_across_channels<float>(output, true);
}
TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance_fp16) {
// mvn accross channels fp16 test with normalize_variance set to true
// mvn across channels fp16 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;
@@ -293,11 +293,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, true);
mvn_compute_mean_across_channels<FLOAT16>(output, true);
}
TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_fp16) {
// mvn accross channels fp16 test with normalize_variance set to true
// mvn across channels fp16 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;
@@ -320,7 +320,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_
EXPECT_EQ(outputs.begin()->first, "mvn");
auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, true);
mvn_compute_mean_across_channels<FLOAT16>(output, true);
}
TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx) {
@@ -586,13 +586,13 @@ struct mvn_random_test : ::testing::TestWithParam<mvn_basic_test_params> {
void check_result(memory& output, bool across_channels, bool normalize_variance) {
if (output.get_layout().data_type == data_types::f32) {
if (across_channels) {
mvn_compute_mean_accross_channels<float>(output, normalize_variance);
mvn_compute_mean_across_channels<float>(output, normalize_variance);
} else {
mvn_compute_mean_within_channels<float>(output, normalize_variance);
}
} else if (output.get_layout().data_type == data_types::f16) {
if (across_channels) {
mvn_compute_mean_accross_channels<FLOAT16>(output, normalize_variance);
mvn_compute_mean_across_channels<FLOAT16>(output, normalize_variance);
} else {
mvn_compute_mean_within_channels<FLOAT16>(output, normalize_variance);
}