added: (Tensor|Vec)Func::Gradient
Returns the gradient of the function as a flat array of first derivatives.
This commit is contained in:
parent
cf8b5542a1
commit
dbcbede16c
|
@ -481,6 +481,24 @@ Ret EvalMultiFunction<ParentFunc,Ret>::dderiv (const Vec3& X, int d1, int d2) co
|
|||
}
|
||||
|
||||
|
||||
template <class ParentFunc, class Ret>
|
||||
std::vector<Real>
|
||||
EvalMultiFunction<ParentFunc, Ret>::evalGradient (const Vec3& X) const
|
||||
{
|
||||
std::vector<Real> result(this->ncmp*this->nsd);
|
||||
std::vector<Vec3> dx;
|
||||
for (const std::unique_ptr<EvalFunction>& f : this->p)
|
||||
dx.push_back(f->gradient(X));
|
||||
|
||||
size_t k = 0;
|
||||
for (size_t d = 1; d <= this->nsd; ++d)
|
||||
for (size_t i = 1; i <= this->ncmp; ++i)
|
||||
result[k++] = dx[i-1][d-1];
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
template class EvalMultiFunction<VecFunc,Vec3>;
|
||||
template class EvalMultiFunction<TensorFunc,Tensor>;
|
||||
template class EvalMultiFunction<STensorFunc,SymmTensor>;
|
||||
|
|
|
@ -223,6 +223,9 @@ protected:
|
|||
|
||||
//! \brief Evaluates the function expressions.
|
||||
Ret evaluate(const Vec3& X) const override;
|
||||
|
||||
//! \brief Returns the gradient of the function as a 1D array.
|
||||
std::vector<Real> evalGradient(const Vec3& X) const override;
|
||||
};
|
||||
|
||||
//! Vector-valued function expression
|
||||
|
|
|
@ -110,6 +110,9 @@ namespace utl
|
|||
virtual Result dderiv(const Vec3&, int, int) const { return zero; }
|
||||
|
||||
protected:
|
||||
//! \brief Returns the gradient of the function as a 1D array.
|
||||
virtual std::vector<Real> evalGradient(const Vec3&) const { return {}; }
|
||||
|
||||
Result zero; //!< Return value for default implementations of derivatives
|
||||
};
|
||||
}
|
||||
|
@ -251,6 +254,14 @@ public:
|
|||
{
|
||||
return this->evaluate(X).length();
|
||||
}
|
||||
|
||||
//! \brief Evaluates first derivatives of the function.
|
||||
Tensor gradient(const Vec3& X) const
|
||||
{
|
||||
Tensor result(ncmp);
|
||||
result = this->evalGradient(X);
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
|
52
src/Utility/TensorFunction.C
Normal file
|
@ -0,0 +1,52 @@
|
|||
// $Id$
|
||||
//==============================================================================
|
||||
//!
|
||||
//! \file TensorFunction.C
|
||||
//!
|
||||
//! \date May 10 2017
|
||||
//!
|
||||
//! \author Knut Morten Okstad / SINTEF
|
||||
//!
|
||||
//! \brief Spatial tensor-valued functions.
|
||||
//!
|
||||
//==============================================================================
|
||||
|
||||
#include "TensorFunction.h"
|
||||
|
||||
|
||||
//! \brief Evaluates the first derivatives of the tensor function.
//! \param[in] X The spatial point to evaluate the gradient at
//! \return An nsd x nsd x nsd array where entry (i,j,d) holds the derivative
//! of tensor component (i,j) w.r.t. spatial direction d.
utl::matrix3d<Real> TensorFunc::gradient(const Vec3& X) const
{
  // ncmp is the number of tensor components (nsd*nsd). Round the square
  // root rather than truncating it, so a floating-point result marginally
  // below the exact integer cannot yield a too-small dimension.
  const size_t nsd = static_cast<size_t>(sqrt(static_cast<double>(ncmp)) + 0.5);
  utl::matrix3d<Real> result(nsd, nsd, nsd);
  result.fill(this->evalGradient(X).data());
  return result;
}
|
||||
|
||||
|
||||
size_t STensorFunc::index(size_t nsd, size_t i, size_t j) const
|
||||
{
|
||||
if (i == j)
|
||||
return i-1; // diagonal term
|
||||
else if (nsd == 2)
|
||||
return ncmp-1; // off-diagonal term (2D)
|
||||
|
||||
if (i == j+1 || i+2 == j)
|
||||
std::swap(i,j);
|
||||
|
||||
return i+2; // upper triangular term (3D)
|
||||
}
|
||||
|
||||
|
||||
//! \brief Evaluates the first derivatives of the symmetric tensor function.
//! \param[in] X The spatial point to evaluate the gradient at
//! \return An nsd x nsd x nsd array where entry (i,j,d) holds the derivative
//! of tensor component (i,j) w.r.t. spatial direction d.
utl::matrix3d<Real> STensorFunc::gradient(const Vec3& X) const
{
  // Deduce the spatial dimension from the number of stored components.
  const size_t nsd = ncmp > 5 ? 3 : (ncmp > 2 ? 2 : 1);

  // The flat gradient stores, per direction d, one block of ncmp
  // symmetric-tensor components; index() maps (i,j) into that block.
  const std::vector<Real> flat = this->evalGradient(X);

  utl::matrix3d<Real> result(nsd,nsd,nsd);
  for (size_t d = 1; d <= nsd; ++d)
  {
    const size_t offset = (d-1)*ncmp; // start of direction d's block
    for (size_t i = 1; i <= nsd; ++i)
      for (size_t j = 1; j <= nsd; ++j)
        result(i,j,d) = flat[offset + this->index(nsd,i,j)];
  }

  return result;
}
|
|
@ -15,6 +15,7 @@
|
|||
#define _TENSOR_FUNCTION_H
|
||||
|
||||
#include "Function.h"
|
||||
#include "matrixnd.h"
|
||||
#include "Tensor.h"
|
||||
|
||||
|
||||
|
@ -49,6 +50,9 @@ public:
|
|||
{
|
||||
return this->evaluate(X).trace();
|
||||
}
|
||||
|
||||
//! \brief Evaluates first derivatives of the function.
|
||||
utl::matrix3d<Real> gradient(const Vec3& X) const;
|
||||
};
|
||||
|
||||
|
||||
|
@ -59,6 +63,9 @@ public:
|
|||
class STensorFunc : public utl::SpatialFunction<SymmTensor>,
|
||||
public FunctionBase
|
||||
{
|
||||
//! \brief Returns the flat indices of the symmetric tensor.
|
||||
size_t index(size_t nsd, size_t i, size_t j) const;
|
||||
|
||||
protected:
|
||||
//! \brief The constructor is protected to allow sub-class instances only.
|
||||
STensorFunc(size_t n = 0, bool with33 = false)
|
||||
|
@ -85,6 +92,9 @@ public:
|
|||
{
|
||||
return this->evaluate(X).trace();
|
||||
}
|
||||
|
||||
//! \brief Evaluates first derivatives of the function.
|
||||
utl::matrix3d<Real> gradient(const Vec3& X) const;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
//==============================================================================
|
||||
|
||||
#include "ExprFunctions.h"
|
||||
#include "TensorFunction.h"
|
||||
#include "Functions.h"
|
||||
#include "matrixnd.h"
|
||||
|
||||
|
@ -19,6 +20,7 @@
|
|||
#include <cstdlib>
|
||||
#include <cmath>
|
||||
|
||||
|
||||
TEST(TestScalarFunc, ParseDerivative)
|
||||
{
|
||||
const char* func1 = "sin(1.5*t)*t";
|
||||
|
@ -167,6 +169,139 @@ TEST(TestVecFunc, Evaluate)
|
|||
}
|
||||
|
||||
|
||||
TEST(TestVecFunction, Gradient2D)
{
  // 2D vector function and its analytic partial derivatives.
  const char* g = "sin(x)*sin(y) | x*x*y*y";
  const char* g_x = "cos(x)*sin(y) | 2*x*y*y";
  const char* g_y = "sin(x)*cos(y) | 2*x*x*y";

  VecFuncExpr f(g);
  f.addDerivative(g_x,"",1);
  f.addDerivative(g_y,"",2);

  EXPECT_TRUE(f.isConstant());

  for (double x : {0.1, 0.2, 0.3})
    for (double y : {0.5, 0.6, 0.7}) {
      const Vec3 X(x,y);
      // Expected gradient: column d holds the d-th partial derivative.
      const Tensor expected({cos(x)*sin(y), 2*x*y*y,
                             sin(x)*cos(y), 2*x*x*y});

      const Tensor grad = f.gradient(X);
      for (size_t d = 1; d <= 2; ++d) {
        // deriv() and gradient() must agree with the analytic reference.
        const Vec3 dX = f.deriv(X,d);
        for (size_t i = 1; i <= 2; ++i) {
          EXPECT_DOUBLE_EQ(dX[i-1], expected(i,d));
          EXPECT_DOUBLE_EQ(grad(i,d), expected(i,d));
        }
      }
    }
}
|
||||
|
||||
|
||||
TEST(TestVecFunction, Gradient2DFD)
{
  // 2D vector function; derivatives come from central finite differences.
  const char* g = "sin(x)*sin(y) | x*x*y*y";

  const double eps = 1e-6;

  VecFuncExpr f(g,"",eps);

  EXPECT_TRUE(f.isConstant());

  for (double x : {0.1, 0.2, 0.3})
    for (double y : {0.5, 0.6, 0.7}) {
      const Vec3 X(x,y);
      // Perturbed points used to form the central-difference reference.
      const Vec3 Xp(x + 0.5*eps, y + 0.5*eps);
      const Vec3 Xm(x - 0.5*eps, y - 0.5*eps);
      const Tensor expected({(sin(Xp.x) - sin(Xm.x))*sin(y) / eps,
                             (Xp.x*Xp.x - Xm.x*Xm.x)*y*y / eps,
                             sin(x)*(sin(Xp.y) - sin(Xm.y)) / eps,
                             x*x*(Xp.y*Xp.y - Xm.y*Xm.y) / eps});

      const Tensor grad = f.gradient(X);
      for (size_t d = 1; d <= 2; ++d) {
        // Both APIs must reproduce the finite-difference reference.
        const Vec3 dX = f.deriv(X,d);
        for (size_t i = 1; i <= 2; ++i) {
          EXPECT_NEAR(dX[i-1], expected(i,d), 1e-8);
          EXPECT_NEAR(grad(i,d), expected(i,d), 1e-8);
        }
      }
    }
}
|
||||
|
||||
|
||||
TEST(TestVecFunction, Gradient3D)
{
  // 3D vector function and its analytic partial derivatives.
  const char* g = "sin(x)*sin(y)*sin(z) | x*x*y*y*z*z*z | exp(-x)*exp(2*y)*exp(z*z)";
  const char* g_x = "cos(x)*sin(y)*sin(z) | 2*x*y*y*z*z*z | -exp(-x)*exp(2*y)*exp(z*z)";
  const char* g_y = "sin(x)*cos(y)*sin(z) | 2*x*x*y*z*z*z | 2*exp(-x)*exp(2*y)*exp(z*z)";
  const char* g_z = "sin(x)*sin(y)*cos(z) | 3*x*x*y*y*z*z | 2*z*exp(-x)*exp(2*y)*exp(z*z)";

  VecFuncExpr f(g);
  f.addDerivative(g_x,"",1);
  f.addDerivative(g_y,"",2);
  f.addDerivative(g_z,"",3);

  EXPECT_TRUE(f.isConstant());

  for (double x : {0.1, 0.2, 0.3})
    for (double y : {0.5, 0.6, 0.7})
      for (double z : {0.8, 0.9, 1.0}) {
        const Vec3 X(x,y,z);
        // Expected gradient: column d holds the d-th partial derivative.
        const Tensor expected({cos(x)*sin(y)*sin(z), 2*x*y*y*z*z*z, -exp(-x)*exp(2*y)*exp(z*z),
                               sin(x)*cos(y)*sin(z), 2*x*x*y*z*z*z, 2*exp(-x)*exp(2*y)*exp(z*z),
                               sin(x)*sin(y)*cos(z), 3*x*x*y*y*z*z, 2*z*exp(-x)*exp(2*y)*exp(z*z)});

        const Tensor grad = f.gradient(X);
        for (size_t d = 1; d <= 3; ++d) {
          // deriv() and gradient() must agree with the analytic reference.
          const Vec3 dX = f.deriv(X,d);
          for (size_t i = 1; i <= 3; ++i) {
            EXPECT_DOUBLE_EQ(dX[i-1], expected(i,d));
            EXPECT_DOUBLE_EQ(grad(i,d), expected(i,d));
          }
        }
      }
}
|
||||
|
||||
|
||||
TEST(TestVecFunction, Gradient3DFD)
{
  // 3D vector function; derivatives come from central finite differences.
  const char* g = "sin(x)*sin(y)*sin(z) | x*x*y*y*z*z*z | exp(-x)*exp(2*y)*exp(z*z)";

  const double eps = 1e-6;
  VecFuncExpr f(g,"",eps);

  EXPECT_TRUE(f.isConstant());

  for (double x : {0.1, 0.2, 0.3})
    for (double y : {0.5, 0.6, 0.7})
      for (double z : {0.8, 0.9, 1.0}) {
        const Vec3 X(x,y,z);
        // Perturbed points used to form the central-difference reference.
        const Vec3 Xp(x + 0.5*eps, y + 0.5*eps, z + 0.5*eps);
        const Vec3 Xm(x - 0.5*eps, y - 0.5*eps, z - 0.5*eps);
        const Tensor expected({(sin(Xp.x) - sin(Xm.x))*sin(y)*sin(z) / eps,
                               (Xp.x*Xp.x - Xm.x*Xm.x)*X.y*X.y*X.z*X.z*X.z / eps,
                               (exp(-Xp.x) - exp(-Xm.x))*exp(2*y)*exp(z*z) / eps,
                               sin(x)*(sin(Xp.y) - sin(Xm.y))*sin(z) / eps,
                               x*x*(Xp.y*Xp.y - Xm.y*Xm.y)*z*z*z / eps,
                               exp(-x)*(exp(2*Xp.y) - exp(2*Xm.y))*exp(z*z) / eps,
                               sin(x)*sin(y)*(sin(Xp.z) - sin(Xm.z)) / eps,
                               x*x*y*y*(Xp.z*Xp.z*Xp.z - Xm.z*Xm.z*Xm.z) / eps,
                               exp(-x)*exp(2*y)*(exp(Xp.z*Xp.z) - exp(Xm.z*Xm.z)) / eps});

        const Tensor grad = f.gradient(X);
        for (size_t d = 1; d <= 3; ++d) {
          // Both APIs must reproduce the finite-difference reference.
          const Vec3 dX = f.deriv(X,d);
          for (size_t i = 1; i <= 3; ++i) {
            EXPECT_NEAR(dX[i-1], expected(i,d), 1e-8);
            EXPECT_NEAR(grad(i,d), expected(i,d), 1e-8);
          }
        }
      }
}
|
||||
|
||||
|
||||
TEST(TestVecFuncExpr, NumDimensions)
|
||||
{
|
||||
const char* func1 = "x";
|
||||
|
@ -223,11 +358,14 @@ TEST(TestTensorFunction, Gradient2D)
|
|||
r.fill(std::array{cos(x)*sin(y), 2*x*y*y, exp(x)*exp(2*y), -2*exp(-2*x)*exp(y),
|
||||
sin(x)*cos(y), 2*x*x*y, 2*exp(x)*exp(2*y), exp(-2*x)*exp(y)}.data());
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 2; ++d) {
|
||||
const Tensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 2; ++i)
|
||||
for (size_t j = 1; j <= 2; ++j)
|
||||
for (size_t j = 1; j <= 2; ++j) {
|
||||
EXPECT_DOUBLE_EQ(dx(i,j), r(i,j,d));
|
||||
EXPECT_DOUBLE_EQ(grad(i,j,d), r(i,j,d));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -258,11 +396,14 @@ TEST(TestTensorFunction, Gradient2DFD)
|
|||
exp(-2*x)*(exp(Xp.y) - exp(Xm.y))}.data());
|
||||
r.multiply(1.0 / eps);
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 2; ++d) {
|
||||
const Tensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 2; ++i)
|
||||
for (size_t j = 1; j <= 2; ++j)
|
||||
for (size_t j = 1; j <= 2; ++j) {
|
||||
EXPECT_NEAR(dx(i,j), r(i,j,d), 1e-8);
|
||||
EXPECT_NEAR(grad(i,j,d), r(i,j,d), 1e-8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -305,11 +446,14 @@ TEST(TestTensorFunction, Gradient3D)
|
|||
exp(-2*x)*exp(y), y*z, x*y*2*z,
|
||||
0.0, 0.0, 1.0}.data());
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 3; ++d) {
|
||||
const Tensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 3; ++i)
|
||||
for (size_t j = 1; j <= 3; ++j)
|
||||
for (size_t j = 1; j <= 3; ++j) {
|
||||
EXPECT_DOUBLE_EQ(dx(i,j), r(i,j,d));
|
||||
EXPECT_DOUBLE_EQ(grad(i,j,d), r(i,j,d));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -362,11 +506,14 @@ TEST(TestTensorFunction, Gradient3DFD)
|
|||
Xp.z - Xm.z}.data());
|
||||
r.multiply(1.0 / eps);
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 3; ++d) {
|
||||
const Tensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 3; ++i)
|
||||
for (size_t j = 1; j <= 3; ++j)
|
||||
for (size_t j = 1; j <= 3; ++j) {
|
||||
EXPECT_NEAR(dx(i,j), r(i,j,d), 1e-8);
|
||||
EXPECT_NEAR(grad(i,j,d), r(i,j,d), 1e-8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -598,11 +745,14 @@ TEST(TestSTensorFunction, Gradient2D)
|
|||
r.fill(std::array{cos(x)*sin(y), 2*x*y*y, 2*x*y*y, exp(x)*exp(2*y),
|
||||
sin(x)*cos(y), 2*x*x*y, 2*x*x*y, 2*exp(x)*exp(2*y)}.data());
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 2; ++d) {
|
||||
const SymmTensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 2; ++i)
|
||||
for (size_t j = 1; j <= 2; ++j)
|
||||
for (size_t j = 1; j <= 2; ++j) {
|
||||
EXPECT_DOUBLE_EQ(dx(i,j), r(i,j,d));
|
||||
EXPECT_DOUBLE_EQ(grad(i,j,d), r(i,j,d));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -663,11 +813,14 @@ TEST(TestSTensorFunction, Gradient2DFD)
|
|||
exp(x)*(exp(2*Xp.y) - exp(2*Xm.y))}.data());
|
||||
r.multiply(1.0 / eps);
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 2; ++d) {
|
||||
const SymmTensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 2; ++i)
|
||||
for (size_t j = 1; j <= 2; ++j)
|
||||
for (size_t j = 1; j <= 2; ++j) {
|
||||
EXPECT_NEAR(dx(i,j), r(i,j,d), 1e-8);
|
||||
EXPECT_NEAR(grad(i,j,d), r(i,j,d), 1e-8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -708,11 +861,14 @@ TEST(TestSTensorFunction, Gradient3D)
|
|||
x*y, exp(x)*exp(2*y)*exp(z), x*x*y,
|
||||
1.0, x*x*y, x*x*y*y*2*z}.data());
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 3; ++d) {
|
||||
const SymmTensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 3; ++i)
|
||||
for (size_t j = 1; j <= 3; ++j)
|
||||
for (size_t j = 1; j <= 3; ++j) {
|
||||
EXPECT_DOUBLE_EQ(dx(i,j), r(i,j,d));
|
||||
EXPECT_DOUBLE_EQ(grad(i,j,d), r(i,j,d));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -766,11 +922,14 @@ TEST(TestSTensorFunction, Gradient3DFD)
|
|||
x*x*y*y*(Xp.z*Xp.z - Xm.z*Xm.z)}.data());
|
||||
r.multiply(1.0 / eps);
|
||||
|
||||
const utl::matrix3d<Real> grad = f.gradient(X);
|
||||
for (size_t d = 1; d <= 3; ++d) {
|
||||
const SymmTensor dx = f.deriv(X,d);
|
||||
for (size_t i = 1; i <= 3; ++i)
|
||||
for (size_t j = 1; j <= 3; ++j)
|
||||
for (size_t j = 1; j <= 3; ++j) {
|
||||
EXPECT_NEAR(dx(i,j), r(i,j,d), 1e-8);
|
||||
EXPECT_NEAR(grad(i,j,d), r(i,j,d), 1e-8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user