/*
Copyright 2014 Dr. Markus Blatt - HPC-Simulation-Software & Services
Copyright 2014 Statoil ASA
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_DUNEISTLTESTHELPERS_HEADER
#define OPM_DUNEISTLTESTHELPERS_HEADER
#include <config.h>
// MPI header
#if HAVE_MPI
#include <mpi.h>
#else
#error "This file needs to be compiled with MPI support!"
#endif
#include <dune/common/version.hh>
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
#include <dune/common/parallel/communication.hh>
#include <dune/common/parallel/mpicommunication.hh>
#else
#include <dune/common/parallel/collectivecommunication.hh>
#include <dune/common/parallel/mpicollectivecommunication.hh>
#endif
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/plocalindex.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <boost/test/unit_test.hpp>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <memory>
#include <tuple>
#include <vector>
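
// Global Boost.Test fixture: initializes MPI before the tests run and
// finalizes it when the test program exits.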
struct MPIFixture {
    MPIFixture()
    {
        int m_argc = boost::unit_test::framework::master_test_suite().argc;
        char** m_argv = boost::unit_test::framework::master_test_suite().argv;
        MPI_Init(&m_argc, &m_argv);
    }
    ~MPIFixture()
    {
        MPI_Finalize();
    }
};
BOOST_GLOBAL_FIXTURE(MPIFixture);
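
/// \brief A simple sparse matrix in compressed row storage (CRS) form, used by the tests.
///
/// data holds the nonzero values, colIndex the local column of each value, and
/// rowStart[r] is the offset in data/colIndex where row r begins; the extra
/// last entry of rowStart marks the end of the final row.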
struct MyMatrix
{
    MyMatrix(std::size_t rows, std::size_t nnz)
        : data(nnz, 0.0), rowStart(rows+1, -1),
          colIndex(nnz, -1)
    {}
    MyMatrix()
        : data(), rowStart(), colIndex()
    {}
    std::vector<double> data;
    std::vector<int> rowStart;
    std::vector<int> colIndex;
};
typedef int LocalId;
typedef int GlobalId;
typedef Dune::OwnerOverlapCopyCommunication<GlobalId,LocalId> Communication;
typedef Dune::OwnerOverlapCopyAttributeSet GridAttributes;
typedef GridAttributes::AttributeSet GridFlag;
typedef Dune::ParallelLocalIndex<GridFlag> LocalIndex;
/// \brief Sets up a parallel 1D Laplacian.
///
/// The process stores the unknowns with indices in the range [start, end).
/// As we use an overlapping domain decomposition, the process owns the indices
/// in the range [istart, iend). If every process used only the indices in this
/// range, the owner ranges would form a partitioning of the whole index set.
/// \tparam I The type of the parallel index set (for convenience)
/// \param indexset The parallel index set for marking owner and copy region.
/// \param N The global number of unknowns of the system.
/// \param start The first index stored on this process
/// \param end One past the last index stored on this process
/// \param istart The first index that the process owns.
/// \param iend One past the last index the process owns.
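///
/// A minimal usage sketch (illustrative only; the names N and comm are
/// assumptions of this example, not provided by the header):
/// \code
///   int start, istart, iend, end;
///   std::tie(start, istart, iend, end) = computeRegions(N);
///   Communication comm(MPI_COMM_WORLD);
///   std::shared_ptr<MyMatrix> mat =
///       create1DLaplacian(comm.indexSet(), N, start, end, istart, iend);
///   comm.remoteIndices().rebuild<false>();
/// \endcode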
template<class I>
std::shared_ptr<MyMatrix> create1DLaplacian(I& indexset, int N, int start, int end,
                                            int istart, int iend)
{
    indexset.beginResize();
    MyMatrix* mm=new MyMatrix(end-start, (end-start)*3);
    int nnz=0;
    mm->rowStart[0]=0;
    assert(start==0 || start<istart);
    assert(end==N || iend<end);

    for(int row=start, localRow=0; row<end; row++, localRow++)
    {
        if(row<istart || row>=iend)
        {
            // We are in the overlap region of the grid,
            // therefore we set up the system such that the
            // right hand side will equal the left hand side
            // of the linear system.
            if(localRow>0)
            {
                mm->colIndex[nnz]=localRow-1;
                mm->data[nnz++]=0;
            }
            mm->colIndex[nnz]=localRow;
            mm->data[nnz++]=1.0;
            indexset.add(row, LocalIndex(localRow, GridAttributes::copy, true));
            if(localRow<end-start-1)
            {
                mm->colIndex[nnz]=localRow+1;
                mm->data[nnz++]=0;
            }
            mm->rowStart[localRow+1]=nnz;
            continue;
        }

        if(row>0)
        {
            mm->colIndex[nnz]=localRow-1;
            mm->data[nnz++]=-1;
        }
        mm->colIndex[nnz]=localRow;
        mm->data[nnz++]=2; // diagonal of the 1D Laplacian stencil [-1 2 -1]
        if(row<N-1)
        {
            mm->colIndex[nnz]=localRow+1;
            mm->data[nnz++]=-1;
        }
        mm->rowStart[localRow+1]=nnz;
        indexset.add(row, LocalIndex(localRow, GridAttributes::owner, true));
    }
    mm->data.resize(nnz);
    mm->colIndex.resize(nnz);
    indexset.endResize();
    return std::shared_ptr<MyMatrix>(mm);
}
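
/// \brief Fills x with random values and assembles the matching right hand side b=A*x.
///
/// x is first made consistent on the copy (overlap) rows via pinfo.copyOwnerToAll,
/// so all processes agree on the exact solution of the resulting system.
/// Illustrative call, reusing comm and mat from the sketch above (assumed names):
/// \code
///   std::vector<double> x, b;
///   createRandomVectors(comm, end-start, x, b, *mat);
/// \endcode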
template<class O>
void createRandomVectors(O& pinfo, int NN, std::vector<double>& x, std::vector<double>& b,
                         const MyMatrix& mat)
{
    x.resize(NN);
    for(auto entry=x.begin(), end=x.end(); entry!=end; ++entry)
        *entry=((double) (rand()%100))/10.0;

    pinfo.copyOwnerToAll(x,x);

    b.resize(NN);
    // Construct the right hand side as b=A*x
    std::fill(b.begin(), b.end(), 0.0);
    for(std::size_t row=0; row<mat.rowStart.size()-1; ++row)
    {
        for(int i=mat.rowStart[row], colEnd=mat.rowStart[row+1]; i!=colEnd; ++i)
        {
            b[row] += mat.data[i]*x[mat.colIndex[i]];
        }
    }
}

std::tuple<int,int,int,int> computeRegions(int N=100)
{
    int procs, rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &procs);
    int n = N/procs;      // number of unknowns per process
    int bigger = N%procs; // number of processes with n+1 unknowns
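    // Illustrative split (not computed here): N=10 on procs=3 gives n=3, bigger=1,
    // so rank 0 owns [0,4) while ranks 1 and 2 own [4,7) and [7,10) respectively.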
    int start, end, istart, iend;

    // Compute owner region
    if(rank<bigger) {
        istart = rank*(n+1);
        iend = istart + n + 1;
    } else {
        istart = bigger*(n+1) + (rank-bigger)*n;
        iend = istart + n;
    }

    // Compute overlap region
    if(istart>0)
        start = istart - 1;
    else
        start = istart;

    if(iend<N)
        end = iend + 1;
    else
        end = iend;

    return std::make_tuple(start, istart, iend, end);
}

#endif // OPM_DUNEISTLTESTHELPERS_HEADER