Added OMP pragmas

babrodtk 2015-09-01 15:56:46 +02:00
parent 57deb18dc4
commit 1d6f12e870
2 changed files with 16 additions and 0 deletions
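
The change applies one pattern throughout both files: loops whose iterations are independent (per Jacobian block, per well, per cell) get an OpenMP parallel for, with schedule(static) where the iterations do uniform work and schedule(dynamic) where the per-iteration cost varies, such as the fastSparseProduct loops. A minimal sketch of the pattern follows, using a plain std::vector<std::vector<double>> as a stand-in for the OPM Jacobian blocks (illustrative only, not the actual AutoDiffBlock code):

    #include <vector>

    // Illustrative stand-in for a loop over independent Jacobian blocks:
    // each iteration touches only jac[block], so no synchronization is
    // needed and a bare "parallel for" is valid.
    void scaleBlocks(std::vector<std::vector<double>>& jac, const double factor)
    {
        const int num_blocks = static_cast<int>(jac.size());
    #pragma omp parallel for schedule(static)
        for (int block = 0; block < num_blocks; ++block) {
            for (double& entry : jac[block]) {
                entry *= factor;
            }
        }
    }

If OpenMP is not enabled at compile time (e.g. -fopenmp for GCC/Clang), the pragma is ignored and the loop runs serially, which is why additions like these are behaviour-preserving for non-OpenMP builds.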

View File

@@ -223,6 +223,7 @@ namespace Opm
assert (value().size() == rhs.value().size());
const int num_blocks = numBlocks();
#pragma omp parallel for schedule(static)
for (int block = 0; block < num_blocks; ++block) {
assert(jac_[block].rows() == rhs.jac_[block].rows());
assert(jac_[block].cols() == rhs.jac_[block].cols());
@@ -241,6 +242,7 @@ namespace Opm
if (jac_.empty()) {
const int num_blocks = rhs.numBlocks();
jac_.resize(num_blocks);
#pragma omp parallel for schedule(static)
for (int block = 0; block < num_blocks; ++block) {
jac_[block] = rhs.jac_[block] * (-1.0);
}
@@ -249,6 +251,7 @@ namespace Opm
assert (value().size() == rhs.value().size());
const int num_blocks = numBlocks();
#pragma omp parallel for schedule(static)
for (int block = 0; block < num_blocks; ++block) {
assert(jac_[block].rows() == rhs.jac_[block].rows());
assert(jac_[block].cols() == rhs.jac_[block].cols());
@@ -276,6 +279,7 @@ namespace Opm
std::vector<M> jac = jac_;
assert(numBlocks() == rhs.numBlocks());
int num_blocks = numBlocks();
#pragma omp parallel for schedule(static)
for (int block = 0; block < num_blocks; ++block) {
assert(jac[block].rows() == rhs.jac_[block].rows());
assert(jac[block].cols() == rhs.jac_[block].cols());
@@ -299,6 +303,7 @@ namespace Opm
std::vector<M> jac = jac_;
assert(numBlocks() == rhs.numBlocks());
int num_blocks = numBlocks();
#pragma omp parallel for schedule(static)
for (int block = 0; block < num_blocks; ++block) {
assert(jac[block].rows() == rhs.jac_[block].rows());
assert(jac[block].cols() == rhs.jac_[block].cols());
@@ -324,6 +329,7 @@ namespace Opm
assert(numBlocks() == rhs.numBlocks());
M D1(val_.matrix().asDiagonal());
M D2(rhs.val_.matrix().asDiagonal());
#pragma omp parallel for schedule(dynamic)
for (int block = 0; block < num_blocks; ++block) {
assert(jac_[block].rows() == rhs.jac_[block].rows());
assert(jac_[block].cols() == rhs.jac_[block].cols());
@@ -360,6 +366,7 @@ namespace Opm
M D1(val_.matrix().asDiagonal());
M D2(rhs.val_.matrix().asDiagonal());
M D3((1.0/(rhs.val_*rhs.val_)).matrix().asDiagonal());
#pragma omp parallel for schedule(dynamic)
for (int block = 0; block < num_blocks; ++block) {
assert(jac_[block].rows() == rhs.jac_[block].rows());
assert(jac_[block].cols() == rhs.jac_[block].cols());
@@ -484,6 +491,7 @@ namespace Opm
int num_blocks = rhs.numBlocks();
std::vector<typename AutoDiffBlock<Scalar>::M> jac(num_blocks);
assert(lhs.cols() == rhs.value().rows());
#pragma omp parallel for schedule(dynamic)
for (int block = 0; block < num_blocks; ++block) {
fastSparseProduct(lhs, rhs.derivative()[block], jac[block]);
}
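
The scheduling clause differs between the hunks above: schedule(static) is used where iterations do comparable work (element-wise per-block updates), while schedule(dynamic) is used where the cost per block can vary, e.g. the fastSparseProduct calls whose cost depends on each block's sparsity pattern. The self-contained sketch below (function names and the artificial workload are illustrative, not from the commit) shows the difference on a deliberately unbalanced loop; build with OpenMP enabled, e.g. g++ -O2 -fopenmp:

    #include <omp.h>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Illustrative only: iterations with deliberately uneven cost, so the
    // effect of the scheduling clause is visible.
    double unevenWork(int i)
    {
        double s = 0.0;
        for (int k = 0; k < (i % 8 + 1) * 100000; ++k) {
            s += std::sin(k * 1e-3);
        }
        return s;
    }

    int main()
    {
        const int n = 64;
        std::vector<double> out(n);

        double t0 = omp_get_wtime();
    #pragma omp parallel for schedule(static)
        for (int i = 0; i < n; ++i) { out[i] = unevenWork(i); }
        double t1 = omp_get_wtime();

    #pragma omp parallel for schedule(dynamic)
        for (int i = 0; i < n; ++i) { out[i] = unevenWork(i); }
        double t2 = omp_get_wtime();

        std::printf("checksum %.3f\n", out[0] + out[n - 1]);
        std::printf("static: %.3fs  dynamic: %.3fs\n", t1 - t0, t2 - t1);
        return 0;
    }

With static scheduling each thread gets a fixed contiguous chunk up front, so threads that draw the cheap iterations sit idle; dynamic scheduling hands out iterations as threads finish, at the price of a little runtime overhead.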

View File

@@ -880,6 +880,7 @@ namespace detail {
trans_all << transi, trans_nnc;
const std::vector<ADB> kr = asImpl().computeRelPerm(state);
#pragma omp parallel for schedule(static)
for (int phaseIdx = 0; phaseIdx < fluid_.numPhases(); ++phaseIdx) {
asImpl().computeMassFlux(phaseIdx, trans_all, kr[canph_[phaseIdx]], state.canonical_phase_pressures[canph_[phaseIdx]], state);
@@ -1259,6 +1260,7 @@ namespace detail {
const int nw = wells.number_of_wells;
ADB::V retval = ADB::V::Zero(nw);
#pragma omp parallel for schedule(static)
for (int i=0; i<nw; ++i) {
retval[i] = computeHydrostaticCorrection(wells, i, vfp_ref_depth[i], well_perforation_densities, gravity);
}
@@ -1312,6 +1314,7 @@ namespace detail {
const int np = wells().number_of_phases;
const int nw = wells().number_of_wells;
const Opm::PhaseUsage& pu = fluid_.phaseUsage();
#pragma omp parallel for schedule(dynamic)
for (int w = 0; w < nw; ++w) {
const WellControls* wc = wells().ctrls[w];
// The current control in the well state overrides
@@ -2022,6 +2025,7 @@ namespace detail {
// Thp update
const Opm::PhaseUsage& pu = fluid_.phaseUsage();
//Loop over all wells
#pragma omp parallel for schedule(static)
for (int w=0; w<nw; ++w) {
const WellControls* wc = wells().ctrls[w];
const int nwc = well_controls_get_num(wc);
@@ -2678,6 +2682,7 @@ namespace detail {
if (rock_comp_props_ && rock_comp_props_->isActive()) {
V pm(n);
V dpm(n);
#pragma omp parallel for schedule(static)
for (int i = 0; i < n; ++i) {
pm[i] = rock_comp_props_->poroMult(p.value()[i]);
dpm[i] = rock_comp_props_->poroMultDeriv(p.value()[i]);
@@ -2685,6 +2690,7 @@ namespace detail {
ADB::M dpm_diag(dpm.matrix().asDiagonal());
const int num_blocks = p.numBlocks();
std::vector<ADB::M> jacs(num_blocks);
#pragma omp parallel for schedule(dynamic)
for (int block = 0; block < num_blocks; ++block) {
fastSparseProduct(dpm_diag, p.derivative()[block], jacs[block]);
}
@@ -2706,6 +2712,7 @@ namespace detail {
if (rock_comp_props_ && rock_comp_props_->isActive()) {
V tm(n);
V dtm(n);
#pragma omp parallel for schedule(static)
for (int i = 0; i < n; ++i) {
tm[i] = rock_comp_props_->transMult(p.value()[i]);
dtm[i] = rock_comp_props_->transMultDeriv(p.value()[i]);
@@ -2713,6 +2720,7 @@ namespace detail {
ADB::M dtm_diag(dtm.matrix().asDiagonal());
const int num_blocks = p.numBlocks();
std::vector<ADB::M> jacs(num_blocks);
#pragma omp parallel for schedule(dynamic)
for (int block = 0; block < num_blocks; ++block) {
fastSparseProduct(dtm_diag, p.derivative()[block], jacs[block]);
}
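
What makes the bare parallel-for loops in this second file safe is that each iteration writes only its own output slot (retval[i], pm[i]/dpm[i], jacs[block]) and does not touch the other iterations' data; the called property functions must of course also be safe to invoke concurrently. The sketch below mirrors that shape, with placeholder arithmetic standing in for poroMult/poroMultDeriv (the function name and the formulas are illustrative, not the OPM implementation):

    #include <vector>

    // Hypothetical per-element transform mirroring the poroMult/transMult
    // loops above: every iteration writes only pm[i] and dpm[i], so a bare
    // parallel for needs no locking.
    void evaluateMultipliers(const std::vector<double>& p,
                             std::vector<double>& pm,
                             std::vector<double>& dpm)
    {
        const int n = static_cast<int>(p.size());
        pm.resize(n);
        dpm.resize(n);
    #pragma omp parallel for schedule(static)
        for (int i = 0; i < n; ++i) {
            pm[i]  = 1.0 + 1e-5 * p[i];   // placeholder for poroMult(p[i])
            dpm[i] = 1e-5;                // placeholder for poroMultDeriv(p[i])
        }
    }

If a callee instead mutated shared state (caches, logging, well state), the loop would need a critical section, per-thread storage, or to stay serial.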