mirror of
https://github.com/OPM/opm-simulators.git
synced 2024-12-18 21:43:27 -06:00
[bugfix] Account for shut completions when checking well in a parallel run.
Previously, a well with just some shut completions erroneously triggered an exception in parallel runs. This is fixed with this commit. Due to the logic, shut completions will always be marked as existing on a process. (Initially all completions are marked as found. For each open completion we check whether the cartesian index belongs to the local grid; if that is not the case we mark it as not found.) Therefore we now check whether the found number of completions is either the number of shut completions or the number of all completions. In the former case the well is not stored on this process, and in the latter case it is. In all other cases we throw an exception.
This commit is contained in:
parent
2dc9d78286
commit
4fc36e58fb
@ -136,7 +136,9 @@ void WellsManager::createWellsFromSpecs(std::vector<WellConstPtr>& wells, size_t
|
|||||||
|
|
||||||
{ // COMPDAT handling
|
{ // COMPDAT handling
|
||||||
CompletionSetConstPtr completionSet = well->getCompletions(timeStep);
|
CompletionSetConstPtr completionSet = well->getCompletions(timeStep);
|
||||||
|
// shut completions and open ones stored in this process will have 1 others 0.
|
||||||
std::vector<std::size_t> completion_on_proc(completionSet->size(), 1);
|
std::vector<std::size_t> completion_on_proc(completionSet->size(), 1);
|
||||||
|
std::size_t shut_completions_number = 0;
|
||||||
for (size_t c=0; c<completionSet->size(); c++) {
|
for (size_t c=0; c<completionSet->size(); c++) {
|
||||||
CompletionConstPtr completion = completionSet->get(c);
|
CompletionConstPtr completion = completionSet->get(c);
|
||||||
if (completion->getState() == WellCompletion::OPEN) {
|
if (completion->getState() == WellCompletion::OPEN) {
|
||||||
@ -195,6 +197,7 @@ void WellsManager::createWellsFromSpecs(std::vector<WellConstPtr>& wells, size_t
|
|||||||
wellperf_data[well_index].push_back(pd);
|
wellperf_data[well_index].push_back(pd);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
++shut_completions_number;
|
||||||
if (completion->getState() != WellCompletion::SHUT) {
|
if (completion->getState() != WellCompletion::SHUT) {
|
||||||
OPM_THROW(std::runtime_error, "Completion state: " << WellCompletion::StateEnum2String( completion->getState() ) << " not handled");
|
OPM_THROW(std::runtime_error, "Completion state: " << WellCompletion::StateEnum2String( completion->getState() ) << " not handled");
|
||||||
}
|
}
|
||||||
@ -205,7 +208,7 @@ void WellsManager::createWellsFromSpecs(std::vector<WellConstPtr>& wells, size_t
|
|||||||
// Set wells that are on other processor to SHUT.
|
// Set wells that are on other processor to SHUT.
|
||||||
std::size_t sum_completions_on_proc = std::accumulate(completion_on_proc.begin(),
|
std::size_t sum_completions_on_proc = std::accumulate(completion_on_proc.begin(),
|
||||||
completion_on_proc.end(),0);
|
completion_on_proc.end(),0);
|
||||||
if ( sum_completions_on_proc == 0 )
|
if ( sum_completions_on_proc == shut_completions_number )
|
||||||
{
|
{
|
||||||
// Mark well as not existent on this process
|
// Mark well as not existent on this process
|
||||||
wells_on_proc[wellIter-wells.begin()] = 0;
|
wells_on_proc[wellIter-wells.begin()] = 0;
|
||||||
@ -214,7 +217,10 @@ void WellsManager::createWellsFromSpecs(std::vector<WellConstPtr>& wells, size_t
|
|||||||
// Check that the complete well is on this process
|
// Check that the complete well is on this process
|
||||||
if( sum_completions_on_proc < completionSet->size() )
|
if( sum_completions_on_proc < completionSet->size() )
|
||||||
{
|
{
|
||||||
OPM_THROW(std::runtime_error, "Wells must be completely on processor!");
|
OPM_THROW(std::runtime_error, "Each well must be completely stored
|
||||||
|
<<"on processor! Not the case for "<< well->name()<<": "
|
||||||
|
<<completionSet->size()-shut_completions_number-sum_completions_on_proc
|
||||||
|
<<" completions missing.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user