Updated ERT, opm-common and opm-parser

ERT 80650d05d49b419ec74ba316312313dfc6ba0875
opm-common e7db252f3e310a04718f315e6cdb9d6c428c5762
opm-parser 817722b99baa62cc63a5f859cc90b951f0b0636e
This commit is contained in:
Magne Sjaastad
2016-06-13 14:18:50 +02:00
parent 6a8ffb7c6b
commit da76b01611
629 changed files with 10346 additions and 7232 deletions

View File

@@ -23,4 +23,5 @@ scratch.sparsebundle
*.iml
*.DS_Store
__ert_lib_path.py
__ert_lib_info.py

View File

@@ -1,38 +1,67 @@
language: python
language: c
python:
- 2.7_with_system_site_packages
compiler:
- gcc
matrix:
fast_finish: true
allow_failures:
- os: osx
include:
- os: osx
osx_image: xcode7.3
compiler: clang
- os: linux
compiler: gcc
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- liblapack-dev
- texlive-latex-base
- valgrind
- python-qt4
- python-qt4-gl
before_install:
- wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
- chmod +x miniconda.sh
- ./miniconda.sh -b
- export PATH=/home/travis/miniconda/bin:$PATH
- conda update --yes conda
- gcc-4.8
- g++-4.8
- clang
install:
- conda install --yes numpy scipy matplotlib pandas pyopengl sphinx
- if [[ "$CC" == "gcc" ]]; then export CXX="g++-4.8"; fi
- export TRAVIS_PYTHON_VERSION="2.7"
# We do this conditionally because it saves us some downloading if the version is the same.
- if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
wget https://repo.continuum.io/miniconda/Miniconda-latest-MacOSX-x86_64.sh -O miniconda.sh;
else
wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh;
fi
else
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
wget https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh;
else
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
fi
fi
- bash miniconda.sh -b -p $HOME/miniconda
- export CONDA_HOME="$HOME/miniconda"
- export PATH="$CONDA_HOME/bin:$PATH"
- hash -r
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
# Useful for debugging any issues with conda
- conda info -a
- conda install pyqt scipy=0.16.1 numpy pandas matplotlib
env:
global:
- ERT_SHOW_BACKTRACE=1
before_script:
- sudo apt-get install libplplot-dev
- sudo apt-get install python-tk
- mkdir build
- cd build
- echo "WORKFLOW_JOB_DIRECTORY ../devel/share/workflows/jobs/internal/config" > DEFAULT_SITE_CONFIG_FILE
- echo "WORKFLOW_JOB_DIRECTORY ../devel/share/workflows/jobs/internal-gui/config" >> DEFAULT_SITE_CONFIG_FILE
- echo "JOB_SCRIPT ../devel/share/bin/job_dispatch.py" >> DEFAULT_SITE_CONFIG_FILE
- cmake -DPYTHON_INSTALL_PREFIX=python -DBUILD_ERT=ON -DERT_BUILD_GUI=ON -DBUILD_ENS_PLOT=ON -DBUILD_TESTS=ON -DBUILD_APPLICATIONS=ON -DUSE_RUNPATH=ON -DBUILD_PYTHON=ON -DERT_USE_OPENMP=ON -DERT_DOC=ON -DSITE_CONFIG_FILE=DEFAULT_SITE_CONFIG_FILE ../devel
- echo "MAX_RUNNING_LOCAL 1" >> DEFAULT_SITE_CONFIG_FILE
- cmake -DPYTHON_INSTALL_PREFIX=python -DBUILD_ERT=ON -DERT_BUILD_GUI=ON -DBUILD_TESTS=ON -DBUILD_APPLICATIONS=ON -DUSE_RUNPATH=ON -DBUILD_PYTHON=ON -DERT_USE_OPENMP=ON -DERT_DOC=OFF -DSITE_CONFIG_FILE=DEFAULT_SITE_CONFIG_FILE -DERT_BUILD_CXX=ON ../devel
script: make && ctest --output-on-failure -LE StatoilData

14
ThirdParty/Ert/appveyor.yml vendored Normal file
View File

@@ -0,0 +1,14 @@
version: 1.0.{build}
clone_depth: 1
build_script:
- cmd: >-
cd devel
mkdir build
cd build
cmake .. -G"Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF -DERT_BUILD_CXX=OFF -DBUILD_PYTHON=OFF -DBUILD_APPLICATIONS=ON
msbuild /m /p:Configuration=Release /p:Platform="x64" ERT.sln

View File

@@ -13,8 +13,8 @@
include directory as used by VisualStudio.
*/
#ifndef __STDBOOL_H__
#define __STDBOOL_H__
#ifndef ERT_STDBOOL_H
#define ERT_STDBOOL_H
#ifndef __cplusplus
typedef int bool;

View File

@@ -1,5 +1,5 @@
#include <math.h>
int main( int argc , char ** argv) {
isfinite(0);
isfinite(0.0);
}

View File

@@ -1,8 +1,3 @@
.. ert documentation master file, created by
sphinx-quickstart on Mon Aug 11 13:14:00 2014.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to ert's documentation!
===============================
@@ -10,7 +5,8 @@ Contents:
.. toctree::
:maxdepth: 1
:numbered:
user/index
code/index
${ERT_DOC_LINK}

View File

@@ -271,11 +271,6 @@ input. The required coordinates are:
tstep: This is the timestep we are after, this numbering is the
ECLIPSE report steps (ECLIPSE is unfortunately not just any FORWARD
model, but still has severe influence on the structure of ERT :-( )
state: In general the data will exist in "two versions" - the
forecast version before the EnKF update and the analyzed version
after the update. The state variable is an instance of the enum
state_enum defined in enkf_types.h
Observe that the state "coordinate" is not treated like a first class
coordinate in the same manner as iens and tstep. Exactly how the state

View File

@@ -8,8 +8,8 @@ Contents:
tutorial/index
keywords/index
magic_strings/index
workflows/index
magic_strings/index
observations/index
distributions/index
localization/index

View File

@@ -1792,139 +1792,6 @@ Keywords related to plotting
Workflows
---------
.. _workflows:
The Forward Model in ERT runs in the context of a single realization, i.e. there is no communication between the different processes, and collective gather operations must be performed by the ERT core program after the forward model has completed. As an alternative to the forward model ERT has a system with workflows. Using workflows you can automate cumbersome normal ERT processes, and also invoke external programs. The workflows are run serially on the workstation actually running ERT, and should not be used for computationally heavy tasks.
Configuring workflows in ERT consists of two steps: installing the jobs which should be available for ERT to use in workflows, and then subsequently assemble one or more jobs, with arguments, in a workflow.
**Workflow jobs**
The workflow jobs are quite similar to the jobs in the forward model, in particular the jobs are described by a configuration file which resembles the one used by the forward model jobs. The workflow jobs can be of two fundamentally different types:
**INTERNAL**
These jobs invoke a function in the address space of the ERT program itself. The functions are called with the main enkf_main instance as a self argument, and can in principle do anything that ERT can do itself. ERT functions which should be possible to invoke like this must be 'marked as exportable' in the ERT code, but that is a small job. The internal jobs have the following sections in their config file:
::
INTERNAL TRUE -- The job will call an internal function of the current running ERT instance.
FUNCTION enkf_main_plot_all -- Name of the ERT function we are calling; must be marked exportable.
MODULE /name/of/shared/library -- Very optional - to load an extra shared library.
**EXTERNAL**
These jobs invoke an external program/script to do the job, this is very similar to the jobs of the forward model. Context must be passed between the main ERT process and the script through the use of string substitution, in particular the 'magic' key <RUNPATH_FILE> has been introduced for this purpose.
::
INTERNAL FALSE -- This is the default - not necessary to include.
EXECUTABLE /path/to/a/program -- Path to a program/script which will be invoked by the job.
In addition to the INTERNAL, FUNCTION, MODULE and EXECUTABLE keys which are used to configure what the job should do there are some keys which can be used to configure the number of arguments and their type. These arguments apply to both internal and external jobs:
::
MIN_ARG 2 -- The job should have at least 2 arguments.
MAX_ARG 3 -- The job should have maximum 3 arguments.
ARG_TYPE 0 INT -- The first argument should be an integer
ARG_TYPE 1 FLOAT -- The second argument should be a float value
ARG_TYPE 2 STRING -- The third argument should be a string - the default.
The MIN_ARG,MAX_ARG and ARG_TYPE arguments are used to validate workflows.
**Example 1 : Plot variables**
::
-- FILE: PLOT --
INTERNAL TRUE
FUNCTION ert_tui_plot_JOB
MIN_ARG 1
This job will use the ERT internal function ert_tui_plot_JOB to plot an ensemble of an arbitrary ERT variable. The job needs at least one argument; there is no upper limit on the number of arguments.
**Example 2 : Run external script**
::
-- FILE: ECL_HIST --
EXECUTABLE Script/ecl_hist.py
MIN_ARG 3
This job will invoke the external script Script/ecl_hist.py; the script should have at least three commandline arguments. The path to the script, Script/ecl_hist.py is interpreted relative to the location of the configuration file.
**Loading workflow jobs into ERT**
Before the jobs can be used in workflows they must be 'loaded' into ERT. This is done with two different ERT keywords:
::
LOAD_WORKFLOW_JOB jobConfigFile JobName
The LOAD_WORKFLOW_JOB keyword will load one workflow. The name of the job is optional, if not provided the job will get name from the configuration file. Alternatively you can use the command WORKFLOW_JOB_DIRECTORY which will load all the jobs in a directory. The command:
::
WORKFLOW_JOB_DIRECTORY /path/to/jobs
will load all the workflow jobs in the /path/to/jobs directory. Observe that all the files in the /path/to/jobs directory should be job configuration files. The jobs loaded in this way will all get the name of the file as the name of the job.
**Complete Workflows**
A workflow is a list of calls to jobs, with additional arguments. The job name should be the first element on each line. Based on the two jobs PLOT and ECL_HIST we can create a small workflow example:
::
PLOT WWCT:OP_1 WWCT:OP_3 PRESSURE:10,10,10
PLOT FGPT FOPT
ECL_HIST <RUNPATH_FILE> <QC_PATH>/<ERTCASE>/wwct_hist WWCT:OP_1 WWCT:OP_2
In this workflow we create plots of the nodes WWCT:OP_1;WWCT:OP_3,PRESSURE:10,10,10,FGPT and FOPT. The plot job we have created in this example is completely general, if we limited ourselves to ECLIPSE summary variables we could get wildcard support. Then we invoke the ECL_HIST example job to create a histogram. See below for documentation of <RUNPATH_FILE>,<QC_PATH> and <ERTCASE>.
**Loading workflows**
Workflows are loaded with the configuration option LOAD_WORKFLOW:
::
LOAD_WORKFLOW /path/to/workflow/WFLOW1
LOAD_WORKFLOW /path/to/workflow/workflow2 WFLOW2
The LOAD_WORKFLOW takes the path to a workflow file as the first argument. By default the workflow will be labeled with the filename internally in ERT, but optionally you can supply a second extra argument which will be used as name for the workflow. Alternatively you can load a workflow interactively.
**Running workflows**
Go to workflow menu and type run.
**Locating the realisations: <RUNPATH_FILE>**
Many of the external workflow jobs involve looping over all the realisations in a construction like this:
::
for each realisation:
// Do something for realisation
summarize()
When running an external job in a workflow there is no direct transfer of information between the main ERT process and the external script. We therefore must have a convention for transferring the information of which realisations we have simulated on, and where they are located in the filesystem. This is done through a file which looks like this:
::
0 /path/to/real0 CASE_0000
1 /path/to/real1 CASE_0001
...
9 /path/to/real9 CASE_0009
The name and location of this file is available as the magical string <RUNPATH_FILE> and that is typically used as the first argument to external workflow jobs which should iterate over all realisations. The realisations referred to in the <RUNPATH_FILE> are meant to be the last simulations you have run; the file is updated every time you run simulations. This implies that it is (currently) not so convenient to alter which directories should be used when running a workflow.
.. _hook_workflow:
.. topic:: HOOK_WORKFLOW
@@ -1952,7 +1819,6 @@ run. Observe that the workflows being 'hooked in' with the
:code:`HOOK_WORKFLOW` must be loaded with the :code:`LOAD_WORKFLOW`
keyword.
NB: Currently the :code:`PRE_SIMULATION` workflow is never called.
Manipulating the Unix environment
---------------------------------

View File

@@ -0,0 +1,451 @@
.. _built_in_workflow_jobs:
Built in workflow jobs
======================
ERT comes with a list of default workflow jobs which invoke internal
ERT functionality. The internal workflows include:
Jobs related to case management
-------------------------------
**SELECT_CASE**
The job SELECT_CASE can be used to change the currently selected
case. The SELECT_CASE job should be used as:
::
SELECT_CASE newCase
if the case newCase does not exist it will be created.
**CREATE_CASE**
The job CREATE_CASE can be used to create a new case without selecting
it. The CREATE_CASE job should be used as:
::
CREATE_CASE newCase
**INIT_CASE_FROM_EXISTING**
The job INIT_CASE_FROM_EXISTING can be used to initialize a case from
an existing case. The argument to the workflow should be the name of
the workflow you are initializing from; so to initialize the current
case from the existing case "oldCase":
::
INIT_CASE_FROM_EXISTING oldCase
By default the job will initialize the 'current case', but optionally
you can give the name of a second case which should be initialized. In
this example we will initialize "newCase" from "oldCase":
::
INIT_CASE_FROM_EXISTING oldCase newCase
When giving the name of a second case as target for the initialization
job the 'current' case will not be affected.
Jobs related to export
----------------------
**EXPORT_FIELD**
The EXPORT_FIELD workflow job exports field data to roff or grdecl
format dependent on the extension of the export file argument. The job
takes the following arguments:
#. Field to be exported
#. Filename for export file, must contain %d
#. Report_step
#. Realization range
The filename must contain a %d. This will be replaced with the
realization number.
The realization range parameter is optional. Default is all
realizations.
Example use of this job in a workflow:
::
EXPORT_FIELD PERMZ path_to_export/filename%d.grdecl 0 0,2
**EXPORT_FIELD_RMS_ROFF**
The EXPORT_FIELD_RMS_ROFF workflow job exports field data to roff
format. The job takes the following arguments:
#. Field to be exported
#. Filename for export file, must contain %d
#. Report_step
#. Realization range
The filename must contain a %d. This will be replaced with the
realization number.
The realization range parameter is optional. Default is all realizations.
Example uses of this job in a workflow:
::
EXPORT_FIELD_RMS_ROFF PERMZ path_to_export/filename%d.roff 0
EXPORT_FIELD_RMS_ROFF PERMX path_to_export/filename%d 0 0-5
**EXPORT_FIELD_ECL_GRDECL**
The EXPORT_FIELD_ECL_GRDECL workflow job exports field data to grdecl
format. The job takes the following arguments:
#. Field to be exported
#. Filename for export file, must contain %d
#. Report_step
#. Realization range
The filename must contain a %d. This will be replaced with the realization number.
The realization range parameter is optional. Default is all realizations.
Example uses of this job in a workflow:
::
EXPORT_FIELD_ECL_GRDECL PERMZ path_to_export/filename%d.grdecl 0
EXPORT_FIELD_ECL_GRDECL PERMX path_to_export/filename%d 0 0-5
**EXPORT_RUNPATH**
The EXPORT_RUNPATH workflow job writes the runpath file RUNPATH_FILE
for the selected case.
The job can have no arguments, or one can set a range of realizations
and a range of iterations as arguments.
Example uses of this job in a workflow:
::
EXPORT_RUNPATH
With no arguments, entries for all realizations are written to the
runpath file. If the runpath supports iterations, entries for all
realizations in iter0 are written to the runpath file.
::
EXPORT_RUNPATH 0-5 | *
A range of realizations and a range of iterations can be given. "|" is
used as a delimiter to separate realizations and iterations. "*" can
be used to select all realizations or iterations. In the example
above, entries for realizations 0-5 for all iterations are written to
the runpath file.
Jobs related to analysis update
-------------------------------
**ANALYSIS_UPDATE**
This job will perform an update based on the current case, it is
assumed that you have already completed the necessary simulations. By
default the job will use all available data in the conditioning and
store the updated parameters as the new initial parameters of the
current case. However you can use optional argument to control which
case the parameters go to, at which report step they are stored and
also which report steps are considered when assembling the data. In
the simplest form the ANALYSIS_UPDATE job looks like this:
::
ANALYSIS_UPDATE
In this case the initial parameters in the current case will be
updated; using all available data in the conditioning process. In the
example below we redirect the updated parameters to the new case
NewCase:
::
ANALYSIS_UPDATE NewCase
Optionally we can decide to update the parameters at a later stage,
i.e. for instance at report step 100:
::
ANALYSIS_UPDATE * 100
The '*' above means that we should update parameters in the current
case. Finally we can limit the report steps used for data:
::
ANALYSIS_UPDATE NewCaseII 0 10,20,30,40,100,120-200
In the last example 10,20,30,40,100,120-200 mean the report steps we
are considering when updating. Observe that when we use the first
argument to specify a new case it will be created if it does not
exist, but not selected.
**ANALYSIS_ENKF_UPDATE**
The ANALYSIS_ENKF_UPDATE job will do an EnKF update at the current
report step. The job requires the report step as the first argument:
::
ANALYSIS_ENKF_UPDATE 10
by default the ENKF_UPDATE will use the observed data at the
update step, but you can configure it to use the report steps you like for
data. In the example below the parameters at step 20 will be updated
based on the observations at report step 0,5,10,15,16,17,18,19,20:
::
ANALYSIS_ENKF_UPDATE 20 0,5,10,15-20
The ANALYSIS_ENKF_UPDATE job is a special case of the ANALYSIS_UPDATE
job, in principle the same can be achieved with the ENKF_UPDATE job.
Jobs related to running simulations - including updates
-------------------------------------------------------
**RUN_SMOOTHER**
The RUN_SMOOTHER job will run a simulation and perform an update. The
job has one required argument - the name of a case where the updated
parameters are stored. Optionally the job can take a second boolean
argument, if the second argument is set to true the job will rerun
based on the updated parameters.
Run a simulation and an update. Store the updated parameters in the
specified case. This case is created if it does not exist:
::
RUN_SMOOTHER new_case
Run a simulation and an update. Store the updated parameters in the
specified case, then run a simulation on this case:
::
RUN_SMOOTHER new_case true
**RUN_SMOOTHER_WITH_ITER**
This is exactly like the RUN_SMOOTHER job, but it has an additional
first argument iter which can be used to control the iter number in
the RUNPATH. When using the RUN_SMOOTHER job the iter number will be
defaulted to zero, and one in the optional rerun.
**ENSEMBLE_RUN**
The ENSEMBLE_RUN job will run a simulation, no update. The job take as
optional arguments a range and/or list of which realizations to run.
::
ENSEMBLE_RUN
::
ENSEMBLE_RUN 1-5, 8
**LOAD_RESULTS**
The LOAD_RESULTS loads result from simulation(s). The job takes as
optional arguments a range and/or list of which realizations to load
results from. If no realizations are specified, results for all
realizations are loaded.
::
LOAD_RESULTS
::
LOAD_RESULTS 1-5, 8
In the case of multi iteration jobs, like e.g. the integrated smoother
update, the LOAD_RESULTS job will load the results from iter==0. To
control which iteration is loaded from you can use the
LOAD_RESULTS_ITER job.
**LOAD_RESULTS_ITER**
The LOAD_RESULTS_ITER job is similar to the LOAD_RESULTS job, but it
takes an additional first argument which is the iteration number to
load from. This should be used when manually loading results from a
multi iteration workflow:
::
LOAD_RESULTS_ITER
::
LOAD_RESULTS_ITER 3 1-3, 8-10
Will load the realisations 1,2,3 and 8,9,10 from the fourth iteration
(counting starts at zero).
**MDA_ES**
This workflow job (plugin) is used to run the *Multiple Data
Assimilation Ensemble Smoother* :code:`MDA ES`. Only two arguments
are required to start the MDA ES process; target case format and
iteration weights. The weights implicitly indicate the number of
iterations and the normalized global standard deviation scaling
applied to the update step.
::
MDA_ES target_case_%d observations/obs.txt
This command will use the weights specified in the obs.txt file. This
file should have a single floating point number per line.
Alternatively the weights can be given as arguments as shown here.
::
MDA_ES target_case_%d 8,4,2,1
This command will use the normalized version of the weights 8,4,2,1
and run for four iterations. The prior will be in *target_case_0* and
the results from the last iteration will be in *target_case_4*.
**Note: the weights must be listed with no spaces and separated with
commas.**
If this is run as a plugin from Ertshell or the GUI a convenient user
interface can be shown.
Jobs for ranking realizations
-----------------------------
**OBSERVATION_RANKING**
The OBSERVATION_RANKING job will rank realizations based on the delta
between observed and simulated values for selected variables and time
steps. The data for selected variables and time steps are summarized
for both observed and simulated values, and then the simulated versus
observed delta is used for ranking the realizations in increasing
order. The job takes a name for the ranking as the first parameter,
then the time steps, a "|" character and then variables to rank on. If
no time steps and/or no variables are given, all time steps and
variables are taken into account.
Rank the realizations on observation/simulation delta value for all
WOPR data for time steps 0-20:
::
OBSERVATION_RANKING Ranking1 0-20 | WOPR:*
Rank the simulations on observation/simulation delta value for all
WOPR and WWCT data for time steps 1 and 10-50
::
OBSERVATION_RANKING Ranking2 1, 10-50 | WOPR:* WWCT:*
Rank the realizations on observation/simulation delta value for
WOPR:OP-1 data for all time steps
::
OBSERVATION_RANKING Ranking3 | WOPR:OP-1
**DATA_RANKING**
The DATA_RANKING job will rank realizations in increasing or
decreasing order on selected data value for a selected time step. The
job takes as parameters the name of the ranking, the data key to rank
on, increasing order and selected time steps. If no time step is
given, the default is the last timestep.
Rank the realizations on PORO:1,2,3 on time step 0 in decreasing order
::
DATA_RANKING Dataranking1 PORO:1,2,3 false 0
**EXPORT_RANKING**
The EXPORT_RANKING job exports ranking results to file. The job takes
two parameters; the name of the ranking to export and the file to
export to.
::
EXPORT_RANKING Dataranking1 /tmp/dataranking1.txt
**INIT_MISFIT_TABLE**
Calculating the misfit for all observations and all timesteps can
potentially be a bit time-consuming; the results are therefore cached
internally. If you need to force the recalculation of this cache you
can use the INIT_MISFIT_TABLE job to initialize the misfit table that
is used in observation ranking.
::
INIT_MISFIT_TABLE
**STD_SCALE_CORRELATED_OBS**
The workflow job :code:`STD_SCALE_CORRELATED_OBS` is used to scale the
observation standard deviation in an attempt to reduce the effect of
correlations in the observed data. The job expects the observation
keys you want to consider as arguments:
::
STD_SCALE_CORRELATED_OBS WWCT:OP_1 WWCT:OP_2
In this example the observation uncertainty corresponding to
:code:`WWCT:OP_1` and :code:`WWCT:OP_2` will be scaled. Observe that
the :code:`STD_SCALE_CORRELATED_OBS` keyword will "flatten" in both
time and spatial direction. Wildcards are allowed, i.e.
::
STD_SCALE_CORRELATED_OBS W*:OP_1
Will scale based on all the observations of well 'OP_1'. For more
advanced selections of observations, where you only want to scale
based on parts of the observation - spatially or temporally - you must
write your own plugin.

View File

@@ -1,373 +1,22 @@
----------------------
Built in workflow jobs
----------------------
.. _built_in_workflow_jobs:
Workflows and plugins
=====================
ERT comes with a list of default workflow jobs which invoke internal ERT functionality. The internal workflows include:
Contents
Jobs related to case management
-------------------------------
.. toctree::
:maxdepth: 1
**SELECT_CASE**
workflows
built_in
The job SELECT_CASE can be used to change the currently selected case. The SELECT_CASE job should be used as:
The Forward Model in ERT runs in the context of a single realization,
i.e. there is no communication between the different processes, and
the jobs are run outside of the main ERT process.
::
SELECT_CASE newCase
if the case newCase does not exist it will be created.
**CREATE_CASE**
The job CREATE_CASE can be used to create a new case without selecting it. The CREATE_CASE job should be used as:
::
CREATE_CASE newCase
**INIT_CASE_FROM_EXISTING**
The job INIT_CASE_FROM_EXISTING can be used to initialize a case from an existing case. The argument to the workflow should be the name of the workflow you are initializing from; so to initialize the current case from the existing case "oldCase":
::
INIT_CASE_FROM_EXISTING oldCase
By default the job will initialize the 'current case', but optionally you can give the name of a second case which should be initialized. In this example we will initialize "newCase" from "oldCase":
::
INIT_CASE_FROM_EXISTING oldCase newCase
When giving the name of a second case as target for the initialization job the 'current' case will not be affected.
Jobs related to export
----------------------
**EXPORT_FIELD**
The EXPORT_FIELD workflow job exports field data to roff or grdecl format dependent on the extension of the export file argument. The job takes the following arguments:
#. Field to be exported
#. Filename for export file, must contain %d
#. Report_step
#. State
#. Realization range
The filename must contain a %d. This will be replaced with the realization number.
The state parameter is either FORECAST or ANALYZED, BOTH is not supported.
The realization range parameter is optional. Default is all realizations.
Example use of this job in a workflow:
::
EXPORT_FIELD PERMZ path_to_export/filename%d.grdecl 0 FORECAST 0,2
**EXPORT_FIELD_RMS_ROFF**
The EXPORT_FIELD_RMS_ROFF workflow job exports field data to roff format. The job takes the following arguments:
#. Field to be exported
#. Filename for export file, must contain %d
#. Report_step
#. State
#. Realization range
The filename must contain a %d. This will be replaced with the realization number.
The state parameter is either FORECAST or ANALYZED, BOTH is not supported.
The realization range parameter is optional. Default is all realizations.
Example uses of this job in a workflow:
::
EXPORT_FIELD_RMS_ROFF PERMZ path_to_export/filename%d.roff 0 FORECAST
EXPORT_FIELD_RMS_ROFF PERMX path_to_export/filename%d 0 FORECAST 0-5
**EXPORT_FIELD_ECL_GRDECL**
The EXPORT_FIELD_ECL_GRDECL workflow job exports field data to grdecl format. The job takes the following arguments:
#. Field to be exported
#. Filename for export file, must contain %d
#. Report_step
#. State
#. Realization range
The filename must contain a %d. This will be replaced with the realization number.
The state parameter is either FORECAST or ANALYZED, BOTH is not supported.
The realization range parameter is optional. Default is all realizations.
Example uses of this job in a workflow:
::
EXPORT_FIELD_ECL_GRDECL PERMZ path_to_export/filename%d.grdecl 0 ANALYZED
EXPORT_FIELD_ECL_GRDECL PERMX path_to_export/filename%d 0 ANALYZED 0-5
**EXPORT_RUNPATH**
The EXPORT_RUNPATH workflow job writes the runpath file RUNPATH_FILE for the selected case.
The job can have no arguments, or one can set a range of realizations and a range of iterations as arguments.
Example uses of this job in a workflow:
::
EXPORT_RUNPATH
With no arguments, entries for all realizations are written to the runpath file. If the runpath supports iterations, entries for all realizations in iter0 are written to the runpath file.
::
EXPORT_RUNPATH 0-5 | *
A range of realizations and a range of iterations can be given. "|" is used as a delimiter to separate realizations and iterations. "*" can be used to select all realizations or iterations. In the example above, entries for realizations 0-5 for all iterations are written to the runpath file.
Jobs related to analysis update
-------------------------------
**ANALYSIS_UPDATE**
This job will perform an update based on the current case, it is assumed that you have already completed the necessary simulations. By default the job will use all available data in the conditioning and store the updated parameters as the new initial parameters of the current case. However you can use optional argument to control which case the parameters go to, at which report step they are stored and also which report steps are considered when assembling the data. In the simplest form the ANALYSIS_UPDATE job looks like this:
::
ANALYSIS_UPDATE
In this case the initial parameters in the current case will be updated; using all available data in the conditioning process. In the example below we redirect the updated parameters to the new case NewCase:
::
ANALYSIS_UPDATE NewCase
Optionally we can decide to update the parameters at a later stage, i.e. for instance at report step 100:
::
ANALYSIS_UPDATE * 100
The '*' above means that we should update parameters in the current case. Finally we can limit the report steps used for data:
::
ANALYSIS_UPDATE NewCaseII 0 10,20,30,40,100,120-200
In the last example 10,20,30,40,100,120-200 mean the report steps we are considering when updating. Observe that when we use the first argument to specify a new case it will be created if it does not exist, but not selected.
**ANALYSIS_ENKF_UPDATE**
The ANALYSIS_ENKF_UPDATE job will do an EnKF update at the current report step. The job requires the report step as the first argument:
::
ANALYSIS_ENKF_UPDATE 10
by default the ENKF_UPDATE will use the observed data at the update step, but you can configure it to use the report steps you like for data. In the example below the parameters at step 20 will be updated based on the observations at report step 0,5,10,15,16,17,18,19,20:
::
ANALYSIS_ENKF_UPDATE 20 0,5,10,15-20
The ANALYSIS_ENKF_UPDATE job is a special case of the ANALYSIS_UPDATE job, in principle the same can be achieved with the ENKF_UPDATE job.
Jobs related to running simulations - including updates
-------------------------------------------------------
**RUN_SMOOTHER**
The RUN_SMOOTHER job will run a simulation and perform an update. The
job has one required argument - the name of a case where the updated
parameters are stored. Optionally the job can take a second boolean
argument, if the second argument is set to true the job will rerun
based on the updated parameters.
Run a simulation and an update. Store the updated parameters in the
specified case. This case is created if it does not exist:
::
RUN_SMOOTHER new_case
Run a simulation and an update. Store the updated parameters in the
specified case, then run a simulation on this case:
::
RUN_SMOOTHER new_case true
**RUN_SMOOTHER_WITH_ITER**
This is exactly like the RUN_SMOOTHER job, but it has an additional first argument iter which can be used to control the iter number in the RUNPATH. When using the RUN_SMOOTHER job the iter number will be defaulted to zero, and one in the optional rerun.
**ENSEMBLE_RUN**
The ENSEMBLE_RUN job will run a simulation, no update. The job take as optional arguments a range and/or list of which realizations to run.
::
ENSEMBLE_RUN
::
ENSEMBLE_RUN 1-5, 8
**LOAD_RESULTS**
The LOAD_RESULTS loads result from simulation(s). The job takes as optional arguments a range and/or list of which realizations to load results from. If no realizations are specified, results for all realizations are loaded.
::
LOAD_RESULTS
::
LOAD_RESULTS 1-5, 8
In the case of multi iteration jobs, like e.g. the integrated smoother update, the LOAD_RESULTS job will load the results from iter==0. To control which iteration is loaded from you can use the LOAD_RESULTS_ITER job.
**LOAD_RESULTS_ITER**
The LOAD_RESULTS_ITER job is similar to the LOAD_RESULTS job, but it takes an additional first argument which is the iteration number to load from. This should be used when manually loading results from a multi iteration workflow:
::
LOAD_RESULTS_ITER
::
LOAD_RESULTS_ITER 3 1-3, 8-10
Will load the realisations 1,2,3 and 8,9,10 from the fourth iteration (counting starts at zero).
**MDA_ES**
This workflow job (plugin) is used to run the *Multiple Data Assimilation Ensemble Smoother* :code:`MDA ES`.
Only two arguments are required to start the MDA ES process; target case format and iteration weights.
The weights implicitly indicate the number of iterations and the normalized global standard deviation scaling applied to the update step.
::
MDA_ES target_case_%d observations/obs.txt
This command will use the weights specified in the obs.txt file. This file should have a single floating point number per line.
Alternatively the weights can be given as arguments as shown here.
::
MDA_ES target_case_%d 8,4,2,1
This command will use the normalized version of the weights 8,4,2,1 and run for four iterations.
The prior will be in *target_case_0* and the results from the last iteration will be in *target_case_4*.
**Note: the weights must be listed with no spaces and separated with commas.**
If this is run as a plugin from Ertshell or the GUI a convenient user interface can be shown.
Jobs for ranking realizations
-----------------------------
**OBSERVATION_RANKING**
The OBSERVATION_RANKING job will rank realizations based on the delta between observed and simulated values for selected variables and time steps. The data for selected variables and time steps are summarized for both observed and simulated values, and then the simulated versus observed delta is used for ranking the realizations in increasing order. The job takes a name for the ranking as the first parameter, then the time steps, a "|" character and then variables to rank on. If no time steps and/or no variables are given, all time steps and variables are taken into account.
Rank the realizations on observation/simulation delta value for all WOPR data for time steps 0-20:
::
OBSERVATION_RANKING Ranking1 0-20 | WOPR:*
Rank the simulations on observation/simulation delta value for all WOPR and WWCT data for time steps 1 and 10-50
::
OBSERVATION_RANKING Ranking2 1, 10-50 | WOPR:* WWCT:*
Rank the realizations on observation/simulation delta value for WOPR:OP-1 data for all time steps
::
OBSERVATION_RANKING Ranking3 | WOPR:OP-1
**DATA_RANKING**
The DATA_RANKING job will rank realizations in increasing or decreasing order on selected data value for a selected time step. The job takes as parameters the name of the ranking, the data key to rank on, increasing order and selected time steps. If no time step is given, the default is the last timestep.
Rank the realizations on PORO:1,2,3 on time step 0 in decreasing order
::
DATA_RANKING Dataranking1 PORO:1,2,3 false 0
**EXPORT_RANKING**
The EXPORT_RANKING job exports ranking results to file. The job takes two parameters; the name of the ranking to export and the file to export to.
::
EXPORT_RANKING Dataranking1 /tmp/dataranking1.txt
**INIT_MISFIT_TABLE**
Calculating the misfit for all observations and all timesteps can potentially be a bit time-consuming; the results are therefore cached internally. If you need to force the recalculation of this cache you can use the INIT_MISFIT_TABLE job to initialize the misfit table that is used in observation ranking.
::
INIT_MISFIT_TABLE
**STD_SCALE_CORRELATED_OBS**
The workflow job :code:`STD_SCALE_CORRELATED_OBS` is used to scale the
observation standard deviation in an attempt to reduce the effect of
correlations in the observed data. The job expects the observation
keys you want to consider as arguments:
::
STD_SCALE_CORRELATED_OBS WWCT:OP_1 WWCT:OP_2
In this example the observation uncertainty corresponding to
:code:`WWCT:OP_1` and :code:`WWCT:OP_2` will be scaled. Observe that
the :code:`STD_SCALE_CORRELATED_OBS` keyword will "flatten" in both
time and spatial direction. Wildcards are allowed, i.e.
::
STD_SCALE_CORRELATED_OBS W*:OP_1
Will scale based on all the observations of well 'OP_1'. For more
advanced selections of observations, where you only want to scale
based on parts of the observation - spatially or temporally - you must
write your own plugin.
As an alternative to the forward model ERT has a system with
*workflows*. Using workflows you can automate cumbersome normal ERT
processes, and also invoke external programs. The workflows are run
serially on the workstation actually running ERT, and should not be
used for computationally heavy tasks.

View File

@@ -0,0 +1,355 @@
.. _workflows:
Configuring workflows in ERT consists of two steps: *installing the
jobs* which should be available for ERT to use in workflows, and then
subsequently assemble one or more jobs, with arguments, in a
workflow. You can use predefined workflow jobs, or create your
own. There are no predefined complete workflows.
Workflow jobs
=============
The workflow jobs are quite similar to the jobs in the forward model,
in particular the jobs are described by a configuration file which
resembles the one used by the forward model jobs. The workflow jobs
can be of two fundamentally different types - *external* and *internal*.
External workflow jobs
----------------------
These jobs invoke an external program/script to do the job, this is
very similar to the jobs of the forward model, but instead of running
as separate jobs on the cluster - one for each realization, the
workflow jobs will be invoked on the workstation running ert, and
typically go through all the realizations in one loop.
Internal workflow jobs
----------------------
These jobs invoke a function in the address space of the ERT program
itself; i.e. they are run as part of the running ERT process - and can
in principle do anything that ERT can do itself. There are two
varieties of the internal workflow jobs:
Invoke a pre exported function
..............................
This is the simplest, where you can invoke a predefined ERT
function. The function must already have been marked as *exported* in
the ert code base. The list of predefined workflow jobs based on this
method can be found here: :ref:`built_in_workflow_jobs`.
Run a Python Script
...................
If you are using one of the Python based frontends, *gert* or
*ertshell*, you can write your own Python script which is run as part
of the existing process. By using the full ert Python api you get
access to powerful customization/automation features. Below is an
example of :code:`ErtScript` which calculates the misfit for all
observations and prints the result to a text file. All Python scripts
of this kind must:
1. Be implemented as a class which inherits from :code:`ErtScript`
2. Have a method :code:`run(self)` which does the actual job
.. code:: python
from ert.util import DoubleVector
from ert.enkf import ErtScript
class ExportMisfit(ErtScript):
def run(self):
# Get a handle to running ert instance
ert = self.ert()
# Get a handle to the case / filesystem we are interested in;
# this should ideally come as an argument - not just use current.
fs = ert.getEnkfFsManager().getCurrentFileSystem()
# How many realisations:
ens_size = ert.getEnsembleSize( )
# Get a handle to all the observations
observations = ert.getObservations()
# Iterate through all the observations; each element in this
# iteration corresponds to one key in the observations file.
for obs in observations:
misfit = DoubleVector()
for iens in range(ens_size):
chi2 = obs.getTotalChi2( fs , iens )
misfit[iens] = chi2
permutation = misfit.permutationSort( )
print " # Realisation Misfit:%s" % obs.getObservationKey()
print "-----------------------------------"
for index in range(len(misfit)):
iens = permutation[index]
print "%2d %2d %10.5f" % (index , iens , misfit[iens])
print "-----------------------------------\n"
Configuring workflow jobs
-------------------------
Workflow jobs are configured with a small configuration file much like
the configuration file used to install forward model jobs. The
keywords used in the configuration file are in two *classes* - those
related to how the job should be located/run and the arguments which
should be passed from the workflow to the job.
Configure an internal job
.........................
When configuring an internal workflow job the keyword :code:`INTERNAL`
is given the value :code:`TRUE` to indicate that this is an internal
job. In addition you give the name of the C function you wish to
invoke. By default the workflow job will search for the function
symbol in the current process space, but by passing the :code:`MODULE`
keyword you can request the loading of an external shared library:
::
INTERNAL TRUE -- The job will call an internal function of the current running ERT instance.
FUNCTION enkf_main_plot_all -- Name of the ERT function we are calling; must be marked exportable.
MODULE /name/of/shared/library -- Very optional - to load an extra shared library.
Configure an internal job: Python
...................................
If you wish to implement your job as a Python class, derived from
:code:`ErtScript` you should use the :code:`SCRIPT` keyword instead of
:code:`FUNCTION`, to point to an existing Python script:
::
INTERNAL TRUE -- The job will call an internal function of the current running ERT instance.
SCRIPT scripts/my_script.py -- An existing Python script
Observe that the important thing here is the fact that we are writing
an *internal* Python script; if you are writing an external script to
loop through all your realization folders that will typically be an
*external* script, and in that case the implementation language -
i.e. Python, Perl, C++, F77 ... has no relevance.
Configure an external job
.........................
An *external* job is a workflow job which is implemented in an
external executable, i.e. typically a script written in for instance
Python. When configuring an external job the most important keyword is
:code:`EXECUTABLE` which is used to give the path to the external
executable:
::
INTERNAL FALSE -- This is the default - not necessary to include.
EXECUTABLE path/to/program -- Path to a program/script which will be invoked by the job.
Configuring the arguments
.........................
In addition to the INTERNAL, FUNCTION, MODULE and EXECUTABLE keys
which are used to configure what the job should do there are some keys
which can be used to configure the number of arguments and their
type. These arguments apply to both internal and external jobs:
::
MIN_ARG 2 -- The job should have at least 2 arguments.
MAX_ARG 3 -- The job should have maximum 3 arguments.
ARG_TYPE 0 INT -- The first argument should be an integer
ARG_TYPE 1 FLOAT -- The second argument should be a float value
ARG_TYPE 2 STRING -- The third argument should be a string - the default.
The MIN_ARG,MAX_ARG and ARG_TYPE arguments are used to validate workflows.
**Example 1 : Plot variables**
::
-- FILE: PLOT --
INTERNAL TRUE
FUNCTION ert_tui_plot_JOB
MIN_ARG 1
This job will use the ERT internal function ert_tui_plot_JOB to plot
an ensemble of an arbitrary ERT variable. The job needs at least one
argument; there is no upper limit on the number of arguments.
**Example 2 : Run external script**
::
-- FILE: ECL_HIST --
EXECUTABLE Script/ecl_hist.py
MIN_ARG 3
This job will invoke the external script Script/ecl_hist.py; the
script should have at least three commandline arguments. The path to
the script, Script/ecl_hist.py is interpreted relative to the location
of the configuration file.
Loading workflow jobs into ERT
------------------------------
Before the jobs can be used in workflows they must be 'loaded' into
ERT. This is done with two different ERT keywords:
::
LOAD_WORKFLOW_JOB jobConfigFile JobName
The LOAD_WORKFLOW_JOB keyword will load one workflow job. The name of
the job is optional; if not provided the job will get its name from the
configuration file. Alternatively you can use the command
WORKFLOW_JOB_DIRECTORY which will load all the jobs in a
directory. The command:
::
WORKFLOW_JOB_DIRECTORY /path/to/jobs
will load all the workflow jobs in the /path/to/jobs
directory. Observe that all the files in the /path/to/jobs directory
should be job configuration files. The jobs loaded in this way will
all get the name of the file as the name of the job. The
:code:`WORKFLOW_JOB_DIRECTORY` keyword will *not* load configuration
files recursively.
Complete Workflows
==================
A workflow is a list of calls to jobs, with additional arguments. The
job name should be the first element on each line. Based on the two
jobs PLOT and ECL_HIST we can create a small workflow example:
::
PLOT WWCT:OP_1 WWCT:OP_3 PRESSURE:10,10,10
PLOT FGPT FOPT
ECL_HIST <RUNPATH_FILE> <QC_PATH>/<ERTCASE>/wwct_hist WWCT:OP_1 WWCT:OP_2
In this workflow we create plots of the nodes
WWCT:OP_1;WWCT:OP_3,PRESSURE:10,10,10,FGPT and FOPT. The plot job we
have created in this example is completely general, if we limited
ourselves to ECLIPSE summary variables we could get wildcard
support. Then we invoke the ECL_HIST example job to create a
histogram. See below for documentation of <RUNPATH_FILE>,<QC_PATH> and
<ERTCASE>.
Loading workflows
-----------------
Workflows are loaded with the configuration option LOAD_WORKFLOW:
::
LOAD_WORKFLOW /path/to/workflow/WFLOW1
LOAD_WORKFLOW /path/to/workflow/workflow2 WFLOW2
The LOAD_WORKFLOW takes the path to a workflow file as the first
argument. By default the workflow will be labeled with the filename
internally in ERT, but optionally you can supply a second extra
argument which will be used as name for the workflow. Alternatively
you can load a workflow interactively.
Automatically run workflows : HOOK_WORKFLOW
-------------------------------------------
.. _hook_workflow:
.. topic:: HOOK_WORKFLOW
With the keyword :code:`HOOK_WORKFLOW` you can configure workflow
'hooks'; meaning workflows which will be run automatically at certain
points during ERT's execution. Currently there are two points in ERT's
flow of execution where you can hook in a workflow, either just before
the simulations start, :code:`PRE_SIMULATION` - or after all the
simulations have completed :code:`POST_SIMULATION`. The
:code:`POST_SIMULATION` hook is typically used to trigger QC
workflows:
::
HOOK_WORKFLOW initWFLOW PRE_SIMULATION
HOOK_WORKFLOW QC_WFLOW1 POST_SIMULATION
HOOK_WORKFLOW QC_WFLOW2 POST_SIMULATION
In this example the workflow :code:`initWFLOW` will run after all
the simulation directories have been created, just before the forward
model is submitted to the queue. When all the simulations are complete
the two workflows :code:`QC_WFLOW1` and :code:`QC_WFLOW2` will be
run. Observe that the workflows being 'hooked in' with the
:code:`HOOK_WORKFLOW` must be loaded with the :code:`LOAD_WORKFLOW`
keyword.
Locating the realisations: <RUNPATH_FILE>
-----------------------------------------
Context must be passed between the main ERT process and the script
through the use of string substitution, in particular the 'magic' key
<RUNPATH_FILE> has been introduced for this purpose.
Many of the external workflow jobs involve looping over all the
realisations in a construction like this:
::
for each realisation:
// Do something for realisation
summarize()
When running an external job in a workflow there is no direct transfer
of information between the main ERT process and the external
script. We therefore must have a convention for transferring the
information of which realisations we have simulated on, and where they
are located in the filesystem. This is done through a file which looks
like this:
::
0 /path/to/real0 CASE_0000
1 /path/to/real1 CASE_0001
...
9 /path/to/real9 CASE_0009
The name and location of this file is available as the magical string
<RUNPATH_FILE> and that is typically used as the first argument to
external workflow jobs which should iterate over all realisations. The
realisations referred to in the <RUNPATH_FILE> are meant to be the last
simulations you have run; the file is updated every time you run
simulations. This implies that it is (currently) not so convenient to
alter which directories should be used when running a workflow.

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ANALYSIS_MODULE_H__
#define __ANALYSIS_MODULE_H__
#ifndef ERT_ANALYSIS_MODULE_H
#define ERT_ANALYSIS_MODULE_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -1,5 +1,5 @@
#ifndef __ANALYSIS_TABLE_H__
#define __ANALYSIS_TABLE_H__
#ifndef ERT_ANALYSIS_TABLE_H
#define ERT_ANALYSIS_TABLE_H
#ifdef __cplusplus
extern "C" {

View File

@@ -1,5 +1,5 @@
#ifndef __ENKF_LINALG_H__
#define __ENKF_LINALG_H__
#ifndef ERT_ENKF_LINALG_H
#define ERT_ENKF_LINALG_H
#include <ert/util/matrix_lapack.h>
#include <ert/util/matrix.h>

View File

@@ -1,5 +1,5 @@
#ifndef __STD_ENKF_H__
#define __STD_ENKF_H__
#ifndef ERT_STD_ENKF_H
#define ERT_STD_ENKF_H
#ifdef __cplusplus
extern "C" {

View File

@@ -1,5 +1,5 @@
#ifndef __RML_ENKF_COMMON_H__
#define __RML_ENKF_COMMON_H__
#ifndef ERT_RML_ENKF_COMMON_H
#define ERT_RML_ENKF_COMMON_H
#include <stdbool.h>

View File

@@ -1,26 +1,26 @@
/*
copyright (C) 2011 Statoil ASA, Norway.
The file 'fwd_step_enkf.c' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
copyright (C) 2011 Statoil ASA, Norway.
The file 'fwd_step_enkf.c' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <ert/util/type_macros.h>
#include <ert/util/util.h>
#include <ert/util/rng.h>
@@ -39,15 +39,17 @@
#define DEFAULT_R2_LIMIT 0.99
#define NFOLDS_KEY "CV_NFOLDS"
#define R2_LIMIT_KEY "FWD_STEP_R2_LIMIT"
#define DEFAULT_VERBOSE false
#define VERBOSE_KEY "VERBOSE"
struct fwd_step_enkf_data_struct {
UTIL_TYPE_ID_DECLARATION;
stepwise_type * stepwise_data;
stepwise_type * stepwise_data;
rng_type * rng;
int nfolds;
long option_flags;
double r2_limit;
bool verbose;
};
@@ -63,106 +65,123 @@ void fwd_step_enkf_set_r2_limit( fwd_step_enkf_data_type * data , double limit )
data->r2_limit = limit;
}
/* Toggle verbose diagnostic printing for this module instance. */
void fwd_step_enkf_set_verbose( fwd_step_enkf_data_type * data , bool verbose_flag ) {
  data->verbose = verbose_flag;
}
/* Allocate a fwd_step_enkf module instance initialised with the default
   settings; ownership of the returned object passes to the caller.
   The rng pointer is only stored - it is not owned by this object. */
void * fwd_step_enkf_data_alloc( rng_type * rng ) {
  fwd_step_enkf_data_type * module = util_malloc( sizeof * module );
  UTIL_TYPE_ID_INIT( module , FWD_STEP_ENKF_TYPE_ID );

  module->rng           = rng;
  module->stepwise_data = NULL;
  module->verbose       = DEFAULT_VERBOSE;
  module->nfolds        = DEFAULT_NFOLDS;
  module->r2_limit      = DEFAULT_R2_LIMIT;
  module->option_flags  = ANALYSIS_NEED_ED + ANALYSIS_UPDATE_A + ANALYSIS_SCALE_DATA;

  return module;
}
/*Main function: */
void fwd_step_enkf_updateA(void * module_data ,
matrix_type * A ,
matrix_type * S ,
matrix_type * R ,
matrix_type * dObs ,
void fwd_step_enkf_updateA(void * module_data ,
matrix_type * A ,
matrix_type * S ,
matrix_type * R ,
matrix_type * dObs ,
matrix_type * E ,
matrix_type * D ) {
fwd_step_enkf_data_type * fwd_step_data = fwd_step_enkf_data_safe_cast( module_data );
printf("Running Forward Stepwise regression:\n");
{
int ens_size = matrix_get_columns( S );
int nx = matrix_get_rows( A );
int nd = matrix_get_rows( S );
int ens_size = matrix_get_columns( S );
int nx = matrix_get_rows( A );
int nd = matrix_get_rows( S );
int nfolds = fwd_step_data->nfolds;
double r2_limit = fwd_step_data->r2_limit;
bool verbose = fwd_step_data->verbose;
if ( ens_size <= nfolds)
util_abort("%s: The number of ensembles must be larger than the CV fold - aborting\n", __func__);
{
stepwise_type * stepwise_data = stepwise_alloc1(ens_size, nd , fwd_step_data->rng);
matrix_type * workS = matrix_alloc( ens_size , nd );
matrix_type * workE = matrix_alloc( ens_size , nd );
/*workS = S' */
for (int i = 0; i < nd; i++) {
for (int j = 0; j < ens_size; j++) {
matrix_iset( workS , j , i , matrix_iget( S , i , j ) );
}
}
/*This might be illigal???? */
matrix_subtract_row_mean( S ); /* Shift away the mean */
workS = matrix_alloc_transpose( S );
workE = matrix_alloc_transpose( E );
stepwise_set_X0( stepwise_data , workS );
double xHat;
stepwise_set_E0( stepwise_data , workE );
matrix_type * di = matrix_alloc( 1 , nd );
printf("nx = %d\n",nx);
if (verbose){
printf("===============================================================================================================================\n");
printf("Total number of parameters : %d\n",nx);
printf("Total number of observations: %d\n",nd);
printf("Number of ensembles : %d\n",ens_size);
printf("CV folds : %d\n",nfolds);
printf("Relative R2 tolerance : %f\n",r2_limit);
printf("===============================================================================================================================\n");
printf("%-15s%-15s%-15s%-15s\n", "Parameter", "NumAttached", "FinalR2", "ActiveIndices");
}
for (int i = 0; i < nx; i++) {
/*Update values of y */
/*Start of the actual update */
matrix_type * y = matrix_alloc( ens_size , 1 );
for (int j = 0; j < ens_size; j++) {
matrix_iset(y , j , 0 , matrix_iget( A, i , j ) );
}
/*This might be illigal???? */
stepwise_set_Y0( stepwise_data , y );
stepwise_estimate(stepwise_data , fwd_step_data->r2_limit , fwd_step_data->nfolds );
stepwise_estimate(stepwise_data , r2_limit , nfolds );
/*manipulate A directly*/
for (int j = 0; j < ens_size; j++) {
for (int k = 0; k < nd; k++) {
matrix_iset(di , 0 , k , matrix_iget( D , k , j ) );
}
xHat = stepwise_eval(stepwise_data , di );
matrix_iset(A , i , j , xHat);
double aij = matrix_iget( A , i , j );
double xHat = stepwise_eval(stepwise_data , di );
matrix_iset(A , i , j , aij + xHat);
}
if (verbose)
stepwise_printf(stepwise_data, i);
}
if (verbose)
printf("===============================================================================================================================\n");
printf("Done with stepwise regression enkf\n");
stepwise_free( stepwise_data );
matrix_free( di );
/*workS is freed in stepwise_free() */
/*matrix_free( workS ); */
/*matrix_free( y );*/
}
}
}
@@ -201,13 +220,28 @@ bool fwd_step_enkf_set_int( void * arg , const char * var_name , int value) {
fwd_step_enkf_data_type * module_data = fwd_step_enkf_data_safe_cast( arg );
{
bool name_recognized = true;
/*Set number of CV folds */
if (strcmp( var_name , NFOLDS_KEY) == 0)
fwd_step_enkf_set_nfolds( module_data , value);
else
name_recognized = false;
return name_recognized;
}
}
/* Set a boolean option on the module; currently only VERBOSE_KEY is
   supported. Returns true when var_name was recognized. */
bool fwd_step_enkf_set_bool( void * arg , const char * var_name , bool value) {
  fwd_step_enkf_data_type * module_data = fwd_step_enkf_data_safe_cast( arg );

  if (strcmp( var_name , VERBOSE_KEY ) != 0)
    return false;            /* Unknown variable name. */

  fwd_step_enkf_set_verbose( module_data , value );
  return true;
}
@@ -225,6 +259,8 @@ bool fwd_step_enkf_has_var( const void * arg, const char * var_name) {
return true;
else if (strcmp(var_name , R2_LIMIT_KEY ) == 0)
return true;
else if (strcmp(var_name , VERBOSE_KEY ) == 0)
return true;
else
return false;
}
@@ -250,6 +286,15 @@ int fwd_step_enkf_get_int( const void * arg, const char * var_name) {
}
}
/* Query a boolean option; only VERBOSE_KEY is recognized - any other
   name yields false. */
bool fwd_step_enkf_get_bool( const void * arg, const char * var_name) {
  const fwd_step_enkf_data_type * module_data = fwd_step_enkf_data_safe_cast_const( arg );
  bool result = false;

  if (strcmp( var_name , VERBOSE_KEY ) == 0)
    result = module_data->verbose;

  return result;
}
@@ -263,19 +308,19 @@ int fwd_step_enkf_get_int( const void * arg, const char * var_name) {
analysis_table_type SYMBOL_TABLE = {
.alloc = fwd_step_enkf_data_alloc,
.freef = fwd_step_enkf_data_free,
.set_int = fwd_step_enkf_set_int ,
.set_double = fwd_step_enkf_set_double ,
.set_bool = NULL ,
.set_string = NULL ,
.get_options = fwd_step_enkf_get_options ,
.initX = NULL ,
.set_int = fwd_step_enkf_set_int ,
.set_double = fwd_step_enkf_set_double ,
.set_bool = fwd_step_enkf_set_bool ,
.set_string = NULL ,
.get_options = fwd_step_enkf_get_options ,
.initX = NULL ,
.updateA = fwd_step_enkf_updateA,
.init_update = NULL ,
.complete_update = NULL ,
.has_var = fwd_step_enkf_has_var,
.get_int = fwd_step_enkf_get_int ,
.get_double = fwd_step_enkf_get_double ,
.get_bool = NULL ,
.get_bool = fwd_step_enkf_get_bool ,
.get_ptr = NULL
};

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONF_H__
#define __CONF_H__
#ifndef ERT_CONF_H
#define ERT_CONF_H
/* libconfig: lightweight configuration parser
*

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONF_DATA_H__
#define __CONF_DATA_H__
#ifndef ERT_CONF_DATA_H
#define ERT_CONF_DATA_H
#include <stdbool.h>
#include <time.h>

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONF_UTIL_H__
#define __CONF_UTIL_H__
#ifndef ERT_CONF_UTIL_H
#define ERT_CONF_UTIL_H
char * conf_util_fscanf_alloc_token_buffer( const char * file_name );

View File

@@ -17,8 +17,8 @@
*/
#ifndef __CONFIG_CONTENT_H__
#define __CONFIG_CONTENT_H__
#ifndef ERT_CONFIG_CONTENT_H
#define ERT_CONFIG_CONTENT_H
#ifdef __cplusplus
extern "C" {

View File

@@ -17,8 +17,8 @@
*/
#ifndef __CONFIG_CONTENT_ITEM_H__
#define __CONFIG_CONTENT_ITEM_H__
#ifndef ERT_CONFIG_CONTENT_ITEM_H
#define ERT_CONFIG_CONTENT_ITEM_H
#ifdef __cplusplus
extern "C" {

View File

@@ -17,8 +17,8 @@
*/
#ifndef __CONFIG_CONTENT_NODE_H__
#define __CONFIG_CONTENT_NODE_H__
#ifndef ERT_CONFIG_CONTENT_NODE_H
#define ERT_CONFIG_CONTENT_NODE_H
#ifdef __cplusplus
define extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONFIG_ERROR_H__
#define __CONFIG_ERROR_H__
#ifndef ERT_CONFIG_ERROR_H
#define ERT_CONFIG_ERROR_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONFIG_H__
#define __CONFIG_H__
#ifndef ERT_CONFIG_H
#define ERT_CONFIG_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONFIG_PATH_ELM_H__
#define __CONFIG_PATH_ELM_H__
#ifndef ERT_CONFIG_PATH_ELM_H
#define ERT_CONFIG_PATH_ELM_H
#ifdef __cplusplus
extern "C"

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __CONFIG_ROOT_PATH_H__
#define __CONFIG_ROOT_PATH_H__
#ifndef ERT_CONFIG_ROOT_PATH_H
#define ERT_CONFIG_ROOT_PATH_H
#ifdef __cplusplus
extern "C"

View File

@@ -17,8 +17,8 @@
*/
#ifndef __CONFIG_SCHEMA_ITEM_H__
#define __CONFIG_SCHEMA_ITEM_H__
#ifndef ERT_CONFIG_SCHEMA_ITEM_H
#define ERT_CONFIG_SCHEMA_ITEM_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_BOX_H__
#define __ECL_BOX_H__
#ifndef ERT_ECL_BOX_H
#define ERT_ECL_BOX_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_COARSE_CELL_H__
#define __ECL_COARSE_CELL_H__
#ifndef ERT_ECL_COARSE_CELL_H
#define ERT_ECL_COARSE_CELL_H
#ifdef __cplusplus

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_ENDIAN_FLIP_H__
#define __ECL_ENDIAN_FLIP_H__
#ifndef ERT_ECL_ENDIAN_FLIP_H
#define ERT_ECL_ENDIAN_FLIP_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_FILE_H__
#define __ECL_FILE_H__
#ifndef ERT_ECL_FILE_H
#define ERT_ECL_FILE_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_FILE_KW_H__
#define __ECL_FILE_KW_H__
#ifndef ERT_ECL_FILE_KW_H
#define ERT_ECL_FILE_KW_H
#ifdef __cplusplus
extern "C" {

View File

@@ -17,8 +17,8 @@
for more details.
*/
#ifndef __ECL_GRAV_H__
#define __ECL_GRAV_H__
#ifndef ERT_ECL_GRAV_H
#define ERT_ECL_GRAV_H
#ifdef __plusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_GRAV_CALC_H__
#define __ECL_GRAV_CALC_H__
#ifndef ERT_ECL_GRAV_CALC_H
#define ERT_ECL_GRAV_CALC_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -17,8 +17,8 @@
for more details.
*/
#ifndef __ECL_GRAV_COMMON_H__
#define __ECL_GRAV_COMMON_H__
#ifndef ERT_ECL_GRAV_COMMON_H
#define ERT_ECL_GRAV_COMMON_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_GRID_H__
#define __ECL_GRID_H__
#ifndef ERT_ECL_GRID_H
#define ERT_ECL_GRID_H
#ifdef __cplusplus
extern "C" {
#endif
@@ -89,6 +89,8 @@ extern "C" {
const nnc_info_type * ecl_grid_get_cell_nnc_info3( const ecl_grid_type * grid , int i , int j , int k);
const nnc_info_type * ecl_grid_get_cell_nnc_info1( const ecl_grid_type * grid , int global_index);
void ecl_grid_add_self_nnc( ecl_grid_type * grid1, int g1, int g2, int nnc_index);
void ecl_grid_add_self_nnc_list( ecl_grid_type * grid, const int * g1_list , const int * g2_list , int num_nnc );
ecl_grid_type * ecl_grid_alloc_GRDECL_kw( int nx, int ny , int nz , const ecl_kw_type * zcorn_kw , const ecl_kw_type * coord_kw , const ecl_kw_type * actnum_kw , const ecl_kw_type * mapaxes_kw );
ecl_grid_type * ecl_grid_alloc_GRDECL_data(int , int , int , const float * , const float * , const int * , const float * mapaxes);

View File

@@ -17,8 +17,8 @@
for more details.
*/
#ifndef __ECL_GRID_CACHE_H__
#define __ECL_GRID_CACHE_H__
#ifndef ERT_ECL_GRID_CACHE_H
#define ERT_ECL_GRID_CACHE_H
#include <ert/ecl/ecl_grid.h>

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_GRID_DIMS_H__
#define __ECL_GRID_DIMS_H__
#ifndef ERT_ECL_GRID_DIMS_H
#define ERT_ECL_GRID_DIMS_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_INIT_FILE_H__
#define __ECL_INIT_FILE_H__
#ifndef ERT_ECL_INIT_FILE_H
#define ERT_ECL_INIT_FILE_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_IO_CONFIG_H__
#define __ECL_IO_CONFIG_H__
#ifndef ERT_ECL_IO_CONFIG_H
#define ERT_ECL_IO_CONFIG_H
typedef struct ecl_io_config_struct ecl_io_config_type;

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_KW_H__
#define __ECL_KW_H__
#ifndef ERT_ECL_KW_H
#define ERT_ECL_KW_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -22,8 +22,8 @@
header explicitly.
*/
#ifndef __ECL_KW_GRDECL_H__
#define __ECL_KW_GRDECL_H__
#ifndef ERT_ECL_KW_GRDECL_H
#define ERT_ECL_KW_GRDECL_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -1,5 +1,5 @@
#ifndef __ECL_KW_MAGIC_H__
#define __ECL_KW_MAGIC_H__
#ifndef ERT_ECL_KW_MAGIC_H
#define ERT_ECL_KW_MAGIC_H
#ifdef __cplusplus
extern "C" {
@@ -277,13 +277,16 @@ extern "C" {
#define MAPUNITS_KW "MAPUNITS"
#define GRIDUNIT_KW "GRIDUNIT"
#define NNCHEAD_KW "NNCHEAD" /*Non-neighbour connection header*/
#define NNC1_KW "NNC1" /*Upstream cell numbers for non-neighbour connections*/
#define NNC2_KW "NNC2" /*Downstream cell numbers for non-neighbour connections*/
#define NNCL_KW "NNCL" /*Cell numbers for LGR cells that are connected to global grid cells*/
#define NNCG_KW "NNCG" /*Cell numbers for global cells connected to LGR cells*/
#define NNCHEAD_KW "NNCHEAD" /*Non-neighbour connection header*/
#define NNCHEAD_SIZE 10
#define NNCHEAD_NUMNNC_INDEX 0 /*Item 1 in non-neighbour connection header: number of NNCs. Only present for main grid*/
#define NNCHEAD_LGR_INDEX 1 /*Item 2 in non-neighbour connection header: LGR number (0 for global grid)*/
#define NNCHEADA_KW "NNCHEADA" /*Header for NNC's between two amalgamated LGRs*/
#define NNA1_KW "NNA1" /*Cell numbers in connecting local grid ILOC1*/
#define NNA2_KW "NNA2" /*Cell numbers in connecting local grid ILOC2*/

View File

@@ -17,8 +17,8 @@
*/
#ifndef __ECL_NNC_EXPORT__
#define __ECL_NNC_EXPORT__
#ifndef ERT_ECL_NNC_EXPORT
#define ERT_ECL_NNC_EXPORT
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_REGION_H__
#define __ECL_REGION_H__
#ifndef ERT_ECL_REGION_H
#define ERT_ECL_REGION_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_RFT_CELL_H__
#define __ECL_RFT_CELL_H__
#ifndef ERT_ECL_RFT_CELL_H
#define ERT_ECL_RFT_CELL_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_RFT_FILE_H__
#define __ECL_RFT_FILE_H__
#ifndef ERT_ECL_RFT_FILE_H
#define ERT_ECL_RFT_FILE_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_RFT_NODE_H__
#define __ECL_RFT_NODE_H__
#ifndef ERT_ECL_RFT_NODE_H
#define ERT_ECL_RFT_NODE_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -17,8 +17,8 @@
*/
#ifndef __ECL_RST_FILE_H__
#define __ECL_RST_FILE_H__
#ifndef ERT_ECL_RST_FILE_H
#define ERT_ECL_RST_FILE_H
#include <ert/ecl/ecl_rsthead.h>

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_RSTHEAD_H__
#define __ECL_RSTHEAD_H__
#ifndef ERT_ECL_RSTHEAD_H
#define ERT_ECL_RSTHEAD_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_SMSPEC__
#define __ECL_SMSPEC__
#ifndef ERT_ECL_SMSPEC
#define ERT_ECL_SMSPEC
#ifdef __cplusplus
extern "C" {

View File

@@ -17,8 +17,8 @@
for more details.
*/
#ifndef __ECL_SUBSIDENCE_H__
#define __ECL_SUBSICENCE_H__
/* Include guard: the #define token must match the #ifndef token exactly.
   The original misspelled it as ERT_ECL_SUBSICENCE_H, which left the
   header unprotected against double inclusion. */
#ifndef ERT_ECL_SUBSIDENCE_H
#define ERT_ECL_SUBSIDENCE_H
/* NOTE(review): '__plusplus' is a typo for the standard '__cplusplus'
   macro - as written, the extern "C" linkage block is never enabled for
   C++ consumers of this header. Fix together with the matching closing
   #ifdef at the bottom of the header (not visible in this hunk). */
#ifdef __plusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_SUM_H__
#define __ECL_SUM_H__
#ifndef ERT_ECL_SUM_H
#define ERT_ECL_SUM_H
#ifdef __cplusplus
extern "C" {
@@ -206,7 +206,12 @@ typedef struct ecl_sum_struct ecl_sum_type;
bool ecl_sum_report_step_compatible( const ecl_sum_type * ecl_sum1 , const ecl_sum_type * ecl_sum2);
void ecl_sum_export_csv(const ecl_sum_type * ecl_sum , const char * filename , const stringlist_type * var_list , const char * date_format , const char * sep);
UTIL_IS_INSTANCE_HEADER( ecl_sum );
double_vector_type * ecl_sum_alloc_seconds_solution( const ecl_sum_type * ecl_sum , const char * gen_key , double cmp_value , bool rates_clamp_lower);
double_vector_type * ecl_sum_alloc_days_solution( const ecl_sum_type * ecl_sum , const char * gen_key , double cmp_value , bool rates_clamp_lower);
time_t_vector_type * ecl_sum_alloc_time_solution( const ecl_sum_type * ecl_sum , const char * gen_key , double cmp_value , bool rates_clamp_lower);
UTIL_IS_INSTANCE_HEADER( ecl_sum );
#ifdef __cplusplus
}

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_SUM_DATA_H__
#define __ECL_SUM_DATA_H__
#ifndef ERT_ECL_SUM_DATA_H
#define ERT_ECL_SUM_DATA_H
#ifdef __cplusplus
@@ -88,6 +88,9 @@ typedef struct ecl_sum_data_struct ecl_sum_data_type ;
bool ecl_sum_data_report_step_compatible( const ecl_sum_data_type * data1 , const ecl_sum_data_type * data2);
void ecl_sum_data_fwrite_interp_csv_line(const ecl_sum_data_type * data , time_t sim_time, const ecl_sum_vector_type * keylist, FILE *fp);
double_vector_type * ecl_sum_data_alloc_seconds_solution( const ecl_sum_data_type * data , const smspec_node_type * node , double value, bool rates_clamp_lower);
#ifdef __cplusplus
}
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_SUM_INDEX_H__
#define __ECL_SUM_INDEX_H__
#ifndef ERT_ECL_SUM_INDEX_H
#define ERT_ECL_SUM_INDEX_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_SUM_TSTEP_H__
#define __ECL_SUM_TSTEP_H__
#ifndef ERT_ECL_SUM_TSTEP_H
#define ERT_ECL_SUM_TSTEP_H
#ifdef __cplusplus
extern "C" {
@@ -51,7 +51,9 @@ typedef struct ecl_sum_tstep_struct ecl_sum_tstep_type;
void ecl_sum_tstep_fwrite( const ecl_sum_tstep_type * ministep , const int_vector_type * index_map , fortio_type * fortio);
void ecl_sum_tstep_iset( ecl_sum_tstep_type * tstep , int index , float value);
void ecl_sum_tstep_set_from_node( ecl_sum_tstep_type * tstep , const smspec_node_type * smspec_node , float value);
double ecl_sum_tstep_get_from_node( const ecl_sum_tstep_type * tstep , const smspec_node_type * smspec_node);
void ecl_sum_tstep_set_from_key( ecl_sum_tstep_type * tstep , const char * gen_key , float value);
double ecl_sum_tstep_get_from_key( const ecl_sum_tstep_type * tstep , const char * gen_key);

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_SUM_VECTOR_H__
#define __ECL_SUM_VECTOR_H__
#ifndef ERT_ECL_SUM_VECTOR_H
#define ERT_ECL_SUM_VECTOR_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __ECL_UTIL_H__
#define __ECL_UTIL_H__
#ifndef ERT_ECL_UTIL_H
#define ERT_ECL_UTIL_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __FAULT_BLOCK_H__
#define __FAULT_BLOCK_H__
#ifndef ERT_FAULT_BLOCK_H
#define ERT_FAULT_BLOCK_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __FAULT_BLOCK_LAYER_H__
#define __FAULT_BLOCK_LAYER_H__
#ifndef ERT_FAULT_BLOCK_LAYER_H
#define ERT_FAULT_BLOCK_LAYER_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __FORTIO_H__
#define __FORTIO_H__
#ifndef ERT_FORTIO_H
#define ERT_FORTIO_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __GRID_DIMS_H__
#define __GRID_DIMS_H__
#ifndef ERT_GRID_DIMS_H
#define ERT_GRID_DIMS_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __LAYER_H__
#define __LAYER_H__
#ifndef ERT_LAYER_H
#define ERT_LAYER_H
#ifdef __cplusplus
extern "C" {

View File

@@ -17,8 +17,8 @@
*/
#ifndef __NNC_INFO_H__
#define __NNC_INFO_H__
#ifndef ERT_NNC_INFO_H
#define ERT_NNC_INFO_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -1,39 +1,41 @@
/*
Copyright (C) 2013 Statoil ASA, Norway.
The file 'nnc_vector.h' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
Copyright (C) 2013 Statoil ASA, Norway.
The file 'nnc_vector.h' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#ifndef __NNC_VECTOR_H__
#define __NNC_VECTOR_H__
#ifndef ERT_NNC_VECTOR_H
#define ERT_NNC_VECTOR_H
#ifdef __cplusplus
extern "C" {
#endif
#include <ert/util/int_vector.h>
#include <ert/util/int_vector.h>
#include <ert/util/type_macros.h>
typedef struct nnc_vector_struct nnc_vector_type;
typedef struct nnc_vector_struct nnc_vector_type;
UTIL_IS_INSTANCE_HEADER(nnc_vector);
nnc_vector_type * nnc_vector_alloc(int lgr_nr);
int nnc_vector_iget_nnc_index( const nnc_vector_type * nnc_vector , int index );
int nnc_vector_iget_grid_index( const nnc_vector_type * nnc_vector , int index );
nnc_vector_type * nnc_vector_alloc(int lgr_nr);
nnc_vector_type * nnc_vector_alloc_copy(const nnc_vector_type * src_vector);
void nnc_vector_free( nnc_vector_type * nnc_vector );
void nnc_vector_add_nnc(nnc_vector_type * nnc_vector, int global_cell_number, int nnc_index);
void nnc_vector_add_nnc(nnc_vector_type * nnc_vector, int global_cell_number, int nnc_index);
const int_vector_type * nnc_vector_get_grid_index_list(const nnc_vector_type * nnc_vector);
const int_vector_type * nnc_vector_get_nnc_index_list(const nnc_vector_type * nnc_vector);
int nnc_vector_get_lgr_nr(const nnc_vector_type * nnc_vector );

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __POINT_H__
#define __POINT_H__
#ifndef ERT_POINT_H
#define ERT_POINT_H
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -17,8 +17,8 @@
*/
#ifndef __SMSPEC_NODE_H__
#define __SMSPEC_NODE_H__
#ifndef ERT_SMSPEC_NODE_H
#define ERT_SMSPEC_NODE_H
#include <stdbool.h>
@@ -127,7 +127,13 @@ typedef enum {ECL_SMSPEC_INVALID_VAR = 0 ,
void smspec_node_set_default( smspec_node_type * smspec_node , float default_value);
float smspec_node_get_default( const smspec_node_type * smspec_node);
const int* smspec_node_get_ijk( const smspec_node_type * smpsec_node );
const char* smspec_node_get_lgr_name( const smspec_node_type * smpsec_node );
const int* smspec_node_get_lgr_ijk( const smspec_node_type * smpsec_node );
int smspec_node_get_R1( const smspec_node_type * smpsec_node );
int smspec_node_get_R2( const smspec_node_type * smpsec_node );
#ifdef __cplusplus
}
#endif

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __TETRAHEDRON_H__
#define __TETRAHEDRON_H__
#ifndef ERT_TETRAHEDRON_H
#define ERT_TETRAHEDRON_H
typedef struct tetrahedron_struct tetrahedron_type;

View File

@@ -2224,6 +2224,66 @@ static void ecl_grid_init_cell_nnc_info(ecl_grid_type * ecl_grid, int global_ind
grid_cell->nnc_info = nnc_info_alloc(ecl_grid->lgr_nr);
}
/*
The function ecl_grid_add_self_nnc() will add a NNC connection
between two cells in the same grid. Observe that there are two
peculiarities with this implementation:
1. In the ecl_grid structure the nnc information is distributed
among the cells. The main purpose of adding the nnc information
like this is to include the NNC information in the EGRID files
when writing to disk. Before being written to disk the NNC
information is serialized into vectors NNC1 and NNC2. It is the
ordering in the NNC1 and NNC2 vectors which must be correct, and
that is governed by the nnc_index argument - i.e. the nnc_index
serves as an 'ID' for the NNC connections.
After all NNC information has been entered you should be certain
that all nnc_index values in the range [0,num_nnc] have been
set.
2. To get valid NNC information to load in e.g. Resinsight the
corresponding TRANNNC keyword must be added to the INIT file,
i.e. the calling scope must create a ecl_kw with
transmissibility values in parallel with adding NNC information
to the grid:
fortio_type * init_file = fortio_open_writer( "CASE.INIT" , ...
ecl_grid_type * grid ...
ecl_kw_type * trannnc_kw = ecl_kw_alloc( "TRANNNC" , num_nnc , ECL_FLOAT_TYPE );
for (int i = 0; i < num_nnc; i++) {
int g1 = ...
int g2 = ..
float T = ..
ecl_grid_add_self_nnc( grid , g1 , g2 , i );
ecl_kw_iset( trannnc_kw , i , T );
}
...
ecl_grid_fwrite_EGRID( grid , ... );
ecl_kw_fwrite( trannnc_kw , init_file );
*/
void ecl_grid_add_self_nnc( ecl_grid_type * grid, int cell_index1, int cell_index2, int nnc_index) {
  /* Register one same-grid NNC on the source cell; nnc_index determines
     the connection's slot in the serialized NNC1/NNC2 vectors (see the
     comment block above). */
  ecl_grid_init_cell_nnc_info( grid , cell_index1 );   /* make sure nnc_info is allocated */
  {
    ecl_cell_type * src_cell = ecl_grid_get_cell( grid , cell_index1 );
    nnc_info_add_nnc( src_cell->nnc_info , grid->lgr_nr , cell_index2 , nnc_index );
  }
}
/*
This function will add all the nnc connections given by the g1_list
and g2_list arrays. The nnc connections will be added with
consecutively running nnc_index = [0,num_nnc).
*/
void ecl_grid_add_self_nnc_list( ecl_grid_type * grid, const int * g1_list , const int * g2_list , int num_nnc ) {
  /* Bulk variant of ecl_grid_add_self_nnc(): entry i of g1_list/g2_list
     becomes the NNC with nnc_index == i, i.e. the connections get
     consecutively running indices [0, num_nnc). */
  int nnc_index;
  for (nnc_index = 0; nnc_index < num_nnc; nnc_index++) {
    ecl_grid_add_self_nnc( grid , g1_list[ nnc_index ] , g2_list[ nnc_index ] , nnc_index );
  }
}
/*
This function populates nnc_info for cells with non neighbour
@@ -5790,6 +5850,48 @@ void ecl_grid_reset_actnum( ecl_grid_type * grid , const int * actnum ) {
}
/*
  Serialize the grid's same-grid NNC information to the NNCHEAD, NNC1 and
  NNC2 keywords on @fortio. The per-cell nnc_info structures are
  flattened into the two parallel vectors g1/g2, where each connection is
  placed at its nnc_index slot - so the nnc_index values assigned with
  ecl_grid_add_self_nnc() fully determine the NNC1/NNC2 ordering.
  Cell numbers are written 1-based (hence the '1 + g' below).
*/
static void ecl_grid_fwrite_self_nnc( const ecl_grid_type * grid , fortio_type * fortio ) {
const int default_index = 1;  /* Fill value for nnc_index slots never assigned by the caller. */
int_vector_type * g1 = int_vector_alloc(0 , default_index );
int_vector_type * g2 = int_vector_alloc(0 , default_index );
int g;
/* Pass 1: scatter every (cell, neighbour) pair into g1/g2 at its nnc_index. */
for (g=0; g < ecl_grid_get_global_size(grid); g++) {
ecl_cell_type * cell = ecl_grid_get_cell( grid , g );
const nnc_info_type * nnc_info = cell->nnc_info;
if (nnc_info) {
const nnc_vector_type * nnc_vector = nnc_info_get_self_vector(nnc_info);
int i;
for (i = 0; i < nnc_vector_get_size( nnc_vector ); i++) {
int nnc_index = nnc_vector_iget_nnc_index( nnc_vector , i );
int_vector_iset( g1 , nnc_index , 1 + g );
int_vector_iset( g2 , nnc_index , 1 + nnc_vector_iget_grid_index( nnc_vector , i ));
}
}
}
/* Pass 2: write header + both index vectors. The kw's share the vector
   storage (alloc_new_shared), so the vectors must outlive the writes. */
{
int num_nnc = int_vector_size( g1 );
ecl_kw_type * nnc1_kw = ecl_kw_alloc_new_shared( NNC1_KW , num_nnc , ECL_INT_TYPE , int_vector_get_ptr( g1 ));
ecl_kw_type * nnc2_kw = ecl_kw_alloc_new_shared( NNC2_KW , num_nnc , ECL_INT_TYPE , int_vector_get_ptr( g2 ));
ecl_kw_type * nnchead_kw = ecl_kw_alloc( NNCHEAD_KW , NNCHEAD_SIZE , ECL_INT_TYPE );
/* Zero the whole header, then fill in the two items we know. */
ecl_kw_scalar_set_int( nnchead_kw , 0 );
ecl_kw_iset_int( nnchead_kw , NNCHEAD_NUMNNC_INDEX , num_nnc );
ecl_kw_iset_int( nnchead_kw , NNCHEAD_LGR_INDEX , grid->lgr_nr );
ecl_kw_fwrite( nnchead_kw , fortio);
ecl_kw_fwrite( nnc1_kw , fortio);
ecl_kw_fwrite( nnc2_kw , fortio);
ecl_kw_free( nnchead_kw );
ecl_kw_free( nnc2_kw );
ecl_kw_free( nnc1_kw );
}
int_vector_free( g1 );
int_vector_free( g2 );
}
static void ecl_grid_fwrite_EGRID__( ecl_grid_type * grid , fortio_type * fortio, bool metric_output) {
bool is_lgr = true;
@@ -5884,6 +5986,7 @@ static void ecl_grid_fwrite_EGRID__( ecl_grid_type * grid , fortio_type * fortio
ecl_kw_fwrite( endlgr_kw , fortio );
ecl_kw_free( endlgr_kw );
}
ecl_grid_fwrite_self_nnc( grid , fortio );
}

View File

@@ -219,7 +219,9 @@ static const char* special_vars[] = {"NEWTON",
"MAXDSO",
"MAXDSG",
"MAXDSW",
"STEPTYPE"};
"STEPTYPE",
"WNEWTON"};
/*

View File

@@ -283,6 +283,17 @@ smspec_node_type * ecl_sum_add_blank_var( ecl_sum_type * ecl_sum , float default
/*
Observe the time argument in ecl_sum_add_tstep() and the bool flag
time_in_days in ecl_sum_alloc_writer() can be misleading:
- The time argument 'sim_seconds' to ecl_sum_add_tstep() should
*ALWAYS* be in seconds.
- The 'time_in_days' argument to the ecl_sum_alloc_writer( ) is just
a very very basic unit support in the output. If sim_in_days ==
true the output time unit will be days, otherwise it will be hours.
*/
ecl_sum_tstep_type * ecl_sum_add_tstep( ecl_sum_type * ecl_sum , int report_step , double sim_seconds) {
return ecl_sum_data_add_new_tstep( ecl_sum->data , report_step , sim_seconds );
@@ -1249,4 +1260,30 @@ bool ecl_sum_report_step_compatible( const ecl_sum_type * ecl_sum1 , const ecl_s
}
double_vector_type * ecl_sum_alloc_seconds_solution( const ecl_sum_type * ecl_sum , const char * gen_key , double cmp_value , bool rates_clamp_lower) {
  /* Thin wrapper: resolve gen_key to its smspec node and delegate the
     actual root finding to the ecl_sum_data layer. Caller owns the
     returned vector. */
  return ecl_sum_data_alloc_seconds_solution( ecl_sum->data ,
                                              ecl_sum_get_general_var_node( ecl_sum , gen_key ) ,
                                              cmp_value ,
                                              rates_clamp_lower );
}
double_vector_type * ecl_sum_alloc_days_solution( const ecl_sum_type * ecl_sum , const char * gen_key , double cmp_value , bool rates_clamp_lower) {
  /* Same as ecl_sum_alloc_seconds_solution(), but with the solution
     times converted from seconds to days in place. */
  const double seconds_per_day = 86400;
  double_vector_type * solution_days = ecl_sum_alloc_seconds_solution( ecl_sum , gen_key , cmp_value , rates_clamp_lower );
  double_vector_scale( solution_days , 1.0 / seconds_per_day );
  return solution_days;
}
time_t_vector_type * ecl_sum_alloc_time_solution( const ecl_sum_type * ecl_sum , const char * gen_key , double cmp_value , bool rates_clamp_lower) {
  /* As ecl_sum_alloc_seconds_solution(), but each solution is returned
     as an absolute time_t: the case start time advanced by the solved
     number of seconds. Caller owns the returned vector. */
  double_vector_type * offsets = ecl_sum_alloc_seconds_solution( ecl_sum , gen_key , cmp_value , rates_clamp_lower );
  time_t_vector_type * times = time_t_vector_alloc( 0 , 0 );
  const time_t start_time = ecl_sum_get_start_time( ecl_sum );
  int i;
  for (i = 0; i < double_vector_size( offsets ); i++) {
    time_t abs_time = start_time;
    util_inplace_forward_seconds( &abs_time , double_vector_iget( offsets , i ));
    time_t_vector_append( times , abs_time );
  }
  double_vector_free( offsets );
  return times;
}

View File

@@ -1,4 +1,4 @@
/*
/*
Copyright (C) 2011 Statoil ASA, Norway.
The file 'ecl_sum_data.c' is part of ERT - Ensemble based Reservoir Tool.
@@ -661,6 +661,54 @@ void ecl_sum_data_init_interp_from_sim_days( const ecl_sum_data_type * data , do
}
/*
  Solve node(t) == cmp_value for t (seconds since simulation start) by
  scanning consecutive ministep pairs for a sign change.

  - Non-rate variables are treated as piecewise linear between
    ministeps; the crossing time is found by linear interpolation.
  - Rate variables are treated as piecewise constant over the
    (prev_time, time] interval: with rates_clamp_lower the interval
    start (+1 second) is reported, otherwise the interval end.

  Every crossing found is appended to the returned vector; the caller
  owns the vector. With fewer than two ministeps no crossing can be
  bracketed and an empty vector is returned.
*/
double_vector_type * ecl_sum_data_alloc_seconds_solution( const ecl_sum_data_type * data , const smspec_node_type * node , double cmp_value, bool rates_clamp_lower) {
  double_vector_type * solution = double_vector_alloc( 0, 0);
  const int param_index = smspec_node_get_params_index( node );
  const int size = vector_get_size( data->data);
  const bool is_rate = smspec_node_is_rate( node );  /* was stored in a double - it is a flag */
  int index;

  if (size < 2)
    return solution;

  for (index = 1; index < size; index++) {
    const ecl_sum_tstep_type * prev_ministep = ecl_sum_data_iget_ministep( data , index - 1 );
    const ecl_sum_tstep_type * ministep = ecl_sum_data_iget_ministep( data , index );
    double prev_value = ecl_sum_tstep_iget( prev_ministep , param_index );
    double value = ecl_sum_tstep_iget( ministep , param_index );

    /* Crossing: either we hit cmp_value exactly at this ministep, or
       cmp_value lies strictly between prev_value and value. */
    if ((value == cmp_value) ||
        (((value - cmp_value) * (cmp_value - prev_value)) > 0)) {
      double time1 = ecl_sum_tstep_get_sim_seconds( prev_ministep );
      double time2 = ecl_sum_tstep_get_sim_seconds( ministep );

      if (is_rate) {
        if (rates_clamp_lower)
          double_vector_append( solution , time1 + 1 );
        else
          double_vector_append( solution , time2 );
      } else {
        double slope = (value - prev_value) / (time2 - time1);
        double seconds = (cmp_value - prev_value) / slope + time1;
        double_vector_append( solution , seconds );
      }
    }
  }
  return solution;
}

View File

@@ -288,6 +288,11 @@ void ecl_sum_tstep_set_from_node( ecl_sum_tstep_type * tstep , const smspec_node
ecl_sum_tstep_iset( tstep , data_index , value);
}
double ecl_sum_tstep_get_from_node( const ecl_sum_tstep_type * tstep , const smspec_node_type * smspec_node) {
  /* Read the value stored for @smspec_node in this timestep; the
     read-side mirror of ecl_sum_tstep_set_from_node(). */
  return ecl_sum_tstep_iget( tstep , smspec_node_get_params_index( smspec_node ));
}
void ecl_sum_tstep_set_from_key( ecl_sum_tstep_type * tstep , const char * gen_key , float value) {
const smspec_node_type * smspec_node = ecl_smspec_get_general_var_node( tstep->smspec , gen_key );
@@ -295,9 +300,8 @@ void ecl_sum_tstep_set_from_key( ecl_sum_tstep_type * tstep , const char * gen_k
}
double ecl_sum_tstep_get_from_key(const ecl_sum_tstep_type * tstep , const char * gen_key) {
const smspec_node_type * smspec_node = ecl_smspec_get_general_var_node( tstep->smspec , gen_key );
int data_index = smspec_node_get_params_index( smspec_node );
return ecl_sum_tstep_iget( tstep , data_index);
const smspec_node_type * smspec_node = ecl_smspec_get_general_var_node( tstep->smspec , gen_key );
return ecl_sum_tstep_get_from_node(tstep , smspec_node );
}
bool ecl_sum_tstep_has_key(const ecl_sum_tstep_type * tstep , const char * gen_key) {

View File

@@ -120,3 +120,11 @@ int nnc_vector_get_size( const nnc_vector_type * nnc_vector ) {
int nnc_vector_get_lgr_nr( const nnc_vector_type * nnc_vector ) {
return nnc_vector->lgr_nr;
}
/* Return element @index of the vector's internal nnc_index_list. */
int nnc_vector_iget_nnc_index( const nnc_vector_type * nnc_vector , int index ) {
return int_vector_iget( nnc_vector->nnc_index_list , index );
}
/* Return element @index of the vector's internal grid_index_list. */
int nnc_vector_iget_grid_index( const nnc_vector_type * nnc_vector , int index ) {
return int_vector_iget( nnc_vector->grid_index_list , index );
}

View File

@@ -414,6 +414,18 @@ static void smspec_node_set_num( smspec_node_type * index , const int grid_dims[
}
}
/*
  Decode the packed NUM value of a region-to-region variable into its
  two region numbers; this inverts the encoding
  num = r1 + 32768 * (r2 + 10) implied by the arithmetic below.
  For any other variable type both outputs are set to -1.
*/
static void smspec_node_decode_R1R2( const smspec_node_type * smspec_node , int * r1 , int * r2) {
if (smspec_node->var_type == ECL_SMSPEC_REGION_2_REGION_VAR) {
*r1 = smspec_node->num % 32768;
*r2 = ((smspec_node->num - (*r1)) / 32768)-10;
} else {
*r1 = -1;
*r2 = -1;
}
}
/**
This function will init the gen_key field of the smspec_node
instance; this is the keyw which is used to install the
@@ -454,8 +466,8 @@ static void smspec_node_set_gen_keys( smspec_node_type * smspec_node , const cha
case(ECL_SMSPEC_REGION_2_REGION_VAR):
// KEYWORDS:RXF:NUM and RXF:R1-R2
{
int r1 = smspec_node->num % 32768;
int r2 = ((smspec_node->num-r1) / 32768)-10;
int r1,r2;
smspec_node_decode_R1R2( smspec_node , &r1 , &r2);
smspec_node->gen_key1 = smspec_alloc_region_2_region_r1r2_key( key_join_string , smspec_node->keyword , r1, r2);
}
smspec_node->gen_key2 = smspec_alloc_region_2_region_num_key( key_join_string , smspec_node->keyword , smspec_node->num);
@@ -856,8 +868,43 @@ void smspec_node_set_unit( smspec_node_type * smspec_node , const char * unit )
}
// Will be NULL for smspec_nodes which do not have i,j,k
// The returned pointer aliases the node's internal storage; the caller
// must not free it.
const int* smspec_node_get_ijk( const smspec_node_type * smspec_node ) {
return smspec_node->ijk;
}
// Will be NULL for smspec_nodes which are not related to an LGR.
// The returned pointer aliases the node's internal storage; the caller
// must not free it.
const char* smspec_node_get_lgr_name( const smspec_node_type * smspec_node ) {
return smspec_node->lgr_name;
}
// Will be NULL for smspec_nodes which are not related to an LGR.
// The returned pointer aliases the node's internal storage; the caller
// must not free it.
const int* smspec_node_get_lgr_ijk( const smspec_node_type * smspec_node ) {
return smspec_node->lgr_ijk;
}
/*
Will return -1 for smspec_node variables which are not
of type ECL_SMSPEC_REGION_2_REGION_VAR.
*/
int smspec_node_get_R1( const smspec_node_type * smspec_node ) {
  /* First region of a region-to-region variable; -1 for every other
     variable type. */
  int r1 = -1;
  if (smspec_node->var_type == ECL_SMSPEC_REGION_2_REGION_VAR) {
    int r2;
    smspec_node_decode_R1R2( smspec_node , &r1 , &r2 );
  }
  return r1;
}
int smspec_node_get_R2( const smspec_node_type * smspec_node ) {
  /* Second region of a region-to-region variable; -1 for every other
     variable type. */
  int r2 = -1;
  if (smspec_node->var_type == ECL_SMSPEC_REGION_2_REGION_VAR) {
    int r1;
    smspec_node_decode_R1R2( smspec_node , &r1 , &r2 );
  }
  return r2;
}
bool smspec_node_need_nums( const smspec_node_type * smspec_node ) {

View File

@@ -111,6 +111,7 @@ void test_writable(const char * src_file ) {
test_work_area_copy_file( work_area , src_file );
{
test_flags( fname );
ecl_file_type * ecl_file = ecl_file_open( fname , ECL_FILE_WRITABLE);
ecl_kw_type * swat = ecl_file_iget_named_kw( ecl_file , "SWAT" , 0 );
ecl_kw_type * swat0 = ecl_kw_alloc_copy( swat );
@@ -163,8 +164,7 @@ int main( int argc , char ** argv) {
{
test_work_area_type * work_area = test_work_area_alloc("ecl_file");
test_work_area_install_file( work_area , src_file );
test_flags( src_file );
test_work_area_copy_file( work_area , src_file );
test_loadall(src_file , target_file );
test_close_stream1( src_file , target_file);

View File

@@ -0,0 +1,140 @@
/*
Copyright (C) 2016 Statoil ASA, Norway.
The file 'ecl_grid_add_nnc.c' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#include <stdlib.h>
#include <stdbool.h>
#include <ert/util/test_util.h>
#include <ert/util/util.h>
#include <ert/util/test_work_area.h>
#include <ert/ecl/ecl_grid.h>
/*
  Assert the canonical test topology used by all three tests below:
  cell 5 has NNCs to cells 6 and 7 (nnc_index 0 and 1), and cell 8 has
  one NNC to cell 9 (nnc_index 2). Aborts via test_assert_* on mismatch.
*/
void verify_simple_nnc( const ecl_grid_type * grid) {
test_assert_not_NULL( ecl_grid_get_cell_nnc_info1(grid, 5 ));
test_assert_not_NULL( ecl_grid_get_cell_nnc_info1(grid, 8 ));
// Cell 5:
{
const nnc_info_type * nnc_info = ecl_grid_get_cell_nnc_info1(grid, 5);
const nnc_vector_type * nnc_vector = nnc_info_iget_vector( nnc_info , 0 );
test_assert_int_equal( 2 , nnc_vector_get_size( nnc_vector ));
test_assert_int_equal( 6 , nnc_vector_iget_grid_index( nnc_vector , 0 ));
test_assert_int_equal( 7 , nnc_vector_iget_grid_index( nnc_vector , 1 ));
test_assert_int_equal( 0 , nnc_vector_iget_nnc_index( nnc_vector , 0 ));
test_assert_int_equal( 1 , nnc_vector_iget_nnc_index( nnc_vector , 1 ));
}
// Cell 8:
{
const nnc_info_type * nnc_info = ecl_grid_get_cell_nnc_info1(grid, 8);
const nnc_vector_type * nnc_vector = nnc_info_iget_vector( nnc_info , 0 );
test_assert_int_equal( 1 , nnc_vector_get_size( nnc_vector ));
test_assert_int_equal( 9 , nnc_vector_iget_grid_index( nnc_vector , 0 ));
test_assert_int_equal( 2 , nnc_vector_iget_nnc_index( nnc_vector , 0 ));
}
}
void simple_test() {
  /* Build a 10x10x10 grid, attach three same-grid NNCs, verify the
     in-memory representation, then round-trip through an EGRID file and
     verify the loaded grid agrees. */
  ecl_grid_type * src_grid = ecl_grid_alloc_rectangular( 10 , 10 , 10 , 1 , 1, 1, NULL );

  ecl_grid_add_self_nnc( src_grid , 5 , 6 , 0 );
  ecl_grid_add_self_nnc( src_grid , 5 , 7 , 1 );
  ecl_grid_add_self_nnc( src_grid , 8 , 9 , 2 );
  verify_simple_nnc( src_grid );

  {
    test_work_area_type * work_area = test_work_area_alloc("ecl_grid_nnc");
    ecl_grid_type * loaded_grid;

    ecl_grid_fwrite_EGRID( src_grid , "TEST.EGRID" , true);
    loaded_grid = ecl_grid_alloc( "TEST.EGRID" );
    verify_simple_nnc( loaded_grid );

    ecl_grid_free( loaded_grid );
    test_work_area_free( work_area );
  }
  ecl_grid_free( src_grid );
}
void overwrite_test() {
  ecl_grid_type * src_grid = ecl_grid_alloc_rectangular( 10 , 10 , 10 , 1 , 1, 1, NULL );

  /*
    This first batch reuses nnc_index 0..2 and is subsequently
    overwritten by the second batch; the overwritten entries do not
    survive the serialization to disk.
  */
  ecl_grid_add_self_nnc( src_grid , 1 , 2 , 0 );
  ecl_grid_add_self_nnc( src_grid , 1 , 3 , 1 );
  ecl_grid_add_self_nnc( src_grid , 1 , 4 , 2 );

  ecl_grid_add_self_nnc( src_grid , 5 , 6 , 0 );
  ecl_grid_add_self_nnc( src_grid , 5 , 7 , 1 );
  ecl_grid_add_self_nnc( src_grid , 8 , 9 , 2 );
  verify_simple_nnc( src_grid );

  {
    test_work_area_type * work_area = test_work_area_alloc("ecl_grid_nnc");
    ecl_grid_type * loaded_grid;

    ecl_grid_fwrite_EGRID( src_grid , "TEST.EGRID" , true);
    loaded_grid = ecl_grid_alloc( "TEST.EGRID" );
    verify_simple_nnc( loaded_grid );

    ecl_grid_free( loaded_grid );
    test_work_area_free( work_area );
  }
  ecl_grid_free( src_grid );
}
/*
  Same scenario as simple_test(), but the connections are added in one
  call through ecl_grid_add_self_nnc_list(), which assigns consecutively
  running nnc_index values.
*/
void list_test() {
  ecl_grid_type * grid0 = ecl_grid_alloc_rectangular( 10 , 10 , 10 , 1 , 1, 1, NULL );
  int_vector_type * g1 = int_vector_alloc(0,0);
  int_vector_type * g2 = int_vector_alloc(0,0);

  int_vector_append( g1 , 5 ); int_vector_append( g2 , 6 );
  int_vector_append( g1 , 5 ); int_vector_append( g2 , 7 );
  int_vector_append( g1 , 8 ); int_vector_append( g2 , 9 );
  ecl_grid_add_self_nnc_list( grid0 , int_vector_get_ptr( g1 ) , int_vector_get_ptr( g2 ) , int_vector_size( g1 ));
  verify_simple_nnc( grid0 );

  {
    test_work_area_type * test_area = test_work_area_alloc("ecl_grid_nnc");
    ecl_grid_type * grid1;

    ecl_grid_fwrite_EGRID( grid0 , "TEST.EGRID" , true);
    grid1 = ecl_grid_alloc( "TEST.EGRID" );
    verify_simple_nnc( grid1 );

    ecl_grid_free( grid1 );
    test_work_area_free( test_area );
  }

  /* The g1/g2 vectors were leaked in the original version. */
  int_vector_free( g1 );
  int_vector_free( g2 );
  ecl_grid_free( grid0 );
}
/* Entry point: run the three NNC scenarios. Each test asserts
   internally via test_assert_* and aborts on failure, so reaching
   exit(0) means success. */
int main( int argc , char ** argv) {
simple_test();
list_test();
overwrite_test();
exit(0);
}

View File

@@ -0,0 +1,109 @@
/*
Copyright (C) 2016 Statoil ASA, Norway.
The file 'ecl_sum_writer.c' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#include <stdlib.h>
#include <stdbool.h>
#include <ert/util/test_util.h>
#include <ert/util/time_t_vector.h>
#include <ert/util/util.h>
#include <ert/util/test_work_area.h>
#include <ert/ecl/ecl_sum.h>
#include <ert/ecl/ecl_grid.h>
/*
  Create a summary case @name with three variables - field var FOPT,
  block var BPR in cell number 567 and well var WWCT for well "OP-1" -
  and num_dates * num_ministep timesteps spaced ministep_length seconds
  apart, then write it to disk with ecl_sum_fwrite().
  Each ministep stores sim_seconds, 10*sim_seconds and 100*sim_seconds
  for the three variables respectively; the values are read back and
  asserted immediately via ecl_sum_tstep_get_from_node().
*/
void write_summary( const char * name , time_t start_time , int nx , int ny , int nz , int num_dates, int num_ministep, double ministep_length) {
ecl_sum_type * ecl_sum = ecl_sum_alloc_writer( name , false , true , ":" , start_time , true , nx , ny , nz );
double sim_seconds = 0;
smspec_node_type * node1 = ecl_sum_add_var( ecl_sum , "FOPT" , NULL , 0 , "Barrels" , 99.0 );
smspec_node_type * node2 = ecl_sum_add_var( ecl_sum , "BPR" , NULL , 567 , "BARS" , 0.0 );
smspec_node_type * node3 = ecl_sum_add_var( ecl_sum , "WWCT" , "OP-1" , 0 , "(1)" , 0.0 );
for (int report_step = 0; report_step < num_dates; report_step++) {
for (int step = 0; step < num_ministep; step++) {
/* Simulate .... */
{
ecl_sum_tstep_type * tstep = ecl_sum_add_tstep( ecl_sum , report_step + 1 , sim_seconds );
ecl_sum_tstep_set_from_node( tstep , node1 , sim_seconds );
ecl_sum_tstep_set_from_node( tstep , node2 , 10*sim_seconds );
ecl_sum_tstep_set_from_node( tstep , node3 , 100*sim_seconds );
/* Round-trip check: what we just stored must read back identically. */
test_assert_double_equal( ecl_sum_tstep_get_from_node( tstep , node1 ), sim_seconds );
test_assert_double_equal( ecl_sum_tstep_get_from_node( tstep , node2 ), sim_seconds*10 );
test_assert_double_equal( ecl_sum_tstep_get_from_node( tstep , node3 ), sim_seconds*100 );
}
sim_seconds += ministep_length;
}
}
ecl_sum_fwrite( ecl_sum );
ecl_sum_free( ecl_sum );
}
/*
  Write a summary case with write_summary(), load it back from disk and
  verify the time axis and the registered keys.
*/
void test_write_read( ) {
  const char * name = "CASE";
  time_t start_time = util_make_date( 1,1,2010 );
  time_t end_time = start_time;
  int nx = 10;
  int ny = 11;
  int nz = 12;
  int num_dates = 5;
  int num_ministep = 10;
  double ministep_length = 36000; // Seconds
  {
    test_work_area_type * work_area = test_work_area_alloc("sum/write");
    ecl_sum_type * ecl_sum;

    write_summary( name , start_time , nx , ny , nz , num_dates , num_ministep , ministep_length);
    ecl_sum = ecl_sum_fread_alloc_case( name , ":" );
    test_assert_true( ecl_sum_is_instance( ecl_sum ));

    /* Time direction: first step is at offset 0, so the last of the
       num_dates * num_ministep steps lies (N - 1) * ministep_length
       seconds after start. */
    test_assert_time_t_equal( start_time , ecl_sum_get_start_time(ecl_sum));
    test_assert_time_t_equal( start_time , ecl_sum_get_data_start(ecl_sum));
    util_inplace_forward_seconds(&end_time, (num_dates * num_ministep - 1) * ministep_length );
    test_assert_time_t_equal( end_time , ecl_sum_get_end_time(ecl_sum));

    /* Keys */
    test_assert_true( ecl_sum_has_key( ecl_sum , "FOPT" ));
    test_assert_true( ecl_sum_has_key( ecl_sum , "WWCT:OP-1" ));
    test_assert_true( ecl_sum_has_key( ecl_sum , "BPR:567" ));
    {
      ecl_grid_type *grid = ecl_grid_alloc_rectangular(nx,ny,nz,1,1,1,NULL);
      int i,j,k;
      char * ijk_key;
      ecl_grid_get_ijk1( grid , 567 - 1 , &i,&j,&k);
      ijk_key = util_alloc_sprintf( "BPR:%d,%d,%d" , i+1 ,j+1 ,k+1);
      /* The block variable registered with num == 567 should also be
         addressable through its i,j,k key. The original test built
         ijk_key and freed it without ever using it - assert on it. */
      test_assert_true( ecl_sum_has_key( ecl_sum , ijk_key ));
      free( ijk_key );
      ecl_grid_free( grid );
    }
    ecl_sum_free( ecl_sum );
    test_work_area_free( work_area );
  }
}
/* Entry point - test_write_read() asserts internally via test_assert_*
   and aborts on failure, so reaching exit(0) means success. */
int main( int argc , char ** argv) {
test_write_read();
exit(0);
}

View File

@@ -51,6 +51,15 @@ add_executable( ecl_util_make_date_shift ecl_util_make_date_shift.c )
target_link_libraries( ecl_util_make_date_shift ecl test_util )
add_test( ecl_util_make_date_shift ${EXECUTABLE_OUTPUT_PATH}/ecl_util_make_date_shift )
add_executable( ecl_sum_writer ecl_sum_writer.c )
target_link_libraries( ecl_sum_writer ecl test_util )
add_test( ecl_sum_writer ${EXECUTABLE_OUTPUT_PATH}/ecl_sum_writer )
add_executable( ecl_grid_add_nnc ecl_grid_add_nnc.c )
target_link_libraries( ecl_grid_add_nnc ecl test_util )
add_test( ecl_grid_add_nnc ${EXECUTABLE_OUTPUT_PATH}/ecl_grid_add_nnc )
add_executable( ecl_sum_case_exists ecl_sum_case_exists.c )
target_link_libraries( ecl_sum_case_exists ecl test_util )
add_test( ecl_sum_case_exists ${EXECUTABLE_OUTPUT_PATH}/ecl_sum_case_exists

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_BRANCH_COLLECTION_H__
#define __WELL_BRANCH_COLLECTION_H__
#ifndef ERT_WELL_BRANCH_COLLECTION_H
#define ERT_WELL_BRANCH_COLLECTION_H
#ifdef __cplusplus

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_CONN_H__
#define __WELL_CONN_H__
#ifndef ERT_WELL_CONN_H
#define ERT_WELL_CONN_H
#ifdef __cplusplus

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_CONN_COLLECTION_H__
#define __WELL_CONN_COLLECTION_H__
#ifndef ERT_WELL_CONN_COLLECTION_H
#define ERT_WELL_CONN_COLLECTION_H
#ifdef __cplusplus

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_CONST_H__
#define __WELL_CONST_H__
#ifndef ERT_WELL_CONST_H
#define ERT_WELL_CONST_H
#ifdef __cplusplus
extern "C" {

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __WELL_INFO_H__
#define __WELL_INFO_H__
#ifndef ERT_WELL_INFO_H
#define ERT_WELL_INFO_H
#ifdef __cplusplus

View File

@@ -16,8 +16,8 @@
for more details.
*/
#ifndef __WELL_RSEG_LOADER_H__
#define __WELL_RSEG_LOADER_H__
#ifndef ERT_WELL_RSEG_LOADER_H
#define ERT_WELL_RSEG_LOADER_H
#ifdef __cplusplus

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_SEGMENT_H__
#define __WELL_SEGMENT_H__
#ifndef ERT_WELL_SEGMENT_H
#define ERT_WELL_SEGMENT_H
#ifdef __cplusplus

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_SEGMENT_COLLECTION_H__
#define __WELL_SEGMENT_COLLECTION_H__
#ifndef ERT_WELL_SEGMENT_COLLECTION_H
#define ERT_WELL_SEGMENT_COLLECTION_H
#ifdef __cplusplus

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_STATE_H__
#define __WELL_STATE_H__
#ifndef ERT_WELL_STATE_H
#define ERT_WELL_STATE_H
#ifdef __cplusplus
extern "C" {

View File

@@ -17,8 +17,8 @@
*/
#ifndef __WELL_TS_H__
#define __WELL_TS_H__
#ifndef ERT_WELL_TS_H
#define ERT_WELL_TS_H
#ifdef __cplusplus
extern "C" {

View File

@@ -24,8 +24,7 @@
#include <memory>
#include <vector>
#include <stdexcept>
#include <iostream>
#include <type_traits>
#include <ert/ecl/ecl_kw.h>
#include <ert/ecl/ecl_util.h>
@@ -35,60 +34,124 @@
namespace ERT {
template <typename T>
class EclKW
{
public:
EclKW(const std::string& kw, int size_);
EclKW() { ; }
template< typename > struct ecl_type {};
static EclKW load(FortIO& fortio);
template<> struct ecl_type< float >
{ static const ecl_type_enum type { ECL_FLOAT_TYPE }; };
template<> struct ecl_type< double >
{ static const ecl_type_enum type { ECL_DOUBLE_TYPE }; };
template<> struct ecl_type< int >
{ static const ecl_type_enum type { ECL_INT_TYPE }; };
template<> struct ecl_type< char* >
{ static const ecl_type_enum type { ECL_CHAR_TYPE }; };
template<> struct ecl_type< const char* >
{ static const ecl_type_enum type { ECL_CHAR_TYPE }; };
template <typename T>
class EclKW_ref {
public:
explicit EclKW_ref( ecl_kw_type* kw ) : m_kw( kw ) {
if( ecl_kw_get_type( kw ) != ecl_type< T >::type )
throw std::invalid_argument("Type error");
}
EclKW_ref() noexcept = default;
const char* name() const {
return ecl_kw_get_header( this->m_kw );
}
size_t size() const {
return static_cast<size_t>( ecl_kw_get_size( m_kw.get() ));
return size_t( ecl_kw_get_size( this->m_kw ) );
}
T& operator[](size_t index) {
return *( static_cast<T *>( ecl_kw_iget_ptr( m_kw.get() , index) ));
}
void fwrite(FortIO& fortio) const {
ecl_kw_fwrite( m_kw.get() , fortio.get() );
ecl_kw_fwrite( this->m_kw , fortio.get() );
}
void assignVector(const std::vector<T>& data) {
if (data.size() == size())
ecl_kw_set_memcpy_data( m_kw.get() , data.data() );
else
throw std::invalid_argument("Size error");
T at( size_t i ) const {
return *static_cast< T* >( ecl_kw_iget_ptr( this->m_kw, i ) );
}
ecl_kw_type * getPointer() const {
return m_kw.get();
}
private:
EclKW(ecl_kw_type * c_ptr) {
m_kw.reset( c_ptr );
const typename std::remove_pointer< T >::type* data() const {
using Tp = const typename std::remove_pointer< T >::type*;
return static_cast< Tp >( ecl_kw_get_ptr( this->m_kw ) );
}
static EclKW checkedLoad(FortIO& fortio, ecl_type_enum expectedType) {
ecl_kw_type * c_ptr = ecl_kw_fread_alloc( fortio.get() );
if (c_ptr) {
if (ecl_kw_get_type( c_ptr ) == expectedType)
return EclKW( c_ptr );
else
throw std::invalid_argument("Type error");
} else
throw std::invalid_argument("fread kw failed - EOF?");
ecl_kw_type* get() const {
return this->m_kw;
}
ert_unique_ptr<ecl_kw_type , ecl_kw_free> m_kw;
protected:
ecl_kw_type* m_kw = nullptr;
};
/* Specialisation for string keywords: element access returns the stored
   character pointer directly instead of dereferencing the item storage
   as a (const char*) slot, as the generic at() would.
   NOTE(review): the returned pointer aliases storage owned by the
   underlying ecl_kw -- confirm its lifetime against the
   ecl_kw_iget_char_ptr API before caching it. */
template<>
inline const char* EclKW_ref< const char* >::at( size_t i ) const {
return ecl_kw_iget_char_ptr( this->m_kw, i );
}
/*
 * Owning RAII wrapper around ecl_kw_type. Exactly one EclKW instance
 * owns (and eventually frees) the underlying C keyword, so the type is
 * move-only: copying is deleted and moving transfers the raw pointer.
 */
template< typename T >
class EclKW : public EclKW_ref< T > {
    private:
        using base = EclKW_ref< T >;

    public:
        /* Inherit the default constructor and the (type-checked)
           pointer-adopting constructor from the non-owning base. */
        using EclKW_ref< T >::EclKW_ref;

        EclKW( const EclKW& ) = delete;
        EclKW& operator=( const EclKW& ) = delete;

        /* noexcept: steal the pointer directly instead of routing
           through the throwing, type-checking base constructor. This
           also makes moving from a default-constructed (null) EclKW
           well-defined, and lets containers relocate by move. */
        EclKW( EclKW&& rhs ) noexcept {
            this->m_kw = rhs.m_kw;
            rhs.m_kw = nullptr;
        }

        /* Move assignment completes the rule of five; any previously
           owned keyword is released first. */
        EclKW& operator=( EclKW&& rhs ) noexcept {
            if( this != &rhs ) {
                if( this->m_kw ) ecl_kw_free( this->m_kw );
                this->m_kw = rhs.m_kw;
                rhs.m_kw = nullptr;
            }

            return *this;
        }

        ~EclKW() {
            if( this->m_kw ) ecl_kw_free( this->m_kw );
        }

        /* Allocate a fresh, uninitialised keyword whose element type
           is mapped from T by the ecl_type trait. */
        EclKW( const std::string& kw, int size_ ) :
            base( ecl_kw_alloc( kw.c_str(), size_, ecl_type< T >::type ) )
        {}

        /* Allocate and fill from a same-typed vector (bulk memcpy). */
        EclKW( const std::string& kw, const std::vector< T >& data ) :
            EclKW( kw, data.size() )
        {
            ecl_kw_set_memcpy_data( this->m_kw, data.data() );
        }

        /* Allocate and fill from a vector of a convertible type U;
           each element is converted to T individually. */
        template< typename U >
        EclKW( const std::string& kw, const std::vector< U >& data ) :
            EclKW( kw, data.size() )
        {
            T* target = static_cast< T* >( ecl_kw_get_ptr( this->m_kw ) );

            for( size_t i = 0; i < data.size(); ++i )
                target[ i ] = T( data[ i ] );
        }

        /* Read the next keyword from fortio. Throws
           std::invalid_argument on EOF/read failure; the inherited
           adopting constructor then throws if the on-disk element type
           does not match T. */
        static EclKW load( FortIO& fortio ) {
            ecl_kw_type* c_ptr = ecl_kw_fread_alloc( fortio.get() );

            if( !c_ptr )
                throw std::invalid_argument("fread kw failed - EOF?");

            return EclKW( c_ptr );
        }
};
/* String keywords cannot be filled with a bulk memcpy: each entry is
   written through ecl_kw_iset_string8, which stores fixed-width string
   items (see the string round-trip tests for the padding/truncation
   behaviour). */
template<> inline
EclKW< const char* >::EclKW( const std::string& kw,
                             const std::vector< const char* >& data ) :
    EclKW( kw, data.size() )
{
    ecl_kw_type* raw = this->get();
    size_t index = 0;

    for( const char* item : data )
        ecl_kw_iset_string8( raw, index++, item );
}
}
#endif

View File

@@ -22,20 +22,22 @@ namespace ERT {
smspec_node( const std::string& keyword );
smspec_node( const std::string& keyword,
int dims[ 3 ],
int ijk[ 3 ] );
const int dims[ 3 ],
const int ijk[ 3 ] );
smspec_node( const std::string& keyword,
const std::string& wellname,
int dims[ 3 ],
int ijk[ 3 ] );
const int dims[ 3 ],
const int ijk[ 3 ] );
smspec_node( const std::string& keyword,
int dims[ 3 ],
const int dims[ 3 ],
int region );
int type() const;
const char* wgname() const;
const char* keyword() const;
int num() const;
private:
smspec_node(

View File

@@ -1,5 +1,4 @@
set( source_files
EclKW.cpp
FortIO.cpp
Smspec.cpp
)

View File

@@ -1,36 +0,0 @@
#include <ert/ecl/EclKW.hpp>
/*
 * Out-of-line specialisations of the EclKW constructor and load() for
 * the supported element types. Each constructor allocates the
 * underlying C keyword with the matching ECL_*_TYPE tag; each load()
 * delegates to checkedLoad(), which verifies the on-disk element type.
 * NOTE(review): this translation unit is removed by this commit in
 * favour of a header-only implementation in EclKW.hpp -- confirm
 * before reintroducing it.
 */
namespace ERT {
/* int keyword: allocate size_ ECL_INT_TYPE elements. */
template <>
EclKW<int>::EclKW(const std::string& kw, int size_)
: m_kw( ecl_kw_alloc( kw.c_str() , size_ , ECL_INT_TYPE ) )
{ }
/* float keyword: allocate size_ ECL_FLOAT_TYPE elements. */
template <>
EclKW<float>::EclKW(const std::string& kw, int size_)
: m_kw( ecl_kw_alloc( kw.c_str() , size_ , ECL_FLOAT_TYPE ) )
{ }
/* double keyword: allocate size_ ECL_DOUBLE_TYPE elements. */
template <>
EclKW<double>::EclKW(const std::string& kw, int size_)
: m_kw( ecl_kw_alloc( kw.c_str() , size_ , ECL_DOUBLE_TYPE ) )
{ }
/* Type-checked reads from a fortio stream, one per element type. */
template <>
EclKW<double> EclKW<double>::load(FortIO& fortio) {
return checkedLoad(fortio , ECL_DOUBLE_TYPE);
}
template <>
EclKW<int> EclKW<int>::load(FortIO& fortio) {
return checkedLoad(fortio , ECL_INT_TYPE);
}
template <>
EclKW<float> EclKW<float>::load(FortIO& fortio) {
return checkedLoad(fortio , ECL_FLOAT_TYPE);
}
}

View File

@@ -10,7 +10,7 @@ namespace ERT {
static const int dummy_dims[ 3 ] = { -1, -1, -1 };
const auto default_join = ":";
static int global_index( int dims[ 3 ], int ijk[ 3 ] ) {
static int global_index( const int dims[ 3 ], const int ijk[ 3 ] ) {
/* num is offset 1 global index */
return 1 + ijk[ 0 ] + ( ijk[ 1 ] * dims[ 0 ] ) + ( ijk[ 2 ] * dims[ 1 ] * dims[ 0 ] );
}
@@ -29,8 +29,8 @@ namespace ERT {
smspec_node::smspec_node(
const std::string& keyword,
int dims[ 3 ],
int ijk[ 3 ] ) :
const int dims[ 3 ],
const int ijk[ 3 ] ) :
smspec_node(
ECL_SMSPEC_BLOCK_VAR, "", keyword.c_str(), "", default_join, dims, global_index( dims, ijk )
)
@@ -39,8 +39,8 @@ namespace ERT {
smspec_node::smspec_node(
const std::string& keyword,
const std::string& wellname,
int dims[ 3 ],
int ijk[ 3 ] ) :
const int dims[ 3 ],
const int ijk[ 3 ] ) :
smspec_node(
ECL_SMSPEC_COMPLETION_VAR, wellname.c_str(), keyword.c_str(), "", default_join, dims, global_index( dims, ijk )
)
@@ -48,7 +48,7 @@ namespace ERT {
smspec_node::smspec_node(
const std::string& keyword,
int dims[ 3 ],
const int dims[ 3 ],
int region ) :
smspec_node(
ECL_SMSPEC_REGION_VAR, "", keyword.c_str(), "", default_join, dims, region
@@ -67,6 +67,10 @@ namespace ERT {
join, grid_dims, num, index, default_value ) )
{}
int smspec_node::type() const {
return smspec_node_get_var_type( this->node.get() );
}
const char* smspec_node::wgname() const {
return smspec_node_get_wgname( this->node.get() );
}
@@ -74,4 +78,8 @@ namespace ERT {
const char* smspec_node::keyword() const {
return smspec_node_get_keyword( this->node.get() );
}
int smspec_node::num() const {
return smspec_node_get_num( this->node.get() );
}
}

View File

@@ -1,11 +1,11 @@
add_executable(eclxx_kw eclxx_kw.cpp)
target_link_libraries(eclxx_kw eclxx test_util util)
target_link_libraries(eclxx_kw eclxx test_util ert_utilxx)
add_test(eclxx_kw ${EXECUTABLE_OUTPUT_PATH}/eclxx_kw)
add_executable(eclxx_fortio eclxx_fortio.cpp)
target_link_libraries(eclxx_fortio eclxx test_util util)
target_link_libraries(eclxx_fortio eclxx test_util ert_utilxx)
add_test(eclxx_fortio ${EXECUTABLE_OUTPUT_PATH}/eclxx_fortio)
add_executable(eclxx_smspec eclxx_smspec.cpp)
target_link_libraries(eclxx_smspec eclxx test_util util)
target_link_libraries(eclxx_smspec eclxx test_util ert_utilxx)
add_test(eclxx_smspec ${EXECUTABLE_OUTPUT_PATH}/eclxx_smspec)

View File

@@ -21,16 +21,14 @@
#include <fstream>
#include <ert/util/test_work_area.h>
#include <ert/util/TestArea.hpp>
#include <ert/util/test_util.hpp>
#include <ert/ecl/EclKW.hpp>
#include <ert/ecl/FortIO.hpp>
void test_open() {
test_work_area_type * work_area = test_work_area_alloc("fortio");
ERT::TestArea work_area("fortio");
ERT::FortIO fortio;
fortio.open( "new_file" , std::fstream::out );
@@ -68,13 +66,11 @@ void test_open() {
test_assert_size_t_equal(data[i], i);
}
fortio.close();
test_work_area_free( work_area );
}
void test_fortio() {
test_work_area_type * work_area = test_work_area_alloc("fortio");
ERT::TestArea ta("fortio");
ERT::FortIO fortio("new_file" , std::fstream::out );
{
std::vector<int> data;
@@ -97,17 +93,19 @@ void test_fortio() {
}
fortio.close();
test_work_area_free( work_area );
test_assert_throw( ERT::FortIO fortio("file/does/not/exists" , std::fstream::in) , std::invalid_argument );
}
void test_fortio_kw() {
test_work_area_type * work_area = test_work_area_alloc("fortio_kw");
ERT::EclKW<int> kw("XYZ" , 1000);
for (size_t i =0 ; i < kw.size(); i++)
kw[i] = i;
ERT::TestArea ta("fortio");
std::vector< int > vec( 1000 );
for (size_t i =0 ; i < vec.size(); i++)
vec[ i ] = i;
ERT::EclKW<int> kw("XYZ" , vec );
{
ERT::FortIO fortio("new_file" , std::fstream::out );
@@ -120,15 +118,13 @@ void test_fortio_kw() {
ERT::EclKW<int> kw2 = ERT::EclKW<int>::load( fortio );
fortio.close( );
for (size_t i =0 ; i < kw.size(); i++)
test_assert_int_equal( kw[i] , kw2[i]);
test_assert_int_equal( kw.at( i ), kw2.at( i ) );
fortio = ERT::FortIO("new_file" , std::fstream::in );
test_assert_throw( ERT::EclKW<float>::load(fortio) , std::invalid_argument );
fortio.close();
}
test_work_area_free( work_area );
}

View File

@@ -21,27 +21,70 @@
#include <fstream>
#include <ert/util/test_util.h>
#include <ert/util/test_work_area.h>
#include <ert/ecl/EclKW.hpp>
#include <ert/ecl/FortIO.hpp>
void test_kw_name() {
ERT::EclKW< int > kw1( "short", 1 );
ERT::EclKW< int > kw2( "verylong", 1 );
void test_kw() {
ERT::EclKW<int> kw("XYZ" , 1000);
test_assert_size_t_equal( kw.size() , 1000 );
kw[0] = 1;
kw[10] = 77;
test_assert_int_equal( kw[0] , 1 );
test_assert_int_equal( kw[10] , 77 );
test_assert_string_equal( kw1.name(), "short" );
test_assert_string_equal( kw2.name(), "verylong" );
}
void test_kw_vector_assign() {
std::vector< int > vec = { 1, 2, 3, 4, 5 };
ERT::EclKW< int > kw( "XYZ", vec );
test_assert_size_t_equal( kw.size(), vec.size() );
for( size_t i = 0; i < kw.size(); ++i )
test_assert_int_equal( kw.at( i ), vec[ i ] );
}
/* String keywords store fixed-width items: as the assertions below
   demonstrate, entries shorter than the item width are space-padded,
   an entry of exactly the right width round-trips unchanged, and
   "verylongkeyword" is truncated to "verylong" (8 characters). */
void test_kw_vector_string() {
std::vector< const char* > vec = {
"short",
"sweet",
"padded ",
"verylongkeyword"
};
ERT::EclKW< const char* > kw( "XYZ", vec );
test_assert_size_t_equal( kw.size(), vec.size() );
/* Short entries come back space-padded. */
test_assert_string_equal( kw.at( 0 ), "short " );
test_assert_string_equal( kw.at( 1 ), "sweet " );
/* An already-padded entry is returned verbatim. */
test_assert_string_equal( kw.at( 2 ), vec.at( 2 ) );
/* Over-long entries are truncated, not stored in full. */
test_assert_string_equal( kw.at( 3 ), "verylong" );
test_assert_string_not_equal( kw.at( 2 ), "verylongkeyword" );
}
/* After a move the source object must hold nullptr, so that its
   destructor does not free (double-free) the keyword now owned by the
   destination. */
void test_move_semantics_no_crash() {
std::vector< int > vec = { 1, 2, 3, 4, 5 };
ERT::EclKW< int > kw1( "XYZ", vec );
ERT::EclKW< int > kw2( std::move( kw1 ) );
test_assert_true( kw1.get() == nullptr );
}
/* Adopting an ECL_INT_TYPE keyword as EclKW<double> must throw (the
   adopting constructor type-checks the pointer). In the catch branch
   the keyword is re-adopted with the correct element type, so the
   owning wrapper frees ptr and the test does not leak.
   NOTE(review): test_assert_true(false) is presumed to abort the test
   if no exception was thrown -- confirm against test_util. */
void test_exception_assing_ref_wrong_type() {
auto* ptr = ecl_kw_alloc( "XYZ", 1, ECL_INT_TYPE );
try {
ERT::EclKW< double > kw( ptr );
test_assert_true( false );
} catch (...) {
ERT::EclKW< int > kw( ptr );
}
}
/* Run every EclKW unit test; any failure aborts via the test_assert
   macros, so falling off the end means success. */
int main (int argc, char **argv) {
test_kw();
test_kw_name();
test_kw_vector_assign();
test_kw_vector_string();
test_move_semantics_no_crash();
test_exception_assing_ref_wrong_type();
}

View File

@@ -38,7 +38,10 @@ void test_smspec_wg() {
ERT::smspec_node group( ECL_SMSPEC_GROUP_VAR, gr, kw );
test_assert_true(well.wgname() == wg);
test_assert_true(well.type() == ECL_SMSPEC_WELL_VAR );
test_assert_true(group.wgname() == gr);
test_assert_true(group.type() == ECL_SMSPEC_GROUP_VAR );
}
void test_smspec_field() {
@@ -46,6 +49,7 @@ void test_smspec_field() {
ERT::smspec_node field( kw );
test_assert_true( field.keyword() == kw );
test_assert_true( field.type() == ECL_SMSPEC_FIELD_VAR );
}
void test_smspec_block() {
@@ -56,6 +60,8 @@ void test_smspec_block() {
ERT::smspec_node block( kw, dims, ijk );
test_assert_true( block.keyword() == kw );
test_assert_true( block.type() == ECL_SMSPEC_BLOCK_VAR );
test_assert_true( block.num() == 556 );
}
void test_smspec_region() {
@@ -64,6 +70,8 @@ void test_smspec_region() {
ERT::smspec_node region( kw, dims, 0 );
test_assert_true( region.keyword() == kw );
test_assert_true( region.type() == ECL_SMSPEC_REGION_VAR );
test_assert_true( region.num() == 0 );
}
void test_smspec_completion() {
@@ -74,6 +82,8 @@ void test_smspec_completion() {
ERT::smspec_node completion( kw, wg, dims, ijk );
test_assert_true( completion.keyword() == kw );
test_assert_true( completion.type() == ECL_SMSPEC_COMPLETION_VAR );
test_assert_true( completion.num() == 112 );
}
int main (int argc, char **argv) {

Some files were not shown because too many files have changed in this diff Show More