Updated Ert to e2a5a9cc20705537d07822958d925e092a323367 to include new access to well rates

This commit is contained in:
Jacob Støren 2017-02-07 13:44:59 +01:00
parent 76bc449d1f
commit a6b7bc7c41
475 changed files with 12978 additions and 5850 deletions
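For context, the snake_oil test data added in this commit reads well rates from an ECLIPSE summary case via the ert.ecl Python API. A minimal sketch of that usage, assuming a summary case named SNAKE_OIL_FIELD already exists in the working directory (as written by the snake_oil_simulator.py job further down in this diff):

from ert.ecl import EclSum

ecl_sum = EclSum("SNAKE_OIL_FIELD")    # summary case written by the snake_oil_simulator.py job below
opr_op1 = ecl_sum["WOPR:OP1"]          # oil production rate vector for well OP1
for index in range(len(opr_op1)):
    print("step %d : WOPR:OP1 = %f" % (index, opr_op1[index].value))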

View File

@ -58,6 +58,8 @@ env:
before_script:
- mkdir build
- cd build
- cmake -DPYTHON_INSTALL_PREFIX=python -DBUILD_ERT=ON -DERT_BUILD_GUI=ON -DBUILD_TESTS=ON -DBUILD_APPLICATIONS=ON -DUSE_RUNPATH=ON -DBUILD_PYTHON=ON -DERT_USE_OPENMP=ON -DERT_DOC=OFF -DERT_BUILD_CXX=ON ..
- cmake -DBUILD_ERT=ON -DERT_BUILD_GUI=ON -DBUILD_TESTS=ON -DBUILD_APPLICATIONS=ON -DBUILD_PYTHON=ON -DERT_USE_OPENMP=ON -DERT_DOC=OFF -DERT_BUILD_CXX=ON ..
script: make && ctest --output-on-failure
script:
- make
- ctest --output-on-failure

View File

@ -24,7 +24,7 @@ option( BUILD_PYTHON "Run py_compile on the python wrappers"
option( BUILD_SHARED_LIBS "Build shared libraries" ON )
option( INSTALL_ERT "Should anything be installed when issuing make install?" ON )
option( ERT_BUILD_GUI "Should the PyQt based GUI be compiled and installed" OFF)
option( ERT_USE_OPENMP "Use OpenMP - currently only in EclGrid" OFF)
option( ERT_USE_OPENMP "Use OpenMP" OFF )
option( ERT_DOC "Build ERT documentation" OFF)
option( ERT_BUILD_CXX "Build some CXX wrappers" ON)
@ -50,18 +50,21 @@ include( CheckTypeSize )
ENABLE_TESTING()
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
message(STATUS "Found Linux")
set(ERT_LINUX TRUE )
add_definitions( -DERT_LINUX )
set( ERT_BINARY_POSTFIX .${ERT_VERSION_MAJOR}.${ERT_VERSION_MINOR} )
elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
message(STATUS "Found Darwin")
set(ERT_LINUX TRUE )
set(ERT_MAC TRUE)
add_definitions( -DERT_LINUX )
set( ERT_BINARY_POSTFIX .${ERT_VERSION_MAJOR}.${ERT_VERSION_MINOR} )
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
message(STATUS "Found Windows")
set(ERT_WINDOWS TRUE)
add_definitions( -DERT_WINDOWS )
endif()
endif()
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "Setting build type to 'RelWithDebInfo' as none was specified.")
@ -74,7 +77,7 @@ endif()
# Treat warnings as errors if not on Windows
if (NOT ERT_WINDOWS)
set( CMAKE_C_FLAGS "-std=gnu99 -Wall -Wno-unknown-pragmas ")
set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -Wall -Wno-unknown-pragmas ")
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall " )
endif()
@ -82,19 +85,21 @@ if (MSVC)
add_definitions( "/W3 /D_CRT_SECURE_NO_WARNINGS /wd4996" )
endif()
if (ERT_USE_OPENMP)
find_package(OpenMP)
if (OPENMP_FOUND)
message(STATUS "Enabling OpenMP support")
# The actual use of OpenMP is only in the libecl library - the compile flags are only applied there.
else()
set( ERT_USE_OPENMP OFF )
message(STATUS "OpenMP package not found - OpenMP disabled")
endif()
endif()
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)
find_package(CXX11Features)
if (ERT_USE_OPENMP)
find_package(OpenMP)
if (OPENMP_FOUND)
message(STATUS "Enabling OpenMP support")
# The actual use of OpenMP is only in the libecl library - the compile flags are only applied there.
else()
set( ERT_USE_OPENMP OFF )
message(STATUS "OpenMP package not found - OpenMP disabled")
endif()
endif()
#-----------------------------------------------------------------
@ -215,6 +220,7 @@ if (BUILD_PYTHON)
else()
include(cmake/python.cmake2)
add_subdirectory( python )
if(ERT_DOC)
add_subdirectory( docs )
endif()

674
ThirdParty/Ert/COPYING vendored Normal file
View File

@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

View File

@ -0,0 +1,14 @@
find_program(SPHINX_EXECUTABLE NAMES sphinx-build
HINTS
$ENV{SPHINX_DIR}
PATH_SUFFIXES bin
DOC "Sphinx documentation generator"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Sphinx DEFAULT_MSG
SPHINX_EXECUTABLE
)
mark_as_advanced(SPHINX_EXECUTABLE)

View File

@ -21,7 +21,7 @@ shutil.copyfile( src_file , target_file )
shutil.copystat( src_file , target_file )
try:
py_compile.compile( target_file , doraise = True)
except Exception,error:
except Exception as error:
sys.exit("py_compile(%s) failed:%s" % (target_file , error))
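The hunk above replaces the Python 2-only comma form of the except clause with the "except ... as ..." form, which is accepted by both Python 2.6+ and Python 3. A minimal self-contained illustration (the file name is hypothetical and created here only for the example):

import py_compile
import sys

target_file = "example.py"                      # hypothetical file, created here for illustration
with open(target_file, "w") as f:
    f.write("print('hello')\n")

try:
    py_compile.compile(target_file, doraise=True)
except Exception as error:                      # the old "except Exception, error:" is rejected by Python 3
    sys.exit("py_compile(%s) failed:%s" % (target_file, error))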

View File

@ -1,9 +1,3 @@
ert.ecl (2016.10-rfinal-1~xenial) xenial; urgency=medium
* New release
-- Arne Morten Kvarving <arne.morten.kvarving@sintef.no> Wed, 26 Oct 2016 09:46:18 +0200
ert.ecl (1.0-4) precise; urgency=low
* Unmark -dev package as architecture independent due to library symlink

View File

@ -1,2 +1,3 @@
usr/lib/*/lib*.so.*
usr/bin/*
usr/share/man/man1/ecl_summary.1

View File

@ -1,45 +1,85 @@
set( ERT_DOC_INSTALL_PATH "" CACHE PATH "Absolute path to install documentation *in addition* to $PREFIX/documentation")
set( ERT_DOC_EXTERNAL_ROOT "" CACHE PATH "Path to site local ERT documentation")
option( ERT_RST_DOC "Build RST based documentation" ON)
option( ERT_DOXY_DOC "Build doxygen documentation" ON)
option( ERT_DOXY_GRAPHICAL "Add graphics to doxygen documentation" OFF)
file(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/tmp_doc")
EXECUTE_PROCESS( COMMAND ${CMAKE_COMMAND} -E create_symlink "${CMAKE_CURRENT_SOURCE_DIR}/user" "${PROJECT_BINARY_DIR}/tmp_doc/user")
EXECUTE_PROCESS( COMMAND ${CMAKE_COMMAND} -E create_symlink "${CMAKE_CURRENT_SOURCE_DIR}/code" "${PROJECT_BINARY_DIR}/tmp_doc/code")
if (ERT_DOC_EXTERNAL_ROOT)
EXECUTE_PROCESS( COMMAND ${CMAKE_COMMAND} -E create_symlink "${ERT_DOC_EXTERNAL_ROOT}" "${PROJECT_BINARY_DIR}/tmp_doc/external-doc")
message(STATUS "Adding documentation link ${PROJECT_BINARY_DIR}/tmp_doc/external-doc -> ${ERT_DOC_EXTERNAL_ROOT}")
set( ERT_DOC_LINK external-doc/index )
else()
set( ERT_DOC_LINK "" )
endif()
configure_file(index.rst.in ${PROJECT_BINARY_DIR}/tmp_doc/index.rst)
configure_file(conf.py.in ${PROJECT_BINARY_DIR}/conf.py)
add_custom_target(doc_out ALL
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/script/run-sphinx.py ${PROJECT_BINARY_DIR}/conf.py ${PROJECT_BINARY_DIR}/${PYTHON_INSTALL_PREFIX} ${PROJECT_BINARY_DIR}/tmp_doc
DEPENDS enkf)
INSTALL( DIRECTORY ${PROJECT_BINARY_DIR}/tmp_doc/_build/ DESTINATION ${CMAKE_INSTALL_PREFIX}/documentation )
if (ERT_DOC_INSTALL_PATH)
INSTALL( DIRECTORY ${PROJECT_BINARY_DIR}/tmp_doc/_build/ DESTINATION ${ERT_DOC_INSTALL_PATH} )
endif()
find_package(Doxygen)
if (DOXYGEN_FOUND)
message(STATUS "Creating doxygen target")
if (DOXYGEN_DOT_FOUND)
message(STATUS "Found graphviz, will run doxygen with graphics")
set( DOXYGEN_HAVE_DOT "YES" )
else()
message(STATUS "Graphviz not found, disabling dot")
set( DOXYGEN_HAVE_DOT "NO" )
if (NOT BUILD_ERT)
if (ERT_RST_DOC)
message(WARNING "Turning off ERT_RST_DOC. Depends on BUILD_ERT.")
set (ERT_RST_DOC OFF)
endif()
if (ERT_DOXY_DOC)
message(WARNING "Turning off ERT_DOXY_DOC. Depends on BUILD_ERT.")
set (ERT_DOXY_DOC OFF)
endif()
configure_file(doxygen.cfg.in ${PROJECT_BINARY_DIR}/doxygen.cfg)
add_custom_target(doxy
COMMAND ${DOXYGEN_EXECUTABLE} ${PROJECT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/../
COMMENT "Generating doxygen documentation")
endif()
if (ERT_RST_DOC)
find_package(Sphinx REQUIRED)
if (SPHINX_FOUND)
set( ERT_DOC_INSTALL_PATH "" CACHE PATH "Absolute path to install documentation *in addition* to $PREFIX/documentation")
set( ERT_DOC_EXTERNAL_ROOT "" CACHE PATH "Path to site local ERT documentation")
file(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/doc-src")
file(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/doc-src/_static")
EXECUTE_PROCESS( COMMAND ${CMAKE_COMMAND} -E create_symlink "${CMAKE_CURRENT_SOURCE_DIR}/user" "${PROJECT_BINARY_DIR}/doc-src/user")
if (ERT_DOC_EXTERNAL_ROOT)
EXECUTE_PROCESS( COMMAND ${CMAKE_COMMAND} -E create_symlink "${ERT_DOC_EXTERNAL_ROOT}" "${PROJECT_BINARY_DIR}/doc-src/external-doc")
message(STATUS "Adding documentation link ${PROJECT_BINARY_DIR}/doc-src/external-doc -> ${ERT_DOC_EXTERNAL_ROOT}")
set( ERT_DOC_LINK external-doc/index )
else()
set( ERT_DOC_LINK "" )
endif()
configure_file(index.rst.in ${PROJECT_BINARY_DIR}/doc-src/index.rst)
configure_file(conf.py.in ${PROJECT_BINARY_DIR}/doc-src/conf.py)
if (BUILD_PYTHON)
EXECUTE_PROCESS( COMMAND ${CMAKE_COMMAND} -E create_symlink "${CMAKE_CURRENT_SOURCE_DIR}/code" "${PROJECT_BINARY_DIR}/doc-src/code")
add_custom_target(api-doc ALL
COMMAND sphinx-apidoc -e -o doc-src/API/python ${PROJECT_BINARY_DIR}/${PYTHON_INSTALL_PREFIX}
DEPENDS enkf
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
endif()
add_custom_target(rst-doc ALL
COMMAND sphinx-build -b html -d doc-src/doctrees doc-src documentation/rst
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
DEPENDS api-doc
)
else()
message(STATUS "Sphinx documentation tool not found - documentation not generated")
endif()
endif()
if (ERT_DOXY_DOC)
find_package(Doxygen)
if (DOXYGEN_FOUND)
file(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/documentation/doxy")
set( DOXYGEN_HAVE_DOT "NO" )
if ( ERT_DOXY_GRAPHICAL)
if (DOXYGEN_DOT_FOUND)
set( DOXYGEN_HAVE_DOT "YES" )
endif()
endif()
if (BUILD_ERT)
SET( DOXYGEN_INPUT "${PROJECT_SOURCE_DIR}/libanalysis ${PROJECT_SOURCE_DIR}/libconfig ${PROJECT_SOURCE_DIR}/libecl ${PROJECT_SOURCE_DIR}/libecl_well ${PROJECT_SOURCE_DIR}/libeclxx ${PROJECT_SOURCE_DIR}/libenkf ${PROJECT_SOURCE_DIR}/libert_util ${PROJECT_SOURCE_DIR}/libert_utilxx ${PROJECT_SOURCE_DIR}/libgeometry ${PROJECT_SOURCE_DIR}/libjob_queue ${PROJECT_SOURCE_DIR}/librms ${PROJECT_SOURCE_DIR}/libsched")
else()
SET( DOXYGEN_INPUT "${PROJECT_SOURCE_DIR}/libecl ${PROJECT_SOURCE_DIR}/libecl_well ${PROJECT_SOURCE_DIR}/libeclxx ${PROJECT_SOURCE_DIR}/libert_util ${PROJECT_SOURCE_DIR}/libert_utilxx ${PROJECT_SOURCE_DIR}/libgeometry")
endif()
configure_file(doxygen.cfg.in ${PROJECT_BINARY_DIR}/doxygen.cfg)
add_custom_target(doxy ALL
COMMAND ${DOXYGEN_EXECUTABLE} ${PROJECT_BINARY_DIR}/doxygen.cfg
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/../
COMMENT "Generating doxygen documentation"
DEPENDS enkf)
endif()
endif()
INSTALL( DIRECTORY ${PROJECT_BINARY_DIR}/documentation DESTINATION ${CMAKE_INSTALL_PREFIX} )

View File

@ -0,0 +1,34 @@
QUEUE_SYSTEM LOCAL
JOBNAME SNAKE_OIL_%d
NUM_REALIZATIONS 25
DEFINE <STORAGE> storage/<CONFIG_FILE_BASE>
RUNPATH_FILE directory/test_runpath_list.txt
RUNPATH <STORAGE>/runpath/realisation-%d/iter-%d
ENSPATH <STORAGE>/ensemble
ECLBASE SNAKE_OIL_FIELD
SUMMARY *
HISTORY_SOURCE REFCASE_HISTORY
REFCASE refcase/SNAKE_OIL_FIELD
TIME_MAP refcase/time_map.txt
INSTALL_JOB SNAKE_OIL_SIMULATOR jobs/SNAKE_OIL_SIMULATOR
INSTALL_JOB SNAKE_OIL_NPV jobs/SNAKE_OIL_NPV
INSTALL_JOB SNAKE_OIL_DIFF jobs/SNAKE_OIL_DIFF
FORWARD_MODEL SNAKE_OIL_SIMULATOR
FORWARD_MODEL SNAKE_OIL_NPV
FORWARD_MODEL SNAKE_OIL_DIFF
RUN_TEMPLATE templates/seed_template.txt seed.txt
GEN_KW SNAKE_OIL_PARAM templates/snake_oil_template.txt snake_oil_params.txt parameters/snake_oil_parameters.txt
CUSTOM_KW SNAKE_OIL_NPV snake_oil_npv.txt
GEN_DATA SNAKE_OIL_OPR_DIFF INPUT_FORMAT:ASCII RESULT_FILE:snake_oil_opr_diff_%d.txt REPORT_STEPS:199
GEN_DATA SNAKE_OIL_WPR_DIFF INPUT_FORMAT:ASCII RESULT_FILE:snake_oil_wpr_diff_%d.txt REPORT_STEPS:199
GEN_DATA SNAKE_OIL_GPR_DIFF INPUT_FORMAT:ASCII RESULT_FILE:snake_oil_gpr_diff_%d.txt REPORT_STEPS:199

View File

@ -0,0 +1,4 @@
STDOUT snake_oil_diff.stdout
STDERR snake_oil_diff.stderr
EXECUTABLE snake_oil_diff.py

View File

@ -0,0 +1,4 @@
STDOUT snake_oil_npv.stdout
STDERR snake_oil_npv.stderr
EXECUTABLE snake_oil_npv.py

View File

@ -0,0 +1,4 @@
STDOUT snake_oil.stdout
STDERR snake_oil.stderr
EXECUTABLE snake_oil_simulator.py

View File

@ -0,0 +1,24 @@
#!/usr/bin/env python
from ert.ecl import EclSum


def writeDiff(filename, vector1, vector2):
    with open(filename, "w") as f:
        for index in range(len(vector1)):
            node1 = vector1[index]
            node2 = vector2[index]

            diff = node1.value - node2.value
            f.write("%f\n" % diff)


if __name__ == '__main__':
    ecl_sum = EclSum("SNAKE_OIL_FIELD")

    report_step = 199
    writeDiff("snake_oil_opr_diff_%d.txt" % report_step, ecl_sum["WOPR:OP1"], ecl_sum["WOPR:OP2"])
    writeDiff("snake_oil_wpr_diff_%d.txt" % report_step, ecl_sum["WWPR:OP1"], ecl_sum["WWPR:OP2"])
    writeDiff("snake_oil_gpr_diff_%d.txt" % report_step, ecl_sum["WGPR:OP1"], ecl_sum["WGPR:OP2"])

View File

@ -0,0 +1,103 @@
#!/usr/bin/env python
from ert.ecl import EclSum
OIL_PRICES = {"2010-01-01": 78.33,
"2010-02-01": 76.39,
"2010-03-01": 81.20,
"2010-04-01": 84.29,
"2010-05-01": 73.74,
"2010-06-01": 75.34,
"2010-07-01": 76.32,
"2010-08-01": 76.60,
"2010-09-01": 75.24,
"2010-10-01": 81.89,
"2010-11-01": 84.25,
"2010-12-01": 89.15,
"2011-01-01": 89.17,
"2011-02-01": 88.58,
"2011-03-01": 102.86,
"2011-04-01": 109.53,
"2011-05-01": 100.90,
"2011-06-01": 96.26,
"2011-07-01": 97.30,
"2011-08-01": 86.33,
"2011-09-01": 85.52,
"2011-10-01": 86.32,
"2011-11-01": 97.16,
"2011-12-01": 98.56,
"2012-01-01": 100.27,
"2012-02-01": 102.20,
"2012-03-01": 106.16,
"2012-04-01": 103.32,
"2012-05-01": 94.65,
"2012-06-01": 82.30,
"2012-07-01": 87.90,
"2012-08-01": 94.13,
"2012-09-01": 94.51,
"2012-10-01": 89.49,
"2012-11-01": 86.53,
"2012-12-01": 87.86,
"2013-01-01": 94.76,
"2013-02-01": 95.31,
"2013-03-01": 92.94,
"2013-04-01": 92.02,
"2013-05-01": 94.51,
"2013-06-01": 95.77,
"2013-07-01": 104.67,
"2013-08-01": 106.57,
"2013-09-01": 106.29,
"2013-10-01": 100.54,
"2013-11-01": 93.86,
"2013-12-01": 97.63,
"2014-01-01": 94.62,
"2014-02-01": 100.82,
"2014-03-01": 100.80,
"2014-04-01": 102.07,
"2014-05-01": 102.18,
"2014-06-01": 105.79,
"2014-07-01": 103.59,
"2014-08-01": 96.54,
"2014-09-01": 93.21,
"2014-10-01": 84.40,
"2014-11-01": 75.79,
"2014-12-01": 59.29,
"2015-01-01": 47.22,
"2015-02-01": 50.58,
"2015-03-01": 47.82,
"2015-04-01": 54.45,
"2015-05-01": 59.27,
"2015-06-01": 59.82,
"2015-07-01": 50.90,
"2015-08-01": 42.87,
"2015-09-01": 45.48}
if __name__ == '__main__':
    ecl_sum = EclSum("SNAKE_OIL_FIELD")
    start_time = ecl_sum.getStartTime()
    date_ranges = ecl_sum.timeRange(start_time, interval="1M")
    production_sums = ecl_sum.blockedProduction("FOPT", date_ranges)

    npv = 0.0
    for index in range(0, len(date_ranges) - 1):
        date = date_ranges[index + 1]  # end of period
        production_sum = production_sums[index]
        oil_price = OIL_PRICES[date.date().strftime("%Y-%m-%d")]

        production_value = oil_price * production_sum
        npv += production_value

    with open("snake_oil_npv.txt", "w") as output_file:
        output_file.write("NPV %s\n" % npv)

        if npv < 80000:
            rating = "POOR"
        elif 80000 <= npv < 100000:
            rating = "AVERAGE"
        elif 100000 <= npv < 120000:
            rating = "GOOD"
        else:
            rating = "EXCELLENT"

        output_file.write("RATING %s\n" % rating)

View File

@ -0,0 +1,185 @@
#!/usr/bin/env python
from datetime import datetime
import os
import sys
from ert.ecl import EclSum, EclSumTStep
from ert.test import ExtendedTestCase
try:
    from synthesizer import OilSimulator
except ImportError as e:
    share_lib_path = os.path.join(ExtendedTestCase.findShareRoot(), "lib")
    sys.path.insert(0, share_lib_path)
    synthesizer_module = __import__("synthesizer")
    OilSimulator = synthesizer_module.OilSimulator
    sys.path.pop(0)


def globalIndex(i, j, k, nx=10, ny=10, nz=10):
    return i + nx * (j - 1) + nx * ny * (k - 1)


def readParameters(filename):
    params = {}
    with open(filename, "r") as f:
        for line in f:
            key, value = line.split(":", 1)
            params[key] = value.strip()
    return params


def runSimulator(simulator, history_simulator, time_step_count):
    """ @rtype: EclSum """
    ecl_sum = EclSum.writer("SNAKE_OIL_FIELD", datetime(2010, 1, 1), 10, 10, 10)

    ecl_sum.addVariable("FOPT")
    ecl_sum.addVariable("FOPR")
    ecl_sum.addVariable("FGPT")
    ecl_sum.addVariable("FGPR")
    ecl_sum.addVariable("FWPT")
    ecl_sum.addVariable("FWPR")
    ecl_sum.addVariable("FGOR")
    ecl_sum.addVariable("FWCT")

    ecl_sum.addVariable("FOPTH")
    ecl_sum.addVariable("FOPRH")
    ecl_sum.addVariable("FGPTH")
    ecl_sum.addVariable("FGPRH")
    ecl_sum.addVariable("FWPTH")
    ecl_sum.addVariable("FWPRH")
    ecl_sum.addVariable("FGORH")
    ecl_sum.addVariable("FWCTH")

    ecl_sum.addVariable("WOPR", wgname="OP1")
    ecl_sum.addVariable("WOPR", wgname="OP2")
    ecl_sum.addVariable("WWPR", wgname="OP1")
    ecl_sum.addVariable("WWPR", wgname="OP2")
    ecl_sum.addVariable("WGPR", wgname="OP1")
    ecl_sum.addVariable("WGPR", wgname="OP2")
    ecl_sum.addVariable("WGOR", wgname="OP1")
    ecl_sum.addVariable("WGOR", wgname="OP2")
    ecl_sum.addVariable("WWCT", wgname="OP1")
    ecl_sum.addVariable("WWCT", wgname="OP2")

    ecl_sum.addVariable("WOPRH", wgname="OP1")
    ecl_sum.addVariable("WOPRH", wgname="OP2")
    ecl_sum.addVariable("WWPRH", wgname="OP1")
    ecl_sum.addVariable("WWPRH", wgname="OP2")
    ecl_sum.addVariable("WGPRH", wgname="OP1")
    ecl_sum.addVariable("WGPRH", wgname="OP2")
    ecl_sum.addVariable("WGORH", wgname="OP1")
    ecl_sum.addVariable("WGORH", wgname="OP2")
    ecl_sum.addVariable("WWCTH", wgname="OP1")
    ecl_sum.addVariable("WWCTH", wgname="OP2")

    ecl_sum.addVariable("BPR", num=globalIndex(5, 5, 5))
    ecl_sum.addVariable("BPR", num=globalIndex(1, 3, 8))

    time_map = []
    mini_step_count = 10
    total_step_count = time_step_count * mini_step_count

    for report_step in range(time_step_count):
        for mini_step in range(mini_step_count):
            t_step = ecl_sum.addTStep(report_step + 1, sim_days=report_step * mini_step_count + mini_step)
            time_map.append(t_step.getSimTime().datetime().strftime("%d/%m/%Y"))

            simulator.step(scale=1.0 / total_step_count)
            history_simulator.step(scale=1.0 / total_step_count)

            t_step["FOPR"] = simulator.fopr()
            t_step["FOPT"] = simulator.fopt()
            t_step["FGPR"] = simulator.fgpr()
            t_step["FGPT"] = simulator.fgpt()
            t_step["FWPR"] = simulator.fwpr()
            t_step["FWPT"] = simulator.fwpt()
            t_step["FGOR"] = simulator.fgor()
            t_step["FWCT"] = simulator.fwct()

            t_step["WOPR:OP1"] = simulator.opr("OP1")
            t_step["WOPR:OP2"] = simulator.opr("OP2")

            t_step["WGPR:OP1"] = simulator.gpr("OP1")
            t_step["WGPR:OP2"] = simulator.gpr("OP2")

            t_step["WWPR:OP1"] = simulator.wpr("OP1")
            t_step["WWPR:OP2"] = simulator.wpr("OP2")

            t_step["WGOR:OP1"] = simulator.gor("OP1")
            t_step["WGOR:OP2"] = simulator.gor("OP2")

            t_step["WWCT:OP1"] = simulator.wct("OP1")
            t_step["WWCT:OP2"] = simulator.wct("OP2")

            t_step["BPR:5,5,5"] = simulator.bpr("5,5,5")
            t_step["BPR:1,3,8"] = simulator.bpr("1,3,8")

            t_step["FOPRH"] = history_simulator.fopr()
            t_step["FOPTH"] = history_simulator.fopt()
            t_step["FGPRH"] = history_simulator.fgpr()
            t_step["FGPTH"] = history_simulator.fgpt()
            t_step["FWPRH"] = history_simulator.fwpr()
            t_step["FWPTH"] = history_simulator.fwpt()
            t_step["FGORH"] = history_simulator.fgor()
            t_step["FWCTH"] = history_simulator.fwct()

            t_step["WOPRH:OP1"] = history_simulator.opr("OP1")
            t_step["WOPRH:OP2"] = history_simulator.opr("OP2")

            t_step["WGPRH:OP1"] = history_simulator.gpr("OP1")
            t_step["WGPRH:OP2"] = history_simulator.gpr("OP2")

            t_step["WWPRH:OP1"] = history_simulator.wpr("OP1")
            t_step["WWPRH:OP2"] = history_simulator.wpr("OP2")

            t_step["WGORH:OP1"] = history_simulator.gor("OP1")
            t_step["WGORH:OP2"] = history_simulator.gor("OP2")

            t_step["WWCTH:OP1"] = history_simulator.wct("OP1")
            t_step["WWCTH:OP2"] = history_simulator.wct("OP2")

    return ecl_sum, time_map


def roundedInt(value):
    return int(round(float(value)))


if __name__ == '__main__':
    seed = int(readParameters("seed.txt")["SEED"])
    parameters = readParameters("snake_oil_params.txt")

    op1_divergence_scale = float(parameters["OP1_DIVERGENCE_SCALE"])
    op2_divergence_scale = float(parameters["OP2_DIVERGENCE_SCALE"])
    op1_persistence = float(parameters["OP1_PERSISTENCE"])
    op2_persistence = float(parameters["OP2_PERSISTENCE"])
    op1_offset = float(parameters["OP1_OFFSET"])
    op2_offset = float(parameters["OP2_OFFSET"])
    bpr_138_persistence = float(parameters["BPR_138_PERSISTENCE"])
    bpr_555_persistence = float(parameters["BPR_555_PERSISTENCE"])
    op1_octaves = roundedInt(parameters["OP1_OCTAVES"])
    op2_octaves = roundedInt(parameters["OP2_OCTAVES"])

    simulator = OilSimulator()
    simulator.addWell("OP1", seed * 997, persistence=op1_persistence, octaves=op1_octaves, divergence_scale=op1_divergence_scale, offset=op1_offset)
    simulator.addWell("OP2", seed * 13, persistence=op2_persistence, octaves=op2_octaves, divergence_scale=op2_divergence_scale, offset=op2_offset)
    simulator.addBlock("5,5,5", seed * 37, persistence=bpr_555_persistence)
    simulator.addBlock("1,3,8", seed * 31, persistence=bpr_138_persistence)

    history_simulator = OilSimulator()
    history_simulator.addWell("OP1", 222118781)
    history_simulator.addWell("OP2", 118116362)

    report_step_count = 200
    ecl_sum, time_map = runSimulator(simulator, history_simulator, report_step_count)
    ecl_sum.fwrite()

    with open("time_map.txt", "w") as f:
        for t in time_map:
            f.write("%s\n" % t)

View File

@ -0,0 +1,11 @@
OP1_PERSISTENCE UNIFORM 0.01 0.4
OP1_OCTAVES UNIFORM 3 5
OP1_DIVERGENCE_SCALE UNIFORM 0.25 1.25
OP1_OFFSET UNIFORM -0.1 0.1
OP2_PERSISTENCE UNIFORM 0.1 0.6
OP2_OCTAVES UNIFORM 5 12
OP2_DIVERGENCE_SCALE UNIFORM 0.5 1.5
OP2_OFFSET UNIFORM -0.2 0.2
BPR_555_PERSISTENCE UNIFORM 0.1 0.5
BPR_138_PERSISTENCE UNIFORM 0.2 0.7

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1 @@
To create a refcase, run the snake_oil_simulator.py job with this directory as the working directory.
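A minimal sketch of doing that from Python; the relative path to the job script below is a guess and should be adjusted to the actual checkout layout. Note that this directory already holds the seed.txt and snake_oil_params.txt inputs the simulator script reads.

import subprocess

# Hypothetical path to the simulator job script; it lives in the jobs/ directory
# of the snake_oil test case shown earlier in this diff.
subprocess.check_call(["python", "../jobs/snake_oil_simulator.py"])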

View File

@ -0,0 +1 @@
SEED:268776

View File

@ -0,0 +1,10 @@
OP1_PERSISTENCE:0.15
OP1_OCTAVES:4
OP1_DIVERGENCE_SCALE:0.5
OP1_OFFSET:0.0
OP2_PERSISTENCE:0.25
OP2_OCTAVES:7.0
OP2_DIVERGENCE_SCALE:1.0
OP2_OFFSET:0.0
BPR_555_PERSISTENCE:0.25
BPR_138_PERSISTENCE:0.35

File diff suppressed because it is too large.

View File

@ -0,0 +1,40 @@
QUEUE_SYSTEM LOCAL
JOBNAME SNAKE_OIL_%d
NUM_REALIZATIONS 25
DEFINE <STORAGE> storage/<CONFIG_FILE_BASE>
STORE_SEED SEED
LOAD_SEED SEED
RUNPATH_FILE directory/test_runpath_list.txt
RUNPATH <STORAGE>/runpath/realisation-%d/iter-%d
ENSPATH <STORAGE>/ensemble
ECLBASE SNAKE_OIL_FIELD
SUMMARY *
HISTORY_SOURCE REFCASE_HISTORY
REFCASE refcase/SNAKE_OIL_FIELD
TIME_MAP refcase/time_map.txt
OBS_CONFIG observations/observations.txt
INSTALL_JOB SNAKE_OIL_SIMULATOR jobs/SNAKE_OIL_SIMULATOR
INSTALL_JOB SNAKE_OIL_NPV jobs/SNAKE_OIL_NPV
INSTALL_JOB SNAKE_OIL_DIFF jobs/SNAKE_OIL_DIFF
FORWARD_MODEL SNAKE_OIL_SIMULATOR
FORWARD_MODEL SNAKE_OIL_NPV
FORWARD_MODEL SNAKE_OIL_DIFF
RUN_TEMPLATE templates/seed_template.txt seed.txt
GEN_KW SNAKE_OIL_PARAM templates/snake_oil_template.txt snake_oil_params.txt parameters/snake_oil_parameters.txt
CUSTOM_KW SNAKE_OIL_NPV snake_oil_npv.txt
GEN_DATA SNAKE_OIL_OPR_DIFF INPUT_FORMAT:ASCII RESULT_FILE:snake_oil_opr_diff_%d.txt REPORT_STEPS:199
GEN_DATA SNAKE_OIL_WPR_DIFF INPUT_FORMAT:ASCII RESULT_FILE:snake_oil_wpr_diff_%d.txt REPORT_STEPS:199
GEN_DATA SNAKE_OIL_GPR_DIFF INPUT_FORMAT:ASCII RESULT_FILE:snake_oil_gpr_diff_%d.txt REPORT_STEPS:199
LOG_LEVEL 3
LOG_FILE log/log.txt
UPDATE_LOG_PATH log/update

View File

@ -0,0 +1 @@
SEED:<IENS>

View File

@ -0,0 +1,10 @@
OP1_PERSISTENCE:<OP1_PERSISTENCE>
OP1_OCTAVES:<OP1_OCTAVES>
OP1_DIVERGENCE_SCALE:<OP1_DIVERGENCE_SCALE>
OP1_OFFSET:<OP1_OFFSET>
OP2_PERSISTENCE:<OP2_PERSISTENCE>
OP2_OCTAVES:<OP2_OCTAVES>
OP2_DIVERGENCE_SCALE:<OP2_DIVERGENCE_SCALE>
OP2_OFFSET:<OP2_OFFSET>
BPR_555_PERSISTENCE:<BPR_555_PERSISTENCE>
BPR_138_PERSISTENCE:<BPR_138_PERSISTENCE>

View File

@ -0,0 +1,7 @@
1. Create a small Python script which will load an ERT configuration
file and instantiate an EnKFMain object.
2. Query the EnKFMain instance and print on standard out:
a) How many realisations there are.
b) List all GEN_KW keywords, and their internal keys.

27
ThirdParty/Ert/docs/course/ex1/sol1.py vendored Normal file
View File

@ -0,0 +1,27 @@
#!/usr/bin/env python
import sys
import time
from ert.enkf import EnKFMain
from ert.enkf.enums import ErtImplType
# This will instantiate the EnKFMain object and create a handle to
# "everything" ert related for this instance.
ert = EnKFMain( sys.argv[1] )
# Ask the EnKFMain instance how many realisations it has. Observe that
# the answer to this question is just the value of the
# NUM_REALISATIONS setting in the configuration file.
print("This instance has %d realisations" % ert.getEnsembleSize())
# Get the ensemble configuration object, and ask for all GEN_KW keys:
ens_config = ert.ensembleConfig( )
for key in ens_config.getKeylistFromImplType(ErtImplType.GEN_KW):
config_node = ens_config[key]
# "Downcast" to GEN_KW configuration.
gen_kw_config = config_node.getModelConfig( )
print("%s : %s" % (key , gen_kw_config.getKeyWords( )))

View File

@ -0,0 +1 @@
Implement the [] operator for the gen_data class in GenData.py

35
ThirdParty/Ert/docs/course/ex2/sol2.txt vendored Normal file
View File

@ -0,0 +1,35 @@
The [] operator for Python objects is implemented with the
__getitem__() and __setitem__() methods.
1. The __setitem__ and __getitem__ methods should clearly be based on
C functions which set and get an item based on an index. Going to
libenkf/src/gen_data.c we see that two such functions already exist:
double gen_data_iget_double(const gen_data_type * gen_data, int index);
void gen_data_iset_double(gen_data_type * gen_data, int index, double value);
2. We must add bindings from Python to these C functions. Add the
following lines at the top of the declaration of class GenData:
_iset = EnkfPrototype("void gen_data_iset_double(gen_data, int , double)")
_iget = EnkfPrototype("double gen_data_iget_double(gen_data, int )")
3. Create (simple) Python methods:
def __getitem__(self , index):
if index < len(self):
return self._iget( index )
else:
raise IndexError("Invalid index:%d - valid range: [0,%d)" % (index , len(self)))
def __setitem__(self , index, value):
if index < len(self):
self._iset( index , value )
else:
raise IndexError("Invalid index:%d - valid range: [0,%d)" % (index , len(self)))
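4. A minimal usage sketch (assuming a GenData instance, here called
gen_data, has already been loaded from the file system as in the
other exercises):
    print(gen_data[0])    # read the first element
    gen_data[0] = 1.5     # overwrite it with a new value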

View File

@ -0,0 +1,2 @@
Iterate through all the forward models which have been installed and
get the configuration file and executable.

17
ThirdParty/Ert/docs/course/ex3/sol3.py vendored Normal file
View File

@ -0,0 +1,17 @@
#!/usr/bin/env python
import sys
import time
from ert.enkf import EnKFMain
# This will instantiate the EnKFMain object and create a handle to
# "everything" ert related for this instance.
ert = EnKFMain( sys.argv[1] )
site_config = ert.siteConfig( )
jobs = site_config.get_installed_jobs( )
for job in jobs:
print(job.name())
print(" config : %s" % job.get_config_file())
print(" executable: %s" % job.get_executable( ))
print("")

13
ThirdParty/Ert/docs/course/ex4/ex4.txt vendored Normal file
View File

@ -0,0 +1,13 @@
Create a small script which:
1. Loads the configuration file.
2. Initializes the realisations and creates runpath folders.
3. Submits the simulations.
4. Waits for the simulations to complete.
5. Fetches and prints GEN_DATA results - using the GenData[] operator.

71
ThirdParty/Ert/docs/course/ex4/sol4.py vendored Normal file
View File

@ -0,0 +1,71 @@
#!/usr/bin/env python
import sys
import time
from ert.enkf import EnKFMain, RunArg, NodeId
from ert.enkf.data import EnkfNode
from ert.job_queue import JobQueueManager
ert = EnKFMain( sys.argv[1] )
fs_manager = ert.getEnkfFsManager( )
fs = fs_manager.getCurrentFileSystem( )
# Initialize the realisations.
for iens in range( ert.getEnsembleSize()):
realisation = ert.getRealisation( iens )
realisation.initialize( fs )
# Fetch out the job_queue from the SiteConfig object. In addition we
# create a JobQueueManager objects which wraps the queue. The purpose
# of this manager object is to let the queue run nonblocking in the
# background.
site_config = ert.siteConfig( )
queue_manager = JobQueueManager( site_config.getJobQueue( ) )
queue_manager.startQueue( ert.getEnsembleSize( ) , verbose = False )
# Create list of RunArg instances which hold metadata for one running
# realisation, create the directory where the simulation should run
# and submit the simulation.
path_fmt = "/tmp/run%d"
arg_list = [ RunArg.createEnsembleExperimentRunArg(fs, iens, path_fmt % iens) for iens in range(ert.getEnsembleSize()) ]
for arg in arg_list:
ert.createRunPath( arg )
ert.submitSimulation( arg )
while True:
print("Waiting:%d Running:%d Complete:%d/%d" % (queue_manager.getNumWaiting( ), queue_manager.getNumRunning( ) , queue_manager.getNumSuccess() , queue_manager.getNumFailed( )))
if not queue_manager.isRunning( ):
break
time.sleep( 5 )
ens_config = ert.ensembleConfig( )
data_config = ens_config["SNAKE_OIL_OPR_DIFF"]
param_config = ens_config["SNAKE_OIL_PARAM"]
for iens in range(ert.getEnsembleSize( )):
data_id = NodeId( realization_number = iens,
report_step = 199 )
enkf_node1 = EnkfNode( data_config )
enkf_node1.load( fs , data_id )
gen_data = enkf_node1.asGenData( )
data = gen_data.getData( )
param_id = NodeId( realization_number = iens,
report_step = 0 )
enkf_node2 = EnkfNode( param_config )
enkf_node2.load( fs , param_id )
gen_kw = enkf_node2.asGenKw( )
print(sum(data))
for v in gen_kw:
print(v)
# Using the __getitem__() of GenData which was implemented
# previously.
for d in gen_data:
print(d)

View File

@ -5,7 +5,7 @@ PROJECT_NAME = "Ert"
PROJECT_NUMBER = ${ERT_VERSION_MAJOR}.${ERT_VERSION_MINOR}
PROJECT_BRIEF = "ERT is software initially developed by Statoil whose main feature is to handle several ECLIPSE simulations in an Ensemble setting. --- http://ert.nr.no/ert"
PROJECT_LOGO =
OUTPUT_DIRECTORY = ${PROJECT_BINARY_DIR}/doxy
OUTPUT_DIRECTORY = ${PROJECT_BINARY_DIR}/documentation/doxy
CREATE_SUBDIRS = NO
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
@ -25,7 +25,7 @@ WARN_IF_DOC_ERROR = NO
WARN_NO_PARAMDOC = NO
WARN_FORMAT = "$file:$line: $text"
INPUT = ${PROJECT_SOURCE_DIR}/libanalysis ${PROJECT_SOURCE_DIR}/libconfig ${PROJECT_SOURCE_DIR}/libecl ${PROJECT_SOURCE_DIR}/libecl_well ${PROJECT_SOURCE_DIR}/libeclxx ${PROJECT_SOURCE_DIR}/libenkf ${PROJECT_SOURCE_DIR}/libert_util ${PROJECT_SOURCE_DIR}/libert_utilxx ${PROJECT_SOURCE_DIR}/libgeometry ${PROJECT_SOURCE_DIR}/libjob_queue ${PROJECT_SOURCE_DIR}/librms ${PROJECT_SOURCE_DIR}/libsched
INPUT = ${DOXYGEN_INPUT}
RECURSIVE = YES
EXCLUDE_PATTERNS = */test/* */build/* */test-data/* */docs/* */python*/

View File

@ -0,0 +1,188 @@
.\" First parameter, NAME, should be all caps
.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
.\" other parameters are allowed: see man(7), man(1)
.TH ecl_summary "1" "November 8 2016"
.\" Please adjust this date whenever revising the manpage.
.\"
.\" Some roff macros, for reference:
.\" .nh disable hyphenation
.\" .hy enable hyphenation
.\" .ad l left justify
.\" .ad b justify to both left and right margins
.\" .nf disable filling
.\" .fi enable filling
.\" .br insert line break
.\" .sp <n> insert n+1 empty lines
.\" for manpage-specific macros, see man(7)
.SH NAME
ecl_summary \- Program to extract summary vectors from ECLIPSE files
.SH DESCRIPTION
The ecl_summary program is used to quickly extract summary vectors
from ECLIPSE summary files. The program is invoked as:
.PP
computer> ecl_summary \fI\,/Path/to/ECLIPSE\/\fP key1 key2 key3 ....
.PP
Here ECLIPSE is the name of an existing case, you can give it with
extension, or without; the case need not be in the current directory.
.PP
The keys are formed by combining ECLIPSE variable names and
qualifiers from the WGNAMES and NUMS arrays. Examples of keys are:
.TP
WWCT:F\-36
\- The watercut in the well F\-36.
.TP
FOPT
\- The total field oil production.
.TP
RPR:3
\- The region pressure in region 3.
.TP
GGIT:NORTH
\- The total gas injection group NORTH.
.TP
SPR:F\-12:18
\- The segment pressure in well F\-12, segment 18.
.TP
BPR:10,10,10
\- The block pressure in cell 10,10,10.
.IP
LBPR:LGR3:10,10,10 \- The block pressure in cell 10,10,10 \- in LGR3
.PP
The option \fB\-\-list\fR can be used to list all available keys.
.SH OPTIONS
.HP
\fB\-\-list\fR : The program will list available keys.
.HP
\fB\-\-no\-restart\fR: If the simulation in question is a restart, i.e. a prediction
.IP
which starts at the end of the historical period, the ecl_summary
program will by default also load historical data. If the \fB\-\-no\-restart\fR
option is used the program will not look for old results.
.HP
\fB\-\-no\-header\fR: By default ecl_summary will print a header line at the top, with the
.IP
option \fB\-\-no\-header\fR this will be suppressed.
.HP
\fB\-\-report\-only\fR: Will only report results at report times (i.e. DATES).
.HP
\fB\-\-help\fR: Print this message and exit.
.PP
The options should come before the ECLIPSE basename.
.PP
Example1:
.TP
computer> ecl_summary
CASE1_XXX WWCT:F\-36 FOPT FWPT
.IP
This example will load results from case 'CASE1_XXX' and print the
results for keys 'WWCT:F\-36', 'FOPT' and 'FWPT' on standard out.
.PP
Example2:
.TP
computer> ecl_summary
\fB\-\-list\fR CASE2_XXX "*:F\-36" "BPR:*"
.IP
This example will list all the available keys which end with
\&':F\-36' and those which start with 'BPR:'. Observe the use of
quoting characters "" when using shell wildcards.
.PP
The ecl_summary program will look for and load both unified and
non\-unified and formatted and non\-formatted files. The default
search order is: UNSMRY, Snnnn, FUNSMRY, Annnn, however you can
manipulate this with the extension to the basename:
.PP
* If the extension corresponds to an unformatted file, ecl_summary
.IP
will only look for unformatted files.
.PP
* If the extension corresponds to a unified file, ecl_summary will
.IP
only look for unified files.
.PP
Contact Joakim Hove / joaho@statoil.com / 92 68 57 04 for bugs
and feature requests.

View File

@ -1,19 +0,0 @@
#!/usr/bin/env python
import sys
import os
import subprocess
import shutil
config_file = sys.argv[1]
PYTHONPATH = sys.argv[2]
work_path = sys.argv[3]
os.environ["PYTHONPATH"] = PYTHONPATH
shutil.copy(config_file , work_path)
os.chdir( work_path )
if not os.path.isdir("_static"):
os.mkdir("_static")
subprocess.call(["sphinx-apidoc" , "-e" , "-o" , "API/python" , PYTHONPATH ])
subprocess.call(["sphinx-build" , "-b" , "html" , "-d" , "_build/doctrees" , "." , "_build"])

View File

@ -114,6 +114,37 @@ variable, i.e. set it to a constant value. Here is an example of use:
CONST 1.0
DUNIF
-----
The keyword DUNIF is used to assign a discrete uniform distribution. It takes three arguments: the number of bins, a minimum value and a maximum value. Here is an example which creates a discrete uniform distribution on [0,1] with 25 bins:
::
DUNIF 25 0 1
ERRF
-----
The ERRF keyword is used to define a prior resulting from applying the error function to a normally distributed variable with mean 0 and variance 1. The keyword takes four arguments:
::
ERRF MIN MAX SKEWNESS WIDTH
The arguments MIN and MAX set the minimum and maximum value of the transform. Zero SKEWNESS results in a symmetric distribution, whereas negative SKEWNESS will shift the distribution towards the left and positive SKEWNESS will shift it towards the right. Letting WIDTH be larger than one will cause the distribution to be unimodal, whereas WIDTH less than one will create a bi-modal distribution.
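For example (the numbers are chosen only for illustration), a symmetric, unimodal prior on [0,1] could be specified as:
::
ERRF 0 1 0 1.5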
DERRF
-----
The keyword DERRF is similar to ERRF, but will create a discrete output. DERRF takes 5 arguments:
::
DERRF NBINS MIN MAX SKEWNESS WIDTH
NBINS sets the number of discrete values, and the other arguments have the same effect as in ERRF.
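Similarly (again with illustrative values only), a discrete prior with 10 values between 0 and 1, symmetric and unimodal:
::
DERRF 10 0 1 0 1.5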
Priors and transformations

View File

@ -33,7 +33,6 @@ List of keywords
Keyword name Required by user? Default value Purpose
===================================================================== ====================================== ============================== ==============================================================================================================================================
:ref:`ADD_FIXED_LENGTH_SCHEDULE_KW <add_fixed_length_schedule_kw>` NO Supporting unknown SCHEDULE keywords.
:ref:`ADD_STATIC_KW <add_Static_kw>` NO Add static ECLIPSE keyword that should be stored
:ref:`ANALYSIS_COPY <analysis_copy>` NO Create new instance of analysis module
:ref:`ANALYSIS_LOAD <analysis_load>` NO Load analysis module
:ref:`ANALYSIS_SET_VAR <analysis_set_var>` NO Set analysis module internal state variable
@ -52,20 +51,15 @@ Keyword name Required by
:ref:`ENKF_BOOTSTRAP <enkf_bootstrap>` NO FALSE Should we bootstrap the Kalman gain estimate
:ref:`ENKF_CROSS_VALIDATION <enkf_cross_validation>` NO ...
:ref:`ENKF_CV_FOLDS <enkf_cv_folds>` NO 10 Number of folds used in the Cross-Validation scheme
:ref:`ENKF_FORCE_NCOMP <enkf_force_ncomp>` NO FALSE Should we want to use a spesific subspace dimension
:ref:`ENKF_KERNEL_REGRESSION <enkf_kernel_regression>` NO FALSE
:ref:`ENKF_KERNEL_FUNCTION <enkf_kernel_function>` NO 1
:ref:`ENKF_KERNEL_PARAM <enkf_kernel_param>` NO 1
:ref:`ENKF_LOCAL_CV <enkf_local_cv>` NO FALSE Should we estimate the subspace dimenseion using Cross-Validation
:ref:`ENKF_MERGE_OBSERVATIONS <enkf_merge_observations>` NO FALSE Should observations from many times be merged together
:ref:`ENKF_MODE <enkf_mode>` NO STANDARD Which EnKF should be used
:ref:`ENKF_NCOMP <enkf_ncomp>` NO 1 Dimension of the reduced order subspace (If ENKF_FORCE_NCOMP = TRUE)
:ref:`ENKF_PEN_PRESS <enkf_pen_press>` NO FALSE Should we want to use a penalised PRESS statistic in model selection?
:ref:`ENKF_RERUN <enkf_rerun>` NO FALSE Should the simulations be restarted from time zero after each update.
:ref:`ENKF_SCALING <enkf_scaling>` NO TRUE Do we want to normalize the data ensemble to have unit variance?
:ref:`ENKF_TRUNCATION <enfk_truncation>` NO 0.99 Cutoff used on singular value spectrum.
:ref:`ENSPATH <enspath>` NO storage Folder used for storage of simulation results.
:ref:`EQUIL_INIT_FILE <equil_init_file>` NO Use INIT_SECTION instead
:ref:`ENSPATH <enspath>` NO storage Folder used for storage of simulation results.
:ref:`FIELD <field>` NO Ads grid parameters
:ref:`FORWARD_MODEL <forward_model>` NO Add the running of a job to the simulation forward model.
:ref:`GEN_DATA <gen_data>` NO Specify a general type of data created/updated by the forward model.
@ -76,11 +70,7 @@ Keyword name Required by
:ref:`GRID <grid>` NO Provide an ECLIPSE grid for the reservoir model.
:ref:`HISTORY_SOURCE <history_source>` NO REFCASE_HISTORY Source used for historical values.
:ref:`HOOK_WORKFLOW <hook_workflow>` NO Install a workflow to be run automatically.
:ref:`HOST_TYPE <host_type>` NO
:ref:`IGNORE_SCHEDULE <ignore_schedule>` NO
:ref:`IMAGE_TYPE <image_type>` NO png The type of the images created when plotting.
:ref:`IMAGE_VIEWER <image_viewer>` NO /usr/bin/display External program spawned to view images.
:ref:`INIT_SECTION <init_section>` NO Initialization code for the reservoir model.
:ref:`IGNORE_SCHEDULE <ignore_schedule>` NO
:ref:`INSTALL_JOB <install_jobb>` NO Install a job for use in a forward model.
:ref:`ITER_CASE <iter_Case>` NO IES%d Case name format - iterated ensemble smoother
:ref:`ITER_COUNT <iter_count>` NO 4 Number of iterations - iterated ensemble smoother
@ -99,22 +89,13 @@ Keyword name Required by
:ref:`LSF_SERVER <lsf_server>` NO Set server used when submitting LSF jobs.
:ref:`MAX_ITER_COUNT <max_iter_count>` NO Maximal number of iterations - iterated ensemble smoother.
:ref:`MAX_RESAMPLE <max_resample>` NO 1 How many times should ert resample & retry a simulation.
:ref:`MAX_RUNNING_LOCAL <max_running_local>` NO The maximum number of running jobs when running locally.
:ref:`MAX_RUNNING_LSF <max_running_lsf>` NO The maximum number of simultaneous jobs submitted to LSF.
:ref:`MAX_RUNNING_RSH <max_running_rsh>` NO The maximum number of running jobs when using RSH queue system.
:ref:`MAX_RUNTIME <max_runtime>` NO 0 Set the maximum runtime in seconds for a realization.
:ref:`MAX_SUBMIT <max_submit>` NO 2 How many times should the queue system retry a simulation.
:ref:`MIN_REALIZATIONS <min_realizations>` NO 0 Set the number of minimum reservoir realizations to run before long running realizations are stopped. Keyword STOP_LONG_RUNNING must be set to TRUE when MIN_REALIZATIONS are set.
:ref:`NUM_REALIZATIONS <num_realizations>` YES Set the number of reservoir realizations to use.
:ref:`OBS_CONFIG <obs_config>` NO File specifying observations with uncertainties.
:ref:`PLOT_DRIVER <plot_driver>` NO PLPLOT Which plotting system should be used.
:ref:`PLOT_ERRORBAR <plot_errorbar>` NO FALSE Should errorbars on observations be plotted?
:ref:`PLOT_ERRORBAR_MAX <plot_errorbar_max>` NO 25 Show error bars if less than this number of observations.
:ref:`PLOT_HEIGHT <plot_height>` NO 768 Pixel height of the plots.
:ref:`PLOT_PATH <plot_path>` NO plots Path to where the plots are stored.
:ref:`PLOT_REFCASE <plot_refcase>` NO TRUE TRUE (IF you want to plot the listed reference cases) FALSE if not.
:ref:`PLOT_REFCASE_LIST <plot_refcase_list>` NO Deprecated. Use REFCASE_LIST instead.
:ref:`PLOT_WIDTH <plot_width>` NO 1024 Pixel width of the plots.
:ref:`PLOT_SETTINGS <plot_driver>` NO Possibility to configure some aspects of plotting.
:ref:`PRE_CLEAR_RUNPATH <pre_clear_runpath>` NO FALSE Should the runpath be cleared before initializing?
:ref:`QUEUE_SYSTEM <queue_system>` NO System used for running simulation jobs.
:ref:`REFCASE <refcase>` NO (see HISTORY_SOURCE and SUMMARY) Reference case used for observations and plotting.
@ -132,7 +113,6 @@ Keyword name Required by
:ref:`SCHEDULE_PREDICTION_FILE <schedule_prediction_file>` NO Schedule prediction file.
:ref:`SETENV <setenv>` NO You can modify the UNIX environment with SETENV calls.
:ref:`SINGLE_NODE_UPDATE <single_node_update>` NO FALSE ...
:ref:`STD_CUTOFF <std_cutoff>` NO 1e-6 ...
:ref:`STOP_LONG_RUNNING <stop_long_running>` NO FALSE Stop long running realizations after minimum number of realizations (MIN_REALIZATIONS) have run.
:ref:`STORE_SEED <store_seed>` NO File where the random seed used is stored.
:ref:`SUMMARY <summary>` NO Add summary variables for internalization.
@ -141,7 +121,8 @@ Keyword name Required by
:ref:`TIME_MAP <time_map>` NO Ability to manually enter a list of dates to establish report step <-> dates mapping.
:ref:`UMASK <umask>` NO Control the permissions on files created by ERT.
:ref:`UPDATE_LOG_PATH <update_log_path>` NO update_log Summary of the EnKF update steps are stored in this directory.
:ref:`UPDATE_PATH <update_path>` NO Modify a UNIX path variable like LD_LIBRARY_PATH.
:ref:`UPDATE_PATH <update_path>` NO Modify a UNIX path variable like LD_LIBRARY_PATH.
:ref:`UPDATE_SETTINGS <update_settings>` NO Possibility to configure some common aspects of the Smoother update.|
:ref:`WORKFLOW_JOB_DIRECTORY <workflow_job_directory>` NO Directory containing workflow jobs.
===================================================================== ====================================== ============================== ==============================================================================================================================================
@ -207,29 +188,6 @@ These keywords must be set to make the enkf function properly.
GRID MY_GRID.EGRID
.. _init_section:
.. topic:: INIT_SECTION
The INIT_SECTION keyword is used to handle initialization of the ECLIPSE run. See the documentation of the Initialization for more details on why this has to be done. The keyword can be used in two different ways:
* If it is set to the name of an existing file, the contents of this file will be used for the initialization.
* If it is set to the name of a non-existing file, it will be assumed that a file with this name in the simulation folder will be generated when simulations are submitted, either by the enkf application itself, or by some job installed by the user (see INSTALL_JOB). This generated file will then be used by ECLIPSE for initialization.
*Example A:*
::
-- Use the contents of the file parameters/EQUIL.INC for initialization
INIT_SECTION params/EQUIL.INC
*Example B:*
::
-- Use a generated file for the initialization
INIT_SECTION MY_GENERATED_EQUIL_KEYWORD.INC
.. _num_realizations:
.. topic:: NUM_REALIZATIONS
@ -591,7 +549,26 @@ The keywords in this section are used to define a parametrization of the ECLIPSE
Here ID is again an arbitrary string, ECLIPSE_FILE is the name of the file the enkf will export this field to when running simulations. Note that there should be an IMPORT statement in the ECLIPSE data file corresponding to the name given with ECLIPSE_FILE. INIT_FILES is a filename (with an embedded %d) to load the initial field from. Can be RMS ROFF format, ECLIPSE restart format or ECLIPSE GRDECL format.
The options MIN, MAX, INIT_TRANSFORM and OUTPUT_TRANSFORM are all optional. MIN and MAX are as for dynamic fields. OUTPUT_TRANSFORM is the name of a mathematical function which will be applied to the field before it is exported, and INIT_TRANSFORM is the name of a function which will be applied to the fields when they are loaded. [Just use INIT_TRANSFORM:XXX to get a list of available functions.]
The input arguments MIN, MAX, INIT_TRANSFORM and OUTPUT_TRANSFORM are all optional. MIN and MAX are as for dynamic fields.
For assisted history matching, the variables in ERT should be normally distributed internally - the purpose of the transformations is to enable working with normally distributed variables internally in ERT. Thus, the optional arguments INIT_TRANSFORM:FUNC and OUTPUT_TRANSFORM:FUNC are used to transform the user input of the parameter distribution. INIT_TRANSFORM:FUNC is a function which will be applied to the field when it is loaded into ERT. OUTPUT_TRANSFORM:FUNC is a function which will be applied to the field when it is exported from ERT, and FUNC is the name of the transformation function to be applied. The available functions are listed below:
"POW10" : This function will raise x to the power of 10: y = 10^x.
"TRUNC_POW10" : This function will raise x to the power of 10 - and truncate lower values at 0.001.
"LOG" : This function will take the NATURAL logarithm of x: y = ln(x).
"LN" : This function will take the NATURAL logarithm of x: y = ln(x).
"LOG10" : This function will take the log10 logarithm of x: y = log10(x).
"EXP" : This function will calculate y = exp(x).
"LN0" : This function will calculate y = ln(x + 0.000001
"EXP0" : This function will calculate y = exp(x) - 0.000001
For example, the most common scenario is that an underlying log-normally distributed permeability in RMS is transformed to a normal distribution in ERT; then you do:
INIT_TRANSFORM:LOG To ensure that the variables which were initially log-normally distributed are transformed to a normal distribution when they are loaded into ERT.
OUTPUT_TRANSFORM:EXP To ensure that the variables are re-exponentiated to be log-normally distributed before going out to ECLIPSE.
If users specify the wrong function name (e.g. INIT_TRANSFORM:I_DONT_KNOW), ERT will stop and print all the valid function names.
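As a sketch only (the parameter name and file names below are illustrative and not taken from this repository), a log-normally distributed permeability field could then be configured as:
::
FIELD PERMX PARAMETER permx.grdecl INIT_FILES:fields/permx%d.grdecl INIT_TRANSFORM:LOG OUTPUT_TRANSFORM:EXP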
Regarding format of ECLIPSE_FILE: The default format for the parameter fields is binary format of the same type as used in the ECLIPSE restart files. This requires that the ECLIPSE datafile contains an IMPORT statement. The advantage of using a binary format is that the files are smaller, and reading/writing is faster than for plain text files. If you give the ECLIPSE_FILE with the extension .grdecl (arbitrary case), enkf will produce ordinary .grdecl files, which are loaded with an INCLUDE statement. This is probably what most users are used to beforehand - but we recommend the IMPORT form.
@ -959,18 +936,8 @@ Keywords controlling the ES algorithm
.. _enkf_alpha:
.. topic:: ENKF_ALPHA
ENKF_ALPHA has some latex letters - need to be handled!!
Scaling factor (double) used in outlier detection. Increasing this factor means that more observations will potentially be included in the assimilation. The default value is 1.50.
Including outliers in the EnKF algorithm can dramatically increase the coupling between the ensemble members. It is therefore important to filter out these outlier data prior to data assimilation. An observation, \textstyle d^o_i, will be classified as an outlier if
::
|d^o_i - \bar{d}_i| > \mathrm{ENKF\_ALPHA} \left(s_{d_i} + \sigma_{d^o_i}\right),
where \textstyle\boldsymbol{d}^o is the vector of observed data, \textstyle\boldsymbol{\bar{d}} is the average of the forecasted data ensemble, \textstyle\boldsymbol{s_{d}} is the vector of estimated standard deviations for the forecasted data ensemble, and \textstyle\boldsymbol{s_{d}^o} is the vector of standard deviations for the observation error (specified a priori).
See the sub keyword :code:`OVERLAP_LIMIT` under the :code:`UPDATE_SETTINGS` keyword.
.. _enkf_bootstrap:
.. topic:: ENKF_BOOTSTRAP
@ -1219,27 +1186,6 @@ The keywords in this section, controls advanced features of the enkf application
Real low level fix for some SCHEDULE parsing problems.
.. _add_static_kw:
.. topic:: ADD_STATIC_KW
The restart files from ECLIPSE are organized by keywords, which are of three different types:
#. Keywords containing the dynamic solution, e.g. pressure and saturations.
#. Keywords containing various types of header information which is needed for a restart.
#. Keywords containing various types of diagnostic information which is not needed for a restart.
Keywords in category 2 and 3 are referred to as static keywords. To be able to restart ECLIPSE, the enkf application has to store the keywords in category 2, whereas keywords in category 3 can safely be dropped. To determine whether a particular keyword is in category 2 or 3 the enkf considers an internal list of keywords. The current list contains the keywords:
::
INTEHEAD LOGIHEAD DOUBHEAD IGRP SGRP XGRP ZGRP IWEL SWEL XWEL ZWEL
ICON SCON XCON HIDDEN STARTSOL PRESSURE SWAT SGAS RS RV ENDSOL ICAQNUM ICAQ IAAQ
SCAQNUM SCAQ SAAQ ACAQNUM ACAQ XAAQ
ISEG ILBS ILBR RSEG ISTHW ISTHG
By using ADD_STATIC_KW you can dynamically add to this list. The magic string __ALL__ will add all static keywords. Use of the __ALL__ option is strongly discouraged, as it wastes a lot of disk space.
.. _define:
.. topic:: DEFINE
@ -1456,24 +1402,6 @@ option.
The name of the LSF queue you are running ECLIPSE simulations in.
.. _max_running_lsf:
.. topic:: MAX_RUNNING_LSF
The keyword MAX_RUNNING_LSF controls the maximum number of
simultaneous jobs submitted to the LSF (Load Sharing Facility)
queue when using the LSF option in QUEUE_SYSTEM.
*Example:*
::
-- Submit no more than 30 simultaneous jobs
-- to the LSF cluster.
MAX_RUNNING_LSF 30
Configuring TORQUE access
-------------------------
.. _configuring_torque_access:
@ -1599,25 +1527,6 @@ option DEBUG_OUTPUT:
QUEUE_OPTION TORQUE DEBUG_OUTPUT torque_log.txt
Configuring the LOCAL queue
---------------------------
.. _configuring_the_local_queue:
.. _max_running_local:
.. topic:: MAX_RUNNING_LOCAL
The keyword MAX_RUNNING_LOCAL controls the maximum number of simultaneous jobs running when using the LOCAL option in QUEUE_SYSTEM. It is strongly recommended to not let MAX_RUNNING_LOCAL exceed the number of processors on the workstation used.
*Example:*
::
-- No more than 3 simultaneous jobs
MAX_RUNNING_LOCAL 3
Configuring the RSH queue
-------------------------
.. _configuring_the_rsh_queue:
@ -1661,24 +1570,6 @@ Keywords related to plotting
.. _keywords_related_to_plotting:
.. _image_viewer:
.. topic:: IMAGE_VIEWER
The enkf application has some limited plotting capabilities. The plotting is based on creating a graphics file (currently a png file) and then viewing that file with an external application. The current default image viewer is a program called /usr/bin/display, but you can set IMAGE_VIEWER to point to another binary if that is desired. In particular it can be interesting to set as
::
IMAGE_VIEWER /d/proj/bg/enkf/bin/noplot.sh
then the plot files will be created, but they will not be flashing in your face (which can be a bit annoying).
.. _image_type:
.. topic:: IMAGE_TYPE
This switch controls the type of the plot figures/images created by the PLPLOT plot driver. It is by default set to png which works fine, but you can probably(??) use other popular graphics formats like gif and jpg as well.
.. _plot_driver:
.. topic:: PLOT_DRIVER
@ -1733,22 +1624,31 @@ Keywords related to plotting
.. _plot_path:
.. topic:: PLOT_PATH
The plotting engine creates files with plots; they are stored in a directory, and you can tell ERT what that directory should be. Observe that the current 'casename' will automatically be appended to the plot path.
.. plot_width:
.. topic:: PLOT_WIDTH
When the PLPLOT driver creates a plot file, it will have the width (in pixels) given by the PLOT_WIDTH keyword. The default value for PLOT_WIDTH is 1024 pixels. To create plots of half the size you use:
.. _plot_settings:
.. topic:: PLOT_SETTINGS
::
The :code:`PLOT_SETTINGS` keyword is a "master keyword" which
can be used to configure some aspects of the plotting. These
settings will affect the default behaviour when you create a
new plot, but you can still change these settings interactively.
PLOT_HEIGHT 384
PLOT_WIDTH 512
When using the :code:`PLOT_SETTINGS` keyword you supply a
secondary keyword and a value as the two arguments:
::
PLOT_SETTINGS SHOW_REFCASE False
Will make sure that your plots are created without the refcase
plotted as default. The available secondary keys are:
SHOW_REFCASE : Default True
SHOW_HISTORY : Default True
.. _rft_config:
@ -1796,27 +1696,37 @@ Keywords related to plotting
With the keyword :code:`HOOK_WORKFLOW` you can configure workflow
'hooks'; meaning workflows which will be run automatically at certain
points during ERTs execution. Currently there are two points in ERTs
flow of execution where you can hook in a workflow, either just before
the simulations start, :code:`PRE_SIMULATION` - or after all the
simulations have completed :code:`POST_SIMULATION`. The
points during ERTs execution. Currently there are four points in ERTs
flow of execution where you can hook in a workflow, before
the simulations start, :code:`PRE_SIMULATION`; after all the
simulations have completed :code:`POST_SIMULATION`;
before the update step, :code:`PRE_UPDATE` and after the update step, :code:`POST_UPDATE`. The
:code:`POST_SIMULATION` hook is typically used to trigger QC
workflows:
::
HOOK_WORKFLOW initWFLOW PRE_SIMULATION
HOOK_WORKFLOW QC_WFLOW1 POST_SIMULATION
HOOK_WORKFLOW QC_WFLOW2 POST_SIMULATION
HOOK_WORKFLOW initWFLOW PRE_SIMULATION
HOOK_WORKFLOW preUpdateWFLOW PRE_UPDATE
HOOK_WORKFLOW postUpdateWFLOW POST_UPDATE
HOOK_WORKFLOW QC_WFLOW1 POST_SIMULATION
HOOK_WORKFLOW QC_WFLOW2 POST_SIMULATION
In this example the the workflow :code:`initWFLOW` will run after all
the simulation directiories have been created, just before the forward
model is submitted to the queue. When all the simulations are complete
In this example the workflow :code:`initWFLOW` will run after all
the simulation directories have been created, just before the forward
model is submitted to the queue. The workflow :code:`preUpdateWFLOW` will be run before the
update step and :code:`postUpdateWFLOW` will be run after the
update step. When all the simulations are complete
the two workflows :code:`QC_WFLOW1` and :code:`QC_WFLOW2` will be
run. Observe that the workflows being 'hooked in' with the
run.
Observe that the workflows being 'hooked in' with the
:code:`HOOK_WORKFLOW` must be loaded with the :code:`LOAD_WORKFLOW`
keyword.
Currently, :code:`PRE_UPDATE` and :code:`POST_UPDATE` are only
available from python.
Manipulating the Unix environment
---------------------------------
@ -1861,6 +1771,47 @@ The two keywords SETENV and UPDATE_PATH can be used to manipulate the Unix envir
The whole thing is just a workaround because we can not use $PATH.
.. _update_settings:
.. topic:: UPDATE_SETTINGS
The :code:`UPDATE_SETTINGS` keyword is a *super-keyword* which can be
used to control parameters which apply to the Ensemble Smoother update
algorithm. The :code:`UPDATE_SETTINGS` keyword currently supports the two
subkeywords:
OVERLAP_LIMIT
Scaling factor used when detecting outliers. Increasing
this factor means that more observations will potentially be
included in the assimilation. The default value is 3.00.
Including outliers in the Smoother algorithm can dramatically
increase the coupling between the ensemble members. It is
therefore important to filter out these outlier data prior to
data assimilation. An observation, \textstyle d^o_i, will be
classified as an outlier if
::
|d^o_i - \bar{d}_i| > \mathrm{OVERLAP\_LIMIT} \left(s_{d_i} + \sigma_{d^o_i}\right),
where \textstyle\boldsymbol{d}^o is the vector of observed
data, \textstyle\boldsymbol{\bar{d}} is the average of the
forecasted data ensemble, \textstyle\boldsymbol{s_{d}} is the
vector of estimated standard deviations for the forecasted data
ensemble, and \textstyle\boldsymbol{s_{d}^o} is the vector of
standard deviations for the observation error (specified a
priori).
STD_CUTOFF
If the ensemble variation for one particular measurement is
below this limit, the observation will be deactivated. The
default value for this cutoff is 1e-6.
Observe that many of the settings which control the update must
instead be applied on the analysis module in question.
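A configuration sketch, assuming the same secondary-keyword form as :code:`PLOT_SETTINGS` (the values shown are illustrative only):
::
UPDATE_SETTINGS OVERLAP_LIMIT 2.50
UPDATE_SETTINGS STD_CUTOFF 1e-5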
.. _umask:
.. topic:: UMASK
@ -1916,6 +1867,3 @@ The two keywords SETENV and UPDATE_PATH can be used to manipulate the Unix envir
- Owner(7) can execute(1), write(2) and read(4).
- Group(5) can execute(1) and read(4).
- Others(2) can read(4)

View File

@ -295,28 +295,37 @@ Automatically run workflows : HOOK_WORKFLOW
With the keyword :code:`HOOK_WORKFLOW` you can configure workflow
'hooks'; meaning workflows which will be run automatically at certain
points during ERTs execution. Currently there are two points in ERTs
flow of execution where you can hook in a workflow, either just before
the simulations start, :code:`PRE_SIMULATION` - or after all the
simulations have completed :code:`POST_SIMULATION`. The
points during ERTs execution. Currently there are four points in ERTs
flow of execution where you can hook in a workflow, before
the simulations start, :code:`PRE_SIMULATION`; after all the
simulations have completed :code:`POST_SIMULATION`;
before the update step, :code:`PRE_UPDATE` and after the update step, :code:`POST_UPDATE`. The
:code:`POST_SIMULATION` hook is typically used to trigger QC
workflows:
::
HOOK_WORKFLOW initWFLOW PRE_SIMULATION
HOOK_WORKFLOW QC_WFLOW1 POST_SIMULATION
HOOK_WORKFLOW QC_WFLOW2 POST_SIMULATION
HOOK_WORKFLOW initWFLOW PRE_SIMULATION
HOOK_WORKFLOW preUpdateWFLOW PRE_UPDATE
HOOK_WORKFLOW postUpdateWFLOW POST_UPDATE
HOOK_WORKFLOW QC_WFLOW1 POST_SIMULATION
HOOK_WORKFLOW QC_WFLOW2 POST_SIMULATION
In this example the the workflow :code:`initWFLOW` will run after all
the simulation directiories have been created, just before the forward
model is submitted to the queue. When all the simulations are complete
In this example the workflow :code:`initWFLOW` will run after all
the simulation directories have been created, just before the forward
model is submitted to the queue. The workflow :code:`preUpdateWFLOW` will be run before the
update step and :code:`postUpdateWFLOW` will be run after the
update step. When all the simulations are complete
the two workflows :code:`QC_WFLOW1` and :code:`QC_WFLOW2` will be
run. Observe that the workflows being 'hooked in' with the
run.
Observe that the workflows being 'hooked in' with the
:code:`HOOK_WORKFLOW` must be loaded with the :code:`LOAD_WORKFLOW`
keyword.
Currently, :code:`PRE_UPDATE` and :code:`POST_UPDATE` are only
available from python.
Locating the realisations: <RUNPATH_FILE>
-----------------------------------------

View File

@ -76,7 +76,12 @@ void enkf_linalg_lowrankCinv(const matrix_type * S ,
double truncation ,
int ncomp);
void enkf_linalg_lowrankE(const matrix_type * S , /* (nrobs x nrens) */
const matrix_type * E , /* (nrobs x nrens) */
matrix_type * W , /* (nrobs x nrmin) Corresponding to X1 from Eqs. 14.54-14.55 */
double * eig , /* (nrmin) Corresponding to 1 / (1 + Lambda1^2) (14.54) */
double truncation ,
int ncomp);
void enkf_linalg_genX2(matrix_type * X2 , const matrix_type * S , const matrix_type * W , const double * eig);
void enkf_linalg_genX3(matrix_type * X3 , const matrix_type * W , const matrix_type * D , const double * eig);

View File

@ -14,6 +14,7 @@ extern "C" {
#define ENKF_TRUNCATION_KEY_ "ENKF_TRUNCATION"
#define ENKF_NCOMP_KEY_ "ENKF_NCOMP"
#define USE_EE_KEY_ "USE_EE"
#define USE_GE_KEY_ "USE_GE"
#define ANALYSIS_SCALE_DATA_KEY_ "ANALYSIS_SCALE_DATA"
typedef struct std_enkf_data_struct std_enkf_data_type;

View File

@ -39,6 +39,8 @@ rml_enkf_log_type * rml_enkf_log_alloc() {
rml_log->log_file = NULL;
rml_log->log_stream = NULL;
rml_enkf_log_set_clear_log( rml_log , DEFAULT_CLEAR_LOG );
rml_enkf_log_set_log_file( rml_log, DEFAULT_LOG_FILE );
return rml_log;
}

View File

@ -15,6 +15,14 @@ endif()
#-----------------------------------------------------------------
if (ERT_USE_OPENMP)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${OpenMP_SHARED_LINKER_FLAGS}")
endif()
if (INSTALL_ERT)
install(TARGETS analysis DESTINATION ${CMAKE_INSTALL_LIBDIR})
#install(TARGETS rml_enkf DESTINATION ${CMAKE_INSTALL_LIBDIR})

View File

@ -139,7 +139,7 @@ static int enkf_linalg_num_significant(int num_singular_values , const double *
double total_sigma2 = 0;
for (int i=0; i < num_singular_values; i++)
total_sigma2 += sig0[i] * sig0[i];
/*
Determine the number of singular values by enforcing that
less than a fraction @truncation of the total variance be
@ -183,7 +183,7 @@ int enkf_linalg_svdS(const matrix_type * S ,
if (ncomp > 0)
num_significant = ncomp;
else
else
num_significant = enkf_linalg_num_significant( num_singular_values , sig0 , truncation );
{
@ -214,7 +214,7 @@ int enkf_linalg_num_PC(const matrix_type * S , double truncation ) {
matrix_dgesvd(DGESVD_NONE , DGESVD_NONE , workS , sig0 , NULL , NULL);
matrix_free( workS );
}
num_significant = enkf_linalg_num_significant( num_singular_values , sig0 , truncation );
free( sig0 );
return num_significant;
@ -222,6 +222,75 @@ int enkf_linalg_num_PC(const matrix_type * S , double truncation ) {
/*
****************************************************************************************************
Routine computes X1 and eig corresponding to Eqs 14.54-14.55
Geir Evensen
*/
void enkf_linalg_lowrankE(const matrix_type * S , /* (nrobs x nrens) */
const matrix_type * E , /* (nrobs x nrens) */
matrix_type * W , /* (nrobs x nrmin) Corresponding to X1 from Eqs. 14.54-14.55 */
double * eig , /* (nrmin) Corresponding to 1 / (1 + Lambda1^2) (14.54) */
double truncation ,
int ncomp) {
const int nrobs = matrix_get_rows( S );
const int nrens = matrix_get_columns( S );
const int nrmin = util_int_min( nrobs , nrens );
matrix_type * U0 = matrix_alloc( nrobs , nrmin );
double * inv_sig0 = util_calloc( nrmin , sizeof * inv_sig0);
matrix_type * X0 = matrix_alloc( nrmin , nrens );
matrix_type * U1 = matrix_alloc( nrmin , nrmin );
double * sig1 = util_calloc( nrmin , sizeof * sig1);
int i ,j;
/* Compute SVD of S=HA` -> U0, invsig0=sig0^(-1) */
enkf_linalg_svdS(S , truncation , ncomp , DGESVD_NONE , inv_sig0, U0 , NULL);
/* X0(nrmin x nrens) = Sigma0^(+) * U0'* E (14.51) */
matrix_dgemm(X0 , U0 , E , true , false , 1.0 , 0.0); /* X0 = U0^T * E (14.51) */
/* Multiply X0 with sig0^(-1) from left X0 = S^(-1) * X0 */
for (j=0; j < matrix_get_columns( X0 ) ; j++)
for (i=0; i < matrix_get_rows( X0 ); i++)
matrix_imul(X0 , i , j , inv_sig0[j]);
/* Compute SVD of X0-> U1*eig*V1 14.52 */
matrix_dgesvd(DGESVD_MIN_RETURN , DGESVD_NONE, X0 , sig1, U1 , NULL);
/* Lambda1 = 1/(I + Lambda^2) in 14.56 */
for (i=0; i < nrmin; i++)
eig[i] = 1.0 / (1.0 + sig1[i]*sig1[i]);
/* Compute sig0^+ U1 (14:55) */
for (j=0; j < nrmin; j++)
for (i=0; i < nrmin; i++)
matrix_imul(U1 , i , j , inv_sig0[i]);
/* Compute X1 = W = U0 * (U1=sig0^+ U1) = U0 * Sigma0^(+') * U1 (14:55) */
matrix_matmul(W , U0 , U1);
matrix_free( X0 );
matrix_free( U0 );
util_safe_free( inv_sig0 );
matrix_free( U1 );
util_safe_free( sig1 );
}
void enkf_linalg_Cee(matrix_type * B, int nrens , const matrix_type * R , const matrix_type * U0 , const double * inv_sig0) {

View File

@ -21,6 +21,10 @@
#include <math.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <ert/util/type_macros.h>
#include <ert/util/util.h>
#include <ert/util/rng.h>
@ -39,17 +43,20 @@
#include <ert/analysis/module_obs_block.h>
#include <ert/analysis/module_obs_block_vector.h>
#define FWD_STEP_ENKF_TYPE_ID 765524
#define DEFAULT_NFOLDS 5
#define DEFAULT_R2_LIMIT 0.99
#define DEFAULT_NUM_THREADS -1
#define NFOLDS_KEY "CV_NFOLDS"
#define R2_LIMIT_KEY "FWD_STEP_R2_LIMIT"
#define DEFAULT_VERBOSE false
#define VERBOSE_KEY "VERBOSE"
#define LOG_FILE_KEY "LOG_FILE"
#define CLEAR_LOG_KEY "CLEAR_LOG"
#define NUM_THREADS_KEY "NUM_THREADS"
#define LOG_FILE_KEY "LOG_FILE"
#define CLEAR_LOG_KEY "CLEAR_LOG"
struct fwd_step_enkf_data_struct {
UTIL_TYPE_ID_DECLARATION;
@ -59,6 +66,7 @@ struct fwd_step_enkf_data_struct {
long option_flags;
double r2_limit;
bool verbose;
int num_threads;
fwd_step_log_type * fwd_step_log;
};
@ -79,6 +87,11 @@ void fwd_step_enkf_set_verbose( fwd_step_enkf_data_type * data , bool verbose )
data->verbose = verbose;
}
void fwd_step_enkf_set_num_threads( fwd_step_enkf_data_type * data , int threads ) {
data->num_threads = threads;
}
void * fwd_step_enkf_data_alloc( rng_type * rng ) {
fwd_step_enkf_data_type * data = util_malloc( sizeof * data );
UTIL_TYPE_ID_INIT( data , FWD_STEP_ENKF_TYPE_ID );
@ -89,6 +102,7 @@ void * fwd_step_enkf_data_alloc( rng_type * rng ) {
data->r2_limit = DEFAULT_R2_LIMIT;
data->option_flags = ANALYSIS_NEED_ED + ANALYSIS_UPDATE_A + ANALYSIS_SCALE_DATA;
data->verbose = DEFAULT_VERBOSE;
data->num_threads = DEFAULT_NUM_THREADS;
data->fwd_step_log = fwd_step_log_alloc();
return data;
}
@ -120,6 +134,7 @@ static void fwd_step_enkf_write_log_header( fwd_step_enkf_data_type * fwd_step_d
const char * column3 = "NumAttached";
const char * column4 = "AttachedObs(ActiveIndex)[Percentage sensitivity]";
int nfolds = fwd_step_data->nfolds;
int num_threads = fwd_step_data->num_threads;
double r2_limit = fwd_step_data->r2_limit;
if (fwd_step_log_is_open( fwd_step_data->fwd_step_log )) {
@ -141,6 +156,7 @@ static void fwd_step_enkf_write_log_header( fwd_step_enkf_data_type * fwd_step_d
printf("Total number of observations: %d\n",nd);
printf("Number of ensembles : %d\n",ens_size);
printf("CV folds : %d\n",nfolds);
printf("Number of threads : %d\n",num_threads);
printf("Relative R2 tolerance : %f\n",r2_limit);
printf("===============================================================================================================================\n");
printf(format, column1, column2, column3, column4);
@ -250,6 +266,14 @@ void fwd_step_enkf_updateA(void * module_data ,
bool verbose = fwd_step_data->verbose;
int num_kw = module_data_block_vector_get_size(data_block_vector);
#if defined(_OPENMP)
#pragma omp parallel
#pragma omp master
if (fwd_step_data->num_threads == DEFAULT_NUM_THREADS)
fwd_step_data->num_threads = omp_get_num_threads();
#else
fwd_step_data->num_threads = 1;
#endif
if ( ens_size <= nfolds)
util_abort("%s: The number of ensembles must be larger than the CV fold - aborting\n", __func__);
@ -257,18 +281,13 @@ void fwd_step_enkf_updateA(void * module_data ,
{
stepwise_type * stepwise_data = stepwise_alloc1(ens_size, nd , fwd_step_data->rng);
matrix_type * workS = matrix_alloc( ens_size , nd );
matrix_type * workE = matrix_alloc( ens_size , nd );
matrix_type * St = matrix_alloc( ens_size , nd );
matrix_type * Et = matrix_alloc( ens_size , nd );
/*workS = S' */
matrix_subtract_row_mean( S ); /* Shift away the mean */
workS = matrix_alloc_transpose( S );
workE = matrix_alloc_transpose( E );
stepwise_set_X0( stepwise_data , workS );
stepwise_set_E0( stepwise_data , workE );
St = matrix_alloc_transpose( S );
Et = matrix_alloc_transpose( E );
matrix_type * di = matrix_alloc( 1 , nd );
@ -277,53 +296,67 @@ void fwd_step_enkf_updateA(void * module_data ,
fwd_step_enkf_write_log_header(fwd_step_data, ministep_name, nx, nd, ens_size);
}
for (int kw = 0; kw < num_kw; kw++) {
int kw,i;
/* This is to avoid a global-to-block search function since the number of parameters could be very large*/
int_vector_type * kw_list = int_vector_alloc(nx, -1);
int_vector_type * local_index_list = int_vector_alloc(nx, -1);
for (kw = 0; kw < num_kw; kw++) {
module_data_block_type * data_block = module_data_block_vector_iget_module_data_block(data_block_vector, kw);
const char * key = module_data_block_get_key(data_block);
int row_start = module_data_block_get_row_start(data_block);
int row_end = module_data_block_get_row_end(data_block);
for (i = row_start; i < row_end; i++) {
int_vector_iset(kw_list, i, kw);
int_vector_iset(local_index_list, i, i - row_start);
}
}
// =============================================
#pragma omp parallel for schedule(dynamic, 1) num_threads(fwd_step_data->num_threads)
for (i = 0; i < nx; i++) {
int kw_ind = int_vector_iget(kw_list, i);
module_data_block_type * data_block = module_data_block_vector_iget_module_data_block(data_block_vector, kw_ind);
const char * key = module_data_block_get_key(data_block);
const int* active_indices = module_data_block_get_active_indices(data_block);
int local_index = 0;
int active_index = 0;
bool all_active = active_indices == NULL; /* Inactive are not present in A */
stepwise_type * stepwise_data = stepwise_alloc1(ens_size, nd , fwd_step_data->rng, St, Et);
for (int i = row_start; i < row_end; i++) {
/*Update values of y */
/*Start of the actual update */
matrix_type * y = matrix_alloc( ens_size , 1 );
/*Update values of y */
/*Start of the actual update */
matrix_type * y = matrix_alloc( ens_size , 1 );
for (int j = 0; j < ens_size; j++) {
matrix_iset(y , j , 0 , matrix_iget( A, i , j ) );
}
for (int j = 0; j < ens_size; j++) {
matrix_iset(y , j , 0 , matrix_iget( A, i , j ) );
}
stepwise_set_Y0( stepwise_data , y );
stepwise_set_Y0( stepwise_data , y );
stepwise_estimate(stepwise_data , r2_limit , nfolds );
/*manipulate A directly*/
for (int j = 0; j < ens_size; j++) {
for (int k = 0; k < nd; k++) {
matrix_iset(di , 0 , k , matrix_iget( D , k , j ) );
}
double aij = matrix_iget( A , i , j );
double xHat = stepwise_eval(stepwise_data , di );
matrix_iset(A , i , j , aij + xHat);
/*manipulate A directly*/
for (int j = 0; j < ens_size; j++) {
for (int k = 0; k < nd; k++) {
matrix_iset(di , 0 , k , matrix_iget( D , k , j ) );
}
if (verbose){
if (all_active)
active_index = local_index;
else
active_index = active_indices[local_index];
fwd_step_enkf_write_iter_info(fwd_step_data, stepwise_data, key, active_index, i, module_info);
}
local_index ++;
double aij = matrix_iget( A , i , j );
double xHat = stepwise_eval(stepwise_data , di );
matrix_iset(A , i , j , aij + xHat);
}
if (verbose){
int loc_ind = int_vector_iget(local_index_list, i );
if (all_active)
active_index = loc_ind;
else
active_index = active_indices[loc_ind];
fwd_step_enkf_write_iter_info(fwd_step_data, stepwise_data, key, active_index, i, module_info);
}
stepwise_free( stepwise_data );
}
if (verbose)
@ -331,9 +364,10 @@ void fwd_step_enkf_updateA(void * module_data ,
printf("Done with stepwise regression enkf\n");
stepwise_free( stepwise_data );
matrix_free( di );
matrix_free( di );
int_vector_free(kw_list);
int_vector_free(local_index_list);
}
@ -368,9 +402,11 @@ bool fwd_step_enkf_set_int( void * arg , const char * var_name , int value) {
{
bool name_recognized = true;
/*Set number of CV folds */
if (strcmp( var_name , NFOLDS_KEY) == 0)
fwd_step_enkf_set_nfolds( module_data , value);
fwd_step_enkf_set_nfolds( module_data , value); /*Set number of CV folds */
else if (strcmp( var_name , NUM_THREADS_KEY) == 0)
fwd_step_enkf_set_num_threads( module_data , value); /*Set number of OMP threads */
else
name_recognized = false;
@ -428,6 +464,8 @@ bool fwd_step_enkf_has_var( const void * arg, const char * var_name) {
return true;
else if (strcmp(var_name , CLEAR_LOG_KEY) == 0)
return true;
else if (strcmp(var_name , NUM_THREADS_KEY) == 0)
return true;
else
return false;
}
@ -448,6 +486,8 @@ int fwd_step_enkf_get_int( const void * arg, const char * var_name) {
{
if (strcmp(var_name , NFOLDS_KEY) == 0)
return module_data->nfolds;
if (strcmp(var_name , NUM_THREADS_KEY) == 0)
return module_data->num_threads;
else
return -1;
}

View File

@ -52,6 +52,7 @@
#define INVALID_TRUNCATION -1
#define DEFAULT_SUBSPACE_DIMENSION INVALID_SUBSPACE_DIMENSION
#define DEFAULT_USE_EE false
#define DEFAULT_USE_GE false
#define DEFAULT_ANALYSIS_SCALE_DATA true
@ -86,6 +87,7 @@ struct std_enkf_data_struct {
int subspace_dimension; // Controlled by config key: ENKF_NCOMP_KEY (-1: use Truncation instead)
long option_flags;
bool use_EE;
bool use_GE;
bool analysis_scale_data;
};
@ -133,6 +135,7 @@ void * std_enkf_data_alloc( rng_type * rng) {
std_enkf_set_subspace_dimension( data , DEFAULT_SUBSPACE_DIMENSION );
data->option_flags = ANALYSIS_NEED_ED;
data->use_EE = DEFAULT_USE_EE;
data->use_GE = DEFAULT_USE_GE;
data->analysis_scale_data = DEFAULT_ANALYSIS_SCALE_DATA;
return data;
}
@ -153,7 +156,8 @@ static void std_enkf_initX__( matrix_type * X ,
double truncation,
int ncomp,
bool bootstrap ,
bool use_EE) {
bool use_EE ,
bool use_GE) {
int nrobs = matrix_get_rows( S );
int ens_size = matrix_get_columns( S );
@ -165,17 +169,24 @@ static void std_enkf_initX__( matrix_type * X ,
matrix_subtract_row_mean( S ); /* Shift away the mean */
if (use_EE) {
matrix_type * Et = matrix_alloc_transpose( E );
matrix_type * Cee = matrix_alloc_matmul( E , Et );
matrix_scale( Cee , 1.0 / (ens_size - 1));
if (use_GE) {
enkf_linalg_lowrankE( S , E , W , eig , truncation , ncomp);
}
else {
matrix_type * Et = matrix_alloc_transpose( E );
matrix_type * Cee = matrix_alloc_matmul( E , Et );
matrix_scale( Cee , 1.0 / (ens_size - 1));
enkf_linalg_lowrankCinv( S , Cee , W , eig , truncation , ncomp);
enkf_linalg_lowrankCinv( S , Cee , W , eig , truncation , ncomp);
matrix_free( Et );
matrix_free( Cee );
} else
matrix_free( Et );
matrix_free( Cee );
}
}
else {
enkf_linalg_lowrankCinv( S , R , W , eig , truncation , ncomp);
}
enkf_linalg_init_stdX( X , S , D , W , eig , bootstrap);
@ -203,7 +214,7 @@ void std_enkf_initX(void * module_data ,
int ncomp = data->subspace_dimension;
double truncation = data->truncation;
std_enkf_initX__(X,S,R,E,D,truncation,ncomp,false,data->use_EE);
std_enkf_initX__(X,S,R,E,D,truncation,ncomp,false,data->use_EE,data->use_GE);
}
}
@ -250,6 +261,8 @@ bool std_enkf_set_bool( void * arg , const char * var_name , bool value) {
if (strcmp( var_name , USE_EE_KEY_) == 0)
module_data->use_EE = value;
else if (strcmp( var_name , USE_GE_KEY_) == 0)
module_data->use_GE = value;
else if (strcmp( var_name , ANALYSIS_SCALE_DATA_KEY_) == 0)
module_data->analysis_scale_data = value;
else
@ -275,6 +288,8 @@ bool std_enkf_has_var( const void * arg, const char * var_name) {
return true;
else if (strcmp(var_name , USE_EE_KEY_) == 0)
return true;
else if (strcmp(var_name , USE_GE_KEY_) == 0)
return true;
else if (strcmp(var_name , ANALYSIS_SCALE_DATA_KEY_) == 0)
return true;
else
@ -308,7 +323,9 @@ bool std_enkf_get_bool( const void * arg, const char * var_name) {
{
if (strcmp(var_name , USE_EE_KEY_) == 0)
return module_data->use_EE;
if (strcmp(var_name , ANALYSIS_SCALE_DATA_KEY_) == 0)
else if (strcmp(var_name , USE_GE_KEY_) == 0)
return module_data->use_GE;
else if (strcmp(var_name , ANALYSIS_SCALE_DATA_KEY_) == 0)
return module_data->analysis_scale_data;
else
return false;

View File

@ -36,7 +36,7 @@ extern "C" {
typedef struct config_content_struct config_content_type;
config_content_type * config_content_alloc();
config_content_type * config_content_alloc(const char * filename);
void config_content_free( config_content_type * content );
void config_content_set_valid( config_content_type * content);
bool config_content_is_valid( const config_content_type * content );
@ -68,14 +68,13 @@ typedef struct config_content_struct config_content_type;
void config_content_add_define( config_content_type * content , const char * key , const char * value );
subst_list_type * config_content_get_define_list( config_content_type * content );
const char * config_content_get_config_file( const config_content_type * content , bool abs_path );
void config_content_set_config_file( config_content_type * content , const char * config_file );
int config_content_get_size(const config_content_type * content);
const config_content_node_type * config_content_iget_node( const config_content_type * content , int index);
bool config_content_add_file( config_content_type * content , const char * config_file);
config_root_path_type * config_content_get_invoke_path( config_content_type * content );
void config_content_set_invoke_path( config_content_type * content);
config_path_elm_type * config_content_add_path_elm( config_content_type * content , const char * path );
void config_content_pop_path_stack( config_content_type * content );
const stringlist_type * config_content_get_warnings( const config_content_type * content);
UTIL_IS_INSTANCE_HEADER( config_content );

View File

@ -97,6 +97,7 @@ typedef struct config_parser_struct config_parser_type;
const subst_list_type * config_get_define_list( const config_parser_type * config);
int config_get_schema_size( const config_parser_type * config );
config_content_node_type * config_get_value_node( const config_parser_type * config , const char * kw);
void config_parser_deprecate(config_parser_type * config , const char * kw, const char * msg);
#ifdef __cplusplus
}

View File

@ -28,7 +28,7 @@ typedef struct config_root_path_struct config_root_path_type;
void config_root_path_free( config_root_path_type * root_path );
config_root_path_type * config_root_path_alloc( const char * input_path );
void config_root_path_printf( const config_root_path_type * root_path );
const char * config_root_path_get_input_path( const config_root_path_type * root_path );
const char * config_root_path_get_rel_path( const config_root_path_type * root_path );
const char * config_root_path_get_abs_path( const config_root_path_type * root_path );

View File

@ -97,7 +97,10 @@ typedef enum {
void config_schema_item_iset_type( config_schema_item_type * item , int index , config_item_types type);
config_item_types config_schema_item_iget_type(const config_schema_item_type * item , int index );
void config_schema_item_set_default_type( config_schema_item_type * item , config_item_types type);
bool config_schema_item_is_deprecated( const config_schema_item_type * item);
const char * config_schema_item_get_deprecate_msg( const config_schema_item_type * item);
void config_schema_item_set_deprecated( config_schema_item_type * item , const char * msg);
bool config_schema_item_valid_string(config_item_types value_type , const char * value);
#ifdef __cplusplus
}

View File

@ -0,0 +1,67 @@
/*
Copyright (C) 2017 Statoil ASA, Norway.
The file 'config_settings.h' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#ifndef ERT_CONFIG_SETTINGS_H
#define ERT_CONFIG_SETTINGS_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#include <ert/util/stringlist.h>
#include <ert/config/config_parser.h>
#include <ert/config/config_content.h>
#include <ert/config/config_schema_item.h>
typedef struct config_settings_struct config_settings_type;
config_settings_type * config_settings_alloc( const char * root_key );
void config_settings_free( config_settings_type * settings);
bool config_settings_has_key( const config_settings_type * settings , const char * key);
config_item_types config_settings_get_value_type( const config_settings_type * config_settings , const char * key);
bool config_settings_set_value( const config_settings_type * config_settings , const char * key, const char * value);
void config_settings_init_parser( const config_settings_type * config_settings, config_parser_type * config , bool required);
void config_settings_init_parser__( const char * root_key , config_parser_type * config , bool required);
void config_settings_apply(config_settings_type * config_settings , const config_content_type * config );
stringlist_type * config_settings_alloc_keys( const config_settings_type * config_settings );
bool config_settings_add_setting(config_settings_type * settings , const char* key, config_item_types value_type , const char* initial_value);
void config_settings_add_int_setting(config_settings_type * settings , const char* key, int initial_value);
void config_settings_add_double_setting(config_settings_type * settings , const char* key, double initial_value);
void config_settings_add_string_setting(config_settings_type * settings , const char* key, const char * initial_value);
void config_settings_add_bool_setting(config_settings_type * settings , const char* key, bool initial_value);
const char * config_settings_get_value( const config_settings_type * config_settings , const char * key);
const char * config_settings_get_string_value( const config_settings_type * config_settings , const char * key);
int config_settings_get_int_value( const config_settings_type * config_settings , const char * key);
bool config_settings_get_bool_value( const config_settings_type * config_settings , const char * key);
double config_settings_get_double_value( const config_settings_type * config_settings , const char * key);
bool config_settings_set_value( const config_settings_type * config_settings , const char * key, const char * value);
bool config_settings_set_int_value( const config_settings_type * config_settings , const char * key, int value);
bool config_settings_set_double_value( const config_settings_type * config_settings , const char * key, double value);
bool config_settings_set_bool_value( const config_settings_type * config_settings , const char * key, bool value);
bool config_settings_set_string_value( const config_settings_type * config_settings , const char * key, const char * value);
#ifdef __cplusplus
}
#endif
#endif
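A minimal usage sketch of the config_settings API declared above; the root key "PLOT_SETTING" and the setting names are invented for illustration:

#include <stdio.h>
#include <stdbool.h>
#include <ert/config/config_settings.h>

void config_settings_example( void ) {
   /* One settings object collects all KEY/VALUE pairs registered under one root keyword. */
   config_settings_type * settings = config_settings_alloc( "PLOT_SETTING" );

   /* Register typed settings with default values ... */
   config_settings_add_int_setting( settings , "WIDTH" , 1024 );
   config_settings_add_bool_setting( settings , "SHOW_GRID" , true );

   /* ... which can later be overridden and queried by key. */
   config_settings_set_int_value( settings , "WIDTH" , 1920 );
   if (config_settings_get_bool_value( settings , "SHOW_GRID" ))
      printf("Plot width: %d \n" , config_settings_get_int_value( settings , "WIDTH" ));

   config_settings_free( settings );
}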

View File

@ -1,5 +1,5 @@
set( source_files config_parser.c config_content.c config_error.c config_schema_item.c config_content_item.c config_content_node.c config_root_path.c config_path_elm.c conf.c conf_util.c conf_data.c)
set( header_files config_parser.h config_content.h config_error.h config_schema_item.h config_content_item.h config_content_node.h config_root_path.h config_path_elm.h conf.h conf_data.h)
set( source_files config_parser.c config_content.c config_error.c config_schema_item.c config_content_item.c config_content_node.c config_root_path.c config_path_elm.c conf.c conf_util.c conf_data.c config_settings.c)
set( header_files config_parser.h config_content.h config_error.h config_schema_item.h config_content_item.h config_content_node.h config_root_path.h config_path_elm.h conf.h conf_data.h config_settings.h)
add_library( config ${LIBRARY_TYPE} ${source_files} )
set_target_properties( config PROPERTIES VERSION ${ERT_VERSION_MAJOR}.${ERT_VERSION_MINOR} SOVERSION ${ERT_VERSION_MAJOR} )

View File

@ -39,6 +39,7 @@ struct config_content_struct {
vector_type * nodes;
hash_type * items;
config_error_type * parse_errors;
stringlist_type * warnings;
subst_list_type * define_list;
char * config_file;
char * abs_path;
@ -52,7 +53,7 @@ struct config_content_struct {
UTIL_IS_INSTANCE_FUNCTION( config_content , CONFIG_CONTENT_TYPE_ID )
config_content_type * config_content_alloc() {
config_content_type * config_content_alloc(const char * filename) {
config_content_type * content = util_malloc( sizeof * content );
UTIL_TYPE_ID_INIT( content , CONFIG_CONTENT_TYPE_ID );
content->valid = false;
@ -61,13 +62,20 @@ config_content_type * config_content_alloc() {
content->parse_errors = config_error_alloc();
content->define_list = subst_list_alloc( NULL );
content->parsed_files = set_alloc_empty();
content->warnings = stringlist_alloc_new();
content->path_elm_storage = vector_alloc_new();
content->path_elm_stack = vector_alloc_new();
content->invoke_path = NULL;
content->config_file = NULL;
content->abs_path = NULL;
content->config_file = util_alloc_string_copy( filename );
content->abs_path = util_alloc_abs_path( filename );
{
char * path = util_split_alloc_dirname( filename );
content->invoke_path = config_root_path_alloc( NULL );
free( path );
}
return content;
}
@ -88,6 +96,8 @@ void config_content_add_item( config_content_type * content , const config_schem
config_content_item_type * content_item = config_content_item_alloc( schema_item , path_elm );
hash_insert_hash_owned_ref( content->items , kw , content_item , config_content_item_free__ );
if (config_schema_item_is_deprecated(schema_item))
stringlist_append_copy( content->warnings , config_schema_item_get_deprecate_msg(schema_item));
}
void config_content_add_node( config_content_type * content , config_content_node_type * content_node ) {
@ -108,7 +118,15 @@ config_error_type * config_content_get_errors( const config_content_type * conte
return content->parse_errors;
}
const stringlist_type * config_content_get_warnings( const config_content_type * content) {
return content->warnings;
}
void config_content_free( config_content_type * content ) {
stringlist_free( content->warnings );
vector_free( content->nodes );
vector_free( content->path_elm_stack );
vector_free( content->path_elm_storage );
@ -133,11 +151,6 @@ config_root_path_type * config_content_get_invoke_path( config_content_type * co
}
void config_content_set_invoke_path( config_content_type * content) {
if (content->invoke_path != NULL)
config_root_path_free( content->invoke_path );
content->invoke_path = config_root_path_alloc( NULL );
}
@ -366,12 +379,6 @@ subst_list_type * config_content_get_define_list( config_content_type * content
/*****************************************************************/
void config_content_set_config_file( config_content_type * content , const char * config_file ) {
content->config_file = util_realloc_string_copy( content->config_file , config_file );
util_safe_free(content->abs_path);
content->abs_path = util_alloc_abs_path( config_file );
}

View File

@ -487,7 +487,7 @@ static void config_parse__(config_parser_type * config ,
free( abs_filename );
}
config_path_elm_type * current_path_elm;
char * config_file;
{
/* Extract the path component of the current input file and chdir() */
@ -645,7 +645,7 @@ config_content_type * config_parse(config_parser_type * config ,
config_schema_unrecognized_enum unrecognized_behaviour,
bool validate) {
config_content_type * content = config_content_alloc( );
config_content_type * content = config_content_alloc( filename );
if(pre_defined_kw_map != NULL) {
hash_iter_type * keys = hash_iter_alloc(pre_defined_kw_map);
@ -658,15 +658,11 @@ config_content_type * config_parse(config_parser_type * config ,
hash_iter_free(keys);
}
//
if (util_file_readable( filename )) {
path_stack_type * path_stack = path_stack_alloc();
{
config_content_set_config_file( content , filename );
config_content_set_invoke_path( content );
config_parse__(config , content , path_stack , filename , comment_string , include_kw , define_kw , unrecognized_behaviour , validate);
}
config_parse__(config , content , path_stack , filename , comment_string , include_kw , define_kw , unrecognized_behaviour , validate);
path_stack_free( path_stack );
} else {
char * error_message = util_alloc_sprintf("Could not open file:%s for parsing" , filename);
@ -709,6 +705,13 @@ void config_install_message(config_parser_type * config , const char * kw, const
}
void config_parser_deprecate(config_parser_type * config , const char * kw, const char * msg) {
if (config_has_schema_item(config , kw)) {
config_schema_item_type * item = config_get_schema_item(config , kw);
config_schema_item_set_deprecated(item , msg);
} else
util_abort("%s: item:%s not recognized \n",__func__ , kw);
}
#include "config_get.c"
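config_parser_deprecate() only marks the schema item; the message itself is appended to the content's warning list by config_content_add_item() when a parsed file actually uses the keyword, and can be read back with config_content_get_warnings(). A rough sketch, with the keyword name and message invented for illustration:

#include <stdio.h>
#include <ert/util/stringlist.h>
#include <ert/config/config_parser.h>
#include <ert/config/config_content.h>

/* Call after 'OLD_KEYWORD' has been added to the parser schema;
   config_parser_deprecate() aborts for unknown keywords. */
static void register_deprecations( config_parser_type * parser ) {
   config_parser_deprecate( parser , "OLD_KEYWORD" ,
                            "OLD_KEYWORD is deprecated - use NEW_KEYWORD instead" );
}

/* Call on the content returned from config_parse(); only deprecated keywords
   actually present in the parsed file have appended a message. */
static void print_deprecation_warnings( const config_content_type * content ) {
   const stringlist_type * warnings = config_content_get_warnings( content );
   for (int i = 0; i < stringlist_get_size( warnings ); i++)
      fprintf( stderr , "** Warning: %s \n" , stringlist_iget( warnings , i ));
}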

View File

@ -53,7 +53,11 @@ config_path_elm_type * config_path_elm_alloc( const config_root_path_type * root
path_elm->abs_path = util_alloc_string_copy( path );
path_elm->rel_path = util_alloc_rel_path( config_root_path_get_abs_path(root_path) , path );
} else {
path_elm->abs_path = util_alloc_filename( config_root_path_get_abs_path(root_path) , path , NULL );
{
char * tmp_abs_path = util_alloc_filename( config_root_path_get_abs_path(root_path) , path , NULL );
path_elm->abs_path = util_alloc_abs_path( tmp_abs_path );
free( tmp_abs_path );
}
path_elm->rel_path = util_alloc_string_copy( path );
}
}

View File

@ -92,6 +92,10 @@ const char * config_root_path_get_abs_path( const config_root_path_type * root_p
}
void config_root_path_printf( const config_root_path_type * root_path ) {
printf("input_path : %s \n" , root_path->input_path);
printf("abs_path : %s \n" , root_path->abs_path);
printf("rel_path : %s \n" , root_path->rel_path);
}

View File

@ -95,6 +95,8 @@ struct config_schema_item_struct {
hash_type * required_children_value; /* A list of item's which must also be set - depending on the value of this item. (can be NULL) */
validate_type * validate; /* Information need during validation. */
bool expand_envvar; /* Should environment variables like $HOME be expanded?*/
bool deprecated;
char * deprecate_msg;
};
@ -225,6 +227,8 @@ config_schema_item_type * config_schema_item_alloc(const char * kw , bool requir
item->kw = util_alloc_string_copy(kw);
item->required_set = required;
item->deprecated = false;
item->deprecate_msg = NULL;
item->required_children = NULL;
item->required_children_value = NULL;
item->expand_envvar = true; /* Default is to expand $VAR expressions; can be turned off with
@ -246,6 +250,29 @@ static char * __alloc_relocated__(const config_path_elm_type * path_elm , const
return file;
}
bool config_schema_item_valid_string(config_item_types value_type , const char * value)
{
switch(value_type) {
case(CONFIG_ISODATE):
return util_sscanf_isodate( value , NULL );
break;
case(CONFIG_INT):
return util_sscanf_int( value , NULL );
break;
case(CONFIG_FLOAT):
return util_sscanf_double( value , NULL );
break;
case(CONFIG_BOOL):
return util_sscanf_bool( value , NULL );
break;
case(CONFIG_BYTESIZE):
return util_sscanf_bytesize( value , NULL);
break;
default:
return true;
}
}
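/*
  For example (editor illustration): config_schema_item_valid_string( CONFIG_INT , "100" )
  returns true, while config_schema_item_valid_string( CONFIG_INT , "100x" ) returns
  false because util_sscanf_int() rejects the trailing characters. Types without a
  case above - e.g. CONFIG_STRING - accept any string.
*/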
bool config_schema_item_validate_set(const config_schema_item_type * item , stringlist_type * token_list , const char * config_file, const config_path_elm_type * path_elm , config_error_type * error_list) {
bool OK = true;
@ -401,6 +428,7 @@ bool config_schema_item_validate_set(const config_schema_item_type * item , stri
void config_schema_item_free( config_schema_item_type * item) {
free(item->kw);
free( item->deprecate_msg );
if (item->required_children != NULL) stringlist_free(item->required_children);
if (item->required_children_value != NULL) hash_free(item->required_children_value);
validate_free(item->validate);
@ -536,6 +564,19 @@ stringlist_type * config_schema_item_get_required_children_value(const config_sc
return hash_safe_get( item->required_children_value , value );
}
bool config_schema_item_is_deprecated( const config_schema_item_type * item) {
return item->deprecated;
}
const char * config_schema_item_get_deprecate_msg( const config_schema_item_type * item) {
return item->deprecate_msg;
}
void config_schema_item_set_deprecated( config_schema_item_type * item , const char * msg) {
item->deprecated = true;
item->deprecate_msg = util_realloc_string_copy(item->deprecate_msg, msg);
}
/*****************************************************************/
/* Small functions to support enum introspection. */

View File

@ -0,0 +1,340 @@
/*
Copyright (C) 2017 Statoil ASA, Norway.
The file 'config_settings.c' is part of ERT - Ensemble based Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#include <stdlib.h>
#include <ert/util/type_macros.h>
#include <ert/util/util.h>
#include <ert/util/hash.h>
#include <ert/config/config_schema_item.h>
#include <ert/config/config_settings.h>
#define CONFIG_SETTINGS_TYPE_ID 68621527
#define SETTING_NODE_TYPE_ID 76254096
struct config_settings_struct {
UTIL_TYPE_ID_DECLARATION;
char * root_key;
hash_type * settings;
};
/*****************************************************************/
typedef struct setting_node_struct setting_node_type;
struct setting_node_struct {
UTIL_TYPE_ID_DECLARATION;
config_item_types value_type;
char * key;
char * string_value;
};
static void setting_node_assert_type( const setting_node_type * node , config_item_types expected_type) {
if (node->value_type != expected_type)
util_abort("%s: internal error. Asked for type:%d is of type:%d \n",__func__ , expected_type , node->value_type);
}
UTIL_SAFE_CAST_FUNCTION( setting_node , SETTING_NODE_TYPE_ID )
static setting_node_type * setting_node_alloc( const char * key, config_item_types value_type, const char * initial_value) {
if (!config_schema_item_valid_string( value_type , initial_value))
return NULL;
{
setting_node_type * node = util_malloc( sizeof * node );
UTIL_TYPE_ID_INIT( node , SETTING_NODE_TYPE_ID );
node->value_type = value_type;
node->string_value = util_alloc_string_copy( initial_value );
node->key = util_alloc_string_copy( key );
return node;
}
}
static void setting_node_free( setting_node_type * node ) {
free( node->key );
free( node->string_value );
free( node );
}
static void setting_node_free__( void * arg ) {
setting_node_type * node = setting_node_safe_cast( arg );
setting_node_free( node );
}
static bool setting_node_set_value( setting_node_type * node, const char * value) {
if (config_schema_item_valid_string(node->value_type , value)) {
node->string_value = util_realloc_string_copy( node->string_value , value );
return true;
} else
return false;
}
static void setting_node_set_string_value( setting_node_type * node, const char * value) {
setting_node_assert_type( node , CONFIG_STRING );
setting_node_set_value( node , value );
}
static void setting_node_set_int_value( setting_node_type * node, int value) {
setting_node_assert_type( node , CONFIG_INT );
{
char * string_value = util_alloc_sprintf("%d" , value);
setting_node_set_value( node , string_value );
free( string_value );
}
}
static void setting_node_set_double_value( setting_node_type * node, double value) {
setting_node_assert_type( node , CONFIG_FLOAT );
{
char * string_value = util_alloc_sprintf("%g" , value);
setting_node_set_value( node , string_value );
free( string_value );
}
}
static void setting_node_set_bool_value( setting_node_type * node, bool value) {
setting_node_assert_type( node , CONFIG_BOOL );
if (value)
setting_node_set_value( node , "True");
else
setting_node_set_value( node , "False");
}
static const char * setting_node_get_value( const setting_node_type * node) {
return node->string_value;
}
static const char * setting_node_get_string_value( const setting_node_type * node) {
setting_node_assert_type( node , CONFIG_STRING );
return node->string_value;
}
static int setting_node_get_int_value( const setting_node_type * node) {
setting_node_assert_type( node , CONFIG_INT );
return strtol( node->string_value , NULL , 10 );
}
static double setting_node_get_double_value( const setting_node_type * node) {
setting_node_assert_type( node , CONFIG_FLOAT );
return strtod( node->string_value , NULL );
}
static bool setting_node_get_bool_value( const setting_node_type * node) {
bool bool_value;
setting_node_assert_type( node , CONFIG_BOOL );
util_sscanf_bool( node->string_value , &bool_value );
return bool_value;
}
/*****************************************************************/
config_settings_type * config_settings_alloc( const char * root_key ) {
config_settings_type * settings = util_malloc( sizeof * settings );
settings->root_key = util_alloc_string_copy( root_key );
settings->settings = hash_alloc();
return settings;
}
void config_settings_free( config_settings_type * settings) {
free( settings->root_key );
hash_free( settings->settings );
free( settings );
}
bool config_settings_add_setting(config_settings_type * settings , const char* key, config_item_types value_type , const char* initial_value) {
setting_node_type * node = setting_node_alloc( key , value_type , initial_value);
if (node) {
hash_insert_hash_owned_ref( settings->settings , key , node , setting_node_free__ );
return true;
} else
return false;
}
void config_settings_add_bool_setting(config_settings_type * settings , const char* key, bool initial_value) {
if (initial_value)
config_settings_add_setting( settings , key , CONFIG_BOOL , "True");
else
config_settings_add_setting( settings , key , CONFIG_BOOL , "False");
}
void config_settings_add_int_setting(config_settings_type * settings , const char* key, int initial_value) {
char * string_value = util_alloc_sprintf("%d" , initial_value);
config_settings_add_setting( settings , key , CONFIG_INT , string_value);
free( string_value );
}
void config_settings_add_double_setting(config_settings_type * settings , const char* key, double initial_value) {
char * string_value = util_alloc_sprintf("%g" , initial_value);
config_settings_add_setting( settings , key , CONFIG_FLOAT , string_value);
free( string_value );
}
void config_settings_add_string_setting(config_settings_type * settings , const char* key, const char * initial_value) {
config_settings_add_setting( settings , key , CONFIG_STRING , initial_value);
}
bool config_settings_has_key( const config_settings_type * settings , const char * key) {
return hash_has_key( settings->settings , key );
}
static setting_node_type * config_settings_get_node( const config_settings_type * config_settings, const char * key){
return hash_get( config_settings->settings , key );
}
const char * config_settings_get_value( const config_settings_type * config_settings , const char * key) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return setting_node_get_value( node );
}
const char * config_settings_get_string_value( const config_settings_type * config_settings , const char * key) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return setting_node_get_string_value( node );
}
double config_settings_get_double_value( const config_settings_type * config_settings , const char * key) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return setting_node_get_double_value( node );
}
int config_settings_get_int_value( const config_settings_type * config_settings , const char * key) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return setting_node_get_int_value( node );
}
bool config_settings_get_bool_value( const config_settings_type * config_settings , const char * key) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return setting_node_get_bool_value( node );
}
config_item_types config_settings_get_value_type( const config_settings_type * config_settings , const char * key) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return node->value_type;
}
bool config_settings_set_value( const config_settings_type * config_settings , const char * key, const char * value) {
if (config_settings_has_key( config_settings , key )) {
setting_node_type * node = config_settings_get_node( config_settings , key );
return setting_node_set_value( node, value );
}
return false;
}
bool config_settings_set_int_value( const config_settings_type * config_settings , const char * key, int value) {
if (config_settings_has_key( config_settings , key )) {
setting_node_type * node = config_settings_get_node( config_settings , key );
setting_node_set_int_value( node, value );
return true;
}
return false;
}
bool config_settings_set_double_value( const config_settings_type * config_settings , const char * key, double value) {
if (config_settings_has_key( config_settings , key )) {
setting_node_type * node = config_settings_get_node( config_settings , key );
setting_node_set_double_value( node, value );
return true;
}
return false;
}
bool config_settings_set_bool_value( const config_settings_type * config_settings , const char * key, bool value) {
if (config_settings_has_key( config_settings , key )) {
setting_node_type * node = config_settings_get_node( config_settings , key );
setting_node_set_bool_value( node, value );
return true;
}
return false;
}
bool config_settings_set_string_value( const config_settings_type * config_settings , const char * key, const char * value) {
if (config_settings_has_key( config_settings , key )) {
setting_node_type * node = config_settings_get_node( config_settings , key );
setting_node_set_string_value( node, value );
return true;
}
return false;
}
void config_settings_init_parser__( const char * root_key , config_parser_type * config , bool required) {
config_schema_item_type * item = config_add_schema_item(config, root_key , required);
config_schema_item_set_argc_minmax(item, 2, 2);
}
void config_settings_init_parser( const config_settings_type * config_settings, config_parser_type * config , bool required) {
config_settings_init_parser__( config_settings->root_key , config , required );
}
void config_settings_apply(config_settings_type * config_settings , const config_content_type * config ) {
for (int i = 0; i < config_content_get_occurences(config, config_settings->root_key); i++) {
const stringlist_type * tokens = config_content_iget_stringlist_ref(config, config_settings->root_key , i);
const char * setting = stringlist_iget(tokens, 0);
const char * value = stringlist_iget(tokens, 1);
bool set_ok = config_settings_set_value( config_settings , setting , value );
if (!set_ok)
fprintf(stderr," ** Warning: failed to apply CONFIG_SETTING %s=%s \n",setting,value);
}
}
stringlist_type * config_settings_alloc_keys( const config_settings_type * config_settings ) {
return hash_alloc_stringlist(config_settings->settings);
}
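config_settings_init_parser() and config_settings_apply() are the glue towards the config parser: the root key becomes a two-argument (KEY VALUE) schema item, and after parsing every occurrence is copied back into the settings object. A sketch of that wiring, with the root key invented for illustration; in real use the two calls straddle config_parse():

#include <ert/config/config_settings.h>

static config_settings_type * alloc_plot_settings( config_parser_type * parser ) {
   config_settings_type * settings = config_settings_alloc( "PLOT_SETTING" );
   config_settings_add_int_setting( settings , "WIDTH" , 1024 );
   /* Registers 'PLOT_SETTING KEY VALUE' as a (non required) schema item. */
   config_settings_init_parser( settings , parser , false );
   return settings;
}

static void apply_plot_settings( config_settings_type * settings , const config_content_type * content ) {
   /* Copies every 'PLOT_SETTING KEY VALUE' occurrence from the parsed content;
      unknown or invalid settings only give a warning on stderr. */
   config_settings_apply( settings , content );
}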

View File

@ -22,7 +22,7 @@
void test_create() {
config_content_type * content = config_content_alloc( );
config_content_type * content = config_content_alloc( "filename" );
test_assert_true( config_content_is_instance( content ) );
config_content_free( content );
}

View File

@ -72,5 +72,10 @@ if (BUILD_ECL_SUMMARY)
install(CODE "EXECUTE_PROCESS(COMMAND chmod g+w ${destination}/ecl_summary)")
endif()
endif()
include(GNUInstallDirs)
install(FILES ${PROJECT_SOURCE_DIR}/docs/man/man1/ecl_summary.1
DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
endif()

View File

@ -48,7 +48,7 @@ int main(int argc, char ** argv) {
char * EGRID_file = util_alloc_filename( path , basename , "EGRID");
printf("Writing file: %s ...",EGRID_file); fflush(stdout);
ecl_grid_fwrite_EGRID2( ecl_grid , EGRID_file, ERT_ECL_METRIC_UNITS);
ecl_grid_fwrite_EGRID2( ecl_grid , EGRID_file, ECL_METRIC_UNITS);
free( EGRID_file );
}

View File

@ -27,7 +27,7 @@ extern "C" {
typedef struct ecl_coarse_cell_struct ecl_coarse_cell_type;
bool ecl_coarse_cell_equal( const ecl_coarse_cell_type * coarse_cell1 , const ecl_coarse_cell_type * coarse_cell2);
ecl_coarse_cell_type * ecl_coarse_cell_alloc( );
ecl_coarse_cell_type * ecl_coarse_cell_alloc( void );
void ecl_coarse_cell_update( ecl_coarse_cell_type * coarse_cell , int i , int j , int k , int global_index );
void ecl_coarse_cell_free( ecl_coarse_cell_type * coarse_cell );
void ecl_coarse_cell_free__( void * arg );

View File

@ -33,7 +33,7 @@ extern "C" {
typedef struct ecl_file_kw_struct ecl_file_kw_type;
typedef struct inv_map_struct inv_map_type;
inv_map_type * inv_map_alloc();
inv_map_type * inv_map_alloc(void);
ecl_file_kw_type * inv_map_get_file_kw( inv_map_type * inv_map , const ecl_kw_type * ecl_kw );
void inv_map_free( inv_map_type * map );

View File

@ -30,6 +30,7 @@ extern "C" {
bool * ecl_grav_common_alloc_aquifer_cell( const ecl_grid_cache_type * grid_cache , const ecl_file_type * init_file);
double ecl_grav_common_eval_biot_savart( const ecl_grid_cache_type * grid_cache , ecl_region_type * region , const bool * aquifer , const double * weight , double utm_x , double utm_y , double depth);
double ecl_grav_common_eval_geertsma( const ecl_grid_cache_type * grid_cache , ecl_region_type * region , const bool * aquifer , const double * weight , double utm_x , double utm_y , double depth, double poisson_ratio, double seabed);
#ifdef __cplusplus
}

View File

@ -80,6 +80,7 @@ extern "C" {
double ecl_grid_get_cell_volume1( const ecl_grid_type * ecl_grid, int global_index );
double ecl_grid_get_cell_volume1_tskille( const ecl_grid_type * ecl_grid, int global_index );
double ecl_grid_get_cell_volume3( const ecl_grid_type * ecl_grid, int i , int j , int k);
double ecl_grid_get_cell_volume1A( const ecl_grid_type * ecl_grid, int active_index );
bool ecl_grid_cell_contains1(const ecl_grid_type * grid , int global_index , double x , double y , double z);
bool ecl_grid_cell_contains3(const ecl_grid_type * grid , int i , int j ,int k , double x , double y , double z);
int ecl_grid_get_global_index_from_xyz(ecl_grid_type * grid , double x , double y , double z , int start_index);
@ -194,13 +195,15 @@ extern "C" {
void ecl_grid_fwrite_dims( const ecl_grid_type * grid , fortio_type * init_file, ert_ecl_unit_enum output_unit);
void ecl_grid_fwrite_depth( const ecl_grid_type * grid , fortio_type * init_file , ert_ecl_unit_enum ouput_unit);
void ecl_grid_fwrite_EGRID( ecl_grid_type * grid , const char * filename, bool metric_output);
void ecl_grid_fwrite_EGRID2( ecl_grid_type * grid , const char * filename, ert_ecl_unit_enum output_unit);
void ecl_grid_fwrite_GRID( const ecl_grid_type * grid , const char * filename);
void ecl_grid_fwrite_GRID2( const ecl_grid_type * grid , const char * filename, ert_ecl_unit_enum output_unit);
void ecl_grid_fprintf_grdecl( ecl_grid_type * grid , FILE * stream );
void ecl_grid_fwrite_EGRID_header__( int dims[3] , const float mapaxes[6], int dualp_flag , fortio_type * fortio);
void ecl_grid_fwrite_EGRID_header( int dims[3] , const float mapaxes[6], fortio_type * fortio);
void ecl_grid_fprintf_grdecl2( ecl_grid_type * grid , FILE * stream , ert_ecl_unit_enum output_unit);
int ecl_grid_zcorn_index(const ecl_grid_type * grid , int i, int j , int k , int c);
ecl_grid_type * ecl_grid_alloc_EGRID(const char * grid_file, bool apply_mapaxes );

View File

@ -1,20 +1,20 @@
/*
Copyright (C) 2011 Statoil ASA, Norway.
Copyright (C) 2011 Statoil ASA, Norway.
The file 'ecl_grid_cache.h' is part of ERT - Ensemble based
Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#ifndef ERT_ECL_GRID_CACHE_H
@ -25,9 +25,9 @@
#ifdef __cplusplus
extern "C" {
#endif
typedef struct ecl_grid_cache_struct ecl_grid_cache_type;
ecl_grid_cache_type * ecl_grid_cache_alloc( const ecl_grid_type * grid );
int ecl_grid_cache_get_size( const ecl_grid_cache_type * grid_cache );
@ -36,8 +36,9 @@ extern "C" {
const double * ecl_grid_cache_get_xpos( const ecl_grid_cache_type * grid_cache );
const double * ecl_grid_cache_get_ypos( const ecl_grid_cache_type * grid_cache );
const double * ecl_grid_cache_get_zpos( const ecl_grid_cache_type * grid_cache );
const double * ecl_grid_cache_get_volume( const ecl_grid_cache_type * grid_cache );
void ecl_grid_cache_free( ecl_grid_cache_type * grid_cache );
#ifdef __cplusplus
}

View File

@ -28,9 +28,9 @@ extern "C" {
#include <ert/ecl/fortio.h>
#include <ert/ecl/ecl_kw.h>
#include <ert/ecl/ecl_grid.h>
#include <ert/ecl/ecl_util.h>
void ecl_init_file_fwrite_header( fortio_type * fortio , const ecl_grid_type * grid , const ecl_kw_type * poro , int phases , time_t start_date);
void ecl_init_file_fwrite_header( fortio_type * fortio , const ecl_grid_type * grid , const ecl_kw_type * poro , ert_ecl_unit_enum unit_system, int phases , time_t start_date);
#ifdef __cplusplus

View File

@ -37,6 +37,12 @@ extern "C" {
typedef struct ecl_kw_struct ecl_kw_type;
typedef enum {
ECL_KW_READ_OK = 0,
ECL_KW_READ_FAIL = 1,
ECL_KW_READ_SKIP = 2
} ecl_read_status_enum;
/*
The size of an ecl_kw instance is denoted with an integer. The
choice of int to store the size obviously limits the maximum size to
@ -58,11 +64,9 @@ extern "C" {
ecl_type_enum ecl_kw_get_type(const ecl_kw_type *);
const char * ecl_kw_get_header8(const ecl_kw_type *);
const char * ecl_kw_get_header(const ecl_kw_type * ecl_kw );
ecl_kw_type * ecl_kw_alloc_empty();
bool ecl_kw_fread_header(ecl_kw_type *, fortio_type *);
ecl_kw_type * ecl_kw_alloc_empty(void);
ecl_read_status_enum ecl_kw_fread_header(ecl_kw_type *, fortio_type *);
void ecl_kw_set_header_name(ecl_kw_type * , const char * );
void ecl_kw_set_header(ecl_kw_type * , const char * , int , const char *);
void ecl_kw_set_header_alloc(ecl_kw_type * , const char * , int , const char *);
bool ecl_kw_fseek_kw(const char * , bool , bool , fortio_type *);
bool ecl_kw_fseek_last_kw(const char * , bool , fortio_type *);
void ecl_kw_inplace_update_file(const ecl_kw_type * , const char * , int ) ;
@ -81,6 +85,7 @@ extern "C" {
ecl_kw_type * ecl_kw_alloc_sub_copy( const ecl_kw_type * src, const char * new_kw , int offset , int count);
const void * ecl_kw_copyc__(const void *);
ecl_kw_type * ecl_kw_alloc_slice_copy( const ecl_kw_type * src, int index1, int index2, int stride);
void ecl_kw_resize( ecl_kw_type * ecl_kw, int new_size);
//void * ecl_kw_get_data_ref(const ecl_kw_type *);
void * ecl_kw_alloc_data_copy(const ecl_kw_type * );
void ecl_kw_memcpy(ecl_kw_type *, const ecl_kw_type *);
@ -124,7 +129,6 @@ extern "C" {
void ecl_kw_fskip_header( fortio_type * fortio);
bool ecl_kw_is_grdecl_file(FILE * );
bool ecl_kw_is_kw_file(fortio_type * fortio);
int ecl_kw_element_sum_int( const ecl_kw_type * ecl_kw );

View File

@ -31,6 +31,257 @@ extern "C" {
#define LGRHEADI_LGR_NR_INDEX 0
#define LGRJOIN_KW "LGRJOIN"
/*
The tables in the INIT file are organized with one large data keyword
'TAB' and one keyword 'TABDIMS' which describes the layout of the
data in the TAB keyword.
For each of the tables there is a 'TABDIMS_xxx_OFFSET_ITEM' which points
to an element in the 'TABDIMS' vector which contains the starting
address of table 'xxx' in the 'TAB' keyword, then there are one or
several integer values describing how many values/tables there
are. In addition there is an assumed number of columns which is not
explicitly stored in the TABDIMS keyword.
The input format is quite flexible with respect to the size of the
individual tables and subtables, but the representation in the INIT
file is based on fixed length columns and equal sized tables, where
all inactive elements have the default value 2e20.
Assume the following PVTO input:
PVTO
1.55203 1.00000 1.15907572 0.64345
25.00000 1.15319788 0.67619
50.00000 1.14759314 0.70959 /
28.04570 25.00000 1.17415042 0.63294
50.00000 1.16792401 0.66638
75.00000 1.16222385 0.69918
100.00000 1.15212320 0.76297 /
35.62113 50.00000 1.19208190 0.61538
75.00000 1.18568689 0.64790
100.00000 1.17982339 0.67985
125.00000 1.17441865 0.71127
150.00000 1.16941365 0.74217 /
/
20.66588 1.00000 1.15642614 0.57010
25.00000 1.15051027 0.59831
50.00000 1.14487540 0.62703 /
27.65815 25.00000 1.17402576 0.56928
50.00000 1.16771923 0.59875
75.00000 1.16195281 0.62760
100.00000 1.15665041 0.65588
/
This is the PVTO table, and it is described by the constants
TABDIMS_IBPVTO_OFFSET_ITEM, TABDIMS_JBPVTO_OFFSET_ITEM,
TABDIMS_NRPVTO_ITEM, TABDIMS_NPPVTO_ITEM and TABDIMS_NTPVTO_ITEM. Observe the following:
1. There are 3 GOR values in the first table and 2 in the second,
this is the number of composition nodes -
TABDIMS_NRPVTO_ITEM. Since there are 3 in the first table and 2 in
the second the value of TABDIMS[ TABDIMS_NRPVTO_ITEM ] >= 3.
2. The GOR node values (1.55203, 28.04570, 35.62113) and (20.66588,
27.65815) are stored separately at offset
TABDIMS[ TABDIMS_JBPVTO_OFFSET_ITEM ] in the TAB array.
3. The length of the longest column is 5 elements so the value of
TABDIMS[ TABDIMS_NPPVTO_ITEM ] >= 5.
4. The actual table data starts at offset TABDIMS[
TABDIMS_IBPVTO_ITEM] in the TAB table.
When packing the actual data into the TAB array the indices are
running as row,GOR,table,column - with row fastest. All in all the
linear vector for this PVTO table will look like:
1.00000 \ \ \ \
25.00000 | | | |
50.00000 | NPPVTO = 5 | | |
* | | | |
* | | | |
-----------/ | | |
25.00000 | | |
50.00000 | | |
75.00000 | NRPVTO = 3 | |
100.00000 | | |
* | | |
----------- | | |
50.00000 | | |
75.00000 | | |
100.00000 | | |
125.00000 | | |
150.00000 | | |
=========== / | NTPVTO = 2 |
1.00000 | |
25.00000 | |
50.00000 | |
* | |
* | | Three columns -
----------- | | (not in TABDIMS)
25.00000 | |
50.00000 | |
75.00000 | |
100.00000 | |
* | |
----------- | |
* | |
* | |
* | |
* | |
* | |
@@@@@@@@@@@ / |
1.15907572 |
1.15319788 |
1.14759314 |
* |
* |
----------- |
1.17415042 |
1.16792401 |
1.16222385 |
1.15212320 |
* |
----------- |
1.19208190 |
1.18568689 |
1.17982339 |
1.17441865 |
1.16941365 |
=========== |
1.15642614 |
1.15051027 |
1.14487540 |
* |
* |
----------- |
1.17402576 |
1.16771923 |
1.16195281 |
1.15665041 |
* |
----------- |
* |
* |
* |
* |
* |
@@@@@@@@@@@ |
0.64345 |
0.67619 |
0.70959 |
* |
* |
----------- |
0.63294 |
0.66638 |
0.69918 |
0.76297 |
* |
----------- |
0.61538 |
0.64790 |
0.67985 |
0.71127 |
0.74217 |
=========== |
0.57010 |
0.59831 |
0.62703 |
* |
* |
----------- |
0.56928 |
0.59875 |
0.62760 |
0.65588 |
* |
----------- |
* |
* |
* |
* |
* |
/
In this vector representation the different composition subtable
columns are separated by '----', the different main tables are
separated by '======' and the columns are separated by '@@@@'. Default
values (2e20) are denoted with '*'.
*/
#define TABDIMS_SIZE 100
#define TABDIMS_TAB_SIZE_ITEM 0
#define TABDIMS_IBROCK_OFFSET_ITEM 1
#define TABDIMS_NTROCK_ITEM 2
#define TABDIMS_IBROCC_OFFSET_ITEM 3
#define TABDIMS_NPROCC_ITEM 4
#define TABDIMS_IBPVTO_OFFSET_ITEM 6
#define TABDIMS_JBPVTO_OFFSET_ITEM 7
#define TABDIMS_NRPVTO_ITEM 8
#define TABDIMS_NPPVTO_ITEM 9
#define TABDIMS_NTPVTO_ITEM 10
#define TABDIMS_IBPVTW_OFFSET_ITEM 11
#define TABDIMS_NTPVTW_ITEM 12
#define TABDIMS_IBPVTG_OFFSET_ITEM 13
#define TABDIMS_JBPVTG_OFFSET_ITEM 14
#define TABDIMS_NRPVTG_ITEM 15
#define TABDIMS_NPPVTG_ITEM 16
#define TABDIMS_NTPVTG_ITEM 17
#define TABDIMS_IBDENS_OFFSET_ITEM 18
#define TABDIMS_NTDENS_ITEM 19
#define TABDIMS_IBSWFN_OFFSET_ITEM 20
#define TABDIMS_NSSWFN_ITEM 21
#define TABDIMS_NTSWFN_ITEM 22
#define TABDIMS_IBSGFN_OFFSET_ITEM 23
#define TABDIMS_NSSGFN_ITEM 24
#define TABDIMS_NTSGFN_ITEM 25
#define TABDIMS_IBSOFN_OFFSET_ITEM 26
#define TABDIMS_IBSWCO_OFFSET_ITEM 27
#define TABDIMS_NSSOFN_ITEM 28
#define TABDIMS_NTSOFN_ITEM 29
#define TABDIMS_IBVETB_OFFSET_ITEM 40
#define TABDIMS_NSVETB_ITEM 41
#define TABDIMS_NTVETB_ITEM 42
#define TABDIMS_IBTHPR_OFFSET_ITEM 43
#define TABDIMS_IBSLIM_ITEM 44
#define TABDIMS_NSENDP_ITEM 45
#define TABDIMS_NTENDP_ITEM 46
#define TABDIMS_IBRTEM_OFFSET_ITEM 47
#define TABDIMS_IBCTOL_ITEM 48
#define TABDIMS_IBLANG_OFFSET_ITEM 50 // LANGMUIR Table
#define TABDIMS_NCLANG_ITEM 51 // LANGMUIR Table
#define TABDIMS_NSLANG_ITEM 52 // LANGMUIR Table
#define TABDIMS_NTLANG_ITEM 53 // LANGMUIR Table
#define TABDIMS_IBLNG2_OFFSET_ITEM 54 // LANGSOLV Table
#define TABDIMS_IBCADP_OFFSET_ITEM 55 // COALPP Table
#define TABDIMS_IBCADS_OFFSET_ITEM 56 // COALADS Table
#define TABDIMS_IBROCP_OFFSET_ITEM 57 // ROCKPAMA Table
#define TABDIMS_NTRPMA_ITEM 58 // ROCKPAMA Table
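/*
  Illustration (editor sketch, not part of the original header): with the
  row-fastest ordering described above, a single PVTO value can be located
  in the TAB keyword roughly as follows, where 'tabdims' is the integer
  data of the TABDIMS keyword, all indices are zero based and the stored
  offset is assumed to be 1 based:

     int nppvto = tabdims[ TABDIMS_NPPVTO_ITEM ];               // max rows per subtable
     int nrpvto = tabdims[ TABDIMS_NRPVTO_ITEM ];               // max composition nodes
     int ntpvto = tabdims[ TABDIMS_NTPVTO_ITEM ];               // number of main tables
     int offset = tabdims[ TABDIMS_IBPVTO_OFFSET_ITEM ] - 1;
     int index  = offset + row + nppvto*( gor + nrpvto*( table + ntpvto*column ));

  Inactive positions hold the default value 2e20, as in the layout sketched
  above.
*/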
/*
Observe that many of the elements in the INTEHEAD keyword is shared
between the restart and init files. The ones listed below here are
@ -56,6 +307,8 @@ extern "C" {
#define INTEHEAD_ECLIPSE100_VALUE 100
#define INTEHEAD_ECLIPSE300_VALUE 300
#define INTEHEAD_ECLIPSE300THERMAL_VALUE 500
#define INTEHEAD_INTERSECT_VALUE 700
#define INTEHEAD_FRONTSIM_VALUE 800
#define INTEHEAD_INIT_SIZE 95
#define INTEHEAD_RESTART_SIZE 180
@ -103,10 +356,12 @@ extern "C" {
#define STARTSOL_KW "STARTSOL"
#define ENDSOL_KW "ENDSOL"
#define XWEL_KW "XWEL"
#define IWEL_KW "IWEL"
#define ZWEL_KW "ZWEL"
#define ICON_KW "ICON"
#define SCON_KW "SCON"
#define XCON_KW "XCON"
#define ISEG_KW "ISEG"
#define RSEG_KW "RSEG"
@ -128,6 +383,7 @@ extern "C" {
#define INTEHEAD_NWELLS_INDEX 16 // Number of wells
#define INTEHEAD_NIWELZ_INDEX 24 // Number of elements pr. well in the IWEL array.
#define INTEHEAD_NXWELZ_INDEX 26 // Number of elements pr. well in the XWEL array.
#define INTEHEAD_NZWELZ_INDEX 27 // Number of 8 character words pr. well
#define INTEHEAD_NCWMAX_INDEX 17 // Maximum number of completions per well
@ -137,7 +393,7 @@ extern "C" {
#define INTEHEAD_NXWELZ_INDEX 26
#define INTEHEAD_NICONZ_INDEX 32 // Number of elements pr completion in the ICON array.
#define INTEHEAD_NSCONZ_INDEX 33 // Number of elements pr completion in the SCON array
#define INTEHEAD_NXCONZ_INDEX 34
#define INTEHEAD_NXCONZ_INDEX 34 // Number of elements pr completion in the XCON array
#define INTEHEAD_NIGRPZ_INDEX 36 // Number of elements pr group in the IGRP array.

View File

@ -41,6 +41,8 @@ extern "C" {
int version; // 100, 300, 500 (Eclipse300-Thermal)
int phase_sum; // Oil = 1 Gas = 2 Water = 4
ert_ecl_unit_enum unit_system;
int nx;
int ny;
int nz;
@ -56,11 +58,13 @@ extern "C" {
int nwells; // Number of wells
int niwelz; // Number of elements pr well in IWEL array
int nzwelz; // Number of 8 character words pr well in ZWEL array
int nxwelz; // Number of elements pr well in XWEL array.
// Connection properties
int niconz; // Number of elements per completion in ICON array
int ncwmax; // Maximum number of completions per well
int nsconz; // Number of elements per completion in SCON array
int nxconz; // Number of elements per completion in XCON array
// Segment properties
int nisegz; // Number of entries pr segment in the ISEG array
@ -83,7 +87,7 @@ extern "C" {
void ecl_rsthead_free( ecl_rsthead_type * rsthead );
ecl_rsthead_type * ecl_rsthead_alloc_from_kw( int report_step , const ecl_kw_type * intehead_kw , const ecl_kw_type * doubhead_kw , const ecl_kw_type * logihead_kw );
ecl_rsthead_type * ecl_rsthead_alloc( const ecl_file_view_type * rst_file , int report_step);
ecl_rsthead_type * ecl_rsthead_alloc_empty();
ecl_rsthead_type * ecl_rsthead_alloc_empty(void);
time_t ecl_rsthead_date( const ecl_kw_type * intehead_kw );
void ecl_rsthead_fprintf( const ecl_rsthead_type * header , FILE * stream);
void ecl_rsthead_fprintf_struct( const ecl_rsthead_type * header , FILE * stream);

View File

@ -43,6 +43,8 @@ typedef struct ecl_smspec_struct ecl_smspec_type;
the functions smspec_node_alloc(), ecl_smsepec_fread_header() and
ecl_smspec_install_gen_key() must be updated.
*/
int * ecl_smspec_alloc_mapping( const ecl_smspec_type * self, const ecl_smspec_type * other);
const int_vector_type * ecl_smspec_get_index_map( const ecl_smspec_type * smspec );
void ecl_smspec_index_node( ecl_smspec_type * ecl_smspec , smspec_node_type * smspec_node);
void ecl_smspec_insert_node(ecl_smspec_type * ecl_smspec, smspec_node_type * smspec_node);
@ -126,7 +128,7 @@ typedef struct ecl_smspec_struct ecl_smspec_type;
stringlist_type * ecl_smspec_alloc_group_list( const ecl_smspec_type * smspec , const char * pattern);
stringlist_type * ecl_smspec_alloc_well_var_list( const ecl_smspec_type * smspec );
const char * ecl_smspec_get_simulation_path(const ecl_smspec_type * ecl_smspec);
const stringlist_type * ecl_smspec_get_restart_list( const ecl_smspec_type * ecl_smspec);
const char * ecl_smspec_get_restart_case( const ecl_smspec_type * ecl_smspec);
const char * ecl_smspec_get_join_string( const ecl_smspec_type * smspec);
const float_vector_type * ecl_smspec_get_params_default( const ecl_smspec_type * ecl_smspec );
void ecl_smspec_update_wgname( ecl_smspec_type * smspec , smspec_node_type * node , const char * wgname );
@ -139,6 +141,8 @@ typedef struct ecl_smspec_struct ecl_smspec_type;
char * ecl_smspec_alloc_well_key( const ecl_smspec_type * smspec , const char * keyword , const char * wgname);
bool ecl_smspec_equal( const ecl_smspec_type * self , const ecl_smspec_type * other);
#ifdef __cplusplus
}

View File

@ -36,6 +36,8 @@ extern "C" {
#include <ert/ecl/ecl_sum_vector.h>
typedef struct ecl_sum_data_struct ecl_sum_data_type ;
void ecl_sum_data_add_case(ecl_sum_data_type * self, const ecl_sum_data_type * other);
void ecl_sum_data_fwrite_step( const ecl_sum_data_type * data , const char * ecl_case , bool fmt_case , bool unified, int report_step);
void ecl_sum_data_fwrite( const ecl_sum_data_type * data , const char * ecl_case , bool fmt_case , bool unified);
bool ecl_sum_data_fread( ecl_sum_data_type * data , const stringlist_type * filelist);

View File

@ -30,7 +30,8 @@ extern "C" {
typedef struct ecl_sum_tstep_struct ecl_sum_tstep_type;
ecl_sum_tstep_type * ecl_sum_tstep_alloc_remap_copy( const ecl_sum_tstep_type * src , const ecl_smspec_type * new_smspec, float default_value , const int * params_map);
ecl_sum_tstep_type * ecl_sum_tstep_alloc_copy( const ecl_sum_tstep_type * src );
void ecl_sum_tstep_free( ecl_sum_tstep_type * ministep );
void ecl_sum_tstep_free__( void * __ministep);
ecl_sum_tstep_type * ecl_sum_tstep_alloc_from_file(int report_step ,

View File

@ -61,15 +61,39 @@ typedef enum { ECL_OTHER_FILE = 0 ,
i.e. 'REAL', 'INTE', ... , come as 4 character strings.
*/
#define ECL_STRING_LENGTH 8
#define ECL_TYPE_LENGTH 4
#define ECL_KW_HEADER_DATA_SIZE ECL_STRING_LENGTH + ECL_TYPE_LENGTH + 4
#define ECL_STRING10_LENGTH 10 // Initial guess that the C010 type is a 10 character string.
#define ECL_STRING8_LENGTH 8
#define ECL_TYPE_LENGTH 4
#define ECL_KW_HEADER_DATA_SIZE ECL_STRING8_LENGTH + ECL_TYPE_LENGTH + 4
#define ECL_KW_HEADER_FORTIO_SIZE ECL_KW_HEADER_DATA_SIZE + 8
/*****************************************************************/
/*
Observe that these type identifiers are (ab)used in both the rms and
ert/enkf libraries in situations where ECLIPSE is not at all involved.
Regarding the type ECL_C010_TYPE: In an SMSPEC file generated by IX
there was a keyword with header:
NAMES 316 C010
i.e. the type identifier was the string 'C010'. We have not found any
further documentation of that type, but assuming it is a 10
character long string the file loads ok. The ECL_C010_TYPE is
implemented on a very loose basis; it is not clear if the
ten-character-long-string implementation makes sense, or if that
just happened to work for the one example we have encountered so
far. For this reason the support for this keyword type is very
limited:
- keywords of this type are not properly internalized; they are
just cleanly skipped by the ecl_kw/ecl_file implementation when
loading a file.
- it is not possible to instantiate a keyword of this type with
ecl_kw_alloc( ).
- The type is not exported to Python.
*/
typedef enum {
@ -78,7 +102,8 @@ typedef enum {
ECL_DOUBLE_TYPE = 2,
ECL_INT_TYPE = 3,
ECL_BOOL_TYPE = 4,
ECL_MESS_TYPE = 5
ECL_MESS_TYPE = 5,
ECL_C010_TYPE = 6 // See comment immediately above about the limited support of this type.
} ecl_type_enum;
#define ECL_TYPE_ENUM_DEFS {.value = 0 , .name = "ECL_CHAR_TYPE"}, \
@ -96,12 +121,19 @@ typedef enum {
The libecl library has been built and tested 99.5% with ECLIPSE100
as context, but in the gravity code there is some very limited
functionality which differs between ECLIPSE100 and ECLIPSE300.
Observe that numerical values found as part of the INTEHEAD keyword
differ from these values, and are found in the ecl_kw_magic.h
header.
*/
typedef enum {
ECLIPSE_UNDEFINED = 0,
ECLIPSE100 = 1,
ECLIPSE300 = 2
ECLIPSE_UNDEFINED = 0,
ECLIPSE100 = 1,
ECLIPSE300 = 2,
ECLIPSE300_THERMAL = 3,
INTERSECT = 4,
FRONTSIM = 5
} ecl_version_enum;
/*
@ -127,14 +159,12 @@ typedef enum {
typedef enum {
ERT_ECL_METRIC_UNITS = 0,
ERT_ECL_FIELD_UNITS = 1,
ERT_ECL_LAB_UNITS = 2
ECL_METRIC_UNITS = 1,
ECL_FIELD_UNITS = 2,
ECL_LAB_UNITS = 3,
ECL_PVT_M_UNITS = 4
} ert_ecl_unit_enum;
#define ECL_UNIT_ENUM_DEFS {.value = 0 , .name = "ECL_METRIC_UNITS"}, {.value = 1 , .name = "ECL_FIELD_UNITS"} , {.value = 2 , .name = "ECL_LAB_UNITS"}
#define ECL_UNIT_ENUM_SIZE 3
// For unformatted files:
#define ECL_BOOL_TRUE_INT -1 // Binary representation: 11111111 11111111 11111111 11111111

View File

@ -73,7 +73,9 @@ typedef enum {ECL_SMSPEC_INVALID_VAR = 0 ,
char * smspec_alloc_local_block_key( const char * join_string , const char * keyword , const char * lgr_name , int i , int j , int k);
char * smspec_alloc_local_completion_key( const char * join_string, const char * keyword , const char * lgr_name , const char * wgname , int i , int j , int k);
bool smspec_node_equal( const smspec_node_type * node1, const smspec_node_type * node2);
bool smspec_node_init( smspec_node_type * smspec_node,
ecl_smspec_var_type var_type ,
const char * wgname ,

View File

@ -525,14 +525,26 @@ static bool ecl_file_scan( ecl_file_type * ecl_file ) {
{
offset_type current_offset = fortio_ftell( ecl_file->fortio );
if (ecl_kw_fread_header( work_kw , ecl_file->fortio)) {
ecl_read_status_enum read_status = ecl_kw_fread_header( work_kw , ecl_file->fortio);
if (read_status == ECL_KW_READ_FAIL) {
printf("Skipping on read:%s \n", ecl_kw_get_header( work_kw ));
break;
}
if (read_status == ECL_KW_READ_OK) {
ecl_file_kw_type * file_kw = ecl_file_kw_alloc( work_kw , current_offset);
if (ecl_file_kw_fskip_data( file_kw , ecl_file->fortio ))
ecl_file_view_add_kw( ecl_file->global_view , file_kw );
else
break;
} else
break;
}
if (read_status == ECL_KW_READ_SKIP) {
bool skip_ok = ecl_kw_fskip_data( work_kw , ecl_file->fortio );
fprintf(stderr,"** Warning: keyword %s is of type \'C010\' - will be skipped when loading file. skip_ok:%d\n" , ecl_kw_get_header( work_kw ) , skip_ok);
if (!skip_ok)
break;
}
}
}
@ -698,12 +710,21 @@ ecl_version_enum ecl_file_get_ecl_version( const ecl_file_type * file ) {
if (int_value == INTEHEAD_ECLIPSE100_VALUE)
return ECLIPSE100;
else if ((int_value == INTEHEAD_ECLIPSE300_VALUE) || (int_value == INTEHEAD_ECLIPSE300THERMAL_VALUE))
if (int_value == INTEHEAD_ECLIPSE300_VALUE)
return ECLIPSE300;
else {
util_abort("%s: ECLIPSE version value:%d not recognized \n",__func__ , int_value );
return -1;
}
if (int_value == INTEHEAD_ECLIPSE300THERMAL_VALUE)
return ECLIPSE300_THERMAL;
if (int_value == INTEHEAD_INTERSECT_VALUE)
return INTERSECT;
if (int_value == INTEHEAD_FRONTSIM_VALUE)
return FRONTSIM;
util_abort("%s: Simulator version value:%d not recognized \n",__func__ , int_value );
return -1;
}
/*

View File

@ -133,7 +133,7 @@ static const char * get_den_kw( ecl_phase_enum phase , ecl_version_enum ecl_vers
util_abort("%s: unrecognized phase id:%d \n",__func__ , phase);
return NULL;
}
} else if (ecl_version == ECLIPSE300) {
} else if ((ecl_version == ECLIPSE300) || (ecl_version == ECLIPSE300_THERMAL)) {
switch( phase ) {
case( ECL_OIL_PHASE ):
return ECLIPSE300_OIL_DEN_KW;
@ -149,7 +149,7 @@ static const char * get_den_kw( ecl_phase_enum phase , ecl_version_enum ecl_vers
return NULL;
}
} else {
util_abort("%s: unrecognized version id:%d \n",__func__ , ecl_version);
util_abort("%s: unrecognized simulator id:%d \n",__func__ , ecl_version);
return NULL;
}
}

View File

@ -38,16 +38,16 @@
bool * ecl_grav_common_alloc_aquifer_cell( const ecl_grid_cache_type * grid_cache , const ecl_file_type * init_file) {
bool * aquifer_cell = util_calloc( ecl_grid_cache_get_size( grid_cache ) , sizeof * aquifer_cell );
for (int active_index = 0; active_index < ecl_grid_cache_get_size( grid_cache ); active_index++)
aquifer_cell[ active_index ] = false;
if (ecl_file_has_kw( init_file , AQUIFER_KW)) {
ecl_kw_type * aquifer_kw = ecl_file_iget_named_kw( init_file , AQUIFER_KW , 0);
const int * aquifer_data = ecl_kw_get_int_ptr( aquifer_kw );
int active_index;
for (active_index = 0; active_index < ecl_grid_cache_get_size( grid_cache ); active_index++) {
for (int active_index = 0; active_index < ecl_grid_cache_get_size( grid_cache ); active_index++) {
if (aquifer_data[ active_index ] < 0)
aquifer_cell[ active_index ] = true;
else
aquifer_cell[ active_index ] = false;
}
}
@ -63,7 +63,7 @@ double ecl_grav_common_eval_biot_savart( const ecl_grid_cache_type * grid_cache
double sum = 0;
if (region == NULL) {
const int size = ecl_grid_cache_get_size( grid_cache );
int index;
int index;
for ( index = 0; index < size; index++) {
if (!aquifer[index]) {
double dist_x = (xpos[index] - utm_x );
@ -99,3 +99,65 @@ double ecl_grav_common_eval_biot_savart( const ecl_grid_cache_type * grid_cache
}
static inline double ecl_grav_common_eval_geertsma_kernel(int index, const double * xpos, const double * ypos, const double * zpos , double utm_x , double utm_y , double depth, double poisson_ratio, double seabed) {
double z = zpos[index];
z -= seabed;
double dist_x = xpos[index] - utm_x;
double dist_y = ypos[index] - utm_y;
double dist_z1 = z - depth;
double dist_z2 = dist_z1 - 2*z;
double dist1 = sqrt( dist_x*dist_x + dist_y*dist_y + dist_z1*dist_z1 );
double dist2 = sqrt( dist_x*dist_x + dist_y*dist_y + dist_z2*dist_z2 );
double cube_dist1 = dist1*dist1*dist1;
double cube_dist2 = dist2*dist2*dist2;
double displacement =
dist_z1 / cube_dist1 +
(3 - 4*poisson_ratio)*dist_z2 / cube_dist2 -
6*depth * (z + depth) * dist_z2 / (dist2*dist2*cube_dist2) +
2*((3 - 4*poisson_ratio)*(z + depth) - depth)/cube_dist2 ;
return displacement;
}
double ecl_grav_common_eval_geertsma( const ecl_grid_cache_type * grid_cache , ecl_region_type * region , const bool * aquifer , const double * weight , double utm_x , double utm_y , double depth, double poisson_ratio, double seabed) {
const double * xpos = ecl_grid_cache_get_xpos( grid_cache );
const double * ypos = ecl_grid_cache_get_ypos( grid_cache );
const double * zpos = ecl_grid_cache_get_zpos( grid_cache );
double sum = 0;
if (region == NULL) {
const int size = ecl_grid_cache_get_size( grid_cache );
int index;
for ( index = 0; index < size; index++) {
if (!aquifer[index]) {
double displacement = ecl_grav_common_eval_geertsma_kernel( index, xpos , ypos , zpos, utm_x, utm_y , depth, poisson_ratio, seabed);
/**
For numerical precision it might be beneficial to use the
util_kahan_sum() function to do a Kahan summation.
*/
sum += weight[index] * displacement;
}
}
} else {
const int_vector_type * index_vector = ecl_region_get_active_list( region );
const int size = int_vector_size( index_vector );
const int * index_list = int_vector_get_const_ptr( index_vector );
int i, index;
for (i = 0; i < size; i++) {
index = index_list[i];
if (!aquifer[index]) {
double displacement = ecl_grav_common_eval_geertsma_kernel( index, xpos , ypos , zpos, utm_x, utm_y , depth , poisson_ratio, seabed);
sum += weight[index] * displacement;
}
}
}
return sum;
}

View File

@ -700,7 +700,8 @@ typedef struct ecl_cell_struct ecl_cell_type;
#define GET_CELL_FLAG(cell,flag) (((cell->cell_flags & (flag)) == 0) ? false : true)
#define SET_CELL_FLAG(cell,flag) ((cell->cell_flags |= (flag)))
#define METER_TO_FEET_SCALE_FACTOR 3.28084
#define METER_TO_FEET_SCALE_FACTOR 3.28084
#define METER_TO_CM_SCALE_FACTOR 100.0
struct ecl_cell_struct {
point_type center;
@ -1279,7 +1280,7 @@ static double ecl_cell_get_signed_volume( ecl_cell_type * cell) {
* 6
* Since sum( |a·(b x c)| ) / 6 is equal to
* sum( |a·(b x c)| / 6 ) we can do the (rather expensive) division only once
* and stil get the correct result. We multiply by 0.5 because we've now
* and still get the correct result. We multiply by 0.5 because we've now
* considered two decompositions of the tetrahedron, and want their average.
*
*
@ -1507,7 +1508,7 @@ static ecl_grid_type * ecl_grid_alloc_empty(ecl_grid_type * global_grid , int du
grid->index_map = NULL;
grid->fracture_index_map = NULL;
grid->inv_fracture_index_map = NULL;
grid->unit_system = ERT_ECL_METRIC_UNITS;
grid->unit_system = ECL_METRIC_UNITS;
if (global_grid != NULL) {
@ -4013,7 +4014,9 @@ int ecl_grid_get_global_index_from_xyz(ecl_grid_type * grid , double x , double
else {
/* Try boxes 2, 4, 8, ..., 64 */
for (int bx = 1; bx <= 6; bx++) {
global_index = ecl_grid_get_global_index_from_xyz_around_box( grid , x, y, z, start_index, 1<<bx , &p);
global_index = ecl_grid_get_global_index_from_xyz_around_box(grid, x, y, z,
start_index,
1<<bx, &p);
if (global_index >= 0)
return global_index;
}
@ -4021,29 +4024,16 @@ int ecl_grid_get_global_index_from_xyz(ecl_grid_type * grid , double x , double
}
/*
OK - the attempted shortcuts did not pay off. We start on the
full linear search starting from start_index.
OK - the attempted shortcuts did not pay off. Perform full linear search.
*/
{
int index = 0;
global_index = -1;
global_index = -1;
while (true) {
int current_index = ((index + start_index) % grid->size);
bool cell_contains;
cell_contains = ecl_grid_cell_contains_xyz1( grid , current_index , x , y , z);
if (cell_contains) {
global_index = current_index;
break;
}
index++;
if (index == grid->size)
break;
}
for (int index = 0; index < grid->size; index++) {
if (ecl_grid_cell_contains_xyz1( grid , index , x , y , z))
return index;
}
return global_index;
return -1;
}
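The rewritten lookup still tries the start_index based shortcuts (the previous cell, then growing boxes around it) before the plain linear scan above, so callers sampling many nearby points benefit from feeding the previous hit back in as start_index. A sketch (editor illustration, the coordinate arrays are assumed to exist):

/* Locate the cell containing each point; reuse the previous hit as the
   starting guess so the box search usually terminates early. */
static void locate_points( ecl_grid_type * grid ,
                           const double * x , const double * y , const double * z ,
                           int num_points , int * cell_index ) {
   int guess = 0;
   for (int i = 0; i < num_points; i++) {
      int global_index = ecl_grid_get_global_index_from_xyz( grid , x[i] , y[i] , z[i] , guess );
      cell_index[i] = global_index;        /* -1 if the point is outside the grid */
      if (global_index >= 0)
         guess = global_index;
   }
}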
@ -5588,15 +5578,33 @@ static ecl_kw_type * ecl_grid_alloc_mapaxes_kw( const float * mapaxes ) {
return ecl_kw_alloc_new( MAPAXES_KW , 6 , ECL_FLOAT_TYPE , mapaxes);
}
static ecl_kw_type * ecl_grid_alloc_mapunits_kw( ) {
static ecl_kw_type * ecl_grid_alloc_mapunits_kw( ert_ecl_unit_enum output_unit ) {
ecl_kw_type * mapunits_kw = ecl_kw_alloc( MAPUNITS_KW , 1 , ECL_CHAR_TYPE);
ecl_kw_iset_string8( mapunits_kw , 0 , "METRES" );
if (output_unit == ECL_FIELD_UNITS)
ecl_kw_iset_string8( mapunits_kw , 0 , "FEET" );
if (output_unit == ECL_METRIC_UNITS)
ecl_kw_iset_string8( mapunits_kw , 0 , "METRES" );
if (output_unit == ECL_LAB_UNITS)
ecl_kw_iset_string8( mapunits_kw , 0 , "CM" );
return mapunits_kw;
}
static ecl_kw_type * ecl_grid_alloc_gridunits_kw( ) {
static ecl_kw_type * ecl_grid_alloc_gridunits_kw( ert_ecl_unit_enum output_unit ) {
ecl_kw_type * gridunits_kw = ecl_kw_alloc( GRIDUNIT_KW , 2 , ECL_CHAR_TYPE);
ecl_kw_iset_string8( gridunits_kw , 0 , "METRES" );
if (output_unit == ECL_FIELD_UNITS)
ecl_kw_iset_string8( gridunits_kw , 0 , "FEET" );
if (output_unit == ECL_METRIC_UNITS)
ecl_kw_iset_string8( gridunits_kw , 0 , "METRES" );
if (output_unit == ECL_LAB_UNITS)
ecl_kw_iset_string8( gridunits_kw , 0 , "CM" );
ecl_kw_iset_string8( gridunits_kw , 1 , "" );
return gridunits_kw;
}
@ -5604,19 +5612,24 @@ static ecl_kw_type * ecl_grid_alloc_gridunits_kw( ) {
/*****************************************************************/
static float ecl_grid_output_scaling( const ecl_grid_type * grid , ert_ecl_unit_enum output_unit) {
if (output_unit == ERT_ECL_LAB_UNITS)
util_abort("%s: sorry - lab units not yet supported" , __func__);
if (grid->unit_system == ERT_ECL_LAB_UNITS)
util_abort("%s: sorry - lab units not yet supported");
if (grid->unit_system == output_unit)
return 1.0;
return 1.0;
else {
if (grid->unit_system == ERT_ECL_METRIC_UNITS)
return METER_TO_FEET_SCALE_FACTOR;
else
return 1.0 / METER_TO_FEET_SCALE_FACTOR;
double scale_factor = 1;
if (grid->unit_system == ECL_FIELD_UNITS)
scale_factor = 1.0 / METER_TO_FEET_SCALE_FACTOR;
if (grid->unit_system == ECL_LAB_UNITS)
scale_factor = 1.0 / METER_TO_CM_SCALE_FACTOR;
if (output_unit == ECL_FIELD_UNITS)
scale_factor *= METER_TO_FEET_SCALE_FACTOR;
if (output_unit == ECL_LAB_UNITS)
scale_factor *= METER_TO_CM_SCALE_FACTOR;
return scale_factor;
}
}
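/*
  The conversion above is composed via metres: the grid's own unit system is
  first scaled to metres, then metres are scaled to the requested output unit.
  As a worked example (using the factors defined earlier), a FIELD grid written
  with LAB output units gets
  scale_factor = (1 / METER_TO_FEET_SCALE_FACTOR) * METER_TO_CM_SCALE_FACTOR,
  i.e. roughly 30.48 - feet converted to centimetres.
*/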
@ -5627,30 +5640,31 @@ static void ecl_grid_fwrite_mapaxes( const float * mapaxes , fortio_type * forti
ecl_kw_free( mapaxes_kw );
}
static void ecl_grid_fwrite_mapunits( fortio_type * fortio ) {
ecl_kw_type * mapunits_kw = ecl_grid_alloc_mapunits_kw( );
static void ecl_grid_fwrite_mapunits( fortio_type * fortio , ert_ecl_unit_enum output_unit) {
ecl_kw_type * mapunits_kw = ecl_grid_alloc_mapunits_kw( output_unit );
ecl_kw_fwrite( mapunits_kw , fortio );
ecl_kw_free( mapunits_kw );
}
static void ecl_grid_fwrite_gridunits( fortio_type * fortio) {
ecl_kw_type * gridunits_kw = ecl_grid_alloc_gridunits_kw( );
static void ecl_grid_fwrite_gridunits( fortio_type * fortio, ert_ecl_unit_enum output_unit) {
ecl_kw_type * gridunits_kw = ecl_grid_alloc_gridunits_kw( output_unit );
ecl_kw_fwrite( gridunits_kw , fortio );
ecl_kw_free( gridunits_kw );
}
static void ecl_grid_fwrite_main_GRID_headers( const ecl_grid_type * ecl_grid , fortio_type * fortio) {
ecl_grid_fwrite_mapunits( fortio );
static void ecl_grid_fwrite_main_GRID_headers( const ecl_grid_type * ecl_grid , fortio_type * fortio , ert_ecl_unit_enum output_unit) {
ecl_grid_fwrite_mapunits( fortio , output_unit );
if (ecl_grid->use_mapaxes)
ecl_grid_fwrite_mapaxes( ecl_grid->mapaxes , fortio );
ecl_grid_fwrite_gridunits( fortio );
ecl_grid_fwrite_gridunits( fortio , output_unit );
}
static void ecl_grid_fwrite_GRID__( const ecl_grid_type * grid , int coords_size , fortio_type * fortio) {
static void ecl_grid_fwrite_GRID__( const ecl_grid_type * grid , int coords_size , fortio_type * fortio, ert_ecl_unit_enum output_unit) {
if (grid->parent_grid != NULL) {
ecl_kw_type * lgr_kw = ecl_kw_alloc(LGR_KW , 1 , ECL_CHAR_TYPE );
ecl_kw_iset_string8( lgr_kw , 0 , grid->name );
@ -5672,7 +5686,7 @@ static void ecl_grid_fwrite_GRID__( const ecl_grid_type * grid , int coords_size
}
if (grid->parent_grid == NULL)
ecl_grid_fwrite_main_GRID_headers( grid , fortio );
ecl_grid_fwrite_main_GRID_headers( grid , fortio , output_unit);
{
ecl_kw_type * radial_kw = ecl_kw_alloc( RADIAL_KW , 1 , ECL_CHAR_TYPE);
@ -5712,7 +5726,7 @@ static void ecl_grid_fwrite_GRID__( const ecl_grid_type * grid , int coords_size
}
void ecl_grid_fwrite_GRID( const ecl_grid_type * grid , const char * filename) {
void ecl_grid_fwrite_GRID2( const ecl_grid_type * grid , const char * filename, ert_ecl_unit_enum output_unit) {
int coords_size = 5;
bool fmt_file = false;
@ -5723,18 +5737,22 @@ void ecl_grid_fwrite_GRID( const ecl_grid_type * grid , const char * filename) {
if (grid->coarsening_active)
coords_size = 7;
ecl_grid_fwrite_GRID__( grid , coords_size , fortio );
ecl_grid_fwrite_GRID__( grid , coords_size , fortio , output_unit);
{
int grid_nr;
for (grid_nr = 0; grid_nr < vector_get_size( grid->LGR_list ); grid_nr++) {
const ecl_grid_type * igrid = vector_iget_const( grid->LGR_list , grid_nr );
ecl_grid_fwrite_GRID__( igrid , coords_size , fortio );
ecl_grid_fwrite_GRID__( igrid , coords_size , fortio , output_unit );
}
}
fortio_fclose( fortio );
}
void ecl_grid_fwrite_GRID( const ecl_grid_type * grid , const char * filename) {
ecl_grid_fwrite_GRID2( grid , filename , ECL_METRIC_UNITS );
}
/*****************************************************************/
/*
@ -5756,10 +5774,11 @@ ENDGRID 0:INTE
a standard EGRID header without creating a grid instance first.
*/
static void ecl_grid_fwrite_main_EGRID_header( const float * mapaxes, int dualp_flag , fortio_type * fortio ) {
static void ecl_grid_fwrite_main_EGRID_header( const ecl_grid_type * grid , fortio_type * fortio , ert_ecl_unit_enum output_unit) {
int EGRID_VERSION = 3;
int RELEASE_YEAR = 2007;
int COMPAT_VERSION = 0;
const float * mapaxes = ecl_grid_get_mapaxes( grid );
{
ecl_kw_type * filehead_kw = ecl_kw_alloc( FILEHEAD_KW , 100 , ECL_INT_TYPE );
@ -5769,18 +5788,18 @@ static void ecl_grid_fwrite_main_EGRID_header( const float * mapaxes, int dualp_
ecl_kw_iset_int( filehead_kw , FILEHEAD_YEAR_INDEX , RELEASE_YEAR );
ecl_kw_iset_int( filehead_kw , FILEHEAD_COMPAT_INDEX , COMPAT_VERSION );
ecl_kw_iset_int( filehead_kw , FILEHEAD_TYPE_INDEX , FILEHEAD_GRIDTYPE_CORNERPOINT );
ecl_kw_iset_int( filehead_kw , FILEHEAD_DUALP_INDEX , dualp_flag );
ecl_kw_iset_int( filehead_kw , FILEHEAD_DUALP_INDEX , grid->dualp_flag );
ecl_kw_iset_int( filehead_kw , FILEHEAD_ORGFORMAT_INDEX , FILEHEAD_ORGTYPE_CORNERPOINT );
ecl_kw_fwrite( filehead_kw , fortio );
ecl_kw_free( filehead_kw );
}
ecl_grid_fwrite_mapunits( fortio );
ecl_grid_fwrite_mapunits( fortio , output_unit );
if (mapaxes != NULL)
ecl_grid_fwrite_mapaxes( mapaxes , fortio );
ecl_grid_fwrite_gridunits( fortio );
ecl_grid_fwrite_gridunits( fortio , output_unit);
}
@ -5793,16 +5812,6 @@ static void ecl_grid_fwrite_gridhead_kw( int nx, int ny , int nz, int grid_nr, f
void ecl_grid_fwrite_EGRID_header__( int dims[3] , const float mapaxes[6], int dualp_flag , fortio_type * fortio) {
ecl_grid_fwrite_main_EGRID_header( mapaxes , dualp_flag , fortio );
ecl_grid_fwrite_gridhead_kw( dims[0] , dims[1] , dims[2] , 0 , fortio);
}
void ecl_grid_fwrite_EGRID_header( int dims[3] , const float mapaxes[6], fortio_type * fortio) {
ecl_grid_fwrite_EGRID_header__(dims , mapaxes , FILEHEAD_SINGLE_POROSITY , fortio );
}
/*****************************************************************/
@ -6264,7 +6273,7 @@ static void ecl_grid_fwrite_EGRID__( ecl_grid_type * grid , fortio_type * fortio
/* Writing header */
if (!is_lgr) {
ecl_grid_fwrite_main_EGRID_header( ecl_grid_get_mapaxes( grid ) , grid->dualp_flag , fortio );
ecl_grid_fwrite_main_EGRID_header( grid , fortio , output_unit );
} else {
{
ecl_kw_type * lgr_kw = ecl_kw_alloc(LGR_KW , 1 , ECL_CHAR_TYPE );
@ -6360,10 +6369,10 @@ void ecl_grid_fwrite_EGRID2( ecl_grid_type * grid , const char * filename, ert_e
*/
void ecl_grid_fwrite_EGRID( ecl_grid_type * grid , const char * filename, bool output_metric) {
ert_ecl_unit_enum output_unit = ERT_ECL_METRIC_UNITS;
ert_ecl_unit_enum output_unit = ECL_METRIC_UNITS;
if (!output_metric)
output_unit = ERT_ECL_FIELD_UNITS;
output_unit = ECL_FIELD_UNITS;
ecl_grid_fwrite_EGRID2( grid , filename , output_unit );
}
@ -6422,9 +6431,9 @@ void ecl_grid_fwrite_dims( const ecl_grid_type * grid , fortio_type * init_file,
possible LGRs which are attached.
*/
void ecl_grid_fprintf_grdecl(ecl_grid_type * grid , FILE * stream ) {
void ecl_grid_fprintf_grdecl2(ecl_grid_type * grid , FILE * stream , ert_ecl_unit_enum output_unit) {
{
ecl_kw_type * mapunits_kw = ecl_grid_alloc_mapunits_kw( grid );
ecl_kw_type * mapunits_kw = ecl_grid_alloc_mapunits_kw( output_unit );
ecl_kw_fprintf_grdecl( mapunits_kw , stream );
ecl_kw_free( mapunits_kw );
fprintf(stream , "\n");
@ -6437,7 +6446,7 @@ void ecl_grid_fprintf_grdecl(ecl_grid_type * grid , FILE * stream ) {
}
{
ecl_kw_type * gridunits_kw = ecl_grid_alloc_gridunits_kw( grid );
ecl_kw_type * gridunits_kw = ecl_grid_alloc_gridunits_kw( output_unit );
ecl_kw_fprintf_grdecl( gridunits_kw , stream );
ecl_kw_free( gridunits_kw );
fprintf(stream , "\n");
@ -6476,6 +6485,11 @@ void ecl_grid_fprintf_grdecl(ecl_grid_type * grid , FILE * stream ) {
}
void ecl_grid_fprintf_grdecl(ecl_grid_type * grid , FILE * stream ) {
ecl_grid_fprintf_grdecl2( grid , stream , ECL_METRIC_UNITS);
}
/*****************************************************************/
/**
@ -6568,3 +6582,4 @@ ecl_kw_type * ecl_grid_alloc_volume_kw( const ecl_grid_type * grid , bool active
else
return ecl_grid_alloc_volume_kw_global( grid );
}
//
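/*
  A short usage sketch of the unit-aware writers introduced above. Loading the
  grid with ecl_grid_alloc() and the output file names are assumptions made for
  the example; ecl_grid_fwrite_EGRID2/GRID2 and ECL_FIELD_UNITS are the symbols
  added in this change.
*/
static void write_grid_in_field_units( const char * input_egrid ) {
  ecl_grid_type * grid = ecl_grid_alloc( input_egrid );
  ecl_grid_fwrite_EGRID2( grid , "CASE_FIELD.EGRID" , ECL_FIELD_UNITS );
  ecl_grid_fwrite_GRID2( grid , "CASE_FIELD.GRID" , ECL_FIELD_UNITS );
  ecl_grid_free( grid );
}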

View File

@ -1,20 +1,20 @@
/*
Copyright (C) 2011 Statoil ASA, Norway.
Copyright (C) 2011 Statoil ASA, Norway.
The file 'ecl_grid_cache.c' is part of ERT - Ensemble based
Reservoir Tool.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
ERT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ERT is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
for more details.
*/
#include <stdlib.h>
@ -36,7 +36,7 @@
The ecl_grid_cache_struct data structure internalizes the world
position of all the active cells. This is just a minor
simplification to speed up repeated calls to get the true world
coordinates of a cell.
coordinates of a cell.
*/
struct ecl_grid_cache_struct {
@ -44,7 +44,9 @@ struct ecl_grid_cache_struct {
double * xpos;
double * ypos;
double * zpos;
double * volume; /* Will be initialized on demand. */
int * global_index; /* Maps from active index (i.e. natural index in this context) - to the corresponding global index. */
const ecl_grid_type * grid;
};
@ -53,7 +55,9 @@ struct ecl_grid_cache_struct {
ecl_grid_cache_type * ecl_grid_cache_alloc( const ecl_grid_type * grid ) {
ecl_grid_cache_type * grid_cache = util_malloc( sizeof * grid_cache );
grid_cache->grid = grid;
grid_cache->volume = NULL;
grid_cache->size = ecl_grid_get_active_size( grid );
grid_cache->xpos = util_calloc( grid_cache->size , sizeof * grid_cache->xpos );
grid_cache->ypos = util_calloc( grid_cache->size , sizeof * grid_cache->ypos );
@ -61,20 +65,20 @@ ecl_grid_cache_type * ecl_grid_cache_alloc( const ecl_grid_type * grid ) {
grid_cache->global_index = util_calloc( grid_cache->size , sizeof * grid_cache->global_index );
{
int active_index;
    /* Go through all the active cells and extract the cell center
position and store it in xpos/ypos/zpos. */
for (active_index = 0; active_index < grid_cache->size; active_index++) {
int global_index = ecl_grid_get_global_index1A( grid , active_index );
grid_cache->global_index[ active_index ] = global_index;
ecl_grid_get_xyz1( grid , global_index ,
&grid_cache->xpos[ active_index ] ,
&grid_cache->ypos[ active_index ] ,
ecl_grid_get_xyz1( grid , global_index ,
&grid_cache->xpos[ active_index ] ,
&grid_cache->ypos[ active_index ] ,
&grid_cache->zpos[ active_index ]);
}
}
return grid_cache;
}
@ -104,10 +108,25 @@ const double * ecl_grid_cache_get_zpos( const ecl_grid_cache_type * grid_cache )
return grid_cache->zpos;
}
const double * ecl_grid_cache_get_volume( const ecl_grid_cache_type * grid_cache ) {
if (!grid_cache->volume) {
// C++ style const cast.
ecl_grid_cache_type * gc = (ecl_grid_cache_type *) grid_cache;
gc->volume = util_calloc( gc->size , sizeof * gc->volume );
for (int active_index = 0; active_index < grid_cache->size; active_index++)
gc->volume[active_index] = ecl_grid_get_cell_volume1A( gc->grid , active_index );
}
return grid_cache->volume;
}
void ecl_grid_cache_free( ecl_grid_cache_type * grid_cache ) {
free( grid_cache->xpos );
free( grid_cache->ypos );
free( grid_cache->zpos );
free( grid_cache->global_index );
free( grid_cache->volume );
free( grid_cache );
}
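/*
  The volume vector is filled lazily: the first call to
  ecl_grid_cache_get_volume() computes the cell volumes, later calls just return
  the cached pointer. A small usage sketch (assumes <stdio.h> and an already
  loaded grid instance):
*/
static void print_total_active_volume( const ecl_grid_type * grid ) {
  ecl_grid_cache_type * grid_cache = ecl_grid_cache_alloc( grid );
  const double * volume = ecl_grid_cache_get_volume( grid_cache );   /* triggers the lazy fill */
  double total = 0;
  for (int active_index = 0; active_index < ecl_grid_cache_get_size( grid_cache ); active_index++)
    total += volume[ active_index ];
  printf("Total active volume: %g \n" , total);
  ecl_grid_cache_free( grid_cache );
}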

View File

@ -37,12 +37,13 @@
#include <ert/ecl/ecl_kw_magic.h>
#include <ert/ecl/ecl_kw.h>
#include <ert/ecl/ecl_grid.h>
#include <ert/ecl/ecl_util.h>
static ecl_kw_type * ecl_init_file_alloc_INTEHEAD( const ecl_grid_type * ecl_grid , int phases, time_t start_date , int simulator) {
static ecl_kw_type * ecl_init_file_alloc_INTEHEAD( const ecl_grid_type * ecl_grid , ert_ecl_unit_enum unit_system, int phases, time_t start_date , int simulator) {
ecl_kw_type * intehead_kw = ecl_kw_alloc( INTEHEAD_KW , INTEHEAD_INIT_SIZE , ECL_INT_TYPE );
ecl_kw_scalar_set_int( intehead_kw , 0 );
ecl_kw_iset_int( intehead_kw , INTEHEAD_UNIT_INDEX , INTEHEAD_METRIC_VALUE );
ecl_kw_iset_int( intehead_kw , INTEHEAD_UNIT_INDEX , unit_system );
ecl_kw_iset_int( intehead_kw , INTEHEAD_NX_INDEX , ecl_grid_get_nx( ecl_grid ));
ecl_kw_iset_int( intehead_kw , INTEHEAD_NY_INDEX , ecl_grid_get_ny( ecl_grid ));
ecl_kw_iset_int( intehead_kw , INTEHEAD_NZ_INDEX , ecl_grid_get_nz( ecl_grid ));
@ -171,10 +172,10 @@ static void ecl_init_file_fwrite_poro( fortio_type * fortio , const ecl_grid_typ
that.
*/
void ecl_init_file_fwrite_header( fortio_type * fortio , const ecl_grid_type * ecl_grid , const ecl_kw_type * poro , int phases , time_t start_date) {
void ecl_init_file_fwrite_header( fortio_type * fortio , const ecl_grid_type * ecl_grid , const ecl_kw_type * poro , ert_ecl_unit_enum unit_system, int phases , time_t start_date) {
int simulator = INTEHEAD_ECLIPSE100_VALUE;
{
ecl_kw_type * intehead_kw = ecl_init_file_alloc_INTEHEAD( ecl_grid , phases , start_date , simulator );
ecl_kw_type * intehead_kw = ecl_init_file_alloc_INTEHEAD( ecl_grid , unit_system , phases , start_date , simulator );
ecl_kw_fwrite( intehead_kw , fortio );
ecl_kw_free( intehead_kw );
}

View File

@ -62,6 +62,7 @@ UTIL_IS_INSTANCE_FUNCTION(ecl_kw , ECL_KW_TYPE_ID )
#define BLOCKSIZE_NUMERIC 1000
#define BLOCKSIZE_CHAR 105
#define BLOCKSIZE_C010 105
@ -113,6 +114,7 @@ UTIL_IS_INSTANCE_FUNCTION(ecl_kw , ECL_KW_TYPE_ID )
*/
#define READ_FMT_CHAR "%8c"
#define READ_FMT_C010 "%10c"
#define READ_FMT_FLOAT "%gE"
#define READ_FMT_INT "%d"
#define READ_FMT_MESS "%8c"
@ -121,6 +123,7 @@ UTIL_IS_INSTANCE_FUNCTION(ecl_kw , ECL_KW_TYPE_ID )
#define WRITE_FMT_CHAR " '%-8s'"
#define WRITE_FMT_C010 " '%-10s'"
#define WRITE_FMT_INT " %11d"
#define WRITE_FMT_FLOAT " %11.8fE%+03d"
#define WRITE_FMT_DOUBLE " %17.14fD%+03d"
@ -162,6 +165,9 @@ static const char * get_read_fmt( ecl_type_enum ecl_type ) {
case(ECL_CHAR_TYPE):
return READ_FMT_CHAR;
break;
case(ECL_C010_TYPE):
return READ_FMT_C010;
break;
case(ECL_INT_TYPE):
return READ_FMT_INT;
break;
@ -189,6 +195,9 @@ const char * ecl_kw_get_write_fmt( ecl_type_enum ecl_type ) {
case(ECL_CHAR_TYPE):
return WRITE_FMT_CHAR;
break;
case(ECL_C010_TYPE):
return WRITE_FMT_C010;
break;
case(ECL_INT_TYPE):
return WRITE_FMT_INT;
break;
@ -213,9 +222,13 @@ const char * ecl_kw_get_write_fmt( ecl_type_enum ecl_type ) {
static int get_blocksize( ecl_type_enum ecl_type ) {
if (ecl_type == ECL_CHAR_TYPE)
return BLOCKSIZE_CHAR;
else if (ecl_type == ECL_MESS_TYPE)
if (ecl_type == ECL_MESS_TYPE)
return BLOCKSIZE_CHAR;
else
if (ecl_type == ECL_C010_TYPE)
return BLOCKSIZE_C010;
return BLOCKSIZE_NUMERIC;
}
@ -321,7 +334,7 @@ static bool ecl_kw_string_eq(const char *s1 , const char *s2) {
const int len2 = strlen(short_kw);
int index;
bool eq = true;
if (len1 > ECL_STRING_LENGTH)
if (len1 > ECL_STRING8_LENGTH)
util_abort("%s : eclipse keyword:%s is too long - aborting \n",__func__ , long_kw);
for (index = 0; index < len2; index++)
@ -338,7 +351,7 @@ static bool ecl_kw_string_eq(const char *s1 , const char *s2) {
bool ecl_kw_ichar_eq(const ecl_kw_type *ecl_kw , int i , const char *value) {
char s1[ECL_STRING_LENGTH + 1];
char s1[ECL_STRING8_LENGTH + 1];
ecl_kw_iget(ecl_kw , i , s1);
return ecl_kw_string_eq(s1 , value);
}
@ -413,27 +426,6 @@ bool ecl_kw_equal(const ecl_kw_type *ecl_kw1, const ecl_kw_type *ecl_kw2) {
}
#define CMP(ctype,ABS) \
static bool CMP_ ## ctype( ctype v1, ctype v2 , ctype abs_epsilon , ctype rel_epsilon) { \
if ((ABS(v1) + ABS(v2)) == 0) \
return true; \
{ \
ctype diff = ABS(v1 - v2); \
if ((abs_epsilon > 0) && (diff > abs_epsilon)) \
return false; \
{ \
ctype sum = ABS(v1) + ABS(v2); \
ctype rel_diff = diff / sum; \
if ((rel_epsilon > 0) && (rel_diff > rel_epsilon)) \
return false; \
} \
return true; \
} \
}
CMP(float,fabsf)
CMP(double,fabs)
#undef CMP
#define ECL_KW_NUMERIC_CMP(ctype) \
static bool ecl_kw_numeric_equal_ ## ctype( const ecl_kw_type * ecl_kw1 , const ecl_kw_type * ecl_kw2 , ctype abs_diff , ctype rel_diff) { \
@ -443,7 +435,7 @@ CMP(double,fabs)
const ctype * data1 = (const ctype *) ecl_kw1->data; \
const ctype * data2 = (const ctype *) ecl_kw2->data; \
for (index = 0; index < ecl_kw1->size; index++) { \
equal = CMP_ ## ctype( data1[index] , data2[index] , abs_diff , rel_diff); \
equal = util_ ## ctype ## _approx_equal__( data1[index], data2[index] , rel_diff , abs_diff); \
if (!equal) \
break; \
} \
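/*
  The local CMP_float/CMP_double macros above are replaced by
  util_float_approx_equal__ / util_double_approx_equal__ (note the swapped
  argument order: the relative epsilon now comes first). Assuming those helpers
  keep the semantics of the removed macros, the comparison is essentially the
  sketch below, where a non-positive epsilon disables that particular test
  (assumes <math.h> and <stdbool.h>):
*/
static bool approx_equal_sketch( double v1 , double v2 , double rel_epsilon , double abs_epsilon ) {
  double diff = fabs(v1 - v2);
  double sum  = fabs(v1) + fabs(v2);
  if (sum == 0)
    return true;                                    /* both values are exactly zero  */
  if ((abs_epsilon > 0) && (diff > abs_epsilon))
    return false;                                   /* absolute difference too large */
  if ((rel_epsilon > 0) && (diff / sum > rel_epsilon))
    return false;                                   /* relative difference too large */
  return true;
}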
@ -509,12 +501,8 @@ static void ecl_kw_set_shared_ref(ecl_kw_type * ecl_kw , void *data_ptr) {
static void ecl_kw_initialize(ecl_kw_type * ecl_kw , const char *header , int size , ecl_type_enum ecl_type) {
ecl_kw->ecl_type = ecl_type;
ecl_kw->sizeof_ctype = ecl_util_get_sizeof_ctype(ecl_kw->ecl_type);
// NOTE ! Do not abort when reading a keyword with more than eight characters
// The keyword is populated with the eight first characters, and import works as expected
//if (strlen(header) > ECL_STRING_LENGTH)
//util_abort("%s: Fatal error: ecl_header_name:%s is longer than eight characters - aborting \n",__func__,header);
if (strlen(header) > ECL_STRING8_LENGTH)
util_abort("%s: Fatal error: ecl_header_name:%s is longer than eight characters - aborting \n",__func__,header);
ecl_kw_set_header_name(ecl_kw , header);
ecl_kw->size = size;
@ -560,11 +548,17 @@ ecl_kw_type * ecl_kw_alloc_new(const char * header , int size, ecl_type_enum ec
ecl_kw_type * ecl_kw_alloc( const char * header , int size , ecl_type_enum ecl_type ) {
ecl_kw_type *ecl_kw;
ecl_kw = ecl_kw_alloc_empty();
ecl_kw_initialize(ecl_kw , header , size , ecl_type);
ecl_kw_alloc_data(ecl_kw);
return ecl_kw;
if (ecl_type == ECL_C010_TYPE)
return NULL;
{
ecl_kw_type *ecl_kw;
ecl_kw = ecl_kw_alloc_empty();
ecl_kw_initialize(ecl_kw , header , size , ecl_type);
ecl_kw_alloc_data(ecl_kw);
return ecl_kw;
}
}
@ -695,7 +689,22 @@ ecl_kw_type * ecl_kw_alloc_slice_copy( const ecl_kw_type * src, int index1, int
void ecl_kw_resize( ecl_kw_type * ecl_kw, int new_size) {
if (ecl_kw->shared_data)
util_abort("%s: trying to allocate data for ecl_kw object which has been declared with shared storage - aborting \n",__func__);
if (new_size != ecl_kw->size) {
size_t old_byte_size = ecl_kw->size * ecl_kw->sizeof_ctype;
size_t new_byte_size = new_size * ecl_kw->sizeof_ctype;
ecl_kw->data = util_realloc(ecl_kw->data , new_byte_size );
if (new_byte_size > old_byte_size) {
size_t offset = old_byte_size;
memset(&ecl_kw->data[offset] , 0 , new_byte_size - old_byte_size);
}
ecl_kw->size = new_size;
}
}
/**
Will allocate a copy of the src_kw. Will copy @count elements
@ -828,10 +837,10 @@ const char * ecl_kw_iget_char_ptr( const ecl_kw_type * ecl_kw , int i) {
*/
void ecl_kw_iset_string8(ecl_kw_type * ecl_kw , int index , const char *s8) {
char * ecl_string = (char *) ecl_kw_iget_ptr( ecl_kw , index );
if (strlen( s8 ) >= ECL_STRING_LENGTH) {
if (strlen( s8 ) >= ECL_STRING8_LENGTH) {
    /* The whole string goes in - possibly losing content at the end. */
int i;
for (i=0; i < ECL_STRING_LENGTH; i++)
for (i=0; i < ECL_STRING8_LENGTH; i++)
ecl_string[i] = s8[i];
} else {
/* The string is padded with trailing spaces. */
@ -841,12 +850,12 @@ void ecl_kw_iset_string8(ecl_kw_type * ecl_kw , int index , const char *s8) {
for (i=0; i < string_length; i++)
ecl_string[i] = s8[i];
for (i=string_length; i < ECL_STRING_LENGTH; i++)
for (i=string_length; i < ECL_STRING8_LENGTH; i++)
ecl_string[i] = ' ';
}
ecl_string[ ECL_STRING_LENGTH ] = '\0';
ecl_string[ ECL_STRING8_LENGTH ] = '\0';
}
/**
@ -861,13 +870,13 @@ void ecl_kw_iset_string8(ecl_kw_type * ecl_kw , int index , const char *s8) {
elements is not what you want?
*/
void ecl_kw_iset_char_ptr( ecl_kw_type * ecl_kw , int index, const char * s) {
int strings = strlen( s ) / ECL_STRING_LENGTH;
if ((strlen( s ) % ECL_STRING_LENGTH) != 0)
int strings = strlen( s ) / ECL_STRING8_LENGTH;
if ((strlen( s ) % ECL_STRING8_LENGTH) != 0)
strings++;
{
int sub_index;
for (sub_index = 0; sub_index < strings; sub_index++)
ecl_kw_iset_string8( ecl_kw , index + sub_index , &s[ sub_index * ECL_STRING_LENGTH ]);
ecl_kw_iset_string8( ecl_kw , index + sub_index , &s[ sub_index * ECL_STRING8_LENGTH ]);
}
}
@ -1165,8 +1174,8 @@ bool ecl_kw_fread_data(ecl_kw_type *ecl_kw, fortio_type *fortio) {
if (record_size >= 0) {
int ir;
for (ir = 0; ir < read_elm; ir++) {
util_fread( &ecl_kw->data[(ib * blocksize + ir) * ecl_kw->sizeof_ctype] , 1 , ECL_STRING_LENGTH , stream , __func__);
ecl_kw->data[(ib * blocksize + ir) * ecl_kw->sizeof_ctype + ECL_STRING_LENGTH] = null_char;
util_fread( &ecl_kw->data[(ib * blocksize + ir) * ecl_kw->sizeof_ctype] , 1 , ECL_STRING8_LENGTH , stream , __func__);
ecl_kw->data[(ib * blocksize + ir) * ecl_kw->sizeof_ctype + ECL_STRING8_LENGTH] = null_char;
}
read_ok = fortio_complete_read(fortio , record_size);
} else
@ -1203,7 +1212,7 @@ void ecl_kw_fread_indexed_data(fortio_type * fortio, offset_type data_offset, ec
int element_size = ecl_util_get_sizeof_ctype(ecl_type);
if(ecl_type == ECL_CHAR_TYPE || ecl_type == ECL_MESS_TYPE) {
element_size = ECL_STRING_LENGTH;
element_size = ECL_STRING8_LENGTH;
}
@ -1250,9 +1259,11 @@ bool ecl_kw_fskip_data__( ecl_type_enum ecl_type , const int element_count , for
const int block_count = element_count / blocksize + (element_count % blocksize == 0 ? 0 : 1);
int element_size = ecl_util_get_sizeof_ctype(ecl_type );
if(ecl_type == ECL_CHAR_TYPE || ecl_type == ECL_MESS_TYPE) {
element_size = ECL_STRING_LENGTH;
}
if(ecl_type == ECL_CHAR_TYPE)
element_size = ECL_STRING8_LENGTH;
if(ecl_type == ECL_C010_TYPE)
element_size = ECL_STRING10_LENGTH;
skip_ok = fortio_data_fskip(fortio, element_size, element_count, block_count);
}
@ -1286,11 +1297,11 @@ void ecl_kw_fskip_header( fortio_type * fortio) {
}
bool ecl_kw_fread_header(ecl_kw_type *ecl_kw , fortio_type * fortio) {
ecl_read_status_enum ecl_kw_fread_header(ecl_kw_type *ecl_kw , fortio_type * fortio) {
const char null_char = '\0';
FILE *stream = fortio_get_FILE( fortio );
bool fmt_file = fortio_fmt_file( fortio );
char header[ECL_STRING_LENGTH + 1];
char header[ECL_STRING8_LENGTH + 1];
char ecl_type_str[ECL_TYPE_LENGTH + 1];
int record_size;
int size;
@ -1307,7 +1318,7 @@ bool ecl_kw_fread_header(ecl_kw_type *ecl_kw , fortio_type * fortio) {
util_abort("%s: reading failed - at end of file?\n",__func__);
}
} else {
header[ECL_STRING_LENGTH] = null_char;
header[ECL_STRING8_LENGTH] = null_char;
ecl_type_str[ECL_TYPE_LENGTH] = null_char;
record_size = fortio_init_read(fortio);
if (record_size > 0) {
@ -1315,9 +1326,9 @@ bool ecl_kw_fread_header(ecl_kw_type *ecl_kw , fortio_type * fortio) {
size_t read_bytes = fread(buffer , 1 , ECL_KW_HEADER_DATA_SIZE , stream);
if (read_bytes == ECL_KW_HEADER_DATA_SIZE) {
memcpy( header , &buffer[0] , ECL_STRING_LENGTH);
size = *( (int *) &buffer[ECL_STRING_LENGTH] );
memcpy( ecl_type_str , &buffer[ECL_STRING_LENGTH + sizeof(size)] , ECL_TYPE_LENGTH);
memcpy( header , &buffer[0] , ECL_STRING8_LENGTH);
size = *( (int *) &buffer[ECL_STRING8_LENGTH] );
memcpy( ecl_type_str , &buffer[ECL_STRING8_LENGTH + sizeof(size)] , ECL_TYPE_LENGTH);
OK = fortio_complete_read(fortio , record_size);
} else
@ -1328,10 +1339,17 @@ bool ecl_kw_fread_header(ecl_kw_type *ecl_kw , fortio_type * fortio) {
} else
OK = false;
}
if (OK)
ecl_kw_set_header(ecl_kw , header , size , ecl_type_str);
return OK;
if (OK) {
ecl_type_enum ecl_type = ecl_util_get_type_from_name( ecl_type_str );
ecl_kw_initialize( ecl_kw , header , size , ecl_type);
if (ecl_type == ECL_C010_TYPE)
return ECL_KW_READ_SKIP;
return ECL_KW_READ_OK;
} else
return ECL_KW_READ_FAIL;
}
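/*
  ecl_kw_fread_header() now returns a tri-state ecl_read_status_enum instead of
  a bool. A minimal read loop handling the three values; the skip/realloc calls
  below are assumptions about how a caller would typically react, not code from
  this diff:
*/
static void read_all_keywords( fortio_type * fortio ) {
  ecl_kw_type * ecl_kw = ecl_kw_alloc_empty();
  while (true) {
    ecl_read_status_enum read_status = ecl_kw_fread_header( ecl_kw , fortio );
    if (read_status == ECL_KW_READ_FAIL)
      break;                                        /* end of file or broken header         */
    if (read_status == ECL_KW_READ_SKIP) {          /* e.g. C010 keywords: header is read   */
      ecl_kw_fskip_data( ecl_kw , fortio );         /* but the data is just skipped         */
      continue;
    }
    ecl_kw_fread_realloc_data( ecl_kw , fortio );   /* ECL_KW_READ_OK: load the data        */
    /* ... use ecl_kw ... */
  }
  ecl_kw_free( ecl_kw );
}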
@ -1359,8 +1377,7 @@ bool ecl_kw_fseek_kw(const char * kw , bool rewind , bool abort_on_error , forti
kw_found = false;
while (cont) {
long current_pos = fortio_ftell( fortio );
bool header_OK = ecl_kw_fread_header(tmp_kw , fortio);
if (header_OK) {
if (ecl_kw_fread_header(tmp_kw , fortio) == ECL_KW_READ_OK) {
if (ecl_kw_string_eq(ecl_kw_get_header8(tmp_kw) , kw)) {
fortio_fseek( fortio , current_pos , SEEK_SET );
kw_found = true;
@ -1457,7 +1474,7 @@ void ecl_kw_free_data(ecl_kw_type *ecl_kw) {
void ecl_kw_set_header_name(ecl_kw_type * ecl_kw , const char * header) {
ecl_kw->header8 = realloc(ecl_kw->header8 , ECL_STRING_LENGTH + 1);
ecl_kw->header8 = realloc(ecl_kw->header8 , ECL_STRING8_LENGTH + 1);
sprintf(ecl_kw->header8 , "%-8s" , header);
/* Internalizing a header without the trailing spaces as well. */
@ -1467,21 +1484,9 @@ void ecl_kw_set_header_name(ecl_kw_type * ecl_kw , const char * header) {
void ecl_kw_set_header(ecl_kw_type *ecl_kw , const char *header , int size , const char *type_name) {
ecl_type_enum ecl_type = ecl_util_get_type_from_name( type_name );
ecl_kw_initialize( ecl_kw , header , size , ecl_type);
}
void ecl_kw_set_header_alloc(ecl_kw_type *ecl_kw , const char *header , int size , const char *type_name ) {
ecl_kw_set_header(ecl_kw , header , size , type_name );
ecl_kw_alloc_data(ecl_kw);
}
bool ecl_kw_fread_realloc(ecl_kw_type *ecl_kw , fortio_type *fortio) {
bool OK = ecl_kw_fread_header(ecl_kw , fortio);
if (OK)
if (ecl_kw_fread_header(ecl_kw , fortio) == ECL_KW_READ_OK)
return ecl_kw_fread_realloc_data( ecl_kw , fortio );
else
return false;
@ -1490,7 +1495,7 @@ bool ecl_kw_fread_realloc(ecl_kw_type *ecl_kw , fortio_type *fortio) {
void ecl_kw_fread(ecl_kw_type * ecl_kw , fortio_type * fortio) {
int current_size = ecl_kw->size;
if (!ecl_kw_fread_header(ecl_kw , fortio))
if (ecl_kw_fread_header(ecl_kw , fortio) != ECL_KW_READ_OK)
util_abort("%s: failed to read header for ecl_kw - aborting \n",__func__);
if (ecl_kw->size == current_size)
@ -1541,11 +1546,11 @@ static void ecl_kw_fwrite_data_unformatted( ecl_kw_type * ecl_kw , fortio_type *
skipped.
*/
FILE *stream = fortio_get_FILE(fortio);
int record_size = this_blocksize * ECL_STRING_LENGTH; /* The total size in bytes of the record written by the fortio layer. */
int record_size = this_blocksize * ECL_STRING8_LENGTH; /* The total size in bytes of the record written by the fortio layer. */
int i;
fortio_init_write(fortio , record_size );
for (i = 0; i < this_blocksize; i++)
fwrite(&ecl_kw->data[(block_nr * blocksize + i) * ecl_kw->sizeof_ctype] , 1 , ECL_STRING_LENGTH , stream);
fwrite(&ecl_kw->data[(block_nr * blocksize + i) * ecl_kw->sizeof_ctype] , 1 , ECL_STRING8_LENGTH , stream);
fortio_complete_write(fortio , record_size);
} else {
int record_size = this_blocksize * ecl_kw->sizeof_ctype; /* The total size in bytes of the record written by the fortio layer. */
@ -1621,6 +1626,9 @@ static void ecl_kw_fwrite_data_formatted( ecl_kw_type * ecl_kw , fortio_type * f
case(ECL_CHAR_TYPE):
fprintf(stream , write_fmt , data_ptr);
break;
case(ECL_C010_TYPE):
fprintf(stream , write_fmt , data_ptr);
break;
case(ECL_INT_TYPE):
{
int int_value = ((int *) data_ptr)[0];
@ -1684,7 +1692,7 @@ void ecl_kw_fwrite_header(const ecl_kw_type *ecl_kw , fortio_type *fortio) {
fortio_init_write(fortio , ECL_KW_HEADER_DATA_SIZE );
fwrite(ecl_kw->header8 , sizeof(char) , ECL_STRING_LENGTH , stream);
fwrite(ecl_kw->header8 , sizeof(char) , ECL_STRING8_LENGTH , stream);
fwrite(&size , sizeof(int) , 1 , stream);
fwrite(ecl_util_get_type_name( ecl_kw->ecl_type ) , sizeof(char) , ECL_TYPE_LENGTH , stream);
@ -2512,10 +2520,10 @@ bool ecl_kw_is_kw_file(fortio_type * fortio) {
ecl_kw_type * ecl_kw = ecl_kw_alloc_empty();
if (fortio_fmt_file( fortio ))
kw_file = ecl_kw_fread_header(ecl_kw , fortio);
kw_file = (ecl_kw_fread_header(ecl_kw , fortio) != ECL_KW_READ_FAIL);
else {
if (fortio_is_fortio_file(fortio))
kw_file = ecl_kw_fread_header(ecl_kw , fortio);
kw_file = (ecl_kw_fread_header(ecl_kw , fortio) != ECL_KW_READ_FAIL);
else
kw_file = false;
}
@ -2528,41 +2536,6 @@ bool ecl_kw_is_kw_file(fortio_type * fortio) {
}
bool ecl_kw_is_grdecl_file(FILE * stream) {
const long int init_pos = util_ftell(stream);
bool grdecl_file;
bool at_eof = false;
util_fskip_chars(stream , " \r\n\t" , &at_eof); /* Skipping intial space */
util_fskip_cchars(stream , " \r\n\t" , &at_eof); /* Skipping PORO/PERMX/... */
if (at_eof)
grdecl_file = false;
else {
grdecl_file = true;
{
int c;
do {
c = fgetc(stream);
if (c == '\r' || c == '\n')
break;
else {
if (c != ' ') {
grdecl_file = false;
break;
}
}
} while (c == ' ');
}
}
util_fseek(stream , init_pos , SEEK_SET);
return grdecl_file;
}
#define KW_MAX_MIN(type) \
{ \
type * data = ecl_kw_get_data_ref(ecl_kw); \
@ -2749,7 +2722,7 @@ void ecl_kw_fprintf_data( const ecl_kw_type * ecl_kw , const char * fmt , FILE *
static bool ecl_kw_elm_equal_numeric__( const ecl_kw_type * ecl_kw1 , const ecl_kw_type * ecl_kw2 , int offset, double abs_epsilon, double rel_epsilon) {
double v1 = ecl_kw_iget_as_double( ecl_kw1 , offset );
double v2 = ecl_kw_iget_as_double( ecl_kw2 , offset );
return CMP_double(v1 , v2 , abs_epsilon , rel_epsilon );
return util_double_approx_equal__( v1, v2 , rel_epsilon , abs_epsilon );
}

View File

@ -480,7 +480,7 @@ bool ecl_rft_node_is_RFT( const ecl_rft_node_type * rft_node ) {
}
static void ecl_rft_node_fill_welletc(ecl_kw_type * welletc, ert_ecl_unit_enum unit_set){
if(unit_set==ERT_ECL_METRIC_UNITS) {
if(unit_set==ECL_METRIC_UNITS) {
ecl_kw_iset_string8(welletc, 0, " DAYS");
ecl_kw_iset_string8(welletc, 2, "");
ecl_kw_iset_string8(welletc, 3, " METRES");
@ -495,7 +495,7 @@ static void ecl_rft_node_fill_welletc(ecl_kw_type * welletc, ert_ecl_unit_enum u
ecl_kw_iset_string8(welletc, 13, " KG/SM3");
ecl_kw_iset_string8(welletc, 14, " KG/DAY");
ecl_kw_iset_string8(welletc, 15, " KG/KG");
}else if(unit_set==ERT_ECL_FIELD_UNITS){
}else if(unit_set==ECL_FIELD_UNITS){
ecl_kw_iset_string8(welletc, 0, " DAYS");
ecl_kw_iset_string8(welletc, 2, "");
ecl_kw_iset_string8(welletc, 3, " FEET");
@ -511,7 +511,7 @@ static void ecl_rft_node_fill_welletc(ecl_kw_type * welletc, ert_ecl_unit_enum u
ecl_kw_iset_string8(welletc, 14, " LB/DAY");
ecl_kw_iset_string8(welletc, 15, " LB/LB");
}else if(unit_set==ERT_ECL_LAB_UNITS){
}else if(unit_set==ECL_LAB_UNITS){
ecl_kw_iset_string8(welletc, 0, " HR");
ecl_kw_iset_string8(welletc, 2, "");
ecl_kw_iset_string8(welletc, 3, " CM");

View File

@ -112,7 +112,7 @@ ecl_rst_file_type * ecl_rst_file_open_write_seek( const char * filename , int re
break;
}
if (!ecl_kw_fread_header( work_kw , rst_file->fortio))
if (ecl_kw_fread_header( work_kw , rst_file->fortio) == ECL_KW_READ_FAIL)
break;
if (ecl_kw_name_equal( work_kw , SEQNUM_KW)) {
@ -183,7 +183,7 @@ static ecl_kw_type * ecl_rst_file_alloc_INTEHEAD( ecl_rst_file_type * rst_file,
ecl_kw_type * intehead_kw = ecl_kw_alloc( INTEHEAD_KW , INTEHEAD_RESTART_SIZE , ECL_INT_TYPE );
ecl_kw_scalar_set_int( intehead_kw , 0 );
ecl_kw_iset_int( intehead_kw , INTEHEAD_UNIT_INDEX , INTEHEAD_METRIC_VALUE );
ecl_kw_iset_int( intehead_kw , INTEHEAD_UNIT_INDEX , rsthead->unit_system );
ecl_kw_iset_int( intehead_kw , INTEHEAD_NX_INDEX , rsthead->nx);
ecl_kw_iset_int( intehead_kw , INTEHEAD_NY_INDEX , rsthead->ny);
ecl_kw_iset_int( intehead_kw , INTEHEAD_NZ_INDEX , rsthead->nz);

View File

@ -73,9 +73,10 @@ ecl_rsthead_type * ecl_rsthead_alloc_from_kw( int report_step , const ecl_kw_typ
rsthead->nwells = data[INTEHEAD_NWELLS_INDEX];
rsthead->niwelz = data[INTEHEAD_NIWELZ_INDEX];
rsthead->nxwelz = data[INTEHEAD_NXWELZ_INDEX];
rsthead->nzwelz = data[INTEHEAD_NZWELZ_INDEX];
rsthead->nsconz = data[INTEHEAD_NSCONZ_INDEX];
rsthead->nxconz = data[INTEHEAD_NXCONZ_INDEX];
rsthead->niconz = data[INTEHEAD_NICONZ_INDEX];
rsthead->ncwmax = data[INTEHEAD_NCWMAX_INDEX];
@ -87,8 +88,7 @@ ecl_rsthead_type * ecl_rsthead_alloc_from_kw( int report_step , const ecl_kw_typ
// The only derived quantity
rsthead->sim_time = rsthead_date( rsthead->day , rsthead->month , rsthead->year );
}
if (doubhead_kw)
rsthead->sim_days = ecl_kw_iget_double( doubhead_kw , DOUBHEAD_DAYS_INDEX );
rsthead->sim_days = ecl_kw_iget_double( doubhead_kw , DOUBHEAD_DAYS_INDEX );
if (logihead_kw)
rsthead->dualp = ecl_kw_iget_bool( logihead_kw , LOGIHEAD_DUALP_INDEX);
@ -109,16 +109,12 @@ ecl_rsthead_type * ecl_rsthead_alloc_from_kw( int report_step , const ecl_kw_typ
ecl_rsthead_type * ecl_rsthead_alloc( const ecl_file_view_type * rst_view, int report_step) {
const ecl_kw_type * intehead_kw = ecl_file_view_iget_named_kw( rst_view , INTEHEAD_KW , 0);
// const ecl_kw_type * doubhead_kw = ecl_file_view_iget_named_kw(rst_view, DOUBHEAD_KW, 0);
const ecl_kw_type * doubhead_kw = NULL;//ecl_file_view_iget_named_kw( rst_view , DOUBHEAD_KW , 0);
const ecl_kw_type * doubhead_kw = ecl_file_view_iget_named_kw( rst_view , DOUBHEAD_KW , 0);
const ecl_kw_type * logihead_kw = NULL;
if (ecl_file_view_has_kw(rst_view, LOGIHEAD_KW))
logihead_kw = ecl_file_view_iget_named_kw( rst_view , LOGIHEAD_KW , 0);
if (ecl_file_view_has_kw(rst_view, DOUBHEAD_KW))
doubhead_kw = ecl_file_view_iget_named_kw(rst_view, DOUBHEAD_KW, 0);
if (ecl_file_view_has_kw( rst_view , SEQNUM_KW)) {
const ecl_kw_type * seqnum_kw = ecl_file_view_iget_named_kw( rst_view , SEQNUM_KW , 0);
report_step = ecl_kw_iget_int( seqnum_kw , 0);

View File

@ -139,7 +139,7 @@ struct ecl_smspec_struct {
bool has_lgr;
float_vector_type * params_default;
stringlist_type * restart_list; /* List of ECLBASE names of restart files this case has been restarted from (if any). */
char * restart_case;
};
@ -269,7 +269,7 @@ static ecl_smspec_type * ecl_smspec_alloc_empty(bool write_mode , const char * k
ecl_smspec->time_seconds = -1;
ecl_smspec->index_map = int_vector_alloc(0,0);
ecl_smspec->restart_list = stringlist_alloc_new();
ecl_smspec->restart_case = NULL;
ecl_smspec->params_default = float_vector_alloc(0 , PARAMS_GLOBAL_DEFAULT);
ecl_smspec->write_mode = write_mode;
ecl_smspec->need_nums = false;
@ -278,6 +278,29 @@ static ecl_smspec_type * ecl_smspec_alloc_empty(bool write_mode , const char * k
}
int * ecl_smspec_alloc_mapping( const ecl_smspec_type * self, const ecl_smspec_type * other) {
int params_size = ecl_smspec_get_params_size( self );
int * mapping = util_malloc( params_size * sizeof * mapping );
for (int i = 0; i < params_size; i++)
mapping[i] = -1;
for (int i=0; i < ecl_smspec_num_nodes( self ); i++) {
const smspec_node_type * self_node = ecl_smspec_iget_node( self , i );
int self_index = smspec_node_get_params_index( self_node );
const char * key = smspec_node_get_gen_key1( self_node );
if (ecl_smspec_has_general_var( other , key)) {
const smspec_node_type * other_node = ecl_smspec_get_general_var_node( other , key);
int other_index = smspec_node_get_params_index(other_node);
mapping[ self_index ] = other_index;
}
}
return mapping;
}
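/*
  The mapping returned above is indexed by this header's params index and holds
  the matching params index in 'other', or -1 when 'other' lacks the key. A
  sketch of applying it when copying a params vector (the names are
  illustrative; the real use is ecl_sum_tstep_alloc_remap_copy further down in
  this change):
*/
static void remap_params( const int * mapping , int params_size ,
                          const float * other_params , float default_value ,
                          float * self_params ) {
  for (int i = 0; i < params_size; i++)
    self_params[i] = (mapping[i] >= 0) ? other_params[ mapping[i] ] : default_value;
}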
/**
Observe that the index here is into the __INTERNAL__ indexing in
the smspec_nodes vector; and in general widely different from the
@ -907,62 +930,30 @@ bool ecl_smspec_needs_num( ecl_smspec_var_type var_type ) {
}
static bool ecl_smspec_kw_equal(const ecl_file_type * header , const ecl_file_type * restart_header , const char * kw , int cmp_elements) {
if (ecl_file_has_kw( header , kw ) == ecl_file_has_kw( restart_header , kw )) {
if (ecl_file_has_kw( header , kw)) {
ecl_kw_type *ecl_kw1 = ecl_file_iget_named_kw(header, kw , 0);
ecl_kw_type *ecl_kw2 = ecl_file_iget_named_kw(restart_header, kw , 0);
return ecl_kw_block_equal( ecl_kw1 , ecl_kw2 , cmp_elements);
} else
return true; // None of the headers have this keyword - that is equality!
bool ecl_smspec_equal( const ecl_smspec_type * self , const ecl_smspec_type * other) {
bool equal = true;
if (vector_get_size( self->smspec_nodes ) == vector_get_size( other->smspec_nodes)) {
for (int i=0; i < vector_get_size( self->smspec_nodes ); i++) {
const smspec_node_type * node1 = vector_iget_const( self->smspec_nodes , i );
const smspec_node_type * node2 = vector_iget_const( other->smspec_nodes , i );
if (!smspec_node_equal( node1,node2)) {
equal = false;
break;
}
}
} else
return false;
equal = false;
return equal;
}
/**
When loading historical summary results the SMSPEC header of the
historical results is not internalized, i.e. it is essential that
   the historical case has an identical header to the main case. This
   function compares the ecl_file representation of two SMSPEC
headers.
Unfortunately there are legitimate reasons why some of the headers
   can be different; in particular new wells can appear. In the code
   below we therefore only check a limited set of keywords for
equality.
*/
static bool ecl_smspec_file_equal( const ecl_file_type * header1 , const ecl_file_type * header2) {
if (! ecl_smspec_kw_equal( header1 , header2 , KEYWORDS_KW , 0))
return false;
if (! ecl_smspec_kw_equal( header1 , header2 , STARTDAT_KW , 0))
return false;
if (! ecl_smspec_kw_equal( header1 , header2 , UNITS_KW , 0))
return false;
if (! ecl_smspec_kw_equal( header1 , header2 , DIMENS_KW , 4)) // Only the first four elements are compared.
return false;
if (!ecl_smspec_kw_equal( header1, header2 , LGRS_KW , 0))
return false;
return true;
}
/**
This will iterate backwards through the RESTART header in the
SMSPEC files to find names of the case(s) this case has been
restarted from.
The case names are internalized in the restart_list field of the
ecl_smspec instance. The actual loading of the restart summary data
is subsequently handled by the ecl_sum_data function.
*/
static void ecl_smspec_load_restart( ecl_smspec_type * ecl_smspec , const ecl_file_type * header ) {
if (ecl_file_has_kw( header , RESTART_KW )) {
@ -981,32 +972,12 @@ static void ecl_smspec_load_restart( ecl_smspec_type * ecl_smspec , const ecl_fi
util_alloc_file_components( ecl_smspec->header_file , &path , NULL , NULL );
smspec_header = ecl_util_alloc_exfilename( path , restart_base , ECL_SUMMARY_HEADER_FILE , ecl_smspec->formatted , 0);
if (smspec_header == NULL)
fprintf(stderr,"Warning - the header file: %s refers to restart from case: %s - which was not found.... \n", ecl_smspec->header_file , restart_base);
else {
if (!util_same_file(smspec_header , ecl_smspec->header_file)) { /* Restart from the current case is ignored. */
/*
Verify that this smspec_header is not already in the list of restart
cases. Don't know if this is at all possible, but this test
nevertheless prevents against a recursive death.
*/
if (!stringlist_contains( ecl_smspec->restart_list , restart_base)) {
ecl_file_type * restart_header = ecl_file_open( smspec_header , 0);
if (restart_header) {
if (ecl_smspec_file_equal( header , restart_header)) {
stringlist_insert_copy( ecl_smspec->restart_list , 0 , restart_base );
ecl_smspec_load_restart( ecl_smspec , restart_header); /* Recursive call */
} else
fprintf(stderr,"** Warning: the historical case: %s is not compatible with the current case - ignored.\n" ,
ecl_file_get_src_file( restart_header));
ecl_file_close( restart_header );
} else
fprintf(stderr,"** Warning: failed to historical case:%s - ignored.\n", smspec_header);
}
}
if (!util_same_file(smspec_header , ecl_smspec->header_file)) /* Restart from the current case is ignored. */ {
char * tmp_path = util_alloc_filename( path , restart_base , NULL );
ecl_smspec->restart_case = util_alloc_abs_path(tmp_path);
free( tmp_path );
}
util_safe_free( path );
util_safe_free( smspec_header );
}
@ -1620,8 +1591,8 @@ const char * ecl_smspec_get_header_file( const ecl_smspec_type * ecl_smspec ) {
const stringlist_type * ecl_smspec_get_restart_list( const ecl_smspec_type * ecl_smspec) {
return ecl_smspec->restart_list;
const char * ecl_smspec_get_restart_case( const ecl_smspec_type * ecl_smspec) {
return ecl_smspec->restart_case;
}
@ -1644,7 +1615,7 @@ void ecl_smspec_free(ecl_smspec_type *ecl_smspec) {
int_vector_free( ecl_smspec->index_map );
float_vector_free( ecl_smspec->params_default );
vector_free( ecl_smspec->smspec_nodes );
stringlist_free( ecl_smspec->restart_list );
free( ecl_smspec->restart_case );
free( ecl_smspec );
}

View File

@ -18,6 +18,7 @@
*/
#include <stdlib.h>
#define _USE_MATH_DEFINES // needed for M_PI with MSVC on Windows
#include <math.h>
#include <stdbool.h>
@ -46,7 +47,7 @@
/**
The ecl_subsidence_struct datastructure is the main structure for
calculating the subsidence from time lapse ECLIPSE simulations.
calculating the subsidence from time lapse ECLIPSE simulations.
*/
struct ecl_subsidence_struct {
@ -81,17 +82,17 @@ struct ecl_subsidence_survey_struct {
/*****************************************************************/
static ecl_subsidence_survey_type * ecl_subsidence_survey_alloc_empty(const ecl_subsidence_type * sub,
static ecl_subsidence_survey_type * ecl_subsidence_survey_alloc_empty(const ecl_subsidence_type * sub,
const char * name) {
ecl_subsidence_survey_type * survey = util_malloc( sizeof * survey );
UTIL_TYPE_ID_INIT( survey , ECL_SUBSIDENCE_SURVEY_ID );
survey->grid_cache = sub->grid_cache;
survey->aquifer_cell = sub->aquifer_cell;
survey->name = util_alloc_string_copy( name );
survey->porv = util_calloc( ecl_grid_cache_get_size( sub->grid_cache ) , sizeof * survey->porv );
survey->pressure = util_calloc( ecl_grid_cache_get_size( sub->grid_cache ) , sizeof * survey->pressure );
return survey;
}
@ -108,7 +109,7 @@ static ecl_subsidence_survey_type * ecl_subsidence_survey_alloc_PRESSURE(ecl_sub
int active_index;
ecl_kw_type * init_porv_kw = ecl_file_iget_named_kw( ecl_subsidence->init_file , PORV_KW , 0); /*Global indexing*/
ecl_kw_type * pressure_kw = ecl_file_view_iget_named_kw( restart_view , PRESSURE_KW , 0); /*Active indexing*/
for (active_index = 0; active_index < size; active_index++){
survey->porv[ active_index ] = ecl_kw_iget_float( init_porv_kw , global_index[active_index] );
survey->pressure[ active_index ] = ecl_kw_iget_float( pressure_kw , active_index );
@ -138,7 +139,7 @@ static void ecl_subsidence_survey_free__( void * __subsidence_survey ) {
static double ecl_subsidence_survey_eval( const ecl_subsidence_survey_type * base_survey ,
const ecl_subsidence_survey_type * monitor_survey,
ecl_region_type * region ,
double utm_x , double utm_y , double depth,
double utm_x , double utm_y , double depth,
double compressibility, double poisson_ratio) {
const ecl_grid_cache_type * grid_cache = base_survey->grid_cache;
@ -154,14 +155,44 @@ static double ecl_subsidence_survey_eval( const ecl_subsidence_survey_type * bas
for (index = 0; index < size; index++)
weight[index] = base_survey->porv[index] * base_survey->pressure[index];
}
deltaz = compressibility * 31.83099*(1-poisson_ratio) *
deltaz = compressibility * 31.83099*(1-poisson_ratio) *
ecl_grav_common_eval_biot_savart( grid_cache , region , base_survey->aquifer_cell , weight , utm_x , utm_y , depth );
free( weight );
return deltaz;
}
static double ecl_subsidence_survey_eval_geertsma( const ecl_subsidence_survey_type * base_survey ,
const ecl_subsidence_survey_type * monitor_survey,
ecl_region_type * region ,
double utm_x , double utm_y , double depth,
double youngs_modulus, double poisson_ratio, double seabed) {
const ecl_grid_cache_type * grid_cache = base_survey->grid_cache;
const double * cell_volume = ecl_grid_cache_get_volume( grid_cache );
const int size = ecl_grid_cache_get_size( grid_cache );
double scale_factor = 1e4 *(1 + poisson_ratio) * ( 1 - 2*poisson_ratio) / ( 4*M_PI*( 1 - poisson_ratio) * youngs_modulus );
double * weight = util_calloc( size , sizeof * weight );
double deltaz;
for (int index = 0; index < size; index++) {
if (monitor_survey) {
weight[index] = - scale_factor * cell_volume[index] * (monitor_survey->pressure[index] - base_survey->pressure[index]);
} else {
weight[index] = - scale_factor * cell_volume[index] * (base_survey->pressure[index] );
}
}
deltaz = ecl_grav_common_eval_geertsma( grid_cache , region , base_survey->aquifer_cell , weight , utm_x , utm_y , depth , poisson_ratio, seabed);
free( weight );
return deltaz;
}
/*****************************************************************/
/**
The grid instance is only used during the construction phase. The
@ -175,7 +206,7 @@ ecl_subsidence_type * ecl_subsidence_alloc( const ecl_grid_type * ecl_grid, cons
ecl_subsidence->init_file = init_file;
ecl_subsidence->grid_cache = ecl_grid_cache_alloc( ecl_grid );
ecl_subsidence->aquifer_cell = ecl_grav_common_alloc_aquifer_cell( ecl_subsidence->grid_cache , init_file );
ecl_subsidence->surveys = hash_alloc();
return ecl_subsidence;
}
@ -205,14 +236,23 @@ static ecl_subsidence_survey_type * ecl_subsidence_get_survey( const ecl_subside
}
double ecl_subsidence_eval( const ecl_subsidence_type * subsidence , const char * base, const char * monitor , ecl_region_type * region ,
double utm_x, double utm_y , double depth,
double ecl_subsidence_eval( const ecl_subsidence_type * subsidence , const char * base, const char * monitor , ecl_region_type * region ,
double utm_x, double utm_y , double depth,
double compressibility, double poisson_ratio) {
ecl_subsidence_survey_type * base_survey = ecl_subsidence_get_survey( subsidence , base );
ecl_subsidence_survey_type * monitor_survey = ecl_subsidence_get_survey( subsidence , monitor );
return ecl_subsidence_survey_eval( base_survey , monitor_survey , region , utm_x , utm_y , depth , compressibility, poisson_ratio);
}
double ecl_subsidence_eval_geertsma( const ecl_subsidence_type * subsidence , const char * base, const char * monitor , ecl_region_type * region ,
double utm_x, double utm_y , double depth,
double youngs_modulus, double poisson_ratio, double seabed) {
ecl_subsidence_survey_type * base_survey = ecl_subsidence_get_survey( subsidence , base );
ecl_subsidence_survey_type * monitor_survey = ecl_subsidence_get_survey( subsidence , monitor );
return ecl_subsidence_survey_eval_geertsma( base_survey , monitor_survey , region , utm_x , utm_y , depth , youngs_modulus, poisson_ratio, seabed);
}
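/*
  A usage sketch of the new Geertsma based evaluation. The survey names, the
  observation point and the material parameters below are illustrative
  assumptions; this diff does not document the expected units of youngs_modulus
  or seabed.
*/
static double eval_geertsma_at_point( const ecl_subsidence_type * subsidence ,
                                      double utm_x , double utm_y , double seabed ) {
  const char * base_survey    = "BASE";      /* assumed to have been added to the subsidence instance beforehand */
  const char * monitor_survey = "MONITOR";
  double youngs_modulus = 5e9;               /* illustrative value */
  double poisson_ratio  = 0.25;              /* illustrative value */
  return ecl_subsidence_eval_geertsma( subsidence , base_survey , monitor_survey , NULL ,
                                       utm_x , utm_y , 0.0 ,
                                       youngs_modulus , poisson_ratio , seabed );
}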
void ecl_subsidence_free( ecl_subsidence_type * ecl_subsidence ) {
ecl_grid_cache_free( ecl_subsidence->grid_cache );
free( ecl_subsidence->aquifer_cell );

View File

@ -174,16 +174,7 @@ static bool ecl_sum_fread_data( ecl_sum_type * ecl_sum , const stringlist_type *
ecl_sum->data = ecl_sum_data_alloc( ecl_sum->smspec );
if (ecl_sum_data_fread( ecl_sum->data , data_files )) {
if (include_restart) {
const char * path = ecl_sum->path;
const stringlist_type * restart_cases = ecl_smspec_get_restart_list( ecl_sum->smspec );
stringlist_type * restart_files = stringlist_alloc_new();
int restart_nr;
for (restart_nr = 0; restart_nr < stringlist_get_size( restart_cases ); restart_nr++) {
ecl_util_alloc_summary_data_files(path , stringlist_iget( restart_cases , restart_nr ) , ecl_sum->fmt_case , restart_files );
ecl_sum_data_fread_restart( ecl_sum->data , restart_files );
}
stringlist_free( restart_files );
}
return true;
} else
@ -191,6 +182,15 @@ static bool ecl_sum_fread_data( ecl_sum_type * ecl_sum , const stringlist_type *
}
static void ecl_sum_fread_history( ecl_sum_type * ecl_sum ) {
ecl_sum_type * history = ecl_sum_fread_alloc_case__( ecl_smspec_get_restart_case( ecl_sum->smspec ) , ":" , true);
if (history) {
ecl_sum_data_add_case(ecl_sum->data , history->data );
ecl_sum_free( history );
}
}
static bool ecl_sum_fread(ecl_sum_type * ecl_sum , const char *header_file , const stringlist_type *data_files , bool include_restart) {
ecl_sum->smspec = ecl_smspec_fread_alloc( header_file , ecl_sum->key_join_string , include_restart);
@ -213,6 +213,9 @@ static bool ecl_sum_fread(ecl_sum_type * ecl_sum , const char *header_file , con
} else
return false;
if (include_restart && ecl_smspec_get_restart_case( ecl_sum->smspec ))
ecl_sum_fread_history( ecl_sum );
return true;
}

View File

@ -712,7 +712,7 @@ double_vector_type * ecl_sum_data_alloc_seconds_solution( const ecl_sum_data_typ
static void ecl_sum_data_append_tstep__( ecl_sum_data_type * data , int ministep_nr , ecl_sum_tstep_type * tstep) {
static void ecl_sum_data_append_tstep__( ecl_sum_data_type * data , ecl_sum_tstep_type * tstep) {
/*
Here the tstep is just appended naively, the vector will be
sorted by ministep_nr before the data instance is returned.
@ -854,7 +854,7 @@ ecl_sum_tstep_type * ecl_sum_data_add_new_tstep( ecl_sum_data_type * data , int
if (vector_get_size( data->data ) > 0)
prev_tstep = vector_get_last( data->data );
ecl_sum_data_append_tstep__( data , ministep_nr , tstep );
ecl_sum_data_append_tstep__( data , tstep );
{
bool rebuild_index = true;
@ -938,7 +938,7 @@ static void ecl_sum_data_add_ecl_file(ecl_sum_data_type * data ,
if (tstep != NULL) {
if (load_end == 0 || (ecl_sum_tstep_get_sim_time( tstep ) < load_end))
ecl_sum_data_append_tstep__( data , ministep_nr , tstep );
ecl_sum_data_append_tstep__( data , tstep );
else
/* This tstep is in a time-period overlapping with data we
already have; discard this. */
@ -950,6 +950,42 @@ static void ecl_sum_data_add_ecl_file(ecl_sum_data_type * data ,
}
void ecl_sum_data_add_case(ecl_sum_data_type * self, const ecl_sum_data_type * other) {
int * param_mapping = NULL;
bool header_equal = ecl_smspec_equal( self->smspec , other->smspec);
float default_value = 0;
if (!header_equal)
param_mapping = ecl_smspec_alloc_mapping( self->smspec , other->smspec );
for (int tstep_nr = 0; tstep_nr < ecl_sum_data_get_length( other ); tstep_nr++) {
ecl_sum_tstep_type * other_tstep = ecl_sum_data_iget_ministep( other , tstep_nr );
/*
      The dataset 'self' is authoritative in the time interval where
it has data, so if 'other' also has data in the same time interval
that is discarded.
*/
if (!time_interval_contains( self->sim_time , ecl_sum_tstep_get_sim_time( other_tstep ))) {
ecl_sum_tstep_type * new_tstep;
if (header_equal)
new_tstep = ecl_sum_tstep_alloc_copy( other_tstep );
else
new_tstep = ecl_sum_tstep_alloc_remap_copy( other_tstep , self->smspec , default_value , param_mapping );
ecl_sum_data_append_tstep__( self , new_tstep );
}
}
ecl_sum_data_build_index( self );
free( param_mapping );
}
static bool ecl_sum_data_check_file( ecl_file_type * ecl_file ) {
if (ecl_file_has_kw( ecl_file , PARAMS_KW ) &&
(ecl_file_get_num_named_kw( ecl_file , PARAMS_KW ) == ecl_file_get_num_named_kw( ecl_file , MINISTEP_KW)))

View File

@ -69,6 +69,31 @@ struct ecl_sum_tstep_struct {
};
ecl_sum_tstep_type * ecl_sum_tstep_alloc_remap_copy( const ecl_sum_tstep_type * src , const ecl_smspec_type * new_smspec, float default_value , const int * params_map) {
int params_size = ecl_smspec_get_params_size( new_smspec );
ecl_sum_tstep_type * target = util_alloc_copy(src , sizeof * src );
target->smspec = new_smspec;
target->data = util_malloc( params_size * sizeof * target->data );
target->data_size = params_size;
for (int i=0; i < params_size; i++) {
if (params_map[i] >= 0)
target->data[i] = src->data[ params_map[i] ];
else
target->data[i] = default_value;
}
return target;
}
ecl_sum_tstep_type * ecl_sum_tstep_alloc_copy( const ecl_sum_tstep_type * src ) {
ecl_sum_tstep_type * target = util_alloc_copy(src , sizeof * src );
target->data = util_alloc_copy( src->data , src->data_size * sizeof * src->data );
return target;
}
static ecl_sum_tstep_type * ecl_sum_tstep_alloc( int report_step , int ministep_nr , const ecl_smspec_type * smspec) {
ecl_sum_tstep_type * tstep = util_malloc( sizeof * tstep );
UTIL_TYPE_ID_INIT( tstep , ECL_SUM_TSTEP_ID);

View File

@ -39,6 +39,7 @@
#define ECL_TYPE_NAME_CHAR "CHAR"
#define ECL_TYPE_NAME_C010 "C010"
#define ECL_TYPE_NAME_FLOAT "REAL"
#define ECL_TYPE_NAME_INT "INTE"
#define ECL_TYPE_NAME_DOUBLE "DOUB"
@ -113,6 +114,9 @@ const char * ecl_util_get_type_name( ecl_type_enum ecl_type ) {
case(ECL_CHAR_TYPE):
return ECL_TYPE_NAME_CHAR ;
break;
case(ECL_C010_TYPE):
return ECL_TYPE_NAME_C010;
break;
case(ECL_FLOAT_TYPE):
return ECL_TYPE_NAME_FLOAT;
break;
@ -146,6 +150,8 @@ ecl_type_enum ecl_util_get_type_from_name( const char * type_name ) {
ecl_type = ECL_DOUBLE_TYPE;
else if (strncmp( type_name , ECL_TYPE_NAME_CHAR , ECL_TYPE_LENGTH) == 0)
ecl_type = ECL_CHAR_TYPE;
else if (strncmp( type_name , ECL_TYPE_NAME_C010 , ECL_TYPE_LENGTH) == 0)
ecl_type = ECL_C010_TYPE;
else if (strncmp( type_name , ECL_TYPE_NAME_MESSAGE , ECL_TYPE_LENGTH) == 0)
ecl_type = ECL_MESS_TYPE;
else if (strncmp( type_name , ECL_TYPE_NAME_BOOL , ECL_TYPE_LENGTH) == 0)
@ -161,7 +167,10 @@ ecl_type_enum ecl_util_get_type_from_name( const char * type_name ) {
int ecl_util_get_sizeof_ctype_fortio(ecl_type_enum ecl_type) {
int size = ecl_util_get_sizeof_ctype ( ecl_type );
if (ecl_type == ECL_CHAR_TYPE)
size = ECL_STRING_LENGTH * sizeof(char);
size = ECL_STRING8_LENGTH * sizeof(char);
if (ecl_type == ECL_C010_TYPE)
size = ECL_STRING10_LENGTH * sizeof(char);
return size;
}
@ -176,7 +185,16 @@ int ecl_util_get_sizeof_ctype(ecl_type_enum ecl_type) {
corresponds to the size requirements of ECL_CHAR_TYPE instance
in memory; on disk the trailing \0 is not stored.
*/
sizeof_ctype = (ECL_STRING_LENGTH + 1) * sizeof(char);
sizeof_ctype = (ECL_STRING8_LENGTH + 1) * sizeof(char);
break;
case(ECL_C010_TYPE):
/*
       One element of C010 character data is a string section of 10
       characters + \0. Observe that the return value here
       corresponds to the size requirements of an ECL_C010_TYPE instance
       in memory; on disk the trailing \0 is not stored.
*/
sizeof_ctype = (ECL_STRING10_LENGTH + 1) * sizeof(char);
break;
case(ECL_FLOAT_TYPE):
sizeof_ctype = sizeof(float);
@ -1318,8 +1336,15 @@ static int ecl_util_get_num_slave_cpu__(basic_parser_type* parser, FILE* stream,
if (first_item[0] == '/') {
break;
}
else
++num_cpu;
else{
int no_of_tokens = stringlist_get_size(tokens);
int no_of_slaves =0;
if(no_of_tokens == 6 && util_sscanf_int(stringlist_iget(tokens, 4), &no_of_slaves)){
num_cpu += no_of_slaves;
}else{
++num_cpu;
}
}
}
stringlist_free( tokens );
}
@ -1353,14 +1378,14 @@ int ecl_util_get_num_cpu(const char * data_file) {
ert_ecl_unit_enum ecl_util_get_unit_set(const char * data_file) {
ert_ecl_unit_enum units = ERT_ECL_METRIC_UNITS;
ert_ecl_unit_enum units = ECL_METRIC_UNITS;
basic_parser_type * parser = basic_parser_alloc(" \t\r\n" , "\"\'" , NULL , NULL , "--" , "\n");
FILE * stream = util_fopen(data_file , "r");
if (basic_parser_fseek_string( parser , stream , "FIELD" , true , true)) { /* Seeks case insensitive. */
units = ERT_ECL_FIELD_UNITS;
units = ECL_FIELD_UNITS;
} else if (basic_parser_fseek_string( parser , stream , "LAB" , true , true)) { /* Seeks case insensitive. */
units = ERT_ECL_LAB_UNITS;
units = ECL_LAB_UNITS;
}
basic_parser_free( parser );

View File

@ -52,16 +52,19 @@
struct smspec_node_struct {
UTIL_TYPE_ID_DECLARATION;
char * gen_key1; /* The main composite key, i.e. WWCT:OP3 for this element. */
char * gen_key2; /* Some of the ijk based elements will have both a xxx:i,j,k and a xxx:num key. Some of the region_2_region elements will have both a xxx:num and a xxx:r2-r2 key. Mostly NULL. */
ecl_smspec_var_type var_type; /* The variable type */
char * wgname; /* The value of the WGNAMES vector for this element. */
char * keyword; /* The value of the KEYWORDS vector for this element. */
char * unit; /* The value of the UNITS vector for this element. */
int num; /* The value of the NUMS vector for this element - NB this will have the value SMSPEC_NUMS_INVALID if the smspec file does not have a NUMS vector. */
int * ijk; /* The ijk coordinates (NB: OFFSET 1) corresponding to the nums value - will be NULL if not relevant. */
char * lgr_name; /* The lgr name of the current variable - will be NULL for non-lgr variables. */
int * lgr_ijk; /* The (i,j,k) coordinate, in the local grid, if this is a LGR variable. Will be NULL for non-lgr variables. */
/*------------------------------------------- All members below this line are *derived* quantities. */
char * gen_key1; /* The main composite key, i.e. WWCT:OP3 for this element. */
char * gen_key2; /* Some of the ijk based elements will have both a xxx:i,j,k and a xxx:num key. Some of the region_2_region elements will have both a xxx:num and a xxx:r2-r2 key. Mostly NULL. */
ecl_smspec_var_type var_type; /* The variable type */
int * ijk; /* The ijk coordinates (NB: OFFSET 1) corresponding to the nums value - will be NULL if not relevant. */
bool rate_variable; /* Is this a rate variable (i.e. WOPR) or a state variable (i.e. BPR). Relevant when doing time interpolation. */
bool total_variable; /* Is this a total variable like WOPT? */
bool historical; /* Does the name end with 'H'? */
@ -70,6 +73,36 @@ struct smspec_node_struct {
};
static bool string_equal(const char * s1 , const char * s2)
{
if ((s1 == NULL) && (s2 == NULL))
return true;
else
return util_string_equal( s1 , s2 );
}
bool smspec_node_equal( const smspec_node_type * node1, const smspec_node_type * node2) {
if ((node1->params_index == node2->params_index) &&
(node1->num == node2->num) &&
(node1->var_type == node2->var_type) &&
(string_equal( node1->keyword, node2->keyword)) &&
(string_equal( node1->wgname, node2->wgname)) &&
(string_equal( node1->unit, node2->unit)) &&
(string_equal( node1->lgr_name, node2->lgr_name)))
{
if (node1->lgr_ijk)
return ((node1->lgr_ijk[0] == node2->lgr_ijk[0]) &&
(node1->lgr_ijk[1] == node2->lgr_ijk[1]) &&
(node1->lgr_ijk[2] == node2->lgr_ijk[2]));
return true;
}
return false;
}
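A minimal usage sketch of the comparison added above; the header path ert/ecl/smspec_node.h and the surrounding helper are assumptions for illustration only:

  #include <stdbool.h>
  #include <ert/ecl/smspec_node.h>

  /* Hedged sketch: check whether @node matches any of the @num_nodes entries in
     @nodes, using the smspec_node_equal() comparison introduced above. String
     members are compared NULL-safely, and LGR (i,j,k) triplets must also match. */
  static bool contains_node( const smspec_node_type ** nodes , int num_nodes ,
                             const smspec_node_type * node ) {
    int i;
    for (i = 0; i < num_nodes; i++)
      if (smspec_node_equal( nodes[i] , node ))
        return true;
    return false;
  }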
/*****************************************************************/
/*
The key formats for the combined keys like e.g. 'WWCT:OP_5' should
@ -308,6 +341,7 @@ static void smspec_node_set_flags( smspec_node_type * smspec_node) {
if (smspec_node->var_type == ECL_SMSPEC_WELL_VAR ||
smspec_node->var_type == ECL_SMSPEC_GROUP_VAR ||
smspec_node->var_type == ECL_SMSPEC_FIELD_VAR ||
smspec_node->var_type == ECL_SMSPEC_REGION_VAR ||
smspec_node->var_type == ECL_SMSPEC_COMPLETION_VAR ) {
const char *total_vars[] = {"OPT" , "GPT" , "WPT" , "GIT", "WIT", "OPTF" , "OPTS" , "OIT" , "OVPT" , "OVIT" , "MWT" ,
"WVPT" , "WVIT" , "GMT" , "GPTF" , "SGT" , "GST" , "FGT" , "GCT" , "GIMT" ,

View File

@ -1,8 +1,9 @@
SLAVES
-- slave datafile machine directory
-- name root hostname of data file
'RES-R2' 'base' 'rios' '/usr/models/res2' /
'RES-R3' 'base' 'sg-indigo' '/usr/models/res3' /
'RES-R2' 'base' 'rios' '/usr/models/res2' 3 /
'RES-R3' 'base' 'sg-indigo' '/usr/models/res3' A / -- a wrongly added character in the
-- "slaves token" position doesn't break parsing
-- Testing comments in middle
'RES-R5' 'base' 'sg-indigo' '/usr/models/res5' /
/testRubbish
'RES-R5' 'base' 'sg-indigo' '/usr/models/res5' 10 /
/testRubbish -- this line also counts
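If this is the deck behind filename3 in the updated ecl_util_get_num_cpu test further down (an assumption), the slave counts add up to 3 + 1 + 10 = 14, where the 'A' entry falls back to counting as a single CPU. With one additional CPU for the master run (presumably), that gives the 15 asserted in the test. The same reading makes the old deck of three plain slave lines come out as 3 + 1 = 4, matching the previous expectation.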

View File

@ -136,7 +136,7 @@ void test_truncated() {
test_work_area_type * work_area = test_work_area_alloc("ecl_file_truncated" );
{
ecl_grid_type * grid = ecl_grid_alloc_rectangular(20,20,20,1,1,1,NULL);
ecl_grid_fwrite_EGRID2( grid , "TEST.EGRID", ERT_ECL_METRIC_UNITS );
ecl_grid_fwrite_EGRID2( grid , "TEST.EGRID", ECL_METRIC_UNITS );
ecl_grid_free( grid );
}
{

View File

@ -28,11 +28,11 @@ int main(int argc , char ** argv) {
const char * filename3 = argv[3];
const char * filename4 = argv[4];
int num_cpu = 4;
test_assert_int_equal(ecl_util_get_num_cpu(filename1), num_cpu);
test_assert_int_equal(ecl_util_get_num_cpu(filename2), num_cpu);
test_assert_int_equal(ecl_util_get_num_cpu(filename3), num_cpu);
test_assert_int_equal(ecl_util_get_num_cpu(filename4), num_cpu);
test_assert_int_equal(ecl_util_get_num_cpu(filename1), 4);
test_assert_int_equal(ecl_util_get_num_cpu(filename2), 4);
test_assert_int_equal(ecl_util_get_num_cpu(filename3), 15);
test_assert_int_equal(ecl_util_get_num_cpu(filename4), 4);
exit(0);
}

View File

@ -62,7 +62,7 @@ void simple_test() {
{
test_work_area_type * test_area = test_work_area_alloc("ecl_grid_nnc");
ecl_grid_type * grid1;
ecl_grid_fwrite_EGRID2( grid0 , "TEST.EGRID" , ERT_ECL_METRIC_UNITS);
ecl_grid_fwrite_EGRID2( grid0 , "TEST.EGRID" , ECL_METRIC_UNITS);
grid1 = ecl_grid_alloc( "TEST.EGRID" );
verify_simple_nnc( grid1 );
@ -92,7 +92,7 @@ void overwrite_test() {
{
test_work_area_type * test_area = test_work_area_alloc("ecl_grid_nnc");
ecl_grid_type * grid1;
ecl_grid_fwrite_EGRID2( grid0 , "TEST.EGRID" , ERT_ECL_METRIC_UNITS);
ecl_grid_fwrite_EGRID2( grid0 , "TEST.EGRID" , ECL_METRIC_UNITS);
grid1 = ecl_grid_alloc( "TEST.EGRID" );
verify_simple_nnc( grid1 );
@ -120,7 +120,7 @@ void list_test() {
{
test_work_area_type * test_area = test_work_area_alloc("ecl_grid_nnc");
ecl_grid_type * grid1;
ecl_grid_fwrite_EGRID2( grid0 , "TEST.EGRID" , ERT_ECL_METRIC_UNITS);
ecl_grid_fwrite_EGRID2( grid0 , "TEST.EGRID" , ECL_METRIC_UNITS);
grid1 = ecl_grid_alloc( "TEST.EGRID" );
verify_simple_nnc( grid1 );

View File

@ -28,7 +28,7 @@
void test_fwrite_EGRID(ecl_grid_type * grid ) {
test_work_area_type * work_area = test_work_area_alloc("grid-has-mapaxes");
ecl_grid_fwrite_EGRID2( grid , "TEST.EGRID", ERT_ECL_METRIC_UNITS);
ecl_grid_fwrite_EGRID2( grid , "TEST.EGRID", ECL_METRIC_UNITS);
{
ecl_grid_type * copy = ecl_grid_alloc( "TEST.EGRID" );
test_assert_true( ecl_grid_compare( grid , copy , false , false , true ));

View File

@ -34,7 +34,7 @@ void test_write_depth(const ecl_grid_type * grid) {
test_work_area_type * test_area = test_work_area_alloc("write_depth");
{
fortio_type * init_file = fortio_open_writer( "INIT" , false , ECL_ENDIAN_FLIP );
ecl_grid_fwrite_depth( grid , init_file , ERT_ECL_METRIC_UNITS);
ecl_grid_fwrite_depth( grid , init_file , ECL_METRIC_UNITS);
fortio_fclose( init_file );
}
{
@ -55,7 +55,7 @@ void test_write_dims(const ecl_grid_type * grid) {
test_work_area_type * test_area = test_work_area_alloc("write_dims");
{
fortio_type * init_file = fortio_open_writer( "INIT" , false , ECL_ENDIAN_FLIP );
ecl_grid_fwrite_dims( grid , init_file , ERT_ECL_METRIC_UNITS );
ecl_grid_fwrite_dims( grid , init_file , ECL_METRIC_UNITS );
fortio_fclose( init_file );
}
{

View File

@ -68,7 +68,7 @@ int main(int argc , char ** argv) {
total_volume += grid_volume;
total_diff += fabs( eclipse_volume - grid_volume );
if (!util_double_approx_equal__( grid_volume , eclipse_volume , 2.5e-3)) {
if (!util_double_approx_equal__( grid_volume , eclipse_volume , 2.5e-3, 0.00)) {
double diff = 100 * (grid_volume - eclipse_volume) / eclipse_volume;
printf("Error in cell: %d V1: %g V2: %g diff:%g %% \n", iglobal , grid_volume , eclipse_volume , diff);
error_count++;
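The volume check now passes an explicit fourth argument to util_double_approx_equal__. A hedged sketch of the assumed semantics is given below: an absolute-difference tolerance alongside the 2.5e-3 relative one, with 0.00 effectively disabling the absolute check. This mirrors the assumed behaviour, not the util.c implementation:

  #include <math.h>
  #include <stdbool.h>

  /* Assumed semantics only: accept when the absolute difference is within
     abs_eps, or the relative difference (scaled by the magnitudes) is within
     rel_eps. Passing abs_eps = 0.0 leaves only the relative criterion. */
  static bool approx_equal( double a , double b , double rel_eps , double abs_eps ) {
    double diff = fabs( a - b );
    if (diff <= abs_eps)
      return true;
    return diff <= rel_eps * 0.5 * (fabs(a) + fabs(b));
  }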

View File

@ -50,7 +50,7 @@ void test_write_header() {
fortio_type * f = fortio_open_writer( "FOO1.INIT" , false , ECL_ENDIAN_FLIP );
ecl_kw_type * poro = ecl_kw_alloc( "PORO" , ecl_grid_get_global_size( ecl_grid ) , ECL_FLOAT_TYPE );
ecl_kw_scalar_set_float( poro , 0.10 );
ecl_init_file_fwrite_header( f , ecl_grid , poro , 7 , start_time );
ecl_init_file_fwrite_header( f , ecl_grid , poro , ECL_FIELD_UNITS, 7 , start_time );
ecl_kw_free( poro );
fortio_fclose( f );
}
@ -61,7 +61,7 @@ void test_write_header() {
fortio_type * f = fortio_open_writer( "FOO2.INIT" , false , ECL_ENDIAN_FLIP );
ecl_kw_type * poro = ecl_kw_alloc( "PORO" , ecl_grid_get_global_size( ecl_grid ) , ECL_FLOAT_TYPE );
ecl_kw_scalar_set_float( poro , 0.10 );
ecl_init_file_fwrite_header( f , ecl_grid , poro , 7 , start_time );
ecl_init_file_fwrite_header( f , ecl_grid , poro , ECL_FIELD_UNITS, 7 , start_time );
ecl_kw_free( poro );
fortio_fclose( f );
}
@ -80,7 +80,7 @@ void test_write_header() {
// Poro == NULL
{
fortio_type * f = fortio_open_writer( "FOO3.INIT" , false , ECL_ENDIAN_FLIP );
ecl_init_file_fwrite_header( f , ecl_grid , NULL , 7 , start_time );
ecl_init_file_fwrite_header( f , ecl_grid , NULL , ECL_METRIC_UNITS, 7 , start_time );
fortio_fclose( f );
}
test_work_area_free( test_area );

Some files were not shown because too many files have changed in this diff.