// opm-simulators: opm/simulators/linalg/gpuistl/GpuBuffer.cpp
/*
Copyright 2024 SINTEF AS
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <fmt/core.h>
#include <opm/simulators/linalg/gpuistl/GpuBuffer.hpp>
#include <opm/simulators/linalg/gpuistl/GpuView.hpp>
#include <opm/simulators/linalg/gpuistl/detail/gpu_safe_call.hpp>
namespace Opm::gpuistl
{
template <class T>
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::GpuBuffer(const std::vector<T>& data)
: GpuBuffer(data.data(), data.size())
2024-05-15 06:43:46 -05:00
{
}
template <class T>
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::GpuBuffer(const size_t numberOfElements)
2024-05-15 06:43:46 -05:00
: m_numberOfElements(numberOfElements)
{
if (numberOfElements < 1) {
2024-08-22 07:12:30 -05:00
OPM_THROW(std::invalid_argument, "Setting a GpuBuffer size to a non-positive number is not allowed");
2024-05-15 06:43:46 -05:00
}
2024-08-23 04:12:13 -05:00
OPM_GPU_SAFE_CALL(cudaMalloc(&m_dataOnDevice, sizeof(T) * m_numberOfElements));
2024-05-15 06:43:46 -05:00
}
template <class T>
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::GpuBuffer(const T* dataOnHost, const size_t numberOfElements)
: GpuBuffer(numberOfElements)
2024-05-15 06:43:46 -05:00
{
2024-08-23 04:12:13 -05:00
OPM_GPU_SAFE_CALL(cudaMemcpy(
m_dataOnDevice, dataOnHost, m_numberOfElements * sizeof(T), cudaMemcpyHostToDevice));
2024-05-15 06:43:46 -05:00
}
template <class T>
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::GpuBuffer(const GpuBuffer<T>& other)
: GpuBuffer(other.m_numberOfElements)
2024-05-15 06:43:46 -05:00
{
assertHasElements();
assertSameSize(other);
2024-08-23 04:12:13 -05:00
OPM_GPU_SAFE_CALL(cudaMemcpy(m_dataOnDevice,
2024-05-15 06:43:46 -05:00
other.m_dataOnDevice,
m_numberOfElements * sizeof(T),
2024-05-15 06:43:46 -05:00
cudaMemcpyDeviceToDevice));
}
template <class T>
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::~GpuBuffer()
2024-05-15 06:43:46 -05:00
{
2024-08-23 04:12:13 -05:00
OPM_GPU_WARN_IF_ERROR(cudaFree(m_dataOnDevice));
2024-05-15 06:43:46 -05:00
}
template <typename T>
2024-08-22 07:12:30 -05:00
typename GpuBuffer<T>::size_type
GpuBuffer<T>::size() const
2024-05-15 06:43:46 -05:00
{
return m_numberOfElements;
2024-05-15 06:43:46 -05:00
}
template <typename T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::resize(size_t newSize)
2024-05-15 06:43:46 -05:00
{
if (newSize < 1) {
2024-08-22 07:12:30 -05:00
OPM_THROW(std::invalid_argument, "Setting a GpuBuffer size to a non-positive number is not allowed");
2024-05-15 06:43:46 -05:00
}
2024-09-23 07:55:07 -05:00
if (m_numberOfElements == 0) {
// We have no data, so we can just allocate new memory
OPM_GPU_SAFE_CALL(cudaMalloc(&m_dataOnDevice, sizeof(T) * newSize));
}
else {
// Allocate memory for temporary buffer
T* tmpBuffer = nullptr;
OPM_GPU_SAFE_CALL(cudaMalloc(&tmpBuffer, sizeof(T) * m_numberOfElements));
// Move the data from the old to the new buffer with truncation
size_t sizeOfMove = std::min({m_numberOfElements, newSize});
OPM_GPU_SAFE_CALL(cudaMemcpy(tmpBuffer,
m_dataOnDevice,
sizeOfMove * sizeof(T),
cudaMemcpyDeviceToDevice));
// free the old buffer
OPM_GPU_SAFE_CALL(cudaFree(m_dataOnDevice));
// swap the buffers
m_dataOnDevice = tmpBuffer;
}
2024-05-15 06:43:46 -05:00
// update size
m_numberOfElements = newSize;
}
template <typename T>
std::vector<T>
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::asStdVector() const
2024-05-15 06:43:46 -05:00
{
std::vector<T> temporary(m_numberOfElements);
2024-05-15 06:43:46 -05:00
copyToHost(temporary);
return temporary;
}
template <typename T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::assertSameSize(const GpuBuffer<T>& x) const
2024-05-15 06:43:46 -05:00
{
assertSameSize(x.m_numberOfElements);
}
template <typename T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::assertSameSize(size_t size) const
2024-05-15 06:43:46 -05:00
{
if (size != m_numberOfElements) {
OPM_THROW(std::invalid_argument,
fmt::format("Given buffer has {}, while we have {}.", size, m_numberOfElements));
}
}
template <typename T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::assertHasElements() const
2024-05-15 06:43:46 -05:00
{
if (m_numberOfElements <= 0) {
OPM_THROW(std::invalid_argument, "We have 0 elements");
}
}
template <typename T>
T*
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::data()
2024-05-15 06:43:46 -05:00
{
return m_dataOnDevice;
}
template <typename T>
const T*
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::data() const
2024-05-15 06:43:46 -05:00
{
return m_dataOnDevice;
}
template <class T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::copyFromHost(const T* dataPointer, size_t numberOfElements)
2024-05-15 06:43:46 -05:00
{
if (numberOfElements > size()) {
OPM_THROW(std::runtime_error,
fmt::format("Requesting to copy too many elements. buffer has {} elements, while {} was requested.",
size(),
numberOfElements));
}
2024-08-23 04:12:13 -05:00
OPM_GPU_SAFE_CALL(cudaMemcpy(data(), dataPointer, numberOfElements * sizeof(T), cudaMemcpyHostToDevice));
2024-05-15 06:43:46 -05:00
}
template <class T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::copyToHost(T* dataPointer, size_t numberOfElements) const
2024-05-15 06:43:46 -05:00
{
assertSameSize(numberOfElements);
2024-08-23 04:12:13 -05:00
OPM_GPU_SAFE_CALL(cudaMemcpy(dataPointer, data(), numberOfElements * sizeof(T), cudaMemcpyDeviceToHost));
2024-05-15 06:43:46 -05:00
}
template <class T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::copyFromHost(const std::vector<T>& data)
2024-05-15 06:43:46 -05:00
{
copyFromHost(data.data(), data.size());
}
template <class T>
void
2024-08-22 07:12:30 -05:00
GpuBuffer<T>::copyToHost(std::vector<T>& data) const
2024-05-15 06:43:46 -05:00
{
copyToHost(data.data(), data.size());
}
// Explicit instantiations for the element types used by the simulator.
template class GpuBuffer<double>;
template class GpuBuffer<float>;
template class GpuBuffer<int>;
template <class T>
2024-08-22 08:27:23 -05:00
GpuView<const T> make_view(const GpuBuffer<T>& buf) {
return GpuView<const T>(buf.data(), buf.size());
2024-05-15 06:43:46 -05:00
}
// Explicit instantiations of make_view for the supported element types.
template GpuView<const double> make_view<double>(const GpuBuffer<double>&);
template GpuView<const float> make_view<float>(const GpuBuffer<float>&);
template GpuView<const int> make_view<int>(const GpuBuffer<int>&);
} // namespace Opm::gpuistl