preCICE
Loading...
Searching...
No Matches
BatchedRBFSolver.hpp
Go to the documentation of this file.
1#pragma once
2#ifndef PRECICE_NO_KOKKOS_KERNELS
3
4#include <Kokkos_Core.hpp>
5#include <array>
6#include <cmath>
7#include <functional>
8#include <numeric>
15#include "mesh/Mesh.hpp"
17#include "profiling/Event.hpp"
18
20
21namespace precice::mapping {
22
33template <typename RADIAL_BASIS_FUNCTION_T>
// NOTE(review): the class head (original listing line 34) was lost in extraction;
// the members below belong to a class template, presumably `class BatchedRBFSolver`.
35public:
// Convenience alias so kernels/tests can name the basis-function type.
36 using RBF_T = RADIAL_BASIS_FUNCTION_T;
37
// Builds all device-side data structures for the batched PU-RBF mapping
// (cluster offsets, ID maps, mesh views, weights, QR, kernel matrices, LU).
// The trailing GinkgoParameter argument (original line 47) is missing from
// this view — see the out-of-line definition below for its use.
40 BatchedRBFSolver(RBF_T basisFunction,
41 mesh::PtrMesh inMesh,
42 mesh::PtrMesh outMesh,
43 const std::vector<mesh::Vertex> &centers,
44 double clusterRadius,
45 Polynomial polynomial,
46 bool computeEvaluationOffline,
48
// Performs a consistent mapping of globalIn onto globalOut, component-wise
// for vector data (see definition below).
49 void solveConsistent(const time::Sample &globalIn, Eigen::VectorXd &globalOut);
50
51private:
52 mutable precice::logging::Logger _log{"mapping::BatchedRBFSolver"};
53
54 // Helper to dispatch the actual kernel. Needed to help with the template parameters
55 template <typename... Args>
56 void _dispatch_solve_kernel(bool polynomial, bool evaluation_op_available, Args &&...args);
57
// NOTE(review): several member declarations (offset views, global ID views,
// mesh/matrix views, data buffers) were stripped by the extraction; the bare
// numbers below are leftover listing indices, not code.
58 // Linear offsets for each cluster, i.e., all cluster sizes
61
62 // Stores for each cluster the VertexIDs
65
68
71
72 VectorView<> _qrMatrix; // flat view of (nCluster x verticesPerCluster_i x (dim + 1) = nCluster x verticesPerCluster_i x polyParams)
73 VectorView<> _qrTau; // flat view of Householder tau (nCluster x (dim + 1) = nCluster x polyParams)
74 PivotView<> _qrP; // flat view of Permutation and rank (nCluster x (dim + 2) = nCluster x (polyParams + rank))
75
77
80
81 // Currently only scalar data
83 // Kokkos::View<double *>::HostMirror _inDataMirror;
85 // Kokkos::View<double *>::HostMirror _outDataMirror;
86
89
// Immutable problem dimensions, fixed at construction.
92 const int _nCluster;
93 const int _dim; // Mesh dimension
96 // MappingConfiguration::GinkgoParameter _ginkgoParameter;
97};
98
// Out-of-line constructor definition. NOTE(review): the signature line carrying
// the constructor name (original listing line 100) and all kernel-launch calls
// (original lines 237, 242, 300/306, 317-319, 331-333, 340) were stripped by
// the extraction; the step comments below still document the intended flow.
99template <typename RADIAL_BASIS_FUNCTION_T>
101 mesh::PtrMesh inMesh,
102 mesh::PtrMesh outMesh,
103 const std::vector<mesh::Vertex> &centers,
104 double clusterRadius,
105 Polynomial polynomial,
106 bool computeEvaluationOffline,
108 : _basisFunction(basisFunction), _polynomial(polynomial), _nCluster(static_cast<int>(centers.size())), _dim(inMesh->getDimensions()), _computeEvaluationOffline(computeEvaluationOffline)
109{
// Precondition checks: polynomial "on" is unsupported, and the pivot-free LU
// below requires a strictly positive definite basis function.
111 PRECICE_CHECK(_polynomial != Polynomial::ON, "Setting polynomial to \"on\" for the mapping between \"{}\" and \"{}\" is not supported", inMesh->getName(), outMesh->getName());
112 // The LU decomposition uses no pivoting, which leads to divisions by zero if the diagonal contains zero entries, which is the case for our basis functions
113 PRECICE_CHECK(RADIAL_BASIS_FUNCTION_T::isStrictlyPositiveDefinite(), "batched solver is only available for positive definite basis functions, i.e., compact-polynomial functions and Gaussian.");
114
115 PRECICE_CHECK(!(inMesh->vertices().empty() || outMesh->vertices().empty()), "One of the meshes in the batched solvers is empty, which is invalid.");
116 PRECICE_CHECK(inMesh->getDimensions() == outMesh->getDimensions(), "Incompatible dimensions passed to the batched solver.");
117
118 precice::profiling::Event eInit("solver.initializeKokkos");
119 // We have to initialize Kokkos and Ginkgo here, as the initialization call allocates memory
120 // in the current setup, this will only initialize the device (and allocate memory) on the primary rank
121 // TODO: Document restriction: all mappings must use the same executor configuration within one participant
122 device::Device::initialize(ginkgoParameter.nThreads, ginkgoParameter.deviceId);
123 PRECICE_DEBUG("Using batched PU-RBF solver on executor \"{}\" for \"{}\" PU-RBF clusters in execution mode {}.", ginkgoParameter.executor, centers.size(), _computeEvaluationOffline ? "\"minimal-compute\" (evaluation offline)" : "\"minimal-memory\" (evaluation online)");
124 Kokkos::fence();
125 eInit.stop();
126
127// General assumption of the algorithm
// The algorithm indexes both meshes directly by VertexID, so IDs must be
// contiguous and equal to the vertex position (checked only in debug builds).
128#ifndef NDEBUG
129 for (int i = 0; i < inMesh->nVertices(); ++i) {
130 PRECICE_ASSERT(inMesh->vertices()[i].getID() == i);
131 }
132 for (int i = 0; i < outMesh->nVertices(); ++i) {
133 PRECICE_ASSERT(outMesh->vertices()[i].getID() == i);
134 }
135#endif
136
137 precice::profiling::Event eNearestNeighbors("solver.queryVertices");
138
139 // Step 1: Query n-nearest neighbors and compute offsets, which hold the range for each cluster
140
141 PRECICE_DEBUG("Computing cluster association on the GPU");
142
143 // Initialize the view for the GPU offsets
144 _inOffsets = VectorOffsetView<>("inOffsets", _nCluster + 1);
145 _outOffsets = VectorOffsetView<>("outOffsets", _nCluster + 1);
146
147 // we fill this view on the host side
148 auto hostIn = Kokkos::create_mirror_view(_inOffsets);
149 auto hostOut = Kokkos::create_mirror_view(_outOffsets);
150
151 // for the global IDs, we use now a std::vector which we emplace
152 // we need at least contiguous memory here
153 // TODO: Check the performance of reallocations
154 std::vector<VertexID> globalInIDs;
155 std::vector<VertexID> globalOutIDs;
156
157 // has to be a separate loop, as we first need to gather knowledge about
158 // the shape for the meshes
159 hostIn(0) = 0;
160 hostOut(0) = 0;
161 // To detect overflows
162 std::uint64_t inCheck = 0;
163 std::uint64_t outCheck = 0;
165 for (int i = 0; i < _nCluster; ++i) {
166 const auto &center = centers[i];
167
168 // First we handle the input side
169 auto inIDs = inMesh->index().getVerticesInsideBox(center, clusterRadius);
170 _maxInClusterSize = std::max(_maxInClusterSize, static_cast<int>(inIDs.size()));
171 std::uint64_t tmpIn = hostIn(i) + inIDs.size();
172
173 // Check overflows
174 if constexpr (std::numeric_limits<offset_1d_type>::digits < std::numeric_limits<std::uint64_t>::digits) {
175 PRECICE_CHECK(tmpIn < std::numeric_limits<offset_1d_type>::max(),
176 "The selected integer precision for the (input) vector offsets (\"offset_1d_type\") overflow. You might want to change the precision specified in \"device/KokkosTypes.hpp\"");
177 }
178 if constexpr (std::numeric_limits<offset_2d_type>::digits < std::numeric_limits<std::uint64_t>::digits) {
179 inCheck += static_cast<std::uint64_t>(inIDs.size() * inIDs.size());
180 PRECICE_CHECK(inCheck < std::numeric_limits<offset_2d_type>::max(),
181 "The selected integer precision for the (input) matrix offsets (\"offset_2d_type\") overflow. You might want to change the precision specified in \"device/KokkosTypes.hpp\"");
182 }
183 hostIn(i + 1) = static_cast<offset_1d_type>(tmpIn);
184 std::copy(inIDs.begin(), inIDs.end(), std::back_inserter(globalInIDs));
185
186 // ... and the same for the output side
// The output query shrinks the radius by NUMERICAL_ZERO_DIFFERENCE so that
// boundary vertices are not matched by the output-side box query.
187 auto outIDs = outMesh->index().getVerticesInsideBox(center, clusterRadius - math::NUMERICAL_ZERO_DIFFERENCE);
188 _maxOutClusterSize = std::max(_maxOutClusterSize, static_cast<int>(outIDs.size()));
189 std::uint64_t tmpOut = hostOut(i) + outIDs.size();
190
191 // Check overflows
192 if constexpr (std::numeric_limits<offset_1d_type>::digits < std::numeric_limits<std::uint64_t>::digits) {
193 PRECICE_CHECK(tmpOut < std::numeric_limits<offset_1d_type>::max(),
194 "The selected integer precision for the (output) vector offsets (\"offset_1d_type\") overflow. You might want to change the precision specified in \"device/KokkosTypes.hpp\"");
195 }
197 if constexpr (std::numeric_limits<offset_2d_type>::digits < std::numeric_limits<std::uint64_t>::digits) {
198 outCheck += static_cast<std::uint64_t>(outIDs.size() * inIDs.size()); // is in x out
199 PRECICE_CHECK(outCheck < std::numeric_limits<offset_2d_type>::max(),
200 "The selected integer precision for the (output) matrix offsets (\"offset_2d_type\") overflow. You might want to change the precision specified in \"device/KokkosTypes.hpp\"");
201 }
202 }
// NOTE(review): the brace/statement order around original lines 201-205 looks
// scrambled by the extraction (the two lines below should mirror the
// input-side handling at lines 183-184, inside the for loop) — verify against
// the original source.
203 hostOut(i + 1) = static_cast<offset_1d_type>(tmpOut);
204 std::copy(outIDs.begin(), outIDs.end(), std::back_inserter(globalOutIDs));
205 }
206
// Integer division is intentional here — a rough average to size kernel teams.
207 _avgClusterSize = hostIn(_nCluster /* = hostIn.extent(0) - 1 */) / _nCluster;
208 PRECICE_DEBUG("Average cluster size used to find a good team size of the kernel execution: {}", _avgClusterSize);
209
210 // Copy offsets onto the device
211 Kokkos::deep_copy(_inOffsets, hostIn);
212 Kokkos::deep_copy(_outOffsets, hostOut);
213
214 // ... now that we have the sizes, we transfer the map onto the device
215 _globalInIDs = GlobalIDView<>("globalInIDs", globalInIDs.size());
216 _globalOutIDs = GlobalIDView<>("globalOutIDs", globalOutIDs.size());
217
218 // Wrap in a view to perform deep copies further down
219 Kokkos::View<VertexID *, Kokkos::HostSpace, UnmanagedMemory>
220 tmpIn(globalInIDs.data(), globalInIDs.size());
221 Kokkos::View<VertexID *, Kokkos::HostSpace, UnmanagedMemory>
222 tmpOut(globalOutIDs.data(), globalOutIDs.size());
223
224 Kokkos::deep_copy(_globalInIDs, tmpIn);
225 Kokkos::deep_copy(_globalOutIDs, tmpOut);
226
227 Kokkos::fence();
228 eNearestNeighbors.stop();
229
230 precice::profiling::Event eOff2d("solver.kernel.compute2DOffsets");
231
232 // Step 2: Compute the matrix offsets on the device
233 PRECICE_DEBUG("Computing matrix offsets");
234 // We use a parallel scan for that
// NOTE(review): the kernel invocation computing the offsets (original line 237)
// is missing from this view; only the allocations/zero-fills remain.
235 _kernelOffsets = MatrixOffsetView<>("kernelOffsets", _nCluster + 1);
236 Kokkos::deep_copy(_kernelOffsets, 0);
238
240 _evaluationOffsets = MatrixOffsetView<>("evaluationOffsets", _nCluster + 1);
241 Kokkos::deep_copy(_evaluationOffsets, 0);
243 }
244 Kokkos::fence();
245 eOff2d.stop();
246 precice::profiling::Event eMesh("solver.copyMeshes");
247 // Step 3: Handle the mesh data structure and copy over to the device
248 PRECICE_DEBUG("Computing mesh data on the device");
249
250 _inMesh = MeshView<>("inMesh", inMesh->nVertices(), _dim);
251 _outMesh = MeshView<>("outMesh", outMesh->nVertices(), _dim);
252
253 auto hostInMesh = Kokkos::create_mirror_view(_inMesh);
254 auto hostOutMesh = Kokkos::create_mirror_view(_outMesh);
255
// Flatten vertex coordinates into the (nVertices x dim) host mirrors.
256 for (int i = 0; i < inMesh->nVertices(); ++i) {
257 const auto &v = inMesh->vertex(i);
258 for (int d = 0; d < _dim; ++d) {
259 hostInMesh(i, d) = v.rawCoords()[d];
260 }
261 }
262 for (int i = 0; i < outMesh->nVertices(); ++i) {
263 const auto &v = outMesh->vertex(i);
264 for (int d = 0; d < _dim; ++d) {
265 hostOutMesh(i, d) = v.rawCoords()[d];
266 }
267 }
268 // Copy to device
269 Kokkos::deep_copy(_inMesh, hostInMesh);
270 Kokkos::deep_copy(_outMesh, hostOutMesh);
271 Kokkos::fence();
272 eMesh.stop();
273 {
274 PRECICE_DEBUG("Computing PU-RBF weights");
275 precice::profiling::Event eWeights("solver.kernel.computeWeights");
276
277 // Step 4: Compute the weights for each vertex
278 // we first need to transfer the center coordinates and the meshes onto the device
279 MeshView<> centerMesh("centerMesh", _nCluster, _dim);
280 auto hostCenterMesh = Kokkos::create_mirror_view(centerMesh);
281 for (int i = 0; i < _nCluster; ++i) {
282 const auto &v = centers[i];
283 for (int d = 0; d < _dim; ++d) {
284 hostCenterMesh(i, d) = v.rawCoords()[d];
285 }
286 }
287 Kokkos::deep_copy(centerMesh, hostCenterMesh);
288
289 _normalizedWeights = VectorView<>("normalizedWeights", globalOutIDs.size());
// A C2 compact polynomial is always used as the partition-of-unity
// weighting function, independent of the mapping basis function.
290 CompactPolynomialC2 weightingFunction(clusterRadius);
291 // Computing the weights parallelizes over the number of output mesh vertices
292 int avgOutClusterSize = hostOut(_nCluster /* = hostOut.extent(0) - 1 */) / _nCluster;
293 bool success = kernel::compute_weights(_nCluster, avgOutClusterSize, globalOutIDs.size(), outMesh->nVertices(), _dim, _outOffsets,
294 centerMesh, _globalOutIDs, _outMesh, weightingFunction, _normalizedWeights);
295 PRECICE_CHECK(success, "Clustering resulted in unassigned vertices for the output mesh \"{}\".", outMesh->getName());
296 Kokkos::fence();
297 }
298
// Step 5 (polynomial QR). NOTE(review): the surrounding conditional and the
// do_batched_qr launch (original lines ~299-306) are missing from this view.
301 PRECICE_DEBUG("Computing polynomial QR");
302 precice::profiling::Event ePoly("solver.kernel.computePolynomialQR");
303 _qrMatrix = VectorView<>("qrMatrix", globalInIDs.size() * (_dim + 1)); // = nCluster x verticesPerCluster_i x polyParams
304 _qrTau = VectorView<>("qrTau", _nCluster * (_dim + 1)); // = nCluster x polyParams
305 _qrP = PivotView<>("qrP", _nCluster * (_dim + 2)); // = nCluster x (polyParams + rank)
307 Kokkos::fence();
308 }
309 precice::profiling::Event eMatr("solver.kernel.assembleInputMatrices");
310 // Step 6: Launch the parallel kernel to assemble the kernel matrices
311 PRECICE_DEBUG("Assemble batched matrices");
312 // The kernel matrices /////////////
// Read back the total flattened size (last scan entry) from the device.
313 offset_2d_type unrolledSize = 0;
314 auto last_elem_view = Kokkos::subview(_kernelOffsets, _nCluster);
315 Kokkos::deep_copy(unrolledSize, last_elem_view);
316 _kernelMatrices = VectorView<>("kernelMatrices", unrolledSize);
317
320
321 Kokkos::fence();
322 eMatr.stop();
323
325 // The eval matrices ///////////////
326 precice::profiling::Event eMatrOut("solver.kernel.assembleOutputMatrices");
327 offset_2d_type evalSize = 0;
328 auto last_elem_view2 = Kokkos::subview(_evaluationOffsets, _nCluster);
329 Kokkos::deep_copy(evalSize, last_elem_view2);
330 _evalMatrices = VectorView<>("evalMatrices", evalSize);
331
334 Kokkos::fence();
335 }
336
337 precice::profiling::Event eLU("solver.kernel.lu");
338 // Step 7: Compute batched lu
339 PRECICE_DEBUG("Compute batched lu");
341 Kokkos::fence();
342 eLU.stop();
343 precice::profiling::Event eAllo("solver.allocateData");
344 // Step 8: Allocate memory for data transfer
345 PRECICE_DEBUG("Allocate data containers for data transfer");
346
// Persistent device buffers reused by every solveConsistent call.
347 _inData = VectorView<>("inData", inMesh->nVertices());
348 _outData = VectorView<>("outData", outMesh->nVertices());
349 Kokkos::fence();
350}
351
352template <typename RADIAL_BASIS_FUNCTION_T>
353void BatchedRBFSolver<RADIAL_BASIS_FUNCTION_T>::solveConsistent(const time::Sample &globalIn, Eigen::VectorXd &globalOut)
354{
355 auto solve_component =
356 [&](const double *inPtr, Eigen::Index inSize, double *outPtr, Eigen::Index outSize) {
357 // Step 1: Wrap memory into an unmanaged view
358 Kokkos::View<const double *, Kokkos::HostSpace, UnmanagedMemory>
359 inView(inPtr, inSize);
360
361 // Step 2: Copy over
362 precice::profiling::Event e1("solver.copyHostToDevice");
363 Kokkos::deep_copy(_inData, inView);
364 Kokkos::deep_copy(_outData, 0.0); // Reset output data
365
366 Kokkos::fence();
367 e1.stop();
368
369 // Step 3: Launch the kernel
370 precice::profiling::Event e2("solver.kernel.batchedSolve");
376
377 Kokkos::fence();
378 e2.stop();
379
380 // Step 4: Copy back
381 precice::profiling::Event e3("solver.copyDeviceToHost");
382 Kokkos::View<double *, Kokkos::HostSpace, UnmanagedMemory>
383 outView(outPtr, outSize);
384 Kokkos::deep_copy(outView, _outData);
385 Kokkos::fence();
386 e3.stop();
387 };
388
389 const int nComponents = globalIn.dataDims;
390
391 // If we have just one component, we can directly copy the data over and solve
392 if (nComponents == 1) {
393 solve_component(globalIn.values.data(), globalIn.values.size(), globalOut.data(), globalOut.size());
394 } else {
395 // Otherwise, we map the data to a component-wise matrix
396 Eigen::Map<const Eigen::MatrixXd> inMatrix(globalIn.values.data(), nComponents, globalIn.values.size() / nComponents);
397 Eigen::Map<Eigen::MatrixXd> outMatrix(globalOut.data(), nComponents, globalOut.size() / nComponents);
398
399 // ... and solve component-wise. This requires each component to be contiguous in memory
400 Eigen::VectorXd tmpIn(inMatrix.cols());
401 Eigen::VectorXd tmpOut(outMatrix.cols());
402 for (int c = 0; c < nComponents; ++c) {
403 tmpIn = inMatrix.row(c);
404 solve_component(tmpIn.data(), tmpIn.size(), tmpOut.data(), tmpOut.size());
405 outMatrix.row(c) = tmpOut;
406 }
407 }
408}
409
410// Forwarding dispatcher for the actual implementation to help with the template parameters:
411template <typename RADIAL_BASIS_FUNCTION_T>
412template <typename... Args>
413void BatchedRBFSolver<RADIAL_BASIS_FUNCTION_T>::_dispatch_solve_kernel(bool polynomial, bool evaluation_op_available,
414 Args &&...args)
415{
416 if (polynomial) {
417 if (evaluation_op_available)
418 kernel::do_batched_solve<true, true>(std::forward<Args>(args)...);
419 else
420 kernel::do_batched_solve<true, false>(std::forward<Args>(args)...);
421 } else {
422 if (evaluation_op_available)
423 kernel::do_batched_solve<false, true>(std::forward<Args>(args)...);
424 else
425 kernel::do_batched_solve<false, false>(std::forward<Args>(args)...);
426 }
427}
428} // namespace precice::mapping
429
430#else
431
434namespace precice::mapping {
435
// No-op fallback used when preCICE is built without Kokkos kernels
// (PRECICE_NO_KOKKOS_KERNELS defined): same interface, empty behavior.
436template <typename RADIAL_BASIS_FUNCTION_T>
437class BatchedRBFSolver {
438public:
// NOTE(review): two constructor parameters (original lines 440-441, presumably
// the in/out mesh::PtrMesh arguments matching the real solver above) are
// missing from this view.
439 BatchedRBFSolver(RADIAL_BASIS_FUNCTION_T,
442 const std::vector<mesh::Vertex> &,
443 double,
444 Polynomial,
445 bool,
446 MappingConfiguration::GinkgoParameter) {}
447
// Intentionally does nothing in the no-kernel build.
448 void solveConsistent(const time::Sample &, Eigen::VectorXd &) {}
449};
450} // namespace precice::mapping
451#endif // PRECICE_NO_KOKKOS_KERNELS
#define PRECICE_DEBUG(...)
Definition LogMacros.hpp:61
#define PRECICE_TRACE(...)
Definition LogMacros.hpp:92
#define PRECICE_CHECK(check,...)
Definition LogMacros.hpp:32
#define PRECICE_ASSERT(...)
Definition assertion.hpp:85
int getDimensions() const
Definition Mesh.cpp:99
VertexContainer & vertices()
Returns a modifiable container holding all vertices.
Definition Mesh.cpp:54
const std::string & getName() const
Returns the name of the mesh, as set in the config file.
Definition Mesh.cpp:242
std::size_t nVertices() const
Returns the number of vertices.
Definition Mesh.cpp:64
Vertex & vertex(VertexID id)
Mutable access to a vertex by VertexID.
Definition Mesh.cpp:42
const query::Index & index() const
Call preprocess() before index() to ensure correct projection handling.
Definition Mesh.hpp:329
static void initialize(int *argc, char ***argv)
Definition Device.cpp:12
This class provides a lightweight logger.
Definition Logger.hpp:17
void _dispatch_solve_kernel(bool polynomial, bool evaluation_op_available, Args &&...args)
void solveConsistent(const time::Sample &globalIn, Eigen::VectorXd &globalOut)
BatchedRBFSolver(RBF_T basisFunction, mesh::PtrMesh inMesh, mesh::PtrMesh outMesh, const std::vector< mesh::Vertex > &centers, double clusterRadius, Polynomial polynomial, bool computeEvaluationOffline, MappingConfiguration::GinkgoParameter ginkgoParameter)
Wendland radial basis function with compact support.
void stop()
Stops a running event.
Definition Event.cpp:51
void do_batched_qr(int nCluster, int dim, int avgClusterSize, int maxClusterSize, VectorOffsetView< MemorySpace > inOffsets, GlobalIDView< MemorySpace > globalInIDs, MeshView< MemorySpace > inMesh, VectorView< MemorySpace > qrMatrix, VectorView< MemorySpace > qrTau, PivotView< MemorySpace > qrP)
void do_batched_assembly(int nCluster, int dim, int avgClusterSize, EvalFunctionType f, const VectorOffsetView< MemorySpace > &inOffsets, const GlobalIDView< MemorySpace > &globalInIDs, const MeshView< MemorySpace > &inCoords, const VectorOffsetView< MemorySpace > &targetOffsets, const GlobalIDView< MemorySpace > &globalTargetIDs, const MeshView< MemorySpace > &targetCoords, const MatrixOffsetView< MemorySpace > &matrixOffsets, VectorView< MemorySpace > matrices)
bool compute_weights(const int nCluster, const int avgOutClusterSize, const offset_1d_type nWeights, const int nMeshVertices, const int dim, VectorOffsetView< MemorySpace > offsets, MeshView< MemorySpace > centers, GlobalIDView< MemorySpace > globalIDs, MeshView< MemorySpace > mesh, const CompactPolynomialC2 &w, VectorView< MemorySpace > normalizedWeights)
void do_input_assembly(int nCluster, int dim, int avgClusterSize, int maxInClusterSize, EvalFunctionType f, const VectorOffsetView< MemorySpace > &inOffsets, const GlobalIDView< MemorySpace > &globalInIDs, const MeshView< MemorySpace > &inCoords, const MatrixOffsetView< MemorySpace > &matrixOffsets, VectorView< MemorySpace > matrices)
void do_batched_solve(int nCluster, int dim, int avgInClusterSize, int maxInClusterSize, int maxOutClusterSize, EvalFunctionType f, const VectorOffsetView< MemorySpace > &rhsOffsets, const GlobalIDView< MemorySpace > &globalRhsIDs, VectorView< MemorySpace > rhs, const MatrixOffsetView< MemorySpace > &matrixOffsets, const VectorView< MemorySpace > &matrices, const VectorView< MemorySpace > &normalizedWeights, const MatrixOffsetView< MemorySpace > &evalOffsets, const VectorView< MemorySpace > &evalMat, const VectorOffsetView< MemorySpace > &outOffsets, const GlobalIDView< MemorySpace > &globalOutIDs, VectorView< MemorySpace > out, const MeshView< MemorySpace > &inMesh, const MeshView< MemorySpace > &outMesh, const VectorView< MemorySpace > &qrMatrix, const VectorView< MemorySpace > &qrTau, const PivotView< MemorySpace > &qrP)
void compute_offsets(const VectorOffsetView< MemorySpace > src1, const VectorOffsetView< MemorySpace > src2, MatrixOffsetView< MemorySpace > dst, int nCluster)
void do_batched_lu(int nCluster, int avgClusterSize, const MatrixOffsetView< MemorySpace > &matrixOffsets, VectorView< MemorySpace > matrices)
contains data mapping from points to meshes.
ExecutionSpace::size_type offset_2d_type
Kokkos::View< int *, MemorySpace > PivotView
Kokkos::View< offset_2d_type *, MemorySpace > MatrixOffsetView
Kokkos::View< offset_1d_type *, MemorySpace > VectorOffsetView
Kokkos::View< VertexID *, MemorySpace > GlobalIDView
ExecutionSpace::size_type offset_1d_type
Kokkos::View< double *, MemorySpace > VectorView
Polynomial
How to handle the polynomial?
Kokkos::View< double **, Kokkos::LayoutRight, MemorySpace > MeshView
constexpr double NUMERICAL_ZERO_DIFFERENCE
std::shared_ptr< Mesh > PtrMesh
Wrapper struct that is used to transfer RBF-specific parameters to the GPU.
int dataDims
The dimensionality of the data.
Definition Sample.hpp:60
Eigen::VectorXd values
Definition Sample.hpp:64