fvbaselinearizer.hh
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
/*!
 * \file
 *
 * \copydoc Opm::FvBaseLinearizer
 */
#ifndef EWOMS_FV_BASE_LINEARIZER_HH
#define EWOMS_FV_BASE_LINEARIZER_HH

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/common/version.hh>

#include <dune/grid/common/gridenums.hh>

#include <opm/common/Exceptions.hpp>
#include <opm/common/TimingMacros.hpp>

#include <opm/grid/utility/SparseTable.hpp>

#include <opm/material/common/MathToolbox.hpp>

#include <opm/models/discretization/common/fvbaseproperties.hh>
#include <opm/models/discretization/common/linearizationtype.hh>

#include <opm/models/parallel/gridcommhandles.hh>
#include <opm/models/parallel/threadmanager.hpp>
#include <opm/models/parallel/threadedentityiterator.hh>

#include <cstddef>
#include <exception> // current_exception, rethrow_exception
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <vector>

namespace Opm {

// forward declarations
template<class TypeTag>
class EcfvDiscretization;

/*!
 * \brief The common code for the linearizers of non-linear systems of equations.
 *
 * This class assumes that the systems of equations to be linearized stem from
 * models that use a finite volume scheme for spatial discretization and an
 * Euler scheme for time discretization.
 */
template<class TypeTag>
class FvBaseLinearizer
{
    using Model = GetPropType<TypeTag, Properties::Model>;
    using Problem = GetPropType<TypeTag, Properties::Problem>;
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using Evaluation = GetPropType<TypeTag, Properties::Evaluation>;
    using DofMapper = GetPropType<TypeTag, Properties::DofMapper>;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;

    using GlobalEqVector = GetPropType<TypeTag, Properties::GlobalEqVector>;
    using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
    using Constraints = GetPropType<TypeTag, Properties::Constraints>;
    using Stencil = GetPropType<TypeTag, Properties::Stencil>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using Toolbox = MathToolbox<Evaluation>;

    using Element = typename GridView::template Codim<0>::Entity;
    using ElementIterator = typename GridView::template Codim<0>::Iterator;

    using Vector = GlobalEqVector;

    using IstlMatrix = typename SparseMatrixAdapter::IstlMatrix;

    enum { numEq = getPropValue<TypeTag, Properties::NumEq>() };
    enum { historySize = getPropValue<TypeTag, Properties::TimeDiscHistorySize>() };
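    // numEq is the number of equations (and primary variables) per degree of
    // freedom; historySize is the number of time indices kept by the time
    // discretization (e.g. two for the implicit Euler scheme).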
    using MatrixBlock = typename SparseMatrixAdapter::MatrixBlock;
    using VectorBlock = Dune::FieldVector<Scalar, numEq>;

    static constexpr bool linearizeNonLocalElements =
        getPropValue<TypeTag, Properties::LinearizeNonLocalElements>();

public:
    FvBaseLinearizer() = default;

    // copying the linearizer is not a good idea
    FvBaseLinearizer(const FvBaseLinearizer&) = delete;
    /*!
     * \brief Register all run-time parameters for the Jacobian linearizer.
     */
    static void registerParameters()
    {}
    /*!
     * \brief Initialize the linearizer.
     *
     * At this point we can assume that all objects in the simulator have been
     * allocated, although they may not be fully initialized yet.
     */
    void init(Simulator& simulator)
    {
        simulatorPtr_ = &simulator;
        eraseMatrix();
        elementCtx_.clear();
        fullDomain_ = std::make_unique<FullDomain>(simulator.gridView());
    }
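    // Typical driving sequence (a sketch; `sim` stands for a fully
    // constructed Simulator instance owned by the caller):
    //
    //   FvBaseLinearizer<TypeTag> linearizer;
    //   linearizer.init(sim);             // bind to the simulator
    //   linearizer.linearize();           // assemble Jacobian and residual
    //   linearizer.finalize();            // flush the matrix backend
    //   auto& J = linearizer.jacobian();  // hand over to the linear solver
    //   auto& r = linearizer.residual();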
    /*!
     * \brief Causes the Jacobian matrix to be recreated from scratch before
     *        the next iteration.
     *
     * This method is usually called if the sparsity pattern has changed for
     * some reason (e.g. by modifications of the grid or changes of the
     * auxiliary equations).
     */
    void eraseMatrix()
    {
        jacobian_.reset();
    }
    /*!
     * \brief Linearize the full system of non-linear equations.
     *
     * This means the spatial domain plus all auxiliary equations.
     */
    void linearize()
    {
        linearizeDomain();
        linearizeAuxiliaryEquations();
    }
    /*!
     * \brief Linearize the part of the non-linear system of equations that is
     *        associated with the spatial domain.
     *
     * That means that the global Jacobian of the residual is assembled and
     * the residual is evaluated for the current solution.
     */
    void linearizeDomain()
    {
        linearizeDomain(*fullDomain_);
    }

    template <class SubDomainType>
    void linearizeDomain(const SubDomainType& domain, bool isNlddLocalSolve = false)
    {
        OPM_TIMEBLOCK(linearizeDomain);
        // we defer the initialization of the Jacobian matrix until here because the
        // auxiliary modules usually assume the problem, model and grid to be fully
        // initialized...
        if (!jacobian_) {
            initFirstIteration_();
        }

        // Called here because it is no longer called from linearize_().
        if (isNlddLocalSolve) {
            resetSystem_(domain);
        }
        else {
            resetSystem_();
        }

        int succeeded;
        try {
            linearize_(domain);
            succeeded = 1;
        }
        catch (const std::exception& e) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing: " << e.what()
                      << "\n" << std::flush;
            succeeded = 0;
        }
        catch (...) {
            std::cout << "rank " << simulator_().gridView().comm().rank()
                      << " caught an exception while linearizing"
                      << "\n" << std::flush;
            succeeded = 0;
        }
        succeeded = simulator_().gridView().comm().min(succeeded);
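        // (collective operation: all ranks take part in this reduction, so a
        // failure on any rank aborts the assembly on every rank instead of
        // leaving the other processes waiting)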

        if (!succeeded) {
            throw NumericalProblem("A process did not succeed in linearizing the system");
        }
    }
    void finalize()
    { jacobian_->finalize(); }
    /*!
     * \brief Linearize the part of the non-linear system of equations that is
     *        associated with the auxiliary equations.
     */
    void linearizeAuxiliaryEquations()
    {
        OPM_TIMEBLOCK(linearizeAuxiliaryEquations);
        // flush possible local caches into matrix structure
        jacobian_->commit();

        auto& model = model_();
        const auto& comm = simulator_().gridView().comm();
        for (unsigned auxModIdx = 0; auxModIdx < model.numAuxiliaryModules(); ++auxModIdx) {
            bool succeeded = true;
            try {
                model.auxiliaryModule(auxModIdx)->linearize(*jacobian_, residual_);
            }
            catch (const std::exception& e) {
                succeeded = false;

                std::cout << "rank " << simulator_().gridView().comm().rank()
                          << " caught an exception while linearizing: " << e.what()
                          << "\n" << std::flush;
            }

            succeeded = comm.min(succeeded);
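            // (collective: every rank participates in this per-module
            // reduction, otherwise the processes would deadlock)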

            if (!succeeded) {
                throw NumericalProblem("linearization of an auxiliary equation failed");
            }
        }
    }
    /*!
     * \brief Return constant reference to global Jacobian matrix backend.
     */
    const SparseMatrixAdapter& jacobian() const
    { return *jacobian_; }

    SparseMatrixAdapter& jacobian()
    { return *jacobian_; }
    /*!
     * \brief Return constant reference to global residual vector.
     */
    const GlobalEqVector& residual() const
    { return residual_; }

    GlobalEqVector& residual()
    { return residual_; }
    void setLinearizationType(LinearizationType linearizationType)
    { linearizationType_ = linearizationType; }

    const LinearizationType& getLinearizationType() const
    { return linearizationType_; }
    void updateDiscretizationParameters()
    {
        // This linearizer stores no such parameters.
    }

    void updateBoundaryConditionData()
    {
        // This linearizer stores no such data.
    }

    void updateFlowsInfo()
    {
        // This linearizer stores no such data.
    }
    /*!
     * \brief Returns the map of constraint degrees of freedom.
     *
     * (This object is only non-empty if the EnableConstraints property is true.)
     */
    const std::map<unsigned, Constraints>& constraintsMap() const
    { return constraintsMap_; }
    /*!
     * \brief Return constant reference to the flowsInfo.
     */
    const auto& getFlowsInfo() const
    { return flowsInfo_; }
    /*!
     * \brief Return constant reference to the floresInfo.
     */
    const auto& getFloresInfo() const
    { return floresInfo_; }

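    // Set the residual entries and Jacobian rows of the given sub-domain to
    // zero while leaving the rest of the global system untouched; this
    // partial reset is what allows NLDD-style local solves to reuse the
    // global matrix object.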
    template <class SubDomainType>
    void resetSystem_(const SubDomainType& domain)
    {
        if (!jacobian_) {
            initFirstIteration_();
        }

        // loop over selected elements
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            const unsigned threadId = ThreadManager::threadId();
            auto elemIt = threadedElemIt.beginParallel();
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updatePrimaryStencil(elem);
                // Set to zero the relevant residual and Jacobian parts.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                    residual_[globI] = 0.0;
                    jacobian_->clearRow(globI, 0.0);
                }
            }
        }
    }

private:
    Simulator& simulator_()
    { return *simulatorPtr_; }

    const Simulator& simulator_() const
    { return *simulatorPtr_; }

    Problem& problem_()
    { return simulator_().problem(); }

    const Problem& problem_() const
    { return simulator_().problem(); }

    Model& model_()
    { return simulator_().model(); }

    const Model& model_() const
    { return simulator_().model(); }

    const GridView& gridView_() const
    { return problem_().gridView(); }

    const ElementMapper& elementMapper_() const
    { return model_().elementMapper(); }

    const DofMapper& dofMapper_() const
    { return model_().dofMapper(); }
    void initFirstIteration_()
    {
        // initialize the BCRS matrix for the Jacobian of the residual function
        createMatrix_();

        // initialize the Jacobian matrix and the vector for the residual function
        residual_.resize(model_().numTotalDof());
        resetSystem_();

        // create the per-thread context objects
        elementCtx_.clear();
        elementCtx_.reserve(ThreadManager::maxThreads());
        for (unsigned threadId = 0; threadId != ThreadManager::maxThreads(); ++threadId) {
            elementCtx_.push_back(std::make_unique<ElementContext>(simulator_()));
        }
    }

    // Construct the BCRS matrix for the Jacobian of the residual function
    void createMatrix_()
    {
        const auto& model = model_();
        Stencil stencil(gridView_(), model_().dofMapper());

        // for the main model, find out the global indices of the neighboring degrees of
        // freedom of each primary degree of freedom
        sparsityPattern_.clear();
        sparsityPattern_.resize(model.numTotalDof());

        for (const auto& elem : elements(gridView_())) {
            stencil.update(elem);

            for (unsigned primaryDofIdx = 0; primaryDofIdx < stencil.numPrimaryDof(); ++primaryDofIdx) {
                const unsigned myIdx = stencil.globalSpaceIndex(primaryDofIdx);

                for (unsigned dofIdx = 0; dofIdx < stencil.numDof(); ++dofIdx) {
                    const unsigned neighborIdx = stencil.globalSpaceIndex(dofIdx);
                    sparsityPattern_[myIdx].insert(neighborIdx);
                }
            }
        }

        // add the additional neighbors and degrees of freedom caused by the auxiliary
        // equations
        const std::size_t numAuxMod = model.numAuxiliaryModules();
        for (unsigned auxModIdx = 0; auxModIdx < numAuxMod; ++auxModIdx) {
            model.auxiliaryModule(auxModIdx)->addNeighbors(sparsityPattern_);
        }

        // allocate raw matrix
        jacobian_ = std::make_unique<SparseMatrixAdapter>(simulator_());

        // create matrix structure based on sparsity pattern
        jacobian_->reserve(sparsityPattern_);
    }
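    // Example: on a 1D grid with three cells 0-1-2 and a two-point stencil,
    // the pattern built above becomes
    //   row 0: {0, 1}
    //   row 1: {0, 1, 2}
    //   row 2: {1, 2}
    // i.e. each primary DOF couples with itself and with every DOF of its
    // stencil.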

    // reset the global linear system of equations.
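    // (unlike the sub-domain overload above, this resets the complete system)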
    void resetSystem_()
    {
        residual_ = 0.0;
        // zero all matrix entries
        jacobian_->clear();
    }

    // query the problem for all constraint degrees of freedom. note that this method is
    // quite involved and is thus relatively slow.
    void updateConstraintsMap_()
    {
        if (!enableConstraints_()) {
            // constraints are not explicitly enabled, so we don't need to consider them!
            return;
        }

        constraintsMap_.clear();

        // loop over all elements...
        ThreadedEntityIterator<GridView, /*codim=*/0> threadedElemIt(gridView_());
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            const unsigned threadId = ThreadManager::threadId();
            ElementIterator elemIt = threadedElemIt.beginParallel();
            for (; !threadedElemIt.isFinished(elemIt); elemIt = threadedElemIt.increment()) {
                // create an element context (the solution-based quantities are not
                // available here!)
                const Element& elem = *elemIt;
                ElementContext& elemCtx = *elementCtx_[threadId];
                elemCtx.updateStencil(elem);

                // check if the problem wants to constrain any degree of freedom of the
                // current element. if yes, add the constraint to the map.
                for (unsigned primaryDofIdx = 0;
                     primaryDofIdx < elemCtx.numPrimaryDof(/*timeIdx=*/0);
                     ++primaryDofIdx)
                {
                    Constraints constraints;
                    elemCtx.problem().constraints(constraints,
                                                  elemCtx,
                                                  primaryDofIdx,
                                                  /*timeIdx=*/0);
                    if (constraints.isActive()) {
                        const unsigned globI = elemCtx.globalSpaceIndex(primaryDofIdx, /*timeIdx=*/0);
                        constraintsMap_[globI] = constraints;
                        continue;
                    }
                }
            }
        }
    }

    // linearize the whole or part of the system
    template <class SubDomainType>
    void linearize_(const SubDomainType& domain)
    {
        OPM_TIMEBLOCK(linearize_);

        // We do not call resetSystem_() here, since that would set
        // the full system to zero, not just our part.
        // Instead, it must be called before starting the linearization.

        // before the first iteration of each time step, we need to update the
        // constraints. (i.e., we assume that constraints can be time dependent, but they
        // can't depend on the solution.)
        if (model_().newtonMethod().numIterations() == 0) {
            updateConstraintsMap_();
        }

        applyConstraintsToSolution_();

        // to avoid a race condition if two threads handle an exception at the same time,
        // we use an explicit lock to control access to the exception storage object
        // amongst thread-local handlers
        std::mutex exceptionLock;

        // storage for any exception that needs to be bridged out of the
        // parallel block below. initialized to null to indicate that no
        // exception occurred
        std::exception_ptr exceptionPtr = nullptr;

        // relinearize the elements...
        using GridViewType = decltype(domain.view);
        ThreadedEntityIterator<GridViewType, /*codim=*/0> threadedElemIt(domain.view);
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            auto elemIt = threadedElemIt.beginParallel();
            auto nextElemIt = elemIt;
            try {
                for (; !threadedElemIt.isFinished(elemIt); elemIt = nextElemIt) {
                    // give the model and the problem a chance to prefetch the data required
                    // to linearize the next element, but only if we need to consider it
                    nextElemIt = threadedElemIt.increment();
                    if (!threadedElemIt.isFinished(nextElemIt)) {
                        const auto& nextElem = *nextElemIt;
                        if (linearizeNonLocalElements ||
                            nextElem.partitionType() == Dune::InteriorEntity)
                        {
                            model_().prefetch(nextElem);
                            problem_().prefetch(nextElem);
                        }
                    }

                    const auto& elem = *elemIt;
                    if (!linearizeNonLocalElements && elem.partitionType() != Dune::InteriorEntity) {
                        continue;
                    }

                    linearizeElement_(elem);
                }
            }
            // If an exception occurs in the parallel block, it won't escape it;
            // terminate() would be called instead of a handler outside. Hence, we
            // tuck any exception that occurs away in the pointer. If an exception
            // occurs in more than one thread at the same time, we must pick one of
            // them to be rethrown, as we cannot have two active exceptions at the
            // same time. This solution essentially picks one at random. This will
            // only be a problem if two different kinds of exceptions are thrown, for
            // instance if one thread experiences a (recoverable) numerical issue
            // while another is out of memory.
            catch (...) {
                std::lock_guard<std::mutex> take(exceptionLock);
                exceptionPtr = std::current_exception();
                threadedElemIt.setFinished();
            }
        } // parallel block

        // after reduction from the parallel block, exceptionPtr will point to
        // a valid exception if one occurred in one of the threads; rethrow
        // it here to let the outer handler take care of it properly
        if (exceptionPtr) {
            std::rethrow_exception(exceptionPtr);
        }

        applyConstraintsToLinearization_();
    }

    // linearize an element in the interior of the process' grid partition
    template <class ElementType>
    void linearizeElement_(const ElementType& elem)
    {
        const unsigned threadId = ThreadManager::threadId();

        ElementContext& elementCtx = *elementCtx_[threadId];
        auto& localLinearizer = model_().localLinearizer(threadId);

        // the actual work of linearization is done by the local linearizer class
        localLinearizer.linearize(elementCtx, elem);

        // update the right hand side and the Jacobian matrix
        if (getPropValue<TypeTag, Properties::UseLinearizationLock>()) {
            globalMatrixMutex_.lock();
        }

        const std::size_t numPrimaryDof = elementCtx.numPrimaryDof(/*timeIdx=*/0);
        for (unsigned primaryDofIdx = 0; primaryDofIdx < numPrimaryDof; ++primaryDofIdx) {
            const unsigned globI = elementCtx.globalSpaceIndex(/*spaceIdx=*/primaryDofIdx, /*timeIdx=*/0);

            // update the right hand side
            residual_[globI] += localLinearizer.residual(primaryDofIdx);

            // update the global Jacobian matrix
            for (unsigned dofIdx = 0; dofIdx < elementCtx.numDof(/*timeIdx=*/0); ++dofIdx) {
                const unsigned globJ = elementCtx.globalSpaceIndex(/*spaceIdx=*/dofIdx, /*timeIdx=*/0);

                jacobian_->addToBlock(globJ, globI, localLinearizer.jacobian(dofIdx, primaryDofIdx));
            }
        }

        if (getPropValue<TypeTag, Properties::UseLinearizationLock>()) {
            globalMatrixMutex_.unlock();
        }
    }

    // apply the constraints to the solution. (i.e., the solution of constrained degrees
    // of freedom is set to the value of the constraint.)
    void applyConstraintsToSolution_()
    {
        if (!enableConstraints_()) {
            return;
        }

        // TODO: assuming a history size of 2 only works for Euler time discretizations!
        auto& sol = model_().solution(/*timeIdx=*/0);
        auto& oldSol = model_().solution(/*timeIdx=*/1);

        for (const auto& constraint : constraintsMap_) {
            sol[constraint.first] = constraint.second;
            oldSol[constraint.first] = constraint.second;
        }
    }

    // apply the constraints to the linearization. (i.e., for constrained degrees of
    // freedom, the row of the Jacobian matrix is replaced by identity and the
    // residual is set to zero)
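    // For a constrained DOF i this leaves a unit diagonal block in row i and
    // residual_[i] == 0, so the Newton update for that DOF is zero and the
    // value assigned by applyConstraintsToSolution_() is preserved.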
    void applyConstraintsToLinearization_()
    {
        if (!enableConstraints_()) {
            return;
        }

        for (const auto& constraint : constraintsMap_) {
            // reset the row of the Jacobian matrix and put an identity block
            // on its main diagonal
            jacobian_->clearRow(constraint.first, Scalar(1.0));

            // make the right-hand side of constraint DOFs zero
            residual_[constraint.first] = 0.0;
        }
    }

    static bool enableConstraints_()
    { return getPropValue<TypeTag, Properties::EnableConstraints>(); }

    Simulator* simulatorPtr_{};
    std::vector<std::unique_ptr<ElementContext>> elementCtx_;

    // The constraint equations (only non-empty if the
    // EnableConstraints property is true)
    std::map<unsigned, Constraints> constraintsMap_;

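    // Bookkeeping for the flow rate of each face (and of each non-neighboring
    // connection, identified by nncId), used for flow-related output such as
    // the FLOWS/FLORES summary data.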
    struct FlowInfo
    {
        int faceId;
        VectorBlock flow;
        unsigned int nncId;
    };
    SparseTable<FlowInfo> flowsInfo_;
    SparseTable<FlowInfo> floresInfo_;

    // the Jacobian matrix
    std::unique_ptr<SparseMatrixAdapter> jacobian_;

    // the right-hand side
    GlobalEqVector residual_;

    LinearizationType linearizationType_;

    std::mutex globalMatrixMutex_;

    std::vector<std::set<unsigned int>> sparsityPattern_;

    struct FullDomain
    {
        explicit FullDomain(const GridView& v) : view(v) {}
        GridView view;
        std::vector<bool> interior; // Should remain empty.
    };
    // Simple domain object used for full-domain linearization; it allows
    // us to have the same interface for sub-domain and full-domain work.
    // Pointer since it must defer construction, due to GridView member.
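    // (As far as this class is concerned, any SubDomainType passed to
    // linearizeDomain() only needs a compatible `view` member.)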
    std::unique_ptr<FullDomain> fullDomain_;
};

} // namespace Opm

#endif