EclGenericWriter_impl.hpp
Go to the documentation of this file.
1// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2// vi: set et ts=4 sw=4 sts=4:
3/*
4 This file is part of the Open Porous Media project (OPM).
5
6 OPM is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 2 of the License, or
9 (at your option) any later version.
10
11 OPM is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with OPM. If not, see <http://www.gnu.org/licenses/>.
18
19 Consult the COPYING file in the top-level source directory of this
20 module for the precise wording of the license and the list of
21 copyright holders.
22*/
23#ifndef OPM_ECL_GENERIC_WRITER_IMPL_HPP
24#define OPM_ECL_GENERIC_WRITER_IMPL_HPP
25
26#include <dune/grid/common/mcmgmapper.hh>
27
28#include <opm/grid/cpgrid/LgrOutputHelpers.hpp>
29#include <opm/grid/GridHelpers.hpp>
30#include <opm/grid/utility/cartesianToCompressed.hpp>
31
32#include <opm/input/eclipse/EclipseState/EclipseState.hpp>
33#include <opm/input/eclipse/EclipseState/Grid/RegionSetMatcher.hpp>
34#include <opm/input/eclipse/EclipseState/SummaryConfig/SummaryConfig.hpp>
35
36#include <opm/input/eclipse/Schedule/Action/State.hpp>
37#include <opm/input/eclipse/Schedule/RPTConfig.hpp>
38#include <opm/input/eclipse/Schedule/Schedule.hpp>
39#include <opm/input/eclipse/Schedule/SummaryState.hpp>
40#include <opm/input/eclipse/Schedule/UDQ/UDQConfig.hpp>
41#include <opm/input/eclipse/Schedule/UDQ/UDQState.hpp>
42#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
43#include <opm/input/eclipse/Schedule/Well/WellMatcher.hpp>
44
45#include <opm/input/eclipse/Units/UnitSystem.hpp>
46
#include <opm/output/eclipse/EclipseIO.hpp>
#include <opm/output/eclipse/RestartValue.hpp>
#include <opm/output/eclipse/Summary.hpp>

#include <opm/models/parallel/tasklets.hpp>

#if HAVE_MPI
#include <opm/simulators/utils/MPISerializer.hpp>
#endif

#if HAVE_MPI
#include <mpi.h>
#endif

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
73namespace {
74
/// Decide whether two cells in the same (i,j) column are "direct vertical
/// neighbours", i.e. every Cartesian cell strictly between them is inactive.
///
/// \param cartDims          Cartesian dimensions (nx, ny, nz) of the grid.
/// \param cartesianToActive Map from global Cartesian index to active index;
///                          a missing key means the cell is inactive.
/// \param smallGlobalIndex  Lower global Cartesian index (must be <= large).
/// \param largeGlobalIndex  Higher global Cartesian index.
/// \return true only when the cells share a column, are at least two layers
///         apart, and all intervening cells are inactive.
bool directVerticalNeighbors(const std::array<int, 3>& cartDims,
                             const std::unordered_map<int,int>& cartesianToActive,
                             int smallGlobalIndex, int largeGlobalIndex)
{
    assert(smallGlobalIndex <= largeGlobalIndex);

    // Decompose a global Cartesian index into (i, j, k).
    const auto toIJK = [&cartDims](int globalCell) {
        std::array<int, 3> result{};
        result[0] = globalCell % cartDims[0];
        globalCell /= cartDims[0];
        result[1] = globalCell % cartDims[1];
        result[2] = globalCell / cartDims[1];
        return result;
    };

    const auto low  = toIJK(smallGlobalIndex);
    const auto high = toIJK(largeGlobalIndex);
    assert(high[2] >= low[2]);

    // Must lie in the same (i,j) column and be separated by at least one layer.
    if ((low[0] != high[0]) || (low[1] != high[1]) || (high[2] - low[2] <= 1)) {
        return false;
    }

    const int layerStride = cartDims[0] * cartDims[1];
    assert((largeGlobalIndex - smallGlobalIndex) % layerStride == 0);

    // Any active cell strictly between the two breaks the direct connection.
    for (int gi = smallGlobalIndex + layerStride; gi < largeGlobalIndex; gi += layerStride) {
        if (cartesianToActive.count(gi) > 0) {
            return false;
        }
    }

    return true;
}
122
123std::unordered_map<std::string, Opm::data::InterRegFlowMap>
124getInterRegFlowsAsMap(const Opm::InterRegFlowMap& map)
125{
126 auto maps = std::unordered_map<std::string, Opm::data::InterRegFlowMap>{};
127
128 const auto& regionNames = map.names();
129 auto flows = map.getInterRegFlows();
130 const auto nmap = regionNames.size();
131
132 maps.reserve(nmap);
133 for (auto mapID = 0*nmap; mapID < nmap; ++mapID) {
134 maps.emplace(regionNames[mapID], std::move(flows[mapID]));
135 }
136
137 return maps;
138}
139
// Tasklet that forwards one step's worth of results to
// EclipseIO::writeTimeStep().  All state objects are copied in at
// construction time so the simulation can proceed while a dedicated output
// thread performs the actual file I/O.
struct EclWriteTasklet : public Opm::TaskletInterface
{
    Opm::Action::State actionState_;   // snapshot of ACTIONX state
    Opm::WellTestState wtestState_;    // snapshot of well-test state
    Opm::SummaryState summaryState_;   // snapshot of summary vectors
    Opm::UDQState udqState_;           // snapshot of UDQ state
    Opm::EclipseIO& eclIO_;            // I/O backend; must outlive this tasklet
    int reportStepNum_;                // report step the results belong to
    std::optional<int> timeStepNum_;   // sub-step number, when applicable
    bool isSubStep_;                   // true when not at a report-step boundary
    double secondsElapsed_;            // elapsed simulated time
    std::vector<Opm::RestartValue> restartValue_;  // one entry per output grid
                                       // (multiple when LGR level grids are written)
    bool writeDoublePrecision_;        // write restart arrays in double precision

    // Copies all inputs except 'restartValue', which is moved, and 'eclIO',
    // which is held by reference.
    explicit EclWriteTasklet(const Opm::Action::State& actionState,
                             const Opm::WellTestState& wtestState,
                             const Opm::SummaryState& summaryState,
                             const Opm::UDQState& udqState,
                             Opm::EclipseIO& eclIO,
                             int reportStepNum,
                             std::optional<int> timeStepNum,
                             bool isSubStep,
                             double secondsElapsed,
                             std::vector<Opm::RestartValue> restartValue,
                             bool writeDoublePrecision)
        : actionState_(actionState)
        , wtestState_(wtestState)
        , summaryState_(summaryState)
        , udqState_(udqState)
        , eclIO_(eclIO)
        , reportStepNum_(reportStepNum)
        , timeStepNum_(timeStepNum)
        , isSubStep_(isSubStep)
        , secondsElapsed_(secondsElapsed)
        , restartValue_(std::move(restartValue))
        , writeDoublePrecision_(writeDoublePrecision)
    {}

    // callback to eclIO serial writeTimeStep method
    void run() override
    {
        // Single restart value: call the single-RestartValue overload.
        // Multiple values (LGR case): call the vector overload instead.
        if (this->restartValue_.size() == 1) {
            this->eclIO_.writeTimeStep(this->actionState_,
                                       this->wtestState_,
                                       this->summaryState_,
                                       this->udqState_,
                                       this->reportStepNum_,
                                       this->isSubStep_,
                                       this->secondsElapsed_,
                                       std::move(this->restartValue_.back()),
                                       this->writeDoublePrecision_,
                                       this->timeStepNum_);
        }
        else{
            this->eclIO_.writeTimeStep(this->actionState_,
                                       this->wtestState_,
                                       this->summaryState_,
                                       this->udqState_,
                                       this->reportStepNum_,
                                       this->isSubStep_,
                                       this->secondsElapsed_,
                                       std::move(this->restartValue_),
                                       this->writeDoublePrecision_,
                                       this->timeStepNum_);
        }
    }
};
207
208}
209
210namespace Opm {
211
212template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
214EclGenericWriter(const Schedule& schedule,
215 const EclipseState& eclState,
216 const SummaryConfig& summaryConfig,
217 const Grid& grid,
218 const EquilGrid* equilGrid,
219 const GridView& gridView,
220 const Dune::CartesianIndexMapper<Grid>& cartMapper,
221 const Dune::CartesianIndexMapper<EquilGrid>* equilCartMapper,
222 bool enableAsyncOutput,
223 bool enableEsmry )
224 : collectOnIORank_(grid,
225 equilGrid,
226 gridView,
227 cartMapper,
228 equilCartMapper,
229 summaryConfig.fip_regions_interreg_flow())
230 , grid_ (grid)
231 , gridView_ (gridView)
232 , schedule_ (schedule)
233 , eclState_ (eclState)
234 , cartMapper_ (cartMapper)
235 , equilCartMapper_(equilCartMapper)
236 , equilGrid_ (equilGrid)
237{
238 if (this->collectOnIORank_.isIORank()) {
239 this->eclIO_ = std::make_unique<EclipseIO>
240 (this->eclState_,
241 UgGridHelpers::createEclipseGrid(*equilGrid, eclState_.getInputGrid()),
242 this->schedule_, summaryConfig, "", enableEsmry);
243 }
244
245 // create output thread if enabled and rank is I/O rank
246 // async output is enabled by default if pthread are enabled
247 int numWorkerThreads = 0;
248 if (enableAsyncOutput && collectOnIORank_.isIORank()) {
249 numWorkerThreads = 1;
250 }
251
252 this->taskletRunner_.reset(new TaskletRunner(numWorkerThreads));
253}
254
255template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
257eclIO() const
258{
259 assert(eclIO_);
260 return *eclIO_;
261}
262
263template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
265writeInit()
266{
267 if (collectOnIORank_.isIORank()) {
268 std::map<std::string, std::vector<int>> integerVectors;
269 if (collectOnIORank_.isParallel()) {
270 integerVectors.emplace("MPI_RANK", collectOnIORank_.globalRanks());
271 }
272
273 eclIO_->writeInitial(*this->outputTrans_,
274 integerVectors,
275 this->outputNnc_);
276 this->outputTrans_.reset();
277 }
278}
279template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
280void
282extractOutputTransAndNNC(const std::function<unsigned int(unsigned int)>& map)
283{
284 if (collectOnIORank_.isIORank()) {
285 auto cartMap = cartesianToCompressed(equilGrid_->size(0), UgGridHelpers::globalCell(*equilGrid_));
286 computeTrans_(cartMap, map);
287 exportNncStructure_(cartMap, map);
288 }
289
290#if HAVE_MPI
291 if (collectOnIORank_.isParallel()) {
292 const auto& comm = grid_.comm();
293 Parallel::MpiSerializer ser(comm);
294 ser.broadcast(Parallel::RootRank{0}, outputNnc_);
295 }
296#endif
297}
298
299template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
300void
302computeTrans_(const std::unordered_map<int,int>& cartesianToActive,
303 const std::function<unsigned int(unsigned int)>& map) const
304{
305 if (!outputTrans_) {
306 outputTrans_ = std::make_unique<data::Solution>();
307 }
308
309 const auto& cartMapper = *equilCartMapper_;
310 const auto& cartDims = cartMapper.cartesianDimensions();
311
312 auto createCellData = [&cartDims]() {
313 return data::CellData{
314 UnitSystem::measure::transmissibility,
315 std::vector<double>(cartDims[0] * cartDims[1] * cartDims[2], 0.0),
316 data::TargetType::INIT
317 };
318 };
319
320 outputTrans_->clear();
321 outputTrans_->emplace("TRANX", createCellData());
322 outputTrans_->emplace("TRANY", createCellData());
323 outputTrans_->emplace("TRANZ", createCellData());
324
325 auto& tranx = this->outputTrans_->at("TRANX");
326 auto& trany = this->outputTrans_->at("TRANY");
327 auto& tranz = this->outputTrans_->at("TRANZ");
328
329 using GlobalGridView = typename EquilGrid::LeafGridView;
330 using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
331 const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
332 const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };
333
334 auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
335 ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
336 : std::vector<std::size_t>{}]
337 (const std::size_t cellIdx)
338 {
339 return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
340 };
341
342 for (const auto& elem : elements(globalGridView)) {
343 for (const auto& is : intersections(globalGridView, elem)) {
344 if (!is.neighbor())
345 continue; // intersection is on the domain boundary
346
347 // Not 'const' because remapped if 'map' is non-null.
348 unsigned c1 = globalElemMapper.index(is.inside());
349 unsigned c2 = globalElemMapper.index(is.outside());
350
351 if (c1 > c2)
352 continue; // we only need to handle each connection once, thank you.
353
354 // Ordering of compressed and uncompressed index should be the same
355 const int cartIdx1 = cartMapper.cartesianIndex( c1 );
356 const int cartIdx2 = cartMapper.cartesianIndex( c2 );
357
358 if (isNumAquCell(cartIdx1) || isNumAquCell(cartIdx2)) {
359 // Connections involving numerical aquifers are always NNCs
360 // for the purpose of file output. This holds even for
361 // connections between cells like (I,J,K) and (I+1,J,K)
362 // which are nominally neighbours in the Cartesian grid.
363 continue;
364 }
365
366 // Ordering of compressed and uncompressed index should be the same
367 assert(cartIdx1 <= cartIdx2);
368 int gc1 = std::min(cartIdx1, cartIdx2);
369 int gc2 = std::max(cartIdx1, cartIdx2);
370
371 // Re-ordering in case of non-empty mapping between equilGrid to grid
372 if (map) {
373 c1 = map(c1); // equilGridToGrid map
374 c2 = map(c2);
375 }
376
377 if (gc2 - gc1 == 1 && cartDims[0] > 1 ) {
378 tranx.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
379 continue; // skip other if clauses as they are false, last one needs some computation
380 }
381
382 if (gc2 - gc1 == cartDims[0] && cartDims[1] > 1) {
383 trany.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
384 continue; // skipt next if clause as it needs some computation
385 }
386
387 if ( gc2 - gc1 == cartDims[0]*cartDims[1] ||
388 directVerticalNeighbors(cartDims, cartesianToActive, gc1, gc2))
389 tranz.template data<double>()[gc1] = globalTrans().transmissibility(c1, c2);
390 }
391 }
392}
393
// Assemble the NNC output structure (this->outputNnc_) from two sources:
//
//   1) explicit NNC keyword entries from the deck which are either
//      non-neighbouring or involve numerical aquifer cells, and
//   2) grid intersections which are not direct Cartesian neighbours
//      (e.g. across faults or between LGRs), minus any transmissibility
//      already accounted for by explicit NNC entries.
//
// NOTE(review): this 'const' member function appends to outputNnc_ --
// presumably that member is declared 'mutable'; confirm in the class
// definition.  Returns a copy of the accumulated NNC list.
template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
std::vector<NNCdata>
EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>::
exportNncStructure_(const std::unordered_map<int,int>& cartesianToActive,
                    const std::function<unsigned int(unsigned int)>& map) const
{
    // Predicate: is 'cellIdx' a numerical aquifer cell?  The ID list from
    // allAquiferCellIds() is assumed sorted (required by binary_search).
    auto isNumAquCell = [numAquCell = this->eclState_.aquifer().hasNumericalAquifer()
                         ? this->eclState_.aquifer().numericalAquifers().allAquiferCellIds()
                         : std::vector<std::size_t>{}]
        (const std::size_t cellIdx)
    {
        return std::binary_search(numAquCell.begin(), numAquCell.end(), cellIdx);
    };

    // A connection is an aquifer connection if either endpoint is an
    // aquifer cell.
    auto isNumAquConn = [&isNumAquCell](const std::size_t cellIdx1,
                                        const std::size_t cellIdx2)
    {
        return isNumAquCell(cellIdx1) || isNumAquCell(cellIdx2);
    };

    // Cells are Cartesian neighbours when their global indices differ by
    // exactly 1 (I), nx (J) or nx*ny (K).  Assumes cellIdx2 >= cellIdx1.
    auto isCartesianNeighbour = [nx = this->eclState_.getInputGrid().getNX(),
                                 ny = this->eclState_.getInputGrid().getNY()]
        (const std::size_t cellIdx1, const std::size_t cellIdx2)
    {
        const auto cellDiff = cellIdx2 - cellIdx1;

        return (cellDiff == 1)
            || (cellDiff == nx)
            || (cellDiff == nx * ny);
    };

    // Map a Cartesian index to its active index, or -1 if inactive.
    auto activeCell = [&cartesianToActive](const std::size_t cellIdx)
    {
        auto pos = cartesianToActive.find(cellIdx);
        return (pos == cartesianToActive.end()) ? -1 : pos->second;
    };

    const auto& nncData = this->eclState_.getInputNNC().input();
    const auto& unitSystem = this->eclState_.getDeckUnitSystem();

    // Pass 1: explicit NNC keyword entries from the deck.
    for (const auto& entry : nncData) {
        // Ignore most explicit NNCs between otherwise neighbouring cells.
        // We keep NNCs that involve cells with numerical aquifers even if
        // these might be between neighbouring cells in the Cartesian
        // grid--e.g., between cells (I,J,K) and (I+1,J,K). All such
        // connections should be written to NNC output arrays provided the
        // transmissibility value is sufficiently large.
        //
        // The condition cell2 >= cell1 holds by construction of nncData.
        assert (entry.cell2 >= entry.cell1);

        if (! isCartesianNeighbour(entry.cell1, entry.cell2) ||
            isNumAquConn(entry.cell1, entry.cell2))
        {
            // Pick up transmissibility value from 'globalTrans()' since
            // multiplier keywords like MULTREGT might have impacted the
            // values entered in primary sources like NNC/EDITNNC/EDITNNCR.
            const auto c1 = activeCell(entry.cell1);
            const auto c2 = activeCell(entry.cell2);

            if ((c1 < 0) || (c2 < 0)) {
                // Connection between inactive cells? Unexpected at this
                // level. Might consider 'throw'ing if this happens...
                continue;
            }

            const auto trans = this->globalTrans().transmissibility(c1, c2);
            const auto tt = unitSystem
                .from_si(UnitSystem::measure::transmissibility, trans);

            // ECLIPSE ignores NNCs (with EDITNNC/EDITNNCR applied) with
            // small transmissibility values. Seems like the threshold is
            // 1.0e-6 in output units.
            if (std::isnormal(tt) && ! (tt < 1.0e-6)) {
                this->outputNnc_.emplace_back(entry.cell1, entry.cell2, trans);
            }
        }
    }

    // Cells are direct neighbours if they are Cartesian neighbours or are in
    // the same column and separated only by inactive cells.
    auto isDirectNeighbours = [&isCartesianNeighbour, &cartesianToActive,
                               cartDims = &this->cartMapper_.cartesianDimensions()]
        (const std::size_t cellIdx1, const std::size_t cellIdx2)
    {
        return isCartesianNeighbour(cellIdx1, cellIdx2)
            || directVerticalNeighbors(*cartDims, cartesianToActive, cellIdx1, cellIdx2);
    };

    using GlobalGridView = typename EquilGrid::LeafGridView;
    using GlobElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GlobalGridView>;
    const GlobalGridView& globalGridView = this->equilGrid_->leafGridView();
    const GlobElementMapper globalElemMapper { globalGridView, Dune::mcmgElementLayout() };

    // Pass 2: NNCs arising from grid intersections that are not direct
    // neighbours (faults, pinch-outs, aquifer connections).
    // Cartesian index mapper for the serial I/O grid
    const auto& equilCartMapper = *equilCartMapper_;
    for (const auto& elem : elements(globalGridView)) {
        for (const auto& is : intersections(globalGridView, elem)) {
            if (!is.neighbor())
                continue; // intersection is on the domain boundary

            // Not 'const' because remapped if 'map' is non-null.
            unsigned c1 = globalElemMapper.index(is.inside());
            unsigned c2 = globalElemMapper.index(is.outside());

            if (c1 > c2)
                continue; // we only need to handle each connection once, thank you.

            std::size_t cc1 = equilCartMapper.cartesianIndex( c1 );
            std::size_t cc2 = equilCartMapper.cartesianIndex( c2 );

            // Normalise so cc1 <= cc2 (required by the lookups below).
            if ( cc2 < cc1 )
                std::swap(cc1, cc2);

            // Re-ordering in case of non-empty mapping between equilGrid to grid
            if (map) {
                c1 = map(c1); // equilGridToGrid map
                c2 = map(c2);
            }

            if (isNumAquConn(cc1, cc2) || ! isDirectNeighbours(cc1, cc2)) {
                // We need to check whether an NNC for this face was also
                // specified via the NNC keyword in the deck.  Subtract any
                // explicit NNC transmissibility already reported in pass 1
                // (nncData is assumed sorted for lower_bound).
                auto t = this->globalTrans().transmissibility(c1, c2);
                auto candidate = std::lower_bound(nncData.begin(), nncData.end(),
                                                  NNCdata { cc1, cc2, 0.0 });

                while ((candidate != nncData.end()) &&
                       (candidate->cell1 == cc1) &&
                       (candidate->cell2 == cc2))
                {
                    t -= candidate->trans;
                    ++candidate;
                }

                // ECLIPSE ignores NNCs with zero transmissibility
                // (different threshold than for NNC with corresponding
                // EDITNNC above). In addition we do set small
                // transmissibilities to zero when setting up the simulator.
                // These will be ignored here, too.
                const auto tt = unitSystem
                    .from_si(UnitSystem::measure::transmissibility, t);

                if (std::isnormal(tt) && (tt > 1.0e-12)) {
                    this->outputNnc_.emplace_back(cc1, cc2, t);
                }
            }
        }
    }

    return this->outputNnc_;
}
544
545template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
547doWriteOutput(const int reportStepNum,
548 const std::optional<int> timeStepNum,
549 const bool isSubStep,
550 data::Solution&& localCellData,
551 data::Wells&& localWellData,
552 data::GroupAndNetworkValues&& localGroupAndNetworkData,
553 data::Aquifers&& localAquiferData,
554 WellTestState&& localWTestState,
555 const Action::State& actionState,
556 const UDQState& udqState,
557 const SummaryState& summaryState,
558 const std::vector<Scalar>& thresholdPressure,
559 Scalar curTime,
560 Scalar nextStepSize,
561 bool doublePrecision,
562 bool isFlowsn,
563 std::array<FlowsData<double>, 3>&& flowsn,
564 bool isFloresn,
565 std::array<FlowsData<double>, 3>&& floresn)
566{
567 const auto isParallel = this->collectOnIORank_.isParallel();
568 const bool needsReordering = this->collectOnIORank_.doesNeedReordering();
569
570 RestartValue restartValue {
571 (isParallel || needsReordering)
572 ? this->collectOnIORank_.globalCellData()
573 : std::move(localCellData),
574
575 isParallel ? this->collectOnIORank_.globalWellData()
576 : std::move(localWellData),
577
578 isParallel ? this->collectOnIORank_.globalGroupAndNetworkData()
579 : std::move(localGroupAndNetworkData),
580
581 isParallel ? this->collectOnIORank_.globalAquiferData()
582 : std::move(localAquiferData)
583 };
584
585 if (eclState_.getSimulationConfig().useThresholdPressure()) {
586 restartValue.addExtra("THRESHPR", UnitSystem::measure::pressure,
587 thresholdPressure);
588 }
589
590 // Add suggested next timestep to extra data.
591 if (! isSubStep) {
592 restartValue.addExtra("OPMEXTRA", std::vector<double>(1, nextStepSize));
593 }
594
595 // Add nnc flows and flores.
596 if (isFlowsn) {
597 const auto flowsn_global = isParallel ? this->collectOnIORank_.globalFlowsn() : std::move(flowsn);
598 for (const auto& flows : flowsn_global) {
599 if (flows.name.empty())
600 continue;
601 if (flows.name == "FLOGASN+") {
602 restartValue.addExtra(flows.name, UnitSystem::measure::gas_surface_rate, flows.values);
603 } else {
604 restartValue.addExtra(flows.name, UnitSystem::measure::liquid_surface_rate, flows.values);
605 }
606 }
607 }
608 if (isFloresn) {
609 const auto floresn_global = isParallel ? this->collectOnIORank_.globalFloresn() : std::move(floresn);
610 for (const auto& flores : floresn_global) {
611 if (flores.name.empty()) {
612 continue;
613 }
614 restartValue.addExtra(flores.name, UnitSystem::measure::rate, flores.values);
615 }
616 }
617
618 std::vector<Opm::RestartValue> restartValues{};
619 // only serial, only CpGrid (for now)
620 if ( !isParallel && !needsReordering && (this->eclState_.getLgrs().size()>0) && (this->grid_.maxLevel()>0) ) {
621 // Level cells that appear on the leaf grid view get the data::Solution values from there.
622 // Other cells (i.e., parent cells that vanished due to refinement) get rubbish values for now.
623 // Only data::Solution is restricted to the level grids. Well, GroupAndNetwork, Aquifer are
624 // not modified in this method.
625 Opm::Lgr::extractRestartValueLevelGrids<Grid>(this->grid_, restartValue, restartValues);
626 }
627 else {
628 restartValues.reserve(1); // minimum size
629 restartValues.push_back(std::move(restartValue)); // no LGRs-> only one restart value
630 }
631
632 // make sure that the previous I/O request has been completed
633 // and the number of incomplete tasklets does not increase between
634 // time steps
635 this->taskletRunner_->barrier();
636
637 // check if there might have been a failure in the TaskletRunner
638 if (this->taskletRunner_->failure()) {
639 throw std::runtime_error("Failure in the TaskletRunner while writing output.");
640 }
641
642 // create a tasklet to write the data for the current time step to disk
643 auto eclWriteTasklet = std::make_shared<EclWriteTasklet>(
644 actionState,
645 isParallel ? this->collectOnIORank_.globalWellTestState() : std::move(localWTestState),
646 summaryState, udqState, *this->eclIO_,
647 reportStepNum, timeStepNum, isSubStep, curTime, std::move(restartValues), doublePrecision);
648
649 // finally, start a new output writing job
650 this->taskletRunner_->dispatch(std::move(eclWriteTasklet));
651}
652
653template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
655evalSummary(const int reportStepNum,
656 const Scalar curTime,
657 const data::Wells& localWellData,
658 const data::WellBlockAveragePressures& localWBPData,
659 const data::GroupAndNetworkValues& localGroupAndNetworkData,
660 const std::map<int,data::AquiferData>& localAquiferData,
661 const std::map<std::pair<std::string, int>, double>& blockData,
662 const std::map<std::string, double>& miscSummaryData,
663 const std::map<std::string, std::vector<double>>& regionData,
664 const Inplace& inplace,
665 const std::optional<Inplace>& initialInPlace,
666 const InterRegFlowMap& interRegFlows,
667 SummaryState& summaryState,
668 UDQState& udqState)
669{
670 if (collectOnIORank_.isIORank()) {
671 const auto& summary = eclIO_->summary();
672
673 const auto& wellData = this->collectOnIORank_.isParallel()
674 ? this->collectOnIORank_.globalWellData()
675 : localWellData;
676
677 const auto& wbpData = this->collectOnIORank_.isParallel()
678 ? this->collectOnIORank_.globalWBPData()
679 : localWBPData;
680
681 const auto& groupAndNetworkData = this->collectOnIORank_.isParallel()
682 ? this->collectOnIORank_.globalGroupAndNetworkData()
683 : localGroupAndNetworkData;
684
685 const auto& aquiferData = this->collectOnIORank_.isParallel()
686 ? this->collectOnIORank_.globalAquiferData()
687 : localAquiferData;
688
689 summary.eval(summaryState,
690 reportStepNum,
691 curTime,
692 wellData,
693 wbpData,
694 groupAndNetworkData,
695 miscSummaryData,
696 initialInPlace,
697 inplace,
698 regionData,
699 blockData,
700 aquiferData,
701 getInterRegFlowsAsMap(interRegFlows));
702
703 // Off-by-one-fun: The reportStepNum argument corresponds to the
704 // report step these results will be written to, whereas the
705 // argument to UDQ function evaluation corresponds to the report
706 // step we are currently on.
707 const auto udq_step = reportStepNum - 1;
708
709 this->schedule_[udq_step].udq()
710 .eval(udq_step,
711 this->schedule_.wellMatcher(udq_step),
712 this->schedule_[udq_step].group_order(),
713 this->schedule_.segmentMatcherFactory(udq_step),
714 [es = std::cref(this->eclState_)]() {
715 return std::make_unique<RegionSetMatcher>
716 (es.get().fipRegionStatistics());
717 },
718 summaryState, udqState);
719 }
720
721#if HAVE_MPI
722 if (collectOnIORank_.isParallel()) {
723 Parallel::MpiSerializer ser(grid_.comm());
724 ser.append(summaryState);
725 }
726#endif
727}
728
729template<class Grid, class EquilGrid, class GridView, class ElementMapper, class Scalar>
732globalTrans() const
733{
734 assert (globalTrans_);
735 return *globalTrans_;
736}
737
738} // namespace Opm
739
740#endif // OPM_ECL_GENERIC_WRITER_IMPL_HPP
Definition: CollectDataOnIORank.hpp:49
bool isIORank() const
Definition: CollectDataOnIORank.hpp:126
Definition: EclGenericWriter.hpp:65
const EclipseState & eclState_
Definition: EclGenericWriter.hpp:156
void doWriteOutput(const int reportStepNum, const std::optional< int > timeStepNum, const bool isSubStep, data::Solution &&localCellData, data::Wells &&localWellData, data::GroupAndNetworkValues &&localGroupAndNetworkData, data::Aquifers &&localAquiferData, WellTestState &&localWTestState, const Action::State &actionState, const UDQState &udqState, const SummaryState &summaryState, const std::vector< Scalar > &thresholdPressure, Scalar curTime, Scalar nextStepSize, bool doublePrecision, bool isFlowsn, std::array< FlowsData< double >, 3 > &&flowsn, bool isFloresn, std::array< FlowsData< double >, 3 > &&floresn)
Definition: EclGenericWriter_impl.hpp:547
CollectDataOnIORankType collectOnIORank_
Definition: EclGenericWriter.hpp:152
void evalSummary(int reportStepNum, Scalar curTime, const data::Wells &localWellData, const data::WellBlockAveragePressures &localWBPData, const data::GroupAndNetworkValues &localGroupAndNetworkData, const std::map< int, data::AquiferData > &localAquiferData, const std::map< std::pair< std::string, int >, double > &blockData, const std::map< std::string, double > &miscSummaryData, const std::map< std::string, std::vector< double > > &regionData, const Inplace &inplace, const std::optional< Inplace > &initialInPlace, const InterRegFlowMap &interRegFlows, SummaryState &summaryState, UDQState &udqState)
Definition: EclGenericWriter_impl.hpp:655
void extractOutputTransAndNNC(const std::function< unsigned int(unsigned int)> &map)
Definition: EclGenericWriter_impl.hpp:282
std::unique_ptr< TaskletRunner > taskletRunner_
Definition: EclGenericWriter.hpp:158
void writeInit()
Definition: EclGenericWriter_impl.hpp:265
EclGenericWriter(const Schedule &schedule, const EclipseState &eclState, const SummaryConfig &summaryConfig, const Grid &grid, const EquilGrid *equilGrid, const GridView &gridView, const Dune::CartesianIndexMapper< Grid > &cartMapper, const Dune::CartesianIndexMapper< EquilGrid > *equilCartMapper, bool enableAsyncOutput, bool enableEsmry)
Definition: EclGenericWriter_impl.hpp:214
const EclipseIO & eclIO() const
Definition: EclGenericWriter_impl.hpp:257
const TransmissibilityType & globalTrans() const
Definition: EclGenericWriter_impl.hpp:732
Inter-region flow accumulation maps for all region definition arrays.
Definition: InterRegFlows.hpp:179
std::vector< data::InterRegFlowMap > getInterRegFlows() const
const std::vector< std::string > & names() const
Class for serializing and broadcasting data using MPI.
Definition: MPISerializer.hpp:38
void append(T &data, int root=0)
Serialize and broadcast on root process, de-serialize and append on others.
Definition: MPISerializer.hpp:82
void broadcast(RootRank rootrank, Args &&... args)
Definition: MPISerializer.hpp:47
The base class for tasklets.
Definition: tasklets.hpp:45
virtual void run()=0
Handles where a given tasklet is run.
Definition: tasklets.hpp:93
Definition: Transmissibility.hpp:54
Definition: blackoilbioeffectsmodules.hh:43
Avoid mistakes in calls to broadcast() by wrapping the root argument in an explicit type.
Definition: MPISerializer.hpp:33