MueLu Version of the Day
Loading...
Searching...
No Matches
MueLu_ReitzingerPFactory_def.hpp
Go to the documentation of this file.
1// @HEADER
2//
3// ***********************************************************************
4//
5// MueLu: A package for multigrid based preconditioning
6// Copyright 2012 Sandia Corporation
7//
8// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9// the U.S. Government retains certain rights in this software.
10//
11// Redistribution and use in source and binary forms, with or without
12// modification, are permitted provided that the following conditions are
13// met:
14//
15// 1. Redistributions of source code must retain the above copyright
16// notice, this list of conditions and the following disclaimer.
17//
18// 2. Redistributions in binary form must reproduce the above copyright
19// notice, this list of conditions and the following disclaimer in the
20// documentation and/or other materials provided with the distribution.
21//
22// 3. Neither the name of the Corporation nor the names of the
23// contributors may be used to endorse or promote products derived from
24// this software without specific prior written permission.
25//
26// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37//
38// Questions? Contact
39// Jonathan Hu (jhu@sandia.gov)
40// Andrey Prokopenko (aprokop@sandia.gov)
41// Ray Tuminaro (rstumin@sandia.gov)
42//
43// ***********************************************************************
44//
45// @HEADER
46#ifndef MUELU_REITZINGERPFACTORY_DEF_HPP
47#define MUELU_REITZINGERPFACTORY_DEF_HPP
48
49#include <Xpetra_MapFactory.hpp>
50#include <Xpetra_Map.hpp>
51#include <Xpetra_CrsMatrix.hpp>
52#include <Xpetra_Matrix.hpp>
53#include <Xpetra_MatrixMatrix.hpp>
54#include <Xpetra_MultiVector.hpp>
55#include <Xpetra_MultiVectorFactory.hpp>
56#include <Xpetra_VectorFactory.hpp>
57#include <Xpetra_Import.hpp>
58#include <Xpetra_ImportUtils.hpp>
59#include <Xpetra_ImportFactory.hpp>
60#include <Xpetra_CrsMatrixWrap.hpp>
61#include <Xpetra_StridedMap.hpp>
62#include <Xpetra_StridedMapFactory.hpp>
63#include <Xpetra_IO.hpp>
64
66
67#include "MueLu_MasterList.hpp"
68#include "MueLu_Monitor.hpp"
69#include "MueLu_Utilities.hpp"
70
71namespace MueLu {
72
73 template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class Node>
// NOTE(review): the declaration line (74) was dropped by the doc extractor; per the
// member list this is: RCP<const ParameterList> ReitzingerPFactory<...>::GetValidParameterList() const
// Returns the list of parameters (and their generating factories) this factory accepts,
// so that setParameterList() can validate user input.
 75 RCP<ParameterList> validParamList = rcp(new ParameterList());
 76
// Pull the canonical definitions of shared options from the MasterList so the
// defaults/docs stay consistent across factories.
 77#define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
 78 SET_VALID_ENTRY("repartition: enable");
 79 SET_VALID_ENTRY("repartition: use subcommunicators");
 80 SET_VALID_ENTRY("tentative: calculate qr");
 81 SET_VALID_ENTRY("tentative: constant column sums");
 82#undef SET_VALID_ENTRY
 83
// Data dependencies: fine edge matrix A, discrete gradient D0, nodal aggregate
// matrix, nodal prolongator, and (currently unused, see DeclareInput) NodeImporter.
 84 validParamList->set< RCP<const FactoryBase> >("A", Teuchos::null, "Generating factory of the matrix A");
 85 validParamList->set< RCP<const FactoryBase> >("D0", Teuchos::null, "Generating factory of the matrix D0");
 86 validParamList->set< RCP<const FactoryBase> >("NodeAggMatrix", Teuchos::null, "Generating factory of the matrix NodeAggMatrix");
 87 validParamList->set< RCP<const FactoryBase> >("Pnodal", Teuchos::null, "Generating factory of the matrix P");
 88 validParamList->set< RCP<const FactoryBase> >("NodeImporter", Teuchos::null, "Generating factory of the matrix NodeImporter");
 89
 90 // Make sure we don't recursively validate options for the matrixmatrix kernels
 91 ParameterList norecurse;
 92 norecurse.disableRecursiveValidation();
 93 validParamList->set<ParameterList> ("matrixmatrix: kernel params", norecurse, "MatrixMatrix kernel parameters");
 94
 95 return validParamList;
 96 }
97
 98 template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class Node>
// NOTE(review): the declaration line (99) was dropped by the doc extractor; per the
// member list this is: void ReitzingerPFactory<...>::DeclareInput(Level& fineLevel, Level& coarseLevel) const
// Declares the level data this factory consumes, so the dependency system builds it
// before Build() runs. Note that "NodeAggMatrix" is requested on BOTH levels: the fine
// one for edge counting and the coarse one for repartitioning decisions in BuildP().
 100 Input(fineLevel, "A");
 101 Input(fineLevel, "D0");
 102 Input(fineLevel, "NodeAggMatrix");
 103 Input(coarseLevel, "NodeAggMatrix");
 104 Input(coarseLevel, "Pnodal");
 105 // Input(coarseLevel, "NodeImporter");
 106
 107 }
108
 109 template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class Node>
// NOTE(review): the declaration line (110) was dropped by the doc extractor; per the
// member list this is: void ReitzingerPFactory<...>::Build(Level& fineLevel, Level& coarseLevel) const
// Standard PFactory entry point; all of the work happens in BuildP().
 111 return BuildP(fineLevel, coarseLevel);
 112 }
113
 114 template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class Node>
// NOTE(review): the declaration line (115) was dropped by the doc extractor; per the
// member list this is: void ReitzingerPFactory<...>::BuildP(Level& fineLevel, Level& coarseLevel) const
//
// Builds the Reitzinger/Schoeberl-style edge prolongator Pe and the coarse discrete
// gradient D0_coarse from: the fine edge matrix A, the fine discrete gradient D0,
// the fine nodal aggregate matrix, and the nodal prolongator Pn. The construction
// enforces the commuting relation Pe * D0_coarse = D0_fine * Pn (checked at the end).
 116 FactoryMonitor m(*this, "Build", coarseLevel);
 117 using Teuchos::arcp_const_cast;
 118 using MT = typename Teuchos::ScalarTraits<SC>::magnitudeType;
 119 using XMM = Xpetra::MatrixMatrix<SC,LO,GO,NO>;
 120 Teuchos::FancyOStream& out0=GetBlackHole();
 121 const ParameterList& pL = GetParameterList();
 122
 123 bool update_communicators = pL.get<bool>("repartition: enable") && pL.get<bool>("repartition: use subcommunicators");
 124
 125 // If these are set correctly we assume that the nodal P contains only ones
 126 bool nodal_p_is_all_ones = !pL.get<bool>("tentative: constant column sums") && !pL.get<bool>("tentative: calculate qr");
 127
 128 RCP<Matrix> EdgeMatrix = Get< RCP<Matrix> > (fineLevel, "A");
 129 RCP<Matrix> D0 = Get< RCP<Matrix> > (fineLevel, "D0");
 130 RCP<Matrix> NodeMatrix = Get< RCP<Matrix> > (fineLevel, "NodeAggMatrix");
 131 RCP<Matrix> Pn = Get< RCP<Matrix> > (coarseLevel, "Pnodal");
 132
 133 const GO GO_INVALID = Teuchos::OrdinalTraits<GO>::invalid();
 134 const LO LO_INVALID = Teuchos::OrdinalTraits<LO>::invalid();
 135
 136 // This needs to be an Operator because if NodeMatrix gets repartitioned away, we get an Operator on the level
 137 RCP<Operator> CoarseNodeMatrix = Get< RCP<Operator> >(coarseLevel, "NodeAggMatrix");
 138 int MyPID = EdgeMatrix.is_null()? -1 : EdgeMatrix->getRowMap()->getComm()->getRank();
 139
 140 // Matrix matrix params
 141 RCP<ParameterList> mm_params = rcp(new ParameterList);;
 142 if(pL.isSublist("matrixmatrix: kernel params"))
 143 mm_params->sublist("matrixmatrix: kernel params") = pL.sublist("matrixmatrix: kernel params");
 144
 145
 146 // Normalize P
 147 if(!nodal_p_is_all_ones) {
 148 // The parameters told us the nodal P isn't all ones, so we make a copy that is.
 149 GetOStream(Runtime0) << "ReitzingerPFactory::BuildP(): Assuming Pn is not normalized" << std::endl;
 150 RCP<Matrix> Pn_old = Pn;
 151
// Rebuild Pn on the same sparsity pattern but with every entry set to 1, as
// required by the +/-1 / +/-2 triple-product trick used to form Pe below.
 152 Pn = Xpetra::MatrixFactory<SC,LO,GO,NO>::Build(Pn->getCrsGraph());
 153 Pn->setAllToScalar(Teuchos::ScalarTraits<SC>::one());
 154 Pn->fillComplete(Pn->getDomainMap(),Pn->getRangeMap());
 155 }
 156 else {
 157 // The parameters claim P is all ones.
 158 GetOStream(Runtime0) << "ReitzingerPFactory::BuildP(): Assuming Pn is normalized" << std::endl;
 159 }
 160
 161 // TODO: We need to make sure Pn isn't normalized. Right now this has to be done explicitly by the user
 162
 163 // TODO: We need to look through and see which of these really need importers and which ones don't
 164
 165 /* Generate the D0 * Pn matrix and its transpose */
 166 RCP<Matrix> D0_Pn, PnT_D0T, D0_Pn_nonghosted;
 167 Teuchos::Array<int> D0_Pn_col_pids;
 168 {
 169 RCP<Matrix> dummy;
 170 SubFactoryMonitor m2(*this, "Generate D0*Pn", coarseLevel);
 171 D0_Pn = XMM::Multiply(*D0,false,*Pn,false,dummy,out0,true,true,"D0*Pn",mm_params);
 172
 173 // We don't want this guy getting accidentally used later
 174 if(!mm_params.is_null()) mm_params->remove("importer",false);
 175
 176 // Save this so we don't need to do the multiplication again later
 177 D0_Pn_nonghosted = D0_Pn;
 178
 179 // Get owning PID information on columns for tie-breaking
 180 if(!D0_Pn->getCrsGraph()->getImporter().is_null()) {
 181 Xpetra::ImportUtils<LO,GO,NO> utils;
 182 utils.getPids(*D0_Pn->getCrsGraph()->getImporter(),D0_Pn_col_pids,false);
 183 }
 184 else {
// No importer means every column is locally owned, so all PIDs are MyPID.
 185 D0_Pn_col_pids.resize(D0_Pn->getCrsGraph()->getColMap()->getLocalNumElements(),MyPID);
 186 }
 187 }
 188
 189
 190 {
 191 // Get the transpose
 192 SubFactoryMonitor m2(*this, "Transpose D0*Pn", coarseLevel);
 193 PnT_D0T = Utilities::Transpose(*D0_Pn, true);
 194 }
 195
 196
 197 // We really need a ghosted version of D0_Pn here.
 198 // The reason is that if there's only one fine edge between two coarse nodes, somebody is going
 199 // to own the associated coarse edge. The sum/sign rule doesn't guarantee the fine owner is the coarse owner.
 200 // So you can wind up with a situation where the only guy who *can* register the coarse edge isn't the sum/sign
 201 // owner. Adding more ghosting fixes that.
 202 if(!PnT_D0T->getCrsGraph()->getImporter().is_null()) {
 203 RCP<const Import> Importer = PnT_D0T->getCrsGraph()->getImporter();
 204 RCP<const CrsMatrix> D0_Pn_crs = rcp_dynamic_cast<const CrsMatrixWrap>(D0_Pn)->getCrsMatrix();
// Rebuild D0_Pn with the importer's target map as the row map, i.e. with ghost rows.
 205 RCP<Matrix> D0_Pn_new = rcp(new CrsMatrixWrap(CrsMatrixFactory::Build(D0_Pn_crs,*Importer,D0_Pn->getDomainMap(),Importer->getTargetMap())));
 206 D0_Pn = D0_Pn_new;
 207 // Get owning PID information on columns for tie-breaking
 208 if(!D0_Pn->getCrsGraph()->getImporter().is_null()) {
 209 Xpetra::ImportUtils<LO,GO,NO> utils;
 210 utils.getPids(*D0_Pn->getCrsGraph()->getImporter(),D0_Pn_col_pids,false);
 211 }
 212 else {
 213 D0_Pn_col_pids.resize(D0_Pn->getCrsGraph()->getColMap()->getLocalNumElements(),MyPID);
 214 }
 215 }
 216
 217
 218 // FIXME: This is using deprecated interfaces
 219 ArrayView<const LO> colind_E, colind_N;
 220 ArrayView<const SC> values_E, values_N;
 221
 222 size_t Ne=EdgeMatrix->getLocalNumRows();
 223 size_t Nn=NodeMatrix->getLocalNumRows();
 224
 225 // Upper bound on local number of coarse edges
 226 size_t max_edges = (NodeMatrix->getLocalNumEntries() + Nn +1) / 2;
// CSR-style buffers for the coarse D0 being assembled below (two entries, +1/-1,
// per coarse edge).
 227 ArrayRCP<size_t> D0_rowptr(Ne+1);
 228 ArrayRCP<LO> D0_colind(max_edges);
 229 ArrayRCP<SC> D0_values(max_edges);
 230 D0_rowptr[0] = 0;
 231
 232 LO current = 0;
 233 LO Nnc = PnT_D0T->getRowMap()->getLocalNumElements();
 234
// Main assembly loop: for each local coarse node i, find the coarse nodes it is
// connected to through fine edges and emit one coarse edge per such pair (subject
// to the ownership rules below).
 235 for(LO i=0; i<(LO)Nnc; i++) {
 236 // GO global_i = PnT_D0T->getRowMap()->getGlobalElement(i);
 237
 238 // FIXME: We don't really want an std::map here. This is just a first cut implementation
 239 using value_type = bool;
 240 std::map<LO, value_type> ce_map;
 241
 242 // FIXME: This is using deprecated interfaces
 243 PnT_D0T->getLocalRowView(i,colind_E,values_E);
 244
 245 for(LO j=0; j<(LO)colind_E.size(); j++) {
 246
 247 // NOTE: Edges between procs will be handled via a version
 248 // of ML's odd/even rule
 249 // For this to function correctly, we make two assumptions:
 250 // (a) The processor that owns a fine edge owns at least one of the attached nodes.
 251 // (b) Aggregation is uncoupled.
 252
 253 // TODO: Add some debug code to check the assumptions
 254
 255 // Check to see if we own this edge and continue if we don't
 256 GO edge_gid = PnT_D0T->getColMap()->getGlobalElement(colind_E[j]);
 257 LO j_row = D0_Pn->getRowMap()->getLocalElement(edge_gid);
 258 int pid0, pid1;
 259 D0_Pn->getLocalRowView(j_row,colind_N,values_N);
 260
 261 // Skip incomplete rows
 262 if(colind_N.size() != 2) continue;
 263
 264 pid0 = D0_Pn_col_pids[colind_N[0]];
 265 pid1 = D0_Pn_col_pids[colind_N[1]];
 266 // printf("[%d] Row %d considering edge (%d)%d -> (%d)%d\n",MyPID,global_i,colind_N[0],D0_Pn->getColMap()->getGlobalElement(colind_N[0]),colind_N[1],D0_Pn->getColMap()->getGlobalElement(colind_N[1]));
 267
 268 // Check to see who owns these nodes
 269 // If the sum of owning procs is odd, the lower ranked proc gets it
 270
 271 bool zero_matches = pid0 == MyPID;
 272 bool one_matches = pid1 == MyPID;
 273 bool keep_shared_edge = false, own_both_nodes = false;
 274 if(zero_matches && one_matches) {own_both_nodes=true;}
 275 else {
// Odd/even tie-break so exactly one of the two sharing ranks keeps the edge.
 276 int sum_is_odd = (pid0 + pid1) % 2;
 277 int i_am_smaller = MyPID == std::min(pid0,pid1);
 278 if(sum_is_odd && i_am_smaller) keep_shared_edge=true;
 279 if(!sum_is_odd && !i_am_smaller) keep_shared_edge=true;
 280 }
 281 // printf("[%d] - matches %d/%d keep_shared = %d own_both = %d\n",MyPID,(int)zero_matches,(int)one_matches,(int)keep_shared_edge,(int)own_both_nodes);
 282 if(!keep_shared_edge && !own_both_nodes) continue;
 283
 284
 285 // We're doing this in GID space, but only because it allows us to explain
 286 // the edge orientation as "always goes from lower GID to higher GID". This could
 287 // be done entirely in local GIDs, but then the ordering is a little more confusing.
 288 // This could be done in local indices later if we need the extra performance.
 289 for(LO k=0; k<(LO)colind_N.size(); k++) {
 290 LO my_colind = colind_N[k];
 291 if(my_colind!=LO_INVALID && ((keep_shared_edge && my_colind != i) || (own_both_nodes && my_colind > i)) ) {
 292 ce_map.emplace(std::make_pair(my_colind,true));
 293 }
 294 }//end for k < colind_N.size()
 295 }// end for j < colind_E.size()
 296
 297
 298 // std::map is sorted, so we'll just iterate through this
 299 for(auto iter=ce_map.begin(); iter != ce_map.end(); iter++) {
 300 LO col = iter->first;
 301 // This shouldn't happen. But in case it did...
 302 if(col == i) {
 303 continue;
 304 }
 305
 306 // ASSUMPTION: "i" is a valid local column id
// Emit one coarse edge: -1 at node i, +1 at node col (edge points low GID -> high GID).
 307 D0_colind[current] = i;
 308 D0_values[current] = -1;
 309 current++;
 310 D0_colind[current] = col;
 311 D0_values[current] = 1;
 312 current++;
 313 D0_rowptr[current / 2] = current;
 314 }
 315
 316 }// end for i < Nn
 317
 318 LO num_coarse_edges = current / 2;
 319 D0_rowptr.resize(num_coarse_edges+1);
 320 D0_colind.resize(current);
 321 D0_values.resize(current);
 322
 323 // We're assuming that if the coarse NodeMatrix has no nodes on a rank, the coarse edge guy won't either.
 324 // We check that here.
 325 TEUCHOS_TEST_FOR_EXCEPTION( (num_coarse_edges > 0 && CoarseNodeMatrix.is_null()) ||
 326 (num_coarse_edges == 0 && !CoarseNodeMatrix.is_null())
 327 , Exceptions::RuntimeError, "MueLu::ReitzingerPFactory: Mismatched num_coarse_edges and NodeMatrix repartition.");
 328
 329
 330 // Count the total number of edges
 331 // NOTE: Since we solve the ownership issue above, this should do what we want
 332 RCP<const Map> ownedCoarseEdgeMap = Xpetra::MapFactory<LO,GO,NO>::Build(EdgeMatrix->getRowMap()->lib(), GO_INVALID, num_coarse_edges,EdgeMatrix->getRowMap()->getIndexBase(),EdgeMatrix->getRowMap()->getComm());
 333
 334
 335 // NOTE: This only works because of the assumptions above
 336 RCP<const Map> ownedCoarseNodeMap = Pn->getDomainMap();
 337 RCP<const Map> ownedPlusSharedCoarseNodeMap = D0_Pn->getCrsGraph()->getColMap();
 338
 339 // Create the coarse D0
 340 RCP<CrsMatrix> D0_coarse;
 341 {
 342 SubFactoryMonitor m2(*this, "Build D0", coarseLevel);
 343 // FIXME: We can be smarter with memory here
 344 // TODO: Is there a smarter way to get this importer?
 345 D0_coarse = CrsMatrixFactory::Build(ownedCoarseEdgeMap,ownedPlusSharedCoarseNodeMap,0);
 // NOTE(review): "CrsMatrixFatory" is a typo in the exception message below; it is a
 // runtime string so it is left unchanged here — fix upstream ("CrsMatrixFactory").
 346 TEUCHOS_TEST_FOR_EXCEPTION(D0_coarse.is_null(), Exceptions::RuntimeError, "MueLu::ReitzingerPFactory: CrsMatrixFatory failed.");
 347
 348 // FIXME: Deprecated code
 349 ArrayRCP<size_t> ia;
 350 ArrayRCP<LO> ja;
 351 ArrayRCP<SC> val;
// Copy the locally assembled CSR arrays into the matrix's own storage, then
// hand them back with setAllValues.
 352 D0_coarse->allocateAllValues(current, ia, ja, val);
 353 std::copy(D0_rowptr.begin(),D0_rowptr.end(),ia.begin());
 354 std::copy(D0_colind.begin(),D0_colind.end(),ja.begin());
 355 std::copy(D0_values.begin(),D0_values.end(),val.begin());
 356 D0_coarse->setAllValues(ia, ja, val);
 357
 358#if 0
 359 {
 360 char fname[80];
 361 printf("[%d] D0: ia.size() = %d ja.size() = %d\n",MyPID,(int)ia.size(),(int)ja.size());
 362 printf("[%d] D0: ia :",MyPID);
 363 for(int i=0; i<(int)ia.size(); i++)
 364 printf("%d ",(int)ia[i]);
 365 printf("\n[%d] D0: global ja :",MyPID);
 366 for(int i=0; i<(int)ja.size(); i++)
 367 printf("%d ",(int)ownedPlusSharedCoarseNodeMap->getGlobalElement(ja[i]));
 368 printf("\n[%d] D0: local ja :",MyPID);
 369 for(int i=0; i<(int)ja.size(); i++)
 370 printf("%d ",(int)ja[i]);
 371 printf("\n");
 372
 373 sprintf(fname,"D0_global_ja_%d_%d.dat",MyPID,fineLevel.GetLevelID());
 374 FILE * f = fopen(fname,"w");
 375 for(int i=0; i<(int)ja.size(); i++)
 376 fprintf(f,"%d ",(int)ownedPlusSharedCoarseNodeMap->getGlobalElement(ja[i]));
 377 fclose(f);
 378
 379 sprintf(fname,"D0_local_ja_%d_%d.dat",MyPID,fineLevel.GetLevelID());
 380 f = fopen(fname,"w");
 381 for(int i=0; i<(int)ja.size(); i++)
 382 fprintf(f,"%d ",(int)ja[i]);
 383 fclose(f);
 384
 385 }
 386#endif
 387 D0_coarse->expertStaticFillComplete(ownedCoarseNodeMap,ownedCoarseEdgeMap);
 388 }
 389 RCP<Matrix> D0_coarse_m = rcp(new CrsMatrixWrap(D0_coarse));
 390 RCP<Teuchos::FancyOStream> fout = Teuchos::fancyOStream(Teuchos::rcpFromRef(std::cout));
 391
 392
 393 // Create the Pe matrix, but with the extra entries. From ML's notes:
 394 /* The general idea is that the matrix */
 395 /* T_h P_n T_H^* */
 396 /* is almost Pe. If we make sure that P_n contains 1's and -1's, the*/
 397 /* matrix triple product will yield a matrix with +/- 1 and +/- 2's.*/
 398 /* If we remove all the 1's and divide the 2's by 2. we arrive at Pe*/
 399 RCP<Matrix> Pe;
 400 {
 401 SubFactoryMonitor m2(*this, "Generate Pe (pre-fix)", coarseLevel);
 402
 403 RCP<Matrix> dummy;
 404 RCP<Matrix> Pn_D0cT = XMM::Multiply(*Pn,false,*D0_coarse_m,true,dummy,out0,true,true,"Pn*D0c'",mm_params);
 405
 406 // We don't want this guy getting accidentally used later
 407 if(!mm_params.is_null()) mm_params->remove("importer",false);
 408
 409 Pe = XMM::Multiply(*D0,false,*Pn_D0cT,false,dummy,out0,true,true,"D0*(Pn*D0c')",mm_params);
 410
 411 // TODO: Something like this *might* work. But this specifically, doesn't
 412 // Pe = XMM::Multiply(*D0_Pn_nonghosted,false,*D0_coarse_m,true,dummy,out0,true,true,"(D0*Pn)*D0c'",mm_params);
 413 }
 414
 415 /* Weed out the +/- entries, shrinking the matrix as we go */
 416 {
 417 SubFactoryMonitor m2(*this, "Generate Pe (post-fix)", coarseLevel);
 418 Pe->resumeFill();
 419 SC one = Teuchos::ScalarTraits<SC>::one();
 420 MT two = 2*Teuchos::ScalarTraits<MT>::one();
 421 SC zero = Teuchos::ScalarTraits<SC>::zero();
 422 SC neg_one = - one;
 423
 424 RCP<const CrsMatrix> Pe_crs = rcp_dynamic_cast<const CrsMatrixWrap>(Pe)->getCrsMatrix();
 425 TEUCHOS_TEST_FOR_EXCEPTION(Pe_crs.is_null(), Exceptions::RuntimeError, "MueLu::ReitzingerPFactory: Pe is not a crs matrix.");
 426 ArrayRCP<const size_t > rowptr_const;
 427 ArrayRCP<const LO> colind_const;
 428 ArrayRCP<const SC> values_const;
 429 Pe_crs->getAllValues(rowptr_const,colind_const,values_const);
// In-place compaction of Pe's CSR arrays: the const views are cast away and
// rewritten, which is safe here because we own the matrix and call setAllValues below.
 430 ArrayRCP<size_t> rowptr = arcp_const_cast<size_t>(rowptr_const);
 431 ArrayRCP<LO> colind = arcp_const_cast<LO>(colind_const);
 432 ArrayRCP<SC> values = arcp_const_cast<SC>(values_const);
 433 LO ct = 0;
 434 LO lower = rowptr[0];
 435 for(LO i=0; i<(LO)Ne; i++) {
 436 for(size_t j=lower; j<rowptr[i+1]; j++) {
 437 if (values[j] == one || values[j] == neg_one || values[j] == zero) {
 438 // drop this guy
 439 }
 440 else {
// Keep the +/-2 entries, scaled down to +/-1.
 441 colind[ct] = colind[j];
 442 values[ct] = values[j] / two;
 443 ct++;
 444 }
 445 }
 446 lower = rowptr[i+1];
 447 rowptr[i+1] = ct;
 448 }
 449 rowptr[Ne] = ct;
 450 colind.resize(ct);
 451 values.resize(ct);
 452 rcp_const_cast<CrsMatrix>(Pe_crs)->setAllValues(rowptr,colind,values);
 453
 454 Pe->fillComplete(Pe->getDomainMap(),Pe->getRangeMap());
 455 }
 456
 457 /* Check commuting property */
 458 CheckCommutingProperty(*Pe,*D0_coarse_m,*D0,*Pn);
 459
 460 /* If we're repartitioning here, we need to cut down the communicators */
 461 // NOTE: We need to do this *after* checking the commuting property, since
 462 // that's going to need the fineLevel's communicators, not the repartitioned ones
 463 if(update_communicators) {
 464 //NOTE: We can only do D0 here. We have to do Ke_coarse=(Re Ke_fine Pe) in RebalanceAcFactory
 465 RCP<const Teuchos::Comm<int> > newComm;
 466 if(!CoarseNodeMatrix.is_null()) newComm = CoarseNodeMatrix->getDomainMap()->getComm();
 467 RCP<const Map> newMap = Xpetra::MapFactory<LO,GO,NO>::copyMapWithNewComm(D0_coarse_m->getRowMap(),newComm);
 468 D0_coarse_m->removeEmptyProcessesInPlace(newMap);
 469
 470 // The "in place" still leaves a dummy matrix here. That needs to go
 471 if(newMap.is_null()) D0_coarse_m = Teuchos::null;
 472
 473 Set(coarseLevel,"InPlaceMap",newMap);
 474 }
 475
 476 /* Set output on the level */
 477 Set(coarseLevel,"P",Pe);
 478 Set(coarseLevel,"Ptent",Pe);
 479
// D0 is also published via NoFactory so downstream levels can find it as user data;
// the keep flags make it persist for the run without being treated as user-supplied.
 480 Set(coarseLevel,"D0",D0_coarse_m);
 481 coarseLevel.Set("D0",D0_coarse_m,NoFactory::get());
 482 coarseLevel.AddKeepFlag("D0",NoFactory::get(), MueLu::Final);
 483 coarseLevel.RemoveKeepFlag("D0",NoFactory::get(), MueLu::UserData);
 484
 485#if 0
 486 {
 487 int numProcs = Pe->getRowMap()->getComm()->getSize();
 488 char fname[80];
 489
 490 sprintf(fname,"Pe_%d_%d.mat",numProcs,fineLevel.GetLevelID()); Xpetra::IO<SC,LO,GO,NO>::Write(fname,*Pe);
 491 sprintf(fname,"Pn_%d_%d.mat",numProcs,fineLevel.GetLevelID()); Xpetra::IO<SC,LO,GO,NO>::Write(fname,*Pn);
 492 sprintf(fname,"D0c_%d_%d.mat",numProcs,fineLevel.GetLevelID()); Xpetra::IO<SC,LO,GO,NO>::Write(fname,*D0_coarse_m);
 493 sprintf(fname,"D0f_%d_%d.mat",numProcs,fineLevel.GetLevelID()); Xpetra::IO<SC,LO,GO,NO>::Write(fname,*D0);
 494 }
 495#endif
 496
 497 }// end Build
498
 499 template <class Scalar,class LocalOrdinal, class GlobalOrdinal, class Node>
// NOTE(review): the return-type line (500) was dropped by the doc extractor; per the
// member list this is: void ReitzingerPFactory<...>::CheckCommutingProperty(...) const
// Diagnostic check that the commuting relation Pe * D0_c == D0_f * Pn holds, by
// forming both products, subtracting them, and printing the Frobenius norm of the
// difference (which should be ~0). Only runs when Statistics0 output is enabled,
// since it costs two matrix-matrix multiplies and an add.
 501 CheckCommutingProperty(const Matrix & Pe, const Matrix & D0_c, const Matrix& D0_f, const Matrix & Pn) const {
 502 if(IsPrint(Statistics0)) {
 503 using XMM = Xpetra::MatrixMatrix<SC,LO,GO,NO>;
 504 using MT = typename Teuchos::ScalarTraits<SC>::magnitudeType;
 505 SC one = Teuchos::ScalarTraits<SC>::one();
 506 SC zero = Teuchos::ScalarTraits<SC>::zero();
 507
 508 RCP<Matrix> dummy;
 509 Teuchos::FancyOStream &out0=GetBlackHole();
 510 RCP<Matrix> left = XMM::Multiply(Pe,false,D0_c,false,dummy,out0);
 511 RCP<Matrix> right = XMM::Multiply(D0_f,false,Pn,false,dummy,out0);
 512
 513 // We need a non-FC matrix for the add, sadly
 514 RCP<CrsMatrix> sum_c = CrsMatrixFactory::Build(left->getRowMap(),left->getLocalMaxNumRowEntries()+right->getLocalMaxNumRowEntries());
 515 RCP<Matrix> summation = rcp(new CrsMatrixWrap(sum_c));
// summation = (Pe*D0_c) - (D0_f*Pn)
 516 XMM::TwoMatrixAdd(*left, false, one, *summation, zero);
 517 XMM::TwoMatrixAdd(*right, false, -one, *summation, one);
 518
 519 MT norm = summation->getFrobeniusNorm();
 520 GetOStream(Statistics0) << "CheckCommutingProperty: ||Pe D0_c - D0_f Pn || = "<<norm<<std::endl;
 521
 522 }
 523
 524 }//end CheckCommutingProperty
525
526
527
528} //namespace MueLu
529
530
531
532#define MUELU_REITZINGERPFACTORY_SHORT
533#endif // MUELU_REITZINGERPFACTORY_DEF_HPP
#define SET_VALID_ENTRY(name)
Exception thrown to report errors in the internal logic of the program.
Timer to be used in factories. Similar to Monitor but with additional timers.
void Input(Level &level, const std::string &varName) const
T Get(Level &level, const std::string &varName) const
void Set(Level &level, const std::string &varName, const T &data) const
Class that holds all level-specific information.
void RemoveKeepFlag(const std::string &ename, const FactoryBase *factory, KeepType keep=MueLu::All)
int GetLevelID() const
Return level number.
void AddKeepFlag(const std::string &ename, const FactoryBase *factory=NoFactory::get(), KeepType keep=MueLu::Keep)
void Set(const std::string &ename, const T &entry, const FactoryBase *factory=NoFactory::get())
static const NoFactory * get()
virtual const Teuchos::ParameterList & GetParameterList() const
void BuildP(Level &fineLevel, Level &coarseLevel) const
Abstract Build method.
void Build(Level &fineLevel, Level &coarseLevel) const
Build an object with this factory.
void CheckCommutingProperty(const Matrix &Pe, const Matrix &D0_c, const Matrix &D0_f, const Matrix &Pn) const
Utility method.
RCP< const ParameterList > GetValidParameterList() const
Return a const parameter list of valid parameters that setParameterList() will accept.
void DeclareInput(Level &fineLevel, Level &coarseLevel) const
Input.
Timer to be used in factories. Similar to SubMonitor but adds a timer level by level.
static RCP< Xpetra::Matrix< Scalar, LocalOrdinal, GlobalOrdinal, Node > > Transpose(Xpetra::Matrix< Scalar, LocalOrdinal, GlobalOrdinal, Node > &Op, bool optimizeTranspose=false, const std::string &label=std::string(), const Teuchos::RCP< Teuchos::ParameterList > &params=Teuchos::null)
Teuchos::FancyOStream & GetOStream(MsgType type, int thisProcRankOnly=0) const
Get an output stream for outputting the input message type.
bool IsPrint(MsgType type, int thisProcRankOnly=-1) const
Find out whether we need to print out information for a specific message type.
Teuchos::FancyOStream & GetBlackHole() const
Namespace for MueLu classes and methods.
@ Final
Keep data only for this run. Used to keep data useful for Hierarchy::Iterate(). Data will be deleted ...
@ UserData
User data are always kept. This flag is set automatically when Level::Set("data", data) is used....
@ Runtime0
One-liner description of what is happening.
@ Statistics0
Print statistics that do not involve significant additional computation.