21#define fei_file "fei_Vector_core.cpp"
// NOTE(review): fragment of the Vector_core constructor. The leading numbers
// on these lines are original source line numbers fused in by extraction;
// interior lines (30-48, etc.) are missing from this view.
// Visible behavior: caches the MPI communicator from the vector space, and
// reads the per-process global equation offsets from eqnComm_.
29 comm_(vecSpace->getCommunicator()),
49 const std::vector<int>& offsets =
eqnComm_->getGlobalOffsets();
// NOTE(review): fragment of setOverlap(numRemoteEqns, remoteEqns) — see the
// declaration `void setOverlap(int numRemoteEqns=0, const int *remoteEqns=NULL)`
// later in this chunk. Gaps between the fused line numbers mean statements
// are missing here; comments describe only what is visible.
69 const int* remoteEqns)
// Early-out path when no explicit remote-equation list is supplied
// (body of this branch not visible here).
71 if (numRemoteEqns == 0 && remoteEqns == NULL) {
// Caller-supplied list: map each remote equation to its owning process
// via eqnComm_, skipping equations this process owns itself.
82 if (numRemoteEqns != 0 && remoteEqns != NULL) {
83 for(
int i=0; i<numRemoteEqns; ++i) {
84 int proc =
eqnComm_->getOwnerProc(remoteEqns[i]);
85 if (proc == local_proc)
continue;
// Default path: derive the overlap set from the vector space's
// shared-and-owned indices, again skipping locally owned equations.
91 std::vector<int> eqns;
92 vecSpace_->getIndices_SharedAndOwned(eqns);
94 for(
size_t i=0; i<eqns.size(); ++i) {
95 int proc =
eqnComm_->getOwnerProc(eqns[i]);
96 if (proc == local_proc)
continue;
// NOTE(review): fragment of scatterToOverlap() (named by its own error string
// below). Visible structure is a three-phase nonblocking MPI exchange:
// (1) sizes, (2) integer indices, (3) double coefficients. Lines between the
// fused line numbers are missing; how recvProcs/sendProcs are populated is
// not visible here — presumably from the overlap sets; confirm in full source.
121 std::vector<int> recvProcs;
130 std::vector<int> sendProcs;
// One int/double buffer and one size slot per send partner.
134 std::vector<std::vector<int> > send_ints(sendProcs.size());
135 std::vector<std::vector<double> > send_doubles(sendProcs.size());
136 std::vector<int> send_sizes(sendProcs.size());
// Enough request/status slots for both directions of the exchange.
138 std::vector<MPI_Request> mpiReqs(sendProcs.size()+recvProcs.size());
139 std::vector<MPI_Status> mpiStatuses(sendProcs.size()+recvProcs.size());
// Phase 1a: post a nonblocking receive for the incoming size from each
// send partner (tag1).
146 for(
unsigned i=0; i<sendProcs.size(); ++i) {
147 MPI_Irecv(&send_sizes[i], 1, MPI_INT, sendProcs[i],
148 tag1,
comm_, &mpiReqs[i]);
// Phase 1b: send each recv partner the size of the corresponding
// remotely-owned vector (remoteVec — lookup not visible in this fragment).
154 for(
unsigned i=0; i<recvProcs.size(); ++i) {
155 int proc = recvProcs[i];
158 int size = remoteVec->
size();
159 MPI_Send(&size, 1, MPI_INT, proc, tag1,
comm_);
// Complete all size receives before sizing the index buffers.
162 MPI_Waitall(sendProcs.size(), &mpiReqs[0], &mpiStatuses[0]);
// Phase 2a: resize per-partner buffers and post nonblocking receives for
// the equation indices (tag1 again).
166 for(
unsigned i=0; i<sendProcs.size(); ++i) {
167 int proc = sendProcs[i];
168 int size = send_sizes[i];
169 send_ints[i].resize(size);
170 MPI_Irecv(&(send_ints[i][0]), size, MPI_INT, proc, tag1,
172 send_doubles[i].resize(size);
// Phase 2b: send each recv partner the indices of its remotely-owned vector.
176 for(
unsigned i=0; i<recvProcs.size(); ++i) {
177 int proc = recvProcs[i];
179 int size = remoteVec->
size();
180 int* indices = &(remoteVec->
indices())[0];
181 MPI_Send(indices, size, MPI_INT, proc, tag1,
comm_);
184 MPI_Waitall(sendProcs.size(), &mpiReqs[0], &mpiStatuses[0]);
// Phase 3a: post nonblocking receives for the coefficient values directly
// into each remotely-owned vector's coefs() storage (tag2).
187 for(
unsigned i=0; i<recvProcs.size(); ++i) {
188 int proc = recvProcs[i];
190 int size = remoteVec->
size();
191 double* coefs = &(remoteVec->
coefs())[0];
192 MPI_Irecv(coefs, size, MPI_DOUBLE, proc, tag2,
comm_, &mpiReqs[i]);
// Phase 3b: gather the locally owned data for each requested index set
// (the gathering call at original line 201 is only partially visible) and
// send it; a failed gather is reported but apparently not fatal here.
196 for(
unsigned i=0; i<sendProcs.size(); ++i) {
197 int proc = sendProcs[i];
199 int num = send_sizes[i];
201 &(send_doubles[i][0]), 0);
203 FEI_COUT <<
"fei::Vector_core::scatterToOverlap ERROR getting data to send."<<
FEI_ENDL;
207 MPI_Send(&(send_doubles[i][0]), num, MPI_DOUBLE, proc, tag2,
comm_);
// Wait only on the coefficient receives posted in phase 3a.
210 MPI_Waitall(recvProcs.size(), &mpiReqs[0], &mpiStatuses[0]);
// NOTE(review): fragment of the const copyOut(numValues, indices, values,
// vectorIndex) member (declaration appears later in this chunk). For each
// requested index it resolves the owning process; the remotely-owned lookup
// (binary search — insertPoint) and the error path are only partially visible.
220 int vectorIndex)
const
222 for(
int i=0; i<numValues; ++i) {
223 int ind = indices[i];
231 int proc =
eqnComm_->getOwnerProc(ind);
// insertPoint is the not-found sentinel/result of a search into the
// remotelyOwned_ data (search call itself not visible in this fragment).
234 int insertPoint = -1;
// Fragment of an error message for an index missing from the
// remotelyOwned_ vec object of its owning proc.
238 <<
", index " << ind <<
" not in remotelyOwned_ vec object for proc "
// Found case: copy the cached remote coefficient into the output array.
243 values[i] = remoteVec->
coefs()[idx];
// NOTE(review): fragment of giveToVector(numValues, indices, values, sumInto,
// vectorIndex) — named by the debug string below. Per-entry routing: each
// index is mapped to its owner; remote entries are logged (os/dbgprefix_) and
// cached, with prev_proc/prev_vec apparently memoizing the last remote vector
// looked up — confirm against the full source.
256 const double* values,
262 for(
int i=0; i<numValues; ++i) {
263 int ind = indices[i];
264 double val = values[i];
275 int proc =
eqnComm_->getOwnerProc(ind);
// Debug-output fragment for the remote-entry path.
278 os <<
dbgprefix_<<
"giveToVector remote["<<proc<<
"]("
// Cache the remotely-owned vector across consecutive entries that route
// to the same owning process.
282 if (proc != prev_proc) {
284 prev_vec = remoteVec;
// NOTE(review): fragments of assembleFieldData (orig. line ~318) and
// assembleFieldDataLocalIDs (orig. lines ~341-346) — both start by querying
// the field size, then the LocalIDs variant resolves global indices through
// the vector space (CHK_ERR suggests a nonzero-is-error return convention).
318 int fieldSize =
vecSpace_->getFieldSize(fieldID);
341 int fieldSize =
vecSpace_->getFieldSize(fieldID);
346 CHK_ERR(
vecSpace_->getGlobalIndicesLocalIDs(numIDs, localIDs, idType, fieldID,
// NOTE(review): fragment of pack_send_buffers(sendProcs, remotelyOwned,
// send_chars, resize_buffer, zeroRemotelyOwnedAfterPacking) — full signature
// appears in the declaration list later in this chunk. For each send partner
// it serializes the remotely-owned indices/coefs into a char buffer
// (pack call only partially visible), optionally zeroing the remotely-owned
// data afterwards.
356 std::vector<std::vector<char> >& send_chars,
358 bool zeroRemotelyOwnedAfterPacking)
360 for(
size_t i=0; i<sendProcs.size(); ++i) {
361 int proc = sendProcs[i];
364 remoteVec->
coefs(), send_chars[i], resize_buffer);
366 if (zeroRemotelyOwnedAfterPacking) {
// NOTE(review): fragment of the send/recv-proc setup and size-exchange logic
// (original lines ~384-436; function name not visible in this fragment —
// presumably the routine that refreshes sendProcs_/recv sizes, TODO confirm).
// Visible behavior: accumulate tmpSendProcs into the member sendProcs_
// without duplicates, pack send buffers (resize enabled, no zeroing), then
// exchange buffer sizes with MPI_Irecv/MPI_Send on tag1 and drain the
// requests with MPI_Waitany.
384 std::vector<int> tmpSendProcs;
386 for(
size_t i=0; i<tmpSendProcs.size(); ++i) {
399 if (!found)
sendProcs_.push_back(tmpSendProcs[i]);
405 std::vector<MPI_Request> mpiReqs;
// Sizing pass: buffers are resized while packing, and the remotely-owned
// data is NOT zeroed (this pass only measures).
411 bool resize_buffer =
true;
412 bool zero_remotely_owned_after_packing =
false;
414 resize_buffer, zero_remotely_owned_after_packing);
423 tag1,
comm_, &mpiReqs[i]);
430 MPI_Send(&size, 1, MPI_INT, proc, tag1,
comm_);
436 MPI_Waitany(mpiReqs.size(), &mpiReqs[0], &index, &status);
// NOTE(review): fragment of gatherFromOverlap(accumulate) (the `accumulate`
// flag visibly forwarded below matches the declaration later in this chunk).
// Visible structure: post receives, pack and send the remotely-owned
// contributions (buffers NOT resized here — sizes were fixed earlier — and
// the remote data IS zeroed after packing), then as each receive completes
// (MPI_Waitany) unpack indices/coefs and hand them to the underlying vector.
452 std::vector<MPI_Request> mpiReqs;
464 tag1,
comm_, &mpiReqs[i]);
// Packing pass: reuse pre-sized buffers; zero the remotely-owned data so
// contributions are not sent twice on a subsequent gather.
467 bool resize_buffer =
false;
468 bool zero_remotely_owned_after_packing =
true;
470 resize_buffer, zero_remotely_owned_after_packing);
484 MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
487 std::vector<int> indices;
488 std::vector<double> coefs;
// Skip empty contributions; otherwise forward (indices, coefs) with the
// caller's accumulate flag (call at orig. line 495 only partially visible).
492 int num = indices.size();
493 if (num == 0)
continue;
495 &(coefs[0]), accumulate, 0);
// NOTE(review): fragment of copyOutFieldData(fieldID, idType, numIDs, IDs,
// data, vectorIndex) (declaration later in this chunk). Looks up the field
// size and the vector space's equation numbers, then per ID computes a
// dof offset from the eqnNumbers array and iterates the field's scalar
// components. The guarded &eqnNums[0] avoids dereferencing an empty vector.
516 int fieldSize =
vecSpace_->getFieldSize(fieldID);
524 std::vector<int>& eqnNums =
vecSpace_->getEqnNumbers();
525 int* vspcEqnPtr = eqnNums.size() > 0 ? &eqnNums[0] : NULL;
528 for(
int i=0; i<numIDs; ++i) {
541 dofOffset = eqnNumbers[foffset] - eqnNumbers[0];
542 for(
int j=0; j<fieldSize; ++j) {
552 fieldID, indicesPtr) );
// NOTE(review): fragment of writeToFile(filename, matrixMarketFormat)
// (declaration later in this chunk; `ofref` is presumably the output file
// stream — TODO confirm). When matrixMarketFormat is set, a MatrixMarket
// "array real general" banner is emitted; entries are written as
// "<index> <coef>" lines.
561 bool matrixMarketFormat)
568 static char mmbanner[] =
"%%MatrixMarket matrix array real general";
578 if (matrixMarketFormat) {
593 if (matrixMarketFormat) {
597 ofref << i <<
" " << coef <<
FEI_ENDL;
// NOTE(review): fragment of writeToStream(ostrm, matrixMarketFormat)
// (declaration later in this chunk). Processes take turns writing
// (loop over procs, skipping every proc except local_proc — presumably with
// a barrier between turns, not visible here). Same MatrixMarket banner and
// "<index> <coef>" entry format as writeToFile.
608 bool matrixMarketFormat)
615 static char mmbanner[] =
"%%MatrixMarket matrix array real general";
620 for(
int proc=0; proc<
numProcs; ++proc) {
622 if (proc != local_proc)
continue;
625 if (matrixMarketFormat) {
637 if (matrixMarketFormat) {
649 if (matrixMarketFormat) {
653 ostrm <<
" " << i <<
" " << coef <<
FEI_ENDL;
660 if (matrixMarketFormat) {
// NOTE(review): the remainder of this chunk is a flat list of member and
// free-function declarations (an extraction artifact resembling an API
// index — no bodies, no semicolons, not compilable statements). It usefully
// documents the interfaces referenced by the fragments above (CSVec's
// indices()/coefs(), eqnComm_, pack/unpack helpers, etc.). Kept verbatim.
std::vector< int > & indices()
std::vector< double > & coefs()
int getFieldEqnOffset(int fieldID, int &offset) const
FEI_OSTREAM * output_stream_
OutputLevel output_level_
fei::FieldMask * getFieldMask()
int getOffsetIntoEqnNumbers() const
GlobalIDType getNumber() const
std::vector< int > recvProcs_
fei::SharedPtr< fei::EqnComm > eqnComm_
void pack_send_buffers(const std::vector< int > &sendProcs, const std::vector< fei::CSVec * > &remotelyOwned, std::vector< std::vector< char > > &send_chars, bool resize_buffer, bool zeroRemotelyOwnedAfterPacking)
std::vector< int > recv_sizes_
fei::SharedPtr< fei::VectorSpace > vecSpace_
virtual int gatherFromOverlap(bool accumulate=true)
int assembleFieldDataLocalIDs(int fieldID, int idType, int numIDs, const int *localIDs, const double *data, bool sumInto=true, int vectorIndex=0)
std::vector< int > work_indices2_
int copyOut(int numValues, const int *indices, double *values, int vectorIndex=0) const
int giveToVector(int numValues, const int *indices, const double *values, bool sumInto=true, int vectorIndex=0)
virtual int writeToFile(const char *filename, bool matrixMarketFormat=true)
std::vector< int > remotelyOwnedProcs_
std::vector< std::vector< char > > send_chars_
Vector_core(fei::SharedPtr< fei::VectorSpace > vecSpace, int numLocalEqns)
bool sendRecvProcsNeedUpdated_
virtual int giveToUnderlyingVector(int numValues, const int *indices, const double *values, bool sumInto=true, int vectorIndex=0)=0
std::vector< int > work_indices_
virtual int copyOut_FE(int nodeNumber, int dofOffset, double &value)=0
virtual int writeToStream(FEI_OSTREAM &ostrm, bool matrixMarketFormat=true)
fei::CSVec * getRemotelyOwned(int proc)
virtual int copyOutOfUnderlyingVector(int numValues, const int *indices, double *values, int vectorIndex=0) const =0
void setOverlap(int numRemoteEqns=0, const int *remoteEqns=NULL)
std::vector< CSVec * > remotelyOwned_
virtual int scatterToOverlap()
virtual int copyOutFieldData(int fieldID, int idType, int numIDs, const int *IDs, double *data, int vectorIndex=0)
std::vector< std::vector< char > > recv_chars_
std::vector< int > sendProcs_
std::vector< CSVec * > & remotelyOwned()
int assembleFieldData(int fieldID, int idType, int numIDs, const int *IDs, const double *data, bool sumInto=true, int vectorIndex=0)
fei::Record< int > * getRecordWithID(int ID)
void unpack_indices_coefs(const std::vector< char > &buffer, std::vector< int > &indices, std::vector< double > &coefs)
void pack_indices_coefs(const std::vector< int > &indices, const std::vector< double > &coefs, std::vector< char > &buffer, bool resize_buffer)
void put_entry(CSVec &vec, int eqn, double coef)
int localProc(MPI_Comm comm)
int mirrorProcs(MPI_Comm comm, std::vector< int > &toProcs, std::vector< int > &fromProcs)
void add_entry(CSVec &vec, int eqn, double coef)
int binarySearch(const T &item, const T *list, int len)
std::ostream & console_out()
void set_values(CSVec &vec, double scalar)
int numProcs(MPI_Comm comm)
void Barrier(MPI_Comm comm)