bool all_local = true;

for(int i=0; i<numIndices; ++i) {
  // ...
  int p = eqnComm_->getOwnerProc(indices[i]);
  // ... (elided call; only its trailing arguments
  //      "numIndices, indices);" survive in the excerpt)
}

// ...
for(int i=0; i<numIndices; ++i) {
  // ...
}
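// ...

// From fei::Graph_Impl::addDiagonals (named by the exception below):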
bool all_local = true;

for(int i=0; i<numIndices; ++i) {
  int ind = indices[i];
  if (ind < 0) {
    throw std::runtime_error(
      "fei::Graph_Impl::addDiagonals given negative index");
  }
  // ...
}

// ...
for(int i=0; i<numIndices; ++i) {
  int ind = indices[i];
  // ...
}
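// ...

// The rest of the excerpt is the gather/exchange step: build the list
// of procs we send shared rows to, determine who we receive from,
// exchange buffer sizes, then exchange the packed rows.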
std::vector<int> sendProcs;
// ... (loop over candidate destination procs, elided)
    sendProcs.push_back((int)i);
// ...

std::vector<int> recvProcs;
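// ... (recvProcs is filled in elided code)

// One receive buffer per proc we expect data from: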
std::vector<std::vector<int> > recv_ints(recvProcs.size());

std::vector<int> recv_sizes(recvProcs.size());
std::vector<MPI_Request> mpiReqs(recvProcs.size());
std::vector<MPI_Status> mpiStatuses(recvProcs.size());
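// ... (the int message tag 'tag1' is declared in elided lines)

// Post non-blocking receives for the sizes of the incoming buffers.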
for(unsigned i=0; i<recvProcs.size(); ++i) {
  MPI_Irecv(&recv_sizes[i], 1, MPI_INT, recvProcs[i],
            tag1, comm_, &mpiReqs[i]);
}
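// Pack the rows bound for each destination proc into a flat int array,
// then send each array's size with a blocking send (the matching
// receives are already posted above).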
std::vector<std::vector<int> > send_ints(sendProcs.size());

for(unsigned i=0; i<sendProcs.size(); ++i) {
  int proc = sendProcs[i];
  // ... (packing of send_ints[i] elided)
  int isize = send_ints[i].size();

  MPI_Send(&isize, 1, MPI_INT, proc, tag1, comm_);
}
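// Wait for all of the incoming sizes to arrive.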
if (mpiReqs.size() > 0) {
  MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
}
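// Sizes are now known: size each receive buffer and post the
// non-blocking receives for the packed row data, reusing the requests.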
for(size_t i=0; i<recvProcs.size(); ++i) {
  int intsize = recv_sizes[i];

  recv_ints[i].resize(intsize);

  MPI_Irecv(&(recv_ints[i][0]), intsize, MPI_INT, recvProcs[i],
            tag1, comm_, &mpiReqs[i]);
}
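// Send the packed row data.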
for(size_t i=0; i<sendProcs.size(); ++i) {
  int proc = sendProcs[i];

  MPI_Send(&(send_ints[i][0]), send_ints[i].size(), MPI_INT,
           proc, tag1, comm_);
}
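// Wait for all of the packed row data to arrive.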
if (mpiReqs.size() > 0) {
  MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
}
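// Unpack each received buffer. The layout is: recvdata[0] = numRows,
// then numRows row numbers, then numRows row lengths, then the packed
// column indices for all rows.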
for(unsigned i=0; i<recvProcs.size(); ++i) {
  std::vector<int>& recvdata = recv_ints[i]; // reference: no copy needed
  int numRows = recvdata[0];
  int* rowNumbers = &recvdata[1];
  int* rowLengths = rowNumbers+numRows;
  int* packedCols = rowLengths+numRows;
  int offset = 0;
  for(int r=0; r<numRows; ++r) {
    addIndices(rowNumbers[r], rowLengths[r], &packedCols[offset]);
    offset += rowLengths[r];
  }
}