template <typename IT>
void SpParHelper::ReDistributeToVector(int * & map_scnt, std::vector< std::vector< IT > > & locs_send,
            std::vector< std::vector< std::string > > & data_send,
            std::vector< std::array<char, MAXVERTNAME> > & distmapper_array, const MPI_Comm & comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);

    int * map_rcnt = new int[nprocs];
    MPI_Alltoall(map_scnt, 1, MPI_INT, map_rcnt, 1, MPI_INT, comm);
    int * map_sdspl = new int[nprocs]();
    int * map_rdspl = new int[nprocs]();
    std::partial_sum(map_scnt, map_scnt+nprocs-1, map_sdspl+1);
    std::partial_sum(map_rcnt, map_rcnt+nprocs-1, map_rdspl+1);
    IT totmapsend = map_sdspl[nprocs-1] + map_scnt[nprocs-1];
    IT totmaprecv = map_rdspl[nprocs-1] + map_rcnt[nprocs-1];

    // pack the variable-length strings into a fixed-width send buffer
    char (*sendbuf)[MAXVERTNAME] = (char (*)[MAXVERTNAME]) malloc(sizeof(char[MAXVERTNAME]) * totmapsend);
    IT * sendinds = new IT[totmapsend];
    for(int i=0; i<nprocs; ++i)
    {
        int loccnt = 0;
        for(std::string s : data_send[i])
        {
            std::strcpy(sendbuf[map_sdspl[i]+loccnt], s.c_str());
            ++loccnt;
        }
        std::vector<std::string>().swap(data_send[i]);  // free memory
    }
    for(int i=0; i<nprocs; ++i)
    {
        std::copy(locs_send[i].begin(), locs_send[i].end(), sendinds+map_sdspl[i]);
        std::vector<IT>().swap(locs_send[i]);   // free memory
    }

    char (*recvbuf)[MAXVERTNAME] = (char (*)[MAXVERTNAME]) malloc(sizeof(char[MAXVERTNAME]) * totmaprecv);

    MPI_Datatype MPI_STRING;    // one fixed-width string per element
    MPI_Type_contiguous(sizeof(char[MAXVERTNAME]), MPI_CHAR, &MPI_STRING);
    MPI_Type_commit(&MPI_STRING);

    MPI_Alltoallv(sendbuf, map_scnt, map_sdspl, MPI_STRING, recvbuf, map_rcnt, map_rdspl, MPI_STRING, comm);
    free(sendbuf);
    MPI_Type_free(&MPI_STRING);

    IT * recvinds = new IT[totmaprecv];
    MPI_Alltoallv(sendinds, map_scnt, map_sdspl, MPIType<IT>(), recvinds, map_rcnt, map_rdspl, MPIType<IT>(), comm);
    DeleteAll(sendinds, map_scnt, map_sdspl, map_rcnt, map_rdspl);

    if(!std::is_sorted(recvinds, recvinds+totmaprecv))
        std::cout << "Assertion failed at proc " << myrank << ": Received indices are not sorted, this is unexpected" << std::endl;

    for(IT i=0; i< totmaprecv; ++i)
    {
        assert(i == recvinds[i]);
        std::copy(recvbuf[i], recvbuf[i]+MAXVERTNAME, distmapper_array[i].begin());
    }
    free(recvbuf);
    delete [] recvinds;
}
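/*
 * Illustrative sketch (not part of SpParHelper): the exchange pattern that ReDistributeToVector
 * uses for fixed-width strings, shown in isolation. A contiguous MPI datatype of MAXVERTNAME
 * chars lets MPI_Alltoallv count in whole strings instead of bytes, and the displacements are
 * exclusive prefix sums of the counts. The function name and parameters are hypothetical;
 * it assumes <mpi.h>, <numeric> and <cstring> are available and that sendcnt[] is already
 * filled with the number of strings destined for each rank.
 */
static void ExampleExchangeFixedWidthStrings(int * sendcnt, char (*sendbuf)[MAXVERTNAME], MPI_Comm comm)
{
    int nprocs;
    MPI_Comm_size(comm, &nprocs);

    int * recvcnt = new int[nprocs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, comm);   // every rank learns how much it will receive

    int * sdispls = new int[nprocs]();
    int * rdispls = new int[nprocs]();
    std::partial_sum(sendcnt, sendcnt+nprocs-1, sdispls+1);         // exclusive prefix sums
    std::partial_sum(recvcnt, recvcnt+nprocs-1, rdispls+1);
    int totrecv = rdispls[nprocs-1] + recvcnt[nprocs-1];

    char (*recvbuf)[MAXVERTNAME] = (char (*)[MAXVERTNAME]) malloc(sizeof(char[MAXVERTNAME]) * totrecv);

    MPI_Datatype MPI_STRING;                                        // one fixed-width string per element
    MPI_Type_contiguous(MAXVERTNAME, MPI_CHAR, &MPI_STRING);
    MPI_Type_commit(&MPI_STRING);
    MPI_Alltoallv(sendbuf, sendcnt, sdispls, MPI_STRING, recvbuf, recvcnt, rdispls, MPI_STRING, comm);
    MPI_Type_free(&MPI_STRING);

    // ... use recvbuf[i] (each entry is a NUL-terminated name of at most MAXVERTNAME-1 chars) ...
    free(recvbuf);
    delete [] recvcnt; delete [] sdispls; delete [] rdispls;
}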
template <typename KEY, typename VAL, typename IT>
void SpParHelper::MemoryEfficientPSort(std::pair<KEY,VAL> * array, IT length, IT * dist, const MPI_Comm & comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);
    int nsize = nprocs / 2;     // new size

    if(nprocs < 10000)
    {
        bool excluded = false;
        if(dist[myrank] == 0) excluded = true;

        int nreals = 0;
        for(int i=0; i< nprocs; ++i)
            if(dist[i] != 0) ++nreals;

        if(nreals == nprocs)    // general case
        {
            long * dist_in = new long[nprocs];
            for(int i=0; i< nprocs; ++i) dist_in[i] = (long) dist[i];
            vpsort::parallel_sort(array, array+length, dist_in, comm);
            delete [] dist_in;
        }
        else
        {
            long * dist_in = new long[nreals];
            int * dist_out = new int[nprocs-nreals];    // ranks to exclude
            int indin = 0;
            int indout = 0;
            for(int i=0; i< nprocs; ++i)
            {
                if(dist[i] == 0)
                    dist_out[indout++] = i;
                else
                    dist_in[indin++] = (long) dist[i];
            }

#ifdef DEBUG
            std::ostringstream outs;
            outs << "To exclude indices: ";
            std::copy(dist_out, dist_out+indout, std::ostream_iterator<int>(outs, " ")); outs << std::endl;
            SpParHelper::Print(outs.str());
#endif

            MPI_Group sort_group, real_group;
            MPI_Comm_group(comm, &sort_group);
            MPI_Group_excl(sort_group, indout, dist_out, &real_group);
            MPI_Group_free(&sort_group);

            // MPI_Comm_create is collective over comm; excluded ranks receive MPI_COMM_NULL
            MPI_Comm real_comm;
            MPI_Comm_create(comm, real_group, &real_comm);
            if(!excluded)
            {
                vpsort::parallel_sort(array, array+length, dist_in, real_comm);
                MPI_Comm_free(&real_comm);
            }
            MPI_Group_free(&real_group);
            delete [] dist_in;
            delete [] dist_out;
        }
    }
    else
    {
        IT gl_median = std::accumulate(dist, dist+nsize, static_cast<IT>(0));   // global rank of the first element of the median processor
        sort(array, array+length);      // re-sort because data might have been swapped in previous iterations
        int color = (myrank < nsize) ? 0 : 1;

        std::pair<KEY,VAL> * low = array;
        std::pair<KEY,VAL> * upp = array;
        GlobalSelect(gl_median, low, upp, array, length, comm);
        BipartiteSwap(low, array, length, nsize, color, comm);

        if(color == 1) dist = dist + nsize;     // adjust for the second half of processors

        // recursive call; the two processor halves proceed independently on their own communicator
        MPI_Comm halfcomm;
        MPI_Comm_split(comm, color, myrank, &halfcomm);
        MemoryEfficientPSort(array, length, dist, halfcomm);
    }
}
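/*
 * Illustrative sketch (not part of SpParHelper): how the sort routines above drop ranks that own
 * no data before handing the work to the underlying parallel sort. MPI_Comm_create must be called
 * by every rank of the parent communicator; excluded ranks receive MPI_COMM_NULL. The function
 * name and parameters are hypothetical.
 */
static MPI_Comm ExampleCommWithoutEmptyRanks(const long * counts, MPI_Comm comm)
{
    int nprocs;
    MPI_Comm_size(comm, &nprocs);

    std::vector<int> toexclude;
    for(int i=0; i<nprocs; ++i)
        if(counts[i] == 0) toexclude.push_back(i);

    MPI_Group worldgrp, realgrp;
    MPI_Comm_group(comm, &worldgrp);
    MPI_Group_excl(worldgrp, (int) toexclude.size(), toexclude.data(), &realgrp);
    MPI_Group_free(&worldgrp);

    MPI_Comm realcomm;                          // MPI_COMM_NULL on the excluded ranks
    MPI_Comm_create(comm, realgrp, &realcomm);
    MPI_Group_free(&realgrp);
    return realcomm;                            // caller frees it with MPI_Comm_free when != MPI_COMM_NULL
}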
template <typename KEY, typename VAL, typename IT>
std::vector<std::pair<KEY,VAL>> SpParHelper::KeyValuePSort(std::pair<KEY,VAL> * array, IT length, IT * dist, const MPI_Comm & comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);
    int nsize = nprocs / 2;     // new size

    bool excluded = false;
    if(dist[myrank] == 0) excluded = true;

    int nreals = 0;
    for(int i=0; i< nprocs; ++i)
        if(dist[i] != 0) ++nreals;

    // pack each (key, original value) pair into an IndexHolder so the sorter only moves one POD type
    std::vector<IndexHolder<KEY>> in(length);
#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i< length; ++i)
    {
        in[i] = IndexHolder<KEY>(array[i].first, static_cast<unsigned long>(array[i].second));
    }

    if(nreals == nprocs)    // general case
    {
        par::sampleSort(in, comm);
    }
    else
    {
        long * dist_in = new long[nreals];
        int * dist_out = new int[nprocs-nreals];    // ranks to exclude
        int indin = 0;
        int indout = 0;
        for(int i=0; i< nprocs; ++i)
        {
            if(dist[i] == 0)
                dist_out[indout++] = i;
            else
                dist_in[indin++] = (long) dist[i];
        }

#ifdef DEBUG
        std::ostringstream outs;
        outs << "To exclude indices: ";
        std::copy(dist_out, dist_out+indout, std::ostream_iterator<int>(outs, " ")); outs << std::endl;
        SpParHelper::Print(outs.str());
#endif

        MPI_Group sort_group, real_group;
        MPI_Comm_group(comm, &sort_group);
        MPI_Group_excl(sort_group, indout, dist_out, &real_group);
        MPI_Group_free(&sort_group);

        // MPI_Comm_create is collective over comm; excluded ranks receive MPI_COMM_NULL
        MPI_Comm real_comm;
        MPI_Comm_create(comm, real_group, &real_comm);
        if(!excluded)
        {
            par::sampleSort(in, real_comm);
            MPI_Comm_free(&real_comm);
        }
        MPI_Group_free(&real_group);
        delete [] dist_in;
        delete [] dist_out;
    }

    std::vector<std::pair<KEY,VAL>> sorted(in.size());
    for(size_t i=0; i<in.size(); i++)
    {
        sorted[i].second = static_cast<VAL>(in[i].index);
        sorted[i].first = in[i].value;
    }
    return sorted;
}
template <typename KEY, typename VAL, typename IT>
void SpParHelper::GlobalSelect(IT gl_rank, std::pair<KEY,VAL> * & low, std::pair<KEY,VAL> * & upp, std::pair<KEY,VAL> * array, IT length, const MPI_Comm & comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);
    IT begin = 0;
    IT end = length;    // initially everyone is active
    std::pair<KEY, double> * wmminput = new std::pair<KEY,double>[nprocs];  // (median, #actives) of each processor

    MPI_Datatype MPI_sortType;
    MPI_Type_contiguous (sizeof(std::pair<KEY,double>), MPI_CHAR, &MPI_sortType);
    MPI_Type_commit (&MPI_sortType);

    KEY wmm;            // our pick for the weighted median of medians
    IT gl_low, gl_upp;
    IT active = end-begin;      // size of the active range
    IT nacts = 0;
    bool found = false;
    IT begin0, end0;
    do
    {
        begin0 = begin; end0 = end;
        KEY median = array[(begin + end)/2].first;  // median of the active range
        wmminput[myrank].first = median;
        wmminput[myrank].second = static_cast<double>(active);
        MPI_Allgather(MPI_IN_PLACE, 0, MPI_sortType, wmminput, 1, MPI_sortType, comm);
        double totact = 0;  // total number of active elements
        for(int i=0; i<nprocs; ++i)
            totact += wmminput[i].second;

        // the weighted median of medians is the smallest median whose
        // cumulative (normalized) weight reaches one half
        for(int i=0; i<nprocs; ++i)
            wmminput[i].second /= totact;   // normalize the weights

        sort(wmminput, wmminput+nprocs);    // sort w.r.t. medians
        double totweight = 0;
        int wmmloc = 0;
        while( wmmloc<nprocs && totweight < 0.5 )
        {
            totweight += wmminput[wmmloc++].second;
        }

        wmm = wmminput[wmmloc-1].first;     // weighted median of medians

        std::pair<KEY,VAL> wmmpair = std::make_pair(wmm, VAL());
        low = std::lower_bound (array+begin, array+end, wmmpair);
        upp = std::upper_bound (array+begin, array+end, wmmpair);
        IT loc_low = low-array;     // #{elements smaller than wmm}
        IT loc_upp = upp-array;     // #{elements smaller than or equal to wmm}

        MPI_Allreduce( &loc_low, &gl_low, 1, MPIType<IT>(), MPI_SUM, comm);
        MPI_Allreduce( &loc_upp, &gl_upp, 1, MPIType<IT>(), MPI_SUM, comm);

        if(gl_upp < gl_rank)
        {
            // our pick was too small; only recurse to the right
            begin = (low - array);
        }
        else if(gl_rank < gl_low)
        {
            // our pick was too big; only recurse to the left
            end = (upp - array);
        }
        else
        {
            found = true;
        }
        active = end-begin;
        MPI_Allreduce(&active, &nacts, 1, MPIType<IT>(), MPI_SUM, comm);
        if (begin0 == begin && end0 == end) break;  // the active range did not shrink, further iterations cannot help
    }
    while((nacts > 2*nprocs) && (!found));
    delete [] wmminput;

    MPI_Datatype MPI_pairType;
    MPI_Type_contiguous (sizeof(std::pair<KEY,VAL>), MPI_CHAR, &MPI_pairType);
    MPI_Type_commit (&MPI_pairType);

    int * nactives = new int[nprocs];
    nactives[myrank] = static_cast<int>(active);    // the active ranges are small enough at this point
    MPI_Allgather(MPI_IN_PLACE, 0, MPI_INT, nactives, 1, MPI_INT, comm);
    int * dpls = new int[nprocs]();     // displacements (zero initialized)
    std::partial_sum(nactives, nactives+nprocs-1, dpls+1);
    std::pair<KEY,VAL> * recvbuf = new std::pair<KEY,VAL>[nacts];
    low = array + begin;    // update low to the beginning of the active range
    MPI_Allgatherv(low, active, MPI_pairType, recvbuf, nactives, dpls, MPI_pairType, comm);

    std::pair<KEY,int> * allactives = new std::pair<KEY,int>[nacts];
    int k = 0;
    for(int i=0; i<nprocs; ++i)
    {
        for(int j=0; j<nactives[i]; ++j)
        {
            allactives[k] = std::make_pair(recvbuf[k].first, i);
            ++k;
        }
    }
    DeleteAll(recvbuf, dpls, nactives);
    sort(allactives, allactives+nacts);
    MPI_Allreduce(&begin, &gl_low, 1, MPIType<IT>(), MPI_SUM, comm);
    int diff = gl_rank - gl_low;
    for(int k=0; k < diff; ++k)
    {
        if(allactives[k].second == myrank)
            ++low;  // increment the local pointer
    }
    delete [] allactives;
    begin = low-array;
    MPI_Allreduce(&begin, &gl_low, 1, MPIType<IT>(), MPI_SUM, comm);
}
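/*
 * Illustrative sketch (not part of SpParHelper): the weighted median-of-medians step used inside
 * GlobalSelect, shown on a plain array of (median, weight) pairs. After normalizing the weights
 * to sum to one and sorting by median, the pick is the smallest median whose cumulative weight
 * reaches 1/2. The function name is hypothetical; assumes <algorithm> and <utility> and a KEY
 * type with operator<.
 */
template <typename KEY>
static KEY ExampleWeightedMedianOfMedians(std::pair<KEY,double> * medians, int n)
{
    double total = 0.0;
    for(int i=0; i<n; ++i) total += medians[i].second;
    for(int i=0; i<n; ++i) medians[i].second /= total;   // normalize the weights

    std::sort(medians, medians+n);                       // sort by median value (the pair's first member)
    double cumulative = 0.0;
    int loc = 0;
    while(loc < n && cumulative < 0.5)
        cumulative += medians[loc++].second;
    return medians[loc-1].first;                         // weighted median of medians
}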
template <typename KEY, typename VAL, typename IT>
void SpParHelper::BipartiteSwap(std::pair<KEY,VAL> * low, std::pair<KEY,VAL> * array, IT length, int nfirsthalf, int color, const MPI_Comm & comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);

    IT * firsthalves = new IT[nprocs];
    IT * secondhalves = new IT[nprocs];
    firsthalves[myrank] = low-array;
    secondhalves[myrank] = length - (low-array);

    MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), firsthalves, 1, MPIType<IT>(), comm);
    MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), secondhalves, 1, MPIType<IT>(), comm);

    int * sendcnt = new int[nprocs]();  // zero initialized
    IT totrecvcnt = 0;

    std::pair<KEY,VAL> * bufbegin = NULL;
    if(color == 0)      // first processor half: send away the second halves of the local data
    {
        bufbegin = low;
        totrecvcnt = length - (low-array);
        // global index (within the second processor half) of the beginning of my data
        IT beg_oftransfer = std::accumulate(secondhalves, secondhalves+myrank, static_cast<IT>(0));
        IT spaceafter = firsthalves[nfirsthalf];
        int i = nfirsthalf+1;   // destination (recipient) processor id
        while(i < nprocs && spaceafter < beg_oftransfer)
        {
            spaceafter += firsthalves[i++];     // post-increment
        }
        IT end_oftransfer = beg_oftransfer + secondhalves[myrank];  // global index of the end of my data
        IT beg_pour = beg_oftransfer;
        IT end_pour = std::min(end_oftransfer, spaceafter);
        sendcnt[i-1] = end_pour - beg_pour;
        while( i < nprocs && spaceafter < end_oftransfer )  // find more recipients until I run out of data
        {
            beg_pour = end_pour;
            spaceafter += firsthalves[i];
            end_pour = std::min(end_oftransfer, spaceafter);
            sendcnt[i++] = end_pour - beg_pour;     // post-increment
        }
    }
    else                // second processor half: send away the first halves of the local data
    {
        bufbegin = array;
        totrecvcnt = low-array;

        // global index (within the first processor half) of the beginning of my data
        IT beg_oftransfer = std::accumulate(firsthalves+nfirsthalf, firsthalves+myrank, static_cast<IT>(0));
        IT spaceafter = secondhalves[0];
        int i = 1;      // destination (recipient) processor id
        while( i< nfirsthalf && spaceafter < beg_oftransfer)
        {
            spaceafter += secondhalves[i++];    // post-increment
        }
        IT end_oftransfer = beg_oftransfer + firsthalves[myrank];   // global index of the end of my data
        IT beg_pour = beg_oftransfer;
        IT end_pour = std::min(end_oftransfer, spaceafter);
        sendcnt[i-1] = end_pour - beg_pour;
        while( i < nfirsthalf && spaceafter < end_oftransfer )  // find more recipients until I run out of data
        {
            beg_pour = end_pour;
            spaceafter += secondhalves[i];
            end_pour = std::min(end_oftransfer, spaceafter);
            sendcnt[i++] = end_pour - beg_pour;     // post-increment
        }
    }
    DeleteAll(firsthalves, secondhalves);
    int * recvcnt = new int[nprocs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, comm);   // get the receive counts

    MPI_Datatype MPI_valueType;
    MPI_Type_contiguous(sizeof(std::pair<KEY,VAL>), MPI_CHAR, &MPI_valueType);
    MPI_Type_commit(&MPI_valueType);

    std::pair<KEY,VAL> * receives = new std::pair<KEY,VAL>[totrecvcnt];
    int * sdpls = new int[nprocs]();    // displacements (zero initialized)
    int * rdpls = new int[nprocs]();
    std::partial_sum(sendcnt, sendcnt+nprocs-1, sdpls+1);
    std::partial_sum(recvcnt, recvcnt+nprocs-1, rdpls+1);

    MPI_Alltoallv(bufbegin, sendcnt, sdpls, MPI_valueType, receives, recvcnt, rdpls, MPI_valueType, comm);

    DeleteAll(sendcnt, recvcnt, sdpls, rdpls);
    std::copy(receives, receives+totrecvcnt, bufbegin);     // the received data overwrites the part that was sent away
    delete [] receives;
}
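/*
 * Illustrative sketch (not part of SpParHelper): BipartiteSwap and GlobalSelect ship
 * std::pair<KEY,VAL> elements by declaring an opaque datatype of sizeof(std::pair<KEY,VAL>)
 * bytes. That is only safe for trivially copyable KEY/VAL on homogeneous machines, which is the
 * assumption the sort code makes. The function name is hypothetical; assumes <type_traits>.
 */
template <typename KEY, typename VAL>
static MPI_Datatype ExampleMakePairType()
{
    static_assert(std::is_trivially_copyable<std::pair<KEY,VAL>>::value,
                  "raw-byte MPI datatypes require trivially copyable elements");
    MPI_Datatype pairtype;
    MPI_Type_contiguous((int) sizeof(std::pair<KEY,VAL>), MPI_CHAR, &pairtype);
    MPI_Type_commit(&pairtype);
    return pairtype;    // the caller releases it with MPI_Type_free
}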
template <typename KEY, typename VAL, typename IT>
void SpParHelper::DebugPrintKeys(std::pair<KEY,VAL> * array, IT length, IT * dist, MPI_Comm & World)
{
    int rank, nprocs;
    MPI_Comm_rank(World, &rank);
    MPI_Comm_size(World, &nprocs);
    MPI_File thefile;

    char _fn[] = "temp_sortedkeys";     // string literals are const char*, while MPI-2 expects a char*
    MPI_File_open(World, _fn, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &thefile);

    // the cast is crucial: std::accumulate's return type follows its init argument
    IT sizeuntil = std::accumulate(dist, dist+rank, static_cast<IT>(0));

    MPI_Offset disp = sizeuntil * sizeof(KEY);  // the view displacement is in bytes
    MPI_File_set_view(thefile, disp, MPIType<KEY>(), MPIType<KEY>(), "native", MPI_INFO_NULL);

    KEY * packed = new KEY[length];
    for(int i=0; i<length; ++i)
    {
        packed[i] = array[i].first;
    }
    MPI_File_write(thefile, packed, length, MPIType<KEY>(), NULL);
    MPI_File_close(&thefile);
    delete [] packed;

    // let processor 0 read the file back and print it
    if(rank == 0)
    {
        FILE * f = fopen("temp_sortedkeys", "r");
        if(!f)
        {
            std::cerr << "Problem reading binary input file\n";
            return;
        }
        IT maxd = *std::max_element(dist, dist+nprocs);
        KEY * data = new KEY[maxd];

        for(int i=0; i<nprocs; ++i)
        {
            fread(data, sizeof(KEY), dist[i], f);
            std::cout << "Elements stored on proc " << i << ": " << std::endl;
            std::copy(data, data+dist[i], std::ostream_iterator<KEY>(std::cout, "\n"));
        }
        delete [] data;
    }
}
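/*
 * Illustrative sketch (not part of SpParHelper): the MPI-IO pattern DebugPrintKeys relies on.
 * Each rank positions its file view at the byte offset given by the exclusive prefix sum of the
 * per-rank counts and then writes only its own block, producing one globally ordered file
 * without a gather. The function name is hypothetical; KEY must be trivially copyable and
 * MPIType<KEY>() must be defined for it.
 */
template <typename KEY, typename IT>
static void ExampleWriteBlocksToSharedFile(const char * fname, const KEY * mydata, IT mycount,
                                           const IT * dist, MPI_Comm comm)
{
    int myrank;
    MPI_Comm_rank(comm, &myrank);

    MPI_File fh;
    MPI_File_open(comm, const_cast<char*>(fname), MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    IT before = std::accumulate(dist, dist+myrank, static_cast<IT>(0));  // elements owned by lower ranks
    MPI_Offset disp = before * sizeof(KEY);                              // the view displacement is in bytes
    MPI_File_set_view(fh, disp, MPIType<KEY>(), MPIType<KEY>(), "native", MPI_INFO_NULL);

    MPI_File_write(fh, const_cast<KEY*>(mydata), (int) mycount, MPIType<KEY>(), MPI_STATUS_IGNORE);
    MPI_File_close(&fh);
}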
template <class IT, class NT, class DER>
void SpParHelper::FetchMatrix(SpMat<IT,NT,DER> & MRecv, const std::vector<IT> & essentials, std::vector<MPI_Win> & arrwin, int ownind)
{
    MRecv.Create(essentials);   // allocate memory for the arrays

    Arr<IT,NT> arrinfo = MRecv.GetArrays();
    assert( (arrwin.size() == arrinfo.totalsize()));

    // C binding:
    // int MPI_Get(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
    //             MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win)
    int essk = 0;
    for(int i=0; i< arrinfo.indarrs.size(); ++i)    // fetch the index arrays
    {
        MPI_Get( arrinfo.indarrs[i].addr, arrinfo.indarrs[i].count, MPIType<IT>(), ownind, 0, arrinfo.indarrs[i].count, MPIType<IT>(), arrwin[essk++]);
    }
    for(int i=0; i< arrinfo.numarrs.size(); ++i)    // fetch the numerical arrays
    {
        MPI_Get(arrinfo.numarrs[i].addr, arrinfo.numarrs[i].count, MPIType<NT>(), ownind, 0, arrinfo.numarrs[i].count, MPIType<NT>(), arrwin[essk++]);
    }
}
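/*
 * Illustrative sketch (not part of SpParHelper): a minimal one-sided fetch in the spirit of
 * FetchMatrix, but using fence synchronization instead of the lock / start-post epochs that the
 * helpers below manage explicitly. Every rank exposes its array in a window; the copy held by
 * rank "target" is then pulled with MPI_Get. The function name is hypothetical.
 */
template <typename IT>
static void ExampleFetchRemoteArray(const IT * myarr, IT * dest, int count, int target, MPI_Comm comm)
{
    MPI_Win win;
    MPI_Win_create(const_cast<IT*>(myarr), count * sizeof(IT), sizeof(IT), MPI_INFO_NULL, comm, &win);

    MPI_Win_fence(0, win);                  // open the access/exposure epoch collectively
    MPI_Get(dest, count, MPIType<IT>(), target, 0, count, MPIType<IT>(), win);
    MPI_Win_fence(0, win);                  // dest is valid only after this fence completes
    MPI_Win_free(&win);
}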
template <typename IT, typename NT, typename DER>
void SpParHelper::BCastMatrix(MPI_Comm & comm1d, SpMat<IT,NT,DER> & Matrix, const std::vector<IT> & essentials, int root)
{
    int myrank;
    MPI_Comm_rank(comm1d, &myrank);
    if(myrank != root)
    {
        Matrix.Create(essentials);  // receivers allocate memory for the arrays
    }

    Arr<IT,NT> arrinfo = Matrix.GetArrays();
    for(unsigned int i=0; i< arrinfo.indarrs.size(); ++i)   // broadcast the index arrays
    {
        MPI_Bcast(arrinfo.indarrs[i].addr, arrinfo.indarrs[i].count, MPIType<IT>(), root, comm1d);
    }
    for(unsigned int i=0; i< arrinfo.numarrs.size(); ++i)   // broadcast the numerical arrays
    {
        MPI_Bcast(arrinfo.numarrs[i].addr, arrinfo.numarrs[i].count, MPIType<NT>(), root, comm1d);
    }
}
template <typename IT, typename NT, typename DER>
void SpParHelper::GatherMatrix(MPI_Comm & comm1d, SpMat<IT,NT,DER> & Matrix, int root)
{
    int myrank, nprocs;
    MPI_Comm_rank(comm1d, &myrank);
    MPI_Comm_size(comm1d, &nprocs);

    Arr<IT,NT> arrinfo = Matrix.GetArrays();

    // gather the per-processor counts of every essential array at the root
    std::vector<std::vector<int>> recvcnt_ind(arrinfo.indarrs.size());
    std::vector<std::vector<int>> recvcnt_num(arrinfo.numarrs.size());
    for(unsigned int i=0; i< arrinfo.indarrs.size(); ++i)
    {
        recvcnt_ind[i].resize(nprocs);
        int lcount = (int) arrinfo.indarrs[i].count;
        MPI_Gather(&lcount, 1, MPI_INT, recvcnt_ind[i].data(), 1, MPI_INT, root, comm1d);
    }
    for(unsigned int i=0; i< arrinfo.numarrs.size(); ++i)
    {
        recvcnt_num[i].resize(nprocs);
        int lcount = (int) arrinfo.numarrs[i].count;
        MPI_Gather(&lcount, 1, MPI_INT, recvcnt_num[i].data(), 1, MPI_INT, root, comm1d);
    }

    // turn the counts into displacements, then gather the actual data
    std::vector<std::vector<int>> recvdsp_ind(arrinfo.indarrs.size());
    std::vector<std::vector<int>> recvdsp_num(arrinfo.numarrs.size());
    std::vector<std::vector<IT>> recvind(arrinfo.indarrs.size());
    std::vector<std::vector<NT>> recvnum(arrinfo.numarrs.size());
    for(unsigned int i=0; i< arrinfo.indarrs.size(); ++i)
    {
        recvdsp_ind[i].resize(nprocs);
        recvdsp_ind[i][0] = 0;
        for(int j=1; j<nprocs; j++)
            recvdsp_ind[i][j] = recvdsp_ind[i][j-1] + recvcnt_ind[i][j-1];
        recvind[i].resize(recvdsp_ind[i][nprocs-1] + recvcnt_ind[i][nprocs-1]);
        MPI_Gatherv(arrinfo.indarrs[i].addr, arrinfo.indarrs[i].count, MPIType<IT>(), recvind[i].data(), recvcnt_ind[i].data(), recvdsp_ind[i].data(), MPIType<IT>(), root, comm1d);
    }
    for(unsigned int i=0; i< arrinfo.numarrs.size(); ++i)
    {
        recvdsp_num[i].resize(nprocs);
        recvdsp_num[i][0] = 0;
        for(int j=1; j<nprocs; j++)
            recvdsp_num[i][j] = recvdsp_num[i][j-1] + recvcnt_num[i][j-1];
        recvnum[i].resize(recvdsp_num[i][nprocs-1] + recvcnt_num[i][nprocs-1]);
        MPI_Gatherv(arrinfo.numarrs[i].addr, arrinfo.numarrs[i].count, MPIType<NT>(), recvnum[i].data(), recvcnt_num[i].data(), recvdsp_num[i].data(), MPIType<NT>(), root, comm1d);
    }
}
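/*
 * Illustrative sketch (not part of SpParHelper): the two-phase pattern GatherMatrix applies to
 * every array. The root first gathers the per-rank counts, turns them into displacements, and
 * only then collects the variable-length payloads with MPI_Gatherv. The function name is
 * hypothetical; the receive arguments are only significant on the root.
 */
template <typename IT>
static std::vector<IT> ExampleGatherVariableLength(const IT * mydata, int mycount, int root, MPI_Comm comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);

    std::vector<int> counts(nprocs);
    MPI_Gather(&mycount, 1, MPI_INT, counts.data(), 1, MPI_INT, root, comm);

    std::vector<int> displs(nprocs, 0);
    std::vector<IT> gathered;
    if(myrank == root)
    {
        for(int j=1; j<nprocs; ++j)
            displs[j] = displs[j-1] + counts[j-1];
        gathered.resize(displs[nprocs-1] + counts[nprocs-1]);
    }
    MPI_Gatherv(const_cast<IT*>(mydata), mycount, MPIType<IT>(),
                gathered.data(), counts.data(), displs.data(), MPIType<IT>(), root, comm);
    return gathered;    // non-empty only on the root
}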
template <class IT, class NT, class DER>
void SpParHelper::SetWindows(MPI_Comm & comm1d, const SpMat<IT,NT,DER> & Matrix, std::vector<MPI_Win> & arrwin)
{
    Arr<IT,NT> arrs = Matrix.GetArrays();

    // MPI_Win_create is a collective operation:
    // every processor exposes its own arrays to everyone else in the communicator
    for(int i=0; i< arrs.indarrs.size(); ++i)
    {
        MPI_Win nWin;
        MPI_Win_create(arrs.indarrs[i].addr,
                       arrs.indarrs[i].count * sizeof(IT), sizeof(IT), MPI_INFO_NULL, comm1d, &nWin);
        arrwin.push_back(nWin);
    }
    for(int i=0; i< arrs.numarrs.size(); ++i)
    {
        MPI_Win nWin;
        MPI_Win_create(arrs.numarrs[i].addr,
                       arrs.numarrs[i].count * sizeof(NT), sizeof(NT), MPI_INFO_NULL, comm1d, &nWin);
        arrwin.push_back(nWin);
    }
}
inline void SpParHelper::LockWindows(int ownind, std::vector<MPI_Win> & arrwin)
{
    for(std::vector<MPI_Win>::iterator itr = arrwin.begin(); itr != arrwin.end(); ++itr)
    {
        MPI_Win_lock(MPI_LOCK_SHARED, ownind, 0, *itr);
    }
}

inline void SpParHelper::UnlockWindows(int ownind, std::vector<MPI_Win> & arrwin)
{
    for(std::vector<MPI_Win>::iterator itr = arrwin.begin(); itr != arrwin.end(); ++itr)
    {
        MPI_Win_unlock(ownind, *itr);
    }
}
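/*
 * Illustrative sketch (not part of SpParHelper): what a caller does with LockWindows and
 * UnlockWindows in a passive-target epoch. The shared lock on the owner brackets the RMA calls,
 * and the fetched data may only be read after the unlock returns. The function name is
 * hypothetical, and it assumes the LockWindows/UnlockWindows helpers are accessible to the caller.
 */
template <typename IT>
static void ExamplePassiveTargetFetch(std::vector<MPI_Win> & arrwin, int owner, IT * dest, int count)
{
    SpParHelper::LockWindows(owner, arrwin);      // MPI_Win_lock(MPI_LOCK_SHARED, ...) on every window
    MPI_Get(dest, count, MPIType<IT>(), owner, 0, count, MPIType<IT>(), arrwin[0]);
    SpParHelper::UnlockWindows(owner, arrwin);    // unlocking completes the transfer
}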
inline void SpParHelper::StartAccessEpoch(int owner, std::vector<MPI_Win> & arrwin, MPI_Group & group)
{
    // generalized active-target synchronization: begin an access epoch to the owner's windows.
    // MPI_Win_start need not block until the matching MPI_Win_post has been issued.
    int acc_ranks[1];
    acc_ranks[0] = owner;
    MPI_Group access;
    MPI_Group_incl(group, 1, acc_ranks, &access);   // the access group contains only the owner

    for(unsigned int i=0; i< arrwin.size(); ++i)
        MPI_Win_start(access, 0, arrwin[i]);

    MPI_Group_free(&access);
}
inline void SpParHelper::PostExposureEpoch(int self, std::vector<MPI_Win> & arrwin, MPI_Group & group)
{
    // begin the exposure epochs for the windows over the local arrays
    for(unsigned int i=0; i< arrwin.size(); ++i)
        MPI_Win_post(group, MPI_MODE_NOPUT, arrwin[i]);
}
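/*
 * Illustrative sketch (not part of SpParHelper): the full post-start-complete-wait cycle that
 * StartAccessEpoch and PostExposureEpoch set up. The origin brackets its gets with MPI_Win_start
 * and MPI_Win_complete, while the target brackets the same epoch with MPI_Win_post and
 * MPI_Win_wait. The function name is hypothetical; "grp" describes the partner rank(s).
 */
static void ExamplePSCWEpoch(MPI_Win win, MPI_Group grp, bool origin, double * dest, int count, int target)
{
    if(origin)
    {
        MPI_Win_start(grp, 0, win);                                 // begin the access epoch
        MPI_Get(dest, count, MPI_DOUBLE, target, 0, count, MPI_DOUBLE, win);
        MPI_Win_complete(win);                                      // dest is usable from here on
    }
    else
    {
        MPI_Win_post(grp, MPI_MODE_NOPUT, win);                     // begin the exposure epoch
        MPI_Win_wait(win);                                          // returns after all origins complete
    }
}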
template <class IT, class DER>
void SpParHelper::AccessNFetch(DER * & Matrix, int owner, std::vector<MPI_Win> & arrwin, MPI_Group & group, IT ** sizes)
{
    StartAccessEpoch(owner, arrwin, group);     // start the access epoch to the owner's windows

    std::vector<IT> ess(DER::esscount);         // pack the essentials into a vector
    for(int j=0; j< DER::esscount; ++j)
        ess[j] = sizes[j][owner];

    Matrix = new DER();                         // create the object first
    FetchMatrix(*Matrix, ess, arrwin, owner);   // then start fetching its elements
}
template <class IT, class DER>
void SpParHelper::LockNFetch(DER * & Matrix, int owner, std::vector<MPI_Win> & arrwin, MPI_Group & group, IT ** sizes)
{
    LockWindows(owner, arrwin);

    std::vector<IT> ess(DER::esscount);         // pack the essentials into a vector
    for(int j=0; j< DER::esscount; ++j)
        ess[j] = sizes[j][owner];

    Matrix = new DER();                         // create the object first
    FetchMatrix(*Matrix, ess, arrwin, owner);   // then start fetching its elements
}
// sizes: 2D array where sizes[i][j] is the size of the ith essential component on processor j
template <class IT, class NT, class DER>
void SpParHelper::GetSetSizes(const SpMat<IT,NT,DER> & Matrix, IT ** & sizes, MPI_Comm & comm1d)
{
    std::vector<IT> essentials = Matrix.GetEssentials();
    int index;
    MPI_Comm_rank(comm1d, &index);

    for(IT i=0; (unsigned)i < essentials.size(); ++i)
    {
        sizes[i][index] = essentials[i];
        MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), sizes[i], 1, MPIType<IT>(), comm1d);
    }
}
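/*
 * Illustrative sketch (not part of SpParHelper): the MPI_IN_PLACE idiom GetSetSizes uses. Each
 * rank writes its own contribution into its slot of the shared array and the allgather fills in
 * the slots of everybody else, avoiding a separate send buffer. The function name is hypothetical.
 */
template <typename IT>
static void ExampleAllgatherInPlace(IT * sizes, IT myvalue, MPI_Comm comm)
{
    int myrank;
    MPI_Comm_rank(comm, &myrank);
    sizes[myrank] = myvalue;    // fill my own slot first
    MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), sizes, 1, MPIType<IT>(), comm);
}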
inline void SpParHelper::PrintFile(const std::string & s, const std::string & filename)
{
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    std::ofstream out(filename.c_str(), std::ofstream::app);
    if(myrank == 0) out << s;
}

inline void SpParHelper::PrintFile(const std::string & s, const std::string & filename, MPI_Comm & world)
{
    int myrank;
    MPI_Comm_rank(world, &myrank);
    std::ofstream out(filename.c_str(), std::ofstream::app);
    if(myrank == 0) out << s;
}

inline void SpParHelper::Print(const std::string & s)
{
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    if(myrank == 0) std::cout << s;
}

inline void SpParHelper::Print(const std::string & s, MPI_Comm & world)
{
    int myrank;
    MPI_Comm_rank(world, &myrank);
    if(myrank == 0) std::cout << s;
}
inline void SpParHelper::check_newline(int *bytes_read, int bytes_requested, char *buf)
{
    if ((*bytes_read) < bytes_requested) {
        // fewer bytes than requested: this is the end of the file
        if (buf[(*bytes_read) - 1] != '\n') {
            // the file does not end with a newline; append one to prevent an infinite loop later
            buf[(*bytes_read) - 1] = '\n';
            std::cout << "Error in Matrix Market format, appending missing newline at end of file" << std::endl;
        }
    }
}
inline bool SpParHelper::FetchBatch(MPI_File & infile, MPI_Offset & curpos, MPI_Offset end_fpos, bool firstcall, std::vector<std::string> & lines, int myrank)
{
    size_t bytes2fetch = ONEMILLION;    // we might read more than needed, but unprocessed bytes are simply ignored
    if(firstcall && myrank != 0)
    {
        curpos -= 1;        // the first byte is only read to check whether we started at the beginning of a line
        bytes2fetch += 1;
    }
    char * buf = new char[bytes2fetch];
    char * originalbuf = buf;   // "buf" will move forward; keep the original pointer for delete []

    MPI_Status status;
    MPI_File_read_at(infile, curpos, buf, bytes2fetch, MPI_CHAR, &status);
    int bytes_read;
    MPI_Get_count(&status, MPI_CHAR, &bytes_read);  // MPI_Get_count can only return 32-bit integers
    if(!bytes_read)
    {
        delete [] originalbuf;
        return true;    // done
    }
    SpParHelper::check_newline(&bytes_read, bytes2fetch, buf);
    if(firstcall && myrank != 0)
    {
        // skip forward to the first line break; the preceding processor owns the partial line
        char *c = (char*)memchr(buf, '\n', bytes_read);
        if (c == NULL)
        {
            std::cout << "Unexpected line without a break" << std::endl;
            delete [] originalbuf;
            return false;
        }
        int n = c - buf + 1;
        bytes_read -= n;
        buf += n;
        curpos += n;
    }
    while(bytes_read > 0 && curpos < end_fpos)  // this also finishes the last line
    {
        char *c = (char*)memchr(buf, '\n', bytes_read);
        if (c == NULL)
        {
            delete [] originalbuf;
            return false;   // the incomplete last line will be re-read next time, since curpos was not advanced past it
        }
        int n = c - buf + 1;
        lines.push_back(std::string(buf, n-1));     // do not copy the newline character
        bytes_read -= n;    // reduce the remaining bytes
        buf += n;           // move the buffer forward
        curpos += n;
    }
    delete [] originalbuf;
    if (curpos >= end_fpos) return true;    // nothing left to read, don't call again
    else return false;
}
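/*
 * Illustrative sketch (not part of SpParHelper): the line-splitting loop at the heart of
 * FetchBatch, applied to a plain in-memory buffer. memchr finds each '\n'; a trailing partial
 * line with no newline yet is left for the next read. The function name is hypothetical;
 * assumes <cstring>, <string> and <vector>.
 */
static size_t ExampleSplitIntoLines(const char * buf, size_t len, std::vector<std::string> & lines)
{
    size_t consumed = 0;
    while(consumed < len)
    {
        const char * c = (const char*) memchr(buf + consumed, '\n', len - consumed);
        if(c == NULL) break;                                // incomplete last line: process it later
        size_t n = (c - (buf + consumed)) + 1;              // include the newline in the consumed count
        lines.push_back(std::string(buf + consumed, n-1));  // but do not copy it into the stored line
        consumed += n;
    }
    return consumed;    // number of bytes fully processed
}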
inline void SpParHelper::WaitNFree(std::vector<MPI_Win> & arrwin)
{
    // end the exposure epochs: MPI_Win_wait blocks until every origin that was granted
    // access during this epoch has called MPI_Win_complete
    for(unsigned int i=0; i< arrwin.size(); ++i)
    {
        MPI_Win_wait(arrwin[i]);
    }
    FreeWindows(arrwin);
}

inline void SpParHelper::FreeWindows(std::vector<MPI_Win> & arrwin)
{
    for(unsigned int i=0; i< arrwin.size(); ++i)
    {
        MPI_Win_free(&arrwin[i]);
    }
}