// FullyDistVec constructors
template <class IT, class NT>

template <class IT, class NT>
	arr.resize(MyLocLength(), initval);

template <class IT, class NT>

template <class IT, class NT>
	arr.resize(MyLocLength(), initval);

template <class IT, class NT>

// Cross-type copy constructor: element-wise cast from FullyDistVec<ITRHS,NTRHS>
template <class IT, class NT>
template <class ITRHS, class NTRHS>
	arr.resize(static_cast<IT>(rhs.arr.size()), NT());
	for(IT i=0; (unsigned)i < arr.size(); ++i)
	{
		arr[i] = static_cast<NT>(rhs.arr[static_cast<ITRHS>(i)]);
	}
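// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): converting between
// FullyDistVec instantiations via the cross-type copy constructor above.
// Assumes MPI is initialized and `grid` is a std::shared_ptr<CommGrid>; the
// FullyDistVec(grid, globallen, initval) constructor is my reading of the
// public CombBLAS API and may differ slightly between versions.
// ---------------------------------------------------------------------------
static void ConvertExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int, float> vf(grid, 100, 1.5f);   // global length 100, all 1.5f
	FullyDistVec<int64_t, double> vd(vf);           // element-wise static_cast to <int64_t,double>
}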
// Constructor from a locally filled std::vector: gathers the per-process sizes,
// computes the global length, and redistributes the data if the local sizes are
// not already uniform across processors.
template <class IT, class NT>
	MPI_Comm World = commGrid->GetWorld();
	int nprocs = commGrid->GetSize();
	int rank = commGrid->GetRank();

	IT * sizes = new IT[nprocs];
	IT nsize = fillarr.size();
	sizes[rank] = nsize;	// required for the in-place Allgather below
	MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), sizes, 1, MPIType<IT>(), World);
	glen = std::accumulate(sizes, sizes+nprocs, static_cast<IT>(0));

	std::vector<IT> uniq_sizes;
	std::unique_copy(sizes, sizes+nprocs, std::back_inserter(uniq_sizes));
	if(uniq_sizes.size() == 1)	// all local sizes equal: data is already balanced

	IT lengthuntil = std::accumulate(sizes, sizes+rank, static_cast<IT>(0));

	// Count how many of my entries belong to each processor under the target distribution
	int * sendcnt = new int[nprocs];
	std::fill(sendcnt, sendcnt+nprocs, 0);
	for(IT i=0; i<nsize; ++i)
	{
		IT locind;
		int owner = Owner(i+lengthuntil, locind);
		++sendcnt[owner];
	}
	int * recvcnt = new int[nprocs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);	// share the counts

	int * sdispls = new int[nprocs];
	int * rdispls = new int[nprocs];
	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<nprocs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
	std::vector<IT> recvbuf(totrecv);
	MPI_Alltoallv(&(arr[0]), sendcnt, sdispls, MPIType<IT>(), &(recvbuf[0]), recvcnt, rdispls, MPIType<IT>(), World);
	DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
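// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): building a
// FullyDistVec from per-process local data. The (fillarr, grid) constructor
// shown here is my reading of the CombBLAS API; check your version's header.
// ---------------------------------------------------------------------------
static void FillConstructExample(std::shared_ptr<CommGrid> grid)
{
	int rank = grid->GetRank();
	std::vector<int64_t> local(10, rank);           // 10 local entries per process
	FullyDistVec<int64_t, int64_t> v(local, grid);  // global length = 10 * nprocs
	if(rank == 0)
		std::cout << "global length: " << v.TotalLength() << std::endl;
}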
// std::pair<IT,NT> MinElement() const
// Returns the (global index, value) pair of the minimum element; identical on every rank.
template <class IT, class NT>
	auto it = min_element(arr.begin(), arr.end());
	NT localMin = *it;
	NT globalMin;
	MPI_Allreduce( &localMin, &globalMin, 1, MPIType<NT>(), MPI_MIN, commGrid->GetWorld());

	IT localMinIdx = TotalLength();
	if(globalMin == localMin)
	{
		localMinIdx = distance(arr.begin(), it) + LengthUntil();
	}
	MPI_Allreduce( &localMinIdx, &globalMinIdx, 1, MPIType<IT>(), MPI_MIN, commGrid->GetWorld());
	return std::make_pair(globalMinIdx, globalMin);
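// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): MinElement() returns
// the same (global index, value) pair on every rank. Assumes `v` has already
// been constructed and filled elsewhere.
// ---------------------------------------------------------------------------
static void MinExample(const FullyDistVec<int64_t, double> & v, int rank)
{
	std::pair<int64_t, double> m = v.MinElement();
	if(rank == 0)
		std::cout << "min value " << m.second << " at global index " << m.first << std::endl;
}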
// NT Reduce(_BinaryOperation __binary_op, NT identity) const
// Folds the local array, then combines the per-process partial results.
template <class IT, class NT>
template <typename _BinaryOperation>
	NT localsum = std::accumulate( arr.begin(), arr.end(), identity, __binary_op);
	NT totalsum = identity;
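// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): summing a vector with
// Reduce(op, identity). The FullyDistVec(grid, globallen, initval) constructor
// is my reading of the CombBLAS API; the Reduce signature matches the header.
// ---------------------------------------------------------------------------
static void ReduceExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int64_t, double> ones(grid, 1000, 1.0);  // global length 1000, all ones
	double total = ones.Reduce(std::plus<double>(), 0.0); // same result on every rank
	if(grid->GetRank() == 0)
		std::cout << "sum = " << total << std::endl;      // expect 1000
}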
// Typed reduction: applies a unary transformation to each element before folding.
template <class IT, class NT>
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
	OUT localsum = default_val;

	typename std::vector< NT >::const_iterator iter = arr.begin();
	while (iter < arr.end())
	{
		localsum = __binary_op(localsum, __unary_op(*iter));
		++iter;
	}
	OUT totalsum = default_val;
// void SelectCandidates(double nver)
// ABAB: Put concept check, NT should be integer for this to make sense.
template <class IT, class NT>
	IT length = TotalLength();
	std::vector<double> loccands(length);
	std::vector<NT> loccandints(length);
	MPI_Comm World = commGrid->GetWorld();
	int myrank = commGrid->GetRank();

	for(int i=0; i<length; ++i)
		loccands[i] = M.rand();
	std::transform(loccands.begin(), loccands.end(), loccands.begin(), std::bind2nd( std::multiplies<double>(), nver ));

	for(int i=0; i<length; ++i)
		loccandints[i] = static_cast<NT>(loccands[i]);

	MPI_Bcast(&(loccandints[0]), length, MPIType<NT>(), 0, World);	// everyone uses rank 0's candidates
	for(IT i=0; i<length; ++i)
// FullyDistVec<IT,NT> & operator=(const FullyDistVec<ITRHS,NTRHS> & rhs)
// Cross-type assignment: copies the grid and casts each element.
template <class IT, class NT>
template <class ITRHS, class NTRHS>
	if(static_cast<const void*>(this) != static_cast<const void*>(&rhs))
	{
		glen = static_cast<IT>(rhs.glen);
		commGrid = rhs.commGrid;

		arr.resize(rhs.arr.size(), NT());
		for(IT i=0; (unsigned)i < arr.size(); ++i)
		{
			arr[i] = static_cast<NT>(rhs.arr[static_cast<ITRHS>(i)]);
		}
	}
template <class IT, class NT>
// Dense = sparse assignment: zero-fill, then scatter the sparse entries.
template <class IT, class NT>
	arr.resize(rhs.MyLocLength());
	std::fill(arr.begin(), arr.end(), NT());

	for(IT i=0; i< spvecsize; ++i)
	{
		arr[rhs.ind[i]] = rhs.num[i];
	}
// FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs)
template <class IT, class NT>
#pragma omp parallel for
	for(IT i=0; i< spvecsize; ++i)
	{
		if(arr[rhs.ind[i]] == NT())	// position was unset
			arr[rhs.ind[i]] = rhs.num[i];
		else
			arr[rhs.ind[i]] += rhs.num[i];
	}
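// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): accumulating a sparse
// vector into a dense one. The FullyDistSpVec(grid, length) constructor and its
// SetElement are my reading of the CombBLAS API and may differ between versions.
// ---------------------------------------------------------------------------
static void SparseAccumulateExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int64_t, double> dense(grid, 100, 0.0);
	FullyDistSpVec<int64_t, double> sparse(grid, 100);
	sparse.SetElement(7, 2.5);   // one nonzero at global index 7
	dense += sparse;             // dense[7] becomes 2.5, other entries unchanged
}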
// FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs)
template <class IT, class NT>
	for(IT i=0; i< spvecsize; ++i)
	{
		arr[rhs.ind[i]] -= rhs.num[i];
	}
// EWise: in-place elementwise combine of two aligned dense vectors.
template <class IT, class NT>
template <typename _BinaryOperation>
	std::transform ( arr.begin(), arr.end(), rhs.arr.begin(), arr.begin(), __binary_op );
// EWiseOut: like EWise, but writes into a separate result vector.
template <class IT, class NT>
template <typename _BinaryOperation, typename OUT>
	std::transform ( arr.begin(), arr.end(), rhs.arr.begin(), result.arr.begin(), __binary_op );
// FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs)
template <class IT, class NT>
	if(!(*commGrid == *rhs.commGrid))
	{
		std::cout << "Grids are not comparable for elementwise addition" << std::endl;
	}
	EWise(rhs, std::plus<NT>());
// FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs)
template <class IT, class NT>
	if(!(*commGrid == *rhs.commGrid))
	{
		std::cout << "Grids are not comparable for elementwise subtraction" << std::endl;
	}
	EWise(rhs, std::minus<NT>());
// bool operator==(const FullyDistVec<IT,NT> & rhs) const
// Elementwise comparison with an error-tolerant predicate, AND-reduced across processors.
template <class IT, class NT>
	int local, whole;
	local = (int) std::equal(arr.begin(), arr.end(), rhs.arr.begin(), epsilonequal );
	MPI_Allreduce( &local, &whole, 1, MPI_INT, MPI_BAND, commGrid->GetWorld());
	return static_cast<bool>(whole);
// IT Count(_Predicate pred) const: number of elements for which pred is true.
template <class IT, class NT>
template <typename _Predicate>
	IT local = count_if( arr.begin(), arr.end(), pred );
	IT whole = 0;
	MPI_Allreduce( &local, &whole, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
	return whole;
// FullyDistVec<IT,IT> FindInds(_Predicate pred) const
// Collects the global indices where pred is true into `found`, then redistributes
// them so the result is itself a balanced FullyDistVec.
template <class IT, class NT>
template <typename _Predicate>
	MPI_Comm World = commGrid->GetWorld();
	int nprocs = commGrid->GetSize();
	int rank = commGrid->GetRank();

	IT sizesofar = LengthUntil();
	for(IT i=0; i<sizelocal; ++i)
	{
		if(pred(arr[i]))
		{
			found.arr.push_back(i+sizesofar);
		}
	}
	IT * dist = new IT[nprocs];
	IT nsize = found.arr.size();
	dist[rank] = nsize;
	MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), World);
	IT lengthuntil = std::accumulate(dist, dist+rank, static_cast<IT>(0));
	found.glen = std::accumulate(dist, dist+nprocs, static_cast<IT>(0));

	// Redistribute the found indices so that the result vector is load balanced
	int * sendcnt = new int[nprocs];
	std::fill(sendcnt, sendcnt+nprocs, 0);
	for(IT i=0; i<nsize; ++i)
	{
		IT locind;
		int owner = found.Owner(i+lengthuntil, locind);
		++sendcnt[owner];
	}
	int * recvcnt = new int[nprocs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);

	int * sdispls = new int[nprocs];
	int * rdispls = new int[nprocs];
	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<nprocs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
	std::vector<IT> recvbuf(totrecv);
	MPI_Alltoallv(&(found.arr[0]), sendcnt, sdispls, MPIType<IT>(), &(recvbuf[0]), recvcnt, rdispls, MPIType<IT>(), World);
	found.arr.swap(recvbuf);
	DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
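// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): counting and locating
// entries with a predicate. Return types of Count, FindInds, and Find follow
// the declarations in the header; the lambda predicate is my own example.
// ---------------------------------------------------------------------------
static void PredicateExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int64_t, double> v(grid, 1000, 0.0);
	v.SetElement(3, 7.0);
	v.SetElement(42, 9.0);

	auto positive = [](double x) { return x > 0.0; };
	int64_t howmany = v.Count(positive);                          // 2
	FullyDistVec<int64_t, int64_t> where = v.FindInds(positive);  // global indices {3, 42}
	FullyDistSpVec<int64_t, double> nz = v.Find(positive);        // sparse vector of those entries
	if(grid->GetRank() == 0)
		std::cout << howmany << " positive entries" << std::endl;
}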
// FullyDistSpVec<IT,NT> Find(_Predicate pred) const
// Returns a sparse vector of the elements for which pred is true.
template <class IT, class NT>
template <typename _Predicate>
	size_t size = arr.size();
	for(size_t i=0; i<size; ++i)
	{
		if(pred(arr[i]))
		{
			found.ind.push_back( (IT) i);
			found.num.push_back(arr[i]);
		}
	}
// Overload that keeps only the entries equal to a given value.
template <class IT, class NT>
	size_t size = arr.size();
	for(size_t i=0; i<size; ++i)
	{
		if(arr[i] == val)
		{
			found.ind.push_back( (IT) i);
			found.num.push_back(val);
		}
	}
template <class IT, class NT>
template <class HANDLER>

// SaveGathered: the dense version delegates to FullyDistSpVec::SaveGathered
// through a temporary sparse copy.
template <class IT, class NT>
template <class HANDLER>
	tmpSpVec.SaveGathered(outfile, master, handler, printProcSplits);
// void SetElement(IT indx, NT numx): sets a single element by its global index.
template <class IT, class NT>
	int rank = commGrid->GetRank();
	if(glen == 0)
	{
		if(rank == 0)
			std::cout << "FullyDistVec::SetElement can't be called on an empty vector." << std::endl;
		return;
	}
	IT locind;
	int owner = Owner(indx, locind);
	if(commGrid->GetRank() == owner)
	{
		if(locind > (LocArrSize() - 1))
			std::cout << "FullyDistVec::SetElement cannot expand array" << std::endl;
		else if(locind < 0)
			std::cout << "FullyDistVec::SetElement local index < 0" << std::endl;
		else
			arr[locind] = numx;
	}
// NT GetElement(IT indx) const: fetches a single element by its global index;
// the owner broadcasts the value so every rank receives the same result.
template <class IT, class NT>
	NT ret = NT();
	MPI_Comm World = commGrid->GetWorld();
	int rank = commGrid->GetRank();
	if(glen == 0)
	{
		if(rank == 0)
			std::cout << "FullyDistVec::GetElement can't be called on an empty vector." << std::endl;
		return NT();
	}
	IT locind;
	int owner = Owner(indx, locind);
	if(commGrid->GetRank() == owner)
	{
		if(locind > (LocArrSize() - 1))
			std::cout << "FullyDistVec::GetElement local index > size" << std::endl;
		else if(locind < 0)
			std::cout << "FullyDistVec::GetElement local index < 0" << std::endl;
		else
			ret = arr[locind];
	}
	MPI_Bcast(&ret, 1, MPIType<NT>(), owner, World);
	return ret;
// Debugging helper: each process writes its local chunk to a shared binary file
// ("temp_fullydistvec") via MPI-IO, then rank 0 reads the file back and prints
// the elements owned by each processor.
template <class IT, class NT>
	int rank, nprocs;
	MPI_Comm World = commGrid->GetWorld();
	MPI_Comm_rank(World, &rank);
	MPI_Comm_size(World, &nprocs);
	MPI_File thefile;
	char _fn[] = "temp_fullydistvec";
	MPI_File_open(World, _fn, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &thefile);
	IT lengthuntil = LengthUntil();
	IT count = LocArrSize();

	// Each process writes at an offset determined by the elements preceding it
	char native[] = "native";
	MPI_File_set_view(thefile, int64_t(lengthuntil * sizeof(NT)), MPIType<NT>(), MPIType<NT>(), native, MPI_INFO_NULL);
	MPI_File_write(thefile, &(arr[0]), count, MPIType<NT>(), MPI_STATUS_IGNORE);
	MPI_File_close(&thefile);

	// Rank 0 reads the file back and pretty-prints the per-process chunks
	IT * counts = new IT[nprocs];
	MPI_Gather(&count, 1, MPIType<IT>(), counts, 1, MPIType<IT>(), 0, World);
	if(rank == 0)
	{
		FILE * f = fopen("temp_fullydistvec", "r");
		if(f == NULL)
		{
			std::cerr << "Problem reading binary input file\n";
			return;
		}
		IT maxd = *std::max_element(counts, counts+nprocs);
		NT * data = new NT[maxd];
		for(int i=0; i<nprocs; ++i)
		{
			size_t result = fread(data, sizeof(NT), counts[i], f);
			if (result != (unsigned)counts[i]) { std::cout << "Error in fread, only " << result << " entries read" << std::endl; }

			std::cout << "Elements stored on proc " << i << ": {";
			for (int j = 0; j < counts[i]; j++)
			{
				std::cout << data[j] << ",";
			}
			std::cout << "}" << std::endl;
		}
		delete [] data;
	}
	delete [] counts;
// Apply(__unary_op, mask): applies the unary operation only at the indices
// present in the sparse mask vector.
template <class IT, class NT>
template <typename _UnaryOperation, typename IRRELEVANT_NT>
	typename std::vector< IT >::const_iterator miter = mask.ind.begin();
	while (miter < mask.ind.end())
	{
		IT index = *miter++;
		arr[index] = __unary_op(arr[index]);
	}
// EWiseApply (dense right-hand side): applies __binary_op at every position
// where the predicate _do_op allows it; see the header for the bool flags of
// the extended binary operation.
template <class IT, class NT>
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
	if(*(commGrid) == *(other.commGrid))
	{
		if(glen != other.glen)
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << glen << " vs " << other.glen << ") for FullyDistVec::EWiseApply\n";
		}
		typename std::vector< NT >::iterator thisIter = arr.begin();
		typename std::vector< NT2 >::const_iterator otherIter = other.arr.begin();
		while (thisIter < arr.end())
		{
			if (_do_op(*thisIter, *otherIter, false, false))
				*thisIter = __binary_op(*thisIter, *otherIter, false, false);
			++thisIter;
			++otherIter;
		}
	}
	else
	{
		std::ostringstream outs;
		outs << "Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply" << std::endl;
	}
// EWiseApply (sparse right-hand side): one path walks the whole dense array and
// substitutes nullValue where the sparse vector has no entry; the other path
// only touches the positions that are nonzero in the sparse vector.
template <class IT, class NT>
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
	if(*(commGrid) == *(other.commGrid))
	{
		if(glen != other.glen)
		{
			std::cerr << "Vector dimensions don't match (" << glen << " vs " << other.glen << ") for FullyDistVec::EWiseApply\n";
		}

		typename std::vector< IT >::const_iterator otherInd = other.ind.begin();
		typename std::vector< NT2 >::const_iterator otherNum = other.num.begin();

		for(IT i=0; (unsigned)i < arr.size(); ++i)
		{
			if (otherInd == other.ind.end() || i < *otherInd)	// no sparse entry here: treat as null
			{
				if (_do_op(arr[i], nullValue, false, true))
					arr[i] = __binary_op(arr[i], nullValue, false, true);
			}
			else	// matching sparse entry: use its value and advance
			{
				if (_do_op(arr[i], *otherNum, false, false))
					arr[i] = __binary_op(arr[i], *otherNum, false, false);
				++otherInd;
				++otherNum;
			}
		}

		// Second path: only the positions present in the sparse vector are touched
		IT spsize = other.ind.size();
#pragma omp parallel for
		for(IT i=0; i< spsize; i++)
		{
			if (_do_op(arr[other.ind[i]], other.num[i], false, false))
				arr[other.ind[i]] = __binary_op(arr[other.ind[i]], other.num[i], false, false);
		}
	}
	else
	{
		std::cout << "Grids are not comparable for elementwise apply" << std::endl;
	}
// FullyDistVec<IT,IT> sort(): sorts the vector in place (by value) and returns
// the permutation of original global indices, by sorting (value, index) pairs.
template <class IT, class NT>
	MPI_Comm World = commGrid->GetWorld();
	IT nnz = LocArrSize();
	std::pair<NT,IT> * vecpair = new std::pair<NT,IT>[nnz];
	int nprocs = commGrid->GetSize();
	int rank = commGrid->GetRank();

	IT * dist = new IT[nprocs];
	dist[rank] = nnz;
	MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), World);
	IT sizeuntil = LengthUntil();
	for(IT i=0; i< nnz; ++i)
	{
		vecpair[i].first = arr[i];		// sort key
		vecpair[i].second = i + sizeuntil;	// original global index, carried along
	}
	SpParHelper::MemoryEfficientPSort(vecpair, nnz, dist, World);

	std::vector< IT > narr(nnz);
	for(IT i=0; i< nnz; ++i)
	{
		arr[i] = vecpair[i].first;	// sorted values back into this vector
		narr[i] = vecpair[i].second;	// permutation returned to the caller
	}
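// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): sort() reorders the
// vector in place and returns the permutation as a FullyDistVec<IT,IT>. The
// constructor, iota, and RandPerm calls follow my reading of the CombBLAS API.
// ---------------------------------------------------------------------------
static void SortExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int64_t, int64_t> keys(grid, 100, 0);
	keys.iota(100, 0);      // global contents 0, 1, ..., 99
	keys.RandPerm();        // randomly permute them across all processors
	FullyDistVec<int64_t, int64_t> perm = keys.sort(); // keys sorted again;
	// perm[i] is the original global position of the i-th smallest key
}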
// void RandPerm(): randomly permutes the entries of the vector across all
// processors. Each entry is first sent to a random processor, shuffled locally,
// and then scattered back to the processor that owns its new global position.
template <class IT, class NT>
#ifdef DETERMINISTIC
	uint64_t seed = 1383098845;
#else
	uint64_t seed = time(NULL);
#endif
	MTRand M(seed);	// random number generator

	MPI_Comm World = commGrid->GetWorld();
	int nprocs = commGrid->GetSize();
	int rank = commGrid->GetRank();
	IT size = LocArrSize();

#ifdef COMBBLAS_LEGACY
	std::pair<double,NT> * vecpair = new std::pair<double,NT>[size];
	IT * dist = new IT[nprocs];
	dist[rank] = size;
	MPI_Allgather(MPI_IN_PLACE, 1, MPIType<IT>(), dist, 1, MPIType<IT>(), World);
	for(int i=0; i<size; ++i)
	{
		vecpair[i].first = M.rand();	// random sort key
		vecpair[i].second = arr[i];
	}
	// legacy path: globally sort the (random key, value) pairs, then keep the values
	std::vector< NT > nnum(size);
	for(int i=0; i<size; ++i)	nnum[i] = vecpair[i].second;
#else
	// Step 1: send every local entry to a uniformly random processor
	std::vector< std::vector< NT > > data_send(nprocs);
	for(int i=0; i<size; ++i)
	{
		uint32_t dest = M.randInt(nprocs-1);
		data_send[dest].push_back(arr[i]);
	}
	int * sendcnt = new int[nprocs];
	int * sdispls = new int[nprocs];
	for(int i=0; i<nprocs; ++i)	sendcnt[i] = (int) data_send[i].size();

	int * rdispls = new int[nprocs];
	int * recvcnt = new int[nprocs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<nprocs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
	if(totrecv > std::numeric_limits<int>::max())
	{
		std::cout << "COMBBLAS_WARNING: total data to receive exceeds max int: " << totrecv << std::endl;
	}
	std::vector<NT>().swap(arr);	// free the local array; it is rebuilt below

	NT * sendbuf = new NT[size];
	for(int i=0; i<nprocs; ++i)
	{
		std::copy(data_send[i].begin(), data_send[i].end(), sendbuf+sdispls[i]);
		std::vector<NT>().swap(data_send[i]);
	}
	NT * recvbuf = new NT[totrecv];
	MPI_Alltoallv(sendbuf, sendcnt, sdispls, MPIType<NT>(), recvbuf, recvcnt, rdispls, MPIType<NT>(), World);

	// Step 2: shuffle the received entries locally
	std::default_random_engine gen(seed);
	std::shuffle(recvbuf, recvbuf+totrecv, gen);

	// Step 3: route each shuffled entry to the processor that owns its new global position
	int64_t * localcounts = new int64_t[nprocs];
	localcounts[rank] = totrecv;
	MPI_Allgather(MPI_IN_PLACE, 1, MPI_LONG_LONG, localcounts, 1, MPI_LONG_LONG, World);
	int64_t glenuntil = std::accumulate(localcounts, localcounts+rank, static_cast<int64_t>(0));

	std::vector< std::vector< IT > > locs_send(nprocs);
	for(IT i=0; i< totrecv; ++i)
	{
		IT remotelocind;
		int owner = Owner(glenuntil+i, remotelocind);
		locs_send[owner].push_back(remotelocind);
		data_send[owner].push_back(recvbuf[i]);
	}
	for(int i=0; i<nprocs; ++i)	sendcnt[i] = (int) data_send[i].size();
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<nprocs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	IT newsize = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
	if(newsize > std::numeric_limits<int>::max())
	{
		std::cout << "COMBBLAS_WARNING: total data to receive exceeds max int: " << newsize << std::endl;
	}
	IT totalsend = std::accumulate(sendcnt, sendcnt+nprocs, static_cast<IT>(0));
	if(totalsend != totrecv || newsize != size)
	{
		std::cout << "COMBBLAS_WARNING: sending different sized data than received: " << totalsend << "=" << totrecv << " , " << newsize << "=" << size << std::endl;
	}
	for(int i=0; i<nprocs; ++i)
	{
		std::copy(data_send[i].begin(), data_send[i].end(), recvbuf+sdispls[i]);
		std::vector<NT>().swap(data_send[i]);
	}
	MPI_Alltoallv(recvbuf, sendcnt, sdispls, MPIType<NT>(), sendbuf, recvcnt, rdispls, MPIType<NT>(), World);

	IT * newinds = new IT[totalsend];
	for(int i=0; i<nprocs; ++i)
	{
		std::copy(locs_send[i].begin(), locs_send[i].end(), newinds+sdispls[i]);
		std::vector<IT>().swap(locs_send[i]);
	}
	IT * indsbuf = new IT[size];
	MPI_Alltoallv(newinds, sendcnt, sdispls, MPIType<IT>(), indsbuf, recvcnt, rdispls, MPIType<IT>(), World);
	DeleteAll(newinds, sendcnt, sdispls, rdispls, recvcnt);

	arr.resize(size);
	for(IT i=0; i<size; ++i)
	{
		arr[indsbuf[i]] = sendbuf[i];
	}
#endif
// void iota(IT globalsize, NT first): fills the vector with consecutive values
// starting from `first`, in globally increasing order.
template <class IT, class NT>
	IT length = MyLocLength();
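// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): iota() resizes the
// vector to the given global length and fills it with consecutive values.
// GetElement is collective, so every rank calls it.
// ---------------------------------------------------------------------------
static void IotaExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int64_t, int64_t> v(grid, 10, 0);
	v.iota(10, 100);                   // global contents are now 100, 101, ..., 109
	int64_t last = v.GetElement(9);    // every rank receives the same value
	if(grid->GetRank() == 0)
		std::cout << "v[9] = " << last << std::endl;	// expect 109
}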
// FullyDistVec<IT,NT> operator()(const FullyDistVec<IT,IT> & ri) const
// Subscripting with an index vector (subsref): builds a result `Indexed` such
// that Indexed[i] = (*this)[ri[i]], fetching the requested elements from their owners.
template <class IT, class NT>
	if(!(*commGrid == *ri.commGrid))
	{
		std::cout << "Grids are not comparable for dense vector subsref" << std::endl;
	}
	MPI_Comm World = commGrid->GetWorld();
	int nprocs = commGrid->GetSize();
	std::vector< std::vector< IT > > data_req(nprocs);
	std::vector< std::vector< IT > > revr_map(nprocs);	// to route the answers back to the right slots

	IT riloclen = ri.LocArrSize();
	for(IT i=0; i < riloclen; ++i)
	{
		IT locind;
		int owner = Owner(ri.arr[i], locind);	// who owns the requested element?
		data_req[owner].push_back(locind);	// local index to fetch on the owner
		revr_map[owner].push_back(i);	// where it goes in my result
	}
	IT * sendbuf = new IT[riloclen];
	int * sendcnt = new int[nprocs];
	int * sdispls = new int[nprocs];
	for(int i=0; i<nprocs; ++i)
		sendcnt[i] = (int) data_req[i].size();

	int * rdispls = new int[nprocs];
	int * recvcnt = new int[nprocs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);	// share the request counts
	sdispls[0] = 0;
	rdispls[0] = 0;
	for(int i=0; i<nprocs-1; ++i)
	{
		sdispls[i+1] = sdispls[i] + sendcnt[i];
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	IT totrecv = std::accumulate(recvcnt, recvcnt+nprocs, static_cast<IT>(0));
	for(int i=0; i<nprocs; ++i)
	{
		std::copy(data_req[i].begin(), data_req[i].end(), sendbuf+sdispls[i]);
		std::vector<IT>().swap(data_req[i]);
	}

	IT * reversemap = new IT[riloclen];
	for(int i=0; i<nprocs; ++i)
	{
		std::copy(revr_map[i].begin(), revr_map[i].end(), reversemap+sdispls[i]);
		std::vector<IT>().swap(revr_map[i]);
	}

	IT * recvbuf = new IT[totrecv];
	MPI_Alltoallv(sendbuf, sendcnt, sdispls, MPIType<IT>(), recvbuf, recvcnt, rdispls, MPIType<IT>(), World);

	// Answer the requests: look up each asked-for local index
	NT * databack = new NT[totrecv];
	for(int i=0; i<nprocs; ++i)
	{
		for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
		{
			databack[j] = arr[recvbuf[j]];
		}
	}
	NT * databuf = new NT[riloclen];
	// The requested values travel back along the reverse of the original route
	MPI_Alltoallv(databack, recvcnt, rdispls, MPIType<NT>(), databuf, sendcnt, sdispls, MPIType<NT>(), World);

	// Scatter the answers into the result using the reverse map
	for(int i=0; i<nprocs; ++i)
	{
		for(int j=sdispls[i]; j< sdispls[i]+sendcnt[i]; ++j)
		{
			Indexed.arr[reversemap[j]] = databuf[j];
		}
	}
	DeleteAll(sdispls, sendcnt, databuf, reversemap);
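// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): gathering selected
// elements with operator()(ri). The index vector must live on the same grid;
// constructor and SetElement usage follow my reading of the CombBLAS API.
// ---------------------------------------------------------------------------
static void SubsrefExample(std::shared_ptr<CommGrid> grid)
{
	FullyDistVec<int64_t, double> v(grid, 100, 0.0);
	v.iota(100, 0.0);	// v[i] = i

	FullyDistVec<int64_t, int64_t> ri(grid, 3, 0);
	ri.SetElement(0, 10);
	ri.SetElement(1, 20);
	ri.SetElement(2, 30);

	FullyDistVec<int64_t, double> sub = v(ri);	// global contents {10.0, 20.0, 30.0}
}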
// void PrintInfo(std::string vectorname) const
template <class IT, class NT>
	IT totl = TotalLength();
	if (commGrid->GetRank() == 0)
		std::cout << "As a whole, " << vectorname << " has length " << totl << std::endl;
// void Set(const FullyDistSpVec<IT,NT> & other): overwrites the dense entries at
// the positions that are nonzero in `other`; all other entries are left untouched.
template <class IT, class NT>
	if(*(commGrid) == *(other.commGrid))
	{
		if(glen != other.glen)
		{
			std::cerr << "Vector dimensions don't match (" << glen << " vs " << other.glen << ") for FullyDistVec::Set\n";
		}
#pragma omp parallel for
		for(IT i=0; i< spvecsize; ++i)
		{
			arr[other.ind[i]] = other.num[i];
		}
	}
	else
	{
		std::cout << "Grids are not comparable for Set" << std::endl;
	}
// GSet: scatter values into this vector with one-sided MPI (MPI_Put). The two
// binary operations compute, from each sparse entry, the target global index
// and the value to write there. Local targets are written directly; remote
// targets go through the caller-supplied window.
template <class IT, class NT>
template <class NT1, typename _BinaryOperationIdx, typename _BinaryOperationVal>
	if(*(commGrid) != *(spVec.commGrid))
	{
		std::cout << "Grids are not comparable for GSet" << std::endl;
		return;
	}
	IT spVecSize = spVec.getlocnnz();
	if(spVecSize==0)	return;

	MPI_Comm World = commGrid->GetWorld();
	int nprocs = commGrid->GetSize();
	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

	std::vector< std::vector< NT > > datsent(nprocs);
	std::vector< std::vector< IT > > indsent(nprocs);
	IT lengthUntil = spVec.LengthUntil();

	for(IT k=0; k < spVecSize; ++k)
	{
		IT locind;
		IT globind = __binopIdx(spVec.num[k], spVec.ind[k] + lengthUntil);	// global index of the target
		int owner = Owner(globind, locind);
		NT val = __binopVal(spVec.num[k], spVec.ind[k] + lengthUntil);	// value to write there

		datsent[owner].push_back(val);
		indsent[owner].push_back(locind);
	}

	// Local writes do not need the window
	for(int j = 0; j < datsent[myrank].size(); ++j)
	{
		arr[indsent[myrank][j]] = datsent[myrank][j];
	}
	// Remote writes: lock each target's window, put the values, unlock
	for(int i=0; i<nprocs; ++i)
	{
		if(i != myrank)
		{
			MPI_Win_lock(MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, win);
			for(int j = 0; j < datsent[i].size(); ++j)
			{
				MPI_Put(&datsent[i][j], 1, MPIType<NT>(), i, indsent[i][j], 1, MPIType<NT>(), win);
			}
			MPI_Win_unlock(i, win);
		}
	}
// GGet: gather values from this vector with one-sided MPI (MPI_Get). For each
// sparse entry, __binopIdx computes the global index to read; the sparse result
// `res` keeps spVec's index pattern and is filled with the fetched values
// (nullValue where nothing was fetched).
template <class IT, class NT>
template <class NT1, typename _BinaryOperationIdx>
	if(*(commGrid) != *(spVec.commGrid))
	{
		std::cout << "Grids are not comparable for GGet" << std::endl;
	}
	MPI_Comm World = commGrid->GetWorld();
	int nprocs = commGrid->GetSize();
	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

	std::vector< std::vector< NT > > spIdx(nprocs);	// position of each request in the result
	std::vector< std::vector< IT > > indsent(nprocs);	// local index to read on the owner
	IT lengthUntil = spVec.LengthUntil();
	IT spVecSize = spVec.getlocnnz();

	res.ind.resize(spVecSize);
	res.num.resize(spVecSize);

	for(IT k=0; k < spVecSize; ++k)
	{
		IT locind;
		IT globind = __binopIdx(spVec.num[k], spVec.ind[k] + lengthUntil);	// global index to read
		int owner = Owner(globind, locind);

		spIdx[owner].push_back(k);
		indsent[owner].push_back(locind);

		res.num[k] = nullValue;	// default until the fetched value arrives
		res.ind[k] = spVec.ind[k];	// result keeps spVec's sparsity pattern
	}

	// Local reads do not need the window
	for(int j = 0; j < indsent[myrank].size(); ++j)
	{
		res.num[spIdx[myrank][j]] = arr[indsent[myrank][j]];
	}
	// Remote reads: expose the local array through a window and fetch with MPI_Get
	MPI_Win win;
	MPI_Win_create(&arr[0], LocArrSize() * sizeof(NT), sizeof(NT), MPI_INFO_NULL, World, &win);
	for(int i=0; i<nprocs; ++i)
	{
		if(i != myrank)
		{
			MPI_Win_lock(MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, win);
			for(int j = 0; j < indsent[i].size(); ++j)
			{
				MPI_Get(&res.num[spIdx[i][j]], 1, MPIType<NT>(), i, indsent[i][j], 1, MPIType<NT>(), win);
			}
			MPI_Win_unlock(i, win);
		}
	}
	MPI_Win_free(&win);
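// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the bare one-sided
// MPI pattern used by GSet/GGet above, reduced to plain MPI calls. Every rank
// exposes a local buffer through a window and reads one element from its right
// neighbor with MPI_Get under a shared lock.
// ---------------------------------------------------------------------------
static void OneSidedGetExample(MPI_Comm comm)
{
	int rank, nprocs;
	MPI_Comm_rank(comm, &rank);
	MPI_Comm_size(comm, &nprocs);

	double local[4] = { rank + 0.0, rank + 0.1, rank + 0.2, rank + 0.3 };
	MPI_Win win;
	MPI_Win_create(local, 4 * sizeof(double), sizeof(double), MPI_INFO_NULL, comm, &win);

	int target = (rank + 1) % nprocs;	// right neighbor
	double fetched;
	MPI_Win_lock(MPI_LOCK_SHARED, target, MPI_MODE_NOCHECK, win);
	MPI_Get(&fetched, 1, MPI_DOUBLE, target, 2 /* displacement */, 1, MPI_DOUBLE, win);
	MPI_Win_unlock(target, win);	// the Get is guaranteed complete after the unlock

	MPI_Win_free(&win);
	std::cout << "rank " << rank << " fetched " << fetched << " from rank " << target << std::endl;
}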