template <typename c, typename t, typename VT>
void save(std::basic_ostream<c,t>& os, std::vector<VT>& strvec, int64_t index)
{
    // write every entry of the vector on one line, space-separated (index is unused here)
    for (auto it = strvec.begin(); it != strvec.end(); ++it)
    {
        os << *it << " ";
    }
}
// Write clusters to ofName: the labels of all vertices belonging to the same
// cluster are written on one line.  clustIdForVtx holds the 0-based cluster id
// of every vertex; vtxLabels holds the fixed-width string label of every vertex.
template <class IT>
void WriteMCLClusters(std::string ofName, FullyDistVec<IT, IT> clustIdForVtx,
                      FullyDistVec<IT, std::array<char, MAXVERTNAME> > vtxLabels)
{
    auto commGrid = clustIdForVtx.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    // number of clusters = largest cluster id + 1 (cluster ids are 0-based)
    IT nclusters = clustIdForVtx.Reduce(maximum<IT>(), (IT) 0);
    nclusters++;

    std::vector<int> rdispls(nprocs+1);
    std::vector<int> recvcnt(nprocs);
    std::vector<int> sendcnt(nprocs,0);
    std::vector<int> sdispls(nprocs+1);

    // temp is used only to map a cluster id to its owner process and local index
    FullyDistVec<IT,IT> temp(commGrid, nclusters, (IT) 0);
    IT ploclen = clustIdForVtx.LocArrSize();
    const IT* larr = clustIdForVtx.GetLocArr();
    // count how many local vertices go to the owner of each cluster id
    for(IT i=0; i < ploclen; ++i)
    {
        IT locind;
        int owner = temp.Owner(larr[i], locind);
        sendcnt[owner]++;
    }
    MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
    sdispls[0] = 0; rdispls[0] = 0;
    for(int i=0; i<nprocs; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }

    // pack (local cluster index, vertex label) pairs in destination order
    typedef std::array<char, MAXVERTNAME> STRASARRAY;
    typedef std::pair< IT, STRASARRAY> TYPE2SEND;
    const STRASARRAY* lVtxLabels = vtxLabels.GetLocArr();
    std::vector<TYPE2SEND> senddata(ploclen);
    std::vector<int> count(nprocs, 0);
    for(IT i=0; i < ploclen; ++i)
    {
        IT locind;
        int owner = temp.Owner(larr[i], locind);
        int idx = sdispls[owner] + count[owner];
        count[owner]++;
        senddata[idx] = TYPE2SEND(locind, lVtxLabels[i]);
    }

    // ship the pairs as raw bytes
    MPI_Datatype MPI_CLUST;
    MPI_Type_contiguous(sizeof(TYPE2SEND), MPI_CHAR, &MPI_CLUST);
    MPI_Type_commit(&MPI_CLUST);
    IT totrecv = rdispls[nprocs];
    std::vector<TYPE2SEND> recvdata(totrecv);
    MPI_Alltoallv(senddata.data(), sendcnt.data(), sdispls.data(), MPI_CLUST,
                  recvdata.data(), recvcnt.data(), rdispls.data(), MPI_CLUST, World);

    // the receiving process groups vertex labels by the cluster ids it owns
    std::vector< std::vector<std::string> > vtxGroupbyCC(temp.LocArrSize());
    for(IT i=0; i<totrecv; ++i)
    {
        IT clusterID = recvdata[i].first;
        // labels are null-padded; keep only the characters before the first '\0'
        auto locnull = std::find(recvdata[i].second.begin(), recvdata[i].second.end(), '\0');
        std::string vtxstr(recvdata[i].second.begin(), locnull);
        vtxGroupbyCC[clusterID].push_back(vtxstr);
    }

    // sort the vertices within each locally owned cluster
#pragma omp parallel for
    for(unsigned int i=0; i<vtxGroupbyCC.size(); ++i)
    {
        std::sort(vtxGroupbyCC[i].begin(), vtxGroupbyCC[i].end());
    }

    // populate a distributed vector of clusters; it is then written to ofName,
    // one cluster per line, using the save() routine defined above
    FullyDistVec<IT, std::vector<std::string> > clusters(commGrid, nclusters, std::vector<std::string>{});
    for(IT i=0; i<clusters.LocArrSize(); i++)
    {
        clusters.SetLocalElement(i, vtxGroupbyCC[i]);
    }
}
// Write clusters to ofName using global vertex ids as labels: all vertices in the
// same cluster are written on one line.  base selects 0- or 1-based vertex ids.
template <class IT>
void WriteMCLClusters(std::string ofName, FullyDistVec<IT, IT> clustIdForVtx, int base)
{
    auto commGrid = clustIdForVtx.getcommgrid();
    MPI_Comm World = commGrid->GetWorld();
    int nprocs = commGrid->GetSize();
    IT lenuntil = clustIdForVtx.LengthUntil();   // global index of this process' first vertex
    // number of clusters = largest cluster id + 1 (cluster ids are 0-based)
    IT nclusters = clustIdForVtx.Reduce(maximum<IT>(), (IT) 0);
    nclusters++;

    std::vector<int> rdispls(nprocs+1);
    std::vector<int> recvcnt(nprocs);
    std::vector<int> sendcnt(nprocs,0);
    std::vector<int> sdispls(nprocs+1);

    // temp is used only to map a cluster id to its owner process and local index
    FullyDistVec<IT,IT> temp(commGrid, nclusters, (IT) 0);
    IT ploclen = clustIdForVtx.LocArrSize();
    const IT* larr = clustIdForVtx.GetLocArr();
    for(IT i=0; i < ploclen; ++i)
    {
        IT locind;
        int owner = temp.Owner(larr[i], locind);
        sendcnt[owner]++;
    }
    MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
    sdispls[0] = 0; rdispls[0] = 0;
    for(int i=0; i<nprocs; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }

    // pack (local cluster index, global vertex id) pairs in destination order
    std::vector<std::pair<IT, IT>> senddata(ploclen);
    std::vector<int> count(nprocs, 0);
    for(IT i=0; i < ploclen; ++i)
    {
        IT locind;
        int owner = temp.Owner(larr[i], locind);
        int idx = sdispls[owner] + count[owner];
        count[owner]++;
        senddata[idx] = std::make_pair(locind, i+lenuntil+base);
    }

    MPI_Datatype MPI_CLUST;
    MPI_Type_contiguous(sizeof(std::pair<IT, IT>), MPI_CHAR, &MPI_CLUST);
    MPI_Type_commit(&MPI_CLUST);
    IT totrecv = rdispls[nprocs];
    std::vector<std::pair<IT, IT>> recvdata(totrecv);
    MPI_Alltoallv(senddata.data(), sendcnt.data(), sdispls.data(), MPI_CLUST,
                  recvdata.data(), recvcnt.data(), rdispls.data(), MPI_CLUST, World);

    // the receiving process groups vertex ids by the cluster ids it owns
    std::vector< std::vector<IT> > vtxGroupbyCC(temp.LocArrSize());
    for(IT i=0; i<totrecv; ++i)
    {
        IT clusterID = recvdata[i].first;
        vtxGroupbyCC[clusterID].push_back(recvdata[i].second);
    }

    // populate a distributed vector of clusters; it is then written to ofName,
    // one cluster per line, using the save() routine defined above
    FullyDistVec<IT, std::vector<IT> > clusters(commGrid, nclusters, std::vector<IT>{});
    for(IT i=0; i<clusters.LocArrSize(); i++)
    {
        clusters.SetLocalElement(i, vtxGroupbyCC[i]);
    }
}
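// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the original header: the personalized
// all-to-all pattern both overloads rely on (local counts -> MPI_Alltoall of the
// counts -> prefix-sum displacements -> packed MPI_Alltoallv), shown standalone
// with plain MPI and a toy integer payload.  The function name toyAlltoallv and
// the one-element-per-rank destination rule are made up for the example; only
// <mpi.h> and <vector>, already used above, are required.
// ----------------------------------------------------------------------------
inline void toyAlltoallv(MPI_Comm comm)
{
    int nprocs, myrank;
    MPI_Comm_size(comm, &nprocs);
    MPI_Comm_rank(comm, &myrank);

    // toy data: every rank owns nprocs elements; element i is destined for rank i
    std::vector<int> localdata(nprocs, myrank);

    // 1) count how many elements go to each destination rank
    std::vector<int> sendcnt(nprocs, 0);
    for(int i=0; i<nprocs; ++i) sendcnt[i]++;        // here: exactly one per rank

    // 2) exchange the counts so every rank knows how much it will receive
    std::vector<int> recvcnt(nprocs);
    MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, comm);

    // 3) prefix-sum the counts into send/receive displacements
    std::vector<int> sdispls(nprocs+1, 0), rdispls(nprocs+1, 0);
    for(int i=0; i<nprocs; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }

    // 4) pack the send buffer in destination order and exchange the payload
    std::vector<int> senddata(sdispls[nprocs]);
    std::vector<int> count(nprocs, 0);
    for(int i=0; i<nprocs; ++i)
        senddata[sdispls[i] + count[i]++] = localdata[i];

    std::vector<int> recvdata(rdispls[nprocs]);
    MPI_Alltoallv(senddata.data(), sendcnt.data(), sdispls.data(), MPI_INT,
                  recvdata.data(), recvcnt.data(), rdispls.data(), MPI_INT, comm);
    // recvdata now holds one element from every rank, carrying the sender's rank id
}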
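// ----------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original header: how the two
// overloads above might be called.  It assumes CombBLAS' FullyDistVec
// (grid, length, initial-value) constructor and the MAXVERTNAME label width;
// the function name ExampleWriteClusters, the file names, and the all-zero
// cluster assignment are hypothetical and only keep the sketch self-contained.
// ----------------------------------------------------------------------------
template <class IT>
void ExampleWriteClusters(std::shared_ptr<CommGrid> grid, IT nvtx)
{
    // ith entry = 0-based cluster id of vertex i (e.g. as computed by MCL-style
    // clustering); initialized to cluster 0 here purely for illustration
    FullyDistVec<IT, IT> clustIdForVtx(grid, nvtx, (IT) 0);

    // overload with vertex ids: vertices are written by their global id, 1-based
    WriteMCLClusters("clusters_by_id.txt", clustIdForVtx, 1);

    // overload with labels: vertices are written by their fixed-width string label
    std::array<char, MAXVERTNAME> blankLabel{};   // all-'\0' label
    FullyDistVec<IT, std::array<char, MAXVERTNAME> > vtxLabels(grid, nvtx, blankLabel);
    WriteMCLClusters("clusters_by_label.txt", clustIdForVtx, vtxLabels);
}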