COMBINATORIAL_BLAS  1.6
GenRmatDist.h
#ifndef _GEN_RMAT_DIST_H_
#define _GEN_RMAT_DIST_H_

#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <iomanip>
#include <functional>
#include <algorithm>
#include <vector>
#include <string>
#include <sstream>

// These macros should be defined before stdint.h is included
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#include <stdint.h>

#include "CombBLAS/CombBLAS.h"
#include "Glue.h"

namespace combblas {

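// Generates a Graph500 R-MAT matrix of the given scale and edge factor on the processes
// of layerworld, converts the edge list into a 2D-distributed sparse matrix, and returns
// a pointer to the calling process's local piece.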
template<typename IT, typename NT>
SpDCCols<IT,NT> * GenRMat(unsigned scale, unsigned EDGEFACTOR, double initiator[4], MPI_Comm & layerworld, bool scramble)
{
    double t01 = MPI_Wtime();
    double t02;

    DistEdgeList<int64_t> * DEL = new DistEdgeList<int64_t>(layerworld);

    ostringstream minfo;
    int nprocs = DEL->commGrid->GetSize();
    minfo << "Started Generation of scale "<< scale << endl;
    minfo << "Using " << nprocs << " MPI processes" << endl;
    SpParHelper::Print(minfo.str());

    DEL->GenGraph500Data(initiator, scale, EDGEFACTOR, scramble, false );
    // don't generate packed edges; that function uses MPI_COMM_WORLD, which cannot be used within a single layer!

    SpParHelper::Print("Generated renamed edge lists\n");
    ostringstream tinfo;
    t02 = MPI_Wtime();
    tinfo << "Generation took " << t02-t01 << " seconds" << endl;
    SpParHelper::Print(tinfo.str());

    SpParMat< IT, NT, SpDCCols<IT,NT> > * A = new SpParMat< IT, NT, SpDCCols<IT,NT> >(*DEL, false);  // convert the distributed edge list into a distributed sparse matrix
    delete DEL;
    SpParHelper::Print("Created Sparse Matrix\n");

    float balance = A->LoadImbalance();
    ostringstream outs;
    outs << "Load balance: " << balance << endl;
    SpParHelper::Print(outs.str());

    return A->seqptr();  // hand back the local piece; the SpParMat wrapper itself is not freed here
}

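// Generates an R-MAT matrix on layer 0 of the 3D process grid (CMG), optionally transposes
// it locally, column-splits it into CMG.GridLayers pieces, and scatters one piece to each
// layer along the fiber communicator, so every process ends up with its piece in splitmat.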
template <typename IT, typename NT>
void Generator(unsigned scale, unsigned EDGEFACTOR, double initiator[4], CCGrid & CMG, SpDCCols<IT,NT> & splitmat, bool trans, bool scramble)
{
    std::vector<IT> vecEss;                   // at layer_grid=0, this will have [CMG.GridLayers * SpDCCols<IT,NT>::esscount] entries
    std::vector< SpDCCols<IT, NT> > partsmat; // only valid at layer_grid=0
    int nparts = CMG.GridLayers;
    if(CMG.layer_grid == 0)
    {
        SpDCCols<IT, NT> * localmat = GenRMat<IT,NT>(scale, EDGEFACTOR, initiator, CMG.layerWorld, scramble);

        double trans_beg = MPI_Wtime();
        if(trans) localmat->Transpose();      // locally transpose
        comp_trans += (MPI_Wtime() - trans_beg);

        double split_beg = MPI_Wtime();
        localmat->ColSplit(nparts, partsmat); // split matrices are emplaced-back into partsmat vector, localmat destroyed

        for(int i=0; i< nparts; ++i)
        {
            std::vector<IT> ess = partsmat[i].GetEssentials();
            for(auto itr = ess.begin(); itr != ess.end(); ++itr)
            {
                vecEss.push_back(*itr);
            }
        }
        comp_split += (MPI_Wtime() - split_beg);
    }

    double scatter_beg = MPI_Wtime();        // timer on
    int esscnt = SpDCCols<IT,NT>::esscount;  // necessary cast for MPI

    std::vector<IT> myess(esscnt);
    MPI_Scatter(vecEss.data(), esscnt, MPIType<IT>(), myess.data(), esscnt, MPIType<IT>(), 0, CMG.fiberWorld);
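    // Every fiber rank now holds the "essentials" (dimensions and nonzero counts) of its
    // assigned piece, so non-root layers can preallocate storage with Create() before the
    // index and numerical arrays arrive below.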

    if(CMG.layer_grid == 0)  // senders
    {
        splitmat = partsmat[0];  // just copy the local split
        for(int recipient=1; recipient< nparts; ++recipient)  // scatter the others
        {
            int tag = 0;
            Arr<IT,NT> arrinfo = partsmat[recipient].GetArrays();
            for(unsigned int i=0; i< arrinfo.indarrs.size(); ++i)  // get index arrays
            {
                // MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
                MPI_Send(arrinfo.indarrs[i].addr, arrinfo.indarrs[i].count, MPIType<IT>(), recipient, tag++, CMG.fiberWorld);
            }
            for(unsigned int i=0; i< arrinfo.numarrs.size(); ++i)  // get numerical arrays
            {
                MPI_Send(arrinfo.numarrs[i].addr, arrinfo.numarrs[i].count, MPIType<NT>(), recipient, tag++, CMG.fiberWorld);
            }
        }
    }
    else  // receivers
    {
        splitmat.Create(myess);  // allocate memory for arrays
        Arr<IT,NT> arrinfo = splitmat.GetArrays();

        int tag = 0;
        for(unsigned int i=0; i< arrinfo.indarrs.size(); ++i)  // get index arrays
        {
            MPI_Recv(arrinfo.indarrs[i].addr, arrinfo.indarrs[i].count, MPIType<IT>(), 0, tag++, CMG.fiberWorld, MPI_STATUS_IGNORE);
        }
        for(unsigned int i=0; i< arrinfo.numarrs.size(); ++i)  // get numerical arrays
        {
            MPI_Recv(arrinfo.numarrs[i].addr, arrinfo.numarrs[i].count, MPIType<NT>(), 0, tag++, CMG.fiberWorld, MPI_STATUS_IGNORE);
        }
    }
    comm_split += (MPI_Wtime() - scatter_beg);
}

}  // namespace combblas

#endif
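
For reference, below is a minimal, hypothetical driver sketch showing how GenRMat might be invoked from a test program. The scale and edge factor, the Graph500 initiator probabilities (0.57, 0.19, 0.19, 0.05), and the use of MPI_COMM_WORLD as a single layer are illustrative assumptions, not part of this header.

// Hypothetical driver sketch (not part of this header).
#include <mpi.h>
#include <iostream>
#include "GenRmatDist.h"

int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    {
        double initiator[4] = {0.57, 0.19, 0.19, 0.05};  // Graph500 R-MAT initiator probabilities
        MPI_Comm layerworld = MPI_COMM_WORLD;            // treat all processes as one layer
        combblas::SpDCCols<int64_t, double> * localpiece =
            combblas::GenRMat<int64_t, double>(16, 16, initiator, layerworld, true);
        std::cout << "local nonzeros: " << localpiece->getnnz() << std::endl;
    }
    MPI_Finalize();
    return 0;
}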