int main(int argc, char* argv[])
{
    int nprocs, myrank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    if (argc < 6)
    {
        if (myrank == 0)
        {
            cout << "Usage: ./MultTest <MatrixA> <MatrixB> <MatrixC> <vecX> <vecY>" << endl;
            cout << "<MatrixA>,<MatrixB>,<MatrixC> are absolute paths, and the files should be in triples format" << endl;
        }
        MPI_Finalize();
        return -1;
    }
    // Command-line arguments: three matrix files and two vector files
    string Aname(argv[1]);
    string Bname(argv[2]);
    string Cname(argv[3]);
    string V1name(argv[4]);
    string V2name(argv[5]);

    ifstream vecinpx(V1name.c_str());
    ifstream vecinpy(V2name.c_str());
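    // NOTE: a sketch, not part of the original listing. The two ifstreams are
    // presumably consumed by FullyDistVec::ReadDistribute, which the documentation
    // describes as an ASCII-only reader, roughly:
    //
    //     x.ReadDistribute(vecinpx, 0);  // rank 0 acts as the master reader
    //     y.ReadDistribute(vecinpy, 0);
    //
    // where x and y stand for the dense input/control vectors created below.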
    MPI_Barrier(MPI_COMM_WORLD);

    shared_ptr<CommGrid> fullWorld;
    fullWorld.reset(new CommGrid(MPI_COMM_WORLD, 0, 0));
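    // NOTE: a sketch, not part of the original listing; the excerpt elides the
    // object construction that follows. From the documentation cross-references,
    // PSpMat<NT>::DCCols is SpDCCols<int64_t,NT>, PSpMat<NT>::MPI_DCCols is
    // SpParMat<int64_t,NT,DCCols>, and SR is SelectMaxSRing<bool,int32_t>;
    // PTDOUBLEDOUBLE is presumably a PlusTimesSRing<double,double> semiring.
    // The setup plausibly looks like:
    //
    //     PSpMat<double>::MPI_DCCols A(fullWorld), B(fullWorld), C(fullWorld);
    //     FullyDistVec<int64_t, double> x(fullWorld), ycontrol(fullWorld);
    //     A.ParallelReadMM(Aname, true, maximum<double>());
    //     B.ParallelReadMM(Bname, true, maximum<double>());
    //     FullyDistVec<int64_t, double> y = SpMV<PTDOUBLEDOUBLE>(A, x);  // dense SpMV under test
    //
    // The vector names x, y, and ycontrol are assumptions; only A, B, C, the
    // sparse vectors, and the file names appear verbatim in the excerpt.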
    if (/* dense SpMV result matches the control vector; check elided in the excerpt */)
    {
        SpParHelper::Print("Dense SpMV (fully dist) working correctly\n");
    }
    else
    {
        SpParHelper::Print("ERROR in Dense SpMV, go fix it!\n");
    }
    // ... (construction of the sparse vectors spx, spy elided)
    // Sparse SpMV over the plus-times semiring; the final argument is indexisvalue = false
    SpMV<PTDOUBLEDOUBLE>(A, spx, spy, false);
    if (spycontrol == spy)
    {
        SpParHelper::Print("Sparse SpMV (fully dist) working correctly\n");
    }
    else
    {
        SpParHelper::Print("ERROR in Sparse SpMV, go fix it!\n");
    }
    SpMV<PTDOUBLEDOUBLE>(ACsc, spx, spy_csc, false, SPA);  // SpMSpV-bucket path
    if (spy_csc == spy)  // condition reconstructed: compare against the earlier result
    {
        SpParHelper::Print("SpMSpV-bucket works correctly for general CSC matrices\n");
    }
    else
    {
        SpParHelper::Print("SpMSpV-bucket does not work correctly for general CSC matrices, go fix it!\n");
    }
    // Synchronous SUMMA multiplication: C = A * B
    C = Mult_AnXBn_Synch<PTDOUBLEDOUBLE, double, PSpMat<double>::DCCols>(A, B);
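    // NOTE: a sketch, not part of the original listing. The expected product is
    // presumably read from <MatrixC> for the comparisons below, e.g.:
    //
    //     PSpMat<double>::MPI_DCCols CControl(fullWorld);
    //     CControl.ParallelReadMM(Cname, true, maximum<double>());
    //
    // The name CControl is an assumption; ParallelReadMM's signature comes from
    // the documentation cross-references.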
    if (/* C matches the product read from Cname; check elided in the excerpt */)
    {
        SpParHelper::Print("Synchronous Multiplication working correctly\n");
    }
    else
    {
        SpParHelper::Print("ERROR in Synchronous Multiplication, go fix it!\n");
    }
    // Same product with the double-buffered SUMMA variant
    C = Mult_AnXBn_DoubleBuff<PTDOUBLEDOUBLE, double, PSpMat<double>::DCCols>(A, B);
    if (/* C matches the product read from Cname; check elided in the excerpt */)
    {
        SpParHelper::Print("Double buffered multiplication working correctly\n");
    }
    else
    {
        SpParHelper::Print("ERROR in double buffered multiplication, go fix it!\n");
    }
    spx.Apply(bind1st(multiplies<double>(), 100));  // scale every stored entry by 100
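    // NOTE: a sketch, not part of the original listing. std::bind1st was removed
    // in C++17; an equivalent call with a lambda would be:
    //
    //     spx.Apply([](double v) { return 100.0 * v; });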
    // ... (Boolean matrix ABool and the int64 vectors are set up in elided lines)
    SpMV<SR>(ABool, spxint64, spyint64, false);  // baseline run over the select-max semiring
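    // NOTE: a sketch, not part of the original listing. optbuf below is presumably
    // an OptBuf object, the preallocated communication buffer that
    // OptimizeForGraph500 fills in, declared roughly as:
    //
    //     OptBuf<int32_t, int64_t> optbuf;
    //
    // The template arguments are assumptions based on CombBLAS BFS examples.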
    ABool.OptimizeForGraph500(optbuf);  // precompute buffers for the Graph500-style SpMV
    SpMV<SR>(ABool, spxint64, spyint64buf, false, optbuf);  // optimized run
    if (spyint64 == spyint64buf)
    {
        SpParHelper::Print("Graph500 optimizations are correct\n");
    }
    else
    {
        SpParHelper::Print("ERROR in Graph500 optimizations, go fix it!\n");
    }
    SpMV<SR>(ABool, spxint64, spyint64_threaded, false);
    if (spyint64 == spyint64_threaded)
    {
        SpParHelper::Print("Multithreaded Sparse SpMV works\n");
    }
    else
    {
        SpParHelper::Print("ERROR in multithreaded sparse SpMV, go fix it!\n");
    }
    SpMV<SR>(ABoolCsc, spxint64, spyint64_csc_threaded, false, SPA1);
    if (spyint64 == spyint64_csc_threaded)
    {
        SpParHelper::Print("SpMSpV-bucket works correctly for Boolean CSC matrices\n");
    }
    else
    {
        SpParHelper::Print("ERROR in SpMSpV-bucket with Boolean CSC matrices, go fix it!\n");
    }

    MPI_Finalize();
    return 0;
}