// Entry point for the CombBLAS "GalerkinNew" benchmark: times a Galerkin
// (triple) product / restriction with and without matrix splitting.
// NOTE(review): this is a fragmentary extraction — the leading integers
// (54, 57, 58, ...) are original source line numbers fused into the text,
// and the gaps between them (e.g. 60-64, 87-97, 144-154) are elided code.
54 int main(
int argc,
char* argv[])
// Standard MPI bring-up. nprocs/myrank are declared in an elided section
// (presumably ints filled by the two calls below).
57 MPI_Init(&argc, &argv);
58 MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
59 MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// Usage message — the argc check guarding it sits in elided lines 60-64.
65 cout <<
"Usage: ./GalerkinNew <Matrix> <OffDiagonal> <Diagonal> <T(right hand side restriction matrix)>" << endl;
66 cout <<
"<Matrix> <OffDiagonal> <Diagonal> <T> are absolute addresses, and files should be in triples format" << endl;
67 cout <<
"Example: ./GalerkinNew TESTDATA/grid3d_k5.txt TESTDATA/offdiag_grid3d_k5.txt TESTDATA/diag_grid3d_k5.txt TESTDATA/restrict_T_grid3d_k5.txt" << endl;
// Capture the four required input paths (full matrix, its off-diagonal and
// diagonal splits, and the restriction matrix T).
73 string Aname(argv[1]);
74 string Aoffd(argv[2]);
75 string Adiag(argv[3]);
76 string Tname(argv[4]);
// Only the diagonal file is opened via ifstream here; the other inputs are
// presumably read through ReadDistribute in the elided lines — TODO confirm.
81 ifstream inputD(Adiag.c_str());
83 MPI_Barrier(MPI_COMM_WORLD);
// Build a square process grid over MPI_COMM_WORLD (0,0 lets CommGrid pick
// the dimensions); shared across the distributed matrices read below.
85 shared_ptr<CommGrid> fullWorld;
86 fullWorld.reset(
new CommGrid(MPI_COMM_WORLD, 0, 0) );
98 SpParHelper::Print(
"Data read\n");
// Sanity check (elided, lines 99-116): verify offdiag + diag reproduce the
// full matrix before timing the two variants.
117 SpParHelper::Print(
"Splitting approach is correct\n");
121 SpParHelper::Print(
"Error in splitting, go fix it\n");
// --- Timing run 1: restriction WITHOUT splitting (loop body elided). ---
// Barriers bracket the region so MPI_Wtime measures the slowest rank.
128 MPI_Barrier(MPI_COMM_WORLD);
129 double t1 = MPI_Wtime();
135 MPI_Barrier(MPI_COMM_WORLD);
136 double t2 = MPI_Wtime();
139 cout<<
"Full restriction (without splitting) finished"<<endl;
140 printf(
"%.6lf seconds elapsed per iteration\n", (t2-t1)/(
double)ITERATIONS);
// --- Timing run 2: restriction WITH splitting (loop body elided). ---
143 MPI_Barrier(MPI_COMM_WORLD);
155 MPI_Barrier(MPI_COMM_WORLD);
159 cout<<
"Full restriction (with splitting) finished"<<endl;
// NOTE(review): this report reuses (t2-t1) from the first run. Verify that
// t1/t2 are re-sampled inside the elided lines 144-158; if not, the
// "with splitting" time printed here is actually the un-split timing.
160 printf(
"%.6lf seconds elapsed per iteration\n", (t2-t1)/(
double)ITERATIONS);
// Release the only explicitly-opened stream before (elided) MPI_Finalize.
162 inputD.clear();inputD.close();
void DimApply(Dim dim, const FullyDistVec< IT, NT > &v, _BinaryOperation __binary_op)
void ReadDistribute(const std::string &filename, int master, bool nonum, HANDLER handler, bool transpose=false, bool pario=false)
int main(int argc, char *argv[])
SpDCCols< int, NT > DCCols
std::ifstream & ReadDistribute(std::ifstream &infile, int master, HANDLER handler)
SpParMat< int, NT, DCCols > MPI_DCCols