ABACUS develop
Atomic-orbital Based Ab-initio Computation at UStc

Parallel_Global Namespace Reference

Functions
void read_pal_param (int argc, char **argv, int &NPROC, int &NTHREAD_PER_PROC, int &MY_RANK)

void myProd (std::complex< double > *in, std::complex< double > *inout, int *len, MPI_Datatype *dptr)

void split_diag_world (const int &diag_np, const int &nproc, const int &my_rank, int &drank, int &dsize, int &dcolor)

void split_grid_world (const int diag_np, const int &nproc, const int &my_rank, int &grank, int &gsize)

void init_pools (const int &NPROC, const int &MY_RANK, const int &BNDPAR, const int &KPAR, int &NPROC_IN_BNDGROUP, int &RANK_IN_BPGROUP, int &MY_BNDGROUP, int &NPROC_IN_POOL, int &RANK_IN_POOL, int &MY_POOL)
    An interface function that calls Parallel_Global::divide_pools().

void divide_pools (const int &NPROC, const int &MY_RANK, const int &BNDPAR, const int &KPAR, int &NPROC_IN_BNDGROUP, int &RANK_IN_BPGROUP, int &MY_BNDGROUP, int &NPROC_IN_POOL, int &RANK_IN_POOL, int &MY_POOL)

void divide_mpi_groups (const int &procs, const int &num_groups, const int &rank, int &procs_in_group, int &my_group, int &rank_in_group, const bool even=false)
    Divide MPI processes into groups.

void finalize_mpi ()
    Release MPI communicators and resources.

Variables

int mpi_number = 0
int omp_number = 0
void Parallel_Global::divide_mpi_groups (const int &procs, const int &num_groups, const int &rank, int &procs_in_group, int &my_group, int &rank_in_group, const bool even = false)
Divide MPI processes into groups.
Parameters:
    [in]  procs           Number of MPI processes
    [in]  num_groups      Number of groups
    [in]  rank            Rank of this process
    [out] procs_in_group  Number of processes in each group
    [out] my_group        Group index of this process
    [out] rank_in_group   Rank of this process within its group
    [in]  even            If true, require that every group contain the same number of processes
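The exact distribution scheme is not documented here. As an illustration only, a minimal sketch of one common block-distribution scheme under these parameter conventions (not necessarily what ABACUS implements, and ignoring the even option) could look like this:

    // Hypothetical sketch: block-distribute `procs` ranks over `num_groups` groups,
    // giving the first (procs % num_groups) groups one extra rank.
    // Illustrative only; not taken from the ABACUS source.
    void divide_groups_sketch(const int procs, const int num_groups, const int rank,
                              int &procs_in_group, int &my_group, int &rank_in_group)
    {
        const int base = procs / num_groups;  // minimum ranks per group
        const int rest = procs % num_groups;  // groups 0..rest-1 receive one extra rank
        int start = 0;
        for (int g = 0; g < num_groups; ++g)
        {
            const int size = base + (g < rest ? 1 : 0);
            if (rank < start + size)
            {
                my_group = g;
                procs_in_group = size;
                rank_in_group = rank - start;
                return;
            }
            start += size;
        }
    }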
void Parallel_Global::divide_pools (const int &NPROC, const int &MY_RANK, const int &BNDPAR, const int &KPAR, int &NPROC_IN_BNDGROUP, int &RANK_IN_BPGROUP, int &MY_BNDGROUP, int &NPROC_IN_POOL, int &RANK_IN_POOL, int &MY_POOL)
void Parallel_Global::finalize_mpi ()

Release MPI communicators and resources.
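As a hedged sketch only, a cleanup routine with this purpose typically frees any derived communicators before shutting MPI down; the communicator names below (POOL_WORLD, GRID_WORLD, DIAG_WORLD) are assumptions, not a verified list of what ABACUS frees:

    #include <mpi.h>

    // Assumed global communicators created by the pool/grid/diag splits (names are guesses).
    extern MPI_Comm POOL_WORLD;
    extern MPI_Comm GRID_WORLD;
    extern MPI_Comm DIAG_WORLD;

    // Sketch of an MPI cleanup routine: free derived communicators, then finalize MPI.
    void finalize_mpi_sketch()
    {
        if (POOL_WORLD != MPI_COMM_NULL) { MPI_Comm_free(&POOL_WORLD); }
        if (GRID_WORLD != MPI_COMM_NULL) { MPI_Comm_free(&GRID_WORLD); }
        if (DIAG_WORLD != MPI_COMM_NULL) { MPI_Comm_free(&DIAG_WORLD); }
        MPI_Finalize();
    }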
void Parallel_Global::init_pools (const int &NPROC, const int &MY_RANK, const int &BNDPAR, const int &KPAR, int &NPROC_IN_BNDGROUP, int &RANK_IN_BPGROUP, int &MY_BNDGROUP, int &NPROC_IN_POOL, int &RANK_IN_POOL, int &MY_POOL)

An interface function that calls Parallel_Global::divide_pools().
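A hedged usage sketch of how a driver might call init_pools after MPI start-up; the partition values (2 band groups, 4 k-point pools) and the surrounding set-up are assumptions chosen only for illustration:

    #include <mpi.h>

    // Declaration copied from the documented interface above.
    namespace Parallel_Global
    {
        void init_pools(const int &NPROC, const int &MY_RANK, const int &BNDPAR, const int &KPAR,
                        int &NPROC_IN_BNDGROUP, int &RANK_IN_BPGROUP, int &MY_BNDGROUP,
                        int &NPROC_IN_POOL, int &RANK_IN_POOL, int &MY_POOL);
    }

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);

        int nproc = 1, my_rank = 0;
        MPI_Comm_size(MPI_COMM_WORLD, &nproc);
        MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

        const int bndpar = 2;  // assumed: 2 band-parallel groups
        const int kpar = 4;    // assumed: 4 k-point pools

        int nproc_in_bndgroup = 0, rank_in_bpgroup = 0, my_bndgroup = 0;
        int nproc_in_pool = 0, rank_in_pool = 0, my_pool = 0;

        Parallel_Global::init_pools(nproc, my_rank, bndpar, kpar,
                                    nproc_in_bndgroup, rank_in_bpgroup, my_bndgroup,
                                    nproc_in_pool, rank_in_pool, my_pool);

        // ... each rank now knows its band group and its k-point pool ...

        MPI_Finalize();
        return 0;
    }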
void Parallel_Global::myProd (std::complex< double > *in, std::complex< double > *inout, int *len, MPI_Datatype *dptr)
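The signature matches an MPI user-defined reduction callback for buffers of std::complex<double>. The sketch below shows how such a callback is typically registered with MPI_Op_create and used in a reduction; the element-wise combine shown (a complex product) is inferred from the name only and is an assumption about what myProd actually computes:

    #include <mpi.h>
    #include <complex>

    // Sketch of a reduction callback with the same parameter list as myProd.
    // The combine step (complex multiplication) is an assumption based on the name.
    static void my_prod_sketch(std::complex<double> *in, std::complex<double> *inout,
                               int *len, MPI_Datatype * /*dptr*/)
    {
        for (int i = 0; i < *len; ++i)
        {
            inout[i] *= in[i];  // fold the incoming buffer into the accumulator
        }
    }

    // Register the callback as an MPI operation and apply it across a communicator.
    void reduce_complex_sketch(std::complex<double> *data, int n, MPI_Comm comm)
    {
        MPI_Op op;
        // The cast adapts the typed signature to MPI's generic MPI_User_function.
        MPI_Op_create(reinterpret_cast<MPI_User_function *>(&my_prod_sketch), 1, &op);
        MPI_Allreduce(MPI_IN_PLACE, data, n, MPI_DOUBLE_COMPLEX, op, comm);
        MPI_Op_free(&op);
    }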
void Parallel_Global::read_pal_param (int argc, char **argv, int &NPROC, int &NTHREAD_PER_PROC, int &MY_RANK)
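As a hedged sketch of what a routine with this interface typically does, the following initializes MPI, queries the world size and rank, and reports the OpenMP thread count per process; the actual ABACUS behavior (for example, how argv is parsed) may differ:

    #include <mpi.h>
    #include <omp.h>

    // Sketch with the same interface as read_pal_param.
    // Illustrative guess, not the ABACUS implementation.
    void read_pal_param_sketch(int argc, char **argv,
                               int &nproc, int &nthread_per_proc, int &my_rank)
    {
        int provided = 0;
        MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);

        MPI_Comm_size(MPI_COMM_WORLD, &nproc);
        MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

        // Threads each MPI process may use, e.g. as set via OMP_NUM_THREADS.
        nthread_per_proc = omp_get_max_threads();
    }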
void Parallel_Global::split_diag_world (const int &diag_np, const int &nproc, const int &my_rank, int &drank, int &dsize, int &dcolor)

Split off the "diag world": the first process of each grid group together forms the diag world. For example, with 64 processes and diago_proc = 4, there are 4 "grid worlds" of 16 processes each; the head of each grid world joins the diag world, and diagonalization is carried out only on those 4 processes.
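A minimal MPI_Comm_split sketch of the split described above (for example, 64 ranks with diag_np = 4); the color/key arithmetic is an assumption, and the real routine returns drank/dsize/dcolor rather than communicators, but the grouping idea is the same:

    #include <mpi.h>

    // Sketch: with nproc ranks and diag_np grid groups, each rank joins one of
    // diag_np "grid worlds" of nproc/diag_np ranks; the head (local rank 0) of
    // every grid world additionally joins the "diag world". Assumes diag_np
    // divides nproc evenly (e.g. 64 ranks, diag_np = 4 -> groups of 16).
    void split_diag_sketch(const int diag_np, const int nproc, const int my_rank,
                           MPI_Comm &grid_world, MPI_Comm &diag_world)
    {
        const int group_size = nproc / diag_np;       // e.g. 64 / 4 = 16
        const int grid_color = my_rank / group_size;  // which grid world this rank is in
        const int grid_rank  = my_rank % group_size;  // rank inside its grid world

        MPI_Comm_split(MPI_COMM_WORLD, grid_color, grid_rank, &grid_world);

        // Only grid-world heads get a valid color; other ranks are left out.
        const int diag_color = (grid_rank == 0) ? 0 : MPI_UNDEFINED;
        MPI_Comm_split(MPI_COMM_WORLD, diag_color, my_rank, &diag_world);
        // diag_world == MPI_COMM_NULL on ranks that are not grid-world heads.
    }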
void Parallel_Global::split_grid_world (const int diag_np, const int &nproc, const int &my_rank, int &grank, int &gsize)
int Parallel_Global::mpi_number = 0

int Parallel_Global::omp_number = 0