ABACUS develop
Atomic-orbital Based Ab-initio Computation at UStc
Parallel_Common Namespace Reference

Classes

struct  object_cpu_point
 
struct  object_cpu_point< T, base_device::DEVICE_CPU >
 
struct  object_cpu_point< T, base_device::DEVICE_GPU >
 

Functions

void bcast_complex_double (std::complex< double > *object, const int n)
 
void bcast_string (std::string *object, const int n)
 
void bcast_double (double *object, const int n)
 
void bcast_int (int *object, const int n)
 
void bcast_char (char *object, const int n)
 
void bcast_complex_double (std::complex< double > &object)
 
void bcast_string (std::string &object)
 
void bcast_double (double &object)
 
void bcast_int (int &object)
 
void bcast_bool (bool &object)
 
void isend_data (const double *buf, int count, int dest, int tag, MPI_Comm &comm, MPI_Request *request)
 
void isend_data (const std::complex< double > *buf, int count, int dest, int tag, MPI_Comm &comm, MPI_Request *request)
 
void isend_data (const float *buf, int count, int dest, int tag, MPI_Comm &comm, MPI_Request *request)
 
void isend_data (const std::complex< float > *buf, int count, int dest, int tag, MPI_Comm &comm, MPI_Request *request)
 
void send_data (const double *buf, int count, int dest, int tag, MPI_Comm &comm)
 
void send_data (const std::complex< double > *buf, int count, int dest, int tag, MPI_Comm &comm)
 
void send_data (const float *buf, int count, int dest, int tag, MPI_Comm &comm)
 
void send_data (const std::complex< float > *buf, int count, int dest, int tag, MPI_Comm &comm)
 
void recv_data (double *buf, int count, int source, int tag, MPI_Comm &comm, MPI_Status *status)
 
void recv_data (std::complex< double > *buf, int count, int source, int tag, MPI_Comm &comm, MPI_Status *status)
 
void recv_data (float *buf, int count, int source, int tag, MPI_Comm &comm, MPI_Status *status)
 
void recv_data (std::complex< float > *buf, int count, int source, int tag, MPI_Comm &comm, MPI_Status *status)
 
void bcast_data (std::complex< double > *object, const int &n, const MPI_Comm &comm)
 
void bcast_data (std::complex< float > *object, const int &n, const MPI_Comm &comm)
 
void bcast_data (double *object, const int &n, const MPI_Comm &comm)
 
void bcast_data (float *object, const int &n, const MPI_Comm &comm)
 
void reduce_data (std::complex< double > *object, const int &n, const MPI_Comm &comm)
 
void reduce_data (std::complex< float > *object, const int &n, const MPI_Comm &comm)
 
void reduce_data (double *object, const int &n, const MPI_Comm &comm)
 
void reduce_data (float *object, const int &n, const MPI_Comm &comm)
 
void gatherv_data (const double *sendbuf, int sendcount, double *recvbuf, const int *recvcounts, const int *displs, MPI_Comm &comm)
 
void gatherv_data (const std::complex< double > *sendbuf, int sendcount, std::complex< double > *recvbuf, const int *recvcounts, const int *displs, MPI_Comm &comm)
 
void gatherv_data (const float *sendbuf, int sendcount, float *recvbuf, const int *recvcounts, const int *displs, MPI_Comm &comm)
 
void gatherv_data (const std::complex< float > *sendbuf, int sendcount, std::complex< float > *recvbuf, const int *recvcounts, const int *displs, MPI_Comm &comm)
 
template<typename T , typename Device >
void send_dev (const T *object, int count, int dest, int tag, MPI_Comm &comm, T *tmp_space=nullptr)
 send data residing on the Device
 
template<typename T , typename Device >
void isend_dev (const T *object, int count, int dest, int tag, MPI_Comm &comm, MPI_Request *request, T *send_space)
 non-blocking send of data residing on the Device
 
template<typename T , typename Device >
void recv_dev (T *object, int count, int source, int tag, MPI_Comm &comm, MPI_Status *status, T *tmp_space=nullptr)
 receive data into a buffer residing on the Device
 
template<typename T , typename Device >
void bcast_dev (T *object, const int &n, const MPI_Comm &comm, T *tmp_space=nullptr)
 broadcast data residing on the Device
 
template<typename T , typename Device >
void reduce_dev (T *object, const int &n, const MPI_Comm &comm, T *tmp_space=nullptr)
 
template<typename T , typename Device >
void gatherv_dev (const T *sendbuf, int sendcount, T *recvbuf, const int *recvcounts, const int *displs, MPI_Comm &comm, T *tmp_sspace=nullptr, T *tmp_rspace=nullptr)
 

Function Documentation

◆ bcast_bool()

void Parallel_Common::bcast_bool ( bool &  object)

◆ bcast_char()

void Parallel_Common::bcast_char ( char *  object,
const int  n 
)

◆ bcast_complex_double() [1/2]

void Parallel_Common::bcast_complex_double ( std::complex< double > &  object)

◆ bcast_complex_double() [2/2]

void Parallel_Common::bcast_complex_double ( std::complex< double > *  object,
const int  n 
)

◆ bcast_data() [1/4]

void Parallel_Common::bcast_data ( double *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ bcast_data() [2/4]

void Parallel_Common::bcast_data ( float *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ bcast_data() [3/4]

void Parallel_Common::bcast_data ( std::complex< double > *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ bcast_data() [4/4]

void Parallel_Common::bcast_data ( std::complex< float > *  object,
const int &  n,
const MPI_Comm &  comm 
)
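
Unlike the fixed-type bcast_* wrappers (bcast_int, bcast_double, ...), which take no communicator argument, the bcast_data overloads broadcast over an explicit MPI communicator. A minimal usage sketch, assuming the broadcast originates from the root rank of comm (the root rank is not stated on this page); the header path is likewise an assumption:

// Sketch only: the header path is assumed, not taken from this page.
#include "module_base/parallel_common.h"
#include <mpi.h>
#include <complex>
#include <vector>

// Make a block of complex coefficients identical on every rank of pool_comm.
// "pool_comm" is an illustrative name for any sub-communicator.
void broadcast_block(std::vector<std::complex<double>>& block, const MPI_Comm& pool_comm)
{
    Parallel_Common::bcast_data(block.data(), static_cast<int>(block.size()), pool_comm);
}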

◆ bcast_dev()

template<typename T , typename Device >
void Parallel_Common::bcast_dev ( T *  object,
const int &  n,
const MPI_Comm &  comm,
T *  tmp_space = nullptr 
)

broadcast data residing on the Device

Template Parameters
  T          float, double, std::complex<float>, or std::complex<double>
  Device     base_device::DEVICE_CPU or base_device::DEVICE_GPU
Parameters
  object     data array residing on the Device
  n          the number of elements in the array
  comm       the MPI communicator
  tmp_space  temporary staging buffer in CPU memory (may be nullptr)
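
A minimal sketch of broadcasting a device-resident array with bcast_dev. The header path is an assumption, as is leaving tmp_space at its nullptr default (i.e. assuming the helper then handles any CPU staging itself); with base_device::DEVICE_CPU the pointer is plain host memory, while a GPU build would pass base_device::DEVICE_GPU and a device pointer.

// Sketch only: header path assumed.
#include "module_base/parallel_device.h"
#include <mpi.h>
#include <complex>

// Broadcast n device-resident coefficients to every rank of comm.
// tmp_space (defaulted to nullptr) is the optional CPU staging buffer
// documented above.
void sync_coefficients(std::complex<double>* coeff, int n, const MPI_Comm& comm)
{
    Parallel_Common::bcast_dev<std::complex<double>, base_device::DEVICE_CPU>(coeff, n, comm);
}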

◆ bcast_double() [1/2]

void Parallel_Common::bcast_double ( double &  object)

◆ bcast_double() [2/2]

void Parallel_Common::bcast_double ( double *  object,
const int  n 
)

◆ bcast_int() [1/2]

void Parallel_Common::bcast_int ( int &  object)

◆ bcast_int() [2/2]

void Parallel_Common::bcast_int ( int *  object,
const int  n 
)

◆ bcast_string() [1/2]

void Parallel_Common::bcast_string ( std::string &  object)

◆ bcast_string() [2/2]

void Parallel_Common::bcast_string ( std::string *  object,
const int  n 
)
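
The bcast_* wrappers above take no communicator argument. A minimal usage sketch, assuming (this is not stated on this page) that they broadcast from the root rank of the global communicator; it mirrors the common pattern of reading input on one rank and distributing it, and the header path is an assumption:

// Sketch only: the header path is assumed, not taken from this page.
#include "module_base/parallel_common.h"
#include <mpi.h>
#include <string>

void distribute_input(int rank)
{
    // Illustrative parameters, typically read from an input file on one rank.
    int nbands = 0;
    double ecutwfc = 0.0;
    std::string basis;
    if (rank == 0) { nbands = 8; ecutwfc = 50.0; basis = "lcao"; }

    // Make the values identical on every rank.
    Parallel_Common::bcast_int(nbands);
    Parallel_Common::bcast_double(ecutwfc);
    Parallel_Common::bcast_string(basis);
}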

◆ gatherv_data() [1/4]

void Parallel_Common::gatherv_data ( const double *  sendbuf,
int  sendcount,
double *  recvbuf,
const int *  recvcounts,
const int *  displs,
MPI_Comm &  comm 
)
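
A usage sketch for the variable-length gather. Whether gatherv_data delivers the result to a single root or to every rank is not stated on this page; the sketch assumes all-gather semantics, builds recvcounts and displs from the per-rank counts with a plain MPI_Allgather (my addition, not part of this API), and assumes the header path.

// Sketch only: header path assumed.
#include "module_base/parallel_common.h"
#include <mpi.h>
#include <vector>

// Gather a variable number of doubles from every rank of comm into one
// contiguous array ordered by rank.
std::vector<double> gather_all(const std::vector<double>& local, MPI_Comm comm)
{
    int nproc = 1;
    MPI_Comm_size(comm, &nproc);

    // Exchange the per-rank element counts.
    const int sendcount = static_cast<int>(local.size());
    std::vector<int> recvcounts(nproc, 0);
    MPI_Allgather(&sendcount, 1, MPI_INT, recvcounts.data(), 1, MPI_INT, comm);

    // Displacements are the running sum of the counts.
    std::vector<int> displs(nproc, 0);
    for (int i = 1; i < nproc; ++i) { displs[i] = displs[i - 1] + recvcounts[i - 1]; }

    std::vector<double> global(displs.back() + recvcounts.back());
    Parallel_Common::gatherv_data(local.data(), sendcount, global.data(),
                                  recvcounts.data(), displs.data(), comm);
    return global;
}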

◆ gatherv_data() [2/4]

void Parallel_Common::gatherv_data ( const float *  sendbuf,
int  sendcount,
float *  recvbuf,
const int *  recvcounts,
const int *  displs,
MPI_Comm &  comm 
)

◆ gatherv_data() [3/4]

void Parallel_Common::gatherv_data ( const std::complex< double > *  sendbuf,
int  sendcount,
std::complex< double > *  recvbuf,
const int *  recvcounts,
const int *  displs,
MPI_Comm &  comm 
)

◆ gatherv_data() [4/4]

void Parallel_Common::gatherv_data ( const std::complex< float > *  sendbuf,
int  sendcount,
std::complex< float > *  recvbuf,
const int *  recvcounts,
const int *  displs,
MPI_Comm &  comm 
)

◆ gatherv_dev()

template<typename T , typename Device >
void Parallel_Common::gatherv_dev ( const T *  sendbuf,
int  sendcount,
T *  recvbuf,
const int *  recvcounts,
const int *  displs,
MPI_Comm &  comm,
T *  tmp_sspace = nullptr,
T *  tmp_rspace = nullptr 
)

◆ isend_data() [1/4]

void Parallel_Common::isend_data ( const double *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm,
MPI_Request *  request 
)

◆ isend_data() [2/4]

void Parallel_Common::isend_data ( const float *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm,
MPI_Request *  request 
)

◆ isend_data() [3/4]

void Parallel_Common::isend_data ( const std::complex< double > *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm,
MPI_Request *  request 
)

◆ isend_data() [4/4]

void Parallel_Common::isend_data ( const std::complex< float > *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm,
MPI_Request *  request 
)

◆ isend_dev()

template<typename T , typename Device >
void Parallel_Common::isend_dev ( const T *  object,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm,
MPI_Request *  request,
T *  send_space 
)

non-blocking send of data residing on the Device

Note
the data in send_space must not be modified before it has been received (i.e. until the send request has completed)
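
A sketch of posting a non-blocking device send that follows the note above: the caller-provided send_space is a CPU buffer that must stay untouched until the request completes. The header path, the DEVICE_GPU tag, and the buffer sizing are assumptions for illustration.

// Sketch only: header path assumed.
#include "module_base/parallel_device.h"
#include <mpi.h>
#include <vector>

// Post a non-blocking send of count device-resident doubles to rank dest.
// send_space must not be reused until the returned request has completed.
void post_send(const double* dev_buf, int count, int dest, int tag,
               MPI_Comm& comm, std::vector<double>& send_space, MPI_Request& request)
{
    send_space.resize(count);  // assumed: one CPU staging element per sent element
    Parallel_Common::isend_dev<double, base_device::DEVICE_GPU>(
        dev_buf, count, dest, tag, comm, &request, send_space.data());
}

// Later, once completion is required:
//   MPI_Wait(&request, MPI_STATUS_IGNORE);   // only now may send_space be modified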

◆ recv_data() [1/4]

void Parallel_Common::recv_data ( double *  buf,
int  count,
int  source,
int  tag,
MPI_Comm &  comm,
MPI_Status *  status 
)

◆ recv_data() [2/4]

void Parallel_Common::recv_data ( float *  buf,
int  count,
int  source,
int  tag,
MPI_Comm &  comm,
MPI_Status *  status 
)

◆ recv_data() [3/4]

void Parallel_Common::recv_data ( std::complex< double > *  buf,
int  count,
int  source,
int  tag,
MPI_Comm &  comm,
MPI_Status *  status 
)

◆ recv_data() [4/4]

void Parallel_Common::recv_data ( std::complex< float > *  buf,
int  count,
int  source,
int  tag,
MPI_Comm &  comm,
MPI_Status *  status 
)

◆ recv_dev()

template<typename T , typename Device >
void Parallel_Common::recv_dev ( T *  object,
int  count,
int  source,
int  tag,
MPI_Comm &  comm,
MPI_Status *  status,
T *  tmp_space = nullptr 
)

receive data into a buffer residing on the Device


◆ reduce_data() [1/4]

void Parallel_Common::reduce_data ( double *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ reduce_data() [2/4]

void Parallel_Common::reduce_data ( float *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ reduce_data() [3/4]

void Parallel_Common::reduce_data ( std::complex< double > *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ reduce_data() [4/4]

void Parallel_Common::reduce_data ( std::complex< float > *  object,
const int &  n,
const MPI_Comm &  comm 
)

◆ reduce_dev()

template<typename T , typename Device >
void Parallel_Common::reduce_dev ( T *  object,
const int &  n,
const MPI_Comm &  comm,
T *  tmp_space = nullptr 
)
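
A sketch of reducing a device-resident array across comm. The reduction operation and where the result lands are not documented on this page; the sketch assumes an in-place element-wise sum visible on every rank, and the header path is likewise an assumption.

// Sketch only: header path assumed.
#include "module_base/parallel_device.h"
#include <mpi.h>

// Accumulate partial contributions stored on the device across all ranks.
void accumulate(double* dev_data, int n, const MPI_Comm& comm)
{
    Parallel_Common::reduce_dev<double, base_device::DEVICE_CPU>(dev_data, n, comm);
}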

◆ send_data() [1/4]

void Parallel_Common::send_data ( const double *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm 
)
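
The blocking send_data/recv_data wrappers pair in the usual MPI fashion. A minimal sketch with illustrative ranks and tag (not taken from this page); the header path is an assumption.

// Sketch only: header path assumed.
#include "module_base/parallel_common.h"
#include <mpi.h>
#include <vector>

// Rank 0 sends its buffer to rank 1, which overwrites its own copy
// (both ranks are assumed to hold buffers of equal size).
void exchange(std::vector<double>& buf, int rank, MPI_Comm& comm)
{
    const int tag = 0;
    if (rank == 0)
    {
        Parallel_Common::send_data(buf.data(), static_cast<int>(buf.size()), /*dest=*/1, tag, comm);
    }
    else if (rank == 1)
    {
        MPI_Status status;
        Parallel_Common::recv_data(buf.data(), static_cast<int>(buf.size()), /*source=*/0, tag, comm, &status);
    }
}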

◆ send_data() [2/4]

void Parallel_Common::send_data ( const float *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm 
)

◆ send_data() [3/4]

void Parallel_Common::send_data ( const std::complex< double > *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm 
)

◆ send_data() [4/4]

void Parallel_Common::send_data ( const std::complex< float > *  buf,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm 
)

◆ send_dev()

template<typename T , typename Device >
void Parallel_Common::send_dev ( const T *  object,
int  count,
int  dest,
int  tag,
MPI_Comm &  comm,
T *  tmp_space = nullptr 
)

send data residing on the Device

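
The device variants pair the same way as their host counterparts. A sketch with illustrative ranks and tag, assuming (not stated on this page) that a nullptr tmp_space lets the helpers manage their own CPU staging buffers; the header path and DEVICE_GPU tag are likewise assumptions.

// Sketch only: header path assumed.
#include "module_base/parallel_device.h"
#include <mpi.h>

// Blocking point-to-point exchange of a device-resident buffer:
// rank 0 sends count doubles to rank 1, which receives them in place.
void exchange_dev(double* dev_buf, int count, int rank, MPI_Comm& comm)
{
    const int tag = 7;  // illustrative tag
    if (rank == 0)
    {
        Parallel_Common::send_dev<double, base_device::DEVICE_GPU>(dev_buf, count, /*dest=*/1, tag, comm);
    }
    else if (rank == 1)
    {
        MPI_Status status;
        Parallel_Common::recv_dev<double, base_device::DEVICE_GPU>(dev_buf, count, /*source=*/0, tag, comm, &status);
    }
}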