Eigenvalue SoLvers for Petaflop-Applications (ELPA) 2019.05.002
elpa_generated_legacy.h File Reference
#include <complex.h>


Functions

int elpa_solve_evp_real_double (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)
 C interface to driver function "elpa_solve_evp_real_double". More...
 
int elpa_solve_evp_real_single (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)
 C interface to driver function "elpa_solve_evp_real_single". More...
 
int elpa_solve_evp_complex_double (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)
 C interface to driver function "elpa_solve_evp_complex_double". More...
 
int elpa_solve_evp_complex_single (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)
 C interface to driver function "elpa_solve_evp_complex_single". More...
 
int elpa_get_communicators (int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols)
 C interface to create ELPA communicators. More...
 
int elpa_solve_evp_real_1stage_double_precision (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
 C interface to solve the double-precision real eigenvalue problem with 1-stage solver. More...
 
int elpa_solve_evp_real_1stage_single_precision (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
 C interface to solve the single-precision real eigenvalue problem with 1-stage solver. More...
 
int elpa_solve_evp_complex_1stage_double_precision (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
 C interface to solve the double-precision complex eigenvalue problem with 1-stage solver. More...
 
int elpa_solve_evp_complex_1stage_single_precision (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
 C interface to solve the single-precision complex eigenvalue problem with 1-stage solver. More...
 
int elpa_solve_tridi_double (int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_solve_tridi_single (int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_mult_at_b_real_double (char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols)
 
int elpa_mult_at_b_real_single (char uplo_a, char uplo_c, int na, int ncb, float *a, int lda, int ldaCols, float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, float *c, int ldc, int ldcCols)
 
int elpa_mult_ah_b_complex_double (char uplo_a, char uplo_c, int na, int ncb, double complex *a, int lda, int ldaCols, double complex *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double complex *c, int ldc, int ldcCols)
 
int elpa_mult_ah_b_complex_single (char uplo_a, char uplo_c, int na, int ncb, complex float *a, int lda, int ldaCols, complex float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, complex float *c, int ldc, int ldcCols)
 
int elpa_invert_trm_real_double (int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_invert_trm_real_single (int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_invert_trm_complex_double (int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_invert_trm_complex_single (int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_cholesky_real_double (int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_cholesky_real_single (int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_cholesky_complex_double (int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_cholesky_complex_single (int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
 
int elpa_solve_evp_real_2stage_double_precision (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)
 C interface to solve the double-precision real eigenvalue problem with 2-stage solver. More...
 
int elpa_solve_evp_real_2stage_single_precision (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)
 C interface to solve the single-precision real eigenvalue problem with 2-stage solver. More...
 
int elpa_solve_evp_complex_2stage_double_precision (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)
 C interface to solve the double-precision complex eigenvalue problem with 2-stage solver. More...
 
int elpa_solve_evp_complex_2stage_single_precision (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)
 C interface to solve the single-precision complex eigenvalue problem with 2-stage solver. More...
 

Function Documentation

◆ elpa_cholesky_complex_double()

int elpa_cholesky_complex_double ( int  na,
double complex *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_cholesky_complex_single()

int elpa_cholesky_complex_single ( int  na,
complex float *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_cholesky_real_double()

int elpa_cholesky_real_double ( int  na,
double *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_cholesky_real_single()

int elpa_cholesky_real_single ( int  na,
float *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_get_communicators()

int elpa_get_communicators ( int  mpi_comm_world,
int  my_prow,
int  my_pcol,
int *  mpi_comm_rows,
int *  mpi_comm_cols 
)

C interface to create ELPA communicators.

Parameters
mpi_comm_world - MPI global communicator (in)
my_prow - Row coordinate of the calling process in the process grid (in)
my_pcol - Column coordinate of the calling process in the process grid (in)
mpi_comm_rows - Communicator for communicating within rows of processes (out)
mpi_comm_cols - Communicator for communicating within columns of processes (out)
Returns
int - Error value returned by the mpi_comm_split function
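A minimal usage sketch (not part of the original documentation): it assumes a simple row-major mapping of MPI ranks onto an np_rows x np_cols process grid, and it passes a Fortran-style integer communicator handle obtained with MPI_Comm_c2f, since the legacy C interface takes int communicators. The include path and the grid mapping are assumptions; they must match your ELPA installation and the distribution of your matrices.

#include <mpi.h>
#include <math.h>
#include <elpa/elpa_generated_legacy.h>   /* assumed include path; adjust to your installation */

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int world_rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  /* Pick a near-square process grid with np_rows * np_cols == world_size. */
  int np_cols;
  for (np_cols = (int)sqrt((double)world_size); np_cols > 1; np_cols--)
    if (world_size % np_cols == 0) break;
  int np_rows = world_size / np_cols;

  /* Row-major rank-to-grid mapping (an assumption; it must agree with how the
     block-cyclic matrices are distributed). */
  int my_prow = world_rank / np_cols;
  int my_pcol = world_rank % np_cols;

  /* The legacy interface expects an integer (Fortran) communicator handle. */
  int mpi_comm_world_f = (int) MPI_Comm_c2f(MPI_COMM_WORLD);

  int mpi_comm_rows, mpi_comm_cols;
  int err = elpa_get_communicators(mpi_comm_world_f, my_prow, my_pcol,
                                   &mpi_comm_rows, &mpi_comm_cols);
  if (err != 0) {
    /* mpi_comm_split reported an error */
  }

  /* mpi_comm_rows and mpi_comm_cols can now be passed to the ELPA solvers. */
  MPI_Finalize();
  return 0;
}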

◆ elpa_invert_trm_complex_double()

int elpa_invert_trm_complex_double ( int  na,
double complex *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_invert_trm_complex_single()

int elpa_invert_trm_complex_single ( int  na,
complex float *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_invert_trm_real_double()

int elpa_invert_trm_real_double ( int  na,
double *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_invert_trm_real_single()

int elpa_invert_trm_real_single ( int  na,
float *  a,
int  lda,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_mult_ah_b_complex_double()

int elpa_mult_ah_b_complex_double ( char  uplo_a,
char  uplo_c,
int  na,
int  ncb,
double complex *  a,
int  lda,
int  ldaCols,
double complex *  b,
int  ldb,
int  ldbCols,
int  nblk,
int  mpi_comm_rows,
int  mpi_comm_cols,
double complex *  c,
int  ldc,
int  ldcCols 
)

◆ elpa_mult_ah_b_complex_single()

int elpa_mult_ah_b_complex_single ( char  uplo_a,
char  uplo_c,
int  na,
int  ncb,
complex float *  a,
int  lda,
int  ldaCols,
complex float *  b,
int  ldb,
int  ldbCols,
int  nblk,
int  mpi_comm_rows,
int  mpi_comm_cols,
complex float *  c,
int  ldc,
int  ldcCols 
)

◆ elpa_mult_at_b_real_double()

int elpa_mult_at_b_real_double ( char  uplo_a,
char  uplo_c,
int  na,
int  ncb,
double *  a,
int  lda,
int  ldaCols,
double *  b,
int  ldb,
int  ldbCols,
int  nblk,
int  mpi_comm_rows,
int  mpi_comm_cols,
double *  c,
int  ldc,
int  ldcCols 
)

◆ elpa_mult_at_b_real_single()

int elpa_mult_at_b_real_single ( char  uplo_a,
char  uplo_c,
int  na,
int  ncb,
float *  a,
int  lda,
int  ldaCols,
float *  b,
int  ldb,
int  ldbCols,
int  nblk,
int  mpi_comm_rows,
int  mpi_comm_cols,
float *  c,
int  ldc,
int  ldcCols 
)

◆ elpa_solve_evp_complex_1stage_double_precision()

int elpa_solve_evp_complex_1stage_double_precision ( int  na,
int  nev,
double complex *  a,
int  lda,
double *  ev,
double complex *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  useGPU 
)

C interface to solve the double-precision complex eigenvalue problem with 1-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_complex_1stage_single_precision()

int elpa_solve_evp_complex_1stage_single_precision ( int  na,
int  nev,
complex float *  a,
int  lda,
float *  ev,
complex float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  useGPU 
)

C interface to solve the single-precision complex eigenvalue problem with 1-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_complex_2stage_double_precision()

int elpa_solve_evp_complex_2stage_double_precision ( int  na,
int  nev,
double complex *  a,
int  lda,
double *  ev,
double complex *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_COMPLEX_ELPA_KERNEL_API,
int  useGPU 
)

C interface to solve the double-precision complex eigenvalue problem with 2-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_complex_2stage_single_precision()

int elpa_solve_evp_complex_2stage_single_precision ( int  na,
int  nev,
complex float *  a,
int  lda,
float *  ev,
complex float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_COMPLEX_ELPA_KERNEL_API,
int  useGPU 
)

C interface to solve the single-precision complex eigenvalue problem with 2-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_complex_double()

int elpa_solve_evp_complex_double ( int  na,
int  nev,
double complex *  a,
int  lda,
double *  ev,
double complex *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_COMPLEX_ELPA_KERNEL_API,
int  useGPU,
char *  method 
)

C interface to driver function "elpa_solve_evp_complex_double".

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useGPU - Use GPU (1 = yes, 0 = no)
method - Choose whether to use the ELPA 1-stage or 2-stage solver. Possible values: "1stage" => use the ELPA 1-stage solver; "2stage" => use the ELPA 2-stage solver; "auto" => (at the moment) use the ELPA 2-stage solver
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_complex_single()

int elpa_solve_evp_complex_single ( int  na,
int  nev,
complex float *  a,
int  lda,
float *  ev,
complex float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_COMPLEX_ELPA_KERNEL_API,
int  useGPU,
char *  method 
)

C interface to driver function "elpa_solve_evp_complex_single".

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useGPU - Use GPU (1 = yes, 0 = no)
method - Choose whether to use the ELPA 1-stage or 2-stage solver. Possible values: "1stage" => use the ELPA 1-stage solver; "2stage" => use the ELPA 2-stage solver; "auto" => (at the moment) use the ELPA 2-stage solver
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_real_1stage_double_precision()

int elpa_solve_evp_real_1stage_double_precision ( int  na,
int  nev,
double *  a,
int  lda,
double *  ev,
double *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  useGPU 
)

C interface to solve the double-precision real eigenvalue problem with 1-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0
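A hedged calling sketch (not part of the original documentation), continuing the variables from the elpa_get_communicators example above. The sizes are illustrative placeholders: local_rows and local_cols must be the true local dimensions of the block-cyclic distribution (for example obtained from ScaLAPACK's numroc), and the local part of a must already be filled.

#include <stdlib.h>   /* fragment; assumes the includes and communicator setup shown above */

int na = 1000, nev = 100, nblk = 16;      /* illustrative problem sizes */
int local_rows = 500, local_cols = 500;   /* placeholders, e.g. for a 2x2 process grid */

double *a  = malloc((size_t)local_rows * local_cols * sizeof(double));  /* local part of A (must be set) */
double *q  = malloc((size_t)local_rows * local_cols * sizeof(double));  /* receives local part of the eigenvectors */
double *ev = malloc((size_t)na * sizeof(double));                       /* receives all na eigenvalues */

int mpi_comm_all = (int) MPI_Comm_c2f(MPI_COMM_WORLD);  /* Fortran handle, as in the sketch above */

int ret = elpa_solve_evp_real_1stage_double_precision(
    na, nev, a, local_rows, ev, q, local_rows,    /* lda = ldq = local_rows here */
    nblk, local_cols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,
    0 /* useGPU: 0 = no */);
if (ret == 1) {
  /* error, per the Returns note above */
}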

◆ elpa_solve_evp_real_1stage_single_precision()

int elpa_solve_evp_real_1stage_single_precision ( int  na,
int  nev,
float *  a,
int  lda,
float *  ev,
float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  useGPU 
)

C interface to solve the single-precision real eigenvalue problem with 1-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_real_2stage_double_precision()

int elpa_solve_evp_real_2stage_double_precision ( int  na,
int  nev,
double *  a,
int  lda,
double *  ev,
double *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_REAL_ELPA_KERNEL_API,
int  useQR,
int  useGPU 
)

C interface to solve the double-precision real eigenvalue problem with 2-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useQR - Use QR decomposition (1 = yes, 0 = no)
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0
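A hedged sketch, continuing the variables from the 1-stage example above: the 2-stage call differs only in the trailing arguments (kernel selector, QR flag, GPU flag). The kernel constant name used here is an assumption; the actual ELPA2 kernel identifiers are defined in the headers of your ELPA installation.

/* Placeholder kernel selector: ELPA2_REAL_KERNEL_GENERIC is an assumed constant
   name; check the kernel constants shipped with your ELPA headers. */
int kernel = ELPA2_REAL_KERNEL_GENERIC;

int ret2 = elpa_solve_evp_real_2stage_double_precision(
    na, nev, a, local_rows, ev, q, local_rows,
    nblk, local_cols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,
    kernel,
    0 /* useQR: 0 = no */,
    0 /* useGPU: 0 = no */);
if (ret2 == 1) {
  /* error, per the Returns note above */
}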

◆ elpa_solve_evp_real_2stage_single_precision()

int elpa_solve_evp_real_2stage_single_precision ( int  na,
int  nev,
float *  a,
int  lda,
float *  ev,
float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_REAL_ELPA_KERNEL_API,
int  useQR,
int  useGPU 
)

C interface to solve the single-precision real eigenvalue problem with 2-stage solver.

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useQR - Use QR decomposition (1 = yes, 0 = no)
useGPU - Use GPU (1 = yes, 0 = no)
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_evp_real_double()

int elpa_solve_evp_real_double ( int  na,
int  nev,
double *  a,
int  lda,
double *  ev,
double *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_REAL_ELPA_KERNEL_API,
int  useQR,
int  useGPU,
char *  method 
)

C interface to driver function "elpa_solve_evp_real_double".

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useQR - Use QR decomposition (1 = yes, 0 = no)
useGPU - Use GPU (1 = yes, 0 = no)
method - Choose whether to use the ELPA 1-stage or 2-stage solver. Possible values: "1stage" => use the ELPA 1-stage solver; "2stage" => use the ELPA 2-stage solver; "auto" => (at the moment) use the ELPA 2-stage solver
Returns
int: 1 if an error occurred, otherwise 0
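A hedged sketch of calling the driver, reusing the placeholder variables from the examples above; the method string lets the driver pick between the 1-stage and 2-stage paths:

/* "auto" lets the driver decide; "1stage" / "2stage" force a particular solver.
   The kernel selector and useQR only matter when the 2-stage path is taken. */
int ret3 = elpa_solve_evp_real_double(
    na, nev, a, local_rows, ev, q, local_rows,
    nblk, local_cols, mpi_comm_rows, mpi_comm_cols, mpi_comm_all,
    kernel /* assumed ELPA2 kernel constant, see the 2-stage sketch above */,
    0 /* useQR */, 0 /* useGPU */,
    "auto");
if (ret3 == 1) {
  /* error, per the Returns note above */
}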

◆ elpa_solve_evp_real_single()

int elpa_solve_evp_real_single ( int  na,
int  nev,
float *  a,
int  lda,
float *  ev,
float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  mpi_comm_all,
int  THIS_REAL_ELPA_KERNEL_API,
int  useQR,
int  useGPU,
char *  method 
)

C interface to driver function "elpa_solve_evp_real_single".

Parameters
na - Order of matrix a
nev - Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a - Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one half as in ScaLAPACK).
lda - Leading dimension of a
ev - On output: eigenvalues of a (array of dimension na); every process gets the complete set
q - On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq - Leading dimension of q
nblk - Block size of the cyclic distribution; must be the same in both directions
matrixCols - Distributed number of matrix columns
mpi_comm_rows - MPI communicator for rows
mpi_comm_cols - MPI communicator for columns
mpi_comm_all - MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API - Specify the ELPA2 kernel to use via the API
useQR - Use QR decomposition (1 = yes, 0 = no)
useGPU - Use GPU (1 = yes, 0 = no)
method - Choose whether to use the ELPA 1-stage or 2-stage solver. Possible values: "1stage" => use the ELPA 1-stage solver; "2stage" => use the ELPA 2-stage solver; "auto" => (at the moment) use the ELPA 2-stage solver
Returns
int: 1 if an error occurred, otherwise 0

◆ elpa_solve_tridi_double()

int elpa_solve_tridi_double ( int  na,
int  nev,
double *  d,
double *  e,
double *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)

◆ elpa_solve_tridi_single()

int elpa_solve_tridi_single ( int  na,
int  nev,
float *  d,
float *  e,
float *  q,
int  ldq,
int  nblk,
int  matrixCols,
int  mpi_comm_rows,
int  mpi_comm_cols,
int  wantDebug 
)
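
The two solve_tridi routines carry no parameter documentation here. By analogy with the underlying Fortran routine, d presumably holds the diagonal and e the sub-diagonal of a symmetric tridiagonal matrix, with d returning the eigenvalues and q the first nev eigenvectors in block-cyclic layout; verify this against your ELPA version. A hedged sketch under that assumption, reusing the placeholder variables from the examples above:

/* Assumed semantics: d[0..na-1] = diagonal (overwritten with eigenvalues on return),
   e[0..na-2] = sub-diagonal, q_tri = local part of the eigenvector matrix. */
double *d     = malloc((size_t)na * sizeof(double));
double *e     = malloc((size_t)na * sizeof(double));
double *q_tri = malloc((size_t)local_rows * local_cols * sizeof(double));

int ret4 = elpa_solve_tridi_double(
    na, nev, d, e, q_tri, local_rows,
    nblk, local_cols, mpi_comm_rows, mpi_comm_cols,
    0 /* wantDebug: 0 = quiet */);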