Eigenvalue SoLvers for Petaflop-Applications (ELPA)
2019.05.002
#include <complex.h>
Functions

int elpa_solve_evp_real_double (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)
    C interface to driver function "elpa_solve_evp_real_double".
int elpa_solve_evp_real_single (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)
    C interface to driver function "elpa_solve_evp_real_single".
int elpa_solve_evp_complex_double (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)
    C interface to driver function "elpa_solve_evp_complex_double".
int elpa_solve_evp_complex_single (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)
    C interface to driver function "elpa_solve_evp_complex_single".
int elpa_get_communicators (int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols)
    C interface to create ELPA communicators.
int elpa_solve_evp_real_1stage_double_precision (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
    C interface to solve the double-precision real eigenvalue problem with the 1-stage solver.
int elpa_solve_evp_real_1stage_single_precision (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
    C interface to solve the single-precision real eigenvalue problem with the 1-stage solver.
int elpa_solve_evp_complex_1stage_double_precision (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
    C interface to solve the double-precision complex eigenvalue problem with the 1-stage solver.
int elpa_solve_evp_complex_1stage_single_precision (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
    C interface to solve the single-precision complex eigenvalue problem with the 1-stage solver.
int elpa_solve_tridi_double (int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_tridi_single (int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_mult_at_b_real_double (char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols)
int elpa_mult_at_b_real_single (char uplo_a, char uplo_c, int na, int ncb, float *a, int lda, int ldaCols, float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, float *c, int ldc, int ldcCols)
int elpa_mult_ah_b_complex_double (char uplo_a, char uplo_c, int na, int ncb, double complex *a, int lda, int ldaCols, double complex *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double complex *c, int ldc, int ldcCols)
int elpa_mult_ah_b_complex_single (char uplo_a, char uplo_c, int na, int ncb, complex float *a, int lda, int ldaCols, complex float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, complex float *c, int ldc, int ldcCols)
int elpa_invert_trm_real_double (int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_invert_trm_real_single (int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_invert_trm_complex_double (int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_invert_trm_complex_single (int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_cholesky_real_double (int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_cholesky_real_single (int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_cholesky_complex_double (int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_cholesky_complex_single (int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_evp_real_2stage_double_precision (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)
    C interface to solve the double-precision real eigenvalue problem with the 2-stage solver.
int elpa_solve_evp_real_2stage_single_precision (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)
    C interface to solve the single-precision real eigenvalue problem with the 2-stage solver.
int elpa_solve_evp_complex_2stage_double_precision (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)
    C interface to solve the double-precision complex eigenvalue problem with the 2-stage solver.
int elpa_solve_evp_complex_2stage_single_precision (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)
    C interface to solve the single-precision complex eigenvalue problem with the 2-stage solver.
int elpa_cholesky_complex_double (int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_cholesky_complex_single (int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_cholesky_real_double (int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_cholesky_real_single (int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
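The elpa_cholesky_* routines compute a Cholesky factorization of a distributed real symmetric or complex Hermitian positive-definite matrix, overwriting a in place. Below is a minimal sketch of the double-precision real call; the header path and the 1-on-success return convention are assumptions about the legacy interface, and lda/matrixCols must describe the local block of the block-cyclic layout (communicators as created by elpa_get_communicators, described below).

#include <elpa/elpa_legacy.h>   /* header path may differ per installation */

/* Factor a distributed SPD matrix in place (sketch). */
int factor_spd(int na, double *a, int lda, int nblk, int matrixCols,
               int mpi_comm_rows, int mpi_comm_cols)
{
    /* Assumed convention: legacy ELPA routines return 1 on success, 0 on failure. */
    return elpa_cholesky_real_double(na, a, lda, nblk, matrixCols,
                                     mpi_comm_rows, mpi_comm_cols,
                                     /* wantDebug */ 0);
}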
int elpa_get_communicators (int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols)

C interface to create ELPA communicators.

mpi_comm_world | MPI global communicator (in)
my_prow | Row coordinate of the calling process in the process grid (in)
my_pcol | Column coordinate of the calling process in the process grid (in)
mpi_comm_rows | Communicator for communicating within rows of processes (out)
mpi_comm_cols | Communicator for communicating within columns of processes (out)
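A typical setup derives the process grid from the world communicator once and reuses the resulting row/column communicators in every subsequent ELPA call. The sketch below assumes the legacy convention that C callers pass Fortran communicator handles obtained with MPI_Comm_c2f, and uses a column-major np_rows x np_cols grid; the grid arithmetic is illustrative, not part of the API.

#include <mpi.h>
#include <elpa/elpa_legacy.h>   /* header path may differ per installation */

/* Create ELPA row/column communicators for a np_rows x np_cols process grid. */
void setup_elpa_comms(int np_rows, int np_cols,
                      int *mpi_comm_rows, int *mpi_comm_cols)
{
    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    /* Column-major grid coordinates, matching common ScaLAPACK layouts. */
    int my_prow = my_rank % np_rows;
    int my_pcol = my_rank / np_rows;

    elpa_get_communicators(MPI_Comm_c2f(MPI_COMM_WORLD),
                           my_prow, my_pcol,
                           mpi_comm_rows, mpi_comm_cols);
}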
int elpa_invert_trm_complex_double (int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_invert_trm_complex_single (int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_invert_trm_real_double (int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_invert_trm_real_single (int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
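The elpa_invert_trm_* routines invert a distributed triangular matrix in place, e.g. the factor produced by the Cholesky routines above (that the upper triangle is the one operated on is an assumption carried over from ELPA's Fortran documentation). A sketch continuing the Cholesky example:

/* Invert the triangular Cholesky factor in place (sketch). */
int invert_factor(int na, double *a, int lda, int nblk, int matrixCols,
                  int mpi_comm_rows, int mpi_comm_cols)
{
    return elpa_invert_trm_real_double(na, a, lda, nblk, matrixCols,
                                       mpi_comm_rows, mpi_comm_cols,
                                       /* wantDebug */ 0);
}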
int elpa_mult_ah_b_complex_double (char uplo_a, char uplo_c, int na, int ncb, double complex *a, int lda, int ldaCols, double complex *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double complex *c, int ldc, int ldcCols)

int elpa_mult_ah_b_complex_single (char uplo_a, char uplo_c, int na, int ncb, complex float *a, int lda, int ldaCols, complex float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, complex float *c, int ldc, int ldcCols)

int elpa_mult_at_b_real_double (char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols)

int elpa_mult_at_b_real_single (char uplo_a, char uplo_c, int na, int ncb, float *a, int lda, int ldaCols, float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, float *c, int ldc, int ldcCols)
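elpa_mult_at_b_real_* computes C = A^T * B and elpa_mult_ah_b_complex_* computes C = A^H * B for distributed matrices. uplo_a indicates whether only one triangle of A is stored ('U' or 'L'), and uplo_c which part of C is needed; in the sketch below a character outside 'U'/'L' requests the full matrix, which is an assumption carried over from the Fortran documentation.

/* C := A^T * B for distributed double-precision matrices (sketch). */
int mult_full(int na, int ncb,
              double *a, int lda, int ldaCols,
              double *b, int ldb, int ldbCols,
              double *c, int ldc, int ldcCols,
              int nblk, int mpi_comm_rows, int mpi_comm_cols)
{
    return elpa_mult_at_b_real_double('F', 'F', na, ncb,
                                      a, lda, ldaCols,
                                      b, ldb, ldbCols,
                                      nblk, mpi_comm_rows, mpi_comm_cols,
                                      c, ldc, ldcCols);
}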
int elpa_solve_evp_complex_1stage_double_precision (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)

C interface to solve the double-precision complex eigenvalue problem with the 1-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
useGPU | Use GPU (1 = yes, 0 = no)
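All four 1-stage variants follow the same pattern; a sketch of the double-precision complex call is below. The allocation sizes use the local dimensions, the matrix fill is left as a placeholder, and passing MPI_Comm_c2f(MPI_COMM_WORLD) for mpi_comm_all follows the assumed legacy convention for Fortran communicator handles.

#include <complex.h>
#include <stdlib.h>
#include <mpi.h>
#include <elpa/elpa_legacy.h>   /* header path may differ per installation */

/* Compute the nev smallest eigenpairs of a distributed Hermitian matrix (sketch). */
int solve_1stage(int na, int nev, double complex *a, int lda,
                 int nblk, int matrixCols,
                 int mpi_comm_rows, int mpi_comm_cols)
{
    double *ev = malloc(sizeof(double) * na);   /* complete eigenvalue set */
    double complex *q = malloc(sizeof(double complex) * (size_t)lda * matrixCols);

    int ok = elpa_solve_evp_complex_1stage_double_precision(
        na, nev, a, lda, ev, q, /* ldq */ lda, nblk, matrixCols,
        mpi_comm_rows, mpi_comm_cols, MPI_Comm_c2f(MPI_COMM_WORLD),
        /* useGPU */ 0);

    /* ... use ev[0..nev-1] and the first nev eigenvector columns of q ... */
    free(ev);
    free(q);
    return ok;
}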
int elpa_solve_evp_complex_1stage_single_precision (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)

C interface to solve the single-precision complex eigenvalue problem with the 1-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
useGPU | Use GPU (1 = yes, 0 = no)
int elpa_solve_evp_complex_2stage_double_precision (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)

C interface to solve the double-precision complex eigenvalue problem with the 2-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useGPU | Use GPU (1 = yes, 0 = no)
int elpa_solve_evp_complex_2stage_single_precision (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)

C interface to solve the single-precision complex eigenvalue problem with the 2-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useGPU | Use GPU (1 = yes, 0 = no)
int elpa_solve_evp_complex_double (int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)

C interface to driver function "elpa_solve_evp_complex_double".

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useGPU | Use GPU (1 = yes, 0 = no)
method | Choose the ELPA 1stage or 2stage solver. Possible values: "1stage" => use the ELPA 1stage solver; "2stage" => use the ELPA 2stage solver; "auto" => (at the moment) use the ELPA 2stage solver
int elpa_solve_evp_complex_single (int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)

C interface to driver function "elpa_solve_evp_complex_single".

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_COMPLEX_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useGPU | Use GPU (1 = yes, 0 = no)
method | Choose the ELPA 1stage or 2stage solver. Possible values: "1stage" => use the ELPA 1stage solver; "2stage" => use the ELPA 2stage solver; "auto" => (at the moment) use the ELPA 2stage solver
int elpa_solve_evp_real_1stage_double_precision (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)

C interface to solve the double-precision real eigenvalue problem with the 1-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
useGPU | Use GPU (1 = yes, 0 = no)
int elpa_solve_evp_real_1stage_single_precision (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)

C interface to solve the single-precision real eigenvalue problem with the 1-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
useGPU | Use GPU (1 = yes, 0 = no)
int elpa_solve_evp_real_2stage_double_precision (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)

C interface to solve the double-precision real eigenvalue problem with the 2-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useQR | Use QR decomposition (1 = yes, 0 = no)
useGPU | Use GPU (1 = yes, 0 = no)
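Compared with the 1-stage interface, the 2-stage solver additionally takes the kernel selector and, in the real versions, the useQR flag. A sketch follows; the kernel argument is a placeholder for one of the kernel constants shipped with your ELPA headers, and the communicator convention is the same assumption as above.

#include <stdlib.h>
#include <mpi.h>
#include <elpa/elpa_legacy.h>   /* header path may differ per installation */

/* 2-stage solve with QR-based band reduction enabled (sketch). */
int solve_2stage(int na, int nev, double *a, int lda,
                 int nblk, int matrixCols,
                 int mpi_comm_rows, int mpi_comm_cols, int kernel)
{
    double *ev = malloc(sizeof(double) * na);
    double *q  = malloc(sizeof(double) * (size_t)lda * matrixCols);

    int ok = elpa_solve_evp_real_2stage_double_precision(
        na, nev, a, lda, ev, q, /* ldq */ lda, nblk, matrixCols,
        mpi_comm_rows, mpi_comm_cols, MPI_Comm_c2f(MPI_COMM_WORLD),
        kernel,         /* one of the ELPA2 real kernel constants */
        /* useQR */ 1,
        /* useGPU */ 0);

    free(ev);
    free(q);
    return ok;
}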
int elpa_solve_evp_real_2stage_single_precision (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)

C interface to solve the single-precision real eigenvalue problem with the 2-stage solver.

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useQR | Use QR decomposition (1 = yes, 0 = no)
useGPU | Use GPU (1 = yes, 0 = no)
int elpa_solve_evp_real_double (int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)

C interface to driver function "elpa_solve_evp_real_double".

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useQR | Use QR decomposition (1 = yes, 0 = no)
useGPU | Use GPU (1 = yes, 0 = no)
method | Choose the ELPA 1stage or 2stage solver. Possible values: "1stage" => use the ELPA 1stage solver; "2stage" => use the ELPA 2stage solver; "auto" => (at the moment) use the ELPA 2stage solver
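The driver hides the choice between the two solvers behind the method string, so the solver can be selected at run time. A minimal sketch, with the same communicator convention assumed as above:

#include <mpi.h>
#include <elpa/elpa_legacy.h>   /* header path may differ per installation */

/* Let ELPA pick the solver ("auto" currently maps to the 2stage solver). */
int solve_driver(int na, int nev, double *a, int lda,
                 double *ev, double *q, int ldq,
                 int nblk, int matrixCols,
                 int mpi_comm_rows, int mpi_comm_cols, int kernel)
{
    return elpa_solve_evp_real_double(
        na, nev, a, lda, ev, q, ldq, nblk, matrixCols,
        mpi_comm_rows, mpi_comm_cols, MPI_Comm_c2f(MPI_COMM_WORLD),
        kernel, /* useQR */ 0, /* useGPU */ 0, "auto");
}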
int elpa_solve_evp_real_single (int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)

C interface to driver function "elpa_solve_evp_real_single".

na | Order of matrix a
nev | Number of eigenvalues needed. The smallest nev eigenvalues/eigenvectors are calculated.
a | Distributed matrix for which eigenvalues are to be computed. Distribution is as in ScaLAPACK. The full matrix must be set (not only one triangle as in ScaLAPACK).
lda | Leading dimension of a
ev(na) | On output: eigenvalues of a; every processor gets the complete set
q | On output: eigenvectors of a. Distribution is as in ScaLAPACK. Must always be dimensioned to the full size (corresponding to (na,na)), even if only a part of the eigenvalues is needed.
ldq | Leading dimension of q
nblk | Block size of the cyclic distribution; must be the same in both directions
matrixCols | Distributed number of matrix columns
mpi_comm_rows | MPI communicator for rows
mpi_comm_cols | MPI communicator for columns
mpi_comm_all | MPI communicator for the total processor set
THIS_REAL_ELPA_KERNEL_API | Specify the ELPA2 kernel to be used via the API
useQR | Use QR decomposition (1 = yes, 0 = no)
useGPU | Use GPU (1 = yes, 0 = no)
method | Choose the ELPA 1stage or 2stage solver. Possible values: "1stage" => use the ELPA 1stage solver; "2stage" => use the ELPA 2stage solver; "auto" => (at the moment) use the ELPA 2stage solver
int elpa_solve_tridi_double (int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)

int elpa_solve_tridi_single (int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
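elpa_solve_tridi_* solves the symmetric tridiagonal eigenproblem directly. In the sketch below, d holds the diagonal and e the subdiagonal; that d is overwritten with the eigenvalues on output is an assumption based on ELPA's Fortran documentation of solve_tridi.

/* Eigenpairs of a symmetric tridiagonal matrix (sketch).
   d: diagonal, length na (assumed overwritten with the eigenvalues);
   e: subdiagonal, length na with the last entry unused. */
int solve_tridiagonal(int na, int nev, double *d, double *e,
                      double *q, int ldq, int nblk, int matrixCols,
                      int mpi_comm_rows, int mpi_comm_cols)
{
    return elpa_solve_tridi_double(na, nev, d, e, q, ldq, nblk, matrixCols,
                                   mpi_comm_rows, mpi_comm_cols,
                                   /* wantDebug */ 0);
}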