Eigenvalue SoLvers for Petaflop-Applications (ELPA)  2017.05.002
elpa_generated_legacy.h
Go to the documentation of this file.
1 
/* Driver routine: double-precision real eigenvalue problem; "method" selects the solver. */
31  int elpa_solve_evp_real_double(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method);
/* Driver routine: single-precision real eigenvalue problem. */
62  int elpa_solve_evp_real_single(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method);
/* NOTE(review): the repeated "#include <complex.h>" lines are artifacts of the generated header. */
63  #include <complex.h>
/* Driver routine: double-precision complex eigenvalue problem. */
93  int elpa_solve_evp_complex_double(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols,
94  int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method);
95  #include <complex.h>
/* Driver routine: single-precision complex eigenvalue problem. */
125  int elpa_solve_evp_complex_single(int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols,
126  int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method);
127  #include <complex.h>
/* Deprecated old name, will be deleted; use elpa_get_communicators instead. */
136  int get_elpa_row_col_comms(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
137  #include <complex.h>
/* Deprecated old name, will be deleted; use elpa_get_communicators instead. */
146  int get_elpa_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
147  #include <complex.h>
/* Creates the row/column MPI communicators used by the ELPA solvers. */
156  int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols);
/* 1-stage solver: double-precision real eigenvalue problem. */
180  int elpa_solve_evp_real_1stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
/* 1-stage solver: single-precision real eigenvalue problem. */
204  int elpa_solve_evp_real_1stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
/* 1-stage solver: double-precision complex eigenvalue problem. */
228  int elpa_solve_evp_complex_1stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
/* 1-stage solver: single-precision complex eigenvalue problem. */
252  int elpa_solve_evp_complex_1stage_single_precision(int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU);
253  /*
254  \brief C interface to solve double-precision tridiagonal eigensystem with divide and conquer method
255  \details
256 
257  *\param na Matrix dimension
258  *\param nev number of eigenvalues/vectors to be computed
259  *\param d array d(na) on input diagonal elements of tridiagonal matrix, on
260  * output the eigenvalues in ascending order
261  *\param e array e(na) on input subdiagonal elements of matrix, on exit destroyed
262  *\param q on exit : matrix q(ldq,matrixCols) contains the eigenvectors
263  * NOTE(review): presumably only the first nev eigenvectors are valid on exit -- verify against the implementation
264  *\param ldq leading dimension of matrix q
265  *\param nblk blocksize of cyclic distribution, must be the same in both directions!
266  *\param matrixCols columns of matrix q
267  *\param mpi_comm_rows MPI communicator for rows
268  *\param mpi_comm_cols MPI communicator for columns
269  *\param wantDebug give more debug information if 1, else 0
270  *\result success int 1 on success, else 0
271  */
272  int elpa_solve_tridi_double(int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
273  /*
274  \brief C interface to solve single-precision tridiagonal eigensystem with divide and conquer method
275  \details
276 
277  \param na Matrix dimension
278  \param nev number of eigenvalues/vectors to be computed
279  \param d array d(na) on input diagonal elements of tridiagonal matrix, on
280  output the eigenvalues in ascending order
281  \param e array e(na) on input subdiagonal elements of matrix, on exit destroyed
282  \param q on exit : matrix q(ldq,matrixCols) contains the eigenvectors
283  NOTE(review): presumably only the first nev eigenvectors are valid on exit -- verify against the implementation
284  \param ldq leading dimension of matrix q
285  \param nblk blocksize of cyclic distribution, must be the same in both directions!
286  \param matrixCols columns of matrix q
287  \param mpi_comm_rows MPI communicator for rows
288  \param mpi_comm_cols MPI communicator for columns
289  \param wantDebug give more debug information if 1, else 0
290  \result success int 1 on success, else 0
291  */
292  int elpa_solve_tridi_single(int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
/*
 \brief C interface for elpa_mult_at_b_real_double: Performs C := A**T * B for double-precision matrices
 where A is a square matrix (na,na) which is optionally upper or lower triangular,
 B is a (na,ncb) matrix,
 C is a (na,ncb) matrix where optionally only the upper or lower
 triangle may be computed
 \details
 \param uplo_a 'U' if A is upper triangular
 'L' if A is lower triangular
 anything else if A is a full matrix
 Please note: This pertains to the original A (as set in the calling program)
 whereas the transpose of A is used for calculations
 If uplo_a is 'U' or 'L', the other triangle is not used at all,
 i.e. it may contain arbitrary numbers
 \param uplo_c 'U' if only the upper diagonal part of C is needed
 'L' if only the lower diagonal part of C is needed
 anything else if the full matrix C is needed
 Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
 written to a certain extent, i.e. one shouldn't rely on the content there!
 \param na Number of rows/columns of A, number of rows of B and C
 \param ncb Number of columns of B and C
 \param a matrix a
 \param lda leading dimension of matrix a
 \param ldaCols columns of matrix a
 \param b matrix b
 \param ldb leading dimension of matrix b
 \param ldbCols columns of matrix b
 \param nblk blocksize of cyclic distribution, must be the same in both directions!
 \param mpi_comm_rows MPI communicator for rows
 \param mpi_comm_cols MPI communicator for columns
 \param c matrix c
 \param ldc leading dimension of matrix c
 \param ldcCols columns of matrix c
 \result success int report success (1) or failure (0)
*/
/* FIX(review): parameter was misspelled "nlbk"; renamed to "nblk" to match the
   documentation above and every other routine in this interface. Parameter names
   in a prototype are cosmetic in C, so this is ABI/API compatible. */
int elpa_mult_at_b_real_double(char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols);
/*
 \brief C interface for elpa_mult_at_b_real_single: Performs C := A**T * B for single-precision matrices
 where A is a square matrix (na,na) which is optionally upper or lower triangular,
 B is a (na,ncb) matrix,
 C is a (na,ncb) matrix where optionally only the upper or lower
 triangle may be computed
 \details
 \param uplo_a 'U' if A is upper triangular
 'L' if A is lower triangular
 anything else if A is a full matrix
 Please note: This pertains to the original A (as set in the calling program)
 whereas the transpose of A is used for calculations
 If uplo_a is 'U' or 'L', the other triangle is not used at all,
 i.e. it may contain arbitrary numbers
 \param uplo_c 'U' if only the upper diagonal part of C is needed
 'L' if only the lower diagonal part of C is needed
 anything else if the full matrix C is needed
 Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
 written to a certain extent, i.e. one shouldn't rely on the content there!
 \param na Number of rows/columns of A, number of rows of B and C
 \param ncb Number of columns of B and C
 \param a matrix a
 \param lda leading dimension of matrix a
 \param ldaCols columns of matrix a
 \param b matrix b
 \param ldb leading dimension of matrix b
 \param ldbCols columns of matrix b
 \param nblk blocksize of cyclic distribution, must be the same in both directions!
 \param mpi_comm_rows MPI communicator for rows
 \param mpi_comm_cols MPI communicator for columns
 \param c matrix c
 \param ldc leading dimension of matrix c
 \param ldcCols columns of matrix c
 \result success int report success (1) or failure (0)
*/
/* FIX(review): parameter was misspelled "nlbk"; renamed to "nblk" to match the
   documentation above and every other routine in this interface. */
int elpa_mult_at_b_real_single(char uplo_a, char uplo_c, int na, int ncb, float *a, int lda, int ldaCols, float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, float *c, int ldc, int ldcCols);
362  /*
363  \brief C interface for elpa_mult_ah_b_complex_double: Performs C : = A**H * B for double-precision matrices
364  where A is a square matrix (na,na) which is optionally upper or lower triangular
365  B is a (na,ncb) matrix
366  C is a (na,ncb) matrix where optionally only the upper or lower
367  triangle may be computed
368  \details
369 
370  \param uplo_a 'U' if A is upper triangular
371  'L' if A is lower triangular
372  anything else if A is a full matrix
373  Please note: This pertains to the original A (as set in the calling program)
374  whereas the conjugate transpose (A**H) of A is used for calculations
375  If uplo_a is 'U' or 'L', the other triangle is not used at all,
376  i.e. it may contain arbitrary numbers
377  \param uplo_c 'U' if only the upper diagonal part of C is needed
378  'L' if only the lower diagonal part of C is needed
379  anything else if the full matrix C is needed
380  Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
381  written to a certain extent, i.e. one shouldn't rely on the content there!
382  \param na Number of rows/columns of A, number of rows of B and C
383  \param ncb Number of columns of B and C
384  \param a matrix a
385  \param lda leading dimension of matrix a
386  \param ldaCols columns of matrix a
387  \param b matrix b
388  \param ldb leading dimension of matrix b
389  \param ldbCols columns of matrix b
390  \param nblk blocksize of cyclic distribution, must be the same in both directions!
391  \param mpi_comm_rows MPI communicator for rows
392  \param mpi_comm_cols MPI communicator for columns
393  \param c matrix c
394  \param ldc leading dimension of matrix c
395  \param ldcCols columns of matrix c
396  \result success int reports success (1) or failure (0)
397  */
398  int elpa_mult_ah_b_complex_double(char uplo_a, char uplo_c, int na, int ncb, double complex *a, int lda, int ldaCols, double complex *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double complex *c, int ldc, int ldcCols);
399  /*
400  \brief C interface for elpa_mult_ah_b_complex_single: Performs C : = A**H * B for single-precision matrices
401  where A is a square matrix (na,na) which is optionally upper or lower triangular
402  B is a (na,ncb) matrix
403  C is a (na,ncb) matrix where optionally only the upper or lower
404  triangle may be computed
405  \details
406 
407  \param uplo_a 'U' if A is upper triangular
408  'L' if A is lower triangular
409  anything else if A is a full matrix
410  Please note: This pertains to the original A (as set in the calling program)
411  whereas the conjugate transpose (A**H) of A is used for calculations
412  If uplo_a is 'U' or 'L', the other triangle is not used at all,
413  i.e. it may contain arbitrary numbers
414  \param uplo_c 'U' if only the upper diagonal part of C is needed
415  'L' if only the lower diagonal part of C is needed
416  anything else if the full matrix C is needed
417  Please note: Even when uplo_c is 'U' or 'L', the other triangle may be
418  written to a certain extent, i.e. one shouldn't rely on the content there!
419  \param na Number of rows/columns of A, number of rows of B and C
420  \param ncb Number of columns of B and C
421  \param a matrix a
422  \param lda leading dimension of matrix a
423  \param ldaCols columns of matrix a
424  \param b matrix b
425  \param ldb leading dimension of matrix b
426  \param ldbCols columns of matrix b
427  \param nblk blocksize of cyclic distribution, must be the same in both directions!
428  \param mpi_comm_rows MPI communicator for rows
429  \param mpi_comm_cols MPI communicator for columns
430  \param c matrix c
431  \param ldc leading dimension of matrix c
432  \param ldcCols columns of matrix c
433  \result success int reports success (1) or failure (0)
434  */
435  int elpa_mult_ah_b_complex_single(char uplo_a, char uplo_c, int na, int ncb, complex float *a, int lda, int ldaCols, complex float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, complex float *c, int ldc, int ldcCols);
430  /*
431  \brief C interface to elpa_invert_trm_real_double: Inverts a real double-precision upper triangular matrix
432  \details
433  \param na Order of matrix
434  \param a(lda,matrixCols) Distributed matrix which should be inverted
435  Distribution is like in Scalapack.
436  Only the upper triangle needs to be set.
437  The lower triangle is not referenced.
438  \param lda Leading dimension of a
439  \param matrixCols local columns of matrix a
440  \param nblk blocksize of cyclic distribution, must be the same in both directions!
441  \param mpi_comm_rows MPI communicator for rows
442  \param mpi_comm_cols MPI communicator for columns
443  \param wantDebug int more debug information on failure if 1, else 0
444  \result success int reports success (1) or failure (0)
445  */
446  int elpa_invert_trm_real_double(int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
/*
 \brief C interface to elpa_invert_trm_real_single: Inverts a real single-precision upper triangular matrix
 \details
 \param na Order of matrix
 \param a(lda,matrixCols) Distributed matrix which should be inverted
 Distribution is like in Scalapack.
 Only the upper triangle needs to be set.
 The lower triangle is not referenced.
 \param lda Leading dimension of a
 \param matrixCols local columns of matrix a
 \param nblk blocksize of cyclic distribution, must be the same in both directions!
 \param mpi_comm_rows MPI communicator for rows
 \param mpi_comm_cols MPI communicator for columns
 \param wantDebug int more debug information on failure if 1, else 0
 \result success int reports success (1) or failure (0)
*/
/* FIX(review): the single-precision variant was declared with "double *a"; it must
   take "float *a", consistent with elpa_cholesky_real_single and elpa_solve_tridi_single.
   Passing a double array to a routine operating on 32-bit reals would corrupt data. */
int elpa_invert_trm_real_single(int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
464  /*
465  \brief C interface to elpa_invert_trm_complex_double: Inverts a double-precision complex upper triangular matrix
466  \details
467  \param na Order of matrix
468  \param a(lda,matrixCols) Distributed matrix which should be inverted
469  Distribution is like in Scalapack.
470  Only the upper triangle needs to be set.
471  The lower triangle is not referenced.
472  \param lda Leading dimension of a
473  \param matrixCols local columns of matrix a
474  \param nblk blocksize of cyclic distribution, must be the same in both directions!
475  \param mpi_comm_rows MPI communicator for rows
476  \param mpi_comm_cols MPI communicator for columns
477  \param wantDebug int more debug information on failure if 1, else 0
478  \result success int reports success (1) or failure (0)
479  */
480  int elpa_invert_trm_complex_double(int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
481  /*
482  \brief C interface to elpa_invert_trm_complex_single: Inverts a single-precision complex upper triangular matrix
483  \details
484  \param na Order of matrix
485  \param a(lda,matrixCols) Distributed matrix which should be inverted
486  Distribution is like in Scalapack.
487  Only the upper triangle needs to be set.
488  The lower triangle is not referenced.
489  \param lda Leading dimension of a
490  \param matrixCols local columns of matrix a
491  \param nblk blocksize of cyclic distribution, must be the same in both directions!
492  \param mpi_comm_rows MPI communicator for rows
493  \param mpi_comm_cols MPI communicator for columns
494  \param wantDebug int more debug information on failure if 1, else 0
495  \result success int reports success (1) or failure (0)
496  */
497  int elpa_invert_trm_complex_single(int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
498  /*
499  \brief elpa_cholesky_real_double: Cholesky factorization of a double-precision real symmetric matrix
500  \details
501 
502  *\param na Order of matrix
503  *\param a(lda,matrixCols) Distributed matrix which should be factorized.
504  * Distribution is like in Scalapack.
505  * Only the upper triangle needs to be set.
506  * On return, the upper triangle contains the Cholesky factor
507  * and the lower triangle is set to 0.
508  *\param lda Leading dimension of a
509  *\param matrixCols local columns of matrix a
510  *\param nblk blocksize of cyclic distribution, must be the same in both directions!
511  *\param mpi_comm_rows MPI communicator for rows
512  *\param mpi_comm_cols MPI communicator for columns
513  *\param wantDebug int more debug information on failure if 1, else 0
514  *\result success int reports success (1) or failure (0)
515  */
516  int elpa_cholesky_real_double(int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
517  /*
518  \brief elpa_cholesky_real_single: Cholesky factorization of a single-precision real symmetric matrix
519  \details
520 
521  \param na Order of matrix
522  \param a(lda,matrixCols) Distributed matrix which should be factorized.
523  Distribution is like in Scalapack.
524  Only the upper triangle needs to be set.
525  On return, the upper triangle contains the Cholesky factor
526  and the lower triangle is set to 0.
527  \param lda Leading dimension of a
528  \param matrixCols local columns of matrix a
529  \param nblk blocksize of cyclic distribution, must be the same in both directions!
530  \param mpi_comm_rows MPI communicator for rows
531  \param mpi_comm_cols MPI communicator for columns
532  \param wantDebug int more debug information on failure if 1, else 0
533  \result success int reports success (1) or failure (0)
534  */
535  int elpa_cholesky_real_single(int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
536  /*
537  \brief C interface elpa_cholesky_complex_double: Cholesky factorization of a double-precision complex hermitian matrix
538  \details
539  \param na Order of matrix
540  \param a(lda,matrixCols) Distributed matrix which should be factorized.
541  Distribution is like in Scalapack.
542  Only the upper triangle needs to be set.
543  On return, the upper triangle contains the Cholesky factor
544  and the lower triangle is set to 0.
545  \param lda Leading dimension of a
546  \param matrixCols local columns of matrix a
547  \param nblk blocksize of cyclic distribution, must be the same in both directions!
548  \param mpi_comm_rows MPI communicator for rows
549  \param mpi_comm_cols MPI communicator for columns
550  \param wantDebug int more debug information on failure, if 1, else 0
551  \result success int reports success (1) or failure (0)
552  */
553  int elpa_cholesky_complex_double(int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
554  /*
555  \brief C interface elpa_cholesky_complex_single: Cholesky factorization of a single-precision complex hermitian matrix
556  \details
557  \param na Order of matrix
558  \param a(lda,matrixCols) Distributed matrix which should be factorized.
559  Distribution is like in Scalapack.
560  Only the upper triangle needs to be set.
561  On return, the upper triangle contains the Cholesky factor
562  and the lower triangle is set to 0.
563  \param lda Leading dimension of a
564  \param matrixCols local columns of matrix a
565  \param nblk blocksize of cyclic distribution, must be the same in both directions!
566  \param mpi_comm_rows MPI communicator for rows
567  \param mpi_comm_cols MPI communicator for columns
568  \param wantDebug int more debug information on failure, if 1, else 0
569  \result success int reports success (1) or failure (0)
570  */
571  int elpa_cholesky_complex_single(int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug);
/* 2-stage solver: double-precision real eigenvalue problem. */
598  int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk,
599  int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
/* 2-stage solver: single-precision real eigenvalue problem. */
626  int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk,
627  int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU);
628  #include <complex.h>
/* 2-stage solver: double-precision complex eigenvalue problem. */
654  int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq,
655  int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
656  #include <complex.h>
/* 2-stage solver: single-precision complex eigenvalue problem. */
682  int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk,
683  int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU);
int elpa_cholesky_complex_single(int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_evp_complex_1stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
C interface to solve the double-precision complex eigenvalue problem with 1-stage solver...
int elpa_solve_evp_complex_double(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)
C interface to driver function "elpa_solve_evp_complex_double".
int elpa_solve_evp_complex_2stage_single_precision(int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)
C interface to solve the single-precision complex eigenvalue problem with 2-stage solver...
int elpa_solve_evp_real_1stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
C interface to solve the double-precision real eigenvalue problem with 1-stage solver.
int elpa_solve_tridi_double(int na, int nev, double *d, double *e, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_get_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols)
C interface to create ELPA communicators.
int get_elpa_row_col_comms(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols)
C old, deprecated interface, will be deleted. Use "elpa_get_communicators".
int elpa_solve_tridi_single(int na, int nev, float *d, float *e, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int get_elpa_communicators(int mpi_comm_world, int my_prow, int my_pcol, int *mpi_comm_rows, int *mpi_comm_cols)
C old, deprecated interface, will be deleted. Use "elpa_get_communicators".
int elpa_solve_evp_real_2stage_double_precision(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)
C interface to solve the double-precision real eigenvalue problem with 2-stage solver.
int elpa_cholesky_real_single(int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_evp_real_1stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
C interface to solve the single-precision real eigenvalue problem with 1-stage solver.
int elpa_cholesky_complex_double(int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_evp_complex_single(int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU, char *method)
C interface to driver function "elpa_solve_evp_complex_single".
int elpa_invert_trm_complex_double(int na, double complex *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_invert_trm_complex_single(int na, complex float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_mult_at_b_real_double(char uplo_a, char uplo_c, int na, int ncb, double *a, int lda, int ldaCols, double *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double *c, int ldc, int ldcCols)
int elpa_solve_evp_real_double(int na, int nev, double *a, int lda, double *ev, double *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)
C interface to driver function "elpa_solve_evp_real_double".
int elpa_solve_evp_real_2stage_single_precision(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU)
C interface to solve the single-precision real eigenvalue problem with 2-stage solver.
int elpa_invert_trm_real_single(int na, float *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_mult_ah_b_complex_single(char uplo_a, char uplo_c, int na, int ncb, complex float *a, int lda, int ldaCols, complex float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, complex float *c, int ldc, int ldcCols)
int elpa_mult_at_b_real_single(char uplo_a, char uplo_c, int na, int ncb, float *a, int lda, int ldaCols, float *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, float *c, int ldc, int ldcCols)
int elpa_invert_trm_real_double(int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_evp_complex_2stage_double_precision(int na, int nev, double complex *a, int lda, double *ev, double complex *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_COMPLEX_ELPA_KERNEL_API, int useGPU)
C interface to solve the double-precision complex eigenvalue problem with 2-stage solver...
int elpa_cholesky_real_double(int na, double *a, int lda, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int wantDebug)
int elpa_solve_evp_real_single(int na, int nev, float *a, int lda, float *ev, float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int THIS_REAL_ELPA_KERNEL_API, int useQR, int useGPU, char *method)
C interface to driver function "elpa_solve_evp_real_single".
int elpa_mult_ah_b_complex_double(char uplo_a, char uplo_c, int na, int ncb, double complex *a, int lda, int ldaCols, double complex *b, int ldb, int ldbCols, int nblk, int mpi_comm_rows, int mpi_comm_cols, double complex *c, int ldc, int ldcCols)
int elpa_solve_evp_complex_1stage_single_precision(int na, int nev, complex float *a, int lda, float *ev, complex float *q, int ldq, int nblk, int matrixCols, int mpi_comm_rows, int mpi_comm_cols, int mpi_comm_all, int useGPU)
C interface to solve the single-precision complex eigenvalue problem with 1-stage solver...