Eigenvalue SoLvers for Petaflop-Applications (ELPA) 2024.03.001
cannon_back_template.c
1// This file is part of ELPA.
2//
3// The ELPA library was originally created by the ELPA consortium,
4// consisting of the following organizations:
5//
6// - Max Planck Computing and Data Facility (MPCDF), formerly known as
7// Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
8// - Bergische Universität Wuppertal, Lehrstuhl für angewandte
9// Informatik,
10// - Technische Universität München, Lehrstuhl für Informatik mit
11// Schwerpunkt Wissenschaftliches Rechnen,
12// - Fritz-Haber-Institut, Berlin, Abt. Theorie,
13// - Max-Planck-Institut für Mathematik in den Naturwissenschaften,
14// Leipzig, Abt. Komplexe Strukturen in Biologie und Kognition,
15// and
16// - IBM Deutschland GmbH
17//
18// This particular source code file has been developed within the ELPA-AEO
19// project, which has been a joint effort of
20//
21// - Max Planck Computing and Data Facility (MPCDF), formerly known as
22// Rechenzentrum Garching der Max-Planck-Gesellschaft (RZG),
23// - Bergische Universität Wuppertal, Lehrstuhl für angewandte
24// Informatik,
25// - Technische Universität München, Lehrstuhl für Informatik mit
26// Schwerpunkt Wissenschaftliches Rechnen,
27// - Technische Universität München, Lehrstuhl für Theoretische Chemie,
28// - Fritz-Haber-Institut, Berlin, Abt. Theorie,
29
30// More information can be found here:
31// http://elpa.mpcdf.mpg.de/ and
32// http://elpa-aeo.mpcdf.mpg.de
33//
34// ELPA is free software: you can redistribute it and/or modify
35// it under the terms of version 3 of the GNU Lesser General Public
36// License as published by the Free
37// Software Foundation.
38//
39// ELPA is distributed in the hope that it will be useful,
40// but WITHOUT ANY WARRANTY; without even the implied warranty of
41// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
42// GNU Lesser General Public License for more details.
43//
44// You should have received a copy of the GNU Lesser General Public License
45// along with ELPA. If not, see <http://www.gnu.org/licenses/>
46//
47// ELPA reflects a substantial effort on the part of the original
48// ELPA consortium, and we ask you to respect the spirit of the
49// license that we chose: i.e., please contribute any changes you
50// may have back to the original ELPA library distribution, and keep
51// any derivatives of ELPA under the same license that we chose for
52// the original distribution, the GNU Lesser General Public License.
53//
54// Author: Valeriy Manin (Bergische Universität Wuppertal)
55// integrated into the ELPA library by Pavel Kus, Andreas Marek (MPCDF)
56
57#ifdef HAVE_64BIT_INTEGER_MATH_SUPPORT
58#define C_INT_TYPE_PTR long int*
59#define C_INT_TYPE long int
60#define BLAS_KIND c_int64_t
61#else
62#define C_INT_TYPE_PTR int*
63#define C_INT_TYPE int
64#define BLAS_KIND c_int
65#endif
66#ifdef HAVE_64BIT_INTEGER_MPI_SUPPORT
67#define C_INT_MPI_TYPE_PTR long int*
68#define C_INT_MPI_TYPE long int
69#define MPI_KIND c_int64_t
70#else
71#define C_INT_MPI_TYPE_PTR int*
72#define C_INT_MPI_TYPE int
73#define MPI_KIND c_int
74#endif
75
76// it seems that we need these two levels of indirection to correctly expand macros
77#define cannons_triang_rectangular_impl_expand2(SUFFIX) cannons_triang_rectangular_##SUFFIX
78#define cannons_triang_rectangular_impl_expand1(SUFFIX) cannons_triang_rectangular_impl_expand2(SUFFIX)
79#define cannons_triang_rectangular_impl cannons_triang_rectangular_impl_expand1(ELPA_IMPL_SUFFIX)
80
81#define cannons_triang_rectangular_c_impl_expand2(SUFFIX) cannons_triang_rectangular_c_##SUFFIX
82#define cannons_triang_rectangular_c_impl_expand1(SUFFIX) cannons_triang_rectangular_c_impl_expand2(SUFFIX)
83#define cannons_triang_rectangular_c_impl cannons_triang_rectangular_c_impl_expand1(ELPA_IMPL_SUFFIX)
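// For illustration only (the actual suffix is defined by the file that includes this template): if
// ELPA_IMPL_SUFFIX expanded to d, cannons_triang_rectangular_impl would expand to cannons_triang_rectangular_d
// and cannons_triang_rectangular_c_impl to cannons_triang_rectangular_c_d.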
84
85void cannons_triang_rectangular_impl(math_type* U, math_type* B, C_INT_TYPE np_rows, C_INT_TYPE np_cols, C_INT_TYPE my_prow, C_INT_TYPE my_pcol, C_INT_TYPE_PTR U_desc, C_INT_TYPE_PTR b_desc, math_type *Res, MPI_Comm row_comm, MPI_Comm col_comm)
86{
87   // Cannon's algorithm, non-blocking version
88   // Input:
89   //   - U is an upper triangular matrix
90   //   - B is a rectangular matrix
91   // Output:
92   //   - Res is a full rectangular matrix, Res = U*B
93   // row_comm: communicator along rows
94   // col_comm: communicator along columns
95   // This function is used for the back transformation
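   // Overall structure, as implemented below:
   //   1) initial skew: the upper-triangular block-columns of U are packed and shifted along row_comm,
   //      and the local part of B is shifted along col_comm, so that matching blocks end up on the same process;
   //   2) np_rows-1 Cannon iterations: non-blocking ring shifts of U (one process column to the "left" in row_comm)
   //      and of B (one process row "up" in col_comm) are posted, the buffers received in the previous step are
   //      multiplied and accumulated into Res, and only then the pending shifts are waited for;
   //   3) a final multiplication with the buffers received in the last shift.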
96
97 C_INT_TYPE na, nb, nblk, width, na_rows, na_cols, nb_cols, cols_in_buffer_U_my_initial, cols_in_buffer_U, rows_in_buffer_U, Size_receive_U_now, rows_in_buffer_U_now, cols_in_buffer_U_now, rows_in_buffer_U_my_initial;
98
99 C_INT_MPI_TYPE Size_receive_U_nowMPI, Size_receive_UMPI, Size_receive_BMPI;
100 C_INT_TYPE i, j, Size_send_U, Size_receive_U, Size_send_B, Size_receive_B, intNumber, Buf_rows, Buf_cols_U, Buf_cols_B, curr_rows, num_of_iters, cols_in_buffer, rows_in_block, curr_col_loc, cols_in_block, num_of_blocks_in_U_buffer, col_of_origin_U, b_rows_mult, b_cols_mult;
101
102 math_type *Buf_to_send_U, *Buf_to_receive_U, *Buf_to_send_B, *Buf_to_receive_B, *Buf_U, *PosBuff;
103
104 C_INT_TYPE where_to_send_U, from_where_to_receive_U, where_to_send_B, from_where_to_receive_B, last_proc_col_B, last_proc_row_B, n, Size_U_stored, proc_col_min;
105
106 math_type *U_local_start, *Buf_pos, *B_local_start, *double_ptr, *CopyTo, *CopyFrom;
107
108 C_INT_TYPE ratio;
109
110 MPI_Status status;
111
112 C_INT_TYPE one = 1;
113 C_INT_TYPE zero = 0;
114 math_type done = 1.0;
115 math_type dzero = 0.0;
116
117 na = U_desc[2];
118 nblk = U_desc[4];
119 nb = b_desc[3];
120
121 na_rows = numroc_(&na, &nblk, &my_prow, &zero, &np_rows);
122 na_cols = numroc_(&na, &nblk, &my_pcol, &zero, &np_cols);
123 nb_cols = numroc_(&nb, &nblk, &my_pcol, &zero, &np_cols);
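   // U_desc and b_desc are ScaLAPACK array descriptors (0-based in C: desc[2] = number of global rows,
   // desc[3] = number of global columns, desc[4] = row block size); numroc_ returns how many rows/columns
   // of the block-cyclically distributed matrix this process owns locally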
124
125 MPI_Request request_U_Recv;
126 MPI_Request request_U_Send;
127 MPI_Request request_B_Recv;
128 MPI_Request request_B_Send;
129
131 last_proc_col_B = ((nb-1)/nblk) % np_cols;
132 last_proc_row_B = ((na-1)/nblk) % np_rows;
133
135
136 if(nb%nblk == 0)
137 if(my_pcol <= last_proc_col_B)
138 Buf_cols_B = nb_cols;
139 else
140 Buf_cols_B = nb_cols + nblk;
141 else
142 if(my_pcol < last_proc_col_B)
143 Buf_cols_B = nb_cols;
144 else if(my_pcol > last_proc_col_B)
145 Buf_cols_B = nb_cols + nblk;
146 else // if my_pcol == last_proc_col_B
147 Buf_cols_B = nb_cols + nblk - nb_cols%nblk;
148
149 if(na%nblk == 0)
150 if(my_prow <= last_proc_row_B)
151 Buf_rows = na_rows;
152 else
153 Buf_rows = na_rows + nblk;
154 else
155 if(my_prow < last_proc_row_B)
156 Buf_rows = na_rows;
157 else if(my_prow > last_proc_row_B)
158 Buf_rows = na_rows + nblk;
159 else // if my_prow == last_proc_row_B
160 Buf_rows = na_rows + nblk - na_rows%nblk;
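   // the paddings above make the receive buffers for B large enough to hold the local part of B owned by any
   // process in the same process row/column, which may be up to one block larger than the part owned here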
161
162 ratio = np_cols/np_rows;
163
164 intNumber = ceil((math_type)na/(math_type)(np_cols*nblk)); // max. possible number of the local block columns of U
165 Size_U_stored = ratio*nblk*nblk*intNumber*(intNumber+1)/2 + 2; // number of local elements from the upper triangular part that every proc. has (max. possible value among all the procs.)
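   // the block-column heights of the locally stored upper-triangular part grow by ratio*nblk from one block-column
   // to the next, so summing them gives the arithmetic-progression bound ratio*nblk*nblk*intNumber*(intNumber+1)/2;
   // the "+ 2" reserves room for the column/row counts that are appended to the end of each U buffer below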
166
167 Buf_to_send_U = malloc(ratio*Size_U_stored*sizeof(math_type));
168 Buf_to_receive_U = malloc(ratio*Size_U_stored*sizeof(math_type));
169 Buf_to_send_B = malloc(Buf_cols_B*Buf_rows*sizeof(math_type));
170 Buf_to_receive_B = malloc(Buf_cols_B*Buf_rows*sizeof(math_type));
171 if(ratio != 1)
172      Buf_U = malloc(Size_U_stored*sizeof(math_type)); // in this case we will receive data into this intermediate buffer and afterwards place the block-rows at the needed positions of the calculation buffer
173
174 for(i = 0; i < na_rows*nb_cols; i++)
175 Res[i] = 0;
176
178
179   // here we assume that np_rows < np_cols; then I will send to <ratio> processors with a "leap" equal to np_rows; the same holds for the receive
180 if((ratio != 1)||(my_prow != 0)) // if grid is rectangular or my_prow is not 0
181 Buf_pos = Buf_to_send_U; // I will copy to the send buffer
182 else
183 Buf_pos = Buf_to_receive_U; // if grid is square and my_prow is 0, then I will copy to the received buffer
184
185 // form array to send by block-columns; we need only upper triangular part
186 // find the first local block belonging to the upper part of matrix U
187 if(my_pcol >= my_prow) // if I am in the upper part of proc. grid
188 curr_col_loc = 0; // my first local block-column has block from the upper part of matrix
189 else
190 curr_col_loc = 1; //ceil((math_type)(((math_type)my_prow - (math_type)my_pcol)/(math_type)np_cols)) always will give 1 since np_cols > np_rows
191
192   num_of_iters = ceil((math_type)na_cols/(math_type)nblk); // number of my block-columns
193 num_of_iters = num_of_iters - curr_col_loc; // I will exclude the first <curr_col_loc> block-columns since they do not have blocks from the upper part of matrix U
194 curr_col_loc = curr_col_loc*nblk; // local index of the found block-column
195
196 if(my_pcol >= my_prow )
197 rows_in_block = ceil(((math_type)(my_pcol + 1) - (math_type)my_prow)/(math_type)np_rows)*nblk;
198 else
199 rows_in_block = ratio*nblk;
200 cols_in_buffer_U_my_initial = 0;
201 Size_send_U = 0;
202   for(i = 0; i < num_of_iters; i++) // loop over my block-columns that have blocks in the upper part of U
203 {
204 if(rows_in_block > na_rows)
205 rows_in_block = na_rows;
206
207 if ((na_cols - curr_col_loc) < nblk)
208 cols_in_block = na_cols - curr_col_loc; // how many columns do I have in the current block-column
209 else
210 cols_in_block = nblk;
211
212 if((rows_in_block > 0)&&(cols_in_block > 0))
213 {
214 double_ptr = &U[curr_col_loc*na_rows]; // pointer to start of the current block-column to be copied to buffer
215 C_LACPY("A", &rows_in_block, &cols_in_block, double_ptr, &na_rows, Buf_pos, &rows_in_block); // copy upper part of block-column in the buffer with LDA = length of the upper part of block-column
216 Buf_pos = Buf_pos + rows_in_block*cols_in_block; // go to the position where the next block-column will be copied
217 Size_send_U = Size_send_U + rows_in_block*cols_in_block;
218 cols_in_buffer_U_my_initial = cols_in_buffer_U_my_initial + cols_in_block;
219 }
220 curr_col_loc = curr_col_loc + nblk; // go to the next local block-column of my local array U
221 rows_in_block = rows_in_block + ratio*nblk;
222 }
223 rows_in_buffer_U_my_initial = rows_in_block - ratio*nblk; // remove redundant addition from the previous loop
224   *Buf_pos = (math_type)cols_in_buffer_U_my_initial; // write the number of columns at the end of the buffer; we will need this for further multiplications on the other processors
225   Buf_pos = Buf_pos + 1;
226   *Buf_pos = (math_type)rows_in_buffer_U_my_initial; // write the number of rows at the end of the buffer; we will need this for further multiplications on the other processors
227 Size_send_U = Size_send_U + 2;
228
229 // now we have the local buffer to send
230 // find the lowest processor column among those who will send me
231 proc_col_min = np_cols;
232 for(i = 0; i < ratio; i++)
233 {
234 from_where_to_receive_U = (my_pcol + my_prow + i*np_rows)%np_cols;
235 if(from_where_to_receive_U < proc_col_min)
236 proc_col_min = from_where_to_receive_U;
237 }
238
239 // do communications and form local buffers for calculations
240 Size_receive_U = 0; // size of the accumulated buffer
241 cols_in_buffer_U = 0; // number of columns in the accumulated buffer
242 rows_in_buffer_U = 0; // number of rows in the accumulated buffer
243 for(i = 0; i < ratio; i++)
244 {
245 where_to_send_U = (my_pcol - my_prow - i*np_rows + np_cols)%np_cols;
246 from_where_to_receive_U = (my_pcol + my_prow + i*np_rows)%np_cols;
247
248 // send and receive in the row_comm
249 if(ratio != 1) // if grid is not square
250 {
251 if(where_to_send_U != my_pcol) // if I need to send and receive on this step
252 {
253 MPI_Sendrecv(Buf_to_send_U, (C_INT_MPI_TYPE) Size_send_U, MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) where_to_send_U, 0, Buf_U, (C_INT_MPI_TYPE) Size_U_stored, MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) from_where_to_receive_U, 0, row_comm, &status);
254 MPI_Get_count(&status, MPI_MATH_DATATYPE_PRECISION_C, &Size_receive_U_nowMPI);
255 Size_receive_U_now = (C_INT_TYPE) Size_receive_U_nowMPI;
256            Size_receive_U = Size_receive_U + Size_receive_U_now - 2; // we need only the number of elements, so exclude the information about cols_in_buffer_U and rows_in_buffer_U
257
258 cols_in_buffer_U_now = Buf_U[Size_receive_U_now - 2];
259 cols_in_buffer_U = cols_in_buffer_U + cols_in_buffer_U_now;
260 rows_in_buffer_U_now = Buf_U[Size_receive_U_now - 1];
261
262 if(rows_in_buffer_U < rows_in_buffer_U_now)
263 rows_in_buffer_U = rows_in_buffer_U_now;
264
265 intNumber = from_where_to_receive_U/np_rows; // how many processors will send me blocks, such that they will be placed before the current blocks
266 if(proc_col_min >= my_prow) // if among procs who will send me there is one with the full sets of block-rows in the upper part
267 CopyTo = &Buf_to_receive_U[nblk*nblk*intNumber*(intNumber + 1)/2]; // here I will copy to; formula based on arithm. progression
268 else // if among procs who will send me there is one from the lower part of grid
269 if(from_where_to_receive_U < my_prow) // if I have just received from this processor from the lower part
270 CopyTo = &Buf_to_receive_U[nblk*nblk*ratio*(ratio - 1)/2]; // copy the first block of this processor after the first blocks from the others procs. that will send me later (the first block-column of this proc. is in the lower part of matrix)
271 else
272 CopyTo = &Buf_to_receive_U[nblk*nblk*intNumber*(intNumber - 1)/2];
273 CopyFrom = Buf_U;
274 }
275 else // if I need to send to myself on this step, then I will copy from Buf_to_send_U to Buf_to_receive_U
276 {
277 cols_in_buffer_U_now = cols_in_buffer_U_my_initial;
278 cols_in_buffer_U = cols_in_buffer_U + cols_in_buffer_U_now;
279
280 rows_in_buffer_U_now = rows_in_buffer_U_my_initial;
281 if(rows_in_buffer_U < rows_in_buffer_U_now)
282 rows_in_buffer_U = rows_in_buffer_U_now;
283
284 intNumber = my_pcol/np_rows; // how many processors will send me blocks, such that they will be placed before the current blocks
285 if(proc_col_min >= my_prow) // if among procs who will send me there is one with the full sets of block-rows in the upper part
286 CopyTo = &Buf_to_receive_U[nblk*nblk*intNumber*(intNumber + 1)/2]; // here I will copy to; formula based on arithm. progression
287 else // if among procs who will send me there is one from the lower part of grid
288 if(my_pcol < my_prow) // if I have just received from this processor from the lower part (in this case it is me)
289 CopyTo = &Buf_to_receive_U[nblk*nblk*ratio*(ratio - 1)/2]; // copy the first block of this processor after the first blocks from the others procs. that will send me later (the first block-column of this proc. is in the lower part of matrix)
290 else
291 CopyTo = &Buf_to_receive_U[nblk*nblk*intNumber*(intNumber - 1)/2];
292 CopyFrom = Buf_to_send_U;
293 Size_receive_U = Size_receive_U + Size_send_U - 2;
294 }
295
296 // copy by block-columns
297 intNumber = ceil((math_type)cols_in_buffer_U_now/(math_type)nblk); // how many block-columns I have received on this iteration
298 if(from_where_to_receive_U >= my_prow)
299 rows_in_block = ceil(((math_type)(from_where_to_receive_U + 1) - (math_type)my_prow)/(math_type)np_rows)*nblk; // number of rows in the first block-column of U buffer
300 else
301 rows_in_block = ratio*nblk;
302 for(j = 0; j < intNumber; j++)
303 {
304 if((j+1)*nblk < cols_in_buffer_U_now)
305 cols_in_block = nblk;
306 else
307 cols_in_block = cols_in_buffer_U_now - j*nblk;
308
309 C_LACPY("A", &rows_in_block, &cols_in_block, CopyFrom, &rows_in_block, CopyTo, &rows_in_block);
310
311 CopyFrom = CopyFrom + rows_in_block*cols_in_block;
312 CopyTo = CopyTo + ratio*rows_in_block*nblk + nblk*nblk*ratio*(ratio-1)/2; // I need to leave place for ratio block-columns of the other procs. of the lengths rows_in_block, (rows_in_block+nblk), (rows_in_block+2*nblk) and so on
313 rows_in_block = rows_in_block + ratio*nblk; // number of rows in the next block-columns
314 if(rows_in_block > rows_in_buffer_U_now)
315 rows_in_block = rows_in_buffer_U_now;
316 }
317 }
318 else // if grid is square
319 {
320 if(my_prow > 0)
321 {
322 MPI_Sendrecv(Buf_to_send_U, (C_INT_MPI_TYPE) Size_send_U, MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) where_to_send_U, 0, Buf_to_receive_U, (C_INT_MPI_TYPE) Size_U_stored, MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) from_where_to_receive_U, 0, row_comm, &status);
323 MPI_Get_count(&status, MPI_MATH_DATATYPE_PRECISION_C, &Size_receive_UMPI);
324 Size_receive_U = (C_INT_TYPE) Size_receive_UMPI;
325
326 cols_in_buffer_U = (C_INT_TYPE)Buf_to_receive_U[Size_receive_U-2];
327 rows_in_buffer_U = (C_INT_TYPE)Buf_to_receive_U[Size_receive_U-1];
328 }
329         else // if my_prow == 0, then I already have everything in my Buf_to_receive_U buffer
330 {
331 Size_receive_U = Size_send_U;
332 rows_in_buffer_U = rows_in_buffer_U_my_initial;
333 cols_in_buffer_U = cols_in_buffer_U_my_initial;
334 }
335 }
336 }
337 if(ratio != 1)
338 {
339 Buf_to_receive_U[Size_receive_U] = cols_in_buffer_U;
340 Buf_to_receive_U[Size_receive_U + 1] = rows_in_buffer_U;
341 Size_receive_U = Size_receive_U + 2;
342 }
343
345
346 if(my_pcol > 0)
347 {
348 where_to_send_B = (my_prow - my_pcol + np_cols)%np_rows; // shift = my_pcol
349 from_where_to_receive_B = (my_pcol + my_prow)%np_rows;
350
351 // send and receive in the row_comm
352 if(where_to_send_B != my_prow) // for the rectangular proc grids it may be possible that I need to "send to myself"; if it is not the case, then I send
353 {
354 // form array to send
355 C_LACPY("A", &na_rows, &nb_cols, B, &na_rows, Buf_to_send_B, &na_rows);
356 MPI_Sendrecv(Buf_to_send_B, (C_INT_MPI_TYPE) (nb_cols*na_rows), MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) where_to_send_B, 0, Buf_to_receive_B, (C_INT_MPI_TYPE) (nb_cols*Buf_rows), MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) from_where_to_receive_B, 0, col_comm, &status);
357 MPI_Get_count(&status, MPI_MATH_DATATYPE_PRECISION_C, &Size_receive_BMPI); // find out how many elements I have received
358 if (nb_cols!=0) Size_receive_B = (C_INT_TYPE) Size_receive_BMPI / nb_cols; // how many rows I have received
359 else Size_receive_B=0; // can happen only if nb_cols=Size_receive_BMPI=0
360 }
361 else
362 {
363 C_LACPY("A", &na_rows, &nb_cols, B, &na_rows, Buf_to_receive_B, &na_rows); // else I copy data like I have "received" it
364 Size_receive_B = na_rows;
365 }
366 }
367 else
368 {
369 C_LACPY("A", &na_rows, &nb_cols, B, &na_rows, Buf_to_receive_B, &na_rows); // if I am in the 0 proc row, I need not to send; so copy data like I have "received" it
370 Size_receive_B = na_rows;
371 }
372
374 where_to_send_U = (my_pcol - 1 + np_cols)%np_cols;
375 from_where_to_receive_U = (my_pcol + 1)%np_cols;
376 where_to_send_B = (my_prow - 1 + np_rows)%np_rows;
377 from_where_to_receive_B = (my_prow + 1)%np_rows;
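   // main loop of Cannon's algorithm: in every iteration U is shifted by one process column to the "left" within
   // row_comm and B by one process row "up" within col_comm; the shifts are posted as non-blocking sends/receives
   // so that the local multiplication below overlaps with the communication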
378
379 for(i = 1; i < np_rows; i++)
380 {
381      // at this point I need to send my neighbour what I have in the "receive" arrays; that is why the "receive" and "send" buffer pointers are swapped
382 double_ptr = Buf_to_send_U;
383 Buf_to_send_U = Buf_to_receive_U;
384 Buf_to_receive_U = double_ptr;
385
386 double_ptr = Buf_to_send_B;
387 Buf_to_send_B = Buf_to_receive_B;
388 Buf_to_receive_B = double_ptr;
389
390 Size_send_U = Size_receive_U;
391 Size_send_B = Size_receive_B;
392
394 MPI_Isend(Buf_to_send_U, (C_INT_MPI_TYPE) Size_send_U, MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) where_to_send_U, 0, row_comm, &request_U_Send);
395 MPI_Irecv(Buf_to_receive_U, (C_INT_MPI_TYPE) (ratio*Size_U_stored), MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) from_where_to_receive_U, 0, row_comm, &request_U_Recv);
397 MPI_Isend(Buf_to_send_B, (C_INT_MPI_TYPE) (Size_send_B*nb_cols), MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) where_to_send_B, 0, col_comm, &request_B_Send);
398 MPI_Irecv(Buf_to_receive_B, (C_INT_MPI_TYPE) (Buf_rows*nb_cols), MPI_MATH_DATATYPE_PRECISION_C, (C_INT_MPI_TYPE) from_where_to_receive_B, 0, col_comm, &request_B_Recv);
400 cols_in_buffer_U = (C_INT_TYPE)Buf_to_send_U[Size_receive_U-2];
401 rows_in_buffer_U = (C_INT_TYPE)Buf_to_send_U[Size_receive_U-1];
402 //find minimal proc. column among those procs. who contributed in the current U buffer
403 proc_col_min = np_cols;
404 for(j = 0; j < ratio; j++)
405 {
406 col_of_origin_U = (my_pcol + my_prow + i - 1 + j*np_rows)%np_cols;
407 if(col_of_origin_U < proc_col_min)
408 proc_col_min = col_of_origin_U;
409 }
410 col_of_origin_U = proc_col_min;
411
412 num_of_blocks_in_U_buffer = ceil((math_type)cols_in_buffer_U/(math_type)nblk);
413
414 if (col_of_origin_U >= my_prow)
415 B_local_start = Buf_to_send_B;
416 else
417 B_local_start = Buf_to_send_B + nblk;
418
419 U_local_start = Buf_to_send_U;
420
421 for(j = 0; j < num_of_blocks_in_U_buffer; j++)
422 {
423 curr_rows = (j+1)*nblk;
424 if (curr_rows > rows_in_buffer_U)
425 curr_rows = rows_in_buffer_U;
426
427 if((j+1)*nblk <= cols_in_buffer_U)
428 b_rows_mult = nblk;
429 else
430 b_rows_mult = cols_in_buffer_U - j*nblk;
431
432 if(Size_receive_B!=0) C_GEMM("N", "N", &curr_rows, &nb_cols, &b_rows_mult, &done, U_local_start, &curr_rows, B_local_start, &Size_receive_B, &done, Res, &na_rows);
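         // the C_GEMM call above multiplies the current block-column of U (curr_rows x b_rows_mult, at U_local_start)
         // with the matching block of rows of B (b_rows_mult x nb_cols, at B_local_start) and, since beta = 1.0,
         // accumulates the product into the first curr_rows rows of Res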
433
434 U_local_start = U_local_start + nblk*curr_rows;
435 B_local_start = B_local_start + nblk;
436 }
437
438 MPI_Wait(&request_U_Send, &status);
439 MPI_Wait(&request_U_Recv, &status);
440 MPI_Get_count(&status, MPI_MATH_DATATYPE_PRECISION_C, &Size_receive_UMPI); // find out how many elements I have received
441 Size_receive_U = (C_INT_TYPE) Size_receive_UMPI;
442
443 MPI_Wait(&request_B_Send, &status);
444 MPI_Wait(&request_B_Recv, &status);
445 MPI_Get_count(&status, MPI_MATH_DATATYPE_PRECISION_C, &Size_receive_BMPI); // find out how many elements I have received
446 if (nb_cols!=0) Size_receive_B = (C_INT_TYPE) Size_receive_BMPI / nb_cols; // how many rows I have received
447 else Size_receive_B=0; // can happen only if nb_cols=Size_receive_BMPI=0
448
449 }
450
451 // last iteration
452 cols_in_buffer_U = (C_INT_TYPE)Buf_to_receive_U[Size_receive_U-2];
453 rows_in_buffer_U = (C_INT_TYPE)Buf_to_receive_U[Size_receive_U-1];
454 //find minimal proc. column among those procs. who contributed in the current U buffer
455 proc_col_min = np_cols;
456 for(j = 0; j < ratio; j++)
457 {
458 col_of_origin_U = (my_pcol + my_prow + np_rows - 1 + j*np_rows)%np_cols;
459 if(col_of_origin_U < proc_col_min)
460 proc_col_min = col_of_origin_U;
461 }
462 col_of_origin_U = proc_col_min;
463
464 num_of_blocks_in_U_buffer = ceil((math_type)cols_in_buffer_U/(math_type)nblk);
465
466 if (col_of_origin_U >= my_prow)
467 B_local_start = Buf_to_receive_B;
468 else
469 B_local_start = Buf_to_receive_B + nblk;
470
471 U_local_start = Buf_to_receive_U;
472
473 for(j = 0; j < num_of_blocks_in_U_buffer; j++)
474 {
475 curr_rows = (j+1)*nblk;
476 if (curr_rows > rows_in_buffer_U)
477 curr_rows = rows_in_buffer_U;
478
479 if((j+1)*nblk <= cols_in_buffer_U)
480 b_rows_mult = nblk;
481 else
482 b_rows_mult = cols_in_buffer_U - j*nblk;
483
484 if(Size_receive_B!=0) C_GEMM("N", "N", &curr_rows, &nb_cols, &b_rows_mult, &done, U_local_start, &curr_rows, B_local_start, &Size_receive_B, &done, Res, &na_rows);
485
486 U_local_start = U_local_start + nblk*curr_rows;
487 B_local_start = B_local_start + nblk;
488 }
489
490 free(Buf_to_send_U);
491 free(Buf_to_receive_U);
492 free(Buf_to_send_B);
493 free(Buf_to_receive_B);
494 if(ratio != 1)
495 free(Buf_U);
496}
497
498
499void cannons_triang_rectangular_c_impl(math_type* U, math_type* B, int local_rowsCast, int local_colsCast,
500 C_INT_TYPE_PTR u_desc, C_INT_TYPE_PTR b_desc, math_type *Res, C_INT_MPI_TYPE row_comm, C_INT_MPI_TYPE col_comm)
501{
502 C_INT_TYPE local_rows, local_cols;
503
504 local_rows = (C_INT_TYPE) local_rowsCast;
505 local_cols = (C_INT_TYPE) local_colsCast;
506
507 MPI_Comm c_row_comm = MPI_Comm_f2c(row_comm);
508 MPI_Comm c_col_comm = MPI_Comm_f2c(col_comm);
509
510 C_INT_TYPE my_prow, my_pcol, np_rows, np_cols;
511 C_INT_MPI_TYPE my_prowMPI, my_pcolMPI, np_rowsMPI, np_colsMPI;
512
513 MPI_Comm_rank(c_row_comm, &my_prowMPI);
514 MPI_Comm_size(c_row_comm, &np_rowsMPI);
515 MPI_Comm_rank(c_col_comm, &my_pcolMPI);
516 MPI_Comm_size(c_col_comm, &np_colsMPI);
517
518 my_prow = (C_INT_TYPE) my_prowMPI;
519 my_pcol = (C_INT_TYPE) my_pcolMPI;
520 np_rows = (C_INT_TYPE) np_rowsMPI;
521 np_cols = (C_INT_TYPE) np_colsMPI;
522
523   // BEWARE
524   // In Cannon's algorithm, the column and row communicators are exchanged:
525   // what we usually call row_comm in ELPA is thus passed to the col_comm parameter of the function, and vice versa
526   // (the order is swapped in the following call).
527   // This is a bit unfortunate; maybe it should be changed in the Cannon algorithm to comply with standard ELPA notation.
528 cannons_triang_rectangular_impl(U, B, np_rows, np_cols, my_prow, my_pcol, u_desc, b_desc, Res, c_col_comm, c_row_comm);
529}
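// A minimal instantiation sketch (illustration only, not part of ELPA's build system): the including file is
// expected to define the math type and the suffix/BLAS/MPI macros used above before including this template.
// The macro names (ELPA_IMPL_SUFFIX, math_type, C_GEMM, C_LACPY, MPI_MATH_DATATYPE_PRECISION_C) are the ones
// used in this file; the concrete values chosen below (real double precision, suffix d, dgemm_/dlacpy_,
// MPI_DOUBLE) are assumptions.
//
//   #define ELPA_IMPL_SUFFIX d
//   #define math_type double
//   #define C_GEMM dgemm_
//   #define C_LACPY dlacpy_
//   #define MPI_MATH_DATATYPE_PRECISION_C MPI_DOUBLE
//   #include "cannon_back_template.c"
//
// After such an inclusion, cannons_triang_rectangular_d and the C interface wrapper
// cannons_triang_rectangular_c_d would be defined.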
530