Yet Another eXchange Tool 0.11.3
Loading...
Searching...
No Matches
xt_mpi_ddt_cache.c
Go to the documentation of this file.
1
12/*
13 * Keywords:
14 * Maintainer: Jörg Behrens <behrens@dkrz.de>
15 * Moritz Hanke <hanke@dkrz.de>
16 * Thomas Jahns <jahns@dkrz.de>
17 * URL: https://dkrz-sw.gitlab-pages.dkrz.de/yaxt/
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are
21 * met:
22 *
23 * Redistributions of source code must retain the above copyright notice,
24 * this list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in the
28 * documentation and/or other materials provided with the distribution.
29 *
30 * Neither the name of the DKRZ GmbH nor the names of its contributors
31 * may be used to endorse or promote products derived from this software
32 * without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
35 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
36 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
37 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
38 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
39 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
40 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
41 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
42 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
43 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
44 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 */
46#ifdef HAVE_CONFIG_H
47#include "config.h"
48#endif
49
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
53
54#include <mpi.h>
55
56#include "xt/xt_mpi.h"
57#include "xt_arithmetic_util.h"
58#include "core/cksum.h"
59#include "core/core.h"
60#include "core/ppm_xfuncs.h"
61#include "xt_mpi_ddt_wrap.h"
62#include "xt_mpi_ddt_cache.h"
63
64#if ! HAVE_DECL___BUILTIN_CLZL \
65 && (HAVE_DECL___LZCNT && SIZEOF_LONG == SIZEOF_INT \
66 || HAVE_DECL___LZCNT64 && SIZEOF_LONG == 8 && CHAR_BIT == 8)
67#include <intrin.h>
68#endif
69
70
72 int count;
73 MPI_Datatype oldtype;
74};
75
78 MPI_Datatype oldtype;
79};
80
83 MPI_Aint stride;
84 MPI_Datatype oldtype;
85};
86
89 uint32_t disp_hash;
90 MPI_Datatype oldtype;
91};
92
94 int count;
96 MPI_Datatype oldtype;
97};
98
103
116
117static struct Xt_mpiddt_list_entry *
119{
120 size_t size_entries = ddt_list->size_entries;
121 ddt_list->size_entries = size_entries = size_entries ? size_entries * 2 : 8;
122 return ddt_list->entries
123 = xrealloc(ddt_list->entries, size_entries * sizeof (*ddt_list->entries));
124}
125
/* Ensure capacity for one more cache entry.  Deliberately unhygienic:
 * on growth it re-assigns a local variable named `entries`, which must
 * be in scope at every expansion site (all acquire functions below
 * declare such a local). */
#define GROW_DDT_LIST(ddt_list) \
  do { \
    if (ddt_list->num_entries == ddt_list->size_entries) \
      entries = grow_ddt_list(ddt_list); \
  } while (0)
131
132static inline void
133free_dt_unless_named(MPI_Datatype *dt, MPI_Comm comm)
134{
135 int num_integers, num_addresses, num_datatypes, combiner;
136 xt_mpi_call(MPI_Type_get_envelope(*dt, &num_integers,
137 &num_addresses, &num_datatypes, &combiner), comm);
138 if (combiner != MPI_COMBINER_NAMED)
139 xt_mpi_call(MPI_Type_free(dt), comm);
140}
141
142
143MPI_Datatype
145 struct Xt_mpiddt_list *ddt_list,
146 int count, MPI_Datatype oldtype,
147 MPI_Comm comm)
148{
149 MPI_Datatype dt;
150 if (ddt_list) {
151 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
152 size_t num_entries = ddt_list->num_entries;
153 for (size_t i = 0; i < num_entries; ++i)
154 if (entries[i].combiner == MPI_COMBINER_CONTIGUOUS) {
155 struct Xt_mpi_contiguous_arg_desc *args
156 = &entries[i].args.contiguous;
157 if (args->count == count && args->oldtype == oldtype) {
158 entries[i].use_count += 1;
159 dt = entries[i].cached_dt;
160 goto dt_is_set;
161 }
162 }
163 GROW_DDT_LIST(ddt_list);
165 entries[num_entries] = (struct Xt_mpiddt_list_entry){
166 .args.contiguous = (struct Xt_mpi_contiguous_arg_desc){
167 .count = count, .oldtype = oldtype,
168 },
169 .cached_dt = dt,
170 .use_count = 1,
171 .combiner = MPI_COMBINER_CONTIGUOUS,
172 };
173 ddt_list->num_entries = num_entries + 1;
174 } else {
175 xt_mpi_call(MPI_Type_contiguous(count, oldtype, &dt), comm);
176 }
177dt_is_set:
178 return dt;
179}
180
181MPI_Datatype
183 struct Xt_mpiddt_list *ddt_list,
184 int count, int blocklength, int stride, MPI_Datatype oldtype,
185 MPI_Comm comm)
186{
187 MPI_Datatype dt;
188 if (ddt_list) {
189 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
190 size_t num_entries = ddt_list->num_entries;
191 for (size_t i = 0; i < num_entries; ++i)
192 if (entries[i].combiner == MPI_COMBINER_VECTOR) {
193 struct Xt_mpi_vector_arg_desc *args = &entries[i].args.vector;
194 if (args->count == count && args->blocklength == blocklength
195 && args->oldtype == oldtype && args->stride == stride) {
196 entries[i].use_count += 1;
197 dt = entries[i].cached_dt;
198 goto dt_is_set;
199 }
200 }
201 GROW_DDT_LIST(ddt_list);
203 comm);
204 entries[num_entries] = (struct Xt_mpiddt_list_entry){
205 .args.vector = (struct Xt_mpi_vector_arg_desc){
206 .count = count, .blocklength = blocklength,
207 .stride = stride, .oldtype = oldtype,
208 },
209 .cached_dt = dt,
210 .use_count = 1,
211 .combiner = MPI_COMBINER_VECTOR,
212 };
213 ddt_list->num_entries = num_entries + 1;
214 } else {
215 xt_mpi_call(MPI_Type_vector(count, blocklength, stride, oldtype, &dt),
216 comm);
217 }
218dt_is_set:
219 return dt;
220}
221
222MPI_Datatype
224 struct Xt_mpiddt_list *ddt_list,
225 int count, int blocklength, MPI_Aint stride, MPI_Datatype oldtype,
226 MPI_Comm comm)
227{
228 MPI_Datatype dt;
229 if (ddt_list) {
230 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
231 size_t num_entries = ddt_list->num_entries;
232 for (size_t i = 0; i < num_entries; ++i)
233 if (entries[i].combiner == MPI_COMBINER_HVECTOR) {
234 struct Xt_mpi_hvector_arg_desc *args = &entries[i].args.hvector;
235 if (args->count == count && args->blocklength == blocklength
236 && args->oldtype == oldtype && args->stride == stride) {
237 entries[i].use_count += 1;
238 dt = entries[i].cached_dt;
239 goto dt_is_set;
240 }
241 }
242 GROW_DDT_LIST(ddt_list);
244 oldtype, &dt), comm);
245 entries[num_entries] = (struct Xt_mpiddt_list_entry){
246 .args.hvector = (struct Xt_mpi_hvector_arg_desc){
247 .count = count, .blocklength = blocklength,
248 .stride = stride, .oldtype = oldtype,
249 },
250 .cached_dt = dt,
251 .use_count = 1,
252 .combiner = MPI_COMBINER_HVECTOR,
253 };
254 ddt_list->num_entries = num_entries + 1;
255 } else {
256 xt_mpi_call(MPI_Type_create_hvector(count, blocklength, stride,
257 oldtype, &dt), comm);
258 }
259dt_is_set:
260 return dt;
261}
262
263MPI_Datatype
265 struct Xt_mpiddt_list *ddt_list,
266 int count, int blocklength, const int disp[count], MPI_Datatype oldtype,
267 MPI_Comm comm)
268{
269 MPI_Datatype dt;
270 if (ddt_list) {
271 size_t disp_size = (count > 0 ? (size_t)count : (size_t)0) * sizeof (*disp);
272 uint32_t disp_hash = Xt_memcrc((const void *)disp, disp_size);
273 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
274 size_t num_entries = ddt_list->num_entries;
275 int *disp_cmp = NULL;
276 for (size_t i = 0; i < num_entries; ++i)
277 if (entries[i].combiner == MPI_COMBINER_INDEXED_BLOCK) {
279 = &entries[i].args.indexed_block;
280 if (args->count == count && args->blocklength == blocklength
281 && args->disp_hash == disp_hash && args->oldtype == oldtype) {
282 if (!disp_cmp)
283 disp_cmp = xmalloc(disp_size + 2 * sizeof (int));
284 MPI_Datatype cached_dt = entries[i].cached_dt, oldtype_;
285 xt_mpi_call(MPI_Type_get_contents(cached_dt, count + 2, 0, 1,
286 disp_cmp, NULL, &oldtype_), comm);
287 free_dt_unless_named(&oldtype_, comm);
288 if (memcmp(disp, disp_cmp+2, disp_size))
289 continue;
290 entries[i].use_count += 1;
291 dt = cached_dt;
292 goto dt_is_set;
293 }
294 }
295 GROW_DDT_LIST(ddt_list);
297 disp, oldtype, &dt, comm);
298 entries[num_entries] = (struct Xt_mpiddt_list_entry){
299 .args.indexed_block = (struct Xt_mpi_indexed_block_arg_desc){
300 .count = count, .blocklength = blocklength,
301 .disp_hash = disp_hash, .oldtype = oldtype,
302 },
303 .cached_dt = dt,
304 .use_count = 1,
305 .combiner = MPI_COMBINER_INDEXED_BLOCK,
306 };
307 ddt_list->num_entries = num_entries + 1;
308 dt_is_set:
309 free(disp_cmp);
310 } else {
311 Xt_Type_create_indexed_block(count, blocklength,
312 disp, oldtype, &dt, comm);
313 }
314 return dt;
315}
316
317MPI_Datatype
319 struct Xt_mpiddt_list *ddt_list,
320 int count, int blocklength, const MPI_Aint disp[count], MPI_Datatype oldtype,
321 MPI_Comm comm)
322{
323 MPI_Datatype dt;
324 if (ddt_list) {
325 size_t count_ = count > 0 ? (size_t)count : (size_t)0,
326 disp_size = count_ * sizeof (*disp);
327 uint32_t disp_hash = Xt_memcrc((const void *)disp, disp_size);
328 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
329 size_t num_entries = ddt_list->num_entries;
330 MPI_Aint *disp_cmp = NULL;
331 for (size_t i = 0; i < num_entries; ++i)
332 if (entries[i].combiner == MPI_COMBINER_HINDEXED_BLOCK) {
334 = &entries[i].args.indexed_block;
335 if (args->count == count && args->blocklength == blocklength
336 && args->disp_hash == disp_hash && args->oldtype == oldtype) {
337#if MPI_VERSION < 3
338#define disp_size (disp_size + (count_ + 2) * sizeof (int))
339#endif
340 if (!disp_cmp)
341 disp_cmp = xmalloc(disp_size);
342 MPI_Datatype cached_dt = entries[i].cached_dt, oldtype_;
343#if MPI_VERSION >= 3
344 int icmp[2];
345#else
346#undef disp_size
347 int *icmp = (void *)(disp_cmp + count_);
348#endif
349 xt_mpi_call(MPI_Type_get_contents(cached_dt, 2 + count, count, 1,
350 icmp, disp_cmp, &oldtype_), comm);
351 free_dt_unless_named(&oldtype_, comm);
352 if (memcmp(disp, disp_cmp, disp_size))
353 continue;
354 entries[i].use_count += 1;
355 dt = cached_dt;
356 goto dt_is_set;
357 }
358 }
359 GROW_DDT_LIST(ddt_list);
361 disp, oldtype, &dt, comm);
362 entries[num_entries] = (struct Xt_mpiddt_list_entry){
363 .args.indexed_block = (struct Xt_mpi_indexed_block_arg_desc){
364 .count = count, .blocklength = blocklength,
365 .disp_hash = disp_hash, .oldtype = oldtype,
366 },
367 .cached_dt = dt,
368 .use_count = 1,
369 .combiner = MPI_COMBINER_HINDEXED_BLOCK,
370 };
371 ddt_list->num_entries = num_entries + 1;
372 dt_is_set:
373 free(disp_cmp);
374 } else {
375 Xt_Type_create_hindexed_block(count, blocklength,
376 disp, oldtype, &dt, comm);
377 }
378 return dt;
379}
380
381
382MPI_Datatype
384 struct Xt_mpiddt_list *ddt_list,
385 int count, const int blocklength[count], const int disp[count],
386 MPI_Datatype oldtype, MPI_Comm comm)
387{
388 MPI_Datatype dt;
389 if (ddt_list) {
390 size_t asize = (count > 0 ? (size_t)count : (size_t)0) * sizeof (int);
391 uint32_t disp_hash = Xt_memcrc((const void *)disp, asize),
392 blocklength_hash = Xt_memcrc((const void *)blocklength, asize);
393 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
394 size_t num_entries = ddt_list->num_entries;
395 int *acmp = NULL;
396 for (size_t i = 0; i < num_entries; ++i)
397 if (entries[i].combiner == MPI_COMBINER_INDEXED) {
398 struct Xt_mpi_indexed_arg_desc *args
399 = &entries[i].args.indexed;
400 if (args->count == count && args->blocklength_hash == blocklength_hash
401 && args->disp_hash == disp_hash && args->oldtype == oldtype) {
402 if (!acmp)
403 acmp = xmalloc(2 * asize + sizeof (int));
404 MPI_Datatype cached_dt = entries[i].cached_dt, oldtype_;
405 xt_mpi_call(MPI_Type_get_contents(cached_dt, 2 * count + 1, 0, 1,
406 acmp, NULL, &oldtype_), comm);
407 free_dt_unless_named(&oldtype_, comm);
408 if (memcmp(blocklength, acmp+1, asize)
409 || memcmp(disp, acmp+count+1, asize))
410 continue;
411 entries[i].use_count += 1;
412 dt = cached_dt;
413 goto dt_is_set;
414 }
415 }
416 GROW_DDT_LIST(ddt_list);
417 Xt_Type_indexed(count, blocklength,
418 disp, oldtype, &dt, comm);
419 entries[num_entries] = (struct Xt_mpiddt_list_entry){
420 .args.indexed = (struct Xt_mpi_indexed_arg_desc){
421 .count = count, .blocklength_hash = blocklength_hash,
422 .disp_hash = disp_hash, .oldtype = oldtype,
423 },
424 .cached_dt = dt,
425 .use_count = 1,
426 .combiner = MPI_COMBINER_INDEXED,
427 };
428 ddt_list->num_entries = num_entries + 1;
429 dt_is_set:
430 free(acmp);
431 } else {
432 Xt_Type_indexed(count, blocklength, disp, oldtype, &dt, comm);
433 }
434 return dt;
435}
436
437MPI_Datatype
439 struct Xt_mpiddt_list *ddt_list,
440 int count, const int blocklength[count], const MPI_Aint disp[count],
441 MPI_Datatype oldtype, MPI_Comm comm)
442{
443 MPI_Datatype dt;
444 if (ddt_list) {
445 size_t count_ = count > 0 ? (size_t)count : (size_t)0,
446 disp_size = count_ * sizeof (*disp),
447 blocklength_size = count_ * sizeof (*blocklength);
448 uint32_t disp_hash = Xt_memcrc((const void *)disp, disp_size),
449 blocklength_hash = Xt_memcrc((const void *)blocklength, blocklength_size);
450 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
451 size_t num_entries = ddt_list->num_entries;
452 MPI_Aint *disp_cmp = NULL;
453 for (size_t i = 0; i < num_entries; ++i)
454 if (entries[i].combiner == MPI_COMBINER_HINDEXED) {
455 struct Xt_mpi_indexed_arg_desc *args
456 = &entries[i].args.indexed;
457 if (args->count == count && args->blocklength_hash == blocklength_hash
458 && args->disp_hash == disp_hash && args->oldtype == oldtype) {
459 if (!disp_cmp)
460 disp_cmp = xmalloc(sizeof (int) + blocklength_size + disp_size);
461 MPI_Datatype cached_dt = entries[i].cached_dt, oldtype_;
462 xt_mpi_call(MPI_Type_get_contents(
463 cached_dt, count + 1, count, 1,
464 (void *)(disp_cmp+count_), disp_cmp, &oldtype_), comm);
465 free_dt_unless_named(&oldtype_, comm);
466 if (memcmp(blocklength, disp_cmp+count_+1, blocklength_size)
467 || memcmp(disp, disp_cmp, disp_size))
468 continue;
469 entries[i].use_count += 1;
470 dt = cached_dt;
471 goto dt_is_set;
472 }
473 }
474 GROW_DDT_LIST(ddt_list);
475 Xt_Type_create_hindexed(count, blocklength,
476 disp, oldtype, &dt, comm);
477 entries[num_entries] = (struct Xt_mpiddt_list_entry){
478 .args.indexed = (struct Xt_mpi_indexed_arg_desc){
479 .count = count, .blocklength_hash = blocklength_hash,
480 .disp_hash = disp_hash, .oldtype = oldtype,
481 },
482 .cached_dt = dt,
483 .use_count = 1,
484 .combiner = MPI_COMBINER_HINDEXED,
485 };
486 ddt_list->num_entries = num_entries + 1;
487 dt_is_set:
488 free(disp_cmp);
489 } else {
490 Xt_Type_create_hindexed(count, blocklength,
491 disp, oldtype, &dt, comm);
492 }
493 return dt;
494}
495
496
497MPI_Datatype
499 struct Xt_mpiddt_list *ddt_list,
500 int count, const int blocklength[count],
501 const MPI_Aint disp[count],
502 const MPI_Datatype oldtype[count], MPI_Comm comm)
503{
504 MPI_Datatype dt;
505 if (ddt_list) {
506 size_t count_ = (count > 0 ? (size_t)count : (size_t)0),
507 disp_size = count_ * sizeof (disp[0]),
508 blocklength_size = count_ * sizeof (blocklength[0]),
509 oldtype_size = count_ * sizeof (oldtype[0]);
510 uint32_t disp_hash = Xt_memcrc((const void *)disp, disp_size),
511 blocklength_hash = Xt_memcrc((const void *)blocklength, blocklength_size),
512 oldtype_hash = Xt_memcrc((const void *)oldtype, oldtype_size);
513 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
514 size_t num_entries = ddt_list->num_entries;
515 MPI_Aint *disp_cmp = NULL;
516 MPI_Datatype *oldtype_contents = NULL,
517 *oldtype_cmp = ddt_list->struct_dt;
518 int *icmp = NULL;
519 for (size_t i = 0; i < num_entries; ++i)
520 if (entries[i].combiner == MPI_COMBINER_STRUCT) {
521 struct Xt_mpi_struct_arg_desc *args
522 = &entries[i].args.struct_dt;
523 if (args->count == count && args->blocklength_hash == blocklength_hash
524 && args->disp_hash == disp_hash
525 && args->oldtype_hash == oldtype_hash) {
526 if (!disp_cmp) {
527 disp_cmp = xmalloc(disp_size + oldtype_size
528 + sizeof (int) + blocklength_size);
529 oldtype_contents = (void *)(disp_cmp + count_);
530 icmp = (void *)(oldtype_contents + count_);
531 }
532 MPI_Datatype cached_dt = entries[i].cached_dt;
533 xt_mpi_call(MPI_Type_get_contents(cached_dt, count+1, count, count,
534 icmp, disp_cmp, oldtype_contents), comm);
535 int oldtypes_mismatch = memcmp(oldtype, oldtype_cmp, oldtype_size);
536 for (size_t j = 0; j < count_; ++j)
537 free_dt_unless_named(oldtype_contents+j, comm);
538 assert(icmp[0] == count);
539 if (!oldtypes_mismatch && !memcmp(blocklength, icmp+1, blocklength_size)
540 && !memcmp(disp, disp_cmp, disp_size)) {
541 entries[i].use_count += 1;
542 dt = cached_dt;
543 goto dt_is_set;
544 }
545 }
546 oldtype_cmp += args->count;
547 }
548 GROW_DDT_LIST(ddt_list);
549 size_t struct_dt_size = (size_t)(oldtype_cmp - ddt_list->struct_dt),
550 struct_dt_size_p2 = next_2_pow(struct_dt_size),
551 struct_dt_needed = struct_dt_size + count_ + (oldtype_cmp == ddt_list->struct_dt);
552 if (struct_dt_needed > struct_dt_size_p2) {
553 ddt_list->struct_dt
554 = xrealloc(ddt_list->struct_dt,
555 next_2_pow(struct_dt_needed) * sizeof (*oldtype_cmp));
556 oldtype_cmp = ddt_list->struct_dt + struct_dt_size;
557 }
558 memcpy(oldtype_cmp, oldtype, oldtype_size);
559 Xt_Type_create_struct(count, blocklength, disp, oldtype, &dt, comm);
560 entries[num_entries] = (struct Xt_mpiddt_list_entry){
561 .args.struct_dt = (struct Xt_mpi_struct_arg_desc){
562 .count = count, .blocklength_hash = blocklength_hash,
563 .disp_hash = disp_hash, .oldtype_hash = oldtype_hash,
564 },
565 .cached_dt = dt,
566 .use_count = 1,
567 .combiner = MPI_COMBINER_STRUCT,
568 };
569 ddt_list->num_entries = num_entries + 1;
570 dt_is_set:
571 free(disp_cmp);
572 } else {
573 Xt_Type_create_struct(count, blocklength, disp, oldtype, &dt, comm);
574 }
575 return dt;
576}
577
578void
580 MPI_Datatype *dt, MPI_Comm comm)
581{
582 if (ddt_list) {
583 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
584 size_t num_entries = ddt_list->num_entries;
585 MPI_Datatype dt_ = *dt;
586 for (size_t i = 0; i < num_entries; ++i)
587 if (entries[i].cached_dt == dt_) {
588#ifndef NDEBUG
589 int new_use_count =
590#endif
591 --entries[i].use_count;
592 assert(new_use_count >= 0);
597 *dt = MPI_DATATYPE_NULL;
598 return;
599 }
600 }
601 xt_mpi_call(MPI_Type_free(dt), comm);
602}
603
604void
606 MPI_Comm comm)
607{
608 if (ddt_list) ; else return;
609 struct Xt_mpiddt_list_entry *restrict entries = ddt_list->entries;
610 size_t num_entries = ddt_list->num_entries;
611 for (size_t i = 0; i < num_entries; ++i)
612 if (!entries[i].use_count)
613 xt_mpi_call(MPI_Type_free(&entries[i].cached_dt), comm);
614 free(entries);
615 free(ddt_list->struct_dt);
616 ddt_list->struct_dt = NULL;
617 ddt_list->entries = NULL;
618 ddt_list->num_entries = 0;
619 ddt_list->size_entries = 0;
620}
621
622void
624 size_t nmsg,
625 struct Xt_redist_msg msgs[nmsg])
626{
627 int world_rank;
628 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
629 for (size_t i = 0, n = ddt_list->num_entries; i < n; ++i)
630 if (ddt_list->entries[i].use_count) {
631 MPI_Datatype cached_dt = ddt_list->entries[i].cached_dt;
632 for (size_t j = 0; j < nmsg; ++j)
633 if (msgs[j].datatype == cached_dt)
634 goto use_count_is_fine;
635 char buf[256];
636 sprintf(buf, "%d: cache inconsistency: In-use marked datatype "
637 "encountered that is not in any mesage!\n", world_rank);
638 Xt_abort(Xt_default_comm, buf, "xt_mpi_ddt_cache.c", __LINE__);
639 use_count_is_fine:
640 ;
641 }
642}
643
644/*
645 * Local Variables:
646 * c-basic-offset: 2
647 * coding: utf-8
648 * indent-tabs-mode: nil
649 * show-trailing-whitespace: t
650 * require-trailing-newline: t
651 * End:
652 */
@ MPI_COMM_WORLD
Definition core.h:73
int MPI_Comm
Definition core.h:64
add versions of standard API functions not returning on error
#define xrealloc(ptr, size)
Definition ppm_xfuncs.h:71
#define xmalloc(size)
Definition ppm_xfuncs.h:70
struct Xt_mpi_contiguous_arg_desc contiguous
int use_count
struct Xt_mpi_struct_arg_desc struct_dt
struct Xt_mpi_vector_arg_desc vector
struct Xt_mpi_indexed_block_arg_desc indexed_block
MPI_Datatype cached_dt
union Xt_mpiddt_list_entry::@17 args
int combiner
struct Xt_mpi_indexed_arg_desc indexed
struct Xt_mpi_hvector_arg_desc hvector
struct Xt_mpiddt_list_entry * entries
MPI_Datatype * struct_dt
int MPI_Type_create_hvector(int count, int blocklength, MPI_Aint stride, MPI_Datatype oldtype, MPI_Datatype *newtype)
int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype)
int MPI_Type_free(MPI_Datatype *datatype)
int MPI_Type_vector(int count, int blocklength, int stride, MPI_Datatype oldtype, MPI_Datatype *newtype)
static size_t next_2_pow(size_t v)
utility routines for MPI
#define xt_mpi_call(call, comm)
Definition xt_mpi.h:68
MPI_Datatype Xt_mpi_ddt_cache_acquire_hindexed(struct Xt_mpiddt_list *ddt_list, int count, const int blocklength[count], const MPI_Aint disp[count], MPI_Datatype oldtype, MPI_Comm comm)
void Xt_mpi_ddt_cache_free(struct Xt_mpiddt_list *ddt_list, MPI_Comm comm)
MPI_Datatype Xt_mpi_ddt_cache_acquire_indexed(struct Xt_mpiddt_list *ddt_list, int count, const int blocklength[count], const int disp[count], MPI_Datatype oldtype, MPI_Comm comm)
#define GROW_DDT_LIST(ddt_list)
void Xt_mpi_ddt_cache_check_retention(struct Xt_mpiddt_list *ddt_list, size_t nmsg, struct Xt_redist_msg msgs[nmsg])
MPI_Datatype Xt_mpi_ddt_cache_acquire_hvector(struct Xt_mpiddt_list *ddt_list, int count, int blocklength, MPI_Aint stride, MPI_Datatype oldtype, MPI_Comm comm)
static struct Xt_mpiddt_list_entry * grow_ddt_list(struct Xt_mpiddt_list *ddt_list)
#define disp_size
MPI_Datatype Xt_mpi_ddt_cache_acquire_indexed_block(struct Xt_mpiddt_list *ddt_list, int count, int blocklength, const int disp[count], MPI_Datatype oldtype, MPI_Comm comm)
MPI_Datatype Xt_mpi_ddt_cache_acquire_vector(struct Xt_mpiddt_list *ddt_list, int count, int blocklength, int stride, MPI_Datatype oldtype, MPI_Comm comm)
MPI_Datatype Xt_mpi_ddt_cache_acquire_hindexed_block(struct Xt_mpiddt_list *ddt_list, int count, int blocklength, const MPI_Aint disp[count], MPI_Datatype oldtype, MPI_Comm comm)
MPI_Datatype Xt_mpi_ddt_cache_acquire_contiguous(struct Xt_mpiddt_list *ddt_list, int count, MPI_Datatype oldtype, MPI_Comm comm)
MPI_Datatype Xt_mpi_ddt_cache_acquire_struct(struct Xt_mpiddt_list *ddt_list, int count, const int blocklength[count], const MPI_Aint disp[count], const MPI_Datatype oldtype[count], MPI_Comm comm)
void Xt_mpi_ddt_cache_entry_release(struct Xt_mpiddt_list *ddt_list, MPI_Datatype *dt, MPI_Comm comm)
static void free_dt_unless_named(MPI_Datatype *dt, MPI_Comm comm)
#define Xt_Type_create_struct(count, array_of_blocklengths, array_of_displacements, array_of_types, newtype, comm)
@ MPI_COMBINER_HINDEXED_BLOCK
#define Xt_Type_create_indexed_block(count, blocklength, disp, oldtype, newtype, comm)
#define Xt_Type_create_hindexed_block(count, blocklength, array_of_displacements, oldtype, newtype, comm)
#define Xt_Type_indexed(count, blocklength, disp, oldtype, newtype, comm)
#define Xt_Type_create_hindexed(count, blocklength, disp, oldtype, newtype, comm)