// NOTE(review): The lines below were Doxygen page chrome ("Kokkos Core
// Kernels Package Version of the Day" — generated documentation page for
// Kokkos_UnorderedMap.hpp: "Loading... / Searching... / No Matches / Go to
// the documentation of this file"). They are HTML-extraction residue, not
// source code, and are preserved here only as a comment.
1//@HEADER
2// ************************************************************************
3//
4// Kokkos v. 4.0
5// Copyright (2022) National Technology & Engineering
6// Solutions of Sandia, LLC (NTESS).
7//
8// Under the terms of Contract DE-NA0003525 with NTESS,
9// the U.S. Government retains certain rights in this software.
10//
11// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
12// See https://kokkos.org/LICENSE for license information.
13// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
14//
15//@HEADER
16
22
23#ifndef KOKKOS_UNORDERED_MAP_HPP
24#define KOKKOS_UNORDERED_MAP_HPP
25#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
26#define KOKKOS_IMPL_PUBLIC_INCLUDE
27#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
28#endif
29
30#include <Kokkos_Core.hpp>
31#include <Kokkos_Functional.hpp>
32
33#include <Kokkos_Bitset.hpp>
34
35#include <impl/Kokkos_Traits.hpp>
36#include <impl/Kokkos_UnorderedMap_impl.hpp>
37
38#include <iostream>
39
40#include <cstdint>
41
42namespace Kokkos {
43
44enum : unsigned { UnorderedMapInvalidIndex = ~0u };
45
59
60class UnorderedMapInsertResult {
61 private:
62 enum Status : uint32_t {
63 SUCCESS = 1u << 31,
64 EXISTING = 1u << 30,
65 FREED_EXISTING = 1u << 29,
66 LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
67 };
68
69 public:
71 KOKKOS_FORCEINLINE_FUNCTION
72 bool success() const { return (m_status & SUCCESS); }
73
75 KOKKOS_FORCEINLINE_FUNCTION
76 bool existing() const { return (m_status & EXISTING); }
77
79 KOKKOS_FORCEINLINE_FUNCTION
80 bool failed() const { return m_index == UnorderedMapInvalidIndex; }
81
84 KOKKOS_FORCEINLINE_FUNCTION
85 bool freed_existing() const { return (m_status & FREED_EXISTING); }
86
89 KOKKOS_FORCEINLINE_FUNCTION
90 uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }
91
93 KOKKOS_FORCEINLINE_FUNCTION
94 uint32_t index() const { return m_index; }
95
96 KOKKOS_FORCEINLINE_FUNCTION
97 UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}
98
99 KOKKOS_FORCEINLINE_FUNCTION
100 void increment_list_position() {
101 m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
102 }
103
104 KOKKOS_FORCEINLINE_FUNCTION
105 void set_existing(uint32_t i, bool arg_freed_existing) {
106 m_index = i;
107 m_status =
108 EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
109 }
110
111 KOKKOS_FORCEINLINE_FUNCTION
112 void set_success(uint32_t i) {
113 m_index = i;
114 m_status = SUCCESS | list_position();
115 }
116
117 private:
118 uint32_t m_index;
119 uint32_t m_status;
120};
121
136template <class ValueTypeView, class ValuesIdxType>
138 using value_type = typename ValueTypeView::non_const_value_type;
139 struct NoOp {
140 KOKKOS_FUNCTION
141 void op(ValueTypeView, ValuesIdxType, const value_type) const {}
142 };
143 struct AtomicAdd {
144 KOKKOS_FUNCTION
145 void op(ValueTypeView values, ValuesIdxType values_idx,
146 const value_type v) const {
147 Kokkos::atomic_add(values.data() + values_idx, v);
148 }
149 };
150};
151
207template <typename Key, typename Value,
208 typename Device = Kokkos::DefaultExecutionSpace,
209 typename Hasher = pod_hash<std::remove_const_t<Key>>,
210 typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
211class UnorderedMap {
212 private:
213 using host_mirror_space =
214 typename ViewTraits<Key, Device, void, void>::host_mirror_space;
215
216 public:
218
219 // key_types
220 using declared_key_type = Key;
221 using key_type = std::remove_const_t<declared_key_type>;
222 using const_key_type = std::add_const_t<key_type>;
223
224 // value_types
225 using declared_value_type = Value;
226 using value_type = std::remove_const_t<declared_value_type>;
227 using const_value_type = std::add_const_t<value_type>;
228
229 using device_type = Device;
230 using execution_space = typename Device::execution_space;
231 using hasher_type = Hasher;
232 using equal_to_type = EqualTo;
233 using size_type = uint32_t;
234
235 // map_types
236 using declared_map_type =
237 UnorderedMap<declared_key_type, declared_value_type, device_type,
238 hasher_type, equal_to_type>;
239 using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
240 hasher_type, equal_to_type>;
241 using modifiable_map_type =
242 UnorderedMap<const_key_type, value_type, device_type, hasher_type,
243 equal_to_type>;
244 using const_map_type = UnorderedMap<const_key_type, const_value_type,
245 device_type, hasher_type, equal_to_type>;
246
247 static const bool is_set = std::is_void<value_type>::value;
248 static const bool has_const_key =
249 std::is_same<const_key_type, declared_key_type>::value;
250 static const bool has_const_value =
251 is_set || std::is_same<const_value_type, declared_value_type>::value;
252
253 static const bool is_insertable_map =
254 !has_const_key && (is_set || !has_const_value);
255 static const bool is_modifiable_map = has_const_key && !has_const_value;
256 static const bool is_const_map = has_const_key && has_const_value;
257
258 using insert_result = UnorderedMapInsertResult;
259
260 using HostMirror =
261 UnorderedMap<Key, Value, host_mirror_space, Hasher, EqualTo>;
262
263 using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
265
266 private:
267 enum : size_type { invalid_index = ~static_cast<size_type>(0) };
268
269 using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;
270
271 using key_type_view = std::conditional_t<
272 is_insertable_map, View<key_type *, device_type>,
274
275 using value_type_view = std::conditional_t<
276 is_insertable_map || is_modifiable_map,
279
280 using size_type_view = std::conditional_t<
281 is_insertable_map, View<size_type *, device_type>,
283
284 using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
286
287 enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
288 enum { num_scalars = 3 };
290
291 public:
293
294 using default_op_type =
295 typename UnorderedMapInsertOpTypes<value_type_view, uint32_t>::NoOp;
296
305 UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
306 equal_to_type equal_to = equal_to_type())
307 : m_bounded_insert(true),
308 m_hasher(hasher),
309 m_equal_to(equal_to),
310 m_size("UnorderedMap size"),
311 m_available_indexes(calculate_capacity(capacity_hint)),
312 m_hash_lists(view_alloc(WithoutInitializing, "UnorderedMap hash list"),
313 Impl::find_hash_size(capacity())),
314 m_next_index(view_alloc(WithoutInitializing, "UnorderedMap next index"),
315 capacity() + 1) // +1 so that the *_at functions can
316 // always return a valid reference
317 ,
318 m_keys("UnorderedMap keys", capacity()),
319 m_values("UnorderedMap values", (is_set ? 0 : capacity())),
320 m_scalars("UnorderedMap scalars") {
321 if (!is_insertable_map) {
322 Kokkos::Impl::throw_runtime_exception(
323 "Cannot construct a non-insertable (i.e. const key_type) "
324 "unordered_map");
325 }
326
327 Kokkos::deep_copy(m_hash_lists, invalid_index);
328 Kokkos::deep_copy(m_next_index, invalid_index);
329 }
330
331 void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
332
333 histogram_type get_histogram() { return histogram_type(*this); }
334
336 void clear() {
337 m_bounded_insert = true;
338
339 if (capacity() == 0) return;
340
341 m_available_indexes.clear();
342
343 Kokkos::deep_copy(m_hash_lists, invalid_index);
344 Kokkos::deep_copy(m_next_index, invalid_index);
345 {
346 const key_type tmp = key_type();
347 Kokkos::deep_copy(m_keys, tmp);
348 }
349 Kokkos::deep_copy(m_scalars, 0);
350 m_size() = 0;
351 }
352
353 KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
354 return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
355 m_scalars.is_allocated());
356 }
357
368 bool rehash(size_type requested_capacity = 0) {
369 const bool bounded_insert = (capacity() == 0) || (size() == 0u);
370 return rehash(requested_capacity, bounded_insert);
371 }
372
373 bool rehash(size_type requested_capacity, bool bounded_insert) {
374 if (!is_insertable_map) return false;
375
376 const size_type curr_size = size();
377 requested_capacity =
378 (requested_capacity < curr_size) ? curr_size : requested_capacity;
379
380 insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
381
382 if (curr_size) {
383 tmp.m_bounded_insert = false;
384 Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
385 f.apply();
386 }
387 tmp.m_bounded_insert = bounded_insert;
388
389 *this = tmp;
390
391 return true;
392 }
393
401 size_type size() const {
402 if (capacity() == 0u) return 0u;
403 if (modified()) {
404 m_size() = m_available_indexes.count();
405 reset_flag(modified_idx);
406 }
407 return m_size();
408 }
409
415 bool failed_insert() const { return get_flag(failed_insert_idx); }
416
417 bool erasable() const {
418 return is_insertable_map ? get_flag(erasable_idx) : false;
419 }
420
421 bool begin_erase() {
422 bool result = !erasable();
423 if (is_insertable_map && result) {
424 execution_space().fence(
425 "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
426 "flag");
427 set_flag(erasable_idx);
428 }
429 return result;
430 }
431
432 bool end_erase() {
433 bool result = erasable();
434 if (is_insertable_map && result) {
435 execution_space().fence(
436 "Kokkos::UnorderedMap::end_erase: fence before erasing");
437 Impl::UnorderedMapErase<declared_map_type> f(*this);
438 f.apply();
439 execution_space().fence(
440 "Kokkos::UnorderedMap::end_erase: fence after erasing");
441 reset_flag(erasable_idx);
442 }
443 return result;
444 }
445
450 KOKKOS_FORCEINLINE_FUNCTION
451 size_type capacity() const { return m_available_indexes.size(); }
452
463 KOKKOS_INLINE_FUNCTION
464 size_type hash_capacity() const { return m_hash_lists.extent(0); }
465
466 //---------------------------------------------------------------------------
467 //---------------------------------------------------------------------------
468
480 template <typename InsertOpType = default_op_type>
481 KOKKOS_INLINE_FUNCTION insert_result
482 insert(key_type const &k, impl_value_type const &v = impl_value_type(),
483 [[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
484 if constexpr (is_set) {
485 static_assert(std::is_same_v<InsertOpType, default_op_type>,
486 "Insert Operations are not supported on sets.");
487 }
488
489 insert_result result;
490
491 if (!is_insertable_map || capacity() == 0u ||
492 m_scalars((int)erasable_idx)) {
493 return result;
494 }
495
496 if (!m_scalars((int)modified_idx)) {
497 m_scalars((int)modified_idx) = true;
498 }
499
500 int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);
501
502 const size_type hash_value = m_hasher(k);
503 const size_type hash_list = hash_value % m_hash_lists.extent(0);
504
505 size_type *curr_ptr = &m_hash_lists[hash_list];
506 size_type new_index = invalid_index;
507
508 // Force integer multiply to long
509 size_type index_hint = static_cast<size_type>(
510 (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));
511
512 size_type find_attempts = 0;
513
514 enum : unsigned { bounded_find_attempts = 32u };
515 const size_type max_attempts =
516 (m_bounded_insert &&
517 (bounded_find_attempts < m_available_indexes.max_hint()))
518 ? bounded_find_attempts
519 : m_available_indexes.max_hint();
520
521 bool not_done = true;
522
523#if defined(__MIC__)
524#pragma noprefetch
525#endif
526 while (not_done) {
527 // Continue searching the unordered list for this key,
528 // list will only be appended during insert phase.
529 // Need volatile_load as other threads may be appending.
530
531 // FIXME_SYCL replacement for memory_fence
532#ifdef KOKKOS_ENABLE_SYCL
533 size_type curr = Kokkos::atomic_load(curr_ptr);
534#else
535 size_type curr = volatile_load(curr_ptr);
536#endif
537
538 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
539 &m_keys[curr != invalid_index ? curr : 0]);
540#if defined(__MIC__)
541#pragma noprefetch
542#endif
543 while (curr != invalid_index && !m_equal_to(
544#ifdef KOKKOS_ENABLE_SYCL
545 Kokkos::atomic_load(&m_keys[curr])
546#else
547 volatile_load(&m_keys[curr])
548#endif
549 ,
550 k)) {
551 result.increment_list_position();
552 index_hint = curr;
553 curr_ptr = &m_next_index[curr];
554#ifdef KOKKOS_ENABLE_SYCL
555 curr = Kokkos::atomic_load(curr_ptr);
556#else
557 curr = volatile_load(curr_ptr);
558#endif
559 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
560 &m_keys[curr != invalid_index ? curr : 0]);
561 }
562
563 //------------------------------------------------------------
564 // If key already present then return that index.
565 if (curr != invalid_index) {
566 const bool free_existing = new_index != invalid_index;
567 if (free_existing) {
568 // Previously claimed an unused entry that was not inserted.
569 // Release this unused entry immediately.
570 if (!m_available_indexes.reset(new_index)) {
571 KOKKOS_IMPL_DO_NOT_USE_PRINTF("Unable to free existing\n");
572 }
573 }
574
575 result.set_existing(curr, free_existing);
576 if constexpr (!is_set) {
577 arg_insert_op.op(m_values, curr, v);
578 }
579 not_done = false;
580 }
581 //------------------------------------------------------------
582 // Key is not currently in the map.
583 // If the thread has claimed an entry try to insert now.
584 else {
585 //------------------------------------------------------------
586 // If have not already claimed an unused entry then do so now.
587 if (new_index == invalid_index) {
588 bool found = false;
589 // use the hash_list as the flag for the search direction
590 Kokkos::tie(found, index_hint) =
591 m_available_indexes.find_any_unset_near(index_hint, hash_list);
592
593 // found and index and this thread set it
594 if (!found && ++find_attempts >= max_attempts) {
595 failed_insert_ref = true;
596 not_done = false;
597 } else if (m_available_indexes.set(index_hint)) {
598 new_index = index_hint;
599 // Set key and value
600 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
601// FIXME_SYCL replacement for memory_fence
602#ifdef KOKKOS_ENABLE_SYCL
603 Kokkos::atomic_store(&m_keys[new_index], k);
604#else
605 m_keys[new_index] = k;
606#endif
607
608 if (!is_set) {
609 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
610#ifdef KOKKOS_ENABLE_SYCL
611 Kokkos::atomic_store(&m_values[new_index], v);
612#else
613 m_values[new_index] = v;
614#endif
615 }
616
617#ifndef KOKKOS_ENABLE_SYCL
618 // Do not proceed until key and value are updated in global memory
619 memory_fence();
620#endif
621 }
622 } else if (failed_insert_ref) {
623 not_done = false;
624 }
625
626 // Attempt to append claimed entry into the list.
627 // Another thread may also be trying to append the same list so protect
628 // with atomic.
629 if (new_index != invalid_index &&
630 curr == atomic_compare_exchange(
631 curr_ptr, static_cast<size_type>(invalid_index),
632 new_index)) {
633 // Succeeded in appending
634 result.set_success(new_index);
635 not_done = false;
636 }
637 }
638 } // while ( not_done )
639
640 return result;
641 }
642
643 KOKKOS_INLINE_FUNCTION
644 bool erase(key_type const &k) const {
645 bool result = false;
646
647 if (is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {
648 if (!m_scalars((int)modified_idx)) {
649 m_scalars((int)modified_idx) = true;
650 }
651
652 size_type index = find(k);
653 if (valid_at(index)) {
654 m_available_indexes.reset(index);
655 result = true;
656 }
657 }
658
659 return result;
660 }
661
669 KOKKOS_INLINE_FUNCTION
670 size_type find(const key_type &k) const {
671 size_type curr = 0u < capacity()
672 ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
673 : invalid_index;
674
675 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
676 while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
677 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
678 &m_keys[curr != invalid_index ? curr : 0]);
679 curr = m_next_index[curr];
680 }
681
682 return curr;
683 }
684
689 KOKKOS_INLINE_FUNCTION
690 bool exists(const key_type &k) const { return valid_at(find(k)); }
691
700 template <typename Dummy = value_type>
701 KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
702 !std::is_void<Dummy>::value, // !is_set
703 std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
704 value_at(size_type i) const {
705 KOKKOS_EXPECTS(i < capacity());
706 return m_values[i];
707 }
708
715 KOKKOS_FORCEINLINE_FUNCTION
716 key_type key_at(size_type i) const {
717 KOKKOS_EXPECTS(i < capacity());
718 return m_keys[i];
719 }
720
721 KOKKOS_FORCEINLINE_FUNCTION
722 bool valid_at(size_type i) const { return m_available_indexes.test(i); }
723
724 template <typename SKey, typename SValue>
725 UnorderedMap(
726 UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
727 std::enable_if_t<
728 Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
729 SKey, SValue>::value,
730 int> = 0)
731 : m_bounded_insert(src.m_bounded_insert),
732 m_hasher(src.m_hasher),
733 m_equal_to(src.m_equal_to),
734 m_size(src.m_size),
735 m_available_indexes(src.m_available_indexes),
736 m_hash_lists(src.m_hash_lists),
737 m_next_index(src.m_next_index),
738 m_keys(src.m_keys),
739 m_values(src.m_values),
740 m_scalars(src.m_scalars) {}
741
742 template <typename SKey, typename SValue>
743 std::enable_if_t<
744 Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
745 SValue>::value,
746 declared_map_type &>
747 operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
748 m_bounded_insert = src.m_bounded_insert;
749 m_hasher = src.m_hasher;
750 m_equal_to = src.m_equal_to;
751 m_size = src.m_size;
752 m_available_indexes = src.m_available_indexes;
753 m_hash_lists = src.m_hash_lists;
754 m_next_index = src.m_next_index;
755 m_keys = src.m_keys;
756 m_values = src.m_values;
757 m_scalars = src.m_scalars;
758 return *this;
759 }
760
761 template <typename SKey, typename SValue, typename SDevice>
762 std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
763 std::is_same<std::remove_const_t<SValue>, value_type>::value>
764 create_copy_view(
765 UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
766 if (m_hash_lists.data() != src.m_hash_lists.data()) {
767 insertable_map_type tmp;
768
769 tmp.m_bounded_insert = src.m_bounded_insert;
770 tmp.m_hasher = src.m_hasher;
771 tmp.m_equal_to = src.m_equal_to;
772 tmp.m_size() = src.m_size();
773 tmp.m_available_indexes = bitset_type(src.capacity());
774 tmp.m_hash_lists = size_type_view(
775 view_alloc(WithoutInitializing, "UnorderedMap hash list"),
776 src.m_hash_lists.extent(0));
777 tmp.m_next_index = size_type_view(
778 view_alloc(WithoutInitializing, "UnorderedMap next index"),
779 src.m_next_index.extent(0));
780 tmp.m_keys =
781 key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
782 src.m_keys.extent(0));
783 tmp.m_values = value_type_view(
784 view_alloc(WithoutInitializing, "UnorderedMap values"),
785 src.m_values.extent(0));
786 tmp.m_scalars = scalars_view("UnorderedMap scalars");
787
788 Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);
789
790 using raw_deep_copy =
791 Kokkos::Impl::DeepCopy<typename device_type::memory_space,
792 typename SDevice::memory_space>;
793
794 raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
795 sizeof(size_type) * src.m_hash_lists.extent(0));
796 raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
797 sizeof(size_type) * src.m_next_index.extent(0));
798 raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
799 sizeof(key_type) * src.m_keys.extent(0));
800 if (!is_set) {
801 raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
802 sizeof(impl_value_type) * src.m_values.extent(0));
803 }
804 raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
805 sizeof(int) * num_scalars);
806
807 Kokkos::fence(
808 "Kokkos::UnorderedMap::create_copy_view: fence after copy to tmp");
809
810 *this = tmp;
811 }
812 }
813
815 private: // private member functions
816 bool modified() const { return get_flag(modified_idx); }
817
818 void set_flag(int flag) const {
819 using raw_deep_copy =
820 Kokkos::Impl::DeepCopy<typename device_type::memory_space,
821 Kokkos::HostSpace>;
822 const int true_ = true;
823 raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
824 Kokkos::fence(
825 "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
826 "HostSpace");
827 }
828
829 void reset_flag(int flag) const {
830 using raw_deep_copy =
831 Kokkos::Impl::DeepCopy<typename device_type::memory_space,
832 Kokkos::HostSpace>;
833 const int false_ = false;
834 raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
835 Kokkos::fence(
836 "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
837 "HostSpace");
838 }
839
840 bool get_flag(int flag) const {
841 using raw_deep_copy =
842 Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
843 typename device_type::memory_space>;
844 int result = false;
845 raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
846 Kokkos::fence(
847 "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
848 "HostSpace");
849 return result;
850 }
851
852 static uint32_t calculate_capacity(uint32_t capacity_hint) {
853 // increase by 16% and round to nears multiple of 128
854 return capacity_hint
855 ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
856 128u) *
857 128u
858 : 128u;
859 }
860
861 private: // private members
862 bool m_bounded_insert;
863 hasher_type m_hasher;
864 equal_to_type m_equal_to;
865 View<size_type, HostSpace> m_size;
866 bitset_type m_available_indexes;
867 size_type_view m_hash_lists;
868 size_type_view m_next_index;
869 key_type_view m_keys;
870 value_type_view m_values;
871 scalars_view m_scalars;
872
873 template <typename KKey, typename VValue, typename DDevice, typename HHash,
874 typename EEqualTo>
875 friend class UnorderedMap;
876
877 template <typename UMap>
878 friend struct Impl::UnorderedMapErase;
879
880 template <typename UMap>
881 friend struct Impl::UnorderedMapHistogram;
882
883 template <typename UMap>
884 friend struct Impl::UnorderedMapPrint;
885};
886
887// Specialization of deep_copy for two UnorderedMap objects.
888template <typename DKey, typename DT, typename DDevice, typename SKey,
889 typename ST, typename SDevice, typename Hasher, typename EqualTo>
890inline void deep_copy(
893 dst.create_copy_view(src);
894}
895
896} // namespace Kokkos
897
898#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
899#undef KOKKOS_IMPL_PUBLIC_INCLUDE
900#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
901#endif
902#endif // KOKKOS_UNORDERED_MAP_HPP
// NOTE(review): Doxygen member-brief tooltip residue from the HTML
// extraction; preserved below as comments (with minor grammar fixes).
//   KOKKOS_FORCEINLINE_FUNCTION pair<T1 &, T2 &> tie(T1 &x, T2 &y)
//     Return a pair of references to the input arguments.
//   First element of the return value of UnorderedMap::insert().
//   KOKKOS_FORCEINLINE_FUNCTION bool failed() const
//     Did the map fail to insert the key due to insufficient capacity?
//   KOKKOS_FORCEINLINE_FUNCTION bool success() const
//     Did the map successfully insert the key/value pair?
//   KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const
//   KOKKOS_FORCEINLINE_FUNCTION bool existing() const
//     Was the key already present in the map?
//   KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
//   KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
//     Index where the key can be found, as long as the insert did not fail.
//   Thread-safe, performance-portable lookup table.
//   bool failed_insert() const
//     Whether any insert() call failed due to insufficient capacity.
//   KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
//     Get the key with i as its direct index.
//   KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
//     The number of hash table "buckets".
//   KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
//     Does the key exist in the map?
//   KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
//     Find the given key k, if it exists in the table.
//   UnorderedMap(size_type capacity_hint=0, hasher_type hasher=hasher_type(),
//                equal_to_type equal_to=equal_to_type())
//     Constructor.
//   KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<!std::is_void<Dummy>::value,
//       std::conditional_t<has_const_value, impl_value_type,
//       impl_value_type &>> value_at(size_type i) const
//     Get the value with i as its direct index.
//   void clear()
//     Clear all entries in the table.
//   size_type size() const
//     The number of entries in the table.
//   bool rehash(size_type requested_capacity=0)
//     Change the capacity of the map.
//   KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k,
//       impl_value_type const &v=impl_value_type(),
//       InsertOpType arg_insert_op=InsertOpType()) const
//   View to an array of data.
//   ScopeGuard: some user scope issues have been identified with some
//     Kokkos::finalize calls; ScopeGuard addresses these.
//   Operations applied to the values array upon subsequent insertions.