indexed_set.h
// Author(s): Jan Friso Groote
// Copyright: see the accompanying file COPYING or copy at
// https://github.com/mCRL2org/mCRL2/blob/master/COPYING
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
/// \file mcrl2/utilities/detail/indexed_set.h
/// \brief add your file description here.

#ifndef MCRL2_UTILITIES_DETAIL_INDEXED_SET_H
#define MCRL2_UTILITIES_DETAIL_INDEXED_SET_H
#pragma once

#include "mcrl2/utilities/indexed_set.h" // Necessary for the header test.

namespace mcrl2
{
namespace utilities
{
namespace detail
{

static const std::size_t STEP = 1; ///< The position on which the next hash entry is searched.

/// In the hashtable we use the following constant to indicate free positions.
static constexpr std::size_t EMPTY(std::numeric_limits<std::size_t>::max());

static constexpr std::size_t RESERVED(std::numeric_limits<std::size_t>::max()-1);

static constexpr float max_load_factor = 0.6f; ///< The load factor before the hash table is resized.

static constexpr std::size_t PRIME_NUMBER = 999953;

#ifndef NDEBUG // Numbers are small in debug mode for more intensive checks.
  static constexpr std::size_t minimal_hashtable_size = 16;
#else
  static constexpr std::size_t minimal_hashtable_size = 2048;
#endif
  static constexpr std::size_t RESERVATION_FRACTION = 8; // If the reserved key entries are exhausted, 1/RESERVATION_FRACTION new
                                                         // keys are reserved. This is an expensive operation, as it is executed
                                                         // while holding an exclusive lock.

static_assert(minimal_hashtable_size >= 8);

} // namespace detail

#define INDEXED_SET_TEMPLATE template <class Key, bool ThreadSafe, typename Hash, typename Equals, typename Allocator, typename KeyTable>
#define INDEXED_SET indexed_set<Key, ThreadSafe, Hash, Equals, Allocator, KeyTable>

INDEXED_SET_TEMPLATE
inline void INDEXED_SET::reserve_indices(const std::size_t thread_index)
{
  lock_guard guard = m_shared_mutexes[thread_index].lock();

  if (m_next_index + m_shared_mutexes.size() >= m_keys.size()) // Otherwise another thread already reserved entries, and nothing needs to be done.
  {
    assert(m_next_index <= m_keys.size());
    m_keys.resize(m_keys.size() + std::max(m_keys.size() / detail::RESERVATION_FRACTION, m_shared_mutexes.size())); // Grow by at least the number of threads.

    while ((detail::max_load_factor * m_hashtable.size()) < m_keys.size())
    {
      resize_hashtable();
    }
  }
}

INDEXED_SET_TEMPLATE
inline typename INDEXED_SET::size_type INDEXED_SET::put_in_hashtable(
  const key_type& key,
  std::size_t value,
  std::size_t& new_position)
{
  // Find a place to insert key, and determine whether key already exists.
  assert(m_hashtable.size() > 0);

  new_position = ((m_hasher(key) * detail::PRIME_NUMBER) >> 2) % m_hashtable.size();
  std::size_t start = new_position;
  utilities::mcrl2_unused(start); // Suppress an unused-variable warning in release mode.

  while (true)
  {
    std::size_t index = m_hashtable[new_position];
    assert(index == detail::EMPTY || index == detail::RESERVED || index < m_keys.size());

    if (index == detail::EMPTY)
    {
      // Found an empty spot; insert a new index belonging to key.
      std::size_t pos = detail::EMPTY;
      if (reinterpret_cast<std::atomic<std::size_t>*>(&m_hashtable[new_position])->compare_exchange_strong(pos, value))
      {
        return value;
      }
      index = pos; // The insertion failed, because another thread put an alternative value "pos"
                   // at this position.
    }

    // If the index is RESERVED, we stay in a busy loop, as another thread
    // will shortly change the RESERVED value into a sensible index.
    if (index != detail::RESERVED)
    {
      assert(index != detail::EMPTY);
      if (m_equals(m_keys[index], key))
      {
        // The key is already in the set; return the position of the key.
        assert(index < m_next_index && m_next_index <= m_keys.size());
        return index;
      }
      assert(m_hashtable.size() > 0);
      new_position = (new_position + detail::STEP) % m_hashtable.size();
      assert(new_position != start); // In this case the hashtable is full, which should never happen.
    }
  }

  // Not reached.
  std::abort();
  return detail::EMPTY;
}

INDEXED_SET_TEMPLATE
inline void INDEXED_SET::resize_hashtable()
{
  m_hashtable.assign(m_hashtable.size() * 2, detail::EMPTY);
  std::size_t index = 0;
  for (const Key& k: m_keys)
  {
    if (index < m_next_index)
    {
      std::size_t new_position; // The resulting new_position is not used here.
      put_in_hashtable(k, index, new_position);
    }
    else
    {
      break;
    }
    ++index;
  }
}

INDEXED_SET_TEMPLATE
inline INDEXED_SET::indexed_set()
  : indexed_set(1, detail::minimal_hashtable_size) // Run with one main thread.
{}

INDEXED_SET_TEMPLATE
inline INDEXED_SET::indexed_set(std::size_t number_of_threads)
  : indexed_set(number_of_threads, detail::minimal_hashtable_size)
{
  assert(number_of_threads != 0);
}

INDEXED_SET_TEMPLATE
inline INDEXED_SET::indexed_set(
  std::size_t number_of_threads,
  std::size_t initial_size,
  const hasher& hasher,
  const key_equal& equals)
  : m_hashtable(std::max(initial_size, detail::minimal_hashtable_size), detail::EMPTY),
    m_mutex(new std::mutex()),
    m_hasher(hasher),
    m_equals(equals)
{
  assert(number_of_threads != 0);

  // Insert the mutex for the main thread.
  m_shared_mutexes.emplace_back();

  for (std::size_t i = 1; i < ((number_of_threads == 1) ? 1 : number_of_threads + 1); ++i)
  {
    // Copy the mutex once for each of the other threads.
    m_shared_mutexes.emplace_back(m_shared_mutexes[0]);
  }
}

INDEXED_SET_TEMPLATE
inline typename INDEXED_SET::size_type INDEXED_SET::index(const key_type& key, const std::size_t thread_index) const
{
  shared_guard guard = m_shared_mutexes[thread_index].lock_shared();
  assert(m_hashtable.size() > 0);

  std::size_t start = ((m_hasher(key) * detail::PRIME_NUMBER) >> 2) % m_hashtable.size();
  std::size_t position = start;
  do
  {
    std::size_t index = m_hashtable[position];
    if (index == detail::EMPTY)
    {
      return npos; // Not found.
    }
    // If the index is RESERVED, stay in a busy loop. Another thread will
    // shortly change this RESERVED value into a sensible index.
    if (index != detail::RESERVED)
    {
      assert(index < m_keys.size());
      if (m_equals(key, m_keys[index]))
      {
        assert(index < m_next_index && m_next_index <= m_keys.size());
        return index;
      }

      assert(m_hashtable.size() > 0);
      position = (position + detail::STEP) % m_hashtable.size();
      assert(position != start); // The hashtable is full. This should never happen.
    }
  }
  while (true);

  std::abort();
  return npos; // Dummy return.
}

INDEXED_SET_TEMPLATE
inline typename INDEXED_SET::const_iterator INDEXED_SET::find(const key_type& key, const std::size_t thread_index) const
{
  const std::size_t idx = index(key, thread_index);
  if (idx < m_keys.size())
  {
    return begin(thread_index) + idx;
  }

  return end(thread_index);
}

INDEXED_SET_TEMPLATE
inline const Key& INDEXED_SET::at(std::size_t index) const
{
  if (index >= m_next_index)
  {
    throw std::out_of_range("indexed_set: index too large: " + std::to_string(index) + " >= " + std::to_string(m_next_index) + ".");
  }

  return m_keys[index];
}

INDEXED_SET_TEMPLATE
inline const Key& INDEXED_SET::operator[](std::size_t index) const
{
  assert(index < m_keys.size());
  const Key& key = m_keys[index];
  return key;
}

INDEXED_SET_TEMPLATE
inline void INDEXED_SET::clear(const std::size_t thread_index)
{
  lock_guard guard = m_shared_mutexes[thread_index].lock();
  m_hashtable.assign(m_hashtable.size(), detail::EMPTY);

  m_keys.clear();
  m_next_index.store(0);
}

INDEXED_SET_TEMPLATE
inline std::pair<typename INDEXED_SET::size_type, bool> INDEXED_SET::insert(const Key& key, const std::size_t thread_index)
{
  shared_guard guard = m_shared_mutexes[thread_index].lock_shared();
  assert(m_next_index <= m_keys.size());
  if (m_next_index + m_shared_mutexes.size() >= m_keys.size())
  {
    // The reserved key slots are nearly exhausted; reserve new ones under an exclusive lock.
    guard.unlock_shared();
    reserve_indices(thread_index);
    guard.lock_shared();
  }
  std::size_t new_position;
  const std::size_t index = put_in_hashtable(key, detail::RESERVED, new_position);

  if (index != detail::RESERVED) // The key already exists.
  {
    assert(index < m_next_index && m_next_index <= m_keys.size());
    return std::make_pair(index, false);
  }

  const std::size_t new_index = m_next_index.fetch_add(1);
  assert(new_index < m_keys.size());
  m_keys[new_index] = key;

  std::atomic_thread_fence(std::memory_order_seq_cst); // Necessary for ARM; std::memory_order_acquire and
                                                       // std::memory_order_release appear to work, too.
  m_hashtable[new_position] = new_index; // Replace the RESERVED placeholder by the real index.

  assert(new_index < m_next_index && m_next_index <= m_keys.size());
  return std::make_pair(new_index, true);
}

#undef INDEXED_SET_TEMPLATE
#undef INDEXED_SET

} // namespace utilities

} // namespace mcrl2

#endif // MCRL2_UTILITIES_DETAIL_INDEXED_SET_H
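
A minimal usage sketch follows (not part of the header above). It assumes that the declaring header mcrl2/utilities/indexed_set.h supplies default template arguments, so that indexed_set<std::string> is well-formed, and that thread index 0 denotes the single main thread, as in the single-threaded constructor above; both assumptions should be checked against that header.

#include <cassert>
#include <string>
#include <utility>

#include "mcrl2/utilities/indexed_set.h"

int main()
{
  // Single-threaded use: one main thread, which uses thread index 0.
  mcrl2::utilities::indexed_set<std::string> words;

  // insert returns the index assigned to the key and whether the key was new.
  std::pair<std::size_t, bool> p = words.insert("tau", 0);
  assert(p.second);

  // Inserting the same key again yields the original index and false.
  assert(words.insert("tau", 0).first == p.first);
  assert(!words.insert("tau", 0).second);

  // index looks a key up; at maps an index back to its key.
  assert(words.index("tau", 0) == p.first);
  assert(words.at(p.first) == "tau");

  return 0;
}

Because duplicate inserts return the existing index, the set provides a stable bijection between keys and the indices 0..size()-1, which is what makes it usable as a key-to-number table in concurrent state-space exploration.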