13#ifndef MCRL2_UTILITIES_DETAIL_INDEXED_SET_H
14#define MCRL2_UTILITIES_DETAIL_INDEXED_SET_H
/// \brief The offset with which the next hash entry is probed (linear probing).
/// NOTE(review): was `static const`; made `constexpr` for consistency with
/// EMPTY/RESERVED below and to guarantee compile-time initialization in a header.
static constexpr std::size_t STEP = 1;

/// \brief In the hashtable we use the following constant to indicate free positions.
static constexpr std::size_t EMPTY(std::numeric_limits<std::size_t>::max());

/// \brief Marks a hashtable slot claimed by a thread that is still busy
///        storing the corresponding key (see put_in_hashtable/insert).
static constexpr std::size_t RESERVED(std::numeric_limits<std::size_t>::max()-1);
/// \brief Shorthand for the template header shared by all member definitions below.
/// NOTE(review): de-garbled — the extraction had source line numbers fused onto
/// the `#define` tokens, which made the directives ill-formed.
#define INDEXED_SET_TEMPLATE template <class Key, bool ThreadSafe, typename Hash, typename Equals, typename Allocator, typename KeyTable>
/// \brief Shorthand for the fully parameterised indexed_set type.
#define INDEXED_SET indexed_set<Key, ThreadSafe, Hash, Equals, Allocator, KeyTable>
// Ensures spare capacity in m_keys before further insertions: takes the
// exclusive lock and, when fewer than m_shared_mutexes.size() free key slots
// remain, presumably grows the key table (the actual resize is elided from
// this view — confirm against the full source).
// NOTE(review): extraction artifact — digits fused to the code lines ("56",
// "58", ...) are the original file's line numbers, and braces/body lines are
// elided; code kept byte-identical.
56inline void INDEXED_SET::reserve_indices(
const std::size_t thread_index)
58 lock_guard guard = m_shared_mutexes[thread_index].lock();
60 if (m_next_index + m_shared_mutexes.size() >= m_keys.size())
62 assert(m_next_index <= m_keys.size());
// Claims a slot for `key` in the open-addressing hashtable, probing linearly
// with detail::STEP from the caller-supplied starting slot (reported back via
// new_position). A free slot is claimed with an atomic compare-exchange on the
// slot; when a slot already holds an index whose key m_equals the argument,
// that existing entry is used instead.
// NOTE(review): extraction artifact — leading digits are original source line
// numbers; the remaining parameters, the loop construct, the declarations of
// `pos`/`value` and the return statements are elided from this view — confirm
// details against the full source. Code kept byte-identical.
73inline typename INDEXED_SET::size_type INDEXED_SET::put_in_hashtable(
76 std::size_t& new_position)
79 assert(m_hashtable.size() > 0);
82 std::size_t start = new_position;
87 std::size_t index = m_hashtable[new_position];
94 if (
reinterpret_cast<std::atomic<std::size_t>*
>(&m_hashtable[new_position])->compare_exchange_strong(pos,value))
107 if (m_equals(m_keys[index], key))
110 assert(index<m_next_index && m_next_index<=m_keys.size());
113 assert(m_hashtable.size()>0);
114 new_position = (new_position +
detail::STEP) % m_hashtable.size();
115 assert(new_position != start);
// Rebuilds the hashtable by re-inserting every key whose index is below
// m_next_index via put_in_hashtable.
// NOTE(review): extraction artifact — the allocation of the new (presumably
// enlarged) table and the declaration/increment of `index` are elided from
// this view; leading digits are original line numbers. Code kept
// byte-identical.
125inline void INDEXED_SET::resize_hashtable()
129 for (
const Key& k: m_keys)
131 if (index<m_next_index)
133 std::size_t new_position;
134 put_in_hashtable(k, index, new_position);
// Default constructor. Its initializer list and body (elided from this view)
// are not visible here — confirm against the full source.
145inline INDEXED_SET::indexed_set()
// Constructor for a set usable by `number_of_threads` threads; a thread count
// of zero is rejected (assert). The initializer list/delegation is elided
// from this view.
150inline INDEXED_SET::indexed_set(std::size_t number_of_threads)
153 assert(number_of_threads != 0);
// Full constructor: thread count, initial table size, hash and equality
// predicates. Creates one shared mutex for slot 0 and, when more than one
// thread is used, one extra mutex per thread, each constructed from
// m_shared_mutexes[0] (so they share the same underlying synchronisation).
// NOTE(review): m_mutex is initialised with a raw `new std::mutex()`; this is
// fine only if the member is a smart pointer — confirm against the member
// declaration (not visible here). Extraction artifact: leading digits are
// original line numbers; parts of the initializer list and the braces are
// elided. Code kept byte-identical.
157inline INDEXED_SET::indexed_set(
158 std::size_t number_of_threads,
159 std::size_t initial_size,
160 const hasher& hasher,
161 const key_equal& equals)
163 m_mutex(new
std::mutex()),
167 assert(number_of_threads != 0);
170 m_shared_mutexes.emplace_back();
172 for (std::size_t i = 1; i < ((number_of_threads == 1) ? 1 : number_of_threads + 1); ++i)
175 m_shared_mutexes.emplace_back(m_shared_mutexes[0]);
// Returns the index of `key`, holding the shared (read) lock for the duration.
// Probes linearly (detail::STEP) from a start position derived from
// m_hasher(key) scaled by detail::PRIME_NUMBER. An EMPTY slot means the key is
// absent; RESERVED slots are skipped because another thread is still writing
// that entry, so only non-RESERVED slots are compared with m_equals.
// NOTE(review): extraction artifact — the loop construct and all return
// statements are elided from this view; leading digits are original line
// numbers. Code kept byte-identical.
180inline typename INDEXED_SET::size_type INDEXED_SET::index(
const key_type& key,
const std::size_t thread_index)
const
182 shared_guard guard = m_shared_mutexes[thread_index].lock_shared();
183 assert(m_hashtable.size() > 0);
185 std::size_t start = ((m_hasher(key) * detail::PRIME_NUMBER) >> 2) % m_hashtable.size();
186 std::size_t position = start;
189 std::size_t index = m_hashtable[position];
190 if (index == detail::EMPTY)
196 if (index != detail::RESERVED)
198 assert(index < m_keys.size());
199 if (m_equals(key, m_keys[index]))
201 assert(index<m_next_index && m_next_index <= m_keys.size());
205 assert(m_hashtable.size() > 0);
206 position = (position + detail::STEP) % m_hashtable.size();
207 assert(position != start);
217inline typename INDEXED_SET::const_iterator INDEXED_SET::find(
const key_type& key,
const std::size_t thread_index)
const
219 const std::size_t idx = index(key, thread_index);
220 if (idx < m_keys.size())
222 return begin(thread_index) + idx;
225 return end(thread_index);
230inline const Key& INDEXED_SET::at(std::size_t index)
const
232 if (index >= m_next_index)
234 throw std::out_of_range(
"indexed_set: index too large: " + std::to_string(index) +
" > " + std::to_string(m_next_index) +
".");
237 return m_keys[index];
241inline const Key& INDEXED_SET::operator[](std::size_t index)
const
243 assert(index<m_keys.size());
244 const Key& key = m_keys[index];
// Empties the set under the exclusive lock: every hashtable slot is reset to
// EMPTY and the next free index is reset to 0.
// NOTE(review): two lines are elided between the assign and the store —
// presumably clearing m_keys as well; confirm against the full source.
// Extraction artifact: leading digits are original line numbers; braces are
// elided. Code kept byte-identical.
249inline void INDEXED_SET::clear(
const std::size_t thread_index)
251 lock_guard guard = m_shared_mutexes[thread_index].lock();
252 m_hashtable.assign(m_hashtable.size(), detail::EMPTY);
255 m_next_index.store(0);
// Inserts `key`, returning (new index, true) on success or
// (existing index, false) when an equal key was already present.
// Concurrency protocol visible below:
//  1. under the shared lock, when fewer than m_shared_mutexes.size() free key
//     slots remain, release the shared lock and call reserve_indices() (which
//     takes the exclusive lock);
//  2. claim a hashtable slot with the RESERVED sentinel via put_in_hashtable —
//     a non-RESERVED return means the key already exists;
//  3. allocate the key's index with m_next_index.fetch_add(1), store the key,
//     then issue a seq_cst fence before publishing the real index into the
//     slot, so a reader that observes the index also observes the key write.
// NOTE(review): extraction artifact — leading digits are original line
// numbers; the lines after reserve_indices() are elided (presumably
// re-acquiring the shared lock) — confirm against the full source. Code kept
// byte-identical.
260inline std::pair<typename INDEXED_SET::size_type, bool> INDEXED_SET::insert(
const Key& key,
const std::size_t thread_index)
262 shared_guard guard = m_shared_mutexes[thread_index].lock_shared();
263 assert(m_next_index <= m_keys.size());
264 if (m_next_index + m_shared_mutexes.size() >= m_keys.size())
266 guard.unlock_shared();
267 reserve_indices(thread_index);
270 std::size_t new_position;
271 const std::size_t index = put_in_hashtable(key, detail::RESERVED, new_position);
273 if (index != detail::RESERVED)
275 assert(index < m_next_index && m_next_index <= m_keys.size());
276 return std::make_pair(index,
false);
279 const std::size_t new_index = m_next_index.fetch_add(1);
280 assert(new_index < m_keys.size());
281 m_keys[new_index] = key;
283 std::atomic_thread_fence(std::memory_order_seq_cst);
285 m_hashtable[new_position] = new_index;
288 assert(new_index < m_next_index && m_next_index <= m_keys.size());
289 return std::make_pair(new_index,
true);
// End of the implementation section: retire the helper macro.
// NOTE(review): the matching `#undef INDEXED_SET` is not visible in this
// view — confirm it follows in the full source.
292#undef INDEXED_SET_TEMPLATE
const basic_sort & pos()
Constructor for sort expression Pos.
NUMERIC_TYPE value(const data_expression &r, typename std::enable_if< std::is_floating_point< NUMERIC_TYPE >::value >::type *=nullptr)
Yields the real value of a data expression.
data_expression max(const data_expression &e1, const data_expression &e2, const rewriter &)
bool equals(const cs_game_move &m0, const cs_game_move &m1, bool weak_transition=false)
static constexpr std::size_t RESERVED(std::numeric_limits< std::size_t >::max() -1)
static constexpr std::size_t PRIME_NUMBER
static constexpr float max_load_factor
The load factor before the hash table is resized.
static constexpr std::size_t minimal_hashtable_size
static const std::size_t STEP
The position on which the next hash entry is searched.
static constexpr std::size_t RESERVATION_FRACTION
static constexpr std::size_t EMPTY(std::numeric_limits< std::size_t >::max())
In the hashtable we use the following constant to indicate free positions.
void mcrl2_unused(T &&...)
Function that can be used to silence unused parameter warnings.
A class that takes a linear process specification and checks all tau-summands of that LPS for confluence.
add your file description here.
#define INDEXED_SET_TEMPLATE