fixed_size_cache.h

// Author(s): Maurice Laveaux
// Copyright: see the accompanying file COPYING or copy at
// https://github.com/mCRL2org/mCRL2/blob/master/COPYING
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef MCRL2_UTILITIES_FIXED_SIZE_CACHE_H
#define MCRL2_UTILITIES_FIXED_SIZE_CACHE_H

#include "mcrl2/utilities/cache_policy.h"
#include "mcrl2/utilities/unordered_map.h"

namespace mcrl2
{
namespace utilities
{

/// \brief A cache keeps track of key-value pairs similar to a map. The difference is that a cache
///        has (an optional) maximum size and a policy that determines which element gets evicted
///        when the cache is full.
template<typename Policy>
class fixed_size_cache
{
public:
  using key_type = typename Policy::key_type;
  using iterator = typename Policy::map_type::iterator;
  using const_iterator = typename Policy::map_type::const_iterator;

  explicit fixed_size_cache(std::size_t max_size = 1024)
    : m_map(max_size)
  {
    if (max_size == 0)
    {
      m_maximum_size = std::numeric_limits<std::size_t>::max();
    }
    else
    {
      // The internal mapping might only support powers of two for its capacity, so we
      // might as well use the additional capacity it provides.
      m_maximum_size = m_map.capacity();
    }
  }

  const_iterator begin() const { return m_map.begin(); }
  const_iterator end() const { return m_map.end(); }

  void clear() { m_map.clear(); m_policy.clear(); }

  std::size_t count(const key_type& key) const { return m_map.count(key); }

  iterator find(const key_type& key)
  {
    return m_map.find(key);
  }

  /// \brief Stores the given key-value pair in the cache. Depending on the cache policy and
  ///        capacity an existing element might be removed.
  template<typename ...Args>
  std::pair<iterator, bool> emplace(Args&&... args)
  {
    // The find and the emplace are kept separate on purpose: when we insert an element, the
    // policy's replacement_candidate must not be the key that we have just inserted. Conversely,
    // evicting an element before looking it up would lead to unnecessary inserts when the element
    // being searched for is the one that got removed.
    auto result = find(args...);
    if (result == m_map.end())
    {
      // If the cache would be full after an insertion.
      if (m_map.size() + 1 >= m_maximum_size)
      {
        // Remove an existing element as determined by the policy.
        m_map.erase(m_policy.replacement_candidate(m_map));
      }

      // Insert the element and inform the policy that an element was inserted.
      auto emplace_result = m_map.emplace(std::forward<Args>(args)...);
      m_policy.inserted((*emplace_result.first).first);
      return emplace_result;
    }

    assert(m_map.size() <= m_maximum_size);
    return std::make_pair(result, false);
  }

protected:
  typename Policy::map_type m_map; ///< The underlying mapping from keys to their cached results.
  Policy m_policy; ///< The replacement policy for keys in the cache.

  std::size_t m_maximum_size; ///< The maximum number of elements to cache.
};
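
// A minimal sketch of the Policy interface that fixed_size_cache relies on above; the actual
// policies are expected to come from "mcrl2/utilities/cache_policy.h", and this illustrative
// policy (and its name) is not part of the original header. A policy provides the key and map
// types, bookkeeping hooks clear() and inserted(), and a replacement_candidate() that picks the
// element to evict; this sketch simply evicts the first element of the underlying map.
template<typename Map>
struct example_evict_front_policy
{
  using key_type = typename Map::key_type;
  using map_type = Map;

  // Called when the cache is cleared; this policy keeps no bookkeeping of its own.
  void clear() {}

  // Called after a new key was inserted into the underlying map.
  void inserted(const key_type&) {}

  // Returns the element that is removed when the cache would otherwise overflow.
  typename Map::iterator replacement_candidate(Map& map) { return map.begin(); }
};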

/// \brief A cache that stores the results of applying the function F to arguments of type Args...,
///        so that repeated applications with the same arguments reuse the cached result.
template<typename Policy, typename F, typename ...Args>
class function_cache : public fixed_size_cache<Policy>
{
private:
  using super = fixed_size_cache<Policy>;
  using super::m_map;
  using super::m_maximum_size;
  using super::m_policy;
  using super::find;

public:
  explicit function_cache(F cached_function = F(), std::size_t max_size = 1024)
    : super(max_size),
      m_cached_function(cached_function)
  {}

  /// \brief Stores the given key-value pair in the cache. Depending on the cache policy and
  ///        capacity an existing element might be removed.
  auto operator()(Args... args) -> typename std::invoke_result_t<F, Args...>
  {
    // The find and the emplace are kept separate on purpose: when we insert an element, the
    // policy's replacement_candidate must not be the key that we have just inserted. Conversely,
    // evicting an element before looking it up would lead to unnecessary inserts when the element
    // being searched for is the one that got removed.
    auto result = find(args...);
    if (result == m_map.end())
    {
      // If the cache would be full after an insertion.
      if (m_map.size() + 1 >= m_maximum_size)
      {
        // Remove an existing element as determined by the policy.
        m_map.erase(m_policy.replacement_candidate(m_map));
      }

      // Insert the element and inform the policy that an element was inserted.
      auto emplace_result = m_map.emplace(std::make_pair(args..., m_cached_function(args...)));

      m_policy.inserted((*emplace_result.first).first);
      return emplace_result.first->second;
    }

    assert(m_map.size() <= m_maximum_size);
    return result->second;
  }

private:
  F m_cached_function; ///< The function of which the results are cached.
};

template<typename Key, typename T>
using fifo_cache = fixed_size_cache<fifo_policy<mcrl2::utilities::unordered_map<Key, T>>>;

template<typename F, typename Args>
using fifo_function_cache = function_cache<fifo_policy<mcrl2::utilities::unordered_map<Args, typename std::invoke_result_t<F, Args>>>,
  F,
  Args>;
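
// A short usage sketch of function_cache, combining it with the illustrative policy defined after
// fixed_size_cache above; this helper function is itself only an illustration and not part of the
// original header. The cache memoises a unary function: a second call with the same argument
// returns the stored result instead of applying the function again. The fifo_* aliases above are
// intended to be used in the same way.
inline void function_cache_usage_sketch()
{
  auto square = [](int x) { return x * x; };

  // Cache the results of applying square to an int, keeping at most 128 entries.
  function_cache<example_evict_front_policy<mcrl2::utilities::unordered_map<int, int>>,
                 decltype(square), int> cache(square, 128);

  int first = cache(3);  // Applies square and stores the result under key 3.
  int second = cache(3); // Returns the cached result without applying square again.
  (void)first;
  (void)second;
}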

} // namespace utilities
} // namespace mcrl2

#endif // MCRL2_UTILITIES_FIXED_SIZE_CACHE_H