AIStatefulTask ‐ Asynchronous, Stateful Task Scheduler library.

Threads-like task objects evolving through user-defined states.

AIStatefulTaskMutex.h
#ifndef AISTATEFULTASKMUTEX_H
#define AISTATEFULTASKMUTEX_H

#include "threadsafe/aithreadsafe.h"
#include "utils/NodeMemoryResource.h"
#include "utils/threading/MpscQueue.h"
#include "utils/FuzzyBool.h"
#include "utils/cpu_relax.h"
#include "debug.h"
#include <cstdint>
#include <mutex>

class AIStatefulTask;

// Node in a singly linked list of tasks that are waiting for this mutex.
struct AIStatefulTaskMutexNode : public utils::threading::MpscNode
{
  using condition_type = uint32_t;      // Must be the same as AIStatefulTask::condition_type.

  AIStatefulTask* m_task;
  condition_type const m_condition;

  AIStatefulTaskMutexNode(AIStatefulTask* task, condition_type condition) : m_task(task), m_condition(condition) { }
};
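
// AIStatefulTaskMutexNode derives from utils::threading::MpscNode (ai-utils), which
// provides the atomic m_next pointer that Queue::push and Queue::peek below use to
// link nodes together. m_condition is the condition that the owning task passed to
// lock() and that it waits on when the mutex could not be obtained immediately.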

class AIStatefulTaskMutex
{
  using condition_type = uint32_t;      // Must be the same as AIStatefulTask::condition_type.

 private:
  class Queue : public utils::threading::MpscQueue
  {
    using MpscNode = utils::threading::MpscNode;
    std::mutex m_abort_mutex;

   public:
    utils::FuzzyBool push(MpscNode* node)
    {
      node->m_next.store(nullptr, std::memory_order_relaxed);
      MpscNode* prev = m_head.exchange(node, std::memory_order_relaxed);
      // The only way node is guaranteed to be the next node that will be popped is
      // when we just pushed to an empty list. In that case the situation right now is:
      //
      //   m_tail --> m_stub ==> nullptr, node
      //                ^
      //                |
      //               prev
      //
      // This is a stable situation: calls to push by other threads just append
      // on the right (changing m_head), and calls to pop shouldn't happen (we
      // are the owner of the lock; but even if one happened, pop would simply
      // return nullptr and do nothing).
      //
      // If prev is not pointing to m_stub, then the situation is instead:
      //
      //   m_tail --> node1 ==> nullptr, node
      //                ^
      //                |
      //               prev
      //
      // Possibly with more nodes (including m_stub) on the left, possibly incompletely
      // linked, which can all be removed by calls to pop until we reach this situation.
      // That too is a stable situation, because a call to pop in this case will return
      // nullptr: m_head is guaranteed unequal to m_tail.
      //
      // So in that case we are certain that at least one more pop() will finish
      // before our node becomes the front node, and we can return WasFalse.
      //
      // Assuming that prev does point to m_stub but we are not (yet) the next node
      // that would be popped, for example, the situation is:
      //
      //   m_tail --> node1 ==> m_stub ==> nullptr, node
      //                          ^
      //                          |
      //                         prev
      //
      // then pop will return node1 and set m_tail to point to m_stub, giving
      // the first situation. If we see that m_tail points to a node that is not
      // m_stub, then the pop for that node did not finish yet and we can return
      // WasFalse. If we see that m_tail points to m_stub then the call to pop
      // for the previous node might have only JUST finished. However, in that
      // case we can return True (resulting in no call to wait) because a
      // subsequent signal will simply be ignored [or cause this task to continue
      // running if somehow it tried to get the lock again (later) and failed to
      // get it (causing a call to wait then); however that is not possible: we
      // ARE at the front of the queue and that cannot change no matter what
      // other threads do].
      //
      bool is_front = prev == &m_stub && m_tail.load(std::memory_order_acquire) == &m_stub;

      // Here m_head points to the new node, which either points to null
      // or already points to the NEXT node that was pushed AND completed, etc.
      // Now fix the next pointer of the node that m_head was pointing at.
      prev->m_next.store(node, std::memory_order_release);

      return is_front ? fuzzy::True : fuzzy::WasFalse;
    }
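
    // (See AIStatefulTaskMutex::lock at the bottom of this file for how this return
    // value is consumed: fuzzy::True means the mutex was obtained, fuzzy::WasFalse
    // means the pushing task must call wait(condition) and go idle until signalled.)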

    // This function may only be called by the consumer!
    //
    // Returns the node that will be returned by the next call to pop.
    //
    // If nullptr is returned then the push of the next non-null node that will be
    // returned by pop didn't complete yet (or wasn't even called yet).
    //
    // If &m_stub is returned then the push of the next non-null node that will be
    // returned by pop wasn't called yet and the list is empty.
    MpscNode const* peek() const
    {
      // If m_tail is not pointing to m_stub then it points to the node that will be
      // returned by a call to pop.
      //
      // If m_head is not pointing to m_stub (and m_tail is) then m_stub->m_next is the
      // next node that will be returned by a call to pop. If this m_next value is still
      // nullptr then an ongoing push (that changed the value of m_head away from
      // m_stub) didn't complete yet. In that case we wait until this m_next pointer is
      // updated.
      //
      // If m_head points to m_stub (and m_tail too) then the queue is empty;
      // m_stub.m_next is guaranteed to be null too in that case. We just return
      // nullptr then.
      MpscNode const* tail = m_tail.load(std::memory_order_relaxed);
      if (tail != &m_stub)
        return tail;
      MpscNode const* head = m_head.load(std::memory_order_relaxed);
      if (head == &m_stub)
        return nullptr;
      // Wait for the current push to complete.
      MpscNode const* next;
      while (!(next = m_stub.m_next.load(std::memory_order_acquire)))
        cpu_relax();
      return next;
    }

    // This function is only called by the task that owns node; in other words,
    // node is in the queue and will not be popped for the duration of this call.
    // Note that True is stable (once node is at the front it stays there until its
    // owner pops it), whereas WasFalse is momentary: a concurrent pop may move node
    // to the front immediately after the load.
    utils::FuzzyBool is_front(MpscNode const* node) const
    {
      MpscNode const* tail = m_tail.load(std::memory_order_relaxed);
      return (tail == node || (tail == &m_stub && m_stub.m_next.load(std::memory_order_acquire) == node)) ? fuzzy::True : fuzzy::WasFalse;
    }
  };
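
  // Note on the queue discipline: every task that calls lock() pushes a node
  // (multiple producers), but nodes are only popped on behalf of the task that
  // currently owns the mutex (a single consumer at any time); that is why an
  // MPSC queue suffices here.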

 public:
  using Node = AIStatefulTaskMutexNode;
  // Returns the size of the nodes that will be allocated from s_node_memory_resource.
  static constexpr size_t node_size() { return sizeof(Node); }
  // This must be called once before using an AIStatefulTaskMutex.
  static void init(utils::MemoryPagePool* mpp_ptr) { s_node_memory_resource.init(mpp_ptr, node_size()); }
  // Memory resource to allocate Node's from.
  static utils::NodeMemoryResource s_node_memory_resource;
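
  // For example (a sketch; the MemoryPagePool block size is application-specific):
  //
  //   utils::MemoryPagePool mpp(0x8000);
  //   AIStatefulTaskMutex::init(&mpp);    // Once, before using any AIStatefulTaskMutex.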

 private:
  Queue m_queue;

 public:
  // Construct an unlocked AIStatefulTaskMutex.
  AIStatefulTaskMutex() = default;
  // Immediately after construction, nobody owns the lock:
  //
  //   m_head --> m_stub

  // Try to obtain ownership of the mutex for task (recursive locking allowed).
  // Returns a handle on success; returns nullptr on failure, in which case the
  // caller must call task->wait(condition).
  inline Node const* lock(AIStatefulTask* task, condition_type condition);

  // Undo one (successful) call to lock.
  void unlock();

  Node const* lock_blocking(AIStatefulTask* task)
  {
    Node const* node = lock(task, 0);
    if (node)
      return node;
    //FIXME
    ASSERT(false);
    return nullptr;
  }

#if CW_DEBUG
  // This is obviously racy, unless it is called by the only consumer thread;
  // that would be the thread running the task that currently has the lock, and
  // if we know that that is the case then we don't need to call this function :p.
  //
  // Most notably, in theory a task could be returned that is deleted by the time
  // you use it. So, don't use the result. It is intended only for debug output
  // (printing the returned pointer).
  AIStatefulTask* debug_get_owner() const
  {
    AIStatefulTaskMutexNode const* next = static_cast<AIStatefulTaskMutexNode const*>(m_queue.peek());
    // next might get deallocated right here, but even if that is the case this still
    // isn't UB, because it is allocated from a utils::SimpleSegregatedStorage which never
    // *actually* frees memory. At most next->m_task is a nonsensical value, although the
    // chance of that is extremely small.
    return next ? next->m_task : nullptr;
  }
#endif
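
  // For example, for debug output only (a sketch; the_mutex names some
  // AIStatefulTaskMutex instance):
  //
  //   Dout(dc::notice, "Current mutex owner: " << the_mutex.debug_get_owner());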

 private:
  friend class AIStatefulTask;
  // Is this object currently owned (locked) by us?
  // May only be called from some multiplex_impl passing its own AIStatefulTask pointer,
  // and therefore only by AIStatefulTask::is_self_locked(AIStatefulTaskMutex&).
  //
  //   case MyTask_lock:
  //     set_state(MyTask_locked);
  //     if (!(m_handle = the_mutex.lock(this, MyTask_locked)))
  //     {
  //       wait(1);
  //       break;
  //     }
  //     [[fallthrough]];
  //   case MyTask_locked:
  //   {
  //     statefultask::AdoptLock lock(the_mutex);
  //     ...
  //     if (is_self_locked(the_mutex, m_handle))
  //       ...
  utils::FuzzyBool is_self_locked(Node const* handle) const
  {
    return m_queue.is_front(handle);
  }
};

namespace statefultask {

// Convenience class to automatically unlock the mutex upon leaving the current scope.
class AdoptLock
{
 private:
  AIStatefulTaskMutex* m_mutex;

 public:
  AdoptLock(AIStatefulTaskMutex& mutex) : m_mutex(&mutex) { }
  ~AdoptLock() { unlock(); }

  void unlock()
  {
    if (m_mutex)
      m_mutex->unlock();
    m_mutex = nullptr;
  }

  void skip_unlock()
  {
    m_mutex = nullptr;
  }
};
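
// For example (a sketch, continuing the multiplex_impl example above):
//
//   case MyTask_locked:
//   {
//     statefultask::AdoptLock lock(the_mutex);
//     ...                   // Use whatever the mutex protects.
//   }                       // The destructor calls the_mutex.unlock() here,
//                           // unless skip_unlock() was called first.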

} // namespace statefultask

#include "AIStatefulTask.h"
#endif // AISTATEFULTASKMUTEX_H

#ifndef AISTATEFULTASKMUTEX_H_definitions
#define AISTATEFULTASKMUTEX_H_definitions

AIStatefulTaskMutex::Node const* AIStatefulTaskMutex::lock(AIStatefulTask* task, condition_type condition)
{
  DoutEntering(dc::notice, "AIStatefulTaskMutex::lock(" << task << ", " << task->print_conditions(condition) << ") [mutex:" << this << "]");

  Node* new_node = new (s_node_memory_resource.allocate(sizeof(Node))) Node(task, condition);
  Dout(dc::notice, "Create new node at " << new_node << " [" << task << "]");

  utils::FuzzyBool have_lock = m_queue.push(new_node);

  // If the next return from pop() will return new_node, then have_lock MUST be True.
  // If the next return from pop() will return another node, then have_lock MUST be WasFalse.
  if (have_lock.is_true())
  {
    Dout(dc::notice, "Mutex acquired [" << task << "]");
    return new_node;
  }
  Dout(dc::notice, "Mutex already locked [" << task << "]");

  // Obtaining the lock failed. Halt the task.
  return nullptr; // The caller must call task->wait(condition).
}
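
// When lock() fails the node remains in the queue: the task calls wait(condition)
// and goes idle. Once all earlier nodes have been popped (by the unlock() calls of
// the previous owners, see AIStatefulTaskMutex.cxx) the stored condition is signalled
// and the task runs again, now as the owner of the mutex; is_self_locked() then
// returns True for its handle.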

#endif // AISTATEFULTASKMUTEX_H_definitions