Branch data Line data Source code
1 : : /*
2 : : * Copyright © 2008 Ryan Lortie
3 : : * Copyright © 2010 Codethink Limited
4 : : *
5 : : * SPDX-License-Identifier: LGPL-2.1-or-later
6 : : *
7 : : * This library is free software; you can redistribute it and/or
8 : : * modify it under the terms of the GNU Lesser General Public
9 : : * License as published by the Free Software Foundation; either
10 : : * version 2.1 of the License, or (at your option) any later version.
11 : : *
12 : : * This library is distributed in the hope that it will be useful,
13 : : * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 : : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 : : * Lesser General Public License for more details.
16 : : *
17 : : * You should have received a copy of the GNU Lesser General Public
18 : : * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 : : *
20 : : * Author: Ryan Lortie <desrt@desrt.ca>
21 : : */
22 : :
23 : : #include "config.h"
24 : :
25 : : #include "gbitlock.h"
26 : :
27 : : #include <glib/gmacros.h>
28 : : #include <glib/gmessages.h>
29 : : #include <glib/gatomic.h>
30 : : #include <glib/gslist.h>
31 : : #include <glib/gthread.h>
32 : : #include <glib/gslice.h>
33 : :
34 : : #include "gtestutils.h"
35 : : #include "gthreadprivate.h"
36 : :
37 : : #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION
38 : : #undef HAVE_FUTEX
39 : : #undef HAVE_FUTEX_TIME64
40 : : #endif
41 : :
#ifndef HAVE_FUTEX
/* Emulated-futex state: one global mutex protecting the list of
 * per-address waiter records (see WaitAddress below). */
static GMutex g_futex_mutex;
static GSList *g_futex_address_list = NULL;
#endif
46 : :
47 : : #if defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64)
48 : : /*
49 : : * We have headers for futex(2) on the build machine. This does not
50 : : * imply that every system that ever runs the resulting glib will have
51 : : * kernel support for futex, but you'd have to have a pretty old
52 : : * kernel in order for that not to be the case.
53 : : *
54 : : * If anyone actually gets bit by this, please file a bug. :)
55 : : */
56 : :
57 : : /* < private >
58 : : * g_futex_wait:
59 : : * @address: (type gpointer): a pointer to an integer
60 : : * @value: the value that should be at @address
61 : : *
62 : : * Atomically checks that the value stored at @address is equal to
63 : : * @value and then blocks. If the value stored at @address is not
64 : : * equal to @value then this function returns immediately.
65 : : *
66 : : * To unblock, call g_futex_wake() on @address.
67 : : *
68 : : * This call may spuriously unblock (for example, in response to the
69 : : * process receiving a signal) but this is not guaranteed. Unlike the
70 : : * Linux system call of a similar name, there is no guarantee that a
71 : : * waiting process will unblock due to a g_futex_wake() call in a
72 : : * separate process.
73 : : */
static void
g_futex_wait (const gint *address,
              gint value)
{
  /* FUTEX_WAIT_PRIVATE: process-private wait; NULL means no timeout.
   * The kernel re-checks *address == value atomically before sleeping. */
  g_futex_simple (address, (gsize) FUTEX_WAIT_PRIVATE, (gsize) value, NULL);
}
80 : :
81 : : /* < private >
82 : : * g_futex_wake:
83 : : * @address: (type gpointer): a pointer to an integer
84 : : *
85 : : * Nominally, wakes one thread that is blocked in g_futex_wait() on
86 : : * @address (if any thread is currently waiting).
87 : : *
88 : : * As mentioned in the documentation for g_futex_wait(), spurious
89 : : * wakeups may occur. As such, this call may result in more than one
90 : : * thread being woken up.
91 : : */
static void
g_futex_wake (const gint *address)
{
  /* Wake at most one thread blocked in g_futex_wait() on @address. */
  g_futex_simple (address, (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
97 : :
98 : : #else
99 : :
/* emulate futex(2) */
typedef struct
{
  const gint *address;  /* the integer this record's waiters are blocked on */
  gint ref_count;       /* number of threads currently waiting on wait_queue */
  GCond wait_queue;     /* signalled by the emulated g_futex_wake() */
} WaitAddress;
107 : :
108 : : static WaitAddress *
109 : 1930733 : g_futex_find_address (const gint *address)
110 : : {
111 : : GSList *node;
112 : :
113 : 18421036 : for (node = g_futex_address_list; node; node = node->next)
114 : : {
115 : 17197672 : WaitAddress *waiter = node->data;
116 : :
117 : 17197672 : if (waiter->address == address)
118 : 707369 : return waiter;
119 : : }
120 : :
121 : 1223364 : return NULL;
122 : : }
123 : :
/* Emulated g_futex_wait(): block on a GCond keyed by @address.  The
 * "check value, then sleep" step is made atomic by g_futex_mutex. */
static void
g_futex_wait (const gint *address,
              gint value)
{
  g_mutex_lock (&g_futex_mutex);
  if G_LIKELY (g_atomic_int_get (address) == value)
    {
      WaitAddress *waiter;

      /* Find (or lazily create) the waiter record for this address. */
      if ((waiter = g_futex_find_address (address)) == NULL)
        {
          waiter = g_slice_new (WaitAddress);
          waiter->address = address;
          g_cond_init (&waiter->wait_queue);
          waiter->ref_count = 0;
          g_futex_address_list =
            g_slist_prepend (g_futex_address_list, waiter);
        }

      waiter->ref_count++;
      g_cond_wait (&waiter->wait_queue, &g_futex_mutex);

      /* Last thread out tears the record down again. */
      if (!--waiter->ref_count)
        {
          g_futex_address_list =
            g_slist_remove (g_futex_address_list, waiter);
          g_cond_clear (&waiter->wait_queue);
          g_slice_free (WaitAddress, waiter);
        }
    }
  g_mutex_unlock (&g_futex_mutex);
}
156 : :
157 : : static void
158 : 1580957 : g_futex_wake (const gint *address)
159 : : {
160 : : WaitAddress *waiter;
161 : :
162 : : /* need to lock here for two reasons:
163 : : * 1) need to acquire/release lock to ensure waiter is not in
164 : : * the process of registering a wait
165 : : * 2) need to -stay- locked until the end to ensure a wake()
166 : : * in another thread doesn't cause 'waiter' to stop existing
167 : : */
168 : 1580957 : g_mutex_lock (&g_futex_mutex);
169 : 1580957 : if ((waiter = g_futex_find_address (address)))
170 : 534652 : g_cond_signal (&waiter->wait_queue);
171 : 1580957 : g_mutex_unlock (&g_futex_mutex);
172 : 1580957 : }
173 : : #endif
174 : :
/* Per-class counters of threads currently blocked in a bit-lock wait;
 * addresses hash into a class by pointer value.  A non-zero counter
 * tells an unlocker that a futex wake may be required. */
#define CONTENTION_CLASSES 11
static gint g_bit_lock_contended[CONTENTION_CLASSES]; /* (atomic) */
177 : :
178 : : G_ALWAYS_INLINE static inline guint
179 : : bit_lock_contended_class (gconstpointer address)
180 : : {
181 : 321166423 : return ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
182 : : }
183 : :
184 : : #if (defined (i386) || defined (__amd64__))
185 : : #if G_GNUC_CHECK_VERSION(4, 5)
186 : : #define USE_ASM_GOTO 1
187 : : #endif
188 : : #endif
189 : :
190 : : static const gint *g_futex_int_address (const void *address);
191 : :
/* Sleep until the value at @address changes away from @value.  The
 * contention counter for the address class is bumped *before* waiting so
 * that unlockers can tell a wake-up may be needed. */
G_ALWAYS_INLINE static inline void
bit_lock_futex_wait (gconstpointer address, gboolean is_pointer_pointer, gint value)
{
  const guint CLASS = bit_lock_contended_class (address);

  g_atomic_int_add (&g_bit_lock_contended[CLASS], +1);
  if (is_pointer_pointer)
    /* futex operates on a 32-bit int; pick the right word of the pointer. */
    address = g_futex_int_address (address);
  g_futex_wait (address, value);
  g_atomic_int_add (&g_bit_lock_contended[CLASS], -1);
}
203 : :
/* Issue a futex wake for @address, but only when the contention counter
 * for its class suggests somebody might actually be waiting. */
G_ALWAYS_INLINE static inline void
bit_lock_futex_maybe_wake (gconstpointer address, gboolean is_pointer_pointer)
{
  const guint CLASS = bit_lock_contended_class (address);

  /* Warning: unlocking may allow another thread to proceed and destroy the
   * memory that @address points to. We thus must not dereference it anymore.
   */

  if (g_atomic_int_get (&g_bit_lock_contended[CLASS]))
    {
      if (is_pointer_pointer)
        address = g_futex_int_address (address);
      g_futex_wake (address);
    }
}
220 : :
221 : : /**
222 : : * g_bit_lock_and_get:
223 : : * @address: (type gpointer): a pointer to an integer
224 : : * @lock_bit: a bit value between 0 and 31
225 : : * @out_val: (out) (optional): return location for the new value of the integer
226 : : *
227 : : * Sets the indicated @lock_bit in @address and atomically returns the new value.
228 : : *
229 : : * This is like [func@GLib.bit_lock], except it can atomically return the new value at
230 : : * @address (right after obtaining the lock). Thus the value returned in @out_val
231 : : * always has the @lock_bit set.
232 : : *
233 : : * Since: 2.86
234 : : **/
void
g_bit_lock_and_get (gint *address,
                    guint lock_bit,
                    gint *out_val)
{
  const guint MASK = 1u << lock_bit;
  guint v;

#ifdef G_ENABLE_DEBUG
  g_assert (lock_bit < 32u);
#endif

#ifdef USE_ASM_GOTO
  if (G_LIKELY (!out_val))
    {
      /* Fast path: "lock bts" sets the bit and reports via the carry
       * flag whether it was already set (i.e. the lock is contended). */
      while (TRUE)
        {
          __asm__ volatile goto("lock bts %1, (%0)\n"
                                "jc %l[contended]"
                                : /* no output */
                                : "r"(address), "r"(lock_bit)
                                : "cc", "memory"
                                : contended);
          return;

        contended:
          {
            guint v;

            /* Only go to sleep if the lock bit is still set; otherwise
             * retry the bts immediately. */
            v = (guint) g_atomic_int_get (address);
            if (v & MASK)
              bit_lock_futex_wait (address, FALSE, (gint) v);
          }
        }
    }
#endif

retry:
  /* Generic path: atomically set the bit.  If it was already set, block
   * until the holder wakes us, then try again. */
  v = g_atomic_int_or ((guint *) address, MASK);
  if (v & MASK)
    {
      bit_lock_futex_wait (address, FALSE, (gint) v);
      goto retry;
    }

  if (out_val)
    *out_val = (gint) (v | MASK); /* value right after taking the lock */
}
283 : :
284 : : /**
285 : : * g_bit_lock:
286 : : * @address: (type gpointer): a pointer to an integer
287 : : * @lock_bit: a bit value between 0 and 31
288 : : *
289 : : * Sets the indicated @lock_bit in @address. If the bit is already
290 : : * set, this call will block until g_bit_unlock() unsets the
291 : : * corresponding bit.
292 : : *
293 : : * Attempting to lock on two different bits within the same integer is
294 : : * not supported and will very probably cause deadlocks.
295 : : *
296 : : * The value of the bit that is set is (1u << @bit). If @bit is not
297 : : * between 0 and 31 then the result is undefined.
298 : : *
299 : : * This function accesses @address atomically. All other accesses to
300 : : * @address must be atomic in order for this function to work
301 : : * reliably. While @address has a `volatile` qualifier, this is a historical
302 : : * artifact and the argument passed to it should not be `volatile`.
303 : : *
304 : : * Since: 2.24
305 : : **/
306 : : void
307 : 14969743 : g_bit_lock (volatile gint *address,
308 : : gint lock_bit)
309 : : {
310 : 14969743 : g_bit_lock_and_get ((gint *) address, (guint) lock_bit, NULL);
311 : 14969743 : }
312 : :
313 : : /**
314 : : * g_bit_trylock:
315 : : * @address: (type gpointer): a pointer to an integer
316 : : * @lock_bit: a bit value between 0 and 31
317 : : *
318 : : * Sets the indicated @lock_bit in @address, returning %TRUE if
319 : : * successful. If the bit is already set, returns %FALSE immediately.
320 : : *
321 : : * Attempting to lock on two different bits within the same integer is
322 : : * not supported.
323 : : *
324 : : * The value of the bit that is set is (1u << @bit). If @bit is not
325 : : * between 0 and 31 then the result is undefined.
326 : : *
327 : : * This function accesses @address atomically. All other accesses to
328 : : * @address must be atomic in order for this function to work
329 : : * reliably. While @address has a `volatile` qualifier, this is a historical
330 : : * artifact and the argument passed to it should not be `volatile`.
331 : : *
332 : : * Returns: %TRUE if the lock was acquired
333 : : *
334 : : * Since: 2.24
335 : : **/
gboolean
g_bit_trylock (volatile gint *address,
               gint lock_bit)
{
#ifdef USE_ASM_GOTO
  gboolean result;

  /* "lock bts" sets the bit; CF receives its previous value.  setnc
   * therefore yields 1 exactly when the bit was clear, i.e. when we
   * acquired the lock. */
  __asm__ volatile ("lock bts %2, (%1)\n"
                    "setnc %%al\n"
                    "movzx %%al, %0"
                    : "=r" (result)
                    : "r" (address), "r" (lock_bit)
                    : "cc", "memory");

  return result;
#else
  gint *address_nonvolatile = (gint *) address;
  guint mask = 1u << lock_bit;
  guint v;

  /* Set the bit unconditionally; the returned old value tells us whether
   * it was previously clear (lock acquired). */
  v = g_atomic_int_or (address_nonvolatile, mask);

  return ~v & mask;
#endif
}
361 : :
362 : : /**
363 : : * g_bit_unlock:
364 : : * @address: (type gpointer): a pointer to an integer
365 : : * @lock_bit: a bit value between 0 and 31
366 : : *
367 : : * Clears the indicated @lock_bit in @address. If another thread is
368 : : * currently blocked in g_bit_lock() on this same bit then it will be
369 : : * woken up.
370 : : *
371 : : * This function accesses @address atomically. All other accesses to
372 : : * @address must be atomic in order for this function to work
373 : : * reliably. While @address has a `volatile` qualifier, this is a historical
374 : : * artifact and the argument passed to it should not be `volatile`.
375 : : *
376 : : * Since: 2.24
377 : : **/
void
g_bit_unlock (volatile gint *address,
              gint lock_bit)
{
  gint *address_nonvolatile = (gint *) address;

#ifdef USE_ASM_GOTO
  /* "lock btr" atomically clears the lock bit. */
  __asm__ volatile ("lock btr %1, (%0)"
                    : /* no output */
                    : "r" (address), "r" (lock_bit)
                    : "cc", "memory");
#else
  guint mask = 1u << lock_bit;

  g_atomic_int_and (address_nonvolatile, ~mask);
#endif

  /* Warning: unlocking may allow another thread to proceed and destroy the
   * memory that @address points to. We thus must not dereference it anymore.
   */
  bit_lock_futex_maybe_wake (address_nonvolatile, FALSE);
}
400 : :
401 : : /**
402 : : * g_bit_unlock_and_set:
403 : : * @address: (type gpointer): a pointer to an integer
404 : : * @lock_bit: a bit value between 0 and 31
405 : : * @new_val: the new value to set
406 : : * @preserve_mask: mask for bits from @address to preserve
407 : : *
408 : : * This is like [func@GLib.bit_unlock] but also atomically sets @address to
409 : : * @val.
410 : : *
411 : : * If @preserve_mask is not zero, then the @preserve_mask bits will be
412 : : * preserved in @address and are not set to @val.
413 : : *
414 : : * Note that the @lock_bit bit will always be unset regardless of
415 : : * @val, @preserve_mask and the currently set value in @address.
416 : : *
417 : : * Since: 2.86
418 : : **/
419 : : void
420 : 5005 : g_bit_unlock_and_set (gint *address,
421 : : guint lock_bit,
422 : : gint val,
423 : : gint preserve_mask)
424 : :
425 : : {
426 : 5005 : const guint MASK = 1u << lock_bit;
427 : :
428 : : #ifdef G_ENABLE_DEBUG
429 : 5005 : g_assert (lock_bit < 32u);
430 : : #endif
431 : :
432 : 5005 : if (G_UNLIKELY (preserve_mask != 0))
433 : : {
434 : : guint old_val;
435 : : guint new_val;
436 : :
437 : 3780 : old_val = (guint) g_atomic_int_get (address);
438 : :
439 : 3791 : again:
440 : 3791 : new_val = ((old_val & ((guint) preserve_mask)) | (((guint) val) & ~((guint) preserve_mask))) & ~MASK;
441 : 3791 : if (!g_atomic_int_compare_and_exchange_full (address, (gint) old_val, (gint) new_val, (gint *) &old_val))
442 : 11 : goto again;
443 : : }
444 : : else
445 : : {
446 : 1225 : g_atomic_int_set (address, (gint) (((guint) val) & ~MASK));
447 : : }
448 : :
449 : : /* Warning: unlocking may allow another thread to proceed and destroy the
450 : : * memory that @address points to. We thus must not dereference it anymore.
451 : : */
452 : : bit_lock_futex_maybe_wake (address, FALSE);
453 : 5005 : }
454 : :
455 : : /* We emulate pointer-sized futex(2) because the kernel API only
456 : : * supports integers.
457 : : *
458 : : * We assume that the 'interesting' part is always the lower order bits.
459 : : * This assumption holds because pointer bitlocks are restricted to
460 : : * using the low order bits of the pointer as the lock.
461 : : *
462 : : * On 32 bits, there is nothing to do since the pointer size is equal to
463 : : * the integer size. On little endian the lower-order bits don't move,
464 : : * so do nothing. Only on 64bit big endian do we need to do a bit of
465 : : * pointer arithmetic: the low order bits are shifted by 4 bytes. We
466 : : * have a helper function that always does the right thing here.
467 : : *
468 : : * Since we always consider the low-order bits of the integer value, a
469 : : * simple cast from (gsize) to (guint) always takes care of that.
470 : : *
471 : : * After that, pointer-sized futex becomes as simple as:
472 : : *
473 : : * g_futex_wait (g_futex_int_address (address), (guint) value);
474 : : *
475 : : * and
476 : : *
477 : : * g_futex_wake (g_futex_int_address (int_address));
478 : : */
G_ALWAYS_INLINE static inline const gint *
g_futex_int_address (const void *address)
{
  const gint *int_address = address;

  /* this implementation makes these (reasonable) assumptions: */
  G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN ||
      (G_BYTE_ORDER == G_BIG_ENDIAN &&
       sizeof (int) == 4 &&
       (sizeof (gpointer) == 4 || sizeof (gpointer) == 8)));

#if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8
  /* 64-bit big endian: the low-order 32 bits of the pointer live in the
   * second int, so step past the first. */
  int_address++;
#endif

  return int_address;
}
496 : :
497 : : G_ALWAYS_INLINE static inline gpointer
498 : : pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintptr preserve_mask, gpointer preserve_ptr)
499 : : {
500 : : guintptr x_ptr;
501 : : guintptr x_preserve_ptr;
502 : : guintptr lock_mask;
503 : :
504 : 41442606 : x_ptr = (guintptr) ptr;
505 : :
506 : 20791094 : if (preserve_mask != 0)
507 : : {
508 : 20651520 : x_preserve_ptr = (guintptr) preserve_ptr;
509 : 20651520 : x_ptr = (x_preserve_ptr & preserve_mask) | (x_ptr & ~preserve_mask);
510 : : }
511 : :
512 : 41442606 : if (lock_bit == G_MAXUINT)
513 : 0 : return (gpointer) x_ptr;
514 : :
515 : 41442606 : lock_mask = (guintptr) (1u << lock_bit);
516 : 41442606 : if (set)
517 : 2 : return (gpointer) (x_ptr | lock_mask);
518 : : else
519 : 41442604 : return (gpointer) (x_ptr & ~lock_mask);
520 : : }
521 : :
522 : : /**
523 : : * g_pointer_bit_lock_and_get:
524 : : * @address: (not nullable): a pointer to a #gpointer-sized value
525 : : * @lock_bit: a bit value between 0 and 31
526 : : * @out_ptr: (out) (optional): returns the set pointer atomically.
527 : : * This is the value after setting the lock, it thus always has the
528 : : * lock bit set, while previously @address had the lockbit unset.
529 : : * You may also use g_pointer_bit_lock_mask_ptr() to clear the lock bit.
530 : : *
531 : : * This is equivalent to g_bit_lock, but working on pointers (or other
532 : : * pointer-sized values).
533 : : *
534 : : * For portability reasons, you may only lock on the bottom 32 bits of
535 : : * the pointer.
536 : : *
537 : : * Since: 2.80
538 : : **/
void
(g_pointer_bit_lock_and_get) (gpointer address,
                              guint lock_bit,
                              guintptr *out_ptr)
{
  guintptr mask;
  guintptr v;

  g_return_if_fail (lock_bit < 32);

  mask = 1u << lock_bit;

#ifdef USE_ASM_GOTO
  if (G_LIKELY (!out_ptr))
    {
      /* Fast path (caller does not want the value): "lock bts" on the
       * pointer-sized word; the carry flag signals contention. */
      while (TRUE)
        {
          __asm__ volatile goto ("lock bts %1, (%0)\n"
                                 "jc %l[contended]"
                                 : /* no output */
                                 : "r"(address), "r"((gsize) lock_bit)
                                 : "cc", "memory"
                                 : contended);
          return;

        contended:
          /* Only sleep while the lock bit is still set. */
          v = (guintptr) g_atomic_pointer_get ((gpointer *) address);
          if (v & mask)
            bit_lock_futex_wait (address, TRUE, (gint) v);
        }
    }
#endif

retry:
  /* Generic path: atomically OR the lock bit in; block while another
   * thread holds it.  Only the low 32 bits of @v matter for the futex
   * comparison (see g_futex_int_address()), hence the (gint) cast. */
  v = g_atomic_pointer_or ((gpointer *) address, mask);
  if (v & mask)
    {
      bit_lock_futex_wait (address, TRUE, (gint) v);
      goto retry;
    }

  if (out_ptr)
    *out_ptr = (v | mask);
}
583 : :
584 : : /**
585 : : * g_pointer_bit_lock:
586 : : * @address: (not nullable): a pointer to a #gpointer-sized value
587 : : * @lock_bit: a bit value between 0 and 31
588 : : *
589 : : * This is equivalent to g_bit_lock, but working on pointers (or other
590 : : * pointer-sized values).
591 : : *
592 : : * For portability reasons, you may only lock on the bottom 32 bits of
593 : : * the pointer.
594 : : *
595 : : * While @address has a `volatile` qualifier, this is a historical
596 : : * artifact and the argument passed to it should not be `volatile`.
597 : : *
598 : : * Since: 2.30
599 : : **/
600 : : void
601 : 232671993 : (g_pointer_bit_lock) (volatile void *address,
602 : : gint lock_bit)
603 : : {
604 : 232671993 : g_pointer_bit_lock_and_get ((gpointer *) address, (guint) lock_bit, NULL);
605 : 232671993 : }
606 : :
607 : : /**
608 : : * g_pointer_bit_trylock:
609 : : * @address: (not nullable): a pointer to a #gpointer-sized value
610 : : * @lock_bit: a bit value between 0 and 31
611 : : *
612 : : * This is equivalent to g_bit_trylock(), but working on pointers (or
613 : : * other pointer-sized values).
614 : : *
615 : : * For portability reasons, you may only lock on the bottom 32 bits of
616 : : * the pointer.
617 : : *
618 : : * While @address has a `volatile` qualifier, this is a historical
619 : : * artifact and the argument passed to it should not be `volatile`.
620 : : *
621 : : * Returns: %TRUE if the lock was acquired
622 : : *
623 : : * Since: 2.30
624 : : **/
gboolean
(g_pointer_bit_trylock) (volatile void *address,
                         gint lock_bit)
{
  g_return_val_if_fail (lock_bit < 32, FALSE);

  {
#ifdef USE_ASM_GOTO
    gboolean result;

    /* "lock bts": CF receives the bit's previous value; setnc yields 1
     * exactly when it was clear, i.e. when we acquired the lock. */
    __asm__ volatile ("lock bts %2, (%1)\n"
                      "setnc %%al\n"
                      "movzx %%al, %0"
                      : "=r" (result)
                      : "r" (address), "r" ((gsize) lock_bit)
                      : "cc", "memory");

    return result;
#else
    void *address_nonvolatile = (void *) address;
    gpointer *pointer_address = address_nonvolatile;
    gsize mask = 1u << lock_bit;
    guintptr v;

    g_return_val_if_fail (lock_bit < 32, FALSE);

    /* Set the bit unconditionally; the old value tells us whether it was
     * previously clear (lock acquired). */
    v = g_atomic_pointer_or (pointer_address, mask);

    return (~(gsize) v & mask) != 0;
#endif
  }
}
657 : :
658 : : /**
659 : : * g_pointer_bit_unlock:
660 : : * @address: (not nullable): a pointer to a #gpointer-sized value
661 : : * @lock_bit: a bit value between 0 and 31
662 : : *
663 : : * This is equivalent to g_bit_unlock, but working on pointers (or other
664 : : * pointer-sized values).
665 : : *
666 : : * For portability reasons, you may only lock on the bottom 32 bits of
667 : : * the pointer.
668 : : *
669 : : * While @address has a `volatile` qualifier, this is a historical
670 : : * artifact and the argument passed to it should not be `volatile`.
671 : : *
672 : : * Since: 2.30
673 : : **/
void
(g_pointer_bit_unlock) (volatile void *address,
                        gint lock_bit)
{
  void *address_nonvolatile = (void *) address;

  g_return_if_fail (lock_bit < 32);

  {
#ifdef USE_ASM_GOTO
    /* "lock btr" atomically clears the lock bit. */
    __asm__ volatile ("lock btr %1, (%0)"
                      : /* no output */
                      : "r" (address), "r" ((gsize) lock_bit)
                      : "cc", "memory");
#else
    gpointer *pointer_address = address_nonvolatile;
    gsize mask = 1u << lock_bit;

    g_atomic_pointer_and (pointer_address, ~mask);
#endif

    /* Warning: unlocking may allow another thread to proceed and destroy the
     * memory that @address points to. We thus must not dereference it anymore.
     */
    bit_lock_futex_maybe_wake (address_nonvolatile, TRUE);
  }
}
701 : :
702 : : /**
703 : : * g_pointer_bit_lock_mask_ptr:
704 : : * @ptr: (nullable): the pointer to mask
705 : : * @lock_bit: the bit to set/clear. If set to `G_MAXUINT`, the
706 : : * lockbit is taken from @preserve_ptr or @ptr (depending on @preserve_mask).
707 : : * @set: whether to set (lock) the bit or unset (unlock). This
708 : : * has no effect, if @lock_bit is set to `G_MAXUINT`.
709 : : * @preserve_mask: if non-zero, a bit-mask for @preserve_ptr. The
710 : : * @preserve_mask bits from @preserve_ptr are set in the result.
711 : : * Note that the @lock_bit bit will be always set according to @set,
712 : : * regardless of @preserve_mask and @preserve_ptr (unless @lock_bit is
713 : : * `G_MAXUINT`).
714 : : * @preserve_ptr: (nullable): if @preserve_mask is non-zero, the bits
715 : : * from this pointer are set in the result.
716 : : *
717 : : * This mangles @ptr as g_pointer_bit_lock() and g_pointer_bit_unlock()
718 : : * do.
719 : : *
720 : : * Returns: the mangled pointer.
721 : : *
722 : : * Since: 2.80
723 : : **/
724 : : gpointer
725 : 111404 : g_pointer_bit_lock_mask_ptr (gpointer ptr, guint lock_bit, gboolean set, guintptr preserve_mask, gpointer preserve_ptr)
726 : : {
727 : 111404 : g_return_val_if_fail (lock_bit < 32u || lock_bit == G_MAXUINT, ptr);
728 : :
729 : 111404 : return pointer_bit_lock_mask_ptr (ptr, lock_bit, set, preserve_mask, preserve_ptr);
730 : : }
731 : :
732 : : /**
733 : : * g_pointer_bit_unlock_and_set:
734 : : * @address: (not nullable): a pointer to a #gpointer-sized value
735 : : * @lock_bit: a bit value between 0 and 31
736 : : * @ptr: the new pointer value to set
737 : : * @preserve_mask: if non-zero, those bits of the current pointer in @address
738 : : * are preserved.
739 : : * Note that the @lock_bit bit will be always unset regardless of
740 : : * @ptr, @preserve_mask and the currently set value in @address.
741 : : *
742 : : * This is equivalent to g_pointer_bit_unlock() and atomically setting
743 : : * the pointer value.
744 : : *
745 : : * Note that the lock bit will be cleared from the pointer. If the unlocked
746 : : * pointer that was set is not identical to @ptr, an assertion fails. In other
747 : : * words, @ptr must have @lock_bit unset. This also means, you usually can
748 : : * only use this on the lowest bits.
749 : : *
750 : : * Since: 2.80
751 : : **/
void (g_pointer_bit_unlock_and_set) (void *address,
                                     guint lock_bit,
                                     gpointer ptr,
                                     guintptr preserve_mask)
{
  gpointer *pointer_address = address;
  gpointer ptr2;

  g_return_if_fail (lock_bit < 32u);

  if (preserve_mask != 0)
    {
      gpointer old_ptr = g_atomic_pointer_get ((gpointer *) address);

    again:
      /* Merge the @preserve_mask bits of the current value into @ptr
       * (lock bit cleared) and try to publish; on CAS failure old_ptr is
       * refreshed and the merge is redone. */
      ptr2 = pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, preserve_mask, old_ptr);
      if (!g_atomic_pointer_compare_and_exchange_full (pointer_address, old_ptr, ptr2, &old_ptr))
        goto again;
    }
  else
    {
      ptr2 = pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, 0, NULL);
      g_atomic_pointer_set (pointer_address, ptr2);
    }

  /* Wake a potential waiter now that the lock bit is cleared. */
  bit_lock_futex_maybe_wake (address, TRUE);

  /* It makes no sense, if unlocking mangles the pointer. Assert against
   * that.
   *
   * Note that based on @preserve_mask, the pointer also gets mangled, which
   * can make sense for the caller. We don't assert for that. */
  g_return_if_fail (ptr == pointer_bit_lock_mask_ptr (ptr, lock_bit, FALSE, 0, NULL));
}
|