Branch data Line data Source code
1 : : /* GLIB - Library of useful routines for C programming
2 : : * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
3 : : *
4 : : * gthread.c: posix thread system implementation
5 : : * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
6 : : *
7 : : * SPDX-License-Identifier: LGPL-2.1-or-later
8 : : *
9 : : * This library is free software; you can redistribute it and/or
10 : : * modify it under the terms of the GNU Lesser General Public
11 : : * License as published by the Free Software Foundation; either
12 : : * version 2.1 of the License, or (at your option) any later version.
13 : : *
14 : : * This library is distributed in the hope that it will be useful,
15 : : * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 : : * Lesser General Public License for more details.
18 : : *
19 : : * You should have received a copy of the GNU Lesser General Public
20 : : * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 : : */
22 : :
23 : : /*
24 : : * Modified by the GLib Team and others 1997-2000. See the AUTHORS
25 : : * file for a list of people on the GLib Team. See the ChangeLog
26 : : * files for a list of changes. These files are distributed with
27 : : * GLib at ftp://ftp.gtk.org/pub/gtk/.
28 : : */
29 : :
30 : : /* The GMutex, GCond and GPrivate implementations in this file are some
31 : : * of the lowest-level code in GLib. All other parts of GLib (messages,
32 : : * memory, slices, etc) assume that they can freely use these facilities
33 : : * without risking recursion.
34 : : *
35 : : * As such, these functions are NOT permitted to call any other part of
36 : : * GLib.
37 : : *
38 : : * The thread manipulation functions (create, exit, join, etc.) have
39 : : * more freedom -- they can do as they please.
40 : : */
41 : :
42 : : #include "config.h"
43 : :
44 : : #include "gthread.h"
45 : :
46 : : #include "gmain.h"
47 : : #include "gmessages.h"
48 : : #include "gslice.h"
49 : : #include "gstrfuncs.h"
50 : : #include "gtestutils.h"
51 : : #include "gthreadprivate.h"
52 : : #include "gutils.h"
53 : :
54 : : #include <stdlib.h>
55 : : #include <stdio.h>
56 : : #include <string.h>
57 : : #include <errno.h>
58 : : #include <pthread.h>
59 : :
60 : : #include <sys/time.h>
61 : : #include <unistd.h>
62 : :
63 : : #ifdef HAVE_PTHREAD_SET_NAME_NP
64 : : #include <pthread_np.h>
65 : : #endif
66 : : #ifdef HAVE_SCHED_H
67 : : #include <sched.h>
68 : : #endif
69 : : #ifdef G_OS_WIN32
70 : : #include <windows.h>
71 : : #endif
72 : :
73 : : #if defined(HAVE_SYS_SCHED_GETATTR)
74 : : #include <sys/syscall.h>
75 : : #endif
76 : :
77 : : #if (defined(HAVE_FUTEX) || defined(HAVE_FUTEX_TIME64)) && \
78 : : (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST))
79 : : #define USE_NATIVE_MUTEX
80 : : #endif
81 : :
82 : : static void
83 : 0 : g_thread_abort (gint status,
84 : : const gchar *function)
85 : : {
86 : 0 : fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
87 : : function, strerror (status));
88 : 0 : g_abort ();
89 : : }
90 : :
91 : : /* {{{1 GMutex */
92 : :
93 : : #if !defined(USE_NATIVE_MUTEX)
94 : :
95 : : static pthread_mutex_t *
96 : : g_mutex_impl_new (void)
97 : : {
98 : : pthread_mutexattr_t *pattr = NULL;
99 : : pthread_mutex_t *mutex;
100 : : gint status;
101 : : #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
102 : : pthread_mutexattr_t attr;
103 : : #endif
104 : :
105 : : mutex = malloc (sizeof (pthread_mutex_t));
106 : : if G_UNLIKELY (mutex == NULL)
107 : : g_thread_abort (errno, "malloc");
108 : :
109 : : #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
110 : : pthread_mutexattr_init (&attr);
111 : : pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
112 : : pattr = &attr;
113 : : #endif
114 : :
115 : : if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
116 : : g_thread_abort (status, "pthread_mutex_init");
117 : :
118 : : #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
119 : : pthread_mutexattr_destroy (&attr);
120 : : #endif
121 : :
122 : : return mutex;
123 : : }
124 : :
/* Destroy a pthread mutex created by g_mutex_impl_new() and release its
 * malloc()ed storage. */
static void
g_mutex_impl_free (pthread_mutex_t *impl)
{
  pthread_mutex_destroy (impl);
  free (impl);
}
131 : :
132 : : static inline pthread_mutex_t *
133 : : g_mutex_get_impl (GMutex *mutex)
134 : : {
135 : : pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);
136 : :
137 : : if G_UNLIKELY (impl == NULL)
138 : : {
139 : : impl = g_mutex_impl_new ();
140 : : if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
141 : : g_mutex_impl_free (impl);
142 : : impl = mutex->p;
143 : : }
144 : :
145 : : return impl;
146 : : }
147 : :
148 : :
149 : : /**
150 : : * g_mutex_init:
151 : : * @mutex: an uninitialized #GMutex
152 : : *
153 : : * Initializes a #GMutex so that it can be used.
154 : : *
155 : : * This function is useful to initialize a mutex that has been
156 : : * allocated on the stack, or as part of a larger structure.
157 : : * It is not necessary to initialize a mutex that has been
158 : : * statically allocated.
159 : : *
160 : : * |[<!-- language="C" -->
161 : : * typedef struct {
162 : : * GMutex m;
163 : : * ...
164 : : * } Blob;
165 : : *
166 : : * Blob *b;
167 : : *
168 : : * b = g_new (Blob, 1);
169 : : * g_mutex_init (&b->m);
170 : : * ]|
171 : : *
172 : : * To undo the effect of g_mutex_init() when a mutex is no longer
173 : : * needed, use g_mutex_clear().
174 : : *
175 : : * Calling g_mutex_init() on an already initialized #GMutex leads
176 : : * to undefined behaviour.
177 : : *
178 : : * Since: 2.32
179 : : */
180 : : void
181 : : g_mutex_init (GMutex *mutex)
182 : : {
183 : : mutex->p = g_mutex_impl_new ();
184 : : }
185 : :
186 : : /**
187 : : * g_mutex_clear:
188 : : * @mutex: an initialized #GMutex
189 : : *
190 : : * Frees the resources allocated to a mutex with g_mutex_init().
191 : : *
192 : : * This function should not be used with a #GMutex that has been
193 : : * statically allocated.
194 : : *
195 : : * Calling g_mutex_clear() on a locked mutex leads to undefined
196 : : * behaviour.
197 : : *
198 : : * Since: 2.32
199 : : */
200 : : void
201 : : g_mutex_clear (GMutex *mutex)
202 : : {
203 : : g_mutex_impl_free (mutex->p);
204 : : }
205 : :
206 : : /**
207 : : * g_mutex_lock:
208 : : * @mutex: a #GMutex
209 : : *
210 : : * Locks @mutex. If @mutex is already locked by another thread, the
211 : : * current thread will block until @mutex is unlocked by the other
212 : : * thread.
213 : : *
214 : : * #GMutex is neither guaranteed to be recursive nor to be
215 : : * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
216 : : * already been locked by the same thread results in undefined behaviour
217 : : * (including but not limited to deadlocks).
218 : : */
219 : : void
220 : : g_mutex_lock (GMutex *mutex)
221 : : {
222 : : gint status;
223 : :
224 : : if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
225 : : g_thread_abort (status, "pthread_mutex_lock");
226 : : }
227 : :
228 : : /**
229 : : * g_mutex_unlock:
230 : : * @mutex: a #GMutex
231 : : *
232 : : * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
233 : : * call for @mutex, it will become unblocked and can lock @mutex itself.
234 : : *
235 : : * Calling g_mutex_unlock() on a mutex that is not locked by the
236 : : * current thread leads to undefined behaviour.
237 : : */
238 : : void
239 : : g_mutex_unlock (GMutex *mutex)
240 : : {
241 : : gint status;
242 : :
243 : : if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
244 : : g_thread_abort (status, "pthread_mutex_unlock");
245 : : }
246 : :
247 : : /**
248 : : * g_mutex_trylock:
249 : : * @mutex: a #GMutex
250 : : *
251 : : * Tries to lock @mutex. If @mutex is already locked by another thread,
252 : : * it immediately returns %FALSE. Otherwise it locks @mutex and returns
253 : : * %TRUE.
254 : : *
255 : : * #GMutex is neither guaranteed to be recursive nor to be
256 : : * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
257 : : * already been locked by the same thread results in undefined behaviour
258 : : * (including but not limited to deadlocks or arbitrary return values).
259 : : *
260 : : * Returns: %TRUE if @mutex could be locked
261 : : */
262 : : gboolean
263 : : g_mutex_trylock (GMutex *mutex)
264 : : {
265 : : gint status;
266 : :
267 : : if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
268 : : return TRUE;
269 : :
270 : : if G_UNLIKELY (status != EBUSY)
271 : : g_thread_abort (status, "pthread_mutex_trylock");
272 : :
273 : : return FALSE;
274 : : }
275 : :
276 : : #endif /* !defined(USE_NATIVE_MUTEX) */
277 : :
278 : : /* {{{1 GRecMutex */
279 : :
280 : : static pthread_mutex_t *
281 : 820 : g_rec_mutex_impl_new (void)
282 : : {
283 : : pthread_mutexattr_t attr;
284 : : pthread_mutex_t *mutex;
285 : :
286 : 820 : mutex = malloc (sizeof (pthread_mutex_t));
287 [ - + ]: 820 : if G_UNLIKELY (mutex == NULL)
288 : 0 : g_thread_abort (errno, "malloc");
289 : :
290 : 820 : pthread_mutexattr_init (&attr);
291 : 820 : pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
292 : 820 : pthread_mutex_init (mutex, &attr);
293 : 820 : pthread_mutexattr_destroy (&attr);
294 : :
295 : 820 : return mutex;
296 : : }
297 : :
/* Destroy a recursive mutex created by g_rec_mutex_impl_new() and release
 * its malloc()ed storage. */
static void
g_rec_mutex_impl_free (pthread_mutex_t *impl)
{
  pthread_mutex_destroy (impl);
  free (impl);
}
304 : :
305 : : static inline pthread_mutex_t *
306 : 1632674 : g_rec_mutex_get_impl (GRecMutex *rec_mutex)
307 : : {
308 : 1632674 : pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);
309 : :
310 [ + + ]: 1632674 : if G_UNLIKELY (impl == NULL)
311 : : {
312 : 799 : impl = g_rec_mutex_impl_new ();
313 [ - + ]: 799 : if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
314 : 0 : g_rec_mutex_impl_free (impl);
315 : 799 : impl = rec_mutex->p;
316 : : }
317 : :
318 : 1632674 : return impl;
319 : : }
320 : :
321 : : /**
322 : : * g_rec_mutex_init:
323 : : * @rec_mutex: an uninitialized #GRecMutex
324 : : *
325 : : * Initializes a #GRecMutex so that it can be used.
326 : : *
327 : : * This function is useful to initialize a recursive mutex
328 : : * that has been allocated on the stack, or as part of a larger
329 : : * structure.
330 : : *
331 : : * It is not necessary to initialise a recursive mutex that has been
332 : : * statically allocated.
333 : : *
334 : : * |[<!-- language="C" -->
335 : : * typedef struct {
336 : : * GRecMutex m;
337 : : * ...
338 : : * } Blob;
339 : : *
340 : : * Blob *b;
341 : : *
342 : : * b = g_new (Blob, 1);
343 : : * g_rec_mutex_init (&b->m);
344 : : * ]|
345 : : *
346 : : * Calling g_rec_mutex_init() on an already initialized #GRecMutex
347 : : * leads to undefined behaviour.
348 : : *
349 : : * To undo the effect of g_rec_mutex_init() when a recursive mutex
350 : : * is no longer needed, use g_rec_mutex_clear().
351 : : *
352 : : * Since: 2.32
353 : : */
354 : : void
355 : 21 : g_rec_mutex_init (GRecMutex *rec_mutex)
356 : : {
357 : 21 : rec_mutex->p = g_rec_mutex_impl_new ();
358 : 21 : }
359 : :
360 : : /**
361 : : * g_rec_mutex_clear:
362 : : * @rec_mutex: an initialized #GRecMutex
363 : : *
364 : : * Frees the resources allocated to a recursive mutex with
365 : : * g_rec_mutex_init().
366 : : *
367 : : * This function should not be used with a #GRecMutex that has been
368 : : * statically allocated.
369 : : *
370 : : * Calling g_rec_mutex_clear() on a locked recursive mutex leads
371 : : * to undefined behaviour.
372 : : *
373 : : * Since: 2.32
374 : : */
375 : : void
376 : 21 : g_rec_mutex_clear (GRecMutex *rec_mutex)
377 : : {
378 : 21 : g_rec_mutex_impl_free (rec_mutex->p);
379 : 21 : }
380 : :
381 : : /**
382 : : * g_rec_mutex_lock:
383 : : * @rec_mutex: a #GRecMutex
384 : : *
385 : : * Locks @rec_mutex. If @rec_mutex is already locked by another
386 : : * thread, the current thread will block until @rec_mutex is
387 : : * unlocked by the other thread. If @rec_mutex is already locked
388 : : * by the current thread, the 'lock count' of @rec_mutex is increased.
389 : : * The mutex will only become available again when it is unlocked
390 : : * as many times as it has been locked.
391 : : *
392 : : * Since: 2.32
393 : : */
394 : : void
395 : 1622663 : g_rec_mutex_lock (GRecMutex *mutex)
396 : : {
397 : 1622663 : pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
398 : 1622663 : }
399 : :
400 : : /**
401 : : * g_rec_mutex_unlock:
402 : : * @rec_mutex: a #GRecMutex
403 : : *
404 : : * Unlocks @rec_mutex. If another thread is blocked in a
405 : : * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
406 : : * and can lock @rec_mutex itself.
407 : : *
408 : : * Calling g_rec_mutex_unlock() on a recursive mutex that is not
409 : : * locked by the current thread leads to undefined behaviour.
410 : : *
411 : : * Since: 2.32
412 : : */
413 : : void
414 : 1627690 : g_rec_mutex_unlock (GRecMutex *rec_mutex)
415 : : {
416 : 1627690 : pthread_mutex_unlock (rec_mutex->p);
417 : 1627690 : }
418 : :
419 : : /**
420 : : * g_rec_mutex_trylock:
421 : : * @rec_mutex: a #GRecMutex
422 : : *
423 : : * Tries to lock @rec_mutex. If @rec_mutex is already locked
424 : : * by another thread, it immediately returns %FALSE. Otherwise
425 : : * it locks @rec_mutex and returns %TRUE.
426 : : *
427 : : * Returns: %TRUE if @rec_mutex could be locked
428 : : *
429 : : * Since: 2.32
430 : : */
431 : : gboolean
432 : 10011 : g_rec_mutex_trylock (GRecMutex *rec_mutex)
433 : : {
434 [ + + ]: 10011 : if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
435 : 4984 : return FALSE;
436 : :
437 : 5027 : return TRUE;
438 : : }
439 : :
440 : : /* {{{1 GRWLock */
441 : :
442 : : static pthread_rwlock_t *
443 : 1044 : g_rw_lock_impl_new (void)
444 : : {
445 : : pthread_rwlock_t *rwlock;
446 : : gint status;
447 : :
448 : 1044 : rwlock = malloc (sizeof (pthread_rwlock_t));
449 [ - + ]: 1044 : if G_UNLIKELY (rwlock == NULL)
450 : 0 : g_thread_abort (errno, "malloc");
451 : :
452 [ - + ]: 1044 : if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
453 : 0 : g_thread_abort (status, "pthread_rwlock_init");
454 : :
455 : 1044 : return rwlock;
456 : : }
457 : :
458 : : static void
459 : 52 : g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
460 : : {
461 : 52 : pthread_rwlock_destroy (rwlock);
462 : 52 : free (rwlock);
463 : 52 : }
464 : :
465 : : static inline pthread_rwlock_t *
466 : 34827697 : g_rw_lock_get_impl (GRWLock *lock)
467 : : {
468 : 34827697 : pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);
469 : :
470 [ + + ]: 34827697 : if G_UNLIKELY (impl == NULL)
471 : : {
472 : 992 : impl = g_rw_lock_impl_new ();
473 [ - + ]: 992 : if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
474 : 0 : g_rw_lock_impl_free (impl);
475 : 992 : impl = lock->p;
476 : : }
477 : :
478 : 34827697 : return impl;
479 : : }
480 : :
481 : : /**
482 : : * g_rw_lock_init:
483 : : * @rw_lock: an uninitialized #GRWLock
484 : : *
485 : : * Initializes a #GRWLock so that it can be used.
486 : : *
487 : : * This function is useful to initialize a lock that has been
488 : : * allocated on the stack, or as part of a larger structure. It is not
489 : : * necessary to initialise a reader-writer lock that has been statically
490 : : * allocated.
491 : : *
492 : : * |[<!-- language="C" -->
493 : : * typedef struct {
494 : : * GRWLock l;
495 : : * ...
496 : : * } Blob;
497 : : *
498 : : * Blob *b;
499 : : *
500 : : * b = g_new (Blob, 1);
501 : : * g_rw_lock_init (&b->l);
502 : : * ]|
503 : : *
504 : : * To undo the effect of g_rw_lock_init() when a lock is no longer
505 : : * needed, use g_rw_lock_clear().
506 : : *
507 : : * Calling g_rw_lock_init() on an already initialized #GRWLock leads
508 : : * to undefined behaviour.
509 : : *
510 : : * Since: 2.32
511 : : */
512 : : void
513 : 52 : g_rw_lock_init (GRWLock *rw_lock)
514 : : {
515 : 52 : rw_lock->p = g_rw_lock_impl_new ();
516 : 52 : }
517 : :
518 : : /**
519 : : * g_rw_lock_clear:
520 : : * @rw_lock: an initialized #GRWLock
521 : : *
522 : : * Frees the resources allocated to a lock with g_rw_lock_init().
523 : : *
524 : : * This function should not be used with a #GRWLock that has been
525 : : * statically allocated.
526 : : *
527 : : * Calling g_rw_lock_clear() when any thread holds the lock
528 : : * leads to undefined behaviour.
529 : : *
530 : : * Since: 2.32
531 : : */
532 : : void
533 : 52 : g_rw_lock_clear (GRWLock *rw_lock)
534 : : {
535 : 52 : g_rw_lock_impl_free (rw_lock->p);
536 : 52 : }
537 : :
538 : : /**
539 : : * g_rw_lock_writer_lock:
540 : : * @rw_lock: a #GRWLock
541 : : *
542 : : * Obtain a write lock on @rw_lock. If another thread currently holds
543 : : * a read or write lock on @rw_lock, the current thread will block
544 : : * until all other threads have dropped their locks on @rw_lock.
545 : : *
546 : : * Calling g_rw_lock_writer_lock() while the current thread already
547 : : * owns a read or write lock on @rw_lock leads to undefined behaviour.
548 : : *
549 : : * Since: 2.32
550 : : */
551 : : void
552 : 920450 : g_rw_lock_writer_lock (GRWLock *rw_lock)
553 : : {
554 : 920450 : int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
555 : :
556 [ - + ]: 920450 : if (retval != 0)
557 : 0 : g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
558 : 920450 : }
559 : :
560 : : /**
561 : : * g_rw_lock_writer_trylock:
562 : : * @rw_lock: a #GRWLock
563 : : *
564 : : * Tries to obtain a write lock on @rw_lock. If another thread
565 : : * currently holds a read or write lock on @rw_lock, it immediately
566 : : * returns %FALSE.
567 : : * Otherwise it locks @rw_lock and returns %TRUE.
568 : : *
569 : : * Returns: %TRUE if @rw_lock could be locked
570 : : *
571 : : * Since: 2.32
572 : : */
573 : : gboolean
574 : 1000006 : g_rw_lock_writer_trylock (GRWLock *rw_lock)
575 : : {
576 [ + + ]: 1000006 : if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
577 : 522993 : return FALSE;
578 : :
579 : 477013 : return TRUE;
580 : : }
581 : :
582 : : /**
583 : : * g_rw_lock_writer_unlock:
584 : : * @rw_lock: a #GRWLock
585 : : *
586 : : * Release a write lock on @rw_lock.
587 : : *
588 : : * Calling g_rw_lock_writer_unlock() on a lock that is not held
589 : : * by the current thread leads to undefined behaviour.
590 : : *
591 : : * Since: 2.32
592 : : */
593 : : void
594 : 1397463 : g_rw_lock_writer_unlock (GRWLock *rw_lock)
595 : : {
596 : 1397463 : pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
597 : 1397463 : }
598 : :
599 : : /**
600 : : * g_rw_lock_reader_lock:
601 : : * @rw_lock: a #GRWLock
602 : : *
603 : : * Obtain a read lock on @rw_lock. If another thread currently holds
604 : : * the write lock on @rw_lock, the current thread will block until the
605 : : * write lock was (held and) released. If another thread does not hold
606 : : * the write lock, but is waiting for it, it is implementation defined
607 : : * whether the reader or writer will block. Read locks can be taken
608 : : * recursively.
609 : : *
610 : : * Calling g_rw_lock_reader_lock() while the current thread already
611 : : * owns a write lock leads to undefined behaviour. Read locks however
612 : : * can be taken recursively, in which case you need to make sure to
613 : : * call g_rw_lock_reader_unlock() the same amount of times.
614 : : *
615 : : * It is implementation-defined how many read locks are allowed to be
616 : : * held on the same lock simultaneously. If the limit is hit,
617 : : * or if a deadlock is detected, a critical warning will be emitted.
618 : : *
619 : : * Since: 2.32
620 : : */
621 : : void
622 : 15754884 : g_rw_lock_reader_lock (GRWLock *rw_lock)
623 : : {
624 : 15754884 : int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
625 : :
626 [ - + ]: 15754884 : if (retval != 0)
627 : 0 : g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
628 : 15754884 : }
629 : :
630 : : /**
631 : : * g_rw_lock_reader_trylock:
632 : : * @rw_lock: a #GRWLock
633 : : *
634 : : * Tries to obtain a read lock on @rw_lock and returns %TRUE if
635 : : * the read lock was successfully obtained. Otherwise it
636 : : * returns %FALSE.
637 : : *
638 : : * Returns: %TRUE if @rw_lock could be locked
639 : : *
640 : : * Since: 2.32
641 : : */
642 : : gboolean
643 : 6 : g_rw_lock_reader_trylock (GRWLock *rw_lock)
644 : : {
645 [ + + ]: 6 : if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
646 : 2 : return FALSE;
647 : :
648 : 4 : return TRUE;
649 : : }
650 : :
651 : : /**
652 : : * g_rw_lock_reader_unlock:
653 : : * @rw_lock: a #GRWLock
654 : : *
655 : : * Release a read lock on @rw_lock.
656 : : *
657 : : * Calling g_rw_lock_reader_unlock() on a lock that is not held
658 : : * by the current thread leads to undefined behaviour.
659 : : *
660 : : * Since: 2.32
661 : : */
662 : : void
663 : 15754888 : g_rw_lock_reader_unlock (GRWLock *rw_lock)
664 : : {
665 : 15754888 : pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
666 : 15754888 : }
667 : :
668 : : /* {{{1 GCond */
669 : :
670 : : #if !defined(USE_NATIVE_MUTEX)
671 : :
/* Allocate and initialise the pthread_cond_t backing a GCond.
 *
 * Uses plain malloc() (this code must not recurse into GLib) and aborts on
 * any failure. The clock used for timed waits is configured here so that
 * g_cond_wait_until() can work with g_get_monotonic_time() deadlines.
 */
static pthread_cond_t *
g_cond_impl_new (void)
{
  pthread_condattr_t attr;
  pthread_cond_t *cond;
  gint status;

  pthread_condattr_init (&attr);

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* Relative timed waits are available (e.g. macOS), so the default clock
   * is fine: g_cond_wait_until() converts its deadline to a relative wait. */
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* Make timed waits use the monotonic clock, matching
   * g_get_monotonic_time(); g_cond_wait_until() relies on this. */
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
    g_thread_abort (status, "pthread_condattr_setclock");
#else
#error Cannot support GCond on your platform.
#endif

  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

  return cond;
}
700 : :
/* Destroy a condition variable created by g_cond_impl_new() and free its
 * storage. */
static void
g_cond_impl_free (pthread_cond_t *impl)
{
  pthread_cond_destroy (impl);
  free (impl);
}
707 : :
708 : : static inline pthread_cond_t *
709 : : g_cond_get_impl (GCond *cond)
710 : : {
711 : : pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);
712 : :
713 : : if G_UNLIKELY (impl == NULL)
714 : : {
715 : : impl = g_cond_impl_new ();
716 : : if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
717 : : g_cond_impl_free (impl);
718 : : impl = cond->p;
719 : : }
720 : :
721 : : return impl;
722 : : }
723 : :
724 : : /**
725 : : * g_cond_init:
726 : : * @cond: an uninitialized #GCond
727 : : *
728 : : * Initialises a #GCond so that it can be used.
729 : : *
730 : : * This function is useful to initialise a #GCond that has been
731 : : * allocated as part of a larger structure. It is not necessary to
732 : : * initialise a #GCond that has been statically allocated.
733 : : *
734 : : * To undo the effect of g_cond_init() when a #GCond is no longer
735 : : * needed, use g_cond_clear().
736 : : *
737 : : * Calling g_cond_init() on an already-initialised #GCond leads
738 : : * to undefined behaviour.
739 : : *
740 : : * Since: 2.32
741 : : */
742 : : void
743 : : g_cond_init (GCond *cond)
744 : : {
745 : : cond->p = g_cond_impl_new ();
746 : : }
747 : :
748 : : /**
749 : : * g_cond_clear:
750 : : * @cond: an initialised #GCond
751 : : *
752 : : * Frees the resources allocated to a #GCond with g_cond_init().
753 : : *
754 : : * This function should not be used with a #GCond that has been
755 : : * statically allocated.
756 : : *
757 : : * Calling g_cond_clear() for a #GCond on which threads are
758 : : * blocking leads to undefined behaviour.
759 : : *
760 : : * Since: 2.32
761 : : */
762 : : void
763 : : g_cond_clear (GCond *cond)
764 : : {
765 : : g_cond_impl_free (cond->p);
766 : : }
767 : :
768 : : /**
769 : : * g_cond_wait:
770 : : * @cond: a #GCond
771 : : * @mutex: a #GMutex that is currently locked
772 : : *
773 : : * Atomically releases @mutex and waits until @cond is signalled.
774 : : * When this function returns, @mutex is locked again and owned by the
775 : : * calling thread.
776 : : *
777 : : * When using condition variables, it is possible that a spurious wakeup
778 : : * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
779 : : * not called). It's also possible that a stolen wakeup may occur.
780 : : * This is when g_cond_signal() is called, but another thread acquires
781 : : * @mutex before this thread and modifies the state of the program in
782 : : * such a way that when g_cond_wait() is able to return, the expected
783 : : * condition is no longer met.
784 : : *
785 : : * For this reason, g_cond_wait() must always be used in a loop. See
786 : : * the documentation for #GCond for a complete example.
787 : : **/
788 : : void
789 : : g_cond_wait (GCond *cond,
790 : : GMutex *mutex)
791 : : {
792 : : gint status;
793 : :
794 : : if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
795 : : g_thread_abort (status, "pthread_cond_wait");
796 : : }
797 : :
798 : : /**
799 : : * g_cond_signal:
800 : : * @cond: a #GCond
801 : : *
802 : : * If threads are waiting for @cond, at least one of them is unblocked.
803 : : * If no threads are waiting for @cond, this function has no effect.
804 : : * It is good practice to hold the same lock as the waiting thread
805 : : * while calling this function, though not required.
806 : : */
807 : : void
808 : : g_cond_signal (GCond *cond)
809 : : {
810 : : gint status;
811 : :
812 : : if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
813 : : g_thread_abort (status, "pthread_cond_signal");
814 : : }
815 : :
816 : : /**
817 : : * g_cond_broadcast:
818 : : * @cond: a #GCond
819 : : *
820 : : * If threads are waiting for @cond, all of them are unblocked.
821 : : * If no threads are waiting for @cond, this function has no effect.
822 : : * It is good practice to lock the same mutex as the waiting threads
823 : : * while calling this function, though not required.
824 : : */
825 : : void
826 : : g_cond_broadcast (GCond *cond)
827 : : {
828 : : gint status;
829 : :
830 : : if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
831 : : g_thread_abort (status, "pthread_cond_broadcast");
832 : : }
833 : :
834 : : /**
835 : : * g_cond_wait_until:
836 : : * @cond: a #GCond
837 : : * @mutex: a #GMutex that is currently locked
838 : : * @end_time: the monotonic time to wait until
839 : : *
840 : : * Waits until either @cond is signalled or @end_time has passed.
841 : : *
842 : : * As with g_cond_wait() it is possible that a spurious or stolen wakeup
843 : : * could occur. For that reason, waiting on a condition variable should
844 : : * always be in a loop, based on an explicitly-checked predicate.
845 : : *
846 : : * %TRUE is returned if the condition variable was signalled (or in the
847 : : * case of a spurious wakeup). %FALSE is returned if @end_time has
848 : : * passed.
849 : : *
850 : : * The following code shows how to correctly perform a timed wait on a
851 : : * condition variable (extending the example presented in the
852 : : * documentation for #GCond):
853 : : *
854 : : * |[<!-- language="C" -->
855 : : * gpointer
856 : : * pop_data_timed (void)
857 : : * {
858 : : * gint64 end_time;
859 : : * gpointer data;
860 : : *
861 : : * g_mutex_lock (&data_mutex);
862 : : *
863 : : * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
864 : : * while (!current_data)
865 : : * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
866 : : * {
867 : : * // timeout has passed.
868 : : * g_mutex_unlock (&data_mutex);
869 : : * return NULL;
870 : : * }
871 : : *
872 : : * // there is data for us
873 : : * data = current_data;
874 : : * current_data = NULL;
875 : : *
876 : : * g_mutex_unlock (&data_mutex);
877 : : *
878 : : * return data;
879 : : * }
880 : : * ]|
881 : : *
882 : : * Notice that the end time is calculated once, before entering the
883 : : * loop and reused. This is the motivation behind the use of absolute
884 : : * time on this API -- if a relative time of 5 seconds were passed
885 : : * directly to the call and a spurious wakeup occurred, the program would
886 : : * have to start over waiting again (which would lead to a total wait
887 : : * time of more than 5 seconds).
888 : : *
889 : : * Returns: %TRUE on a signal, %FALSE on a timeout
890 : : * Since: 2.32
891 : : **/
/* Wait on @cond (releasing @mutex) until signalled or until the absolute
 * monotonic deadline @end_time (in microseconds, as from
 * g_get_monotonic_time()) passes. Returns TRUE on wakeup, FALSE on timeout.
 * The platform branch chosen here must match the clock configured in
 * g_cond_impl_new(). */
gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec ts;
  gint status;

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* end_time is given relative to the monotonic clock as returned by
   * g_get_monotonic_time().
   *
   * Since this pthreads wants the relative time, convert it back again.
   */
  {
    gint64 now = g_get_monotonic_time ();
    gint64 relative;

    /* Deadline already passed: report a timeout without waiting at all. */
    if (end_time <= now)
      return FALSE;

    relative = end_time - now;

    /* Split the microsecond count into whole seconds + nanoseconds. */
    ts.tv_sec = relative / 1000000;
    ts.tv_nsec = (relative % 1000000) * 1000;

    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* This is the exact check we used during init to set the clock to
   * monotonic, so if we're in this branch, timedwait() will already be
   * expecting a monotonic clock.
   */
  {
    /* Absolute deadline: microseconds -> seconds + nanoseconds. */
    ts.tv_sec = end_time / 1000000;
    ts.tv_nsec = (end_time % 1000000) * 1000;

    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#else
#error Cannot support GCond on your platform.
#endif

  /* Anything other than a timeout indicates a serious problem. */
  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");

  return FALSE;
}
942 : :
943 : : #endif /* defined(USE_NATIVE_MUTEX) */
944 : :
945 : : /* {{{1 GPrivate */
946 : :
947 : : /**
948 : : * GPrivate:
949 : : *
950 : : * The #GPrivate struct is an opaque data structure to represent a
951 : : * thread-local data key. It is approximately equivalent to the
952 : : * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
953 : : * TlsSetValue()/TlsGetValue() on Windows.
954 : : *
955 : : * If you don't already know why you might want this functionality,
956 : : * then you probably don't need it.
957 : : *
958 : : * #GPrivate is a very limited resource (as far as 128 per program,
959 : : * shared between all libraries). It is also not possible to destroy a
960 : : * #GPrivate after it has been used. As such, it is only ever acceptable
961 : : * to use #GPrivate in static scope, and even then sparingly so.
962 : : *
963 : : * See G_PRIVATE_INIT() for a couple of examples.
964 : : *
965 : : * The #GPrivate structure should be considered opaque. It should only
966 : : * be accessed via the g_private_ functions.
967 : : */
968 : :
969 : : /**
970 : : * G_PRIVATE_INIT:
971 : : * @notify: a #GDestroyNotify
972 : : *
973 : : * A macro to assist with the static initialisation of a #GPrivate.
974 : : *
975 : : * This macro is useful for the case that a #GDestroyNotify function
976 : : * should be associated with the key. This is needed when the key will be
977 : : * used to point at memory that should be deallocated when the thread
978 : : * exits.
979 : : *
980 : : * Additionally, the #GDestroyNotify will also be called on the previous
981 : : * value stored in the key when g_private_replace() is used.
982 : : *
983 : : * If no #GDestroyNotify is needed, then use of this macro is not
984 : : * required -- if the #GPrivate is declared in static scope then it will
985 : : * be properly initialised by default (ie: to all zeros). See the
986 : : * examples below.
987 : : *
988 : : * |[<!-- language="C" -->
989 : : * static GPrivate name_key = G_PRIVATE_INIT (g_free);
990 : : *
991 : : * // return value should not be freed
992 : : * const gchar *
993 : : * get_local_name (void)
994 : : * {
995 : : * return g_private_get (&name_key);
996 : : * }
997 : : *
998 : : * void
999 : : * set_local_name (const gchar *name)
1000 : : * {
1001 : : * g_private_replace (&name_key, g_strdup (name));
1002 : : * }
1003 : : *
1004 : : *
1005 : : * static GPrivate count_key; // no free function
1006 : : *
1007 : : * gint
1008 : : * get_local_count (void)
1009 : : * {
1010 : : * return GPOINTER_TO_INT (g_private_get (&count_key));
1011 : : * }
1012 : : *
1013 : : * void
1014 : : * set_local_count (gint count)
1015 : : * {
1016 : : * g_private_set (&count_key, GINT_TO_POINTER (count));
1017 : : * }
1018 : : * ]|
1019 : : *
1020 : : * Since: 2.32
1021 : : **/
1022 : :
1023 : : static pthread_key_t *
1024 : 0 : g_private_impl_new (GDestroyNotify notify)
1025 : : {
1026 : : pthread_key_t *key;
1027 : : gint status;
1028 : :
1029 : 0 : key = malloc (sizeof (pthread_key_t));
1030 [ # # ]: 0 : if G_UNLIKELY (key == NULL)
1031 : 0 : g_thread_abort (errno, "malloc");
1032 : 0 : status = pthread_key_create (key, notify);
1033 [ # # ]: 0 : if G_UNLIKELY (status != 0)
1034 : 0 : g_thread_abort (status, "pthread_key_create");
1035 : :
1036 : 0 : return key;
1037 : : }
1038 : :
1039 : : static void
1040 : 0 : g_private_impl_free (pthread_key_t *key)
1041 : : {
1042 : : gint status;
1043 : :
1044 : 0 : status = pthread_key_delete (*key);
1045 [ # # ]: 0 : if G_UNLIKELY (status != 0)
1046 : 0 : g_thread_abort (status, "pthread_key_delete");
1047 : 0 : free (key);
1048 : 0 : }
1049 : :
/* Create a pthread TLS key and pack its bits directly into a pointer
 * value, avoiding a heap allocation.  Only used from the branch of
 * g_private_get_impl() where sizeof (pthread_key_t) <= sizeof (gpointer).
 *
 * The returned value is guaranteed non-NULL, because NULL is used by
 * g_private_get_impl() to mean "key not yet allocated".
 */
static gpointer
g_private_impl_new_direct (GDestroyNotify notify)
{
  /* Pre-fill with all-ones so that if the key is smaller than a pointer
   * the untouched high bits keep the result non-NULL (see below). */
  gpointer impl = (void *) (gssize) -1;
  pthread_key_t key;
  gint status;

  status = pthread_key_create (&key, notify);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_create");

  memcpy (&impl, &key, sizeof (pthread_key_t));

  /* pthread_key_create could theoretically put a NULL value into key.
   * If that happens, waste the result and create a new one, since we
   * use NULL to mean "not yet allocated".
   *
   * This will only happen once per program run.
   *
   * We completely avoid this problem for the case where pthread_key_t
   * is smaller than void* (for example, on 64 bit Linux) by putting
   * some high bits in the value of 'impl' to start with.  Since we only
   * overwrite part of the pointer, we will never end up with NULL.
   */
  if (sizeof (pthread_key_t) == sizeof (gpointer))
    {
      if G_UNLIKELY (impl == NULL)
        {
          status = pthread_key_create (&key, notify);
          if G_UNLIKELY (status != 0)
            g_thread_abort (status, "pthread_key_create");

          memcpy (&impl, &key, sizeof (pthread_key_t));

          if G_UNLIKELY (impl == NULL)
            g_thread_abort (status, "pthread_key_create (gave NULL result twice)");
        }
    }

  return impl;
}
1091 : :
1092 : : static void
1093 : 0 : g_private_impl_free_direct (gpointer impl)
1094 : : {
1095 : : pthread_key_t tmp;
1096 : : gint status;
1097 : :
1098 : 0 : memcpy (&tmp, &impl, sizeof (pthread_key_t));
1099 : :
1100 : 0 : status = pthread_key_delete (tmp);
1101 [ # # ]: 0 : if G_UNLIKELY (status != 0)
1102 : 0 : g_thread_abort (status, "pthread_key_delete");
1103 : 0 : }
1104 : :
/* Return the pthread key backing @key, creating it on first use.
 *
 * The key is published into key->p with an atomic compare-and-exchange
 * so that threads racing on first use agree on a single key: the loser
 * of the race destroys its own key and adopts the winner's.
 */
static inline pthread_key_t
g_private_get_impl (GPrivate *key)
{
  if (sizeof (pthread_key_t) > sizeof (gpointer))
    {
      /* Key does not fit in a pointer: key->p points at a heap-allocated
       * pthread_key_t. */
      pthread_key_t *impl = g_atomic_pointer_get (&key->p);

      if G_UNLIKELY (impl == NULL)
        {
          impl = g_private_impl_new (key->notify);
          if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
            {
              /* Another thread won the race: free ours, use theirs. */
              g_private_impl_free (impl);
              impl = key->p;
            }
        }

      return *impl;
    }
  else
    {
      /* Key fits in a pointer: key->p holds the key's bits directly
       * (see g_private_impl_new_direct()). */
      gpointer impl = g_atomic_pointer_get (&key->p);
      pthread_key_t tmp;

      if G_UNLIKELY (impl == NULL)
        {
          impl = g_private_impl_new_direct (key->notify);
          if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
            {
              /* Another thread won the race: delete ours, use theirs. */
              g_private_impl_free_direct (impl);
              impl = key->p;
            }
        }

      memcpy (&tmp, &impl, sizeof (pthread_key_t));

      return tmp;
    }
}
1144 : :
1145 : : /**
1146 : : * g_private_get:
1147 : : * @key: a #GPrivate
1148 : : *
1149 : : * Returns the current value of the thread local variable @key.
1150 : : *
1151 : : * If the value has not yet been set in this thread, %NULL is returned.
1152 : : * Values are never copied between threads (when a new thread is
1153 : : * created, for example).
1154 : : *
1155 : : * Returns: the thread-local value
1156 : : */
1157 : : gpointer
1158 : 9946282 : g_private_get (GPrivate *key)
1159 : : {
1160 : : /* quote POSIX: No errors are returned from pthread_getspecific(). */
1161 : 9946282 : return pthread_getspecific (g_private_get_impl (key));
1162 : : }
1163 : :
1164 : : /**
1165 : : * g_private_set:
1166 : : * @key: a #GPrivate
1167 : : * @value: the new value
1168 : : *
1169 : : * Sets the thread local variable @key to have the value @value in the
1170 : : * current thread.
1171 : : *
1172 : : * This function differs from g_private_replace() in the following way:
1173 : : * the #GDestroyNotify for @key is not called on the old value.
1174 : : */
1175 : : void
1176 : 104185 : g_private_set (GPrivate *key,
1177 : : gpointer value)
1178 : : {
1179 : : gint status;
1180 : :
1181 [ - + ]: 104185 : if G_UNLIKELY ((status = pthread_setspecific (g_private_get_impl (key), value)) != 0)
1182 : 0 : g_thread_abort (status, "pthread_setspecific");
1183 : 104185 : }
1184 : :
1185 : : /**
1186 : : * g_private_replace:
1187 : : * @key: a #GPrivate
1188 : : * @value: the new value
1189 : : *
1190 : : * Sets the thread local variable @key to have the value @value in the
1191 : : * current thread.
1192 : : *
1193 : : * This function differs from g_private_set() in the following way: if
1194 : : * the previous value was non-%NULL then the #GDestroyNotify handler for
1195 : : * @key is run on it.
1196 : : *
1197 : : * Since: 2.32
1198 : : **/
1199 : : void
1200 : 727 : g_private_replace (GPrivate *key,
1201 : : gpointer value)
1202 : : {
1203 : 727 : pthread_key_t impl = g_private_get_impl (key);
1204 : : gpointer old;
1205 : : gint status;
1206 : :
1207 : 727 : old = pthread_getspecific (impl);
1208 : :
1209 [ - + ]: 727 : if G_UNLIKELY ((status = pthread_setspecific (impl, value)) != 0)
1210 : 0 : g_thread_abort (status, "pthread_setspecific");
1211 : :
1212 [ + + + - ]: 727 : if (old && key->notify)
1213 : 63 : key->notify (old);
1214 : 727 : }
1215 : :
1216 : : /* {{{1 GThread */
1217 : :
/* Abort via g_error() if a POSIX threads call returned a non-zero error
 * code; posix_check_cmd() additionally stringifies the call itself so it
 * appears in the error message. */
#define posix_check_err(err, name) G_STMT_START{                        \
  int error = (err);                                                    \
  if (error)                                                            \
    g_error ("file %s: line %d (%s): error '%s' during '%s'",           \
             __FILE__, __LINE__, G_STRFUNC,                             \
             g_strerror (error), name);                                 \
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1227 : :
typedef struct
{
  GRealThread thread;       /* must stay first: code casts between
                             * GThreadPosix* and GRealThread* */

  pthread_t system_thread;  /* underlying pthread handle */
  gboolean joined;          /* TRUE once pthread_join() has been done */
  GMutex lock;              /* serialises join attempts on this thread */

  void *(*proxy) (void *);  /* trampoline the new thread actually runs */
} GThreadPosix;
1238 : :
1239 : : void
1240 : 11166 : g_system_thread_free (GRealThread *thread)
1241 : : {
1242 : 11166 : GThreadPosix *pt = (GThreadPosix *) thread;
1243 : :
1244 [ + + ]: 11166 : if (!pt->joined)
1245 : 295 : pthread_detach (pt->system_thread);
1246 : :
1247 : 11166 : g_mutex_clear (&pt->lock);
1248 : :
1249 : 11166 : g_slice_free (GThreadPosix, pt);
1250 : 11166 : }
1251 : :
/* Create a new system thread running @proxy(thread); @proxy later
 * dispatches to @func(@data).
 *
 * @stack_size: requested stack size, or 0 for the platform default
 *   (clamped up to the platform minimum where that can be queried).
 * @name: thread name, copied; may be NULL.
 * @error: set with G_THREAD_ERROR_AGAIN and NULL is returned when the
 *   system lacks resources (EAGAIN); any other pthread_create failure
 *   aborts via posix_check_err().
 *
 * Returns a GRealThread with ref_count 2 (one ref for the creator, one
 * for the thread itself).
 */
GRealThread *
g_system_thread_new (GThreadFunc proxy,
                     gulong stack_size,
                     const char *name,
                     GThreadFunc func,
                     gpointer data,
                     GError **error)
{
  GThreadPosix *thread;
  GRealThread *base_thread;
  pthread_attr_t attr;
  gint ret;

  thread = g_slice_new0 (GThreadPosix);
  base_thread = (GRealThread*)thread;
  base_thread->ref_count = 2;
  base_thread->ours = TRUE;
  base_thread->thread.joinable = TRUE;
  base_thread->thread.func = func;
  base_thread->thread.data = data;
  base_thread->name = g_strdup (name);
  thread->proxy = proxy;

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
    {
#ifdef _SC_THREAD_STACK_MIN
      long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
      if (min_stack_size >= 0)
        stack_size = MAX ((gulong) min_stack_size, stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
    }
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
  {
    /* While this is the default, better be explicit about it */
    pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED);
  }
#endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */

  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);

  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
    {
      /* EAGAIN is the one recoverable failure: report it and clean up
       * everything allocated above. */
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_free (thread->thread.name);
      g_slice_free (GThreadPosix, thread);
      return NULL;
    }

  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
}
1317 : :
1318 : : /**
1319 : : * g_thread_yield:
1320 : : *
1321 : : * Causes the calling thread to voluntarily relinquish the CPU, so
1322 : : * that other threads can run.
1323 : : *
1324 : : * This function is often used as a method to make busy wait less evil.
1325 : : */
void
g_thread_yield (void)
{
  /* Best-effort hint to the scheduler; the return value of
   * sched_yield() carries no information useful to callers. */
  (void) sched_yield ();
}
1331 : :
/* Wait for @thread to terminate.
 *
 * pt->lock serialises concurrent waiters, and pt->joined guarantees
 * that pthread_join() is called at most once for the underlying
 * thread (joining twice is an error in POSIX).
 */
void
g_system_thread_wait (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  if (!pt->joined)
    {
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
      pt->joined = TRUE;
    }

  g_mutex_unlock (&pt->lock);
}
1347 : :
/* Terminate the calling thread immediately via pthread_exit();
 * does not return. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1353 : :
/* Best-effort: set the current thread's name using whichever
 * pthread_setname variant configure detected; silently a no-op on
 * platforms providing none of them. */
void
g_system_thread_set_name (const gchar *name)
{
#if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  pthread_setname_np (name); /* on OS X and iOS */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
  pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
  pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */
#elif defined(HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */
#endif
}
1367 : :
1368 : : /* {{{1 GMutex and GCond futex implementation */
1369 : :
1370 : : #if defined(USE_NATIVE_MUTEX)
1371 : : /* We should expand the set of operations available in gatomic once we
1372 : : * have better C11 support in GCC in common distributions (ie: 4.9).
1373 : : *
1374 : : * Before then, let's define a couple of useful things for our own
1375 : : * purposes...
1376 : : */
1377 : :
#ifdef HAVE_STDATOMIC_H

#include <stdatomic.h>

/* C11 <stdatomic.h> is available: express the acquire/release
 * operations with the standard explicit-ordering functions. */
#define exchange_acquire(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)

#else

/* No <stdatomic.h>: fall back to the GCC/Clang __atomic builtins on
 * 4-byte values. */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)

#endif
1406 : :
1407 : : /* Our strategy for the mutex is pretty simple:
1408 : : *
1409 : : * 0: not in use
1410 : : *
1411 : : * 1: acquired by one thread only, no contention
1412 : : *
1413 : : * 2: contended
1414 : : */
1415 : :
typedef enum {
  G_MUTEX_STATE_EMPTY = 0,    /* unlocked */
  G_MUTEX_STATE_OWNED,        /* locked, no contention observed */
  G_MUTEX_STATE_CONTENDED,    /* locked, waiter(s) may be sleeping */
} GMutexState;
1421 : :
1422 : : /*
1423 : : * As such, attempting to acquire the lock should involve an increment.
1424 : : * If we find that the previous value was 0 then we can return
1425 : : * immediately.
1426 : : *
1427 : : * On unlock, we always store 0 to indicate that the lock is available.
1428 : : * If the value there was 1 before then we didn't have contention and
1429 : : * can return immediately. If the value was something other than 1 then
1430 : : * we have the contended case and need to wake a waiter.
1431 : : *
1432 : : * If it was not 0 then there is another thread holding it and we must
1433 : : * wait. We must always ensure that we mark a value >1 while we are
1434 : : * waiting in order to instruct the holder to do a wake operation on
1435 : : * unlock.
1436 : : */
1437 : :
/* Initialise the futex-based mutex to the unlocked state.  A plain
 * (non-atomic) store is fine: initialisation must not race with any
 * other use of the mutex. */
void
g_mutex_init (GMutex *mutex)
{
  mutex->i[0] = G_MUTEX_STATE_EMPTY;
}
1443 : :
/* There is nothing to free for a futex-based mutex; only sanity-check
 * that the caller is not destroying a locked (or garbage) mutex. */
void
g_mutex_clear (GMutex *mutex)
{
  if G_UNLIKELY (mutex->i[0] != G_MUTEX_STATE_EMPTY)
    {
      fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
      g_abort ();
    }
}
1453 : :
/* Contended path of g_mutex_lock(); kept out of line so that the
 * uncontended fast path stays small. */
G_GNUC_NO_INLINE
static void
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to contended.  If it was empty before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the contended state remains...
   * (The futex wait returns immediately if the value is no longer
   * CONTENDED, so a wake-up cannot be missed.)
   */
  while (exchange_acquire (&mutex->i[0], G_MUTEX_STATE_CONTENDED) != G_MUTEX_STATE_EMPTY)
    {
      g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE,
                      G_MUTEX_STATE_CONTENDED, NULL);
    }
}
1469 : :
/* Contended path of g_mutex_unlock(): validate the previous state and
 * wake one sleeping waiter. */
G_GNUC_NO_INLINE
static void
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == G_MUTEX_STATE_EMPTY)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      g_abort ();
    }

  /* prev was CONTENDED: someone may be sleeping on the futex. */
  g_futex_simple (&mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
1486 : :
/* Acquire @mutex.  The fast path is a single CAS from EMPTY to OWNED;
 * anything else falls through to the futex-based slow path. */
void
g_mutex_lock (GMutex *mutex)
{
  /* empty -> owned and we're done.  Anything else, and we need to wait... */
  if G_UNLIKELY (!g_atomic_int_compare_and_exchange (&mutex->i[0],
                                                     G_MUTEX_STATE_EMPTY,
                                                     G_MUTEX_STATE_OWNED))
    g_mutex_lock_slowpath (mutex);
}
1496 : :
/* Release @mutex with release semantics; if contention was recorded
 * while we held it, wake a waiter via the slow path. */
void
g_mutex_unlock (GMutex *mutex)
{
  guint prev;

  prev = exchange_release (&mutex->i[0], G_MUTEX_STATE_EMPTY);

  /* 1-> 0 and we're done.  Anything else and we need to signal... */
  if G_UNLIKELY (prev != G_MUTEX_STATE_OWNED)
    g_mutex_unlock_slowpath (mutex, prev);
}
1508 : :
/* Try to acquire @mutex without blocking.  Returns TRUE iff the state
 * moved from EMPTY to OWNED. */
gboolean
g_mutex_trylock (GMutex *mutex)
{
  GMutexState empty = G_MUTEX_STATE_EMPTY;

  /* We don't want to touch the value at all unless we can move it from
   * exactly empty to owned.
   */
  return compare_exchange_acquire (&mutex->i[0], &empty, G_MUTEX_STATE_OWNED);
}
1519 : :
1520 : : /* Condition variables are implemented in a rather simple way as well.
1521 : : * In many ways, futex() as an abstraction is even more ideally suited
1522 : : * to condition variables than it is to mutexes.
1523 : : *
1524 : : * We store a generation counter. We sample it with the lock held and
1525 : : * unlock before sleeping on the futex.
1526 : : *
1527 : : * Signalling simply involves increasing the counter and making the
1528 : : * appropriate futex call.
1529 : : *
1530 : : * The only thing that is the slightest bit complicated is timed waits
1531 : : * because we must convert our absolute time to relative.
1532 : : */
1533 : :
/* i[0] is a generation counter: waiters sample it, then sleep until it
 * changes (see the comment block above). */
void
g_cond_init (GCond *cond)
{
  cond->i[0] = 0;
}
1539 : :
/* Nothing to release for a futex-based GCond. */
void
g_cond_clear (GCond *cond)
{
}
1544 : :
/* Atomically release @mutex and wait on @cond, reacquiring @mutex
 * before returning.  Spurious wakeups are permitted by the GCond
 * contract, so no predicate re-check is done here. */
void
g_cond_wait (GCond *cond,
             GMutex *mutex)
{
  guint sampled = (guint) g_atomic_int_get (&cond->i[0]);

  g_mutex_unlock (mutex);
  /* If the generation counter changed between the sample above and this
   * call, the futex wait returns immediately — a signal cannot be lost. */
  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
}
1555 : :
/* Wake at most one waiter.  The counter is bumped first so that a
 * waiter that sampled the old value but has not yet slept will not
 * block. */
void
g_cond_signal (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
1563 : :
/* Wake all waiters (INT_MAX = "as many as are sleeping"), bumping the
 * generation counter first for the same reason as g_cond_signal(). */
void
g_cond_broadcast (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  g_futex_simple (&cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
}
1571 : :
/* Timed wait on the futex-based GCond.
 *
 * @end_time is an absolute time on the monotonic clock, in
 * microseconds; it is converted to a relative timespec for the futex
 * call.  @mutex is held on entry and reacquired before returning.
 *
 * Returns TRUE if the wait ended for any reason other than timeout
 * (signal, broadcast, or spurious wakeup); FALSE on timeout or if
 * @end_time is already in the past.
 */
gboolean
g_cond_wait_until (GCond *cond,
                   GMutex *mutex,
                   gint64 end_time)
{
  struct timespec now;
  struct timespec span;

  guint sampled;
  int res;
  gboolean success;

  if (end_time < 0)
    return FALSE;

  /* Convert the absolute deadline into a relative span, normalising a
   * negative nanosecond part by borrowing from the seconds. */
  clock_gettime (CLOCK_MONOTONIC, &now);
  span.tv_sec = (end_time / 1000000) - now.tv_sec;
  span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
  if (span.tv_nsec < 0)
    {
      span.tv_nsec += 1000000000;
      span.tv_sec--;
    }

  if (span.tv_sec < 0)
    return FALSE;

  /* `struct timespec` as defined by the libc headers does not necessarily
   * have any relation to the one used by the kernel for the `futex` syscall.
   *
   * Specifically, the libc headers might use 64-bit `time_t` while the kernel
   * headers use 32-bit types on certain systems.
   *
   * To get around this problem we
   * a) check if `futex_time64` is available, which only exists on 32-bit
   *    platforms and always uses 64-bit `time_t`.
   * b) if `futex_time64` is available, but the Android runtime's API level
   *    is < 30, `futex_time64` is blocked by seccomp and using it will cause
   *    the app to be terminated. Skip to c).
   *    https://android-review.googlesource.com/c/platform/bionic/+/1094758
   * c) otherwise (or if that returns `ENOSYS`), we call the normal `futex`
   *    syscall with the `struct timespec` used by the kernel. By default, we
   *    use `__kernel_long_t` for both its fields, which is equivalent to
   *    `__kernel_old_time_t` and is available in the kernel headers for a
   *    longer time.
   * d) With very old headers (~2.6.x), `__kernel_long_t` is not available, and
   *    we use an older definition that uses `__kernel_time_t` and `long`.
   *
   * Also some 32-bit systems do not define `__NR_futex` at all and only
   * define `__NR_futex_time64`.
   */

  sampled = cond->i[0];
  g_mutex_unlock (mutex);

#if defined(HAVE_FUTEX_TIME64)
#if defined(__ANDROID__)
  if (__builtin_available (android 30, *)) {
#else
  {
#endif
    struct
    {
      gint64 tv_sec;
      gint64 tv_nsec;
    } span_arg;

    span_arg.tv_sec = span.tv_sec;
    span_arg.tv_nsec = span.tv_nsec;

    res = syscall (__NR_futex_time64, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg);

    /* If the syscall does not exist (`ENOSYS`), we retry again below with the
     * normal `futex` syscall. This can happen if newer kernel headers are
     * used than the kernel that is actually running.
     */
# if defined(HAVE_FUTEX)
    if (res >= 0 || errno != ENOSYS)
# endif /* defined(HAVE_FUTEX) */
      {
        success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
        g_mutex_lock (mutex);

        return success;
      }
  }
#endif

#if defined(HAVE_FUTEX)
  {
# ifdef __kernel_long_t
# define KERNEL_SPAN_SEC_TYPE __kernel_long_t
    struct
    {
      __kernel_long_t tv_sec;
      __kernel_long_t tv_nsec;
    } span_arg;
# else
    /* Very old kernel headers: version 2.6.32 and thereabouts */
# define KERNEL_SPAN_SEC_TYPE __kernel_time_t
    struct
    {
      __kernel_time_t tv_sec;
      long tv_nsec;
    } span_arg;
# endif
    /* Make sure to only ever call this if the end time actually fits into the target type */
    if (G_UNLIKELY (sizeof (KERNEL_SPAN_SEC_TYPE) < 8 && span.tv_sec > G_MAXINT32))
      g_error ("%s: Can’t wait for more than %us", G_STRFUNC, G_MAXINT32);

    span_arg.tv_sec = span.tv_sec;
    span_arg.tv_nsec = span.tv_nsec;

    res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span_arg);
    success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
    g_mutex_lock (mutex);

    return success;
  }
# undef KERNEL_SPAN_SEC_TYPE
#endif /* defined(HAVE_FUTEX) */

  /* We can't end up here because of the checks above */
  g_assert_not_reached ();
}
1697 : :
1698 : : #endif
1699 : :
1700 : : /* {{{1 Epilogue */
1701 : : /* vim:set foldmethod=marker: */
|