Coverage Report

Created: 2022-07-08 09:39

/home/mdboom/Work/builds/cpython/Python/thread_pthread.h
Line
Count
Source (jump to first uncovered line)
1
#include "pycore_interp.h"    // _PyInterpreterState.threads.stacksize
2
3
/* Posix threads interface */
4
5
#include <stdlib.h>
6
#include <string.h>
7
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
8
#define destructor xxdestructor
9
#endif
10
#include <pthread.h>
11
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
12
#undef destructor
13
#endif
14
#include <signal.h>
15
16
#if defined(__linux__)
17
#   include <sys/syscall.h>     /* syscall(SYS_gettid) */
18
#elif defined(__FreeBSD__)
19
#   include <pthread_np.h>      /* pthread_getthreadid_np() */
20
#elif defined(__OpenBSD__)
21
#   include <unistd.h>          /* getthrid() */
22
#elif defined(_AIX)
23
#   include <sys/thread.h>      /* thread_self() */
24
#elif defined(__NetBSD__)
25
#   include <lwp.h>             /* _lwp_self() */
26
#elif defined(__DragonFly__)
27
#   include <sys/lwp.h>         /* lwp_gettid() */
28
#endif
29
30
/* The POSIX spec requires that use of pthread_attr_setstacksize
31
   be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
32
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
33
#ifndef THREAD_STACK_SIZE
34
#define THREAD_STACK_SIZE       0       /* use default stack size */
35
#endif
36
37
/* The default stack size for new threads on BSD is small enough that
38
 * we'll get hard crashes instead of 'maximum recursion depth exceeded'
39
 * exceptions.
40
 *
41
 * The default stack size below is the empirically determined minimal stack
42
 * sizes where a simple recursive function doesn't cause a hard crash.
43
 *
44
 * For macOS the value of THREAD_STACK_SIZE is determined in configure.ac
45
 * as it also depends on the other configure options like chosen sanitizer
46
 * runtimes.
47
 */
48
#if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
49
#undef  THREAD_STACK_SIZE
50
#define THREAD_STACK_SIZE       0x400000
51
#endif
52
#if defined(_AIX) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
53
#undef  THREAD_STACK_SIZE
54
#define THREAD_STACK_SIZE       0x200000
55
#endif
56
/* bpo-38852: test_threading.test_recursion_limit() checks that 1000 recursive
57
   Python calls (default recursion limit) doesn't crash, but raise a regular
58
   RecursionError exception. In debug mode, Python function calls allocates
59
   more memory on the stack, so use a stack of 8 MiB. */
60
#if defined(__ANDROID__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
61
#   ifdef Py_DEBUG
62
#   undef  THREAD_STACK_SIZE
63
#   define THREAD_STACK_SIZE    0x800000
64
#   endif
65
#endif
66
#if defined(__VXWORKS__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
67
#undef  THREAD_STACK_SIZE
68
#define THREAD_STACK_SIZE       0x100000
69
#endif
70
/* for safety, ensure a viable minimum stacksize */
71
#define THREAD_STACK_MIN        0x8000  /* 32 KiB */
72
#else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
73
#ifdef THREAD_STACK_SIZE
74
#error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
75
#endif
76
#endif
77
78
/* The POSIX spec says that implementations supporting the sem_*
79
   family of functions must indicate this by defining
80
   _POSIX_SEMAPHORES. */
81
#ifdef _POSIX_SEMAPHORES
82
/* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
83
   we need to add 0 to make it work there as well. */
84
#if (_POSIX_SEMAPHORES+0) == -1
85
#define HAVE_BROKEN_POSIX_SEMAPHORES
86
#else
87
#include <semaphore.h>
88
#include <errno.h>
89
#endif
90
#endif
91
92
93
/* Whether or not to use semaphores directly rather than emulating them with
94
 * mutexes and condition variables:
95
 */
96
#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
97
     (defined(HAVE_SEM_TIMEDWAIT) || defined(HAVE_SEM_CLOCKWAIT)))
98
#  define USE_SEMAPHORES
99
#else
100
#  undef USE_SEMAPHORES
101
#endif
102
103
104
/* On platforms that don't use standard POSIX threads pthread_sigmask()
105
 * isn't present.  DEC threads uses sigprocmask() instead as do most
106
 * other UNIX International compliant systems that don't have the full
107
 * pthread implementation.
108
 */
109
#if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK)
110
#  define SET_THREAD_SIGMASK pthread_sigmask
111
#else
112
#  define SET_THREAD_SIGMASK sigprocmask
113
#endif
114
115
116
/*
117
 * pthread_cond support
118
 */
119
120
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
121
// monotonic is supported statically.  It doesn't mean it works on runtime.
122
#define CONDATTR_MONOTONIC
123
#endif
124
125
// NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported.
126
static pthread_condattr_t *condattr_monotonic = NULL;
127
128
static void
129
init_condattr(void)
130
{
131
#ifdef CONDATTR_MONOTONIC
132
    static pthread_condattr_t ca;
133
    pthread_condattr_init(&ca);
134
    if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) {
  Branch (134:9): [True: 73, False: 0]
135
        condattr_monotonic = &ca;  // Use monotonic clock
136
    }
137
#endif
138
}
139
140
int
141
_PyThread_cond_init(PyCOND_T *cond)
142
{
143
    return pthread_cond_init(cond, condattr_monotonic);
144
}
145
146
147
void
148
_PyThread_cond_after(long long us, struct timespec *abs)
149
{
150
    _PyTime_t timeout = _PyTime_FromMicrosecondsClamp(us);
151
    _PyTime_t t;
152
#ifdef CONDATTR_MONOTONIC
153
    if (condattr_monotonic) {
  Branch (153:9): [True: 294k, False: 0]
154
        t = _PyTime_GetMonotonicClock();
155
    }
156
    else
157
#endif
158
    {
159
        t = _PyTime_GetSystemClock();
160
    }
161
    t = _PyTime_Add(t, timeout);
162
    _PyTime_AsTimespec_clamp(t, abs);
163
}
164
165
166
/* A pthread mutex isn't sufficient to model the Python lock type
167
 * because, according to Draft 5 of the docs (P1003.4a/D5), both of the
168
 * following are undefined:
169
 *  -> a thread tries to lock a mutex it already has locked
170
 *  -> a thread tries to unlock a mutex locked by a different thread
171
 * pthread mutexes are designed for serializing threads over short pieces
172
 * of code anyway, so wouldn't be an appropriate implementation of
173
 * Python's locks regardless.
174
 *
175
 * The pthread_lock struct implements a Python lock as a "locked?" bit
176
 * and a <condition, mutex> pair.  In general, if the bit can be acquired
177
 * instantly, it is, else the pair is used to block the thread until the
178
 * bit is cleared.     9 May 1994 tim@ksr.com
179
 */
180
181
typedef struct {
182
    char             locked; /* 0=unlocked, 1=locked */
183
    /* a <cond, mutex> pair to handle an acquire of a locked lock */
184
    pthread_cond_t   lock_released;
185
    pthread_mutex_t  mut;
186
} pthread_lock;
187
188
#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }
  Region count (error branch "{ perror(name); error = 1; }"): [0]
189
#define CHECK_STATUS_PTHREAD(name)  if (status != 0) { fprintf(stderr, \
190
    "%s: %s\n", name, strerror(status)); error = 1; }
191
192
/*
193
 * Initialization.
194
 */
195
static void
196
PyThread__init_thread(void)
197
{
198
#if defined(_AIX) && defined(__GNUC__)
199
    extern void pthread_init(void);
200
    pthread_init();
201
#endif
202
    init_condattr();
203
}
204
205
/*
206
 * Thread support.
207
 */
208
209
/* bpo-33015: pythread_callback struct and pythread_wrapper() cast
210
   "void func(void *)" to "void* func(void *)": always return NULL.
211
212
   PyThread_start_new_thread() uses "void func(void *)" type, whereas
213
   pthread_create() requires a void* return value. */
214
typedef struct {
215
    void (*func) (void *);
216
    void *arg;
217
} pythread_callback;
218
219
static void *
220
pythread_wrapper(void *arg)
221
{
222
    /* copy func and func_arg and free the temporary structure */
223
    pythread_callback *callback = arg;
224
    void (*func)(void *) = callback->func;
225
    void *func_arg = callback->arg;
226
    PyMem_RawFree(arg);
227
228
    func(func_arg);
229
    return NULL;
230
}
231
232
unsigned long
233
PyThread_start_new_thread(void (*func)(void *), void *arg)
234
{
235
    pthread_t th;
236
    int status;
237
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
238
    pthread_attr_t attrs;
239
#endif
240
#if defined(THREAD_STACK_SIZE)
241
    size_t      tss;
242
#endif
243
244
    if (!initialized)
  Branch (244:9): [True: 0, False: 5.70k]
245
        PyThread_init_thread();
246
247
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
248
    if (pthread_attr_init(&attrs) != 0)
  Branch (248:9): [True: 0, False: 5.70k]
249
        return PYTHREAD_INVALID_THREAD_ID;
250
#endif
251
#if defined(THREAD_STACK_SIZE)
252
    PyThreadState *tstate = _PyThreadState_GET();
253
    size_t stacksize = tstate ? tstate->interp->threads.stacksize : 0;
  Branch (253:24): [True: 5.70k, False: 2]
254
    tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE;
  Branch (254:11): [True: 20, False: 5.68k]
255
    if (tss != 0) {
  Branch (255:9): [True: 20, False: 5.68k]
256
        if (pthread_attr_setstacksize(&attrs, tss) != 0) {
  Branch (256:13): [True: 0, False: 20]
257
            pthread_attr_destroy(&attrs);
258
            return PYTHREAD_INVALID_THREAD_ID;
259
        }
260
    }
261
#endif
262
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
263
    pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
264
#endif
265
266
    pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback));
267
268
    if (callback == NULL) {
  Branch (268:9): [True: 0, False: 5.70k]
269
      return PYTHREAD_INVALID_THREAD_ID;
270
    }
271
272
    callback->func = func;
273
    callback->arg = arg;
274
275
    status = pthread_create(&th,
276
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
277
                             &attrs,
278
#else
279
                             (pthread_attr_t*)NULL,
280
#endif
281
                             pythread_wrapper, callback);
282
283
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
284
    pthread_attr_destroy(&attrs);
285
#endif
286
287
    if (status != 0) {
  Branch (287:9): [True: 0, False: 5.70k]
288
        PyMem_RawFree(callback);
289
        return PYTHREAD_INVALID_THREAD_ID;
290
    }
291
292
    pthread_detach(th);
293
294
#if SIZEOF_PTHREAD_T <= SIZEOF_LONG
295
    return (unsigned long) th;
296
#else
297
    return (unsigned long) *(unsigned long *) &th;
298
#endif
299
}
300
301
/* XXX This implementation is considered (to quote Tim Peters) "inherently
302
   hosed" because:
303
     - It does not guarantee the promise that a non-zero integer is returned.
304
     - The cast to unsigned long is inherently unsafe.
305
     - It is not clear that the 'volatile' (for AIX?) are any longer necessary.
306
*/
307
unsigned long
308
PyThread_get_thread_ident(void)
309
{
310
    volatile pthread_t threadid;
311
    if (!initialized)
  Branch (311:9): [True: 0, False: 68.9M]
312
        PyThread_init_thread();
313
    threadid = pthread_self();
314
    return (unsigned long) threadid;
315
}
316
317
#ifdef PY_HAVE_THREAD_NATIVE_ID
318
unsigned long
319
PyThread_get_thread_native_id(void)
320
{
321
    if (!initialized)
  Branch (321:9): [True: 0, False: 16.4k]
322
        PyThread_init_thread();
323
#ifdef __APPLE__
324
    uint64_t native_id;
325
    (void) pthread_threadid_np(NULL, &native_id);
326
#elif defined(__linux__)
327
    pid_t native_id;
328
    native_id = syscall(SYS_gettid);
329
#elif defined(__FreeBSD__)
330
    int native_id;
331
    native_id = pthread_getthreadid_np();
332
#elif defined(__OpenBSD__)
333
    pid_t native_id;
334
    native_id = getthrid();
335
#elif defined(_AIX)
336
    tid_t native_id;
337
    native_id = thread_self();
338
#elif defined(__NetBSD__)
339
    lwpid_t native_id;
340
    native_id = _lwp_self();
341
#elif defined(__DragonFly__)
342
    lwpid_t native_id;
343
    native_id = lwp_gettid();
344
#endif
345
    return (unsigned long) native_id;
346
}
347
#endif
348
349
void _Py_NO_RETURN
350
PyThread_exit_thread(void)
351
{
352
    if (!initialized)
  Branch (352:9): [True: 0, False: 0]
353
        exit(0);
354
    pthread_exit(0);
355
}
356
357
#ifdef USE_SEMAPHORES
358
359
/*
360
 * Lock support.
361
 */
362
363
PyThread_type_lock
364
PyThread_allocate_lock(void)
365
{
366
    sem_t *lock;
367
    int status, error = 0;
368
369
    if (!initialized)
  Branch (369:9): [True: 73, False: 816k]
370
        PyThread_init_thread();
371
372
    lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));
373
374
    if (lock) {
  Branch (374:9): [True: 816k, False: 0]
375
        status = sem_init(lock,0,1);
376
        CHECK_STATUS("sem_init");
377
378
        if (error) {
  Branch (378:13): [True: 0, False: 816k]
379
            PyMem_RawFree((void *)lock);
380
            lock = NULL;
381
        }
382
    }
383
384
    return (PyThread_type_lock)lock;
385
}
386
387
void
388
PyThread_free_lock(PyThread_type_lock lock)
389
{
390
    sem_t *thelock = (sem_t *)lock;
391
    int status, error = 0;
392
393
    (void) error; /* silence unused-but-set-variable warning */
394
395
    if (!thelock)
  Branch (395:9): [True: 0, False: 816k]
396
        return;
397
398
    status = sem_destroy(thelock);
399
    CHECK_STATUS("sem_destroy");
400
401
    PyMem_RawFree((void *)thelock);
402
}
403
404
/*
405
 * As of February 2002, Cygwin thread implementations mistakenly report error
406
 * codes in the return value of the sem_ calls (like the pthread_ functions).
407
 * Correct implementations return -1 and put the code in errno. This supports
408
 * either.
409
 */
410
static int
411
fix_status(int status)
412
{
413
    return (status == -1) ? errno : status;
  Branch (413:12): [True: 761k, False: 15.0M]
414
}
415
416
PyLockStatus
417
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
418
                            int intr_flag)
419
{
420
    PyLockStatus success;
421
    sem_t *thelock = (sem_t *)lock;
422
    int status, error = 0;
423
424
    (void) error; /* silence unused-but-set-variable warning */
425
426
    _PyTime_t timeout;  // relative timeout
427
    if (microseconds >= 0) {
  Branch (427:9): [True: 13.2M, False: 2.60M]
428
        // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
429
        // overflow to the caller, so clamp the timeout to
430
        // [_PyTime_MIN, _PyTime_MAX].
431
        //
432
        // _PyTime_MAX nanoseconds is around 292.3 years.
433
        //
434
        // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
435
        // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
436
        timeout = _PyTime_FromMicrosecondsClamp(microseconds);
437
    }
438
    else {
439
        timeout = _PyTime_FromNanoseconds(-1);
440
    }
441
442
#ifdef HAVE_SEM_CLOCKWAIT
443
    struct timespec abs_timeout;
444
    // Local scope for deadline
445
    {
446
        _PyTime_t deadline = _PyTime_Add(_PyTime_GetMonotonicClock(), timeout);
447
        _PyTime_AsTimespec_clamp(deadline, &abs_timeout);
448
    }
449
#else
450
    _PyTime_t deadline = 0;
451
    if (timeout > 0 && !intr_flag) {
452
        deadline = _PyDeadline_Init(timeout);
453
    }
454
#endif
455
456
    while (1) {
  Branch (456:12): [Folded - Ignored]
457
        if (timeout > 0) {
  Branch (457:13): [True: 13.4k, False: 15.8M]
458
#ifdef HAVE_SEM_CLOCKWAIT
459
            status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC,
460
                                              &abs_timeout));
461
#else
462
            _PyTime_t abs_time = _PyTime_Add(_PyTime_GetSystemClock(),
463
                                             timeout);
464
            struct timespec ts;
465
            _PyTime_AsTimespec_clamp(abs_time, &ts);
466
            status = fix_status(sem_timedwait(thelock, &ts));
467
#endif
468
        }
469
        else if (timeout == 0) {
  Branch (469:18): [True: 13.2M, False: 2.60M]
470
            status = fix_status(sem_trywait(thelock));
471
        }
472
        else {
473
            status = fix_status(sem_wait(thelock));
474
        }
475
476
        /* Retry if interrupted by a signal, unless the caller wants to be
477
           notified.  */
478
        if (intr_flag || status != EINTR) {
  Branch (478:13): [True: 42.9k, False: 15.7M]
  Branch (478:26): [True: 15.7M, False: 0]
479
            break;
480
        }
481
482
        // sem_clockwait() uses an absolute timeout, there is no need
483
        // to recompute the relative timeout.
484
#ifndef HAVE_SEM_CLOCKWAIT
485
        if (timeout > 0) {
486
            /* wait interrupted by a signal (EINTR): recompute the timeout */
487
            timeout = _PyDeadline_Get(deadline);
488
            if (timeout < 0) {
489
                status = ETIMEDOUT;
490
                break;
491
            }
492
        }
493
#endif
494
    }
495
496
    /* Don't check the status if we're stopping because of an interrupt.  */
497
    if (!(intr_flag && status == EINTR)) {
  Branch (497:11): [True: 43.0k, False: 15.7M]
  Branch (497:24): [True: 43, False: 43.0k]
498
        if (timeout > 0) {
  Branch (498:13): [True: 13.4k, False: 15.8M]
499
            if (status != ETIMEDOUT) {
  Branch (499:17): [True: 13.1k, False: 357]
500
#ifdef HAVE_SEM_CLOCKWAIT
501
                CHECK_STATUS("sem_clockwait");
502
#else
503
                CHECK_STATUS("sem_timedwait");
504
#endif
505
            }
506
        }
507
        else if (timeout == 0) {
  Branch (507:18): [True: 13.2M, False: 2.60M]
508
            if (status != EAGAIN) {
  Branch (508:17): [True: 12.4M, False: 761k]
509
                CHECK_STATUS("sem_trywait");
510
            }
511
        }
512
        else {
513
            CHECK_STATUS("sem_wait");
514
        }
515
    }
516
517
    if (status == 0) {
  Branch (517:9): [True: 15.0M, False: 761k]
518
        success = PY_LOCK_ACQUIRED;
519
    } else if (intr_flag && status == EINTR) {
  Branch (519:16): [True: 400, False: 761k]
  Branch (519:29): [True: 43, False: 357]
520
        success = PY_LOCK_INTR;
521
    } else {
522
        success = PY_LOCK_FAILURE;
523
    }
524
525
    return success;
526
}
527
528
void
529
PyThread_release_lock(PyThread_type_lock lock)
530
{
531
    sem_t *thelock = (sem_t *)lock;
532
    int status, error = 0;
533
534
    (void) error; /* silence unused-but-set-variable warning */
535
536
    status = sem_post(thelock);
537
    CHECK_STATUS("sem_post");
538
}
539
540
#else /* USE_SEMAPHORES */
541
542
/*
543
 * Lock support.
544
 */
545
PyThread_type_lock
546
PyThread_allocate_lock(void)
547
{
548
    pthread_lock *lock;
549
    int status, error = 0;
550
551
    if (!initialized)
552
        PyThread_init_thread();
553
554
    lock = (pthread_lock *) PyMem_RawCalloc(1, sizeof(pthread_lock));
555
    if (lock) {
556
        lock->locked = 0;
557
558
        status = pthread_mutex_init(&lock->mut, NULL);
559
        CHECK_STATUS_PTHREAD("pthread_mutex_init");
560
        /* Mark the pthread mutex underlying a Python mutex as
561
           pure happens-before.  We can't simply mark the
562
           Python-level mutex as a mutex because it can be
563
           acquired and released in different threads, which
564
           will cause errors. */
565
        _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);
566
567
        status = _PyThread_cond_init(&lock->lock_released);
568
        CHECK_STATUS_PTHREAD("pthread_cond_init");
569
570
        if (error) {
571
            PyMem_RawFree((void *)lock);
572
            lock = 0;
573
        }
574
    }
575
576
    return (PyThread_type_lock) lock;
577
}
578
579
void
580
PyThread_free_lock(PyThread_type_lock lock)
581
{
582
    pthread_lock *thelock = (pthread_lock *)lock;
583
    int status, error = 0;
584
585
    (void) error; /* silence unused-but-set-variable warning */
586
587
    /* some pthread-like implementations tie the mutex to the cond
588
     * and must have the cond destroyed first.
589
     */
590
    status = pthread_cond_destroy( &thelock->lock_released );
591
    CHECK_STATUS_PTHREAD("pthread_cond_destroy");
592
593
    status = pthread_mutex_destroy( &thelock->mut );
594
    CHECK_STATUS_PTHREAD("pthread_mutex_destroy");
595
596
    PyMem_RawFree((void *)thelock);
597
}
598
599
PyLockStatus
600
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
601
                            int intr_flag)
602
{
603
    PyLockStatus success = PY_LOCK_FAILURE;
604
    pthread_lock *thelock = (pthread_lock *)lock;
605
    int status, error = 0;
606
607
    if (microseconds == 0) {
608
        status = pthread_mutex_trylock( &thelock->mut );
609
        if (status != EBUSY) {
610
            CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
611
        }
612
    }
613
    else {
614
        status = pthread_mutex_lock( &thelock->mut );
615
        CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
616
    }
617
    if (status != 0) {
618
        goto done;
619
    }
620
621
    if (thelock->locked == 0) {
622
        success = PY_LOCK_ACQUIRED;
623
        goto unlock;
624
    }
625
    if (microseconds == 0) {
626
        goto unlock;
627
    }
628
629
    struct timespec abs_timeout;
630
    if (microseconds > 0) {
631
        _PyThread_cond_after(microseconds, &abs_timeout);
632
    }
633
    // Continue trying until we get the lock
634
635
    // mut must be locked by me -- part of the condition protocol
636
    while (1) {
637
        if (microseconds > 0) {
638
            status = pthread_cond_timedwait(&thelock->lock_released,
639
                                            &thelock->mut, &abs_timeout);
640
            if (status == 1) {
641
                break;
642
            }
643
            if (status == ETIMEDOUT) {
644
                break;
645
            }
646
            CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
647
        }
648
        else {
649
            status = pthread_cond_wait(
650
                &thelock->lock_released,
651
                &thelock->mut);
652
            CHECK_STATUS_PTHREAD("pthread_cond_wait");
653
        }
654
655
        if (intr_flag && status == 0 && thelock->locked) {
656
            // We were woken up, but didn't get the lock.  We probably received
657
            // a signal.  Return PY_LOCK_INTR to allow the caller to handle
658
            // it and retry.
659
            success = PY_LOCK_INTR;
660
            break;
661
        }
662
663
        if (status == 0 && !thelock->locked) {
664
            success = PY_LOCK_ACQUIRED;
665
            break;
666
        }
667
668
        // Wait got interrupted by a signal: retry
669
    }
670
671
unlock:
672
    if (success == PY_LOCK_ACQUIRED) {
673
        thelock->locked = 1;
674
    }
675
    status = pthread_mutex_unlock( &thelock->mut );
676
    CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
677
678
done:
679
    if (error) {
680
        success = PY_LOCK_FAILURE;
681
    }
682
    return success;
683
}
684
685
void
686
PyThread_release_lock(PyThread_type_lock lock)
687
{
688
    pthread_lock *thelock = (pthread_lock *)lock;
689
    int status, error = 0;
690
691
    (void) error; /* silence unused-but-set-variable warning */
692
693
    status = pthread_mutex_lock( &thelock->mut );
694
    CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");
695
696
    thelock->locked = 0;
697
698
    /* wake up someone (anyone, if any) waiting on the lock */
699
    status = pthread_cond_signal( &thelock->lock_released );
700
    CHECK_STATUS_PTHREAD("pthread_cond_signal");
701
702
    status = pthread_mutex_unlock( &thelock->mut );
703
    CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
704
}
705
706
#endif /* USE_SEMAPHORES */
707
708
int
709
_PyThread_at_fork_reinit(PyThread_type_lock *lock)
710
{
711
    PyThread_type_lock new_lock = PyThread_allocate_lock();
712
    if (new_lock == NULL) {
  Branch (712:9): [True: 0, False: 5]
713
        return -1;
714
    }
715
716
    /* bpo-6721, bpo-40089: The old lock can be in an inconsistent state.
717
       fork() can be called in the middle of an operation on the lock done by
718
       another thread. So don't call PyThread_free_lock(*lock).
719
720
       Leak memory on purpose. Don't release the memory either since the
721
       address of a mutex is relevant. Putting two mutexes at the same address
722
       can lead to problems. */
723
724
    *lock = new_lock;
725
    return 0;
726
}
727
728
int
729
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
730
{
731
    return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
  Branch (731:46): [True: 2.57M, False: 7.80M]
732
}
733
734
/* set the thread stack size.
735
 * Return 0 if size is valid, -1 if size is invalid,
736
 * -2 if setting stack size is not supported.
737
 */
738
static int
739
_pythread_pthread_set_stacksize(size_t size)
740
{
741
#if defined(THREAD_STACK_SIZE)
742
    pthread_attr_t attrs;
743
    size_t tss_min;
744
    int rc = 0;
745
#endif
746
747
    /* set to default */
748
    if (size == 0) {
  Branch (748:9): [True: 10, False: 5]
749
        _PyInterpreterState_GET()->threads.stacksize = 0;
750
        return 0;
751
    }
752
753
#if defined(THREAD_STACK_SIZE)
754
#if defined(PTHREAD_STACK_MIN)
755
    tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
  Branch (755:15): [Folded - Ignored]
756
                                                   : THREAD_STACK_MIN;
757
#else
758
    tss_min = THREAD_STACK_MIN;
759
#endif
760
    if (size >= tss_min) {
  Branch (760:9): [True: 4, False: 1]
761
        /* validate stack size by setting thread attribute */
762
        if (pthread_attr_init(&attrs) == 0) {
  Branch (762:13): [True: 4, False: 0]
763
            rc = pthread_attr_setstacksize(&attrs, size);
764
            pthread_attr_destroy(&attrs);
765
            if (rc == 0) {
  Branch (765:17): [True: 4, False: 0]
766
                _PyInterpreterState_GET()->threads.stacksize = size;
767
                return 0;
768
            }
769
        }
770
    }
771
    return -1;
772
#else
773
    return -2;
774
#endif
775
}
776
777
#define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x)
778
779
780
/* Thread Local Storage (TLS) API
781
782
   This API is DEPRECATED since Python 3.7.  See PEP 539 for details.
783
*/
784
785
/* Issue #25658: On platforms where native TLS key is defined in a way that
786
   cannot be safely cast to int, PyThread_create_key returns immediately a
787
   failure status and other TLS functions all are no-ops.  This indicates
788
   clearly that the old API is not supported on platforms where it cannot be
789
   used reliably, and that no effort will be made to add such support.
790
791
   Note: PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT will be unnecessary after
792
   removing this API.
793
*/
794
795
int
796
PyThread_create_key(void)
797
{
798
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
799
    pthread_key_t key;
800
    int fail = pthread_key_create(&key, NULL);
801
    if (fail)
  Branch (801:9): [True: 0, False: 0]
802
        return -1;
803
    if (key > INT_MAX) {
  Branch (803:9): [True: 0, False: 0]
804
        /* Issue #22206: handle integer overflow */
805
        pthread_key_delete(key);
806
        errno = ENOMEM;
807
        return -1;
808
    }
809
    return (int)key;
810
#else
811
    return -1;  /* never return valid key value. */
812
#endif
813
}
814
815
void
816
PyThread_delete_key(int key)
817
{
818
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
819
    pthread_key_delete(key);
820
#endif
821
}
822
823
void
824
PyThread_delete_key_value(int key)
825
{
826
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
827
    pthread_setspecific(key, NULL);
828
#endif
829
}
830
831
int
832
PyThread_set_key_value(int key, void *value)
833
{
834
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
835
    int fail = pthread_setspecific(key, value);
836
    return fail ? -1 : 0;
  Branch (836:12): [True: 0, False: 0]
837
#else
838
    return -1;
839
#endif
840
}
841
842
void *
843
PyThread_get_key_value(int key)
844
{
845
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
846
    return pthread_getspecific(key);
847
#else
848
    return NULL;
849
#endif
850
}
851
852
853
void
854
PyThread_ReInitTLS(void)
855
{
856
}
857
858
859
/* Thread Specific Storage (TSS) API
860
861
   Platform-specific components of TSS API implementation.
862
*/
863
864
int
865
PyThread_tss_create(Py_tss_t *key)
866
{
867
    assert(key != NULL);
868
    /* If the key has been created, function is silently skipped. */
869
    if (key->_is_initialized) {
  Branch (869:9): [True: 1, False: 112]
870
        return 0;
871
    }
872
873
    int fail = pthread_key_create(&(key->_key), NULL);
874
    if (fail) {
  Branch (874:9): [True: 0, False: 112]
875
        return -1;
876
    }
877
    key->_is_initialized = 1;
878
    return 0;
879
}
880
881
void
882
PyThread_tss_delete(Py_tss_t *key)
883
{
884
    assert(key != NULL);
885
    /* If the key has not been created, function is silently skipped. */
886
    if (!key->_is_initialized) {
  Branch (886:9): [True: 1, False: 108]
887
        return;
888
    }
889
890
    pthread_key_delete(key->_key);
891
    /* pthread has not provided the defined invalid value for the key. */
892
    key->_is_initialized = 0;
893
}
894
895
int
896
PyThread_tss_set(Py_tss_t *key, void *value)
897
{
898
    assert(key != NULL);
899
    int fail = pthread_setspecific(key->_key, value);
900
    return fail ? -1 : 0;
  Branch (900:12): [True: 0, False: 2.03M]
901
}
902
903
void *
904
PyThread_tss_get(Py_tss_t *key)
905
{
906
    assert(key != NULL);
907
    return pthread_getspecific(key->_key);
908
}