diff --git a/Include/internal/pycore_critical_section.h b/Include/internal/pycore_critical_section.h
index 78cd0d54972..9ba2fce56d3 100644
--- a/Include/internal/pycore_critical_section.h
+++ b/Include/internal/pycore_critical_section.h
@@ -109,7 +109,7 @@ _PyCriticalSection_IsActive(uintptr_t tag)
 static inline void
 _PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
 {
-    if (PyMutex_LockFast(&m->_bits)) {
+    if (PyMutex_LockFast(m)) {
         PyThreadState *tstate = _PyThreadState_GET();
         c->_cs_mutex = m;
         c->_cs_prev = tstate->critical_section;
@@ -170,8 +170,8 @@ _PyCriticalSection2_BeginMutex(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2)
         m2 = tmp;
     }
 
-    if (PyMutex_LockFast(&m1->_bits)) {
-        if (PyMutex_LockFast(&m2->_bits)) {
+    if (PyMutex_LockFast(m1)) {
+        if (PyMutex_LockFast(m2)) {
             PyThreadState *tstate = _PyThreadState_GET();
             c->_cs_base._cs_mutex = m1;
             c->_cs_mutex2 = m2;
diff --git a/Include/internal/pycore_lock.h b/Include/internal/pycore_lock.h
index 57cbce8f126..8bcb23a6ce9 100644
--- a/Include/internal/pycore_lock.h
+++ b/Include/internal/pycore_lock.h
@@ -18,9 +18,10 @@ extern "C" {
 #define _Py_ONCE_INITIALIZED 4
 
 static inline int
-PyMutex_LockFast(uint8_t *lock_bits)
+PyMutex_LockFast(PyMutex *m)
 {
     uint8_t expected = _Py_UNLOCKED;
+    uint8_t *lock_bits = &m->_bits;
     return _Py_atomic_compare_exchange_uint8(lock_bits, &expected, _Py_LOCKED);
 }
 
diff --git a/Python/ceval_macros.h b/Python/ceval_macros.h
index 9250b86e42c..398816d5f36 100644
--- a/Python/ceval_macros.h
+++ b/Python/ceval_macros.h
@@ -300,7 +300,7 @@ GETITEM(PyObject *v, Py_ssize_t i) {
 // avoid any potentially escaping calls (like PyStackRef_CLOSE) while the
 // object is locked.
 #ifdef Py_GIL_DISABLED
-# define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex._bits)
+# define LOCK_OBJECT(op) PyMutex_LockFast(&(_PyObject_CAST(op))->ob_mutex)
 # define UNLOCK_OBJECT(op) PyMutex_Unlock(&(_PyObject_CAST(op))->ob_mutex)
 #else
 # define LOCK_OBJECT(op) (1)
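
The net effect of this refactor is that PyMutex_LockFast takes the mutex itself rather than a raw pointer to its _bits field, so callers (the critical-section helpers and the LOCK_OBJECT macro) no longer reach into PyMutex's internal representation; only the lock implementation dereferences _bits. Below is a minimal, self-contained sketch of the same fast-path pattern using standard C11 atomics. The names here (Mutex, mutex_lock_fast, mutex_unlock) are hypothetical stand-ins, not CPython API, and the real PyMutex falls back to a slow path with waiter parking that this sketch omits.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { UNLOCKED = 0, LOCKED = 1 };

/* Hypothetical stand-in for PyMutex: the entire lock state lives in
 * one byte, so the uncontended fast path is a single CAS on it. */
typedef struct {
    _Atomic uint8_t bits;
} Mutex;

/* Callers pass the mutex itself; the byte-sized representation stays
 * an implementation detail of the lock, mirroring the diff above. */
static inline int
mutex_lock_fast(Mutex *m)
{
    uint8_t expected = UNLOCKED;
    /* Returns nonzero only if the mutex was unlocked and we acquired
     * it; a real implementation would fall back to a slow path here. */
    return atomic_compare_exchange_strong(&m->bits, &expected, LOCKED);
}

static inline void
mutex_unlock(Mutex *m)
{
    atomic_store(&m->bits, UNLOCKED);  /* no waiters to wake in this sketch */
}

int
main(void)
{
    Mutex m = { .bits = UNLOCKED };
    printf("first lock:  %d\n", mutex_lock_fast(&m));  /* 1: acquired */
    printf("second lock: %d\n", mutex_lock_fast(&m));  /* 0: already held */
    mutex_unlock(&m);
    printf("third lock:  %d\n", mutex_lock_fast(&m));  /* 1: acquired again */
    return 0;
}

The design point the diff makes is visible in pycore_lock.h: the added line uint8_t *lock_bits = &m->_bits; confines knowledge of the single-byte layout to the one function that needs it, instead of forcing every call site to spell out &...->_bits.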