@@ -633,7 +633,7 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
 {
 	struct pthread *curthread = _get_curthread();
 	uint32_t id;
-	int defered;
+	int defered, error;
 
 	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
 		if (m == THR_MUTEX_DESTROYED)
@@ -647,6 +647,7 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
 	if (__predict_false(m->m_owner != curthread))
 		return (EPERM);
 
+	error = 0;
 	id = TID(curthread);
 	if (__predict_false(
 	    PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
@@ -660,7 +661,7 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
 		defered = 0;
 
 	DEQUEUE_MUTEX(curthread, m);
-	_thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
+	error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
 
 	if (mtx_defer == NULL && defered) {
 		_thr_wake_all(curthread->defer_waiters,
@@ -670,7 +671,7 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
 	}
 	if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
 		THR_CRITICAL_LEAVE(curthread);
-	return (0);
+	return (error);
 }
 
 int
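
With this change, mutex_unlock_common() propagates the return value of _thr_umutex_unlock2() instead of always returning 0, so errors from the underlying unlock can reach callers of pthread_mutex_unlock(). The following is a minimal caller-side sketch (not part of the patch, and relying only on standard POSIX behavior) showing why checking that return value is meaningful; the EPERM case for an error-checking mutex is one failure callers can observe.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int error;

	/* Use an error-checking mutex so misuse is reported, not undefined. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	error = pthread_mutex_unlock(&m);	/* expected: 0 */
	printf("first unlock: %s\n", error == 0 ? "ok" : strerror(error));

	/*
	 * Unlocking a mutex this thread does not own fails with EPERM for
	 * error-checking mutexes; with the patch above, failures from the
	 * low-level unlock path are reported to the caller as well.
	 */
	error = pthread_mutex_unlock(&m);
	printf("second unlock: %s\n", strerror(error));

	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return (0);
}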