
(-)b/sys/amd64/amd64/pmap.c (-1 / +1 lines)
Lines 513-519 pmap_delayed_invl_finished(void)
 		pmap_invl_gen = invl_gen->gen;
 		if (ts != NULL) {
 			turnstile_broadcast(ts, TS_SHARED_QUEUE);
-			turnstile_unpend(ts, TS_SHARED_LOCK);
+			turnstile_unpend(ts);
 		}
 		turnstile_chain_unlock(&invl_gen_ts);
 	} else {
(-)b/sys/kern/kern_mutex.c (-1 / +1 lines)
Lines 1029-1035 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
 	 * This turnstile is now no longer associated with the mutex.  We can
 	 * unlock the chain lock so a new turnstile may take it's place.
 	 */
-	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
+	turnstile_unpend(ts);
 	turnstile_chain_unlock(&m->lock_object);
 }
 
(-)b/sys/kern/kern_rmlock.c (-1 / +1 lines)
Lines 494-500 _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker)
 		ts = turnstile_lookup(&rm->lock_object);
 
 		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
-		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
+		turnstile_unpend(ts);
 		turnstile_chain_unlock(&rm->lock_object);
 	} else
 		mtx_unlock_spin(&rm_spinlock);
(-)b/sys/kern/kern_rwlock.c (-3 / +3 lines)
Lines 822-828 __rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
 		ts = turnstile_lookup(&rw->lock_object);
 		MPASS(ts != NULL);
 		turnstile_broadcast(ts, queue);
-		turnstile_unpend(ts, TS_SHARED_LOCK);
+		turnstile_unpend(ts);
 		td->td_rw_rlocks--;
 		break;
 	}
Lines 1259-1265 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 	ts = turnstile_lookup(&rw->lock_object);
 	MPASS(ts != NULL);
 	turnstile_broadcast(ts, queue);
-	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
+	turnstile_unpend(ts);
 	turnstile_chain_unlock(&rw->lock_object);
 }
 
Lines 1405-1411 __rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
 	 */
 	if (rwait && !wwait) {
 		turnstile_broadcast(ts, TS_SHARED_QUEUE);
-		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
+		turnstile_unpend(ts);
 	} else
 		turnstile_disown(ts);
 	turnstile_chain_unlock(&rw->lock_object);
(-)b/sys/kern/subr_turnstile.c (-1 / +1 lines)
Lines 903-909 turnstile_broadcast(struct turnstile *ts, int queue)
  * chain locked.
  */
 void
-turnstile_unpend(struct turnstile *ts, int owner_type)
+turnstile_unpend(struct turnstile *ts)
 {
 	TAILQ_HEAD( ,thread) pending_threads;
 	struct turnstile *nts;
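
For context, every caller updated above follows the same wake-up sequence once this patch is applied: take the turnstile chain lock, look up the turnstile, wake the queued waiters, unpend the turnstile without naming the lock type, and drop the chain lock. A minimal sketch of that pattern for a hypothetical lock is shown below; the example_unlock_hard() name and the bare struct lock_object parameter are illustrative only and not part of the patch.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/turnstile.h>

    /* Hypothetical slow-path unlock showing the post-patch turnstile API. */
    static void
    example_unlock_hard(struct lock_object *lo)
    {
    	struct turnstile *ts;

    	turnstile_chain_lock(lo);	/* serialize against new waiters */
    	ts = turnstile_lookup(lo);	/* NULL if nobody is blocked */
    	if (ts != NULL) {
    		turnstile_broadcast(ts, TS_SHARED_QUEUE);
    		turnstile_unpend(ts);	/* owner_type argument is gone */
    	}
    	turnstile_chain_unlock(lo);
    }
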
(-)b/sys/sys/turnstile.h (-5 / +1 lines)
Lines 83-92 struct turnstile;
 #define	TS_EXCLUSIVE_QUEUE	0
 #define	TS_SHARED_QUEUE		1
 
-/* The type of lock currently held. */
-#define	TS_EXCLUSIVE_LOCK	TS_EXCLUSIVE_QUEUE
-#define	TS_SHARED_LOCK		TS_SHARED_QUEUE
-
 void	init_turnstiles(void);
 void	turnstile_adjust(struct thread *, u_char);
 struct turnstile *turnstile_alloc(void);
Lines 102-108 struct thread *turnstile_head(struct turnstile *, int);
 struct turnstile *turnstile_lookup(struct lock_object *);
 int	turnstile_signal(struct turnstile *, int);
 struct turnstile *turnstile_trywait(struct lock_object *);
-void	turnstile_unpend(struct turnstile *, int);
+void	turnstile_unpend(struct turnstile *);
 void	turnstile_wait(struct turnstile *, struct thread *, int);
 struct thread *turnstile_lock(struct turnstile *, struct lock_object **);
 void	turnstile_unlock(struct turnstile *, struct lock_object *);
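
Because the TS_EXCLUSIVE_LOCK and TS_SHARED_LOCK constants are removed from the header along with the second parameter, any caller not touched by this patch (for example, out-of-tree code) would need the same one-line adjustment; a hypothetical before/after for illustration:

    /* before: */
    turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
    /* after: */
    turnstile_unpend(ts);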
