View | Details | Raw Unified | Return to bug 204340 | Differences between
the base revision and this patch

Collapse All | Expand All

(-)rpc/svc.h (+6 lines)
Lines 729-734 extern SVCPOOL* svcpool_create(const cha Link Here
729
extern void svcpool_destroy(SVCPOOL *pool);
729
extern void svcpool_destroy(SVCPOOL *pool);
730
730
731
/*
731
/*
732
 * Close a service pool.  Similar to svcpool_destroy(), but it does not
733
 * free the data structures.  As such, the pool can be used again.
734
 */
735
extern void svcpool_close(SVCPOOL *pool);
736
737
/*
732
 * Transport independent svc_create routine.
738
 * Transport independent svc_create routine.
733
 */
739
 */
734
extern int svc_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *),
740
extern int svc_create(SVCPOOL *, void (*)(struct svc_req *, SVCXPRT *),
(-)rpc/svc.c (+52 lines)
Lines 226-231 svcpool_destroy(SVCPOOL *pool) Link Here
226
}
226
}
227
227
228
/*
228
/*
229
 * Similar to svcpool_destroy(), except that it does not destroy the actual
230
 * data structures.  As such, "pool" may be used again.
231
 */
232
void
233
svcpool_close(SVCPOOL *pool)
234
{
235
	SVCGROUP *grp;
236
	SVCXPRT *xprt, *nxprt;
237
	struct svc_callout *s;
238
	struct svc_loss_callout *sl;
239
	struct svcxprt_list cleanup;
240
	int g;
241
242
	TAILQ_INIT(&cleanup);
243
244
	for (g = 0; g < SVC_MAXGROUPS; g++) {
245
		grp = &pool->sp_groups[g];
246
		mtx_lock(&grp->sg_lock);
247
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
248
			xprt_unregister_locked(xprt);
249
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
250
		}
251
		mtx_unlock(&grp->sg_lock);
252
	}
253
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
254
		SVC_RELEASE(xprt);
255
	}
256
257
	mtx_lock(&pool->sp_lock);
258
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
259
		mtx_unlock(&pool->sp_lock);
260
		svc_unreg(pool, s->sc_prog, s->sc_vers);
261
		mtx_lock(&pool->sp_lock);
262
	}
263
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
264
		mtx_unlock(&pool->sp_lock);
265
		svc_loss_unreg(pool, sl->slc_dispatch);
266
		mtx_lock(&pool->sp_lock);
267
	}
268
269
	/* Now, initialize the pool's state for a fresh svc_run() call. */
270
	pool->sp_state = SVCPOOL_INIT;
271
	mtx_unlock(&pool->sp_lock);
272
	for (g = 0; g < SVC_MAXGROUPS; g++) {
273
		grp = &pool->sp_groups[g];
274
		mtx_lock(&grp->sg_lock);
275
		grp->sg_state = SVCPOOL_ACTIVE;
276
		mtx_unlock(&grp->sg_lock);
277
	}
278
}
279
280
/*
229
 * Sysctl handler to get the present thread count on a pool
281
 * Sysctl handler to get the present thread count on a pool
230
 */
282
 */
231
static int
283
static int
(-)fs/nfsserver/nfs_nfsdkrpc.c (-11 / +9 lines)
Lines 551-568 nfsrvd_init(int terminating) Link Here
551
		nfsd_master_proc = NULL;
551
		nfsd_master_proc = NULL;
552
		NFSD_UNLOCK();
552
		NFSD_UNLOCK();
553
		nfsrv_freeallbackchannel_xprts();
553
		nfsrv_freeallbackchannel_xprts();
554
		svcpool_destroy(nfsrvd_pool);
554
		svcpool_close(nfsrvd_pool);
555
		nfsrvd_pool = NULL;
555
		NFSD_LOCK();
556
	} else {
557
		NFSD_UNLOCK();
558
		nfsrvd_pool = svcpool_create("nfsd",
559
		    SYSCTL_STATIC_CHILDREN(_vfs_nfsd));
560
		nfsrvd_pool->sp_rcache = NULL;
561
		nfsrvd_pool->sp_assign = fhanew_assign;
562
		nfsrvd_pool->sp_done = fha_nd_complete;
556
		NFSD_LOCK();
563
		NFSD_LOCK();
557
	}
564
	}
558
559
	NFSD_UNLOCK();
560
561
	nfsrvd_pool = svcpool_create("nfsd", SYSCTL_STATIC_CHILDREN(_vfs_nfsd));
562
	nfsrvd_pool->sp_rcache = NULL;
563
	nfsrvd_pool->sp_assign = fhanew_assign;
564
	nfsrvd_pool->sp_done = fha_nd_complete;
565
566
	NFSD_LOCK();
567
}
565
}
568
566

Return to bug 204340