Patch from bug 219645, shown as a unified diff (makes the buffer-cache block size a boot-time tunable, maxbcachebuf, and sizes the NFS client's buffers from it).

(-)fs/nfs/nfsport.h (-1 / +1 lines)
@@ -1028,7 +1028,7 @@ struct nfsreq {
 };
 
 #ifndef NFS_MAXBSIZE
-#define	NFS_MAXBSIZE	MAXBCACHEBUF
+#define	NFS_MAXBSIZE	(maxbcachebuf)
 #endif
 
 /*
(-)fs/nfs/nfsproto.h (-1 / +15 lines)
@@ -56,8 +56,22 @@
 #define	NFS_MAXDGRAMDATA 16384
 #define	NFS_MAXPATHLEN	1024
 #define	NFS_MAXNAMLEN	255
+/*
+ * Calculating the maximum XDR overhead for an NFS RPC isn't easy.
+ * NFS_MAXPKTHDR is antiquated and assumes AUTH_SYS over UDP.
+ * NFS_MAXXDR should be sufficient for all NFS versions over TCP.
+ * It includes:
+ * - Maximum RPC message header. It can include 2 400byte authenticators plus
+ *   a machine name of unlimited length, although it is usually relatively
+ *   small.
+ * - XDR overheads for the NFSv4 compound. This can include Owner and
+ *   Owner_group strings, which are usually fairly small, but are allowed
+ *   to be up to 1024 bytes each.
+ * 4096 is overkill, but should always be sufficient.
+ */
 #define	NFS_MAXPKTHDR	404
-#define	NFS_MAXPACKET	(NFS_SRVMAXIO + 2048)
+#define	NFS_MAXXDR	4096
+#define	NFS_MAXPACKET	(NFS_SRVMAXIO + NFS_MAXXDR)
 #define	NFS_MINPACKET	20
 #define	NFS_FABLKSIZE	512	/* Size in bytes of a block wrt fa_blocks */
 #define	NFSV4_MINORVERSION	0	/* V4 Minor version */
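The comment block added above argues the NFS_MAXXDR budget from its parts: two 400-byte RPC authenticators plus Owner and Owner_group strings of up to 1024 bytes each. A quick userland check of that arithmetic; the 512-byte allowance for the remaining compound overhead is my own rough figure, not from the patch:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	int auths = 2 * 400;	/* two RPC authenticators */
	int owners = 2 * 1024;	/* Owner and Owner_group strings */
	int misc = 512;		/* rough allowance for the rest of the
				 * NFSv4 compound XDR overhead */
	int total = auths + owners + misc;

	printf("estimated worst-case XDR overhead: %d bytes\n", total);
	assert(total <= 4096);	/* NFS_MAXXDR = 4096 still has headroom */
	return (0);
}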
(-)fs/nfs/nfs_commonkrpc.c (-4 / +14 lines)
@@ -96,6 +96,7 @@ extern int nfscl_ticks;
 extern void (*ncl_call_invalcaches)(struct vnode *);
 extern int nfs_numnfscbd;
 extern int nfscl_debuglevel;
+extern int maxbcachebuf;
 
 SVCPOOL		*nfscbd_pool;
 static int	nfsrv_gsscallbackson = 0;
@@ -161,7 +162,7 @@ newnfs_connect(struct nfsmount *nmp, str
     struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
 {
 	int rcvreserve, sndreserve;
-	int pktscale;
+	int pktscale, pktscalesav;
 	struct sockaddr *saddr;
 	struct ucred *origcred;
 	CLIENT *client;
@@ -210,6 +211,7 @@ newnfs_connect(struct nfsmount *nmp, str
 		pktscale = 2;
 	if (pktscale > 64)
 		pktscale = 64;
+	pktscalesav = pktscale;
 	/*
 	 * soreserve() can fail if sb_max is too small, so shrink pktscale
 	 * and try again if there is an error.
@@ -228,8 +230,13 @@ newnfs_connect(struct nfsmount *nmp, str
 		goto out;
 	}
 	do {
-	    if (error != 0 && pktscale > 2)
+	    if (error != 0 && pktscale > 2) {
+		if (nmp != NULL && nrp->nr_sotype == SOCK_STREAM &&
+		    pktscale == pktscalesav)
+		    printf("Consider increasing kern.ipc.maxsockbuf\n");
 		pktscale--;
+	    }
 	    if (nrp->nr_sotype == SOCK_DGRAM) {
 		if (nmp != NULL) {
 			sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
@@ -243,15 +249,19 @@ newnfs_connect(struct nfsmount *nmp, str
 		if (nrp->nr_sotype != SOCK_STREAM)
 			panic("nfscon sotype");
 		if (nmp != NULL) {
-			sndreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
+			sndreserve = (NFS_MAXBSIZE + NFS_MAXXDR +
 			    sizeof (u_int32_t)) * pktscale;
-			rcvreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
+			rcvreserve = (NFS_MAXBSIZE + NFS_MAXXDR +
 			    sizeof (u_int32_t)) * pktscale;
 		} else {
 			sndreserve = rcvreserve = 1024 * pktscale;
 		}
 	    }
 	    error = soreserve(so, sndreserve, rcvreserve);
+	    if (error != 0 && nmp != NULL && nrp->nr_sotype == SOCK_STREAM &&
+		pktscale <= 2)
+		printf("Must increase kern.ipc.maxsockbuf or reduce"
+		    " rsize, wsize\n");
 	} while (error != 0 && pktscale > 2);
 	soclose(so);
 	if (error) {
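For context, the do/while above now warns once (on the first soreserve() failure at the original pktscale) and again when the scale has bottomed out at 2. A minimal userland sketch of that shrink-and-retry shape; try_reserve() stands in for soreserve(), and the FAKE_* values (a 2 MB cap standing in for kern.ipc.maxsockbuf, a 128 KB request standing in for NFS_MAXBSIZE + NFS_MAXXDR) are invented for the example:

#include <stdio.h>

#define FAKE_MAXSOCKBUF	(2 * 1024 * 1024)	/* stand-in for kern.ipc.maxsockbuf */
#define FAKE_BUFSIZE	(128 * 1024)		/* stand-in for NFS_MAXBSIZE + NFS_MAXXDR */

static int
try_reserve(long bytes)
{
	/* Fails the way soreserve() does when the request exceeds sb_max. */
	return (bytes > FAKE_MAXSOCKBUF ? -1 : 0);
}

int
main(void)
{
	int pktscale = 64, pktscalesav = pktscale, error = 0;

	do {
		if (error != 0 && pktscale > 2) {
			/* Warn only on the first failure, at the original scale. */
			if (pktscale == pktscalesav)
				printf("Consider increasing kern.ipc.maxsockbuf\n");
			pktscale--;
		}
		error = try_reserve((long)FAKE_BUFSIZE * pktscale);
		if (error != 0 && pktscale <= 2)
			printf("Must increase kern.ipc.maxsockbuf or reduce"
			    " rsize, wsize\n");
	} while (error != 0 && pktscale > 2);

	printf("settled on pktscale=%d, error=%d\n", pktscale, error);
	return (0);
}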
(-)fs/nfsclient/nfs_clrpcops.c (-4 / +23 lines)
@@ -4703,7 +4703,7 @@ nfsrpc_createsession(struct nfsmount *nm
     struct nfssockreq *nrp, uint32_t sequenceid, int mds, struct ucred *cred,
     NFSPROC_T *p)
 {
-	uint32_t crflags, *tl;
+	uint32_t crflags, maxval, *tl;
 	struct nfsrv_descript nfsd;
 	struct nfsrv_descript *nd = &nfsd;
 	int error, irdcnt;
@@ -4721,8 +4721,8 @@ nfsrpc_createsession(struct nfsmount *nm
 	/* Fill in fore channel attributes. */
 	NFSM_BUILD(tl, uint32_t *, 7 * NFSX_UNSIGNED);
 	*tl++ = 0;				/* Header pad size */
-	*tl++ = txdr_unsigned(100000);		/* Max request size */
-	*tl++ = txdr_unsigned(100000);		/* Max response size */
+	*tl++ = txdr_unsigned(nmp->nm_wsize + NFS_MAXXDR);/* Max request size */
+	*tl++ = txdr_unsigned(nmp->nm_rsize + NFS_MAXXDR);/* Max reply size */
 	*tl++ = txdr_unsigned(4096);		/* Max response size cached */
 	*tl++ = txdr_unsigned(20);		/* Max operations */
 	*tl++ = txdr_unsigned(64);		/* Max slots */
@@ -4769,7 +4769,26 @@ nfsrpc_createsession(struct nfsmount *nm
 
 		/* Get the fore channel slot count. */
 		NFSM_DISSECT(tl, uint32_t *, 7 * NFSX_UNSIGNED);
-		tl += 3;		/* Skip the other counts. */
+		tl++;			/* Skip the header pad size. */
+
+		/* Make sure nm_wsize is small enough. */
+		maxval = fxdr_unsigned(uint32_t, *tl++);
+		while (maxval < nmp->nm_wsize + NFS_MAXXDR) {
+			if (nmp->nm_wsize > 8096)
+				nmp->nm_wsize /= 2;
+			else
+				break;
+		}
+
+		/* Make sure nm_rsize is small enough. */
+		maxval = fxdr_unsigned(uint32_t, *tl++);
+		while (maxval < nmp->nm_rsize + NFS_MAXXDR) {
+			if (nmp->nm_rsize > 8096)
+				nmp->nm_rsize /= 2;
+			else
+				break;
+		}
+
 		sep->nfsess_maxcache = fxdr_unsigned(int, *tl++);
 		tl++;
 		sep->nfsess_foreslots = fxdr_unsigned(uint16_t, *tl++);
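The negotiation added above clamps the mount's I/O sizes to what the server advertises in its CREATE_SESSION reply. A standalone sketch of the same halving loop, with invented numbers (a 64 KB server maximum against a 128 KB nm_wsize; FAKE_NFS_MAXXDR mirrors the 4096 from nfsproto.h):

#include <stdio.h>

#define FAKE_NFS_MAXXDR	4096

int
main(void)
{
	unsigned int maxval = 65536;	/* invented server max request size */
	unsigned int wsize = 131072;	/* invented nm_wsize from the mount */

	/* Halve wsize until wsize + XDR overhead fits, as the patch does. */
	while (maxval < wsize + FAKE_NFS_MAXXDR) {
		if (wsize > 8096)
			wsize /= 2;
		else
			break;
	}
	/* 131072 -> 65536 -> 32768; 32768 + 4096 fits under 65536 */
	printf("clamped wsize=%u\n", wsize);
	return (0);
}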
(-)fs/nfsclient/nfs_clvfsops.c (+1 lines)
@@ -83,6 +83,7 @@ extern int nfscl_debuglevel;
 extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
 extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
 extern struct mtx ncl_iod_mutex;
+extern int maxbcachebuf;
 NFSCLSTATEMUTEX;
 
 MALLOC_DEFINE(M_NEWNFSREQ, "newnfsclient_req", "NFS request header");
(-)kern/vfs_bio.c (-20 / +52 lines)
@@ -131,6 +131,7 @@ static void bufkva_reclaim(vmem_t *, int
 static void bufkva_free(struct buf *);
 static int buf_import(void *, void **, int, int);
 static void buf_release(void *, void **, int);
+static void maxbcachebuf_adjust(void);
 
 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
@@ -245,6 +246,10 @@ SYSCTL_LONG(_vfs, OID_AUTO, barrierwrite
 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
     &unmapped_buf_allowed, 0,
     "Permit the use of the unmapped i/o");
+static int bkvasize = BKVASIZE;
+int maxbcachebuf = MAXBCACHEBUF;
+SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
+    "Maximum size of a buffer cache block");
 
 /*
  * This lock synchronizes access to bd_request.
@@ -847,6 +852,33 @@ bd_wakeup(void)
 }
 
 /*
+ * Adjust the maxbcachebuf and bkvasize tunables.
+ */
+static void
+maxbcachebuf_adjust(void)
+{
+	int i;
+
+	/*
+	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
+	 * If it has been tuned, set bkvasize to maxbcachebuf / 2.
+	 */
+	i = 2;
+	while (i * 2 <= maxbcachebuf)
+		i *= 2;
+	maxbcachebuf = i;
+	if (maxbcachebuf < MAXBSIZE)
+		maxbcachebuf = MAXBSIZE;
+	if (maxbcachebuf != MAXBCACHEBUF && maxbcachebuf > MAXBSIZE) {
+		bkvasize = maxbcachebuf / 2;
+		if (bkvasize < BKVASIZE)
+			bkvasize = BKVASIZE;
+	}
+	if (maxbcachebuf != MAXBCACHEBUF || bkvasize != BKVASIZE)
+		printf("maxbcachebuf=%d bkvasize=%d\n", maxbcachebuf, bkvasize);
+}
+
+/*
  * bd_speedup - speedup the buffer cache flushing code
  */
 void
@@ -893,8 +925,9 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 	 */
 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
 
+	maxbcachebuf_adjust();
 	/*
-	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+	 * The nominal buffer size (and minimum KVA allocation) is bkvasize.
 	 * For the first 64MB of ram nominally allocate sufficient buffers to
 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
@@ -904,7 +937,7 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 	 * factor represents the 1/4 x ram conversion.
 	 */
 	if (nbuf == 0) {
-		int factor = 4 * BKVASIZE / 1024;
+		int factor = 4 * bkvasize / 1024;
 
 		nbuf = 50;
 		if (physmem_est > 4096)
@@ -914,14 +947,14 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
 			    32 * 1024 * 1024 / (factor * 5));
 
-		if (maxbcache && nbuf > maxbcache / BKVASIZE)
-			nbuf = maxbcache / BKVASIZE;
+		if (maxbcache && nbuf > maxbcache / bkvasize)
+			nbuf = maxbcache / bkvasize;
 		tuned_nbuf = 1;
 	} else
 		tuned_nbuf = 0;
 
 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
-	maxbuf = (LONG_MAX / 3) / BKVASIZE;
+	maxbuf = (LONG_MAX / 3) / bkvasize;
 	if (nbuf > maxbuf) {
 		if (!tuned_nbuf)
 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
@@ -943,8 +976,8 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 	 * with ample KVA space.
 	 */
 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
-		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
-		buf_sz = (long)nbuf * BKVASIZE;
+		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * bkvasize;
+		buf_sz = (long)nbuf * bkvasize;
 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
 		    (TRANSIENT_DENOM - 1)) {
 			/*
@@ -973,7 +1006,7 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 		if (bio_transient_maxcnt > 1024)
 			bio_transient_maxcnt = 1024;
 		if (tuned_nbuf)
-			nbuf = buf_sz / BKVASIZE;
+			nbuf = buf_sz / bkvasize;
 	}
 
 	/*
@@ -1003,7 +1036,6 @@ bufinit(void)
 	struct buf *bp;
 	int i;
 
-	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
 	mtx_init(&bqlocks[QUEUE_DIRTY], "bufq dirty lock", NULL, MTX_DEF);
 	mtx_init(&bqlocks[QUEUE_EMPTY], "bufq empty lock", NULL, MTX_DEF);
 	for (i = QUEUE_CLEAN; i < QUEUE_CLEAN + CLEAN_QUEUES; i++)
@@ -1044,13 +1076,13 @@ bufinit(void)
 	 * used by most other requests.  The differential is required to
 	 * ensure that metadata deadlocks don't occur.
 	 *
-	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
+	 * maxbufspace is based on bkvasize.  Allocating buffers larger then
 	 * this may result in KVM fragmentation which is not handled optimally
 	 * by the system. XXX This is less true with vmem.  We could use
 	 * PAGE_SIZE.
 	 */
-	maxbufspace = (long)nbuf * BKVASIZE;
-	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
+	maxbufspace = (long)nbuf * bkvasize;
+	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
 	lobufspace = (hibufspace / 20) * 19; /* 95% */
 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
 
@@ -1062,9 +1094,9 @@ bufinit(void)
 	 * The lower 1 MiB limit is the historical upper limit for
 	 * hirunningspace.
 	 */
-	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
+	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
 	    16 * 1024 * 1024), 1024 * 1024);
-	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
+	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
 
 	/*
 	 * Limit the amount of malloc memory since it is wired permanently into
@@ -1086,9 +1118,9 @@ bufinit(void)
 	 * To support extreme low-memory systems, make sure hidirtybuffers
 	 * cannot eat up all available buffer space.  This occurs when our
 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
-	 * buffer space assuming BKVASIZE'd buffers.
+	 * buffer space assuming bkvasize'd buffers.
 	 */
-	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
+	while ((long)hidirtybuffers * bkvasize > 3 * hibufspace / 4) {
 		hidirtybuffers >>= 1;
 	}
 	lodirtybuffers = hidirtybuffers / 2;
@@ -2887,7 +2919,7 @@ getnewbuf_kva(struct buf *bp, int gbflag
 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
 		/*
 		 * In order to keep fragmentation sane we only allocate kva
-		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
+		 * in bkvasize chunks.  XXX with vmem we can do page size.
 		 */
 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
 
@@ -3484,9 +3516,9 @@ getblk(struct vnode *vp, daddr_t blkno,
 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
 	ASSERT_VOP_LOCKED(vp, "getblk");
-	if (size > MAXBCACHEBUF)
-		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
-		    MAXBCACHEBUF);
+	if (size > maxbcachebuf)
+		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
+		    maxbcachebuf);
 	if (!unmapped_buf_allowed)
 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 
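maxbcachebuf_adjust() above rounds a hand-tuned vfs.maxbcachebuf down to a power of 2 and enforces the MAXBSIZE floor; since the sysctl is CTLFLAG_RDTUN, the value comes from a loader tunable at boot and is read-only afterwards. A userland rendering of just the rounding step, with an invented tuned value and FAKE_MAXBSIZE as a 64 KB stand-in for MAXBSIZE:

#include <stdio.h>

#define FAKE_MAXBSIZE	65536

int
main(void)
{
	int maxbcachebuf = 200000;	/* as if set from loader.conf */
	int i;

	i = 2;
	while (i * 2 <= maxbcachebuf)	/* largest power of 2 <= the tunable */
		i *= 2;
	maxbcachebuf = i;
	if (maxbcachebuf < FAKE_MAXBSIZE)
		maxbcachebuf = FAKE_MAXBSIZE;
	printf("maxbcachebuf=%d\n", maxbcachebuf);	/* 200000 -> 131072 */
	return (0);
}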
