@@ Lines 131-136 @@ static void bufkva_reclaim(vmem_t *, int
 static void bufkva_free(struct buf *);
 static int buf_import(void *, void **, int, int);
 static void buf_release(void *, void **, int);
+static void maxbcachebuf_adjust(void);
 
 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
@@ Lines 245-250 @@ SYSCTL_LONG(_vfs, OID_AUTO, barrierwrite
 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
     &unmapped_buf_allowed, 0,
     "Permit the use of the unmapped i/o");
+static int bkvasize = BKVASIZE;
+int maxbcachebuf = MAXBCACHEBUF;
+SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
+    "Maximum size of a buffer cache block");
 
 /*
  * This lock synchronizes access to bd_request.
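Because the new sysctl is declared CTLFLAG_RDTUN, vfs.maxbcachebuf is read-only at runtime and takes effect only as a boot-time tunable, e.g. vfs.maxbcachebuf="131072" in loader.conf (an illustrative value; whatever is supplied gets normalized by maxbcachebuf_adjust() below).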
@@ Lines 847-852 @@ bd_wakeup(void)
 }
 
 /*
+ * Adjust the maxbcachebuf and bkvasize tunables.
+ */
+static void
+maxbcachebuf_adjust(void)
+{
+	int i;
+
+	/*
+	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
+	 * If it has been tuned, set bkvasize to maxbcachebuf / 2.
+	 */
+	i = 2;
+	while (i * 2 <= maxbcachebuf)
+		i *= 2;
+	maxbcachebuf = i;
+	if (maxbcachebuf < MAXBSIZE)
+		maxbcachebuf = MAXBSIZE;
+	if (maxbcachebuf != MAXBCACHEBUF && maxbcachebuf > MAXBSIZE) {
+		bkvasize = maxbcachebuf / 2;
+		if (bkvasize < BKVASIZE)
+			bkvasize = BKVASIZE;
+	}
+	if (maxbcachebuf != MAXBCACHEBUF || bkvasize != BKVASIZE)
+		printf("maxbcachebuf=%d bkvasize=%d\n", maxbcachebuf, bkvasize);
+}
+
+/*
  * bd_speedup - speedup the buffer cache flushing code
  */
 void
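The rounding logic is easy to misread, so here is a minimal userland sketch of the same loop (a hypothetical test harness, not part of the patch), assuming the stock sys/param.h values MAXBSIZE = 65536 and BKVASIZE = 16384:

#include <stdio.h>

/* Assumed defaults, mirroring sys/param.h at the time of this patch. */
#define	MAXBSIZE	65536
#define	BKVASIZE	16384
#define	MAXBCACHEBUF	MAXBSIZE

static int bkvasize = BKVASIZE;

/*
 * Same rounding as maxbcachebuf_adjust(): take the largest power of 2
 * <= the tuned value, clamp it up to MAXBSIZE, and if the result was
 * actually tuned above the default, derive bkvasize as half of it.
 */
static int
adjust(int maxbcachebuf)
{
	int i;

	i = 2;
	while (i * 2 <= maxbcachebuf)
		i *= 2;
	maxbcachebuf = i;
	if (maxbcachebuf < MAXBSIZE)
		maxbcachebuf = MAXBSIZE;
	if (maxbcachebuf != MAXBCACHEBUF && maxbcachebuf > MAXBSIZE) {
		bkvasize = maxbcachebuf / 2;
		if (bkvasize < BKVASIZE)
			bkvasize = BKVASIZE;
	}
	return (maxbcachebuf);
}

int
main(void)
{
	/* 98304 (96K) is not a power of 2: rounds down to the default. */
	printf("98304 -> maxbcachebuf=%d bkvasize=%d\n", adjust(98304),
	    bkvasize);
	/* 131072 (128K) passes through and halves into bkvasize. */
	bkvasize = BKVASIZE;
	printf("131072 -> maxbcachebuf=%d bkvasize=%d\n", adjust(131072),
	    bkvasize);
	return (0);
}

A tuned value that is not a power of 2 is rounded down (98304 collapses back to 65536, the default, leaving bkvasize alone), while 131072 is accepted and bkvasize doubles to 65536, halving the number of buffers carved from the same KVA.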
@@ Lines 893-900 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 	 */
 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
 
+	maxbcachebuf_adjust();
 	/*
-	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+	 * The nominal buffer size (and minimum KVA allocation) is bkvasize.
 	 * For the first 64MB of ram nominally allocate sufficient buffers to
 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
@@ Lines 904-910 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 	 * factor represents the 1/4 x ram conversion.
 	 */
 	if (nbuf == 0) {
-		int factor = 4 * BKVASIZE / 1024;
+		int factor = 4 * bkvasize / 1024;
 
 		nbuf = 50;
 		if (physmem_est > 4096)
@@ Lines 914-927 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
 			    32 * 1024 * 1024 / (factor * 5));
 
-		if (maxbcache && nbuf > maxbcache / BKVASIZE)
-			nbuf = maxbcache / BKVASIZE;
+		if (maxbcache && nbuf > maxbcache / bkvasize)
+			nbuf = maxbcache / bkvasize;
 		tuned_nbuf = 1;
 	} else
 		tuned_nbuf = 0;
 
 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
-	maxbuf = (LONG_MAX / 3) / BKVASIZE;
+	maxbuf = (LONG_MAX / 3) / bkvasize;
 	if (nbuf > maxbuf) {
 		if (!tuned_nbuf)
 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
@@ Lines 943-950 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 	 * with ample KVA space.
 	 */
 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
-		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
-		buf_sz = (long)nbuf * BKVASIZE;
+		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * bkvasize;
+		buf_sz = (long)nbuf * bkvasize;
 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
 		    (TRANSIENT_DENOM - 1)) {
 			/*
@@ Lines 973-979 @@ kern_vfs_bio_buffer_alloc(caddr_t v, lon
 		if (bio_transient_maxcnt > 1024)
 			bio_transient_maxcnt = 1024;
 		if (tuned_nbuf)
-			nbuf = buf_sz / BKVASIZE;
+			nbuf = buf_sz / bkvasize;
 	}
 
 	/*
@@ Lines 1003-1009 @@ bufinit(void)
 	struct buf *bp;
 	int i;
 
-	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
 	mtx_init(&bqlocks[QUEUE_DIRTY], "bufq dirty lock", NULL, MTX_DEF);
 	mtx_init(&bqlocks[QUEUE_EMPTY], "bufq empty lock", NULL, MTX_DEF);
 	for (i = QUEUE_CLEAN; i < QUEUE_CLEAN + CLEAN_QUEUES; i++)
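The CTASSERT(MAXBCACHEBUF >= MAXBSIZE) can no longer be checked at compile time once the limit is a variable; its runtime equivalent is the clamp in maxbcachebuf_adjust(), which raises maxbcachebuf to at least MAXBSIZE before any derived sizes are computed.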
@@ Lines 1044-1056 @@ bufinit(void)
 	 * used by most other requests.  The differential is required to
 	 * ensure that metadata deadlocks don't occur.
 	 *
-	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger then
+	 * maxbufspace is based on bkvasize.  Allocating buffers larger then
 	 * this may result in KVM fragmentation which is not handled optimally
 	 * by the system.  XXX This is less true with vmem.  We could use
 	 * PAGE_SIZE.
 	 */
-	maxbufspace = (long)nbuf * BKVASIZE;
-	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
+	maxbufspace = (long)nbuf * bkvasize;
+	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
 	lobufspace = (hibufspace / 20) * 19;	/* 95% */
 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
 
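As a quick check of the new arithmetic: with nbuf = 1000 and an untuned bkvasize of 16384, maxbufspace is 16000 KiB and maxbcachebuf * 10 is 640 KiB, so hibufspace = lmax(12000 KiB, 15360 KiB) = 15360 KiB; the lmax() only falls back to the 3/4 fraction on very small buffer caches.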
@@ Lines 1062-1070 @@ bufinit(void)
 	 * The lower 1 MiB limit is the historical upper limit for
 	 * hirunningspace.
 	 */
-	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
+	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
 	    16 * 1024 * 1024), 1024 * 1024);
-	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
+	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
 
 	/*
 	 * Limit the amount of malloc memory since it is wired permanently into
@@ Lines 1086-1094 @@ bufinit(void)
 	 * To support extreme low-memory systems, make sure hidirtybuffers
 	 * cannot eat up all available buffer space.  This occurs when our
 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
-	 * buffer space assuming BKVASIZE'd buffers.
+	 * buffer space assuming bkvasize'd buffers.
 	 */
-	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
+	while ((long)hidirtybuffers * bkvasize > 3 * hibufspace / 4) {
 		hidirtybuffers >>= 1;
 	}
 	lodirtybuffers = hidirtybuffers / 2;
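With bkvasize doubled or quadrupled by the tunable, each dirty buffer pins more KVA, so this loop halves hidirtybuffers correspondingly sooner to keep dirty data at or below 3/4 of hibufspace.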
@@ Lines 2887-2893 @@ getnewbuf_kva(struct buf *bp, int gbflag
 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
 		/*
 		 * In order to keep fragmentation sane we only allocate kva
-		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
+		 * in bkvasize chunks.  XXX with vmem we can do page size.
 		 */
 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
 
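Worth noting: only the comment changes here; maxsize is still rounded with the compile-time BKVAMASK (BKVASIZE - 1), not the runtime bkvasize, so KVA requests remain BKVASIZE-granular even when larger buffers are enabled.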
@@ Lines 3484-3492 @@ getblk(struct vnode *vp, daddr_t blkno,
 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
 	ASSERT_VOP_LOCKED(vp, "getblk");
-	if (size > MAXBCACHEBUF)
-		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
-		    MAXBCACHEBUF);
+	if (size > maxbcachebuf)
+		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
+		    maxbcachebuf);
 	if (!unmapped_buf_allowed)
 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
 
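The size check in getblk() now enforces the runtime limit, so the tunable directly raises the largest block a filesystem may request from the buffer cache, and the panic string reports the active value rather than the compile-time constant.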