/*
 * NOTE(review): interior fragment of a tmpfs "available memory"
 * computation (resembles tmpfs_mem_avail() in FreeBSD tmpfs_subr.c);
 * the enclosing function header is not visible in this span — confirm
 * against the original file.
 */
size_t avail;
long reserved;
/*
 * NOTE(review): 'avail' is assigned twice in a row; the first store is
 * dead (immediately overwritten by the next line).  This looks like
 * leftover diff residue — one of the two formulas (full swap_pager_avail
 * + vm_free_count() vs. half swap_pager_avail + vm_free_avail()) should
 * be removed.  Confirm which is intended before deleting either.
 */
avail = swap_pager_avail + vm_free_count();
avail = swap_pager_avail / 2 + vm_free_avail();
/* Pages already promised to tmpfs mounts; loaded atomically (lock-free). */
reserved = atomic_load_long(&tmpfs_pages_reserved);
/* If reservations exceed what is available, report no memory at all. */
if (__predict_false(avail < reserved))
return (0);
/*
 * NOTE(review): fragment of a regular-file resize path (resembles
 * tmpfs_reg_resize()).  Braces do NOT balance within this span — the '}'
 * below closes a scope that never opens here, and the two '{' that
 * follow never close — so the surrounding control flow is inferred, not
 * visible.  Reconcile against the original file before editing.
 */
if (newpages < oldpages)
/* Shrinking: remove the pages beyond the new end of the object. */
vm_object_page_remove(uobj, newpages, 0, 0);
}
if (newpages > oldpages) {
/*
 * Growing: fail with ENOSPC up front if tmpfs does not have enough
 * memory headroom for the additional pages.  The object write lock is
 * dropped before returning — presumably it was taken by the (unseen)
 * code above; confirm lock ownership in the full function.
 */
if (tmpfs_mem_avail() < newpages - oldpages) {
VM_OBJECT_WUNLOCK(uobj);
return (ENOSPC);
/* NOTE(review): success path — commit the new size.  The lines between
 * the ENOSPC return and this assignment appear to be missing. */
uobj->size = newpages;
/* Release the wired-page accounting for the pages given back. */
VM_CNT_ADD(v_wire_count, -cnt);
/* Forward declarations for the free-page accounting helpers below. */
u_int vm_free_avail(void);
u_int vm_free_count(void);
/*
 * NOTE(review): the opening brace and body of vm_wire_count() are
 * missing from this span — only the signature and the trailing
 * 'return (v);' survive, and 'v' is not declared in what is visible.
 * Recover the body from the original file; do not compile as-is.
 */
static inline u_int
vm_wire_count(void)
return (v);
/*
 * vm_free_avail(): free-page headroom above the pageout wakeup target.
 *
 * NOTE(review): as written this is wrong — when free <= v_free_target
 * the function returns (free - v_free_target), which underflows u_int
 * to a huge value.  Either the comparison is inverted (should be
 * 'free <= target' -> return 0), or a 'return (0);' line was lost
 * between the if and the subtraction during extraction.  The closing
 * brace of the function is also missing from this span.  Confirm
 * against the original source before fixing.
 */
u_int
vm_free_avail(void)
{
u_int free = vm_free_count();
if (free <= vm_cnt.v_free_target)
return (free - vm_cnt.v_free_target);
static u_int
vm_pagequeue_count(int pq)