/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
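
/*
 * An illustrative example (not part of the original source): assuming
 * VM_LEVEL_0_ORDER is 9, as for amd64's 2 MB superpages, VM_LEVEL_0_NPAGES
 * is 512, and an object with pg_color 96 maps pindex 1000 to index
 * (96 + 1000) & 511 = 72 within its reservation.  Including "pg_color" in
 * the computation keeps a page's offset within the reservation consistent
 * with the object's preferred physical alignment.
 */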

/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define	NPOPMAP_MAX	howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)
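
/*
 * Illustrative sizing (added for clarity): with 64-bit popmap_t entries,
 * NBPOPMAP is 64, so a 512-page level 0 reservation needs
 * NPOPMAP = howmany(512, 64) = 8 popmap entries.
 */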

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1
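
/*
 * Example of the effect (added for clarity): with PARTPOPSLOP set to 1, a
 * reservation's position in the partially populated queue is refreshed at
 * most about once per clock tick, so a burst of populate and depopulate
 * calls within a single tick performs one queue update instead of one per
 * call.
 */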

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	domain;			/* (c) NUMA domain. */
	uint16_t	popcnt;			/* (r) # of pages in use */
	int		lasttick;		/* (r) last pop update tick. */
	char		inpartpopq;		/* (d) */
	popmap_t	popmap[NPOPMAP_MAX];	/* (r) bit vector, used pages */
};

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	    mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	&vm_reserv_domain_locks[(d)]
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in that
 * it may contain a greater number of reservation structures than there are
 * (physical) superpages.  These "invalid"
 * reservation structures exist to trade off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
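
/*
 * A worked example (added for clarity, assuming VM_LEVEL_0_SHIFT is 21,
 * i.e., 2 MB reservations): a page at physical address 0x40301000 maps to
 * vm_reserv_array[0x40301000 >> 21] = vm_reserv_array[0x201], the same
 * element as every other page within the 2 MB frame at 0x40200000.
 */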

/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock,
 * vm_reserv_domain_lock().
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
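
/*
 * A divide-free variant of the hash above (a sketch, not part of the
 * original code): if VM_RESERV_OBJ_LOCK_COUNT were rounded up to a power
 * of two, the division and modulo could collapse into a shift and mask:
 *
 *	(((uintptr_t)(object) >> 6) & (VM_RESERV_OBJ_LOCK_COUNT - 1))
 *
 * where the shift discards low bits that are identical for all suitably
 * aligned objects.  Because MAXCPU need not be a power of two, the slower
 * but always-correct form is used here.
 */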

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
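
/*
 * Illustrative output of "sysctl vm.reserv.partpopq" (values invented for
 * the example; the single level 0 queue is reported with LEVEL -1):
 *
 *	DOMAIN    LEVEL     SIZE  NUMBER
 *
 *	     0,      -1,  2048K,       4
 *
 * SIZE totals the unused small pages in the queued reservations.
 */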

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
	int i;

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

/*
 * Returns an existing reservation or NULL and initialized successor pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
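
/*
 * A note on the test above (added for clarity): because the subtraction is
 * unsigned, the masked result is zero if and only if
 *
 *	rv->pindex <= pindex && pindex < rv->pindex + VM_LEVEL_0_NPAGES
 *
 * For example, with VM_LEVEL_0_NPAGES = 512 and rv->pindex = 1000, a pindex
 * of 1300 gives (1300 - 1000) & ~511 = 300 & ~511 = 0, i.e., TRUE, while a
 * pindex of 999 wraps around to a huge value and yields a nonzero result.
 */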

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The reservation must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Attempts to allocate a contiguous set of physical pages from existing
 * reservations.  See vm_reserv_alloc_contig() for a description of the
 * function's parameters.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int i, index;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_extend_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
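	/*
	 * Worked example (added for clarity): with PAGE_SHIFT = 12, an
	 * allocation starting at reservation index 3 has pa = 0x3000, so a
	 * request with alignment 0x4000 (16 KB) fails the first test below.
	 * The XOR test fails when pa and pa + size - 1 differ in any bit at
	 * or above the boundary bit, i.e., when the run would cross a
	 * "boundary"-aligned address.
	 */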
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);
	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend_contig: Domain mismatch from reservation."));
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	vm_reserv_lock(rv);
	if (rv->object != object)
		goto out;
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		goto out;
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++) {
		if (popmap_is_set(rv->popmap, index + i))
			goto out;
	}
	if (!vm_domain_allocate(vmd, req, npages))
		goto out;
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	vm_reserv_unlock(rv);
	return (m);

out:
	vm_reserv_unlock(rv);
	return (NULL);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * Callers should first invoke vm_reserv_extend_contig() to attempt an
 * allocation from existing reservations.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode. 
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_phys_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}

/*
 * Attempts to extend an existing reservation and allocate the page to the
 * object.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Could a reservation currently exist?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);

	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend: Domain mismatch from reservation."));
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	vm_reserv_lock(rv);
	/* Handle reclaim race. */
	if (rv->object != object ||
	    /* Handle vm_page_rename(m, new_object, ...). */
	    popmap_is_set(rv->popmap, index)) {
		m = NULL;
		goto out;
	}
	if (vm_domain_allocate(vmd, req, 1) == 0)
		m = NULL;
	else
		vm_reserv_populate(rv, index);
out:
	vm_reserv_unlock(rv);

	return (m);
}

/*
 * Attempts to allocate a new reservation for the object, and allocates a
 * page from that reservation.  Callers should first invoke vm_reserv_extend()
 * to attempt an allocation from an existing reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would a new reservation extend past the end of the object? 
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode. 
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}