path: root/src/mp/mp_bh.c
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
 *
 * $Id$
 */

#include "db_config.h"

#include "db_int.h"
#include "dbinc/db_page.h"		/* Required for diagnostic code. */
#include "dbinc/mp.h"
#include "dbinc/log.h"
#include "dbinc/txn.h"

static int __memp_pgwrite
	       __P((ENV *, DB_MPOOLFILE *, DB_MPOOL_HASH *, BH *));

/*
 * __memp_bhwrite --
 *	Write the page associated with a given buffer header.
 *
 * PUBLIC: int __memp_bhwrite __P((DB_MPOOL *,
 * PUBLIC:      DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
 */
int
__memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
	DB_MPOOL *dbmp;
	DB_MPOOL_HASH *hp;
	MPOOLFILE *mfp;
	BH *bhp;
	int open_extents;
{
	DB_MPOOLFILE *dbmfp;
	DB_MPREG *mpreg;
	ENV *env;
	int opened, ret;

	env = dbmp->env;
	opened = 0;

	/*
	 * If the file has been removed or is a closed temporary file, we're
	 * done -- the page-write function knows how to handle the fact that
	 * we don't have (or need!) any real file descriptor information.
	 */
	if (mfp->deadfile)
		return (__memp_pgwrite(env, NULL, hp, bhp));

	/*
	 * Walk the process' DB_MPOOLFILE list and find a file descriptor for
	 * the file.  We also check that the descriptor is open for writing.
	 */
	MUTEX_LOCK(env, dbmp->mutex);
	TAILQ_FOREACH(dbmfp, &dbmp->dbmfq, q)
		if (dbmfp->mfp == mfp && !F_ISSET(dbmfp, MP_READONLY)) {
			++dbmfp->ref;
			break;
		}
	MUTEX_UNLOCK(env, dbmp->mutex);
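	/*
	 * If the loop found no match, dbmfp is NULL and this process has no
	 * writable handle for the file.  If it did, the reference we took
	 * above (while holding the mutex) keeps the handle from being closed
	 * out from under us.
	 */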

	if (dbmfp != NULL) {
		/*
		 * Temporary files may not have been created.  We only handle
		 * temporary files in this path, because only the process that
		 * created a temporary file will ever flush buffers to it.
		 */
		if (dbmfp->fhp == NULL) {
			/* We may not be allowed to create backing files. */
			if (mfp->no_backing_file) {
				--dbmfp->ref;
				return (EPERM);
			}

			MUTEX_LOCK(env, dbmp->mutex);
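			/*
			 * Re-check under the mutex: another thread in this
			 * process may have created the backing file while we
			 * were waiting for the lock.
			 */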
			if (dbmfp->fhp == NULL) {
				ret = __db_tmp_open(env,
				    F_ISSET(env->dbenv, DB_ENV_DIRECT_DB) ?
				    DB_OSO_DIRECT : 0, &dbmfp->fhp);
			} else
				ret = 0;
			MUTEX_UNLOCK(env, dbmp->mutex);
			if (ret != 0) {
				__db_errx(env, DB_STR("3014",
			    "unable to create temporary backing file"));
				--dbmfp->ref;
				return (ret);
			}
		}

		goto pgwrite;
	}

	/*
	 * There's no file handle for this file in our process.
	 *
	 * !!!
	 * It's the caller's choice if we're going to open extent files.
	 */
	if (!open_extents && F_ISSET(mfp, MP_EXTENT))
		return (EPERM);

	/*
	 * !!!
	 * Don't try to attach to temporary files.  There are two problems in
	 * trying to do that.  First, if we have different privileges than the
	 * process that "owns" the temporary file, we might create the backing
	 * disk file such that the owning process couldn't read/write its own
	 * buffers, e.g., memp_trickle running as root creating a file owned
	 * as root, mode 600.  Second, if the temporary file has already been
	 * created, we don't have any way of finding out what its real name is,
	 * and, even if we did, it was already unlinked (so that it won't be
	 * left if the process dies horribly).  This decision causes a problem,
	 * however: if the temporary file consumes the entire buffer cache,
	 * and the owner doesn't flush the buffers to disk, we could end up
	 * with resource starvation, and the memp_trickle thread couldn't do
	 * anything about it.  That's a pretty unlikely scenario, though.
	 *
	 * Note we should never get here when the temporary file in question
	 * has already been closed in another process, in which case it should
	 * be marked dead.
	 */
	if (F_ISSET(mfp, MP_TEMP) || mfp->no_backing_file)
		return (EPERM);

	/*
	 * It's not a page from a file we've opened.  If the file requires
	 * application-specific input/output processing, see if this process
	 * has ever registered information as to how to write this type of
	 * file.  If not, there's nothing we can do.
	 */
	if (mfp->ftype != 0 && mfp->ftype != DB_FTYPE_SET) {
		MUTEX_LOCK(env, dbmp->mutex);
		LIST_FOREACH(mpreg, &dbmp->dbregq, q)
			if (mpreg->ftype == mfp->ftype)
				break;
		MUTEX_UNLOCK(env, dbmp->mutex);
		if (mpreg == NULL)
			return (EPERM);
	}

	/*
	 * Try and open the file, specifying the known underlying shared area.
	 *
	 * !!!
	 * There's no negative cache, so we may repeatedly try and open files
	 * that we have previously tried (and failed) to open.
	 */
	if ((ret = __memp_fcreate(env, &dbmfp)) != 0)
		return (ret);
	/*
	 * The open will set MP_FLUSH, so take a reference to keep a
	 * checkpoint from closing this handle before we finish with it.
	 */
	dbmfp->ref++;
	opened = 1;
	if ((ret = __memp_fopen(dbmfp, mfp, NULL,
	    NULL, DB_FLUSH | DB_DURABLE_UNKNOWN, 0, mfp->pagesize)) != 0) {
		dbmfp->ref--;
		(void)__memp_fclose(dbmfp, 0);

		/*
		 * Ignore any error if the file is marked dead, assume the file
		 * was removed from under us.
		 */
		if (!mfp->deadfile)
			return (ret);

		dbmfp = NULL;
	}

pgwrite:
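	/*
	 * We're about to reference the buffer's memory -- it had better be
	 * accessible.
	 */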
	MVCC_MPROTECT(bhp->buf, mfp->pagesize,
	    PROT_READ | PROT_WRITE | PROT_EXEC);
	ret = __memp_pgwrite(env, dbmfp, hp, bhp);
	if (dbmfp == NULL)
		return (ret);

	/*
	 * Discard our reference, and, if we're the last reference, make sure
	 * the file eventually gets closed.
	 */
	MUTEX_LOCK(env, dbmp->mutex);
	if (!opened && dbmfp->ref == 1) {
		/*
		 * If we are the last reference, we need to mark this
		 * handle as having been used to flush.  If this dbmfp
		 * has not yet been counted as a neutral reference, do
		 * that now.
		 *
		 * Acquiring the mfp mutex while holding the dbmp mutex
		 * is safe; we never acquire them in the reverse order.
		 */
		if (!F_ISSET(dbmfp, MP_FLUSH)) {
			F_SET(dbmfp, MP_FLUSH);
			MUTEX_LOCK(env, dbmfp->mfp->mutex);
			if (!F_ISSET(dbmfp, MP_FOR_FLUSH)) {
				mfp->neutral_cnt++;
				F_SET(dbmfp, MP_FOR_FLUSH);
			}
			MUTEX_UNLOCK(env, dbmfp->mfp->mutex);
		}
	} else
		--dbmfp->ref;
	MUTEX_UNLOCK(env, dbmp->mutex);

	return (ret);
}

/*
 * __memp_pgread --
 *	Read a page from a file.
 *
 * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
 */
int
__memp_pgread(dbmfp, bhp, can_create)
	DB_MPOOLFILE *dbmfp;
	BH *bhp;
	int can_create;
{
	ENV *env;
	MPOOLFILE *mfp;
	size_t len, nr;
	u_int32_t pagesize;
	int ret;

	env = dbmfp->env;
	mfp = dbmfp->mfp;
	pagesize = mfp->pagesize;

	/* We should never be called with a dirty or unlocked buffer. */
	DB_ASSERT(env, !F_ISSET(bhp, BH_DIRTY_CREATE | BH_FROZEN));
	DB_ASSERT(env, can_create ||
	    F_ISSET(bhp, BH_TRASH) || !F_ISSET(bhp, BH_DIRTY));
	DB_ASSERT(env, F_ISSET(bhp, BH_EXCLUSIVE));

	/* Mark the buffer as in transition. */
	F_SET(bhp, BH_TRASH);

	/*
	 * Temporary files may not yet have been created.  We don't create
	 * them now; we create them when the pages have to be flushed.
	 */
	nr = 0;
	if (dbmfp->fhp != NULL) {
		PERFMON3(env, mpool, read, __memp_fn(dbmfp), bhp->pgno, bhp);
		if ((ret = __os_io(env, DB_IO_READ, dbmfp->fhp,
		    bhp->pgno, pagesize, 0, pagesize, bhp->buf, &nr)) != 0)
			goto err;
	}

	/*
	 * The page may not exist; if it doesn't, nr may well be 0, but we
	 * expect the underlying OS calls not to return an error code in
	 * this case.
	 */
	if (nr < pagesize) {
		/*
		 * Don't output error messages for short reads.  In particular,
		 * DB recovery processing may request pages that were never
		 * written to disk, or of which only part was written, in which
		 * case we won't find the page.  The caller must know how to
		 * handle the error.
		 */
		if (!can_create) {
			ret = USR_ERR(env, DB_PAGE_NOTFOUND);
			goto err;
		}

		/* Clear any bytes that need to be cleared. */
		len = mfp->clear_len == DB_CLEARLEN_NOTSET ?
		    pagesize : mfp->clear_len;
		memset(bhp->buf, 0, len);

#if defined(DIAGNOSTIC) || defined(UMRW)
		/*
		 * If we're running in diagnostic mode, corrupt any bytes on
		 * the page that are unknown quantities for the caller.
		 */
		if (len < pagesize)
			memset(bhp->buf + len, CLEAR_BYTE, pagesize - len);
#endif
		STAT_INC_VERB(env, mpool, page_create,
		    mfp->stat.st_page_create, __memp_fn(dbmfp), bhp->pgno);
	} else
		STAT_INC_VERB(env, mpool, page_in,
		    mfp->stat.st_page_in, __memp_fn(dbmfp), bhp->pgno);

	/* Call any pgin function. */
	ret = mfp->ftype == 0 ? 0 : __memp_pg(dbmfp, bhp->pgno, bhp->buf, 1);

	/*
	 * If no errors occurred, the data is now valid, clear the BH_TRASH
	 * flag.
	 */
	if (ret == 0)
		F_CLR(bhp, BH_TRASH);
err:	return (ret);
}

/*
 * __memp_pgwrite --
 *	Write a page to a file.
 */
static int
__memp_pgwrite(env, dbmfp, hp, bhp)
	ENV *env;
	DB_MPOOLFILE *dbmfp;
	DB_MPOOL_HASH *hp;
	BH *bhp;
{
	DB_LSN lsn;
	MPOOLFILE *mfp;
	size_t nw;
	int ret;
	void *buf;

	/*
	 * Since writing does not require exclusive access, another thread
	 * could have already written this buffer.
	 */
	if (!F_ISSET(bhp, BH_DIRTY))
		return (0);

	mfp = dbmfp == NULL ? NULL : dbmfp->mfp;
	ret = 0;
	buf = NULL;

	/* We should never be called with a frozen or trashed buffer. */
	DB_ASSERT(env, !F_ISSET(bhp, BH_FROZEN | BH_TRASH));

	/*
	 * It's possible that the underlying file doesn't exist, either
	 * because of an outright removal or because it was a temporary
	 * file that's been closed.
	 *
	 * !!!
	 * Once we pass this point, we know that dbmfp and mfp aren't NULL,
	 * and that we have a valid file reference.
	 */
	if (mfp == NULL || mfp->deadfile)
		goto file_dead;

	/*
	 * If the page is in a file for which we have LSN information, we have
	 * to ensure the appropriate log records are on disk.
	 */
	if (LOGGING_ON(env) && mfp->lsn_off != DB_LSN_OFF_NOTSET &&
	    !IS_CLIENT_PGRECOVER(env)) {
		memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
		if (!IS_NOT_LOGGED_LSN(lsn) &&
		    (ret = __log_flush(env, &lsn)) != 0)
			goto err;
	}

#ifdef DIAGNOSTIC
	/*
	 * Verify write-ahead logging semantics.
	 *
	 * !!!
	 * Two special cases.  There is a single field on the meta-data page,
	 * the last-page-number-in-the-file field, for which we do not log
	 * changes.  If the page was originally created in a database that
	 * didn't have logging turned on, we can see a page marked dirty but
	 * for which no corresponding log record has been written.  However,
	 * the only way that a page can be created for which there isn't a
	 * previous log record and valid LSN is when the page was created
	 * without logging turned on, and so we check for that special-case
	 * LSN value.
	 *
	 * Second, when a client is reading database pages from a master
	 * during an internal backup, we may get pages modified after
	 * the current end-of-log.
	 */
	if (LOGGING_ON(env) && !IS_NOT_LOGGED_LSN(LSN(bhp->buf)) &&
	    !IS_CLIENT_PGRECOVER(env)) {
		/*
		 * There is a potential race here.  If we are in the midst of
		 * switching log files, it's possible we could test against the
		 * old file and the new offset in the log region's LSN.  If we
		 * fail the first test, acquire the log mutex and check again.
		 */
		DB_LOG *dblp;
		LOG *lp;

		dblp = env->lg_handle;
		lp = dblp->reginfo.primary;
		if (!lp->db_log_inmemory &&
		    LOG_COMPARE(&lp->s_lsn, &LSN(bhp->buf)) <= 0) {
			MUTEX_LOCK(env, lp->mtx_flush);
			DB_ASSERT(env, F_ISSET(env->dbenv, DB_ENV_NOLOCKING) ||
			    LOG_COMPARE(&lp->s_lsn, &LSN(bhp->buf)) > 0);
			MUTEX_UNLOCK(env, lp->mtx_flush);
		}
	}
#endif

#ifndef HAVE_ATOMICFILEREAD
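	/*
	 * If a hot backup is copying this file, don't write pages that fall
	 * within the range currently being copied -- fail with EAGAIN
	 * instead.  Otherwise, count ourselves as an active writer so the
	 * backup can wait for in-flight writes to finish.
	 */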
	if (mfp->backup_in_progress != 0) {
		MUTEX_READLOCK(env, mfp->mtx_write);
		if (bhp->pgno >= mfp->low_pgno && bhp->pgno <= mfp->high_pgno) {
			MUTEX_UNLOCK(env, mfp->mtx_write);
			ret = EAGAIN;
			goto err;
		}
		atomic_inc(env, &mfp->writers);
		MUTEX_UNLOCK(env, mfp->mtx_write);
	} else
		atomic_inc(env, &mfp->writers);
#endif

	/*
	 * Call any pgout function.  If we have the page exclusive, we can
	 * convert it in place and reuse the buffer; otherwise, make a copy
	 * of the page so that others can continue looking at the page while
	 * we write it.
	 */
	buf = bhp->buf;
	if (mfp->ftype != 0) {
		if (F_ISSET(bhp, BH_EXCLUSIVE))
			F_SET(bhp, BH_TRASH);
		else {
			if ((ret = __os_malloc(env, mfp->pagesize, &buf)) != 0)
				goto err;
			memcpy(buf, bhp->buf, mfp->pagesize);
		}
		if ((ret = __memp_pg(dbmfp, bhp->pgno, buf, 0)) != 0)
			goto err;
	}

	PERFMON3(env, mpool, write, __memp_fn(dbmfp), bhp->pgno, bhp);
	/* Write the page. */
	if ((ret = __os_io(env, DB_IO_WRITE, dbmfp->fhp, bhp->pgno,
	    mfp->pagesize, 0, mfp->pagesize, buf, &nw)) != 0) {
#ifndef HAVE_ATOMICFILEREAD
		atomic_dec(env, &mfp->writers);
#endif
		__db_errx(env, DB_STR_A("3015",
		    "%s: write failed for page %lu", "%s %lu"),
		    __memp_fn(dbmfp), (u_long)bhp->pgno);
		goto err;
	}
#ifndef HAVE_ATOMICFILEREAD
	atomic_dec(env, &mfp->writers);
#endif
	STAT_INC_VERB(env, mpool, page_out,
	    mfp->stat.st_page_out, __memp_fn(dbmfp), bhp->pgno);
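	/*
	 * Track the highest page number flushed.  Check without the mutex
	 * first; only take mfp->mutex (and re-check, since another writer
	 * may have raced ahead of us) when this looks like a new maximum.
	 */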
	if (bhp->pgno > mfp->last_flushed_pgno) {
		MUTEX_LOCK(env, mfp->mutex);
		if (bhp->pgno > mfp->last_flushed_pgno)
			mfp->last_flushed_pgno = bhp->pgno;
		MUTEX_UNLOCK(env, mfp->mutex);
	}

err:
file_dead:
	if (buf != NULL && buf != bhp->buf)
		__os_free(env, buf);
	/*
	 * !!!
	 * Once we pass this point, dbmfp and mfp may be NULL and we may not have
	 * a valid file reference.
	 */

	/*
	 * Update the hash bucket statistics and reset the flags.  If we were
	 * successful, the page is no longer dirty.  Someone else may have
	 * also written the page, so we need to latch the hash bucket here
	 * to get the accounting correct.  Since we hold the buffer shared,
	 * it cannot be marked dirty again until we release it.  This is the
	 * only place we update the flags field while holding only a shared
	 * latch.
	 */
	if (F_ISSET(bhp, BH_DIRTY | BH_TRASH)) {
		MUTEX_LOCK(env, hp->mtx_hash);
		DB_ASSERT(env, !SH_CHAIN_HASNEXT(bhp, vc));
		if (ret == 0 && F_ISSET(bhp, BH_DIRTY)) {
			F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
			DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
			atomic_dec(env, &hp->hash_page_dirty);
		}

		/* Convert the page back to in-memory (pgin) format if necessary. */
		if ((ret != 0 || BH_REFCOUNT(bhp) > 1) &&
		    F_ISSET(bhp, BH_TRASH)) {
			ret = __memp_pg(dbmfp, bhp->pgno, bhp->buf, 1);
			F_CLR(bhp, BH_TRASH);
		}
		MUTEX_UNLOCK(env, hp->mtx_hash);
	}

	return (ret);
}

/*
 * __memp_pg --
 *	Call the pgin/pgout routine.
 *
 * PUBLIC: int __memp_pg __P((DB_MPOOLFILE *, db_pgno_t, void *, int));
 */
int
__memp_pg(dbmfp, pgno, buf, is_pgin)
	DB_MPOOLFILE *dbmfp;
	db_pgno_t pgno;
	void *buf;
	int is_pgin;
{
	DBT dbt, *dbtp;
	DB_MPOOL *dbmp;
	DB_MPREG *mpreg;
	ENV *env;
	MPOOLFILE *mfp;
	int ftype, ret;

	env = dbmfp->env;
	dbmp = env->mp_handle;
	mfp = dbmfp->mfp;

	if ((ftype = mfp->ftype) == DB_FTYPE_SET)
		mpreg = dbmp->pg_inout;
	else {
		MUTEX_LOCK(env, dbmp->mutex);
		LIST_FOREACH(mpreg, &dbmp->dbregq, q)
			if (ftype == mpreg->ftype)
				break;
		MUTEX_UNLOCK(env, dbmp->mutex);
	}
	if (mpreg == NULL)
		return (0);

	if (mfp->pgcookie_len == 0)
		dbtp = NULL;
	else {
		DB_SET_DBT(dbt, R_ADDR(
		    dbmp->reginfo, mfp->pgcookie_off), mfp->pgcookie_len);
		dbtp = &dbt;
	}

	if (is_pgin) {
		if (mpreg->pgin != NULL && (ret =
		    mpreg->pgin(env->dbenv, pgno, buf, dbtp)) != 0)
			goto err;
	} else
		if (mpreg->pgout != NULL && (ret =
		    mpreg->pgout(env->dbenv, pgno, buf, dbtp)) != 0)
			goto err;

	return (0);

err:	__db_errx(env, DB_STR_A("3016",
	    "%s: %s failed for page %lu", "%s %s %lu"), __memp_fn(dbmfp),
	    is_pgin ? DB_STR_P("pgin") : DB_STR_P("pgout"), (u_long)pgno);
	return (ret);
}

/*
 * __memp_bhfree --
 *	Free a bucket header and its referenced data.
 *
 *	The hash bucket is unlocked before returning except when flags includes
 *	BH_FREE_UNLOCKED -- or there was no hp passed in to begin with.
 *
 * PUBLIC: int __memp_bhfree __P((DB_MPOOL *,
 * PUBLIC:	REGINFO *, MPOOLFILE *, DB_MPOOL_HASH *, BH *, u_int32_t));
 */
int
__memp_bhfree(dbmp, infop, mfp, hp, bhp, flags)
	DB_MPOOL *dbmp;
	REGINFO *infop;
	MPOOLFILE *mfp;
	DB_MPOOL_HASH *hp;
	BH *bhp;
	u_int32_t flags;
{
	ENV *env;
#ifdef DIAGNOSTIC
	DB_LSN vlsn;
#endif
	BH *prev_bhp;
	MPOOL *c_mp;
	int ret, t_ret;
#ifdef DIAG_MVCC
	size_t pagesize;
#endif

	ret = 0;

	/*
	 * Assumes the hash bucket is locked and the MPOOL is not.
	 */
	env = dbmp->env;
#ifdef DIAG_MVCC
	if (mfp != NULL)
		pagesize = mfp->pagesize;
#endif

	DB_ASSERT(env, LF_ISSET(BH_FREE_UNLOCKED) ||
	    (hp != NULL && MUTEX_IS_OWNED(env, hp->mtx_hash)));
	DB_ASSERT(env, BH_REFCOUNT(bhp) == 1 &&
	    !F_ISSET(bhp, BH_DIRTY | BH_FROZEN));
	DB_ASSERT(env, LF_ISSET(BH_FREE_UNLOCKED) ||
	    SH_CHAIN_SINGLETON(bhp, vc) || (SH_CHAIN_HASNEXT(bhp, vc) &&
	    (SH_CHAIN_NEXTP(bhp, vc, __bh)->td_off == bhp->td_off ||
	    bhp->td_off == INVALID_ROFF ||
	    IS_MAX_LSN(*VISIBLE_LSN(env, bhp)) ||
	    F_ISSET(bhp, BH_UNREACHABLE) ||
	    BH_OBSOLETE(bhp, hp->old_reader, vlsn))));

	PERFMON3(env, mpool, evict, __memp_fns(dbmp, mfp), bhp->pgno, bhp);
	if (FLD_ISSET(env->dbenv->verbose, DB_VERB_MVCC))
		__db_msg(env, "bhfree pgno %lu roff %lx",
		    (u_long)bhp->pgno, (u_long)R_OFFSET(dbmp->reginfo, bhp));
	/*
	 * Delete the buffer header from the hash bucket queue or the
	 * version chain.
	 */
	if (hp == NULL)
		goto no_hp;
	prev_bhp = SH_CHAIN_PREV(bhp, vc, __bh);
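	/*
	 * Only the buffer at the head of the version chain (the one with no
	 * "next" version) is linked into the hash bucket queue.  If that is
	 * the buffer being freed and it has a previous version, splice the
	 * previous version into the bucket in its place before removing this
	 * header; then unlink it from the version chain.
	 */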
	if (!SH_CHAIN_HASNEXT(bhp, vc)) {
		if (prev_bhp != NULL)
			SH_TAILQ_INSERT_AFTER(&hp->hash_bucket,
			    bhp, prev_bhp, hq, __bh);
		SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
	}
	SH_CHAIN_REMOVE(bhp, vc, __bh);

	/*
	 * Remove the reference to this buffer from the transaction that
	 * created it, if any.  When the BH_FREE_UNLOCKED flag is set, we're
	 * discarding the environment, so the transaction region is already
	 * gone.
	 */
	if (bhp->td_off != INVALID_ROFF && !LF_ISSET(BH_FREE_UNLOCKED)) {
		ret = __txn_remove_buffer(
		    env, BH_OWNER(env, bhp), hp->mtx_hash);
		bhp->td_off = INVALID_ROFF;
	}

	/*
	 * We're going to use the memory for something else -- it had better be
	 * accessible.
	 */
no_hp:	if (mfp != NULL)
		MVCC_MPROTECT(bhp->buf,
		    pagesize, PROT_READ | PROT_WRITE | PROT_EXEC);

	/*
	 * Discard the hash bucket's mutex, it's no longer needed, and
	 * we don't want to be holding it when acquiring other locks.
	 */
	if (!LF_ISSET(BH_FREE_UNLOCKED))
		MUTEX_UNLOCK(env, hp->mtx_hash);

	/*
	 * If we're only removing this header from the chain for reuse, we're
	 * done.
	 */
	if (LF_ISSET(BH_FREE_REUSE))
		return (ret);

	/*
	 * If we're not reusing the buffer immediately, free the buffer for
	 * real.
	 */
	if (!LF_ISSET(BH_FREE_UNLOCKED))
		MUTEX_UNLOCK(env, bhp->mtx_buf);
	if (LF_ISSET(BH_FREE_FREEMEM)) {
		if ((ret = __mutex_free(env, &bhp->mtx_buf)) != 0)
			return (ret);
		MPOOL_REGION_LOCK(env, infop);

		MVCC_BHUNALIGN(bhp);
		__memp_free(infop, bhp);
		c_mp = infop->primary;
		c_mp->pages--;

		MPOOL_REGION_UNLOCK(env, infop);
	}

	if (mfp == NULL)
		return (ret);

	/*
	 * Decrement the reference count of the underlying MPOOLFILE.
	 * If this is its last reference, remove it.
	 */
	MUTEX_LOCK(env, mfp->mutex);
	if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0) {
		if ((t_ret = __memp_mf_discard(dbmp, mfp, 0)) != 0 && ret == 0)
			ret = t_ret;
	} else
		MUTEX_UNLOCK(env, mfp->mutex);

	return (ret);
}