Diffstat (limited to 'src/lock')
-rw-r--r--   src/lock/Design            |    2
-rw-r--r--   src/lock/lock.c            |   50
-rw-r--r--   src/lock/lock_alloc.incl   |    2
-rw-r--r--   src/lock/lock_deadlock.c   |   47
-rw-r--r--   src/lock/lock_failchk.c    |    9
-rw-r--r--   src/lock/lock_id.c         |  306
-rw-r--r--   src/lock/lock_list.c       |    2
-rw-r--r--   src/lock/lock_method.c     |    2
-rw-r--r--   src/lock/lock_region.c     |   43
-rw-r--r--   src/lock/lock_stat.c       |  208
-rw-r--r--   src/lock/lock_stub.c       |    7
-rw-r--r--   src/lock/lock_timer.c      |    2
-rw-r--r--   src/lock/lock_util.c       |    2
13 files changed, 431 insertions, 251 deletions
diff --git a/src/lock/Design b/src/lock/Design
index f82bc7e8..2a1d1145 100644
--- a/src/lock/Design
+++ b/src/lock/Design
@@ -298,4 +298,4 @@ A: We currently do not support any automatic configuration for
 FINE_GRAIN locking.  When we do, will need to document that
 atomicity discussion listed above (it is bug-report #553).
 
-Copyright (c) 2010, 2012 Oracle and/or its affiliates.  All rights reserved.
+Copyright (c) 2010, 2015 Oracle and/or its affiliates.  All rights reserved.
diff --git a/src/lock/lock.c b/src/lock/lock.c
index e4627734..bcebbe44 100644
--- a/src/lock/lock.c
+++ b/src/lock/lock.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -31,8 +31,8 @@ static int __lock_trade __P((ENV *, DB_LOCK *, DB_LOCKER *));
 static int __lock_vec_api __P((ENV *,
     u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
 
-static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
-static const char __db_locker_invalid[] = "Locker is not valid";
+#define LOCK_INVALID_ERR    DB_STR_A("2056", "%s: Lock is no longer valid", "%s")
+#define LOCKER_INVALID_ERR  DB_STR("2057", "Locker is not valid")
 
 #ifdef DEBUG
 extern void __db_loadme (void);
@@ -111,7 +111,8 @@ __lock_vec(env, sh_locker, flags, list, nlist, elistp)
     DB_LOCKREQ *list, **elistp;
 {
     struct __db_lock *lp, *next_lock;
-    DB_LOCK lock; DB_LOCKOBJ *sh_obj;
+    DB_LOCK lock;
+    DB_LOCKOBJ *sh_obj;
     DB_LOCKREGION *region;
     DB_LOCKTAB *lt;
     DBT *objlist, *np;
@@ -200,12 +201,18 @@ __lock_vec(env, sh_locker, flags, list, nlist, elistp)
             if (writes == 1 ||
                 lp->mode == DB_LOCK_READ ||
                 lp->mode == DB_LOCK_READ_UNCOMMITTED) {
-                SH_LIST_REMOVE(lp,
-                    locker_links, __db_lock);
+                /*
+                 * It is safe to look at lp before
+                 * locking because any threads sharing
+                 * this locker must not be in the API
+                 * at the same time.
+                 */
                 sh_obj = SH_OFF_TO_PTR(lp,
                     lp->obj, DB_LOCKOBJ);
                 ndx = sh_obj->indx;
                 OBJECT_LOCK_NDX(lt, region, ndx);
+                SH_LIST_REMOVE(lp,
+                    locker_links, __db_lock);
                 /*
                  * We are not letting lock_put_internal
                  * unlink the lock, so we'll have to
@@ -423,7 +430,7 @@ __lock_get_api(env, locker, flags, obj, lock_mode, lock)
     region = env->lk_handle->reginfo.primary;
 
     LOCK_LOCKERS(env, region);
-    ret = __lock_getlocker_int(env->lk_handle, locker, 0, &sh_locker);
+    ret = __lock_getlocker_int(env->lk_handle, locker, 0, NULL, &sh_locker);
     UNLOCK_LOCKERS(env, region);
     LOCK_SYSTEM_LOCK(env->lk_handle, region);
     if (ret == 0)
@@ -979,12 +986,21 @@ in_abort:	newl->status = DB_LSTAT_WAITING;
             goto err;
         }
 
+        /*
+         * Sleep until someone releases a lock which might let us in.
+         * Since we want to set the thread state back to ACTIVE, don't
+         * use the normal MUTEX_LOCK() macro, which would immediately
+         * return a panic error code. Instead, return the panic after
+         * restoring the thread state.
+         */
         PERFMON2(env, lock, suspend, (DBT *) obj, lock_mode);
-        MUTEX_LOCK(env, newl->mtx_lock);
+        ret = __mutex_lock(env, newl->mtx_lock);
         PERFMON2(env, lock, resume, (DBT *) obj, lock_mode);
 
         if (ip != NULL)
             ip->dbth_state = THREAD_ACTIVE;
+        if (ret != 0)
+            return (ret);
 
         LOCK_SYSTEM_LOCK(lt, region);
         OBJECT_LOCK_NDX(lt, region, ndx);
@@ -1165,7 +1181,7 @@ __lock_put_nolock(env, lock, runp, flags)
     lockp = R_ADDR(&lt->reginfo, lock->off);
     DB_ASSERT(env, lock->gen == lockp->gen);
     if (lock->gen != lockp->gen) {
-        __db_errx(env, __db_lock_invalid, "DB_LOCK->lock_put");
+        __db_errx(env, LOCK_INVALID_ERR, "DB_LOCK->lock_put");
         LOCK_INIT(*lock);
         return (EINVAL);
     }
@@ -1224,7 +1240,7 @@ __lock_downgrade(env, lock, new_mode, flags)
 
     lockp = R_ADDR(&lt->reginfo, lock->off);
     if (lock->gen != lockp->gen) {
-        __db_errx(env, __db_lock_invalid, "lock_downgrade");
+        __db_errx(env, LOCK_INVALID_ERR, "lock_downgrade");
         ret = EINVAL;
         goto out;
     }
@@ -1662,7 +1678,7 @@ __lock_inherit_locks(lt, sh_locker, flags)
      * locks, so inheritance is easy!
      */
     if (sh_locker == NULL) {
-        __db_errx(env, __db_locker_invalid);
+        __db_errx(env, LOCKER_INVALID_ERR);
         return (EINVAL);
     }
@@ -1683,11 +1699,15 @@ __lock_inherit_locks(lt, sh_locker, flags)
     for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
         lp != NULL;
         lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
-        SH_LIST_REMOVE(lp, locker_links, __db_lock);
-
-        /* See if the parent already has a lock. */
+        /*
+         * See if the parent already has a lock. It is safe to look at
+         * lp before locking it because any threads sharing this locker
+         * must not be in the API at the same time.
+         */
         obj = SH_OFF_TO_PTR(lp, lp->obj, DB_LOCKOBJ);
         OBJECT_LOCK_NDX(lt, region, obj->indx);
+        SH_LIST_REMOVE(lp, locker_links, __db_lock);
+
         SH_TAILQ_FOREACH(hlp, &obj->holders, links, __db_lock)
             if (hlp->holder == poff && lp->mode == hlp->mode)
                 break;
@@ -1917,7 +1937,7 @@ __lock_trade(env, lock, new_locker)
 
     /* If the lock is already released, simply return. */
     if (lp->gen != lock->gen)
-        return (DB_NOTFOUND);
+        return (USR_ERR(env, DB_NOTFOUND));
 
     if (new_locker == NULL) {
         __db_errx(env, DB_STR("2040", "Locker does not exist"));
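The lock.c hunks above reorder __lock_vec() and __lock_inherit_locks() so the object's partition mutex is taken before the lock is unlinked from its locker's list; reading the lock entry beforehand is safe only because threads sharing a locker are never in the API concurrently. A minimal standalone sketch of that ordering follows; all names (struct item, part_mtx, unlink_item) are hypothetical stand-ins, not Berkeley DB code.

    #include <pthread.h>
    #include <stddef.h>

    #define NPARTS 16

    struct item {
        struct item *next;      /* owner's singly-linked list */
        unsigned int obj_indx;  /* selects the object partition */
    };

    static pthread_mutex_t part_mtx[NPARTS];

    /* Initialize the partition mutexes once at startup. */
    static void
    parts_init(void)
    {
        int i;

        for (i = 0; i < NPARTS; i++)
            (void)pthread_mutex_init(&part_mtx[i], NULL);
    }

    /*
     * Unlink 'ip' from its owner's list, taking the object-partition
     * mutex first -- the same ordering the hunks above establish.
     */
    static void
    unlink_item(struct item **headp, struct item *ip)
    {
        struct item **lpp;
        pthread_mutex_t *mtx;

        /*
         * Reading ip->obj_indx before locking is safe only under the
         * invariant stated in the diff's comment: no two threads
         * sharing this owner are in the API at the same time.
         */
        mtx = &part_mtx[ip->obj_indx % NPARTS];
        (void)pthread_mutex_lock(mtx);      /* lock the partition... */
        for (lpp = headp; *lpp != NULL; lpp = &(*lpp)->next)
            if (*lpp == ip) {
                *lpp = ip->next;            /* ...then unlink */
                break;
            }
        (void)pthread_mutex_unlock(mtx);
    }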
diff --git a/src/lock/lock_alloc.incl b/src/lock/lock_alloc.incl
index edea07d2..e10cbcbf 100644
--- a/src/lock/lock_alloc.incl
+++ b/src/lock/lock_alloc.incl
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 2012, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
diff --git a/src/lock/lock_deadlock.c b/src/lock/lock_deadlock.c
index 3c00d7f1..79086687 100644
--- a/src/lock/lock_deadlock.c
+++ b/src/lock/lock_deadlock.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -683,38 +683,45 @@ again:	memset(bitmap, 0, count * sizeof(u_int32_t) * nentries);
 
     /*
      * Now for each locker, record its last lock and set abort status.
      * We need to look at the heldby list carefully.  We have the LOCKERS
-     * locked so they cannot go away.  The lock at the head of the
-     * list can be removed by locking the object it points at.
-     * Since lock memory is not freed if we get a lock we can look
-     * at it safely but SH_LIST_FIRST is not atomic, so we check that
-     * the list has not gone empty during that macro. We check abort
-     * status after building the bit maps so that we will not detect
-     * a blocked transaction without noting that it is already aborting.
+     * locked so they cannot go away.  The LOCK_SYSTEM_LOCK keeps things
+     * steady when the lock table is not partitioned. However, if there are
+     * multiple lock partitions then the head of the heldby list can be
+     * changed by another thread locking the object it points at. That
+     * thread will have OBJECT_LOCK()'d that lock's partition. We need to
+     * look at the lock entry in order to determine which partition to
+     * mutex_lock. Since lock structs are never really freed, once we get
+     * the pointer we can look at it safely. However SH_LIST_FIRST is not
+     * atomic, so we first fetch the pointer and then check that the list
+     * was not empty during the fetch. This lets us at least mutex_lock the
+     * partition of the lock. Afterwards, we retry if the lock is no longer
+     * the first for that locker -- it might have changed to something else
+     * since then. We check abort status after building the bit maps so
+     * that we will not pick a blocked transaction without noting that it
+     * is already aborting.
      */
     for (id = 0; id < count; id++) {
         if (!id_array[id].valid)
             continue;
-        if ((ret = __lock_getlocker_int(lt,
-            id_array[id].id, 0, &lockerp)) != 0 || lockerp == NULL)
+        if ((ret = __lock_getlocker_int(lt, id_array[id].id,
+            0, NULL, &lockerp)) != 0 || lockerp == NULL)
             continue;
 
         /*
-         * If this is a master transaction, try to
-         * find one of its children's locks first,
-         * as they are probably more recent.
+         * If this is a master transaction, try to find one of its
+         * children's locks first, as they are probably more recent.
          */
         child = SH_LIST_FIRST(&lockerp->child_locker, __db_locker);
         if (child != NULL) {
             do {
-c_retry:        lp = SH_LIST_FIRST(&child->heldby, __db_lock);
-                if (SH_LIST_EMPTY(&child->heldby) || lp == NULL)
+c_retry:        lp = SH_LIST_FIRSTP(&child->heldby, __db_lock);
+                if (__SH_LIST_WAS_EMPTY(&child->heldby, lp))
                     goto c_next;
                 if (F_ISSET(child, DB_LOCKER_INABORT))
                     id_array[id].in_abort = 1;
                 ndx = lp->indx;
                 OBJECT_LOCK_NDX(lt, region, ndx);
-                if (lp != SH_LIST_FIRST(
+                if (lp != SH_LIST_FIRSTP(
                     &child->heldby, __db_lock) ||
                     ndx != lp->indx) {
                     OBJECT_UNLOCK(lt, region, ndx);
@@ -733,11 +740,11 @@ c_next:		child = SH_LIST_NEXT(
             } while (child != NULL);
         }
 
-l_retry:    lp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
-        if (!SH_LIST_EMPTY(&lockerp->heldby) && lp != NULL) {
+l_retry:    lp = SH_LIST_FIRSTP(&lockerp->heldby, __db_lock);
+        if (!__SH_LIST_WAS_EMPTY(&lockerp->heldby, lp)) {
             ndx = lp->indx;
             OBJECT_LOCK_NDX(lt, region, ndx);
-            if (lp != SH_LIST_FIRST(&lockerp->heldby, __db_lock) ||
+            if (lp != SH_LIST_FIRSTP(&lockerp->heldby, __db_lock) ||
                 lp->indx != ndx) {
                 OBJECT_UNLOCK(lt, region, ndx);
                 goto l_retry;
@@ -869,7 +876,7 @@ __dd_abort(env, info, statusp)
      * detecting, return that.
      */
     if ((ret = __lock_getlocker_int(lt,
-        info->last_locker_id, 0, &lockerp)) != 0)
+        info->last_locker_id, 0, NULL, &lockerp)) != 0)
         goto err;
     if (lockerp == NULL || F_ISSET(lockerp, DB_LOCKER_INABORT)) {
         *statusp = DB_ALREADY_ABORTED;
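The rewritten comment above describes a fetch/verify/retry idiom: SH_LIST_FIRSTP fetches the head without synchronization, __SH_LIST_WAS_EMPTY detects a racing removal, and the code retries when the head changed before the partition mutex was acquired. A self-contained sketch of the same idiom, valid only under the comment's assumption that nodes are never unmapped; the names are hypothetical, not the SH_LIST macros themselves.

    #include <pthread.h>
    #include <stddef.h>

    #define NPARTS 16

    struct lk {
        struct lk *next;
        unsigned int indx;      /* object index; selects the partition */
    };

    struct lk_list {
        struct lk *head;        /* may be changed by other threads */
    };

    static pthread_mutex_t part_mtx[NPARTS];    /* initialized elsewhere */

    /*
     * Return the list head with its partition mutex held, or NULL if
     * the list was empty during the fetch.  On success the caller
     * unlocks part_mtx[np->indx % NPARTS].
     */
    static struct lk *
    first_stable(struct lk_list *list)
    {
        struct lk *np;
        unsigned int ndx;

    retry:  np = list->head;        /* unsynchronized fetch */
        if (np == NULL)             /* empty during the fetch */
            return (NULL);
        /* Nodes are never unmapped, so a stale 'np' may be examined. */
        ndx = np->indx;
        (void)pthread_mutex_lock(&part_mtx[ndx % NPARTS]);
        if (np != list->head || np->indx != ndx) {
            /* The head moved before we got the partition; try again. */
            (void)pthread_mutex_unlock(&part_mtx[ndx % NPARTS]);
            goto retry;
        }
        return (np);
    }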
diff --git a/src/lock/lock_failchk.c b/src/lock/lock_failchk.c
index 59fb010f..84f757bf 100644
--- a/src/lock/lock_failchk.c
+++ b/src/lock/lock_failchk.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 2005, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 2005, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -15,7 +15,7 @@
 /*
  * __lock_failchk --
  *	Check for locks held by dead threads of control and release
- *	read locks.  If any write locks were held by dead non-trasnactional
+ *	read locks.  If any write locks were held by dead non-transactional
  *	lockers then we must abort and run recovery.  Otherwise we release
  *	read locks for lockers owned by dead threads.  Write locks for
  *	dead transactional lockers will be freed when we abort the transaction.
@@ -98,9 +98,8 @@ retry:	LOCK_LOCKERS(env, lrp);
             /*
              * This locker is most likely referenced by a cursor
              * which is owned by a dead thread.  Normally the
-             * cursor would be available for other threads
-             * but we assume the dead thread will never release
-             * it.
+             * cursor would be available for other threads but we
+             * assume the dead thread will never release it.
              */
             if (lip->id < TXN_MINIMUM &&
                 (ret = __lock_freelocker(lt, lip)) != 0)
diff --git a/src/lock/lock_id.c b/src/lock/lock_id.c
index 24b545d1..e0dbaa01 100644
--- a/src/lock/lock_id.c
+++ b/src/lock/lock_id.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -17,7 +17,7 @@ static int __lock_freelocker_int
 
 /*
  * __lock_id_pp --
- *	ENV->lock_id pre/post processing.
+ *	DB_ENV->lock_id pre/post processing.
  *
  * PUBLIC: int __lock_id_pp __P((DB_ENV *, u_int32_t *));
  */
@@ -43,7 +43,11 @@ __lock_id_pp(dbenv, idp)
 
 /*
  * __lock_id --
- *	ENV->lock_id.
+ *	Allocate a new lock id as well as a locker struct to hold it. If we
+ *	wrap around then we find the minimum currently in use and make sure
+ *	we can stay below that. This is similar to __txn_begin_int's code to
+ *	recover txn ids.
+ *
  *
  * PUBLIC: int __lock_id __P((ENV *, u_int32_t *, DB_LOCKER **));
  */
@@ -59,22 +63,15 @@ __lock_id(env, idp, lkp)
     u_int32_t id, *ids;
     int nids, ret;
 
-    lk = NULL;
     lt = env->lk_handle;
     region = lt->reginfo.primary;
     id = DB_LOCK_INVALIDID;
-    ret = 0;
-
-    id = DB_LOCK_INVALIDID;
     lk = NULL;
+    ret = 0;
 
     LOCK_LOCKERS(env, region);
 
     /*
-     * Allocate a new lock id. If we wrap around then we find the minimum
-     * currently in use and make sure we can stay below that. This code is
-     * similar to code in __txn_begin_int for recovering txn ids.
-     *
      * Our current valid range can span the maximum valid value, so check
      * for it and wrap manually.
      */
@@ -98,7 +95,7 @@ __lock_id(env, idp, lkp)
         id = ++region->lock_id;
 
     /* Allocate a locker for this id. */
-    ret = __lock_getlocker_int(lt, id, 1, &lk);
+    ret = __lock_getlocker_int(lt, id, 1, NULL, &lk);
 
 err:	UNLOCK_LOCKERS(env, region);
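The revised __lock_id() comment describes wrap-around id allocation: when the counter reaches the maximum, numbering restarts below the smallest id still in use, as __txn_begin_int does for txn ids. A simplified sketch of that policy; all names are hypothetical, and the real code also remembers the recovered ceiling for later increments.

    #include <stdint.h>

    #define ID_INVALID  0U
    #define ID_MIN      1U
    #define ID_MAX      0x7fffffffU

    /* Smallest id still in use; ids[] holds the nids live ids. */
    static uint32_t
    min_in_use(const uint32_t *ids, int nids)
    {
        uint32_t lo;
        int i;

        lo = ID_MAX;
        for (i = 0; i < nids; i++)
            if (ids[i] < lo)
                lo = ids[i];
        return (lo);
    }

    /*
     * Hand out the next id; on exhaustion, wrap and restart numbering
     * beneath the smallest live id.  ID_INVALID means the space is
     * genuinely full.
     */
    static uint32_t
    next_id(uint32_t *curp, const uint32_t *ids, int nids)
    {
        uint32_t ceiling;

        if (*curp >= ID_MAX) {
            ceiling = min_in_use(ids, nids);
            if (ceiling <= ID_MIN + 1)
                return (ID_INVALID);    /* no ids free below it */
            *curp = ID_MIN;             /* restart the numbering */
        }
        return (++*curp);
    }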
@@ -165,7 +162,8 @@ __lock_id_free_pp(dbenv, id)
 
     LOCK_LOCKERS(env, region);
     if ((ret =
-        __lock_getlocker_int(env->lk_handle, id, 0, &sh_locker)) == 0) {
+        __lock_getlocker_int(env->lk_handle,
+        id, 0, NULL, &sh_locker)) == 0) {
         if (sh_locker != NULL)
             ret = __lock_freelocker_int(lt, region, sh_locker, 1);
         else {
@@ -194,8 +192,10 @@ __lock_id_free(env, sh_locker)
     ENV *env;
     DB_LOCKER *sh_locker;
 {
+    DB_LOCKER locker;
     DB_LOCKREGION *region;
     DB_LOCKTAB *lt;
+    DB_MSGBUF mb;
     int ret;
 
     lt = env->lk_handle;
@@ -203,9 +203,14 @@ __lock_id_free(env, sh_locker)
     ret = 0;
 
     if (sh_locker->nlocks != 0) {
-        __db_errx(env, DB_STR("2046",
-            "Locker still has locks"));
-        ret = EINVAL;
+        locker = *sh_locker;
+        ret = USR_ERR(env, EINVAL);
+        __db_errx(env, DB_STR_A("2046",
+            "Locker %d still has %d locks", "%d %d"),
+            locker.id, locker.nlocks);
+        DB_MSGBUF_INIT(&mb);
+        (void)__lock_dump_locker(env, &mb, lt, sh_locker);
+        DB_MSGBUF_FLUSH(env, &mb);
         goto err;
     }
@@ -243,17 +248,19 @@ __lock_id_set(env, cur_id, max_id)
 }
 
 /*
- * __lock_getlocker --
- *	Get a locker in the locker hash table.  The create parameter
- *	indicates if the locker should be created if it doesn't exist in
- *	the table.
+ * __lock_getlocker, __lock_getlocker_int --
+ *	Get a locker in the locker hash table.  The create parameter indicates
+ *	whether the locker should be created if it doesn't exist in the table.
+ *	If there's a matching locker cached in the thread info, use that
+ *	without locking.
  *
- *	This must be called with the locker mutex lock if create == 1.
+ *	The internal version does not check the thread info cache; it must be
+ *	called with the locker mutex locked.
 *
 * PUBLIC: int __lock_getlocker __P((DB_LOCKTAB *,
 * PUBLIC:     u_int32_t, int, DB_LOCKER **));
 * PUBLIC: int __lock_getlocker_int __P((DB_LOCKTAB *,
- * PUBLIC:     u_int32_t, int, DB_LOCKER **));
+ * PUBLIC:     u_int32_t, int, DB_THREAD_INFO *, DB_LOCKER **));
 */
 int
 __lock_getlocker(lt, locker, create, retp)
@@ -263,32 +270,47 @@ __lock_getlocker(lt, locker, create, retp)
     DB_LOCKER **retp;
 {
     DB_LOCKREGION *region;
+    DB_THREAD_INFO *ip;
     ENV *env;
     int ret;
 
     COMPQUIET(region, NULL);
     env = lt->env;
     region = lt->reginfo.primary;
-
+    ENV_GET_THREAD_INFO(env, ip);
+
+    /* Check to see if the locker is already in the thread info */
+    if (ip != NULL && ip->dbth_local_locker != INVALID_ROFF) {
+        *retp = (DB_LOCKER *)
+            R_ADDR(&lt->reginfo, ip->dbth_local_locker);
+        if ((*retp)->id == locker) {
+            DB_ASSERT(env, !F_ISSET(*retp, DB_LOCKER_FREE));
+#ifdef HAVE_STATISTICS
+            region->stat.st_nlockers_hit++;
+#endif
+            return (0);
+        }
+    }
     LOCK_LOCKERS(env, region);
-    ret = __lock_getlocker_int(lt, locker, create, retp);
+    ret = __lock_getlocker_int(lt, locker, create, ip, retp);
     UNLOCK_LOCKERS(env, region);
-
     return (ret);
 }
 
 int
-__lock_getlocker_int(lt, locker, create, retp)
+__lock_getlocker_int(lt, locker, create, ip, retp)
     DB_LOCKTAB *lt;
     u_int32_t locker;
     int create;
+    DB_THREAD_INFO *ip;
     DB_LOCKER **retp;
 {
     DB_LOCKER *sh_locker;
     DB_LOCKREGION *region;
-    DB_THREAD_INFO *ip;
+#ifdef DIAGNOSTIC
+    DB_THREAD_INFO *diag;
+#endif
     ENV *env;
-    db_mutex_t mutex;
     u_int32_t i, indx, nlockers;
     int ret;
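The new thread-info check above lets __lock_getlocker() return a locker cached in the calling thread's DB_THREAD_INFO without taking the lockers mutex; the unlocked read is safe because the cached entry belongs to that thread alone. A hypothetical sketch of the same fast-path/slow-path split (none of these names are Berkeley DB's):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    #define TABLE_SIZE 64

    struct locker {
        uint32_t id;
    };

    struct thread_info {
        struct locker *local_locker;    /* this thread's cached entry */
    };

    static pthread_mutex_t lockers_mtx = PTHREAD_MUTEX_INITIALIZER;
    static struct locker table[TABLE_SIZE];

    /* Trivial stand-in for the mutex-protected hash-table lookup. */
    static struct locker *
    table_lookup(uint32_t id)
    {
        int i;

        for (i = 0; i < TABLE_SIZE; i++)
            if (table[i].id == id)
                return (&table[i]);
        return (NULL);
    }

    static struct locker *
    get_locker(struct thread_info *ip, uint32_t id)
    {
        struct locker *lkp;

        /*
         * Fast path: the cached entry is owned by this thread, so it
         * can be inspected without the table-wide mutex.
         */
        if (ip != NULL && ip->local_locker != NULL &&
            ip->local_locker->id == id)
            return (ip->local_locker);

        /* Slow path: the shared table requires the mutex. */
        (void)pthread_mutex_lock(&lockers_mtx);
        lkp = table_lookup(id);
        (void)pthread_mutex_unlock(&lockers_mtx);
        return (lkp);
    }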
@@ -304,59 +326,85 @@ __lock_getlocker_int(lt, locker, create, retp)
     SH_TAILQ_FOREACH(sh_locker, &lt->locker_tab[indx], links, __db_locker)
         if (sh_locker->id == locker)
             break;
+
     if (sh_locker == NULL && create) {
-        nlockers = 0;
-        /* Create new locker and then insert it into hash table. */
-        if ((ret = __mutex_alloc(env, MTX_LOGICAL_LOCK,
-            DB_MUTEX_LOGICAL_LOCK | DB_MUTEX_SELF_BLOCK,
-            &mutex)) != 0)
-            return (ret);
-        else
-            MUTEX_LOCK(env, mutex);
-        if ((sh_locker = SH_TAILQ_FIRST(
-            &region->free_lockers, __db_locker)) == NULL) {
-            nlockers = region->stat.st_lockers >> 2;
-            /* Just in case. */
-            if (nlockers == 0)
-                nlockers = 1;
-            if (region->stat.st_maxlockers != 0 &&
-                region->stat.st_maxlockers <
-                region->stat.st_lockers + nlockers)
-                nlockers = region->stat.st_maxlockers -
-                    region->stat.st_lockers;
-            /*
-             * Don't hold lockers when getting the region,
-             * we could deadlock.  When creating a locker
-             * there is no race since the id allocation
-             * is synchronized.
-             */
-            UNLOCK_LOCKERS(env, region);
-            LOCK_REGION_LOCK(env);
-            /*
-             * If the max memory is not sized for max objects,
-             * allocate as much as possible.
-             */
-            F_SET(&lt->reginfo, REGION_TRACKED);
-            while (__env_alloc(&lt->reginfo, nlockers *
-                sizeof(struct __db_locker), &sh_locker) != 0)
-                if ((nlockers >> 1) == 0)
-                    break;
-            F_CLR(&lt->reginfo, REGION_TRACKED);
-            LOCK_REGION_UNLOCK(lt->env);
-            LOCK_LOCKERS(env, region);
-            for (i = 0; i < nlockers; i++) {
+        /* Can we reuse a locker struct cached in the thread info? */
+        if (ip != NULL && ip->dbth_local_locker != INVALID_ROFF &&
+            (sh_locker = (DB_LOCKER*)R_ADDR(&lt->reginfo,
+            ip->dbth_local_locker))->id == DB_LOCK_INVALIDID) {
+            DB_ASSERT(env, !F_ISSET(sh_locker, DB_LOCKER_FREE));
+#ifdef HAVE_STATISTICS
+            region->stat.st_nlockers_reused++;
+#endif
+        } else {
+            /* Create new locker and insert it into hash table. */
+            if ((sh_locker = SH_TAILQ_FIRST(
+                &region->free_lockers, __db_locker)) == NULL) {
+                nlockers = region->stat.st_lockers >> 2;
+                /* Just in case. */
+                if (nlockers == 0)
+                    nlockers = 1;
+                if (region->stat.st_maxlockers != 0 &&
+                    region->stat.st_maxlockers <
+                    region->stat.st_lockers + nlockers)
+                    nlockers = region->stat.st_maxlockers -
+                        region->stat.st_lockers;
+                /*
+                 * Don't hold lockers when getting the region,
+                 * we could deadlock.  When creating a locker
+                 * there is no race since the id allocation
+                 * is synchronized.
+                 */
+                UNLOCK_LOCKERS(env, region);
+                LOCK_REGION_LOCK(env);
+                /*
+                 * If the max memory is not sized for max
+                 * objects, allocate as much as possible.
+                 */
+                F_SET(&lt->reginfo, REGION_TRACKED);
+                while (__env_alloc(&lt->reginfo, nlockers *
+                    sizeof(struct __db_locker),
+                    &sh_locker) != 0) {
+                    nlockers >>= 1;
+                    if (nlockers == 0)
+                        break;
+                }
+                F_CLR(&lt->reginfo, REGION_TRACKED);
+                LOCK_REGION_UNLOCK(lt->env);
+                LOCK_LOCKERS(env, region);
+                for (i = 0; i < nlockers; i++) {
+                    SH_TAILQ_INSERT_HEAD(
+                        &region->free_lockers,
+                        sh_locker, links, __db_locker);
+                    sh_locker->mtx_locker = MUTEX_INVALID;
+#ifdef DIAGNOSTIC
+                    sh_locker->prev_locker = INVALID_ROFF;
+#endif
+                    sh_locker++;
+                }
+                if (nlockers == 0)
+                    return (__lock_nomem(env,
+                        "locker entries"));
+                region->stat.st_lockers += nlockers;
+                sh_locker = SH_TAILQ_FIRST(
+                    &region->free_lockers, __db_locker);
+            }
+            SH_TAILQ_REMOVE(
+                &region->free_lockers,
+                sh_locker, links, __db_locker);
+        }
+        F_CLR(sh_locker, DB_LOCKER_FREE);
+        if (sh_locker->mtx_locker == MUTEX_INVALID) {
+            if ((ret = __mutex_alloc(env, MTX_LOGICAL_LOCK,
+                DB_MUTEX_LOGICAL_LOCK | DB_MUTEX_SELF_BLOCK,
+                &sh_locker->mtx_locker)) != 0) {
                 SH_TAILQ_INSERT_HEAD(&region->free_lockers,
                     sh_locker, links, __db_locker);
-                sh_locker++;
+                return (ret);
             }
-            if (nlockers == 0)
-                return (__lock_nomem(env, "locker entries"));
-            region->stat.st_lockers += nlockers;
-            sh_locker = SH_TAILQ_FIRST(
-                &region->free_lockers, __db_locker);
+            MUTEX_LOCK(env, sh_locker->mtx_locker);
         }
-        SH_TAILQ_REMOVE(
-            &region->free_lockers, sh_locker, links, __db_locker);
+
         ++region->nlockers;
 #ifdef HAVE_STATISTICS
         STAT_PERFMON2(env, lock, nlockers, region->nlockers, locker);
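The restructured allocation above keeps the old bulk-growth strategy: when the free list is empty, request a quarter of the current table size in one region allocation, halving the request until it fits, then thread the new structs onto the free list. A freestanding sketch using malloc in place of __env_alloc; names are hypothetical and the caller is assumed to hold the allocator's lock.

    #include <stdlib.h>

    struct locker {
        struct locker *free_next;
    };

    static struct locker *free_head;    /* protected by the caller */

    /* Grow the free list by roughly 'n' entries, halving on failure. */
    static int
    grow_free_list(unsigned int n)
    {
        struct locker *chunk;
        unsigned int i;

        if (n == 0)         /* just in case */
            n = 1;
        while ((chunk = malloc(n * sizeof(*chunk))) == NULL) {
            n >>= 1;        /* halve the request and retry */
            if (n == 0)
                return (-1);        /* nothing fits at all */
        }
        for (i = 0; i < n; i++) {       /* push each struct on the list */
            chunk[i].free_next = free_head;
            free_head = &chunk[i];
        }
        return (0);
    }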
@@ -365,10 +413,10 @@ __lock_getlocker_int(lt, locker, create, retp)
             region->stat.st_maxnlockers,
             region->nlockers, locker);
 #endif
+        sh_locker->id = locker;
         env->dbenv->thread_id(
             env->dbenv, &sh_locker->pid, &sh_locker->tid);
-        sh_locker->mtx_locker = mutex;
         sh_locker->dd_id = 0;
         sh_locker->master_locker = INVALID_ROFF;
         sh_locker->parent_locker = INVALID_ROFF;
@@ -386,10 +434,20 @@ __lock_getlocker_int(lt, locker, create, retp)
             &lt->locker_tab[indx], sh_locker, links, __db_locker);
         SH_TAILQ_INSERT_HEAD(&region->lockers,
             sh_locker, ulinks, __db_locker);
-        ENV_GET_THREAD_INFO(env, ip);
+
+        if (ip != NULL && ip->dbth_local_locker == INVALID_ROFF)
+            ip->dbth_local_locker =
+                R_OFFSET(&lt->reginfo, sh_locker);
 #ifdef DIAGNOSTIC
-        if (ip != NULL)
-            ip->dbth_locker = R_OFFSET(&lt->reginfo, sh_locker);
+        /*
+         * __db_has_pagelock checks for proper locking by dbth_locker.
+         */
+        if ((diag = ip) == NULL)
+            ENV_GET_THREAD_INFO(env, diag);
+        if (diag != NULL) {
+            sh_locker->prev_locker = diag->dbth_locker;
+            diag->dbth_locker = R_OFFSET(&lt->reginfo, sh_locker);
+        }
 #endif
     }
@@ -420,7 +478,7 @@ __lock_addfamilylocker(env, pid, id, is_family)
     LOCK_LOCKERS(env, region);
 
     /* get/create the parent locker info */
-    if ((ret = __lock_getlocker_int(lt, pid, 1, &mlockerp)) != 0)
+    if ((ret = __lock_getlocker_int(lt, pid, 1, NULL, &mlockerp)) != 0)
         goto err;
 
     /*
@@ -430,7 +488,7 @@ __lock_addfamilylocker(env, pid, id, is_family)
      * we manipulate it, nor can another child in the
      * family be created at the same time.
      */
-    if ((ret = __lock_getlocker_int(lt, id, 1, &lockerp)) != 0)
+    if ((ret = __lock_getlocker_int(lt, id, 1, NULL, &lockerp)) != 0)
         goto err;
 
     /* Point to our parent. */
@@ -466,9 +524,9 @@ err:	UNLOCK_LOCKERS(env, region);
 }
 
 /*
- * __lock_freelocker_int
+ * __lock_freelocker_int --
  *	Common code for deleting a locker; must be called with the
- *	locker bucket locked.
+ *	lockers mutex locked.
  */
 static int
 __lock_freelocker_int(lt, region, sh_locker, reallyfree)
@@ -478,15 +536,21 @@ __lock_freelocker_int(lt, region, sh_locker, reallyfree)
     int reallyfree;
 {
     ENV *env;
+    DB_MSGBUF mb;
+    DB_THREAD_INFO *ip;
     u_int32_t indx;
     int ret;
 
     env = lt->env;
-
-    if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) {
-        __db_errx(env, DB_STR("2047",
-            "Freeing locker with locks"));
-        return (EINVAL);
+    if (!SH_LIST_EMPTY(&sh_locker->heldby)) {
+        ret = USR_ERR(env, EINVAL);
+        __db_errx(env,
+            DB_STR("2060", "Freeing locker %x with locks"),
+            sh_locker->id);
+        DB_MSGBUF_INIT(&mb);
+        (void)__lock_dump_locker(env, &mb, lt, sh_locker);
+        DB_MSGBUF_FLUSH(env, &mb);
+        return (ret);
     }
 
     /* If this is part of a family, we must fix up its links. */
@@ -499,16 +563,29 @@ __lock_freelocker_int(lt, region, sh_locker, reallyfree)
         LOCKER_HASH(lt, region, sh_locker->id, indx);
         SH_TAILQ_REMOVE(&lt->locker_tab[indx], sh_locker,
             links, __db_locker);
-        if (sh_locker->mtx_locker != MUTEX_INVALID &&
-            (ret = __mutex_free(env, &sh_locker->mtx_locker)) != 0)
-            return (ret);
-        SH_TAILQ_INSERT_HEAD(&region->free_lockers, sh_locker,
-            links, __db_locker);
         SH_TAILQ_REMOVE(&region->lockers, sh_locker,
             ulinks, __db_locker);
         region->nlockers--;
         STAT_PERFMON2(env,
             lock, nlockers, region->nlockers, sh_locker->id);
+        /*
+         * If this locker is cached in the thread info, zero the id and
+         * leave it allocated. Otherwise, put it back on the free list.
+         */
+        ENV_GET_THREAD_INFO(env, ip);
+        if (ip != NULL && ip->dbth_local_locker ==
+            R_OFFSET(&lt->reginfo, sh_locker)) {
+            DB_ASSERT(env,
+                MUTEX_IS_BUSY(env, sh_locker->mtx_locker));
+            sh_locker->id = DB_LOCK_INVALIDID;
+        } else {
+            if (sh_locker->mtx_locker != MUTEX_INVALID && (ret =
+                __mutex_free(env, &sh_locker->mtx_locker)) != 0)
+                return (ret);
+            F_SET(sh_locker, DB_LOCKER_FREE);
+            SH_TAILQ_INSERT_HEAD(&region->free_lockers, sh_locker,
+                links, __db_locker);
+        }
     }
 
     return (0);
@@ -518,7 +595,7 @@ __lock_freelocker_int(lt, region, sh_locker, reallyfree)
  * __lock_freelocker
 *	Remove a locker its family from the hash table.
 *
- *	This must be called without the locker bucket locked.
+ *	This must be called without the lockers mutex locked.
 *
 * PUBLIC: int __lock_freelocker  __P((DB_LOCKTAB *, DB_LOCKER *));
 */
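__lock_freelocker_int() now distinguishes the thread-cached locker from ordinary ones: the cached struct keeps its allocation and mutex and only has its id invalidated, while others go back on the shared free list. A sketch of that split with hypothetical names; the real code also frees the locker's mutex on the free-list path.

    #include <stddef.h>
    #include <stdint.h>

    #define ID_INVALID  0U

    struct locker {
        struct locker *free_next;
        uint32_t id;
    };

    struct thread_info {
        struct locker *local_locker;
    };

    static struct locker *free_head;    /* shared free list, assumed locked */

    /*
     * Release a locker: the calling thread's cached entry stays
     * allocated with an invalid id for quick reuse; anything else is
     * pushed onto the shared free list.
     */
    static void
    put_locker(struct thread_info *ip, struct locker *lkp)
    {
        if (ip != NULL && ip->local_locker == lkp)
            lkp->id = ID_INVALID;   /* stays cached for quick reuse */
        else {
            lkp->free_next = free_head;
            free_head = lkp;
        }
    }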
@@ -570,3 +647,42 @@ __lock_familyremove(lt, sh_locker)
 
     return (ret);
 }
+
+/*
+ * __lock_local_locker_invalidate --
+ *	Search the thread info table's cached lockers and discard any
+ *	reference to this mutex.
+ *
+ * PUBLIC: int __lock_local_locker_invalidate __P((ENV *, db_mutex_t));
+ */
+int
+__lock_local_locker_invalidate(env, mutex)
+    ENV *env;
+    db_mutex_t mutex;
+{
+    DB_HASHTAB *htab;
+    DB_LOCKER *locker;
+    DB_THREAD_INFO *ip;
+    u_int32_t i;
+    char buf[DB_THREADID_STRLEN];
+
+    htab = env->thr_hashtab;
+    for (i = 0; i < env->thr_nbucket; i++) {
+        SH_TAILQ_FOREACH(ip, &htab[i], dbth_links, __db_thread_info) {
+            if (ip->dbth_local_locker == INVALID_ROFF)
+                continue;
+            locker = (DB_LOCKER *)R_ADDR(&env->lk_handle->reginfo,
+                ip->dbth_local_locker);
+            if (locker->mtx_locker == mutex) {
+                __db_msg(env,
+DB_STR_A("2061", "Removing cached locker mutex %lu reference by %s", "%lu %s"),
+                    (u_long)mutex,
+                    env->dbenv->thread_id_string(env->dbenv,
+                    locker->pid, locker->tid, buf));
+                locker->mtx_locker = MUTEX_INVALID;
+                return (0);
+            }
+        }
+    }
+    return (0);
+}
diff --git a/src/lock/lock_list.c b/src/lock/lock_list.c
index 1e3d2a55..5d55e4a0 100644
--- a/src/lock/lock_list.c
+++ b/src/lock/lock_list.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
diff --git a/src/lock/lock_method.c b/src/lock/lock_method.c
index 0cc2e19d..0e6c0428 100644
--- a/src/lock/lock_method.c
+++ b/src/lock/lock_method.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
diff --git a/src/lock/lock_region.c b/src/lock/lock_region.c
index 1aae1815..ecc7ba47 100644
--- a/src/lock/lock_region.c
+++ b/src/lock/lock_region.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -120,13 +120,15 @@ __lock_open(env)
     }
 
     /*
-     * A process joining the region may have reset the lock and transaction
-     * timeouts.
+     * Lock and transaction timeouts will be ignored when joining the
+     * environment, so print a warning if either was set.
      */
-    if (dbenv->lk_timeout != 0)
-        region->lk_timeout = dbenv->lk_timeout;
-    if (dbenv->tx_timeout != 0)
-        region->tx_timeout = dbenv->tx_timeout;
+    if (dbenv->lk_timeout != 0 && region->lk_timeout != dbenv->lk_timeout)
+        __db_msg(env, DB_STR("2058",
+"Warning: Ignoring DB_SET_LOCK_TIMEOUT when joining the environment."));
+    if (dbenv->tx_timeout != 0 && region->tx_timeout != dbenv->tx_timeout)
+        __db_msg(env, DB_STR("2059",
+"Warning: Ignoring DB_SET_TXN_TIMEOUT when joining the environment."));
 
     LOCK_REGION_UNLOCK(env);
     region_locked = 0;
@@ -396,13 +398,30 @@ __lock_env_refresh(env)
             R_ADDR(reginfo, lr->locker_mem_off));
     }
 
-    /* Detach from the region. */
-    ret = __env_region_detach(env, reginfo, 0);
+    ret = __lock_region_detach(env, lt);
 
-    /* Discard DB_LOCKTAB. */
-    __os_free(env, lt);
-    env->lk_handle = NULL;
+    return (ret);
+}
+
+/*
+ * __lock_region_detach --
+ *
+ * PUBLIC: int __lock_region_detach __P((ENV *, DB_LOCKTAB *));
+ */
+int
+__lock_region_detach(env, lt)
+    ENV *env;
+    DB_LOCKTAB *lt;
+{
+    int ret;
 
+    ret = 0;
+    if (lt != NULL) {
+        ret = __env_region_detach(env, &lt->reginfo, 0);
+        /* Discard DB_LOCKTAB. */
+        __os_free(env, lt);
+        env->lk_handle = NULL;
+    }
     return (ret);
 }
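The __lock_open() change above stops a joining process from overwriting the shared region's timeouts; it now only warns when the joiner's configured value differs from what the environment's creator set. A toy illustration of the check-and-warn behavior, with hypothetical types standing in for DB_LOCKREGION and DB_ENV:

    #include <stdio.h>
    #include <stdint.h>

    struct shared_region {      /* stand-in for DB_LOCKREGION */
        uint32_t lk_timeout;
    };

    struct local_config {       /* stand-in for the joiner's DB_ENV */
        uint32_t lk_timeout;
    };

    static void
    join_region(struct shared_region *region, const struct local_config *cfg)
    {
        /* Old behavior: region->lk_timeout = cfg->lk_timeout; */
        if (cfg->lk_timeout != 0 && region->lk_timeout != cfg->lk_timeout)
            fprintf(stderr,
        "Warning: Ignoring lock timeout when joining the environment.\n");
    }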
diff --git a/src/lock/lock_stat.c b/src/lock/lock_stat.c
index 11b934aa..1ce0796a 100644
--- a/src/lock/lock_stat.c
+++ b/src/lock/lock_stat.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -15,8 +15,6 @@
 #include "dbinc/db_am.h"
 
 #ifdef HAVE_STATISTICS
-static int __lock_dump_locker
-    __P((ENV *, DB_MSGBUF *, DB_LOCKTAB *, DB_LOCKER *));
 static int __lock_dump_object __P((DB_LOCKTAB *, DB_MSGBUF *, DB_LOCKOBJ *));
 static int __lock_print_all __P((ENV *, u_int32_t));
 static int __lock_print_stats __P((ENV *, u_int32_t));
@@ -363,6 +361,11 @@ __lock_print_stats(env, flags)
     __db_dl(env, "Maximum number of lockers at any one time",
         (u_long)sp->st_maxnlockers);
     __db_dl(env,
+        "Number of hits in the thread locker cache",
+        (u_long)sp->st_nlockers_hit);
+    __db_dl(env,
+        "Total number of lockers reused", (u_long)sp->st_nlockers_reused);
+    __db_dl(env,
         "Number of current lock objects", (u_long)sp->st_nobjects);
     __db_dl(env, "Maximum number of lock objects at any one time",
         (u_long)sp->st_maxnobjects);
@@ -463,9 +466,17 @@ __lock_print_all(env, flags)
     if (timespecisset(&lrp->next_timeout)) {
 #ifdef HAVE_STRFTIME
         time_t t = (time_t)lrp->next_timeout.tv_sec;
+        struct tm *tm_p;
         char tbuf[64];
+#ifdef HAVE_LOCALTIME_R
+        struct tm tm;
+
+        tm_p = localtime_r(&t, &tm);
+#else
+        tm_p = localtime(&t);
+#endif
         if (strftime(tbuf, sizeof(tbuf),
-            "%m-%d-%H:%M:%S", localtime(&t)) != 0)
+            "%m-%d-%H:%M:%S", tm_p) != 0)
             __db_msg(env, "next_timeout: %s.%09lu",
                 tbuf, (u_long)lrp->next_timeout.tv_nsec);
         else
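The localtime_r hunk above fixes a thread-safety hazard: localtime() returns a pointer to static storage, so concurrent statistics dumps could clobber each other's broken-down time; localtime_r() writes into a caller-supplied struct tm instead. HAVE_LOCALTIME_R is the configure-time macro used by the hunk; a standalone illustration of the same guard:

    #include <stdio.h>
    #include <time.h>

    static void
    print_local(time_t t)
    {
        struct tm *tm_p;
        char tbuf[64];
    #ifdef HAVE_LOCALTIME_R
        struct tm tm;

        tm_p = localtime_r(&t, &tm);    /* caller-supplied buffer */
    #else
        tm_p = localtime(&t);           /* shared static buffer */
    #endif
        if (tm_p != NULL &&
            strftime(tbuf, sizeof(tbuf), "%m-%d-%H:%M:%S", tm_p) != 0)
            printf("%s\n", tbuf);
    }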
@@ -519,80 +530,6 @@ __lock_print_all(env, flags)
 }
 
 static int
-__lock_dump_locker(env, mbp, lt, lip)
-    ENV *env;
-    DB_MSGBUF *mbp;
-    DB_LOCKTAB *lt;
-    DB_LOCKER *lip;
-{
-    DB_LOCKREGION *lrp;
-    struct __db_lock *lp;
-    char buf[DB_THREADID_STRLEN];
-    u_int32_t ndx;
-
-    lrp = lt->reginfo.primary;
-
-    __db_msgadd(env,
-        mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d pid/thread %s",
-        (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites,
-        env->dbenv->thread_id_string(env->dbenv, lip->pid, lip->tid, buf));
-    __db_msgadd(env, mbp,
-        " flags %-4x priority %-10u", lip->flags, lip->priority);
-
-    if (timespecisset(&lip->tx_expire)) {
-#ifdef HAVE_STRFTIME
-        time_t t = (time_t)lip->tx_expire.tv_sec;
-        char tbuf[64];
-        if (strftime(tbuf, sizeof(tbuf),
-            "%m-%d-%H:%M:%S", localtime(&t)) != 0)
-            __db_msgadd(env, mbp, "expires %s.%09lu",
-                tbuf, (u_long)lip->tx_expire.tv_nsec);
-        else
-#endif
-            __db_msgadd(env, mbp, "expires %lu.%09lu",
-                (u_long)lip->tx_expire.tv_sec,
-                (u_long)lip->tx_expire.tv_nsec);
-    }
-    if (F_ISSET(lip, DB_LOCKER_TIMEOUT))
-        __db_msgadd(
-            env, mbp, " lk timeout %lu", (u_long)lip->lk_timeout);
-    if (timespecisset(&lip->lk_expire)) {
-#ifdef HAVE_STRFTIME
-        time_t t = (time_t)lip->lk_expire.tv_sec;
-        char tbuf[64];
-        if (strftime(tbuf,
-            sizeof(tbuf), "%m-%d-%H:%M:%S", localtime(&t)) != 0)
-            __db_msgadd(env, mbp, " lk expires %s.%09lu",
-                tbuf, (u_long)lip->lk_expire.tv_nsec);
-        else
-#endif
-            __db_msgadd(env, mbp, " lk expires %lu.%09lu",
-                (u_long)lip->lk_expire.tv_sec,
-                (u_long)lip->lk_expire.tv_nsec);
-    }
-    DB_MSGBUF_FLUSH(env, mbp);
-
-    /*
-     * We need some care here since the list may change while we
-     * look.
-     */
-retry:	SH_LIST_FOREACH(lp, &lip->heldby, locker_links, __db_lock) {
-        if (!SH_LIST_EMPTY(&lip->heldby) && lp != NULL) {
-            ndx = lp->indx;
-            OBJECT_LOCK_NDX(lt, lrp, ndx);
-            if (lp->indx == ndx)
-                __lock_printlock(lt, mbp, lp, 1);
-            else {
-                OBJECT_UNLOCK(lt, lrp, ndx);
-                goto retry;
-            }
-            OBJECT_UNLOCK(lt, lrp, ndx);
-        }
-    }
-    return (0);
-}
-
-static int
 __lock_dump_object(lt, mbp, op)
     DB_LOCKTAB *lt;
     DB_MSGBUF *mbp;
@@ -619,6 +556,31 @@ __lock_print_header(env)
         "Count", "Status", "----------------- Object ---------------");
 }
 
+#else /* !HAVE_STATISTICS */
+
+int
+__lock_stat_pp(dbenv, statp, flags)
+    DB_ENV *dbenv;
+    DB_LOCK_STAT **statp;
+    u_int32_t flags;
+{
+    COMPQUIET(statp, NULL);
+    COMPQUIET(flags, 0);
+
+    return (__db_stat_not_built(dbenv->env));
+}
+
+int
+__lock_stat_print_pp(dbenv, flags)
+    DB_ENV *dbenv;
+    u_int32_t flags;
+{
+    COMPQUIET(flags, 0);
+
+    return (__db_stat_not_built(dbenv->env));
+}
+#endif
+
 /*
  * __lock_printlock --
  *
@@ -744,27 +706,81 @@ __lock_printlock(lt, mbp, lp, ispgno)
     DB_MSGBUF_FLUSH(env, mbp);
 }
 
-#else /* !HAVE_STATISTICS */
-
+/*
+ * __lock_dump_locker --
+ *	Display the identity and statistics of a locker. This is used during
+ *	diagnostic error paths as well as when printing statistics.
+ *
+ * PUBLIC: int __lock_dump_locker
+ * PUBLIC:     __P((ENV *, DB_MSGBUF *, DB_LOCKTAB *, DB_LOCKER *));
+ */
 int
-__lock_stat_pp(dbenv, statp, flags)
-    DB_ENV *dbenv;
-    DB_LOCK_STAT **statp;
-    u_int32_t flags;
+__lock_dump_locker(env, mbp, lt, lip)
+    ENV *env;
+    DB_MSGBUF *mbp;
+    DB_LOCKTAB *lt;
+    DB_LOCKER *lip;
 {
-    COMPQUIET(statp, NULL);
-    COMPQUIET(flags, 0);
+    DB_LOCKREGION *lrp;
+    struct __db_lock *lp;
+    char buf[DB_THREADID_STRLEN];
+    u_int32_t ndx;
 
-    return (__db_stat_not_built(dbenv->env));
-}
+    lrp = lt->reginfo.primary;
 
-int
-__lock_stat_print_pp(dbenv, flags)
-    DB_ENV *dbenv;
-    u_int32_t flags;
-{
-    COMPQUIET(flags, 0);
+    __db_msgadd(env,
+        mbp, "%8lx dd=%2ld locks held %-4d write locks %-4d pid/thread %s",
+        (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites,
+        env->dbenv->thread_id_string(env->dbenv, lip->pid, lip->tid, buf));
+    __db_msgadd(env, mbp,
+        " flags %-4x priority %-10u", lip->flags, lip->priority);
 
-    return (__db_stat_not_built(dbenv->env));
-}
+    if (timespecisset(&lip->tx_expire)) {
+#ifdef HAVE_STRFTIME
+        time_t t = (time_t)lip->tx_expire.tv_sec;
+        char tbuf[64];
+        if (strftime(tbuf, sizeof(tbuf),
+            "%m-%d-%H:%M:%S", localtime(&t)) != 0)
+            __db_msgadd(env, mbp, "expires %s.%09lu",
+                tbuf, (u_long)lip->tx_expire.tv_nsec);
+        else
 #endif
+            __db_msgadd(env, mbp, "expires %lu.%09lu",
+                (u_long)lip->tx_expire.tv_sec,
+                (u_long)lip->tx_expire.tv_nsec);
+    }
+    if (F_ISSET(lip, DB_LOCKER_TIMEOUT))
+        __db_msgadd(
+            env, mbp, " lk timeout %lu", (u_long)lip->lk_timeout);
+    if (timespecisset(&lip->lk_expire)) {
+#ifdef HAVE_STRFTIME
+        time_t t = (time_t)lip->lk_expire.tv_sec;
+        char tbuf[64];
+        if (strftime(tbuf,
+            sizeof(tbuf), "%m-%d-%H:%M:%S", localtime(&t)) != 0)
+            __db_msgadd(env, mbp, " lk expires %s.%09lu",
+                tbuf, (u_long)lip->lk_expire.tv_nsec);
+        else
+#endif
+            __db_msgadd(env, mbp, " lk expires %lu.%09lu",
+                (u_long)lip->lk_expire.tv_sec,
+                (u_long)lip->lk_expire.tv_nsec);
+    }
+    DB_MSGBUF_FLUSH(env, mbp);
+
+    /* We need some care here since the list may change while we look. */
+retry:	SH_LIST_FOREACH(lp, &lip->heldby, locker_links, __db_lock) {
+        if (!SH_LIST_EMPTY(&lip->heldby) && lp != NULL) {
+            ndx = lp->indx;
+            OBJECT_LOCK_NDX(lt, lrp, ndx);
+            if (lp->indx == ndx)
+                __lock_printlock(lt, mbp, lp, 1);
+            else {
+                OBJECT_UNLOCK(lt, lrp, ndx);
+                goto retry;
+            }
+            OBJECT_UNLOCK(lt, lrp, ndx);
+        }
+    }
+    return (0);
+}
diff --git a/src/lock/lock_stub.c b/src/lock/lock_stub.c
index 3875af55..a916c6df 100644
--- a/src/lock/lock_stub.c
+++ b/src/lock/lock_stub.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
@@ -359,6 +359,7 @@ size_t
 __lock_region_max(env)
     ENV *env;
 {
+    COMPQUIET(env, NULL);
     return (0);
 }
 
@@ -367,6 +368,7 @@ __lock_region_size(env, other_alloc)
     ENV *env;
     size_t other_alloc;
 {
+    COMPQUIET(env, NULL);
     COMPQUIET(other_alloc, 0);
     return (0);
 }
@@ -584,6 +586,7 @@ __lock_list_print(env, mbp, list)
     DBT *list;
 {
     COMPQUIET(env, NULL);
+    COMPQUIET(mbp, NULL);
     COMPQUIET(list, NULL);
 }
 
@@ -625,7 +628,7 @@ __lock_change(env, old_lock, new_lock)
     ENV *env;
     DB_LOCK *old_lock, *new_lock;
 {
-    COMPQUIET(env, NULL);
     COMPQUIET(old_lock, NULL);
     COMPQUIET(new_lock, NULL);
+
     return (__db_nolocking(env));
 }
diff --git a/src/lock/lock_timer.c b/src/lock/lock_timer.c
index 943047f0..9744438a 100644
--- a/src/lock/lock_timer.c
+++ b/src/lock/lock_timer.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */
diff --git a/src/lock/lock_util.c b/src/lock/lock_util.c
index f7029cd7..07fdce72 100644
--- a/src/lock/lock_util.c
+++ b/src/lock/lock_util.c
@@ -1,7 +1,7 @@
 /*-
  * See the file LICENSE for redistribution information.
  *
- * Copyright (c) 1996, 2012 Oracle and/or its affiliates.  All rights reserved.
+ * Copyright (c) 1996, 2015 Oracle and/or its affiliates.  All rights reserved.
  *
  * $Id$
  */