author      Nirbhay Choubey <nirbhay@skysql.com>    2014-05-21 11:09:55 -0400
committer   Nirbhay Choubey <nirbhay@skysql.com>    2014-05-21 11:09:55 -0400
commit      086af8367ed2499adae378638225ceb14c85f046 (patch)
tree        953720d86a4decd67a24b560ffbe277900ff9609 /storage/innobase/lock
parent      558995ad84ca1348dfe681a8d111650225fcc205 (diff)
parent      1170a54060168d885cbf682836342d4fc4ccae1a (diff)
download    mariadb-git-086af8367ed2499adae378638225ceb14c85f046.tar.gz
bzr merge -r4209 maria/10.0.
Diffstat (limited to 'storage/innobase/lock')
-rw-r--r--  storage/innobase/lock/lock0lock.cc  179
1 file changed, 81 insertions(+), 98 deletions(-)
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index ae32738cb17..a9556b49ac6 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -858,16 +858,11 @@ lock_reset_lock_and_trx_wait(
/*=========================*/
lock_t* lock) /*!< in/out: record lock */
{
+ ut_ad(lock->trx->lock.wait_lock == lock);
ut_ad(lock_get_wait(lock));
ut_ad(lock_mutex_own());
- /* Reset the back pointer in trx to this waiting lock request */
- if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
- ut_ad(lock->trx->lock.wait_lock == lock);
- lock->trx->lock.wait_lock = NULL;
- } else {
- ut_ad(lock_get_type_low(lock) == LOCK_REC);
- }
+ lock->trx->lock.wait_lock = NULL;
lock->type_mode &= ~LOCK_WAIT;
}
@@ -1550,11 +1545,11 @@ lock_rec_has_expl(
if (lock->trx == trx
&& !lock_rec_get_insert_intention(lock)
- && !lock_is_wait_not_by_other(lock->type_mode)
&& lock_mode_stronger_or_eq(
lock_get_mode(lock),
static_cast<enum lock_mode>(
precise_mode & LOCK_MODE_MASK))
+ && !lock_get_wait(lock)
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
@@ -1802,6 +1797,57 @@ lock_sec_rec_some_has_impl(
return(trx_id);
}
+#ifdef UNIV_DEBUG
+/*********************************************************************//**
+Checks if some transaction, other than given trx_id, has an explicit
+lock on the given rec, in the given precise_mode.
+@return the transaction, whose id is not equal to trx_id, that has an
+explicit lock on the given rec, in the given precise_mode or NULL.*/
+static
+trx_t*
+lock_rec_other_trx_holds_expl(
+/*==========================*/
+ ulint precise_mode, /*!< in: LOCK_S or LOCK_X
+ possibly ORed to LOCK_GAP or
+ LOCK_REC_NOT_GAP. */
+ trx_id_t trx_id, /*!< in: trx holding implicit
+ lock on rec */
+ const rec_t* rec, /*!< in: user record */
+ const buf_block_t* block) /*!< in: buffer block
+ containing the record */
+{
+ trx_t* holds = NULL;
+
+ lock_mutex_enter();
+
+ if (trx_t *impl_trx = trx_rw_is_active(trx_id, NULL)) {
+ ulint heap_no = page_rec_get_heap_no(rec);
+ mutex_enter(&trx_sys->mutex);
+
+ for (trx_t* t = UT_LIST_GET_FIRST(trx_sys->rw_trx_list);
+ t != NULL;
+ t = UT_LIST_GET_NEXT(trx_list, t)) {
+
+ lock_t *expl_lock = lock_rec_has_expl(
+ precise_mode, block, heap_no, t);
+
+ if (expl_lock && expl_lock->trx != impl_trx) {
+ /* An explicit lock is held by trx other than
+ the trx holding the implicit lock. */
+ holds = expl_lock->trx;
+ break;
+ }
+ }
+
+ mutex_exit(&trx_sys->mutex);
+ }
+
+ lock_mutex_exit();
+
+ return(holds);
+}
+#endif /* UNIV_DEBUG */
+
/*********************************************************************//**
Return approximate number or record locks (bits set in the bitmap) for
this transaction. Since delete-marked records may be removed, the
@@ -2022,7 +2068,7 @@ lock_rec_create(
}
ut_ad(trx_mutex_own(trx));
- if (lock_is_wait_not_by_other(type_mode)) {
+ if (type_mode & LOCK_WAIT) {
lock_set_lock_and_trx_wait(lock, trx);
}
@@ -2065,12 +2111,11 @@ lock_rec_enqueue_waiting(
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
- lock_t* lock, /*!< in: lock object; NULL if a new
- one should be created. */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
+ lock_t* lock;
trx_id_t victim_trx_id;
ut_ad(lock_mutex_own());
@@ -2108,31 +2153,13 @@ lock_rec_enqueue_waiting(
ut_ad(0);
}
- if (lock == NULL) {
- /* Enqueue the lock request that will wait
- to be granted, note that we already own
- the trx mutex. */
+ /* Enqueue the lock request that will wait to be granted, note that
+ we already own the trx mutex. */
+ lock = lock_rec_create(
#ifdef WITH_WSREP
- if (wsrep_on(trx->mysql_thd) &&
- trx->lock.was_chosen_as_deadlock_victim) {
- return(DB_DEADLOCK);
- }
- lock = lock_rec_create(
- c_lock, thr,
- type_mode | LOCK_WAIT, block, heap_no,
- index, trx, TRUE);
-#else
- lock = lock_rec_create(
- type_mode | LOCK_WAIT, block, heap_no,
- index, trx, TRUE);
-#endif /*WITH_WSREP */
- } else {
- ut_ad(lock->type_mode & LOCK_WAIT);
- ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
-
- lock->type_mode &= ~LOCK_CONV_BY_OTHER;
- lock_set_lock_and_trx_wait(lock, trx);
- }
+ c_lock, thr,
+#endif /* WITH_WSREP */
+ type_mode | LOCK_WAIT, block, heap_no, index, trx, TRUE);
/* Release the mutex to obey the latching order.
This is safe, because lock_deadlock_check_and_resolve()
@@ -2428,7 +2455,6 @@ lock_rec_lock_slow(
#ifdef WITH_WSREP
lock_t* c_lock(NULL);
#endif
- lock_t* lock;
dberr_t err = DB_SUCCESS;
ut_ad(lock_mutex_own());
@@ -2448,26 +2474,7 @@ lock_rec_lock_slow(
trx = thr_get_trx(thr);
trx_mutex_enter(trx);
- lock = lock_rec_has_expl(mode, block, heap_no, trx);
- if (lock) {
- if (lock->type_mode & LOCK_CONV_BY_OTHER) {
- /* This lock or lock waiting was created by the other
- transaction, not by the transaction (trx) itself.
- So, the transaction (trx) should treat it collectly
- according as whether granted or not. */
-
- if (lock->type_mode & LOCK_WAIT) {
- /* This lock request was not granted yet.
- Should wait for granted. */
-
- goto enqueue_waiting;
- } else {
- /* This lock request was already granted.
- Just clearing the flag. */
-
- lock->type_mode &= ~LOCK_CONV_BY_OTHER;
- }
- }
+ if (lock_rec_has_expl(mode, block, heap_no, trx)) {
/* The trx already has a strong enough lock on rec: do
nothing */
@@ -2486,18 +2493,15 @@ lock_rec_lock_slow(
have a lock strong enough already granted on the
record, we have to wait. */
- ut_ad(lock == NULL);
-enqueue_waiting:
#ifdef WITH_WSREP
/* c_lock is NULL here if jump to enqueue_waiting happened
but it's ok because lock is not NULL in that case and c_lock
is not used. */
- err = lock_rec_enqueue_waiting(
- c_lock, mode, block, heap_no,
- lock, index, thr);
+ err=
+ lock_rec_enqueue_waiting(c_lock, mode, block, heap_no, index, thr);
#else
err = lock_rec_enqueue_waiting(
- mode, block, heap_no, lock, index, thr);
+ mode, block, heap_no, index, thr);
#endif /* WITH_WSREP */
} else if (!impl) {
@@ -2594,7 +2598,7 @@ lock_rec_has_to_wait_in_queue(
heap_no = lock_rec_find_set_bit(wait_lock);
bit_offset = heap_no / 8;
- bit_mask = 1 << (heap_no % 8);
+ bit_mask = static_cast<ulint>(1 << (heap_no % 8));
for (lock = lock_rec_get_first_on_page_addr(space, page_no);
lock != wait_lock;
@@ -2654,8 +2658,7 @@ lock_grant(
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
for it */
- if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
- && lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
+ if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
que_thr_t* thr;
thr = que_thr_end_lock_wait(lock->trx);
@@ -2682,7 +2685,6 @@ lock_rec_cancel(
ut_ad(lock_mutex_own());
ut_ad(lock_get_type_low(lock) == LOCK_REC);
- ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
@@ -2849,12 +2851,8 @@ lock_rec_reset_and_release_wait(
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
- if (lock_is_wait_not_by_other(lock->type_mode)) {
+ if (lock_get_wait(lock)) {
lock_rec_cancel(lock);
- } else if (lock_get_wait(lock)) {
- /* just reset LOCK_WAIT */
- lock_rec_reset_nth_bit(lock, heap_no);
- lock_reset_lock_and_trx_wait(lock);
} else {
lock_rec_reset_nth_bit(lock, heap_no);
}
@@ -4181,7 +4179,7 @@ lock_deadlock_search(
}
ut_a(lock == NULL && ctx->depth == 0);
-
+
/* No deadlock found. */
return(0);
}
@@ -4345,7 +4343,6 @@ lock_table_create(
ut_ad(table && trx);
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(trx));
- ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
/* Non-locking autocommit read-only transactions should not set
any locks. */
@@ -5551,11 +5548,14 @@ lock_print_info_summary(
the state of the variable for display. */
switch (purge_sys->state){
- case PURGE_STATE_EXIT:
case PURGE_STATE_INIT:
/* Should never be in this state while the system is running. */
ut_error;
+ case PURGE_STATE_EXIT:
+ fprintf(file, "exited");
+ break;
+
case PURGE_STATE_DISABLED:
fprintf(file, "disabled");
break;
@@ -6349,11 +6349,11 @@ lock_rec_insert_check_and_lock(
#ifdef WITH_WSREP
err = lock_rec_enqueue_waiting(c_lock,
LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION,
- block, next_rec_heap_no, NULL, index, thr);
+ block, next_rec_heap_no, index, thr);
#else
err = lock_rec_enqueue_waiting(
LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION,
- block, next_rec_heap_no, NULL, index, thr);
+ block, next_rec_heap_no, index, thr);
#endif /* WITH_WSREP */
trx_mutex_exit(trx);
@@ -6432,6 +6432,9 @@ lock_rec_convert_impl_to_expl(
/* The transaction can be committed before the
trx_is_active(trx_id, NULL) check below, because we are not
holding lock_mutex. */
+
+ ut_ad(!lock_rec_other_trx_holds_expl(LOCK_S | LOCK_REC_NOT_GAP,
+ trx_id, rec, block));
}
if (trx_id != 0) {
@@ -6450,29 +6453,10 @@ lock_rec_convert_impl_to_expl(
if (impl_trx != NULL
&& !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
- heap_no, impl_trx)) {
+ heap_no, impl_trx)) {
ulint type_mode = (LOCK_REC | LOCK_X
| LOCK_REC_NOT_GAP);
- /* If the delete-marked record was locked already,
- we should reserve lock waiting for impl_trx as
- implicit lock. Because cannot lock at this moment.*/
-
- if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
-#ifdef WITH_WSREP
- && !wsrep_thd_is_BF(impl_trx->mysql_thd, FALSE)
- /* BF-BF conflict is possible if advancing into
- lock_rec_other_has_conflicting*/
-#endif /* WITH_WSREP */
- && lock_rec_other_has_conflicting(
- static_cast<enum lock_mode>
- (LOCK_X | LOCK_REC_NOT_GAP), block,
- heap_no, impl_trx)) {
-
- type_mode |= (LOCK_WAIT
- | LOCK_CONV_BY_OTHER);
- }
-
lock_rec_add_to_queue(
type_mode, block, heap_no, index,
impl_trx, FALSE);
@@ -7120,7 +7104,6 @@ lock_cancel_waiting_and_release(
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(lock->trx));
- ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
lock->trx->lock.cancel = TRUE;
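
With the LOCK_CONV_BY_OTHER special case removed in the hunks above, a waiting record lock is always owned by the transaction that waits on it, so lock_reset_lock_and_trx_wait() can clear trx->lock.wait_lock unconditionally. The standalone sketch below illustrates that invariant in isolation; the structs and the set-side helper are toy stand-ins chosen for the example, not InnoDB's real lock_t/trx_t, and the flag value is illustrative.

    #include <cassert>
    #include <cstdint>

    // Toy stand-ins for InnoDB's lock_t / trx_t; illustrative only.
    struct Trx;

    static const uint32_t LOCK_WAIT = 256;  // waiting-bit flag (illustrative value)

    struct Lock {
        Trx*     trx;        // transaction that owns this lock request
        uint32_t type_mode;  // lock mode and flag bits
    };

    struct Trx {
        Lock* wait_lock = nullptr;  // back pointer to the lock this trx waits for
    };

    // Set side, assumed for symmetry with the reset helper: mark the lock
    // as waited-for and point the owning transaction back at it.
    void set_lock_and_trx_wait(Lock* lock, Trx* trx) {
        assert(trx->wait_lock == nullptr);
        trx->wait_lock = lock;
        lock->type_mode |= LOCK_WAIT;
    }

    // Shape of the simplified reset: since the waiting lock always belongs
    // to the transaction that waits, the back pointer is cleared
    // unconditionally instead of branching on LOCK_CONV_BY_OTHER.
    void reset_lock_and_trx_wait(Lock* lock) {
        assert(lock->trx->wait_lock == lock);
        assert(lock->type_mode & LOCK_WAIT);
        lock->trx->wait_lock = nullptr;
        lock->type_mode &= ~LOCK_WAIT;
    }

    int main() {
        Trx  trx;
        Lock lock{&trx, 0};
        set_lock_and_trx_wait(&lock, &trx);
        reset_lock_and_trx_wait(&lock);
        assert(trx.wait_lock == nullptr && (lock.type_mode & LOCK_WAIT) == 0);
    }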