summaryrefslogtreecommitdiff
path: root/storage/innobase/lock
diff options
context:
space:
mode:
authorVlad Lesin <vlad_lesin@mail.ru>2022-03-07 13:03:53 +0300
committerVlad Lesin <vlad_lesin@mail.ru>2022-03-07 16:42:05 +0300
commit86c1bf118a48dd0bab80346f6d65c112ab2e486d (patch)
tree288fdc130a34fe06a7ffd639dc1d4bad3f44f3ce /storage/innobase/lock
parent02da00a98ceb8f82a65ed72a74b53b6d5ae3d144 (diff)
downloadmariadb-git-86c1bf118a48dd0bab80346f6d65c112ab2e486d.tar.gz
MDEV-27992 DELETE fails to delete record after blocking is released
MDEV-27025 allows records to be inserted before the record on which DELETE is locked; as a result the DELETE misses those records, which causes a serious ACID violation. Revert MDEV-27025, MDEV-27550. A test which shows the scenario of the ACID violation is added.
Diffstat (limited to 'storage/innobase/lock')
-rw-r--r--storage/innobase/lock/lock0lock.cc287
-rw-r--r--storage/innobase/lock/lock0prdt.cc22
2 files changed, 120 insertions, 189 deletions
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 77e98e966c4..37e23b56dfc 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2021, 2022, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1144,18 +1144,19 @@ static void wsrep_kill_victim(const trx_t * const trx, const lock_t *lock)
/*********************************************************************//**
Checks if some other transaction has a conflicting explicit lock request
in the queue, so that we have to wait.
-@param[in] mode LOCK_S or LOCK_X, possibly ORed to LOCK_GAP or LOC_REC_NOT_GAP,
-LOCK_INSERT_INTENTION
-@param[in] block buffer block containing the record
-@param[in] heap_no heap number of the record
-@param[in] trx our transaction
-@param[out] was_ignored true if conflicting locks waiting for the current
-transaction were ignored
@return lock or NULL */
-static lock_t *lock_rec_other_has_conflicting(ulint mode,
- const buf_block_t *block,
- ulint heap_no, const trx_t *trx,
- bool *was_ignored= NULL)
+static
+lock_t*
+lock_rec_other_has_conflicting(
+/*===========================*/
+ ulint mode, /*!< in: LOCK_S or LOCK_X,
+ possibly ORed to LOCK_GAP or
+ LOC_REC_NOT_GAP,
+ LOCK_INSERT_INTENTION */
+ const buf_block_t* block, /*!< in: buffer block containing
+ the record */
+ ulint heap_no,/*!< in: heap number of the record */
+ const trx_t* trx) /*!< in: our transaction */
{
lock_t* lock;
@@ -1167,16 +1168,6 @@ static lock_t *lock_rec_other_has_conflicting(ulint mode,
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
- /* There can't be lock loops for one record, because
- all waiting locks of the record will always wait for the same
- lock of the record in a cell array, and check for
- conflicting lock will always start with the first lock for the
- heap_no, and go ahead with the same order(the order of the
- locks in the cell array) */
- if (lock_get_wait(lock) && lock->trx->lock.wait_trx == trx) {
- if (was_ignored) *was_ignored= true;
- continue;
- }
if (lock_rec_has_to_wait(true, trx, mode, lock, is_supremum)) {
#ifdef WITH_WSREP
if (trx->is_wsrep()) {
@@ -1333,7 +1324,6 @@ static void check_trx_state(const trx_t *trx)
/** Create a new record lock and inserts it to the lock queue,
without checking for deadlocks or conflicts.
-@param[in] c_lock conflicting lock
@param[in] type_mode lock mode and wait flag; type will be replaced
with LOCK_REC
@param[in] space tablespace id
@@ -1343,15 +1333,11 @@ without checking for deadlocks or conflicts.
@param[in] index the index tree
@param[in,out] trx transaction
@param[in] holds_trx_mutex whether the caller holds trx->mutex
-@param[in] insert_before_waiting if true, inserts new B-tree record lock
-just after the last non-waiting lock of the current transaction which is
-located before the first waiting for the current transaction lock, otherwise
-the lock is inserted at the end of the queue
@return created lock */
lock_t*
lock_rec_create_low(
- lock_t* c_lock,
#ifdef WITH_WSREP
+ lock_t* c_lock, /*!< conflicting lock */
que_thr_t* thr, /*!< thread owning trx */
#endif
ulint type_mode,
@@ -1361,8 +1347,7 @@ lock_rec_create_low(
ulint heap_no,
dict_index_t* index,
trx_t* trx,
- bool holds_trx_mutex,
- bool insert_before_waiting)
+ bool holds_trx_mutex)
{
lock_t* lock;
ulint n_bits;
@@ -1472,7 +1457,7 @@ lock_rec_create_low(
}
trx->lock.que_state = TRX_QUE_LOCK_WAIT;
- lock_set_lock_and_trx_wait(lock, trx, c_lock);
+ lock_set_lock_and_trx_wait(lock, trx);
UT_LIST_ADD_LAST(trx->lock.trx_locks, lock);
trx->lock.wait_thr = thr;
@@ -1500,46 +1485,15 @@ lock_rec_create_low(
trx_mutex_exit(c_lock->trx);
} else
#endif /* WITH_WSREP */
- if (insert_before_waiting
- && !(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE))) {
- /* Try to insert the lock just after the last non-waiting
- lock of the current transaction which immediately
- precedes the first waiting lock request. */
- uint32_t lock_hash_val = lock_rec_hash(space, page_no);
- hash_cell_t& cell = lock_sys.rec_hash->array[lock_hash_val];
-
- lock_t* last_non_waiting = NULL;
-
- for (lock_t* l = lock_rec_get_first(lock_sys.rec_hash, space,
- page_no, lock_hash_val, heap_no); l;
- l = lock_rec_get_next(heap_no, l)) {
- if (lock_get_wait(lock)
- && l->trx->lock.wait_trx == trx) {
- break;
- }
- if (l->trx == trx) {
- last_non_waiting = l;
- }
- }
-
- if (!last_non_waiting) {
- goto append_last;
- }
-
- cell.insert_after(*last_non_waiting, *lock, &lock_t::hash);
- }
- else {
-append_last:
- if (!(type_mode & (LOCK_WAIT | LOCK_PREDICATE | LOCK_PRDT_PAGE))
- && innodb_lock_schedule_algorithm
- == INNODB_LOCK_SCHEDULE_ALGORITHM_VATS
- && !thd_is_replication_slave_thread(trx->mysql_thd)) {
- HASH_PREPEND(lock_t, hash, lock_sys.rec_hash,
- lock_rec_fold(space, page_no), lock);
- } else {
- HASH_INSERT(lock_t, hash, lock_hash_get(type_mode),
- lock_rec_fold(space, page_no), lock);
- }
+ if (!(type_mode & (LOCK_WAIT | LOCK_PREDICATE | LOCK_PRDT_PAGE))
+ && innodb_lock_schedule_algorithm
+ == INNODB_LOCK_SCHEDULE_ALGORITHM_VATS
+ && !thd_is_replication_slave_thread(trx->mysql_thd)) {
+ HASH_PREPEND(lock_t, hash, lock_sys.rec_hash,
+ lock_rec_fold(space, page_no), lock);
+ } else {
+ HASH_INSERT(lock_t, hash, lock_hash_get(type_mode),
+ lock_rec_fold(space, page_no), lock);
}
if (!holds_trx_mutex) {
@@ -1547,7 +1501,7 @@ append_last:
}
ut_ad(trx_mutex_own(trx));
if (type_mode & LOCK_WAIT) {
- lock_set_lock_and_trx_wait(lock, trx, c_lock);
+ lock_set_lock_and_trx_wait(lock, trx);
}
UT_LIST_ADD_LAST(trx->lock.trx_locks, lock);
if (!holds_trx_mutex) {
@@ -1707,7 +1661,6 @@ lock_rec_insert_to_head(
/** Enqueue a waiting request for a lock which cannot be granted immediately.
Check for deadlocks.
-@param[in] c_lock conflicting lock
@param[in] type_mode the requested lock mode (LOCK_S or LOCK_X)
possibly ORed with LOCK_GAP or
LOCK_REC_NOT_GAP, ORed with
@@ -1726,7 +1679,9 @@ Check for deadlocks.
(or it happened to commit) */
dberr_t
lock_rec_enqueue_waiting(
- lock_t* c_lock,
+#ifdef WITH_WSREP
+ lock_t* c_lock, /*!< conflicting lock */
+#endif
ulint type_mode,
const buf_block_t* block,
ulint heap_no,
@@ -1764,9 +1719,9 @@ lock_rec_enqueue_waiting(
/* Enqueue the lock request that will wait to be granted, note that
we already own the trx mutex. */
- lock_t* lock = lock_rec_create(c_lock,
+ lock_t* lock = lock_rec_create(
#ifdef WITH_WSREP
- thr,
+ c_lock, thr,
#endif
type_mode | LOCK_WAIT, block, heap_no, index, trx, TRUE);
@@ -1830,20 +1785,22 @@ on the record, and the request to be added is not a waiting request, we
can reuse a suitable record lock object already existing on the same page,
just setting the appropriate bit in its bitmap. This is a low-level function
which does NOT check for deadlocks or lock compatibility!
-@param[in] type_mode lock mode, wait, gap etc. flags; type is ignored and
-replaced by LOCK_REC
-@param[in] block buffer block containing the record
-@param[in] heap_no heap number of the record
-@param[in] index index of record
-@param[in,out] trx transaction
-@param[in] caller_owns_trx_mutex, TRUE if caller owns the transaction mutex
-@param[in] insert_before_waiting true=insert B-tree record lock right before
-a waiting lock request; false=insert the lock at the end of the queue
@return lock where the bit was set */
-static void lock_rec_add_to_queue(ulint type_mode, const buf_block_t *block,
- ulint heap_no, dict_index_t *index,
- trx_t *trx, bool caller_owns_trx_mutex,
- bool insert_before_waiting= false)
+static
+void
+lock_rec_add_to_queue(
+/*==================*/
+ ulint type_mode,/*!< in: lock mode, wait, gap
+ etc. flags; type is ignored
+ and replaced by LOCK_REC */
+ const buf_block_t* block, /*!< in: buffer block containing
+ the record */
+ ulint heap_no,/*!< in: heap number of the record */
+ dict_index_t* index, /*!< in: index of record */
+ trx_t* trx, /*!< in/out: transaction */
+ bool caller_owns_trx_mutex)
+ /*!< in: TRUE if caller owns the
+ transaction mutex */
{
#ifdef UNIV_DEBUG
ut_ad(lock_mutex_own());
@@ -1932,16 +1889,11 @@ static void lock_rec_add_to_queue(ulint type_mode, const buf_block_t *block,
}
}
- /* Note: We will not pass any conflicting lock to lock_rec_create(),
- because we should be moving an existing waiting lock request. */
- ut_ad(!(type_mode & LOCK_WAIT) || trx->lock.wait_trx);
-
- lock_rec_create(NULL,
+ lock_rec_create(
#ifdef WITH_WSREP
- NULL,
+ NULL, NULL,
#endif
- type_mode, block, heap_no, index, trx, caller_owns_trx_mutex,
- insert_before_waiting);
+ type_mode, block, heap_no, index, trx, caller_owns_trx_mutex);
}
/*********************************************************************//**
@@ -1997,23 +1949,28 @@ lock_rec_lock(
/* Do nothing if the trx already has a strong enough lock on rec */
if (!lock_rec_has_expl(mode, block, heap_no, trx))
{
- bool was_ignored = false;
- if (lock_t *c_lock= lock_rec_other_has_conflicting(
- mode, block, heap_no, trx, &was_ignored))
+ if (
+#ifdef WITH_WSREP
+ lock_t *c_lock=
+#endif
+ lock_rec_other_has_conflicting(mode, block, heap_no, trx))
{
/*
If another transaction has a non-gap conflicting
request in the queue, as this transaction does not
have a lock strong enough already granted on the
record, we have to wait. */
- err = lock_rec_enqueue_waiting(c_lock, mode, block, heap_no, index,
- thr, NULL);
+ err = lock_rec_enqueue_waiting(
+#ifdef WITH_WSREP
+ c_lock,
+#endif /* WITH_WSREP */
+ mode, block, heap_no, index, thr, NULL);
}
else if (!impl)
{
/* Set the requested lock on the record. */
lock_rec_add_to_queue(LOCK_REC | mode, block, heap_no, index, trx,
- true, was_ignored);
+ true);
err= DB_SUCCESS_LOCKED_REC;
}
}
@@ -2039,9 +1996,9 @@ lock_rec_lock(
Note that we don't own the trx mutex.
*/
if (!impl)
- lock_rec_create(NULL,
+ lock_rec_create(
#ifdef WITH_WSREP
- NULL,
+ NULL, NULL,
#endif
mode, block, heap_no, index, trx, false);
@@ -2280,17 +2237,8 @@ static void lock_rec_dequeue_from_page(lock_t* in_lock)
if (!lock_get_wait(lock)) {
continue;
}
-
- ut_ad(lock->trx->lock.wait_trx);
- ut_ad(lock->trx->lock.wait_lock);
-
- if (const lock_t* c = lock_rec_has_to_wait_in_queue(
- lock)) {
- trx_mutex_enter(lock->trx);
- lock->trx->lock.wait_trx = c->trx;
- trx_mutex_exit(lock->trx);
- }
- else {
+ const lock_t* c = lock_rec_has_to_wait_in_queue(lock);
+ if (!c) {
/* Grant the lock */
ut_ad(lock->trx != in_lock->trx);
lock_grant(lock);
@@ -2564,8 +2512,7 @@ lock_rec_move_low(
lock_rec_reset_nth_bit(lock, donator_heap_no);
if (type_mode & LOCK_WAIT) {
- ut_ad(lock->trx->lock.wait_lock == lock);
- lock->type_mode &= ~LOCK_WAIT;
+ lock_reset_lock_and_trx_wait(lock);
}
/* Note that we FIRST reset the bit, and then set the lock:
@@ -2682,8 +2629,8 @@ lock_move_reorganize_page(
lock_rec_bitmap_reset(lock);
if (lock_get_wait(lock)) {
- ut_ad(lock->trx->lock.wait_lock == lock);
- lock->type_mode&= ~LOCK_WAIT;
+
+ lock_reset_lock_and_trx_wait(lock);
}
lock = lock_rec_get_next_on_page(lock);
@@ -2858,9 +2805,7 @@ lock_move_rec_list_end(
ut_ad(!page_rec_is_metadata(orec));
if (type_mode & LOCK_WAIT) {
- ut_ad(lock->trx->lock.wait_lock ==
- lock);
- lock->type_mode&= ~LOCK_WAIT;
+ lock_reset_lock_and_trx_wait(lock);
}
lock_rec_add_to_queue(
@@ -2957,9 +2902,7 @@ lock_move_rec_list_start(
ut_ad(!page_rec_is_metadata(prev));
if (type_mode & LOCK_WAIT) {
- ut_ad(lock->trx->lock.wait_lock
- == lock);
- lock->type_mode&= ~LOCK_WAIT;
+ lock_reset_lock_and_trx_wait(lock);
}
lock_rec_add_to_queue(
@@ -3054,9 +2997,7 @@ lock_rtr_move_rec_list(
if (rec1_heap_no < lock->un_member.rec_lock.n_bits
&& lock_rec_reset_nth_bit(lock, rec1_heap_no)) {
if (type_mode & LOCK_WAIT) {
- ut_ad(lock->trx->lock.wait_lock
- == lock);
- lock->type_mode&= ~LOCK_WAIT;
+ lock_reset_lock_and_trx_wait(lock);
}
lock_rec_add_to_queue(
@@ -3510,8 +3451,10 @@ lock_table_create(
in dictionary cache */
ulint type_mode,/*!< in: lock mode possibly ORed with
LOCK_WAIT */
- trx_t* trx, /*!< in: trx */
- lock_t* c_lock = NULL /*!< in: conflicting lock */
+ trx_t* trx /*!< in: trx */
+#ifdef WITH_WSREP
+ , lock_t* c_lock = NULL /*!< in: conflicting lock */
+#endif
)
{
lock_t* lock;
@@ -3594,7 +3537,8 @@ lock_table_create(
ut_list_append(table->locks, lock, TableLockGetNode());
if (type_mode & LOCK_WAIT) {
- lock_set_lock_and_trx_wait(lock, trx, c_lock);
+
+ lock_set_lock_and_trx_wait(lock, trx);
}
lock->trx->lock.table_locks.push_back(lock);
@@ -3749,8 +3693,10 @@ lock_table_enqueue_waiting(
ulint mode, /*!< in: lock mode this transaction is
requesting */
dict_table_t* table, /*!< in/out: table */
- que_thr_t* thr, /*!< in: query thread */
- lock_t* c_lock /*!< in: conflicting lock or NULL */
+ que_thr_t* thr /*!< in: query thread */
+#ifdef WITH_WSREP
+ , lock_t* c_lock /*!< in: conflicting lock or NULL */
+#endif
)
{
trx_t* trx;
@@ -3781,7 +3727,11 @@ lock_table_enqueue_waiting(
#endif /* WITH_WSREP */
/* Enqueue the lock request that will wait to be granted */
- lock = lock_table_create(table, ulint(mode) | LOCK_WAIT, trx, c_lock);
+ lock = lock_table_create(table, ulint(mode) | LOCK_WAIT, trx
+#ifdef WITH_WSREP
+ , c_lock
+#endif
+ );
const trx_t* victim_trx =
DeadlockChecker::check_and_resolve(lock, trx);
@@ -3937,7 +3887,11 @@ lock_table(
if (wait_for != NULL) {
err = lock_table_enqueue_waiting(ulint(mode) | flags, table,
- thr, wait_for);
+ thr
+#ifdef WITH_WSREP
+ , wait_for
+#endif
+ );
} else {
lock_table_create(table, ulint(mode) | flags, trx);
@@ -3985,7 +3939,7 @@ lock_table_ix_resurrect(
Checks if a waiting table lock request still has to wait in a queue.
@return TRUE if still has to wait */
static
-const lock_t*
+bool
lock_table_has_to_wait_in_queue(
/*============================*/
const lock_t* wait_lock) /*!< in: waiting table lock */
@@ -4004,11 +3958,11 @@ lock_table_has_to_wait_in_queue(
if (lock_has_to_wait(wait_lock, lock)) {
- return(lock);
+ return(true);
}
}
- return(NULL);
+ return(false);
}
/*************************************************************//**
@@ -4037,17 +3991,9 @@ lock_table_dequeue(
lock != NULL;
lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock)) {
- if (!lock_get_wait(lock))
- continue;
-
- ut_ad(lock->trx->lock.wait_trx);
- ut_ad(lock->trx->lock.wait_lock);
+ if (lock_get_wait(lock)
+ && !lock_table_has_to_wait_in_queue(lock)) {
- if (const lock_t *c = lock_table_has_to_wait_in_queue(lock)) {
- trx_mutex_enter(lock->trx);
- lock->trx->lock.wait_trx = c->trx;
- trx_mutex_exit(lock->trx);
- } else {
/* Grant the lock */
ut_ad(in_lock->trx != lock->trx);
lock_grant(lock);
@@ -4243,16 +4189,8 @@ released:
if (!lock_get_wait(lock)) {
continue;
}
- ut_ad(lock->trx->lock.wait_trx);
- ut_ad(lock->trx->lock.wait_lock);
- if (const lock_t* c = lock_rec_has_to_wait_in_queue(
- lock)) {
- if (lock->trx != trx)
- trx_mutex_enter(lock->trx);
- lock->trx->lock.wait_trx = c->trx;
- if (lock->trx != trx)
- trx_mutex_exit(lock->trx);
- } else {
+ const lock_t* c = lock_rec_has_to_wait_in_queue(lock);
+ if (!c) {
/* Grant the lock */
ut_ad(trx != lock->trx);
lock_grant(lock);
@@ -4983,7 +4921,7 @@ func_exit:
wsrep_report_bf_lock_wait(impl_trx->mysql_thd, impl_trx->id);
wsrep_report_bf_lock_wait(other_lock->trx->mysql_thd, other_lock->trx->id);
- if (!lock_rec_has_expl(LOCK_S | LOCK_REC_NOT_GAP,
+ if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no,
impl_trx)) {
ib::info() << "WSREP impl BF lock conflict";
@@ -4992,20 +4930,7 @@ func_exit:
#endif /* WITH_WSREP */
{
ut_ad(lock_get_wait(other_lock));
- /* After MDEV-27025 fix the following case is
- possible:
- 1. trx 1 acquires S-lock;
- 2. trx 2 creates X-lock waiting for trx 1;
- 3. trx 1 creates implicit lock, as
- lock_rec_other_has_conflicting() returns no
- conflicting trx 2 X-lock, the explicit lock
- will not be created;
- 4. trx 3 creates waiting X-lock,
- it will wait for S-lock of trx 1.
- That is why we relaxing the condition here and
- check only for S-lock.
- */
- ut_ad(lock_rec_has_expl(LOCK_S | LOCK_REC_NOT_GAP,
+ ut_ad(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, impl_trx));
}
}
@@ -5411,13 +5336,19 @@ lock_rec_insert_check_and_lock(
const ulint type_mode = LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION;
- if (lock_t* c_lock =
+ if (
+#ifdef WITH_WSREP
+ lock_t* c_lock =
+#endif /* WITH_WSREP */
lock_rec_other_has_conflicting(type_mode, block, heap_no, trx)) {
/* Note that we may get DB_SUCCESS also here! */
trx_mutex_enter(trx);
- err = lock_rec_enqueue_waiting(c_lock, type_mode, block,
- heap_no, index, thr, NULL);
+ err = lock_rec_enqueue_waiting(
+#ifdef WITH_WSREP
+ c_lock,
+#endif /* WITH_WSREP */
+ type_mode, block, heap_no, index, thr, NULL);
trx_mutex_exit(trx);
} else {
@@ -5494,7 +5425,7 @@ lock_rec_convert_impl_to_expl_for_trx(
&& !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, trx)) {
lock_rec_add_to_queue(LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
- block, heap_no, index, trx, true, true);
+ block, heap_no, index, trx, true);
}
lock_mutex_exit();
diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc
index 15624cf79af..9827243177d 100644
--- a/storage/innobase/lock/lock0prdt.cc
+++ b/storage/innobase/lock/lock0prdt.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2022 MariaDB Corporation.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -487,13 +487,9 @@ lock_prdt_add_to_queue(
}
}
- /* Note: We will not pass any conflicting lock to lock_rec_create(),
- because we should be moving an existing waiting lock request. */
- ut_ad(!(type_mode & LOCK_WAIT) || trx->lock.wait_trx);
-
- lock = lock_rec_create(NULL,
+ lock = lock_rec_create(
#ifdef WITH_WSREP
- NULL, /* FIXME: replicate SPATIAL INDEX locks */
+ NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */
#endif
type_mode, block, PRDT_HEAPNO, index, trx,
caller_owns_trx_mutex);
@@ -583,7 +579,9 @@ lock_prdt_insert_check_and_lock(
trx_mutex_enter(trx);
err = lock_rec_enqueue_waiting(
+#ifdef WITH_WSREP
NULL, /* FIXME: replicate SPATIAL INDEX locks */
+#endif
LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION,
block, PRDT_HEAPNO, index, thr, prdt);
@@ -831,9 +829,9 @@ lock_prdt_lock(
lock_t* lock = lock_rec_get_first_on_page(hash, block);
if (lock == NULL) {
- lock = lock_rec_create(NULL,
+ lock = lock_rec_create(
#ifdef WITH_WSREP
- NULL, /* FIXME: replicate SPATIAL INDEX locks */
+ NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */
#endif
ulint(mode) | type_mode, block, PRDT_HEAPNO,
index, trx, FALSE);
@@ -863,8 +861,10 @@ lock_prdt_lock(
if (wait_for != NULL) {
err = lock_rec_enqueue_waiting(
+#ifdef WITH_WSREP
NULL, /* FIXME: replicate
SPATIAL INDEX locks */
+#endif
ulint(mode) | type_mode,
block, PRDT_HEAPNO,
index, thr, prdt);
@@ -948,9 +948,9 @@ lock_place_prdt_page_lock(
}
if (lock == NULL) {
- lock = lock_rec_create_low(NULL,
+ lock = lock_rec_create_low(
#ifdef WITH_WSREP
- NULL, /* FIXME: replicate SPATIAL INDEX locks */
+ NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */
#endif
mode, space, page_no, NULL, PRDT_HEAPNO,
index, trx, FALSE);