diff options
Diffstat (limited to 'storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc')
-rw-r--r-- | storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc | 122 |
1 file changed, 64 insertions(+), 58 deletions(-)
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc index 8f0d86c9f64..8458bae6b8c 100644 --- a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc +++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc @@ -34,77 +34,82 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. along with PerconaFT. If not, see <http://www.gnu.org/licenses/>. ======= */ -#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." +#ident \ + "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." +#include "lock_request.h" +#include <pthread.h> #include <iostream> #include <thread> -#include <pthread.h> -#include "test.h" #include "locktree.h" -#include "lock_request.h" +#include "test.h" -// Suppose that 3 threads are running a lock acquire, release, retry sequence. There is -// a race in the retry algorithm with 2 threads running lock retry simultaneously. The -// first thread to run retry sets a flag that will cause the second thread to skip the -// lock retries. If the first thread progressed past the contended lock, then the second -// threa will HANG until its lock timer pops, even when the contended lock is no longer held. +// Suppose that 3 threads are running a lock acquire, release, retry sequence. +// There is a race in the retry algorithm with 2 threads running lock retry +// simultaneously. The first thread to run retry sets a flag that will cause +// the second thread to skip the lock retries. If the first thread progressed +// past the contended lock, then the second threa will HANG until its lock timer +// pops, even when the contended lock is no longer held. -// This test exposes this problem as a test hang. The group retry algorithm fixes the race -// in the lock request retry algorihm and this test should no longer hang. 
+// This test exposes this problem as a test hang. The group retry algorithm +// fixes the race in the lock request retry algorihm and this test should no +// longer hang. namespace toku { -// use 1000 when after_retry_all is implemented, otherwise use 100000 -static const int n_tests = 1000; // 100000; - -static void after_retry_all(void) { - usleep(10000); -} - -static void run_locker(locktree *lt, TXNID txnid, const DBT *key, pthread_barrier_t *b) { - for (int i = 0; i < n_tests; i++) { - int r; - r = pthread_barrier_wait(b); assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD); - - lock_request request; - request.create(); - - request.set(lt, txnid, key, key, lock_request::type::WRITE, false); - - // try to acquire the lock - r = request.start(); - if (r == DB_LOCK_NOTGRANTED) { - // wait for the lock to be granted - r = request.wait(1000 * 1000); - } - - if (r == 0) { - // release the lock - range_buffer buffer; - buffer.create(); - buffer.append(key, key); - lt->release_locks(txnid, &buffer); - buffer.destroy(); - - // retry pending lock requests - lock_request::retry_all_lock_requests(lt, nullptr, after_retry_all); + // use 1000 when after_retry_all is implemented, otherwise use 100000 + static const int n_tests = 1000; // 100000; + + static void after_retry_all(void) { usleep(10000); } + + static void run_locker(locktree *lt, + TXNID txnid, + const DBT *key, + pthread_barrier_t *b) { + for (int i = 0; i < n_tests; i++) { + int r; + r = pthread_barrier_wait(b); + assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD); + + lock_request request; + request.create(); + + request.set(lt, txnid, key, key, lock_request::type::WRITE, false); + + // try to acquire the lock + r = request.start(); + if (r == DB_LOCK_NOTGRANTED) { + // wait for the lock to be granted + r = request.wait(1000 * 1000); + } + + if (r == 0) { + // release the lock + range_buffer buffer; + buffer.create(); + buffer.append(key, key); + lt->release_locks(txnid, &buffer); + buffer.destroy(); + + 
// retry pending lock requests + lock_request::retry_all_lock_requests(lt, nullptr, after_retry_all); + } + + request.destroy(); + memset(&request, 0xab, sizeof request); + + toku_pthread_yield(); + if ((i % 10) == 0) + std::cerr << std::this_thread::get_id() << " " << i + << std::endl; } - - request.destroy(); - memset(&request, 0xab, sizeof request); - - toku_pthread_yield(); - if ((i % 10) == 0) - std::cout << std::this_thread::get_id() << " " << i << std::endl; } -} } /* namespace toku */ int main(void) { - toku::locktree lt; - DICTIONARY_ID dict_id = { 1 }; + DICTIONARY_ID dict_id = {1}; lt.create(nullptr, dict_id, toku::dbt_comparator); const DBT *one = toku::get_dbt(1); @@ -112,16 +117,17 @@ int main(void) { const int n_workers = 3; std::thread worker[n_workers]; pthread_barrier_t b; - int r = pthread_barrier_init(&b, nullptr, n_workers); assert(r == 0); + int r = pthread_barrier_init(&b, nullptr, n_workers); + assert(r == 0); for (int i = 0; i < n_workers; i++) { worker[i] = std::thread(toku::run_locker, <, i, one, &b); } for (int i = 0; i < n_workers; i++) { worker[i].join(); } - r = pthread_barrier_destroy(&b); assert(r == 0); + r = pthread_barrier_destroy(&b); + assert(r == 0); lt.release_reference(); lt.destroy(); return 0; } - |