Diffstat (limited to 'storage/innobase/trx')
-rw-r--r--  storage/innobase/trx/trx0purge.cc   307
-rw-r--r--  storage/innobase/trx/trx0rec.cc      10
-rw-r--r--  storage/innobase/trx/trx0rseg.cc      3
-rw-r--r--  storage/innobase/trx/trx0trx.cc      27
-rw-r--r--  storage/innobase/trx/trx0undo.cc    120
5 files changed, 223 insertions, 244 deletions
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 841b014019b..f5795c9c128 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -274,13 +274,11 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
if (undo->state != TRX_UNDO_CACHED) {
/* The undo log segment will not be reused */
ut_a(undo->id < TRX_RSEG_N_SLOTS);
- compile_time_assert(FIL_NULL == 0xffffffff);
+ static_assert(FIL_NULL == 0xffffffff, "");
mtr->memset(rseg_header,
TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff);
- MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
-
uint32_t hist_size = mach_read_from_4(
TRX_RSEG_HISTORY_SIZE + TRX_RSEG
+ rseg_header->page.frame);
@@ -362,7 +360,6 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
if (undo->state == TRX_UNDO_CACHED) {
UT_LIST_ADD_FIRST(rseg->undo_cached, undo);
- MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
} else {
ut_ad(undo->state == TRX_UNDO_TO_PURGE);
ut_free(undo);
@@ -384,161 +381,168 @@ static dberr_t trx_purge_remove_log_hdr(buf_block_t *rseg, buf_block_t* log,
uint16_t(offset + TRX_UNDO_HISTORY_NODE), mtr);
}
-MY_ATTRIBUTE((nonnull, warn_unused_result))
-/** Free an undo log segment, and remove the header from the history list.
-@param[in,out] mtr mini-transaction
-@param[in,out] rseg rollback segment
-@param[in] hdr_addr file address of log_hdr
-@return error code */
-static dberr_t
-trx_purge_free_segment(mtr_t &mtr, trx_rseg_t* rseg, fil_addr_t hdr_addr)
+/** Free an undo log segment.
+@param block rollback segment header page
+@param mtr mini-transaction */
+static void trx_purge_free_segment(buf_block_t *block, mtr_t &mtr)
{
- mtr.commit();
- log_free_check();
- mtr.start();
-
- const page_id_t hdr_page_id{rseg->space->id, hdr_addr.page};
- dberr_t err;
- buf_block_t *rseg_hdr= rseg->get(&mtr, &err);
- if (!rseg_hdr)
- return err;
- buf_block_t *block= buf_page_get_gen(hdr_page_id, 0, RW_X_LATCH,
- nullptr, BUF_GET_POSSIBLY_FREED,
- &mtr, &err);
- if (!block)
- return err;
-
- const uint32_t seg_size=
- flst_get_len(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + block->page.frame);
-
- err= trx_purge_remove_log_hdr(rseg_hdr, block, hdr_addr.boffset, &mtr);
- if (UNIV_UNLIKELY(err != DB_SUCCESS))
- return err;
-
- ut_ad(rseg->curr_size >= seg_size);
- rseg->curr_size-= seg_size;
- rseg->history_size--;
-
- byte *hist= TRX_RSEG + TRX_RSEG_HISTORY_SIZE + rseg_hdr->page.frame;
- ut_ad(mach_read_from_4(hist) >= seg_size);
- mtr.write<4>(*rseg_hdr, hist, mach_read_from_4(hist) - seg_size);
-
while (!fseg_free_step_not_header(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame, &mtr))
{
block->fix();
+ const page_id_t id{block->page.id()};
mtr.commit();
/* NOTE: If the server is killed after the log that was produced
up to this point was written, and before the log from the mtr.commit()
in our caller is written, then the pages belonging to the
undo log will become unaccessible garbage.
- This does not matters when using multiple innodb_undo_tablespaces;
+ This does not matter when using multiple innodb_undo_tablespaces;
innodb_undo_log_truncate=ON will be able to reclaim the space. */
log_free_check();
mtr.start();
block->page.lock.x_lock();
- mtr.memo_push(block, MTR_MEMO_PAGE_X_MODIFY);
+ if (UNIV_UNLIKELY(block->page.id() != id))
+ {
+ block->unfix();
+ block->page.lock.x_unlock();
+ block= buf_page_get_gen(id, 0, RW_X_LATCH, nullptr, BUF_GET, &mtr);
+ if (!block)
+ return;
+ }
+ else
+ mtr.memo_push(block, MTR_MEMO_PAGE_X_MODIFY);
}
while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER +
block->page.frame, &mtr));
- return DB_SUCCESS;
}
/** Remove unnecessary history data from a rollback segment.
@param[in,out] rseg rollback segment
@param[in] limit truncate anything before this
@return error code */
-static
-dberr_t
-trx_purge_truncate_rseg_history(
- trx_rseg_t& rseg,
- const purge_sys_t::iterator& limit)
+static dberr_t
+trx_purge_truncate_rseg_history(trx_rseg_t& rseg,
+ const purge_sys_t::iterator& limit)
{
- fil_addr_t hdr_addr;
- mtr_t mtr;
+ fil_addr_t hdr_addr;
+ mtr_t mtr;
- mtr.start();
+ log_free_check();
+ mtr.start();
- dberr_t err;
- buf_block_t* rseg_hdr = rseg.get(&mtr, &err);
- if (!rseg_hdr) {
- goto func_exit;
- }
+ dberr_t err;
+reget:
+ buf_block_t *rseg_hdr= rseg.get(&mtr, &err);
+ if (!rseg_hdr)
+ {
+func_exit:
+ mtr.commit();
+ return err;
+ }
- hdr_addr = flst_get_last(TRX_RSEG + TRX_RSEG_HISTORY
- + rseg_hdr->page.frame);
- hdr_addr.boffset = static_cast<uint16_t>(hdr_addr.boffset
- - TRX_UNDO_HISTORY_NODE);
+ hdr_addr= flst_get_last(TRX_RSEG + TRX_RSEG_HISTORY + rseg_hdr->page.frame);
+ hdr_addr.boffset= static_cast<uint16_t>(hdr_addr.boffset -
+ TRX_UNDO_HISTORY_NODE);
loop:
- if (hdr_addr.page == FIL_NULL) {
-func_exit:
- mtr.commit();
- return err;
- }
+ if (hdr_addr.page == FIL_NULL)
+ goto func_exit;
- buf_block_t* block = buf_page_get_gen(page_id_t(rseg.space->id,
- hdr_addr.page),
- 0, RW_X_LATCH, nullptr,
- BUF_GET_POSSIBLY_FREED,
- &mtr, &err);
- if (!block) {
- goto func_exit;
- }
+ buf_block_t *b=
+ buf_page_get_gen(page_id_t(rseg.space->id, hdr_addr.page),
+ 0, RW_X_LATCH, nullptr, BUF_GET_POSSIBLY_FREED,
+ &mtr, &err);
+ if (!b)
+ goto func_exit;
- const trx_id_t undo_trx_no = mach_read_from_8(
- block->page.frame + hdr_addr.boffset + TRX_UNDO_TRX_NO);
+ const trx_id_t undo_trx_no=
+ mach_read_from_8(b->page.frame + hdr_addr.boffset + TRX_UNDO_TRX_NO);
- if (undo_trx_no >= limit.trx_no) {
- if (undo_trx_no == limit.trx_no) {
- err = trx_undo_truncate_start(
- &rseg, hdr_addr.page,
- hdr_addr.boffset, limit.undo_no);
- }
+ if (undo_trx_no >= limit.trx_no)
+ {
+ if (undo_trx_no == limit.trx_no)
+ err = trx_undo_truncate_start(&rseg, hdr_addr.page,
+ hdr_addr.boffset, limit.undo_no);
+ goto func_exit;
+ }
- goto func_exit;
- }
+ fil_addr_t prev_hdr_addr=
+ flst_get_prev_addr(b->page.frame + hdr_addr.boffset +
+ TRX_UNDO_HISTORY_NODE);
+ prev_hdr_addr.boffset= static_cast<uint16_t>(prev_hdr_addr.boffset -
+ TRX_UNDO_HISTORY_NODE);
+ err= trx_purge_remove_log_hdr(rseg_hdr, b, hdr_addr.boffset, &mtr);
+ if (UNIV_UNLIKELY(err != DB_SUCCESS))
+ goto func_exit;
- fil_addr_t prev_hdr_addr = flst_get_prev_addr(
- block->page.frame + hdr_addr.boffset + TRX_UNDO_HISTORY_NODE);
- prev_hdr_addr.boffset = static_cast<uint16_t>(prev_hdr_addr.boffset
- - TRX_UNDO_HISTORY_NODE);
-
- if (!rseg.is_referenced()
- && rseg.needs_purge <= (purge_sys.head.trx_no
- ? purge_sys.head.trx_no
- : purge_sys.tail.trx_no)
- && mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE
- + block->page.frame)
- == TRX_UNDO_TO_PURGE
- && !mach_read_from_2(block->page.frame + hdr_addr.boffset
- + TRX_UNDO_NEXT_LOG)) {
- /* We can free the whole log segment.
- This will call trx_purge_remove_log_hdr(). */
- err = trx_purge_free_segment(mtr, &rseg, hdr_addr);
- } else {
- /* Remove the log hdr from the rseg history. */
- rseg.history_size--;
- err = trx_purge_remove_log_hdr(rseg_hdr, block,
- hdr_addr.boffset, &mtr);
- }
+ rseg_hdr->fix();
- mtr.commit();
- if (err != DB_SUCCESS) {
- return err;
- }
- mtr.start();
+ if (mach_read_from_2(b->page.frame + hdr_addr.boffset + TRX_UNDO_NEXT_LOG) ||
+ rseg.is_referenced() ||
+ rseg.needs_purge > (purge_sys.head.trx_no
+ ? purge_sys.head.trx_no
+ : purge_sys.tail.trx_no))
+ /* We cannot free the entire undo page. */;
+ else
+ {
+ const uint32_t seg_size=
+ flst_get_len(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + b->page.frame);
+ switch (mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE +
+ b->page.frame)) {
+ case TRX_UNDO_TO_PURGE:
+ {
+ byte *hist= TRX_RSEG + TRX_RSEG_HISTORY_SIZE + rseg_hdr->page.frame;
+ ut_ad(mach_read_from_4(hist) >= seg_size);
+ mtr.write<4>(*rseg_hdr, hist, mach_read_from_4(hist) - seg_size);
+ }
+ free_segment:
+ ut_ad(rseg.curr_size >= seg_size);
+ rseg.curr_size-= seg_size;
+ trx_purge_free_segment(b, mtr);
+ break;
+ case TRX_UNDO_CACHED:
+ /* rseg.undo_cached must point to this page */
+ trx_undo_t *undo= UT_LIST_GET_FIRST(rseg.undo_cached);
+ for (; undo; undo= UT_LIST_GET_NEXT(undo_list, undo))
+ if (undo->hdr_page_no == hdr_addr.page)
+ goto found_cached;
+ ut_ad("inconsistent undo logs" == 0);
+ break;
+ found_cached:
+ UT_LIST_REMOVE(rseg.undo_cached, undo);
+ static_assert(FIL_NULL == 0xffffffff, "");
+ if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT +
+ rseg_hdr->page.frame)))
+ trx_rseg_format_upgrade(rseg_hdr, &mtr);
+ mtr.memset(rseg_hdr, TRX_RSEG + TRX_RSEG_UNDO_SLOTS +
+ undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff);
+ ut_free(undo);
+ mtr.write<8,mtr_t::MAYBE_NOP>(*rseg_hdr, TRX_RSEG + TRX_RSEG_MAX_TRX_ID +
+ rseg_hdr->page.frame,
+ trx_sys.get_max_trx_id() - 1);
+ goto free_segment;
+ }
+ }
- hdr_addr = prev_hdr_addr;
+ hdr_addr= prev_hdr_addr;
- rseg_hdr = rseg.get(&mtr, &err);
- if (!rseg_hdr) {
- goto func_exit;
- }
+ mtr.commit();
+ ut_ad(rseg.history_size > 0);
+ rseg.history_size--;
+ log_free_check();
+ mtr.start();
+ rseg_hdr->page.lock.x_lock();
+ if (UNIV_UNLIKELY(rseg_hdr->page.id() != rseg.page_id()))
+ {
+ rseg_hdr->unfix();
+ rseg_hdr->page.lock.x_unlock();
+ goto reget;
+ }
+ mtr.memo_push(rseg_hdr, MTR_MEMO_PAGE_X_MODIFY);
- goto loop;
+ goto loop;
}
/** Cleanse purge queue to remove the rseg that reside in undo-tablespace
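Both trx_purge_free_segment() and the reget loop above use the same pattern whenever a page latch has to be given up across a mini-transaction boundary: buffer-fix the block, commit the mtr so that log_free_check() can run, restart the mtr, re-acquire the exclusive latch, and trust the cached block pointer only if it still carries the expected page id; otherwise unfix it and fetch the page again. The following is a minimal standalone sketch of that validate-after-relatch idea; the Page and Pool types are hypothetical stand-ins, not InnoDB's buf_block_t or buffer-pool API.

#include <cstdint>
#include <cstdio>
#include <mutex>

// Hypothetical types: Page stands in for buf_block_t, Pool::get() for
// buf_page_get_gen(); only the control flow mirrors the loop above.
struct Page
{
  uint64_t id= 0;              // page identity; changes if the block is reused
  unsigned fix_count= 0;       // reference count ("buffer-fix")
  std::mutex latch;            // stands in for block->page.lock
};

struct Pool
{
  Page slot;
  Page *get(uint64_t id)       // fetch a fixed, X-latched block by page id
  {
    slot.id= id;
    slot.latch.lock();
    ++slot.fix_count;
    return &slot;
  }
};

// One round of "fix, commit, re-latch, validate" as in trx_purge_free_segment().
static Page *relatch(Pool &pool, Page *block, uint64_t expected_id)
{
  ++block->fix_count;          // block->fix(): keep the block referenced
  block->latch.unlock();       // mtr.commit() released the X-latch
  /* log_free_check() and mtr.start() would run here */
  block->latch.lock();         // block->page.lock.x_lock()
  if (block->id == expected_id)
    return block;              // same page: push it to the new mtr memo
  --block->fix_count;          // stale block: unfix, release the latch,
  block->latch.unlock();       // and look the page up again by its id
  return pool.get(expected_id);
}

int main()
{
  Pool pool;
  Page *b= pool.get(42);
  b= relatch(pool, b, 42);
  std::printf("still on page %llu\n", (unsigned long long) b->id);
  b->latch.unlock();
}

The id check is what makes it safe for the buffer pool to evict or repurpose the block while no latch is held.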
@@ -1234,43 +1238,6 @@ trx_purge_attach_undo_recs(ulint n_purge_threads)
return(n_pages_handled);
}
-/*******************************************************************//**
-Calculate the DML delay required.
-@return delay in microseconds or ULINT_MAX */
-static
-ulint
-trx_purge_dml_delay(void)
-/*=====================*/
-{
- /* Determine how much data manipulation language (DML) statements
- need to be delayed in order to reduce the lagging of the purge
- thread. */
- ulint delay = 0; /* in microseconds; default: no delay */
-
- /* If purge lag is set then calculate the new DML delay. */
-
- if (srv_max_purge_lag > 0) {
- double ratio = static_cast<double>(trx_sys.history_size()) /
- static_cast<double>(srv_max_purge_lag);
-
- if (ratio > 1.0) {
- /* If the history list length exceeds the
- srv_max_purge_lag, the data manipulation
- statements are delayed by at least 5000
- microseconds. */
- delay = (ulint) ((ratio - .5) * 10000);
- }
-
- if (delay > srv_max_purge_lag_delay) {
- delay = srv_max_purge_lag_delay;
- }
-
- MONITOR_SET(MONITOR_DML_PURGE_DELAY, delay);
- }
-
- return(delay);
-}
-
extern tpool::waitable_task purge_worker_task;
/** Wait for pending purge jobs to complete. */
@@ -1314,18 +1281,18 @@ TRANSACTIONAL_INLINE void purge_sys_t::clone_end_view()
/**
Run a purge batch.
-@param n_tasks number of purge tasks to submit to the queue
-@param truncate whether to truncate the history at the end of the batch
+@param n_tasks number of purge tasks to submit to the queue
+@param history_size trx_sys.history_size()
+@param truncate whether to truncate the history at the end of the batch
@return number of undo log pages handled in the batch */
-TRANSACTIONAL_TARGET ulint trx_purge(ulint n_tasks, bool truncate)
+TRANSACTIONAL_TARGET
+ulint trx_purge(ulint n_tasks, ulint history_size, bool truncate)
{
que_thr_t* thr = NULL;
ulint n_pages_handled;
ut_ad(n_tasks > 0);
- srv_dml_needed_delay = trx_purge_dml_delay();
-
purge_sys.clone_oldest_view();
#ifdef UNIV_DEBUG
@@ -1337,6 +1304,24 @@ TRANSACTIONAL_TARGET ulint trx_purge(ulint n_tasks, bool truncate)
/* Fetch the UNDO recs that need to be purged. */
n_pages_handled = trx_purge_attach_undo_recs(n_tasks);
+ {
+ ulint delay = n_pages_handled ? srv_max_purge_lag : 0;
+ if (UNIV_UNLIKELY(delay)) {
+ if (delay >= history_size) {
+ no_throttle:
+ delay = 0;
+ } else if (const ulint max_delay =
+ srv_max_purge_lag_delay) {
+ delay = std::min(max_delay,
+ 10000 * history_size / delay
+ - 5000);
+ } else {
+ goto no_throttle;
+ }
+ }
+ srv_dml_needed_delay = delay;
+ }
+
/* Submit tasks to workers queue if using multi-threaded purge. */
for (ulint i = n_tasks; --i; ) {
thr = que_fork_scheduler_round_robin(purge_sys.query, thr);
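The inline computation above replaces trx_purge_dml_delay() but keeps its curve: with ratio = history_size / srv_max_purge_lag, the delay is (ratio - 0.5) * 10000 microseconds once the history list exceeds the configured lag, capped at srv_max_purge_lag_delay, and it is now applied only when the batch actually found pages to purge. A standalone sketch of that arithmetic, with illustrative names rather than the real server variables:

#include <algorithm>
#include <cstdio>

// Hypothetical stand-ins for srv_max_purge_lag, srv_max_purge_lag_delay
// and trx_sys.history_size(); the formula matches the hunk above.
static unsigned long purge_lag_delay(unsigned long history_size,
                                     unsigned long max_purge_lag,
                                     unsigned long max_purge_lag_delay,
                                     bool batch_had_work)
{
  if (!batch_had_work || !max_purge_lag || !max_purge_lag_delay ||
      max_purge_lag >= history_size)
    return 0;                                   // no throttling needed
  // 10000 * history / lag - 5000 == (history / lag - 0.5) * 10000
  return std::min(max_purge_lag_delay,
                  10000 * history_size / max_purge_lag - 5000);
}

int main()
{
  // history is twice the lag limit: (2 - 0.5) * 10000 = 15000 us,
  // clamped to the configured maximum delay of 10000 us.
  std::printf("%lu us\n", purge_lag_delay(2000, 1000, 10000, true));
}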
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index e70516a2d2d..c1a7b08b717 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1875,26 +1875,28 @@ trx_undo_report_row_operation(
}
mtr_t mtr;
+ dberr_t err;
mtr.start();
trx_undo_t** pundo;
trx_rseg_t* rseg;
const bool is_temp = index->table->is_temporary();
+ buf_block_t* undo_block;
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
-
rseg = trx->get_temp_rseg();
pundo = &trx->rsegs.m_noredo.undo;
+ undo_block = trx_undo_assign_low<true>(trx, rseg, pundo,
+ &mtr, &err);
} else {
ut_ad(!trx->read_only);
ut_ad(trx->id);
pundo = &trx->rsegs.m_redo.undo;
rseg = trx->rsegs.m_redo.rseg;
+ undo_block = trx_undo_assign_low<false>(trx, rseg, pundo,
+ &mtr, &err);
}
- dberr_t err;
- buf_block_t* undo_block = trx_undo_assign_low(trx, rseg, pundo,
- &err, &mtr);
trx_undo_t* undo = *pundo;
ut_ad((err == DB_SUCCESS) == (undo_block != NULL));
if (UNIV_UNLIKELY(undo_block == NULL)) {
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index 1dc3c18fc09..e730530adb2 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -395,7 +395,6 @@ void trx_rseg_t::reinit(uint32_t page)
{
next= UT_LIST_GET_NEXT(undo_list, undo);
UT_LIST_REMOVE(undo_cached, undo);
- MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
ut_free(undo);
}
@@ -404,6 +403,7 @@ void trx_rseg_t::reinit(uint32_t page)
last_commit_and_offset= 0;
last_page_no= FIL_NULL;
curr_size= 1;
+ ref.store(0, std::memory_order_release);
}
/** Read the undo log lists.
@@ -428,7 +428,6 @@ static dberr_t trx_undo_lists_init(trx_rseg_t *rseg,
if (is_undo_empty)
is_undo_empty= !undo->size || undo->state == TRX_UNDO_CACHED;
rseg->curr_size+= undo->size;
- MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
}
}
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index e88f7824ba6..0f85aec540f 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -816,28 +816,17 @@ static void trx_assign_rseg_low(trx_t *trx)
static Atomic_counter<unsigned> rseg_slot;
unsigned slot = rseg_slot++ % TRX_SYS_N_RSEGS;
ut_d(if (trx_rseg_n_slots_debug) slot = 0);
+ ut_d(const auto start_scan_slot = slot);
+ ut_d(bool look_for_rollover = false);
trx_rseg_t* rseg;
-#ifdef UNIV_DEBUG
- ulint start_scan_slot = slot;
- bool look_for_rollover = false;
-#endif /* UNIV_DEBUG */
-
bool allocated;
do {
for (;;) {
rseg = &trx_sys.rseg_array[slot];
-
-#ifdef UNIV_DEBUG
- /* Ensure that we are not revisiting the same
- slot that we have already inspected. */
- if (look_for_rollover) {
- ut_ad(start_scan_slot != slot);
- }
- look_for_rollover = true;
-#endif /* UNIV_DEBUG */
-
+ ut_ad(!look_for_rollover || start_scan_slot != slot);
+ ut_d(look_for_rollover = true);
ut_d(if (!trx_rseg_n_slots_debug))
slot = (slot + 1) % TRX_SYS_N_RSEGS;
@@ -1038,7 +1027,13 @@ trx_write_serialisation_history(
mtr_t temp_mtr;
temp_mtr.start();
temp_mtr.set_log_mode(MTR_LOG_NO_REDO);
- trx_undo_set_state_at_finish(undo, &temp_mtr);
+ buf_block_t* block= buf_page_get(page_id_t(SRV_TMP_SPACE_ID,
+ undo->hdr_page_no),
+ 0, RW_X_LATCH, &temp_mtr);
+ ut_a(block);
+ temp_mtr.write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE
+ + block->page.frame, TRX_UNDO_TO_PURGE);
+ undo->state = TRX_UNDO_TO_PURGE;
temp_mtr.commit();
}
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 3b9c598e745..cbbf316fe69 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -535,8 +535,6 @@ trx_undo_seg_create(fil_space_t *space, buf_block_t *rseg_hdr, ulint *id,
+ slot_no * TRX_RSEG_SLOT_SIZE + rseg_hdr->page.frame,
block->page.id().page_no());
- MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
-
*err = DB_SUCCESS;
return block;
}
@@ -991,7 +989,6 @@ static void trx_undo_seg_free(const trx_undo_t *undo)
static_assert(FIL_NULL == 0xffffffff, "compatibility");
mtr.memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS +
undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff);
- MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
}
}
@@ -1150,7 +1147,6 @@ corrupted_type:
UT_LIST_ADD_LAST(rseg->undo_list, undo);
} else {
UT_LIST_ADD_LAST(rseg->undo_cached, undo);
- MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
}
mtr.commit();
@@ -1289,27 +1285,25 @@ trx_undo_create(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
@param[in,out] rseg rollback segment
@param[out] pundo the undo log memory object
@param[in,out] mtr mini-transaction
+@param[out] err error code
@return the undo log block
@retval NULL if none cached */
static
buf_block_t*
trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
- mtr_t* mtr)
+ mtr_t* mtr, dberr_t *err)
{
- if (rseg->is_persistent()) {
- ut_ad(rseg->is_referenced());
- if (rseg->needs_purge <= trx->id) {
- /* trx_purge_truncate_history() compares
- rseg->needs_purge <= head.trx_no
- so we need to compensate for that.
- The rseg->needs_purge after crash
- recovery would be at least trx->id + 1,
- because that is the minimum possible value
- assigned by trx_serialise() on commit. */
- rseg->needs_purge = trx->id + 1;
- }
- } else {
- ut_ad(!rseg->is_referenced());
+ ut_ad(rseg->is_persistent());
+ ut_ad(rseg->is_referenced());
+ if (rseg->needs_purge <= trx->id) {
+ /* trx_purge_truncate_history() compares
+ rseg->needs_purge <= head.trx_no
+ so we need to compensate for that.
+ The rseg->needs_purge after crash
+ recovery would be at least trx->id + 1,
+ because that is the minimum possible value
+ assigned by trx_serialise() on commit. */
+ rseg->needs_purge = trx->id + 1;
}
trx_undo_t* undo = UT_LIST_GET_FIRST(rseg->undo_cached);
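The comment above encodes an ordering invariant: trx_purge_truncate_rseg_history() may free a segment only while rseg->needs_purge is not greater than the purge head, so bumping needs_purge to at least trx->id + 1 keeps a reused cached undo log alive until purge has processed this transaction. A toy check of that invariant in plain C++ (all names here are illustrative, not InnoDB identifiers):

#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
  uint64_t trx_id= 100;        // transaction reusing the cached undo log
  uint64_t needs_purge= 90;    // stale value left behind by an earlier trx

  // rseg->needs_purge = trx->id + 1, as in trx_undo_reuse_cached() above
  needs_purge= std::max(needs_purge, trx_id + 1);

  // the history truncation frees the segment only while
  // needs_purge <= purge head; any head <= trx_id now keeps it alive
  uint64_t purge_head_trx_no= trx_id;
  assert(needs_purge > purge_head_trx_no);
  return 0;
}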
@@ -1320,15 +1314,15 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
ut_ad(undo->size == 1);
ut_ad(undo->id < TRX_RSEG_N_SLOTS);
- buf_block_t* block = buf_page_get(page_id_t(undo->rseg->space->id,
- undo->hdr_page_no),
- 0, RW_X_LATCH, mtr);
+ buf_block_t* block = buf_page_get_gen(page_id_t(undo->rseg->space->id,
+ undo->hdr_page_no),
+ 0, RW_X_LATCH, nullptr, BUF_GET,
+ mtr, err);
if (!block) {
return NULL;
}
UT_LIST_REMOVE(rseg->undo_cached, undo);
- MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
*pundo = undo;
@@ -1374,11 +1368,12 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
BUF_GET, mtr, err);
}
+ *err = DB_SUCCESS;
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* block = trx_undo_reuse_cached(
- trx, rseg, &trx->rsegs.m_redo.undo, mtr);
+ trx, rseg, &trx->rsegs.m_redo.undo, mtr, err);
if (!block) {
block = trx_undo_create(trx, rseg, &trx->rsegs.m_redo.undo,
@@ -1387,8 +1382,6 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
if (!block) {
goto func_exit;
}
- } else {
- *err = DB_SUCCESS;
}
UT_LIST_ADD_FIRST(rseg->undo_list, trx->rsegs.m_redo.undo);
@@ -1400,18 +1393,20 @@ func_exit:
/** Assign an undo log for a transaction.
A new undo log is created or a cached undo log reused.
+@tparam is_temp whether this is temporary undo log
@param[in,out] trx transaction
@param[in] rseg rollback segment
@param[out] undo the undo log
-@param[out] err error code
@param[in,out] mtr mini-transaction
+@param[out] err error code
@return the undo log block
-@retval NULL on error */
+@retval nullptr on error */
+template<bool is_temp>
buf_block_t*
-trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
- dberr_t* err, mtr_t* mtr)
+trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
+ mtr_t *mtr, dberr_t *err)
{
- ut_d(const bool is_temp = rseg == trx->rsegs.m_noredo.rseg);
+ ut_ad(is_temp == (rseg == trx->rsegs.m_noredo.rseg));
ut_ad(is_temp || rseg == trx->rsegs.m_redo.rseg);
ut_ad(undo == (is_temp
? &trx->rsegs.m_noredo.undo
@@ -1431,19 +1426,24 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
*err = DB_TOO_MANY_CONCURRENT_TRXS; return NULL;
);
+ *err = DB_SUCCESS;
rseg->latch.wr_lock(SRW_LOCK_CALL);
- buf_block_t* block = trx_undo_reuse_cached(trx, rseg, undo, mtr);
-
- if (!block) {
- block = trx_undo_create(trx, rseg, undo, err, mtr);
- ut_ad(!block == (*err != DB_SUCCESS));
- if (!block) {
- goto func_exit;
- }
+ buf_block_t* block;
+ if (is_temp) {
+ ut_ad(!UT_LIST_GET_LEN(rseg->undo_cached));
} else {
- *err = DB_SUCCESS;
+ block = trx_undo_reuse_cached(trx, rseg, undo, mtr, err);
+ if (block) {
+ goto got_block;
+ }
+ }
+ block = trx_undo_create(trx, rseg, undo, err, mtr);
+ ut_ad(!block == (*err != DB_SUCCESS));
+ if (!block) {
+ goto func_exit;
}
+got_block:
UT_LIST_ADD_FIRST(rseg->undo_list, *undo);
func_exit:
@@ -1451,6 +1451,13 @@ func_exit:
return block;
}
+template buf_block_t*
+trx_undo_assign_low<false>(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
+ mtr_t *mtr, dberr_t *err);
+template buf_block_t*
+trx_undo_assign_low<true>(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo,
+ mtr_t *mtr, dberr_t *err);
+
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@return undo log segment header page, x-latched */
@@ -1461,6 +1468,7 @@ trx_undo_set_state_at_finish(
mtr_t* mtr) /*!< in: mtr */
{
ut_ad(undo->id < TRX_RSEG_N_SLOTS);
+ ut_ad(undo->rseg->is_persistent());
buf_block_t *block=
buf_page_get(page_id_t(undo->rseg->space->id, undo->hdr_page_no), 0,
@@ -1532,29 +1540,19 @@ the data can be discarded.
@param undo temporary undo log */
void trx_undo_commit_cleanup(trx_undo_t *undo)
{
- trx_rseg_t* rseg = undo->rseg;
- ut_ad(rseg->space == fil_system.temp_space);
-
- rseg->latch.wr_lock(SRW_LOCK_CALL);
-
- UT_LIST_REMOVE(rseg->undo_list, undo);
-
- if (undo->state == TRX_UNDO_CACHED) {
- UT_LIST_ADD_FIRST(rseg->undo_cached, undo);
- MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
- undo = nullptr;
- } else {
- ut_ad(undo->state == TRX_UNDO_TO_PURGE);
-
- /* Delete first the undo log segment in the file */
- trx_undo_seg_free(undo);
+ trx_rseg_t *rseg= undo->rseg;
+ ut_ad(rseg->space == fil_system.temp_space);
+ rseg->latch.wr_lock(SRW_LOCK_CALL);
- ut_ad(rseg->curr_size > undo->size);
- rseg->curr_size -= undo->size;
- }
+ UT_LIST_REMOVE(rseg->undo_list, undo);
+ ut_ad(undo->state == TRX_UNDO_TO_PURGE);
+ /* Delete first the undo log segment in the file */
+ trx_undo_seg_free(undo);
+ ut_ad(rseg->curr_size > undo->size);
+ rseg->curr_size-= undo->size;
- rseg->latch.wr_unlock();
- ut_free(undo);
+ rseg->latch.wr_unlock();
+ ut_free(undo);
}
/** At shutdown, frees the undo logs of a transaction. */