author    | Marko Mäkelä <marko.makela@mariadb.com> | 2020-12-01 14:55:46 +0200
committer | Marko Mäkelä <marko.makela@mariadb.com> | 2020-12-01 14:55:46 +0200
commit    | 81ab9ea63f5d3ad4909bb75552008d6b035a371b (patch)
tree      | 6e65113d85d36c9f488da15540ddd601e7641fee
parent    | c537576495e1e651bf3dc63e5a569fcddd9fbec8 (diff)
parent    | e6b3e38d62d13206ae982fc7b740d5d8024b207a (diff)
download  | mariadb-git-81ab9ea63f5d3ad4909bb75552008d6b035a371b.tar.gz
Merge 10.2 into 10.3
66 files changed, 1895 insertions, 230 deletions
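Most of the mariabackup changes in this merge add the --log-innodb-page-corruption option: during --backup, pages that still fail validation after the read retries are recorded per tablespace instead of aborting the copy, the list is written to innodb_corrupted_pages (MB_CORRUPTED_PAGES_FILE), and --prepare later zeroes out any recorded page that turns out to be free, deletes the file if nothing is left, and otherwise rewrites it and fails. The registry behind this is the new CorruptedPages class in xtrabackup.cc/xtrabackup.h. The sketch below is a simplified, standalone model of that registry — std::mutex and std::map instead of the pthread mutex and InnoDB ulint types used in the real code, and CorruptedPagesSketch is a made-up name — intended only to make the locking and lookup pattern easier to follow than the raw diff.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <set>
#include <string>

// Simplified model of the CorruptedPages registry added by this merge.
// The real class uses pthread mutexes, ulint ids and InnoDB name conversion.
class CorruptedPagesSketch
{
public:
  void add_page(const std::string &space_name, uint32_t space_id,
                uint32_t page_no)
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    space_info_t &info= m_spaces[space_id];
    if (info.name.empty())
      info.name= space_name;
    info.pages.insert(page_no);
  }

  bool contains(uint32_t space_id, uint32_t page_no) const
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    auto it= m_spaces.find(space_id);
    return it != m_spaces.end() && it->second.pages.count(page_no) != 0;
  }

  // DDL tracking hooks: a dropped table forgets its pages, a renamed
  // table keeps them under the new name (mirrors drop_space/rename_space).
  void drop_space(uint32_t space_id)
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    m_spaces.erase(space_id);
  }

  void rename_space(uint32_t space_id, const std::string &new_name)
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    auto it= m_spaces.find(space_id);
    if (it != m_spaces.end())
      it->second.name= new_name;
  }

  bool empty() const
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    return m_spaces.empty();
  }

private:
  struct space_info_t
  {
    std::string name;
    std::set<uint32_t> pages;
  };
  mutable std::mutex m_mutex;
  std::map<uint32_t, space_info_t> m_spaces;
};

int main()
{
  CorruptedPagesSketch pages;
  pages.add_page("test/t1", 5, 42);  // a copy thread found a bad page
  pages.rename_space(5, "test/t2");  // the table was renamed during backup
  std::cout << pages.contains(5, 42) << ' ' << pages.empty() << '\n'; // 1 0
}
```

Every accessor takes the mutex because, during --backup, all data copy threads share one instance through data_thread_ctxt_t::corrupted_pages, while backup_fix_ddl() concurrently drops or renames entries as it replays DDL.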
diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index 6d1739e3dc3..0335f90682d 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -864,21 +864,14 @@ datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f) return(true); } - -static -bool -backup_file_vprintf(const char *filename, const char *fmt, va_list ap) +bool backup_file_print_buf(const char *filename, const char *buf, int buf_len) { ds_file_t *dstfile = NULL; MY_STAT stat; /* unused for now */ - char *buf = 0; - int buf_len; const char *action; memset(&stat, 0, sizeof(stat)); - buf_len = vasprintf(&buf, fmt, ap); - stat.st_size = buf_len; stat.st_mtime = my_time(0); @@ -902,7 +895,6 @@ backup_file_vprintf(const char *filename, const char *fmt, va_list ap) /* close */ msg(" ...done"); - free(buf); if (ds_close(dstfile)) { goto error_close; @@ -911,7 +903,6 @@ backup_file_vprintf(const char *filename, const char *fmt, va_list ap) return(true); error: - free(buf); if (dstfile != NULL) { ds_close(dstfile); } @@ -919,8 +910,21 @@ error: error_close: msg("Error: backup file failed."); return(false); /*ERROR*/ -} + return true; +}; + +static +bool +backup_file_vprintf(const char *filename, const char *fmt, va_list ap) +{ + char *buf = 0; + int buf_len; + buf_len = vasprintf(&buf, fmt, ap); + bool result = backup_file_print_buf(filename, buf, buf_len); + free(buf); + return result; +} bool backup_file_printf(const char *filename, const char *fmt, ...) @@ -1443,7 +1447,7 @@ out: return(ret); } -void backup_fix_ddl(void); +void backup_fix_ddl(CorruptedPages &); lsn_t get_current_lsn(MYSQL *connection) { @@ -1468,7 +1472,7 @@ lsn_t get_current_lsn(MYSQL *connection) lsn_t server_lsn_after_lock; extern void backup_wait_for_lsn(lsn_t lsn); /** Start --backup */ -bool backup_start() +bool backup_start(CorruptedPages &corrupted_pages) { if (!opt_no_lock) { if (opt_safe_slave_backup) { @@ -1503,7 +1507,7 @@ bool backup_start() msg("Waiting for log copy thread to read lsn %llu", (ulonglong)server_lsn_after_lock); backup_wait_for_lsn(server_lsn_after_lock); - backup_fix_ddl(); + backup_fix_ddl(corrupted_pages); // There is no need to stop slave thread before coping non-Innodb data when // --no-lock option is used because --no-lock option requires that no DDL or diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h index 7c886719f37..62b2b1bc232 100644 --- a/extra/mariabackup/backup_copy.h +++ b/extra/mariabackup/backup_copy.h @@ -33,7 +33,7 @@ copy_file(ds_ctxt_t *datasink, uint thread_n); /** Start --backup */ -bool backup_start(); +bool backup_start(CorruptedPages &corrupted_pages); /** Release resources after backup_start() */ void backup_release(); /** Finish after backup_start() and backup_release() */ @@ -51,5 +51,6 @@ directory_exists(const char *dir, bool create); lsn_t get_current_lsn(MYSQL *connection); +bool backup_file_print_buf(const char *filename, const char *buf, int buf_len); #endif diff --git a/extra/mariabackup/backup_debug.h b/extra/mariabackup/backup_debug.h new file mode 100644 index 00000000000..cefbc287361 --- /dev/null +++ b/extra/mariabackup/backup_debug.h @@ -0,0 +1,32 @@ +#pragma once +#include "my_dbug.h" +#ifndef DBUG_OFF +extern char *dbug_mariabackup_get_val(const char *event, const char *key); +/* +In debug mode, execute SQL statement that was passed via environment. +To use this facility, you need to + +1. Add code DBUG_EXECUTE_MARIABACKUP_EVENT("my_event_name", key);); + to the code. 
key is usually a table name +2. Set environment variable my_event_name_$key SQL statement you want to execute + when event occurs, in DBUG_EXECUTE_IF from above. + In mtr , you can set environment via 'let' statement (do not use $ as the first char + for the variable) +3. start mariabackup with --dbug=+d,debug_mariabackup_events +*/ +extern void dbug_mariabackup_event( + const char *event,const char *key); +#define DBUG_MARIABACKUP_EVENT(A, B) \ + DBUG_EXECUTE_IF("mariabackup_events", \ + dbug_mariabackup_event(A,B);); +#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) \ + DBUG_EXECUTE_IF("mariabackup_inject_code", {\ + char *dbug_val = dbug_mariabackup_get_val(EVENT, KEY); \ + if (dbug_val && *dbug_val) CODE \ + }) +#else +#define DBUG_MARIABACKUP_EVENT(A,B) +#define DBUG_MARIABACKUP_EVENT_LOCK(A,B) +#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) +#endif + diff --git a/extra/mariabackup/encryption_plugin.cc b/extra/mariabackup/encryption_plugin.cc index a3242078293..dbaa67e1324 100644 --- a/extra/mariabackup/encryption_plugin.cc +++ b/extra/mariabackup/encryption_plugin.cc @@ -18,7 +18,6 @@ #include <mysql.h> #include <xtrabackup.h> #include <encryption_plugin.h> -#include <backup_copy.h> #include <sql_plugin.h> #include <sstream> #include <vector> diff --git a/extra/mariabackup/fil_cur.cc b/extra/mariabackup/fil_cur.cc index 67d1fb173c0..16342768ce9 100644 --- a/extra/mariabackup/fil_cur.cc +++ b/extra/mariabackup/fil_cur.cc @@ -35,6 +35,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include "common.h" #include "read_filt.h" #include "xtrabackup.h" +#include "backup_debug.h" /* Size of read buffer in pages (640 pages = 10M for 16K sized pages) */ #define XB_FIL_CUR_PAGES 640 @@ -371,16 +372,15 @@ static bool page_is_corrupted(const byte *page, ulint page_no, return buf_page_is_corrupted(true, page, cursor->page_size, space); } -/************************************************************************ -Reads and verifies the next block of pages from the source +/** Reads and verifies the next block of pages from the source file. Positions the cursor after the last read non-corrupted page. - +@param[in,out] cursor source file cursor +@param[out] corrupted_pages adds corrupted pages if +opt_log_innodb_page_corruption is set @return XB_FIL_CUR_SUCCESS if some have been read successfully, XB_FIL_CUR_EOF if there are no more pages to read and XB_FIL_CUR_ERROR on error. */ -xb_fil_cur_result_t -xb_fil_cur_read( -/*============*/ - xb_fil_cur_t* cursor) /*!< in/out: source file cursor */ +xb_fil_cur_result_t xb_fil_cur_read(xb_fil_cur_t* cursor, + CorruptedPages &corrupted_pages) { byte* page; ulint i; @@ -454,20 +454,40 @@ read_retry: retry_count--; if (retry_count == 0) { + const char *ignore_corruption_warn = opt_log_innodb_page_corruption ? + " WARNING!!! The corruption is ignored due to" + " log-innodb-page-corruption option, the backup can contain" + " corrupted data." : ""; msg(cursor->thread_n, "Error: failed to read page after " "10 retries. 
File %s seems to be " - "corrupted.", cursor->abs_path); - ret = XB_FIL_CUR_ERROR; + "corrupted.%s", cursor->abs_path, ignore_corruption_warn); buf_page_print(page, cursor->page_size); - break; + if (opt_log_innodb_page_corruption) { + corrupted_pages.add_page(cursor->node->name, cursor->node->space->id, + page_no); + retry_count = 1; + } + else { + ret = XB_FIL_CUR_ERROR; + break; + } + } + else { + msg(cursor->thread_n, "Database page corruption detected at page " + ULINTPF ", retrying...", + page_no); + os_thread_sleep(100000); + goto read_retry; } - msg(cursor->thread_n, "Database page corruption detected at page " - ULINTPF ", retrying...", - page_no); - os_thread_sleep(100000); - goto read_retry; } + DBUG_EXECUTE_FOR_KEY("add_corrupted_page_for", cursor->node->space->name, + { + ulint corrupted_page_no = strtoul(dbug_val, NULL, 10); + if (page_no == corrupted_page_no) + corrupted_pages.add_page(cursor->node->name, cursor->node->space->id, + corrupted_page_no); + }); cursor->buf_read += page_size; cursor->buf_npages++; } diff --git a/extra/mariabackup/fil_cur.h b/extra/mariabackup/fil_cur.h index 0f2d620ff7b..b534aa31c28 100644 --- a/extra/mariabackup/fil_cur.h +++ b/extra/mariabackup/fil_cur.h @@ -29,6 +29,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include "read_filt.h" #include "srv0start.h" #include "srv0srv.h" +#include "xtrabackup.h" struct xb_fil_cur_t { pfs_os_file_t file; /*!< source file handle */ @@ -90,17 +91,15 @@ xb_fil_cur_open( uint thread_n, /*!< thread number for diagnostics */ ulonglong max_file_size = ULLONG_MAX); -/************************************************************************ -Reads and verifies the next block of pages from the source +/** Reads and verifies the next block of pages from the source file. Positions the cursor after the last read non-corrupted page. - +@param[in,out] cursor source file cursor +@param[out] corrupted_pages adds corrupted pages if +opt_log_innodb_page_corruption is set @return XB_FIL_CUR_SUCCESS if some have been read successfully, XB_FIL_CUR_EOF if there are no more pages to read and XB_FIL_CUR_ERROR on error. */ -xb_fil_cur_result_t -xb_fil_cur_read( -/*============*/ - xb_fil_cur_t* cursor); /*!< in/out: source file cursor */ - +xb_fil_cur_result_t xb_fil_cur_read(xb_fil_cur_t *cursor, + CorruptedPages &corrupted_pages); /************************************************************************ Close the source file cursor opened with xb_fil_cur_open() and its associated read filter. */ diff --git a/extra/mariabackup/write_filt.cc b/extra/mariabackup/write_filt.cc index 75ddf9fa99e..6341d90ab0f 100644 --- a/extra/mariabackup/write_filt.cc +++ b/extra/mariabackup/write_filt.cc @@ -27,13 +27,12 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include "common.h" #include "write_filt.h" #include "fil_cur.h" -#include "xtrabackup.h" #include <os0proc.h> /************************************************************************ Write-through page write filter. */ static my_bool wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, - xb_fil_cur_t *cursor); + xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages); static my_bool wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile); xb_write_filt_t wf_write_through = { @@ -46,7 +45,7 @@ xb_write_filt_t wf_write_through = { /************************************************************************ Incremental page write filter. 
*/ static my_bool wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, - xb_fil_cur_t *cursor); + xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages); static my_bool wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile); static my_bool wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt, @@ -66,11 +65,11 @@ Initialize incremental page write filter. @return TRUE on success, FALSE on error. */ static my_bool wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, - xb_fil_cur_t *cursor) + xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages) { char meta_name[FN_REFLEN]; xb_wf_incremental_ctxt_t *cp = - &(ctxt->u.wf_incremental_ctxt); + &(ctxt->wf_incremental_ctxt); ctxt->cursor = cursor; @@ -101,7 +100,9 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, strcat(dst_name, ".delta"); mach_write_to_4(cp->delta_buf, 0x78747261UL); /*"xtra"*/ + cp->npages = 1; + cp->corrupted_pages = corrupted_pages; return(TRUE); } @@ -118,15 +119,16 @@ wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile) byte *page; const ulint page_size = cursor->page_size.physical(); - xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt); + xb_wf_incremental_ctxt_t *cp = &(ctxt->wf_incremental_ctxt); for (i = 0, page = cursor->buf; i < cursor->buf_npages; i++, page += page_size) { - if (incremental_lsn >= mach_read_from_8(page + FIL_PAGE_LSN)) { - + if ((!cp->corrupted_pages || + !cp->corrupted_pages->contains(cursor->node->space->id, + cursor->buf_page_no + i)) && + incremental_lsn >= mach_read_from_8(page + FIL_PAGE_LSN)) continue; - } /* updated page */ if (cp->npages == page_size / 4) { @@ -164,7 +166,7 @@ wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile) xb_fil_cur_t *cursor = ctxt->cursor; const ulint page_size = cursor->page_size.physical(); - xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt); + xb_wf_incremental_ctxt_t *cp = &(ctxt->wf_incremental_ctxt); if (cp->npages != page_size / 4) { mach_write_to_4(cp->delta_buf + cp->npages * 4, 0xFFFFFFFFUL); @@ -186,7 +188,7 @@ Free the incremental page write filter's buffer. */ static void wf_incremental_deinit(xb_write_filt_ctxt_t *ctxt) { - xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt); + xb_wf_incremental_ctxt_t *cp = &(ctxt->wf_incremental_ctxt); os_mem_free_large(cp->delta_buf, cp->delta_buf_size); } @@ -196,7 +198,7 @@ Initialize the write-through page write filter. @return TRUE on success, FALSE on error. 
*/ static my_bool wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)), - xb_fil_cur_t *cursor) + xb_fil_cur_t *cursor, CorruptedPages *) { ctxt->cursor = cursor; diff --git a/extra/mariabackup/write_filt.h b/extra/mariabackup/write_filt.h index febf25f2a8a..6c3ef24291f 100644 --- a/extra/mariabackup/write_filt.h +++ b/extra/mariabackup/write_filt.h @@ -27,26 +27,26 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include "fil_cur.h" #include "datasink.h" +#include "xtrabackup.h" /* Incremental page filter context */ typedef struct { ulint delta_buf_size; byte *delta_buf; ulint npages; + CorruptedPages *corrupted_pages; } xb_wf_incremental_ctxt_t; /* Page filter context used as an opaque structure by callers */ typedef struct { xb_fil_cur_t *cursor; - union { - xb_wf_incremental_ctxt_t wf_incremental_ctxt; - } u; + xb_wf_incremental_ctxt_t wf_incremental_ctxt; } xb_write_filt_ctxt_t; typedef struct { my_bool (*init)(xb_write_filt_ctxt_t *ctxt, char *dst_name, - xb_fil_cur_t *cursor); + xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages); my_bool (*process)(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile); my_bool (*finalize)(xb_write_filt_ctxt_t *, ds_file_t *dstfile); void (*deinit)(xb_write_filt_ctxt_t *); diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index ad36893353d..74605162a78 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -77,6 +77,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include <list> #include <sstream> #include <set> +#include <fstream> #include <mysql.h> #define G_PTR uchar* @@ -104,6 +105,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include <crc_glue.h> #include <log.h> #include <derror.h> +#include "backup_debug.h" + +#define MB_CORRUPTED_PAGES_FILE "innodb_corrupted_pages" int sys_var_init(); @@ -300,6 +304,7 @@ my_bool opt_noversioncheck = FALSE; my_bool opt_no_backup_locks = FALSE; my_bool opt_decompress = FALSE; my_bool opt_remove_original; +my_bool opt_log_innodb_page_corruption; my_bool opt_lock_ddl_per_table = FALSE; static my_bool opt_check_privileges; @@ -364,6 +369,208 @@ struct ddl_tracker_t { static ddl_tracker_t ddl_tracker; +// Convert non-null terminated filename to space name +std::string filename_to_spacename(const byte *filename, size_t len); + +CorruptedPages::CorruptedPages() { ut_a(!pthread_mutex_init(&m_mutex, NULL)); } + +CorruptedPages::~CorruptedPages() { ut_a(!pthread_mutex_destroy(&m_mutex)); } + +void CorruptedPages::add_page_no_lock(const char *space_name, ulint space_id, + ulint page_no, bool convert_space_name) +{ + space_info_t &space_info = m_spaces[space_id]; + if (space_info.space_name.empty()) + space_info.space_name= + convert_space_name + ? 
filename_to_spacename(reinterpret_cast<const byte *>(space_name), + strlen(space_name)) + : space_name; + (void)space_info.pages.insert(page_no); +} + +void CorruptedPages::add_page(const char *file_name, ulint space_id, + ulint page_no) +{ + ut_a(!pthread_mutex_lock(&m_mutex)); + add_page_no_lock(file_name, space_id, page_no, true); + ut_a(!pthread_mutex_unlock(&m_mutex)); +} + +bool CorruptedPages::contains(ulint space_id, ulint page_no) const +{ + bool result = false; + ut_a(!pthread_mutex_lock(&m_mutex)); + container_t::const_iterator space_it= m_spaces.find(space_id); + if (space_it != m_spaces.end()) + result = space_it->second.pages.count(page_no); + ut_a(!pthread_mutex_unlock(&m_mutex)); + return result; +} + +void CorruptedPages::drop_space(ulint space_id) +{ + ut_a(!pthread_mutex_lock(&m_mutex)); + m_spaces.erase(space_id); + ut_a(!pthread_mutex_unlock(&m_mutex)); +} + +void CorruptedPages::rename_space(ulint space_id, const std::string &new_name) +{ + ut_a(!pthread_mutex_lock(&m_mutex)); + container_t::iterator space_it = m_spaces.find(space_id); + if (space_it != m_spaces.end()) + space_it->second.space_name = new_name; + ut_a(!pthread_mutex_unlock(&m_mutex)); +} + +bool CorruptedPages::print_to_file(const char *filename) const +{ + std::ostringstream out; + ut_a(!pthread_mutex_lock(&m_mutex)); + if (!m_spaces.size()) + { + ut_a(!pthread_mutex_unlock(&m_mutex)); + return true; + } + for (container_t::const_iterator space_it= + m_spaces.begin(); + space_it != m_spaces.end(); ++space_it) + { + out << space_it->second.space_name << " " << space_it->first << "\n"; + bool first_page_no= true; + for (std::set<ulint>::const_iterator page_it= + space_it->second.pages.begin(); + page_it != space_it->second.pages.end(); ++page_it) + if (first_page_no) + { + out << *page_it; + first_page_no= false; + } + else + out << " " << *page_it; + out << "\n"; + } + ut_a(!pthread_mutex_unlock(&m_mutex)); + if (xtrabackup_backup) + return backup_file_print_buf(filename, out.str().c_str(), + out.str().size()); + std::ofstream outfile; + outfile.open(filename); + if (!outfile.is_open()) + die("Can't open %s, error number: %d, error message: %s", filename, errno, + strerror(errno)); + outfile << out.str(); + return true; +} + +void CorruptedPages::read_from_file(const char *file_name) +{ + MY_STAT mystat; + if (!my_stat(file_name, &mystat, MYF(0))) + return; + std::ifstream infile; + infile.open(file_name); + if (!infile.is_open()) + die("Can't open %s, error number: %d, error message: %s", file_name, errno, + strerror(errno)); + std::string line; + std::string space_name; + ulint space_id; + ulint line_number= 0; + while (std::getline(infile, line)) + { + ++line_number; + std::istringstream iss(line); + if (line_number & 1) { + if (!(iss >> space_name)) + die("Can't parse space name from corrupted pages file at " + "line " ULINTPF, + line_number); + if (!(iss >> space_id)) + die("Can't parse space id from corrupted pages file at line " ULINTPF, + line_number); + } + else + { + ulint page_no; + while ((iss >> page_no)) + add_page_no_lock(space_name.c_str(), space_id, page_no, false); + if (!iss.eof()) + die("Corrupted pages file parse error on line number " ULINTPF, + line_number); + } + } +} + +bool CorruptedPages::empty() const +{ + ut_a(!pthread_mutex_lock(&m_mutex)); + bool result= !m_spaces.size(); + ut_a(!pthread_mutex_unlock(&m_mutex)); + return result; +} + +static void xb_load_single_table_tablespace(const std::string &space_name, + bool set_size); +static void xb_data_files_close(); +static 
fil_space_t* fil_space_get_by_name(const char* name); + +void CorruptedPages::zero_out_free_pages() +{ + container_t non_free_pages; + byte* buf= static_cast<byte*>(ut_malloc_nokey(2 * srv_page_size)); + byte* zero_page = static_cast<byte*>(ut_align(buf, srv_page_size)); + memset(zero_page, 0, srv_page_size); + + ut_a(!pthread_mutex_lock(&m_mutex)); + for (container_t::const_iterator space_it= m_spaces.begin(); + space_it != m_spaces.end(); ++space_it) + { + ulint space_id = space_it->first; + const std::string &space_name = space_it->second.space_name; + // There is no need to close tablespaces explixitly as they will be closed + // in innodb_shutdown(). + xb_load_single_table_tablespace(space_name, false); + mutex_enter(&fil_system.mutex); + fil_space_t *space = fil_space_get_by_name(space_name.c_str()); + mutex_exit(&fil_system.mutex); + if (!space) + die("Can't find space object for space name %s to check corrupted page", + space_name.c_str()); + for (std::set<ulint>::const_iterator page_it= + space_it->second.pages.begin(); + page_it != space_it->second.pages.end(); ++page_it) + { + bool is_free= fseg_page_is_free(space, *page_it); + if (!is_free) { + space_info_t &space_info = non_free_pages[space_id]; + space_info.pages.insert(*page_it); + if (space_info.space_name.empty()) + space_info.space_name = space_name; + msg("Error: corrupted page " ULINTPF + " of tablespace %s can not be fixed", + *page_it, space_name.c_str()); + } + else + { + const page_id_t page_id(space->id, *page_it); + dberr_t err= fil_io(IORequestWrite, true, page_id, univ_page_size, 0, + univ_page_size.physical(), zero_page, NULL); + if (err != DB_SUCCESS) + die("Can't zero out corrupted page " ULINTPF " of tablespace %s", + *page_it, space_name.c_str()); + msg("Corrupted page " ULINTPF + " of tablespace %s was successfuly fixed.", + *page_it, space_name.c_str()); + } + } + } + m_spaces.swap(non_free_pages); + ut_a(!pthread_mutex_unlock(&m_mutex)); + ut_free(buf); +} + /* Simple datasink creation tracking...add datasinks in the reverse order you want them destroyed. */ #define XTRABACKUP_MAX_DATASINKS 10 @@ -377,11 +584,12 @@ xtrabackup_add_datasink(ds_ctxt_t *ds) datasinks[actual_datasinks] = ds; actual_datasinks++; } - -typedef void (*process_single_tablespace_func_t)(const char *dirname, const char *filname, bool is_remote); +typedef void (*process_single_tablespace_func_t)(const char *dirname, + const char *filname, + bool is_remote, + bool set_size); static dberr_t enumerate_ibd_files(process_single_tablespace_func_t callback); - /* ======== Datafiles iterator ======== */ struct datafiles_iter_t { fil_space_t *space; @@ -731,6 +939,7 @@ typedef struct { uint *count; pthread_mutex_t* count_mutex; os_thread_id_t id; + CorruptedPages *corrupted_pages; } data_thread_ctxt_t; /* ======== for option and variables ======== */ @@ -833,7 +1042,8 @@ enum options_xtrabackup OPT_LOCK_DDL_PER_TABLE, OPT_ROCKSDB_DATADIR, OPT_BACKUP_ROCKSDB, - OPT_XTRA_CHECK_PRIVILEGES + OPT_XTRA_CHECK_PRIVILEGES, + OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION }; struct my_option xb_client_options[]= { @@ -1230,6 +1440,17 @@ struct my_option xb_client_options[]= { " uses old (pre-4.1.1) protocol.", &opt_secure_auth, &opt_secure_auth, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + + {"log-innodb-page-corruption", OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION, + "Continue backup if innodb corrupted pages are found. The pages are " + "logged in " MB_CORRUPTED_PAGES_FILE + " and backup is finished with error. " + "--prepare will try to fix corrupted pages. 
If " MB_CORRUPTED_PAGES_FILE + " exists after --prepare in base backup directory, backup still contains " + "corrupted pages and can not be considered as consistent.", + &opt_log_innodb_page_corruption, &opt_log_innodb_page_corruption, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + #define MYSQL_CLIENT #include "sslopt-longopts.h" #undef MYSQL_CLIENT @@ -1512,7 +1733,8 @@ debug_sync_point(const char *name) static std::set<std::string> tables_for_export; -static void append_export_table(const char *dbname, const char *tablename, bool is_remote) +static void append_export_table(const char *dbname, const char *tablename, + bool is_remote, bool set_size) { if(dbname && tablename && !is_remote) { @@ -2542,7 +2764,8 @@ for full backup, pages filter for incremental backup, etc. @return FALSE on success and TRUE on error */ static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n, const char *dest_name, - const xb_write_filt_t &write_filter) + const xb_write_filt_t &write_filter, + CorruptedPages &corrupted_pages) { char dst_name[FN_REFLEN]; ds_file_t *dstfile = NULL; @@ -2607,7 +2830,8 @@ static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n, ut_a(write_filter.process != NULL); if (write_filter.init != NULL && - !write_filter.init(&write_filt_ctxt, dst_name, &cursor)) { + !write_filter.init(&write_filt_ctxt, dst_name, &cursor, + opt_log_innodb_page_corruption ? &corrupted_pages : NULL)) { msg (thread_n, "mariabackup: error: failed to initialize page write filter."); goto error; } @@ -2627,7 +2851,8 @@ static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n, } /* The main copy loop */ - while ((res = xb_fil_cur_read(&cursor)) == XB_FIL_CUR_SUCCESS) { + while ((res = xb_fil_cur_read(&cursor, corrupted_pages)) == + XB_FIL_CUR_SUCCESS) { if (!write_filter.process(&write_filt_ctxt, dstfile)) { goto error; } @@ -2912,6 +3137,21 @@ static os_thread_ret_t DECLARE_THREAD(io_watching_thread)(void*) } #ifndef DBUG_OFF +char *dbug_mariabackup_get_val(const char *event, const char *key) +{ + char envvar[FN_REFLEN]; + if (key) { + snprintf(envvar, sizeof(envvar), "%s_%s", event, key); + char *slash = strchr(envvar, '/'); + if (slash) + *slash = '_'; + } else { + strncpy(envvar, event, sizeof envvar - 1); + envvar[sizeof envvar - 1] = '\0'; + } + return getenv(envvar); +} + /* In debug mode, execute SQL statement that was passed via environment. To use this facility, you need to @@ -2924,35 +3164,15 @@ To use this facility, you need to for the variable) 3. 
start mariabackup with --dbug=+d,debug_mariabackup_events */ -static void dbug_mariabackup_event(const char *event,const char *key) +void dbug_mariabackup_event(const char *event,const char *key) { - char envvar[FN_REFLEN]; - if (key) { - snprintf(envvar, sizeof(envvar), "%s_%s", event, key); - char *slash = strchr(envvar, '/'); - if (slash) - *slash = '_'; - } else { - strncpy(envvar, event, sizeof envvar - 1); - envvar[sizeof envvar - 1] = '\0'; - } - char *sql = getenv(envvar); - if (sql) { + char *sql = dbug_mariabackup_get_val(event, key); + if (sql && *sql) { msg("dbug_mariabackup_event : executing '%s'", sql); xb_mysql_query(mysql_connection, sql, false, true); } - } -#define DBUG_MARIABACKUP_EVENT(A, B) DBUG_EXECUTE_IF("mariabackup_events", dbug_mariabackup_event(A,B);); -#define DBUG_MB_INJECT_CODE(EVENT, KEY, CODE) \ - DBUG_EXECUTE_IF("mariabackup_inject_code", {\ - char *env = getenv(EVENT); \ - if (env && !strcmp(env, KEY)) { CODE } \ - }) -#else -#define DBUG_MARIABACKUP_EVENT(A,B) -#define DBUG_MB_INJECT_CODE(EVENT, KEY, CODE) -#endif +#endif // DBUG_OFF /************************************************************************** Datafiles copying thread.*/ @@ -2965,6 +3185,7 @@ DECLARE_THREAD(data_copy_thread_func)( data_thread_ctxt_t *ctxt = (data_thread_ctxt_t *) arg; uint num = ctxt->num; fil_node_t* node; + ut_ad(ctxt->corrupted_pages); /* Initialize mysys thread-specific memory so we can @@ -2976,11 +3197,12 @@ DECLARE_THREAD(data_copy_thread_func)( while ((node = datafiles_iter_next(ctxt->it)) != NULL) { DBUG_MARIABACKUP_EVENT("before_copy", node->space->name); - DBUG_MB_INJECT_CODE("wait_innodb_redo_before_copy", node->space->name, + DBUG_EXECUTE_FOR_KEY("wait_innodb_redo_before_copy", node->space->name, backup_wait_for_lsn(get_current_lsn(mysql_connection));); /* copy the datafile */ if (xtrabackup_copy_datafile(node, num, NULL, - xtrabackup_incremental ? wf_incremental : wf_write_through)) + xtrabackup_incremental ? wf_incremental : wf_write_through, + *ctxt->corrupted_pages)) die("failed to copy datafile."); DBUG_MARIABACKUP_EVENT("after_copy", node->space->name); @@ -3115,15 +3337,22 @@ xb_new_datafile(const char *name, bool is_remote) } -static -void -xb_load_single_table_tablespace( - const char *dirname, - const char *filname, - bool is_remote) +/** Load tablespace. + +@param[in] dirname directory name of the tablespace to open +@param[in] filname file name of the tablespece to open +@param[in] is_remote true if tablespace file is .isl +@param[in] set_size true if we need to set tablespace size in pages explixitly. +If this parameter is set, the size and free pages limit will not be read +from page 0. +*/ +static void xb_load_single_table_tablespace(const char *dirname, + const char *filname, + bool is_remote, bool set_size) { ut_ad(srv_operation == SRV_OPERATION_BACKUP - || srv_operation == SRV_OPERATION_RESTORE_DELTA); + || srv_operation == SRV_OPERATION_RESTORE_DELTA + || srv_operation == SRV_OPERATION_RESTORE); /* Ignore .isl files on XtraBackup recovery. All tablespaces must be local. 
*/ if (is_remote && srv_operation == SRV_OPERATION_RESTORE_DELTA) { @@ -3171,13 +3400,12 @@ xb_load_single_table_tablespace( bool is_empty_file = file->exists() && file->is_empty_file(); if (err == DB_SUCCESS && file->space_id() != SRV_TMP_SPACE_ID) { - os_offset_t node_size = os_file_get_size(file->handle()); - os_offset_t n_pages; - - ut_a(node_size != (os_offset_t) -1); - - n_pages = node_size / page_size_t(file->flags()).physical(); - + os_offset_t n_pages = 0; + if (set_size) { + os_offset_t node_size = os_file_get_size(file->handle()); + ut_a(node_size != (os_offset_t) -1); + n_pages = node_size / page_size_t(file->flags()).physical(); + } space = fil_space_create( name, file->space_id(), file->flags(), FIL_TYPE_TABLESPACE, NULL/* TODO: crypt_data */); @@ -3205,6 +3433,27 @@ xb_load_single_table_tablespace( ut_free(name); } +static void xb_load_single_table_tablespace(const std::string &space_name, + bool set_size) +{ + std::string name(space_name); + bool is_remote= access((name + ".ibd").c_str(), R_OK) != 0; + const char *extension= is_remote ? ".isl" : ".ibd"; + name.append(extension); + char buf[FN_REFLEN]; + strncpy(buf, name.c_str(), sizeof buf - 1); + buf[sizeof buf - 1]= '\0'; + const char *dbname= buf; + char *p= strchr(buf, '/'); + if (p == 0) + die("Unexpected tablespace %s filename %s", space_name.c_str(), + name.c_str()); + ut_a(p); + *p= 0; + const char *tablename= p + 1; + xb_load_single_table_tablespace(dbname, tablename, is_remote, set_size); +} + /** Scan the database directories under the MySQL datadir, looking for .ibd files and determining the space id in each of them. @return DB_SUCCESS or error number */ @@ -3246,7 +3495,7 @@ static dberr_t enumerate_ibd_files(process_single_tablespace_func_t callback) bool is_ibd = !is_isl && ends_with(dbinfo.name,".ibd"); if (is_isl || is_ibd) { - (*callback)(NULL, dbinfo.name, is_isl); + (*callback)(NULL, dbinfo.name, is_isl, false); } } @@ -3303,7 +3552,7 @@ static dberr_t enumerate_ibd_files(process_single_tablespace_func_t callback) if (strlen(fileinfo.name) > 4) { bool is_isl= false; if (ends_with(fileinfo.name, ".ibd") || ((is_isl = ends_with(fileinfo.name, ".isl")))) - (*callback)(dbinfo.name, fileinfo.name, is_isl); + (*callback)(dbinfo.name, fileinfo.name, is_isl, false); } } @@ -4062,6 +4311,7 @@ static bool xtrabackup_backup_func() uint i; uint count; pthread_mutex_t count_mutex; + CorruptedPages corrupted_pages; data_thread_ctxt_t *data_threads; pthread_mutex_init(&backup_mutex, NULL); pthread_cond_init(&scanned_lsn_cond, NULL); @@ -4347,6 +4597,7 @@ fail_before_log_copying_thread_start: data_threads[i].num = i+1; data_threads[i].count = &count; data_threads[i].count_mutex = &count_mutex; + data_threads[i].corrupted_pages = &corrupted_pages; os_thread_create(data_copy_thread_func, data_threads + i, &data_threads[i].id); } @@ -4367,7 +4618,7 @@ fail_before_log_copying_thread_start: datafiles_iter_free(it); } - bool ok = backup_start(); + bool ok = backup_start(corrupted_pages); if (ok) { ok = xtrabackup_backup_low(); @@ -4384,6 +4635,9 @@ fail_before_log_copying_thread_start: } } + if (opt_log_innodb_page_corruption) + ok = corrupted_pages.print_to_file(MB_CORRUPTED_PAGES_FILE); + if (!ok) { goto fail; } @@ -4411,7 +4665,13 @@ fail_before_log_copying_thread_start: log_file_op = NULL; pthread_mutex_destroy(&backup_mutex); pthread_cond_destroy(&scanned_lsn_cond); - return(true); + if (opt_log_innodb_page_corruption && !corrupted_pages.empty()) { + msg("Error: corrupted innodb pages are found and logged to " + 
MB_CORRUPTED_PAGES_FILE " file"); + return false; + } + else + return(true); } @@ -4433,7 +4693,7 @@ FTWRL. This ensures consistent backup in presence of DDL. It is the responsibility of the prepare phase to deal with .new, .ren, and .del files. */ -void backup_fix_ddl(void) +void backup_fix_ddl(CorruptedPages &corrupted_pages) { std::set<std::string> new_tables; std::set<std::string> dropped_tables; @@ -4456,6 +4716,7 @@ void backup_fix_ddl(void) if (ddl_tracker.drops.find(id) != ddl_tracker.drops.end()) { dropped_tables.insert(name); + corrupted_pages.drop_space(id); continue; } @@ -4476,15 +4737,21 @@ void backup_fix_ddl(void) /* table was renamed, but we need a full copy of it because of optimized DDL. We emulate a drop/create.*/ dropped_tables.insert(name); + if (opt_log_innodb_page_corruption) + corrupted_pages.drop_space(id); new_tables.insert(new_name); } else { /* Renamed, and no optimized DDL*/ renamed_tables[name] = new_name; + if (opt_log_innodb_page_corruption) + corrupted_pages.rename_space(id, new_name); } } else if (has_optimized_ddl) { /* Table was recreated, or optimized DDL ran. In both cases we need a full copy in the backup.*/ new_tables.insert(name); + if (opt_log_innodb_page_corruption) + corrupted_pages.drop_space(id); } } @@ -4504,6 +4771,8 @@ void backup_fix_ddl(void) if (ddl_tracker.drops.find(id) == ddl_tracker.drops.end()) { dropped_tables.erase(name); new_tables.insert(name); + if (opt_log_innodb_page_corruption) + corrupted_pages.drop_space(id); } } @@ -4550,23 +4819,7 @@ void backup_fix_ddl(void) const char *space_name = iter->c_str(); if (check_if_skip_table(space_name)) continue; - std::string name(*iter); - bool is_remote = access((name + ".ibd").c_str(), R_OK) != 0; - const char *extension = is_remote ? ".isl" : ".ibd"; - name.append(extension); - char buf[FN_REFLEN]; - strncpy(buf, name.c_str(), sizeof buf - 1); - buf[sizeof buf - 1] = '\0'; - const char *dbname = buf; - char *p = strchr(buf, '/'); - if (p == 0) { - msg("Unexpected tablespace %s filename %s", space_name, name.c_str()); - ut_a(0); - } - ut_a(p); - *p = 0; - const char *tablename = p + 1; - xb_load_single_table_tablespace(dbname, tablename, is_remote); + xb_load_single_table_tablespace(*iter, false); } it = datafiles_iter_new(); @@ -4579,7 +4832,8 @@ void backup_fix_ddl(void) continue; std::string dest_name(node->space->name); dest_name.append(".new"); - xtrabackup_copy_datafile(node, 0, dest_name.c_str(), wf_write_through); + xtrabackup_copy_datafile(node, 0, dest_name.c_str(), wf_write_through, + corrupted_pages); } datafiles_iter_free(it); @@ -5492,6 +5746,7 @@ static ibool prepare_handle_del_files(const char *datadir, const char *db, const @return whether the operation succeeded */ static bool xtrabackup_prepare_func(char** argv) { + CorruptedPages corrupted_pages; char metadata_path[FN_REFLEN]; /* cd to target-dir */ @@ -5664,6 +5919,30 @@ static bool xtrabackup_prepare_func(char** argv) goto error_cleanup; } + corrupted_pages.read_from_file(MB_CORRUPTED_PAGES_FILE); + if (xtrabackup_incremental) + { + char inc_filename[FN_REFLEN]; + sprintf(inc_filename, "%s/%s", xtrabackup_incremental_dir, + MB_CORRUPTED_PAGES_FILE); + corrupted_pages.read_from_file(inc_filename); + } + if (!corrupted_pages.empty()) + corrupted_pages.zero_out_free_pages(); + if (corrupted_pages.empty()) + { + if (!xtrabackup_incremental && unlink(MB_CORRUPTED_PAGES_FILE) && + errno != ENOENT) + { + char errbuf[MYSYS_STRERROR_SIZE]; + my_strerror(errbuf, sizeof(errbuf), errno); + die("Error: unlink %s failed: %s", 
MB_CORRUPTED_PAGES_FILE, + errbuf); + } + } + else + corrupted_pages.print_to_file(MB_CORRUPTED_PAGES_FILE); + if (xtrabackup_rollback_xa) { /* Please do not merge MDEV-21168 fix in 10.5+ */ @@ -5775,7 +6054,7 @@ static bool xtrabackup_prepare_func(char** argv) error_cleanup: xb_filters_free(); - return ok && !ib::error::was_logged(); + return ok && !ib::error::was_logged() && corrupted_pages.empty(); } /************************************************************************** diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h index 2dbdd442f95..827def4be5b 100644 --- a/extra/mariabackup/xtrabackup.h +++ b/extra/mariabackup/xtrabackup.h @@ -25,6 +25,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include "datasink.h" #include "xbstream.h" #include "changed_page_bitmap.h" +#include <set> struct xb_delta_info_t { @@ -35,6 +36,32 @@ struct xb_delta_info_t ulint space_id; }; +class CorruptedPages +{ +public: + CorruptedPages(); + ~CorruptedPages(); + void add_page(const char *file_name, ulint space_id, ulint page_no); + bool contains(ulint space_id, ulint page_no) const; + void drop_space(ulint space_id); + void rename_space(ulint space_id, const std::string &new_name); + bool print_to_file(const char *file_name) const; + void read_from_file(const char *file_name); + bool empty() const; + void zero_out_free_pages(); + +private: + void add_page_no_lock(const char *space_name, ulint space_id, ulint page_no, + bool convert_space_name); + struct space_info_t { + std::string space_name; + std::set<ulint> pages; + }; + typedef std::map<ulint, space_info_t> container_t; + mutable pthread_mutex_t m_mutex; + container_t m_spaces; +}; + /* value of the --incremental option */ extern lsn_t incremental_lsn; @@ -110,6 +137,7 @@ extern my_bool opt_remove_original; extern my_bool opt_extended_validation; extern my_bool opt_encrypted_backup; extern my_bool opt_lock_ddl_per_table; +extern my_bool opt_log_innodb_page_corruption; extern char *opt_incremental_history_name; extern char *opt_incremental_history_uuid; diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm index 32250802815..473b21441e2 100644 --- a/mysql-test/lib/mtr_report.pm +++ b/mysql-test/lib/mtr_report.pm @@ -514,6 +514,10 @@ sub mtr_report_stats ($$$$) { # if a test case has to be retried it should have the result MTR_RES_FAILED in jUnit XML if ($test->{'result'} eq "MTR_RES_FAILED" || $test->{'retries'} > 0) { my $logcontents = $test->{'logfile-failed'} || $test->{'logfile'}; + # remove any double ] that would end the cdata + $logcontents =~ s/]]/\x{fffd}/g; + # replace wide characters that aren't allowed in XML 1.0 + $logcontents =~ s/[\x00-\x08\x0B\x0C\x0E-\x1F]/\x{fffd}/g; $xml_report .= qq(>\n\t\t\t<failure message="" type="MTR_RES_FAILED">\n<![CDATA[$logcontents]]>\n\t\t\t</failure>\n\t\t</testcase>\n); } elsif ($test->{'result'} eq "MTR_RES_SKIPPED" && $test->{'disable'}) { @@ -530,9 +534,9 @@ sub mtr_report_stats ($$$$) { # save to file my $xml_file = $::opt_xml_report; - open XML_FILE, ">", $xml_file or die "Cannot create file $xml_file: $!"; - print XML_FILE $xml_report; - close XML_FILE; + open (my $XML_UFILE, '>:encoding(UTF-8)', $xml_file) or die 'Cannot create file $xml_file: $!'; + print $XML_UFILE $xml_report; + close $XML_UFILE or warn "File close failed!"; } if (@$extra_warnings) diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result index 7de4253593f..1be7897d3af 100644 --- 
a/mysql-test/main/derived_cond_pushdown.result +++ b/mysql-test/main/derived_cond_pushdown.result @@ -10610,6 +10610,47 @@ a abc DROP VIEW v1; DROP TABLE t1; +# +# MDEV-19179: pushdown into UNION of aggregation selects whose +# corresponding columns have different names +# +create table t1 (a int); +insert into t1 values (3), (7), (1); +select * +from (select min(a) as x from t1 union all select max(a) as y from t1) t +where x>0; +x +1 +7 +explain extended select * +from (select min(a) as x from t1 union all select max(a) as y from t1) t +where x>0; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 6 100.00 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 3 100.00 +3 UNION t1 ALL NULL NULL NULL NULL 3 100.00 +Warnings: +Note 1003 /* select#1 */ select `t`.`x` AS `x` from (/* select#2 */ select min(`test`.`t1`.`a`) AS `x` from `test`.`t1` having `x` > 0 union all /* select#3 */ select max(`test`.`t1`.`a`) AS `x` from `test`.`t1` having `x` > 0) `t` where `t`.`x` > 0 +prepare stmt from "select * +from (select min(a) as x from t1 union all select max(a) as y from t1) t +where x>0"; +execute stmt; +x +1 +7 +execute stmt; +x +1 +7 +deallocate prepare stmt; +create view v1(m) as +select min(a) as x from t1 union all select max(a) as y from t1; +select * from v1 where m > 0; +m +1 +7 +drop view v1; +drop table t1; # End of 10.2 tests # # MDEV-14579: pushdown conditions into materialized views/derived tables diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test index c98330da07d..1b980fd3685 100644 --- a/mysql-test/main/derived_cond_pushdown.test +++ b/mysql-test/main/derived_cond_pushdown.test @@ -2185,6 +2185,34 @@ SELECT * FROM v1 WHERE IF( a REGEXP 'def', 'foo', a ) IN ('abc', 'foobar'); DROP VIEW v1; DROP TABLE t1; +--echo # +--echo # MDEV-19179: pushdown into UNION of aggregation selects whose +--echo # corresponding columns have different names +--echo # + +create table t1 (a int); +insert into t1 values (3), (7), (1); + +let $q= +select * +from (select min(a) as x from t1 union all select max(a) as y from t1) t +where x>0; + +eval $q; +eval explain extended $q; + +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +create view v1(m) as +select min(a) as x from t1 union all select max(a) as y from t1; +select * from v1 where m > 0; + +drop view v1; +drop table t1; + --echo # End of 10.2 tests --echo # diff --git a/mysql-test/main/lock_view.test b/mysql-test/main/lock_view.test index dd8809ab89d..4b1adac5be1 100644 --- a/mysql-test/main/lock_view.test +++ b/mysql-test/main/lock_view.test @@ -1,4 +1,5 @@ source include/not_embedded.inc; +source include/have_perfschema.inc; # # LOCK TABLES and privileges on views # diff --git a/mysql-test/main/mysqldump-system.test b/mysql-test/main/mysqldump-system.test index 1fc0a45b3dc..3efe8376e18 100644 --- a/mysql-test/main/mysqldump-system.test +++ b/mysql-test/main/mysqldump-system.test @@ -3,6 +3,10 @@ --source include/have_udf.inc --source include/platform.inc +if (!$AUTH_SOCKET_SO) { + --skip Need auth socket plugin +} + --echo # --echo # MDEV-23630: mysqldump to logically dump system tables --echo # @@ -18,6 +22,7 @@ create user USER; if (`SELECT CONVERT(@@VERSION_COMPILE_OS USING latin1) NOT IN ('Win32', 'Win64', 'Windows')`) { +--error 0,ER_PLUGIN_INSTALLED --eval install plugin /*M!100401 IF NOT EXISTS */ unix_socket soname '$AUTH_SOCKET_SO'; alter user USER identified via unix_socket; } 
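Stepping back from the test changes for a moment: the list of corrupted pages travels between --backup and --prepare as a plain text file, innodb_corrupted_pages. As the print_to_file/read_from_file code earlier in this diff shows, each tablespace occupies two lines — a "space_name space_id" header line followed by a space-separated list of page numbers. The snippet below is a minimal standalone reader for that layout; read_corrupted_pages is a hypothetical helper written for illustration, not the mariabackup code itself.

```cpp
#include <cstdint>
#include <fstream>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <stdexcept>
#include <string>

// Parse the innodb_corrupted_pages layout used by this merge:
//   odd lines:  "<space_name> <space_id>"
//   even lines: "<page_no> <page_no> ..."
std::map<std::string, std::set<uint32_t>>
read_corrupted_pages(const std::string &path)
{
  std::map<std::string, std::set<uint32_t>> result;
  std::ifstream in(path);
  std::string header, pages;
  while (std::getline(in, header) && std::getline(in, pages))
  {
    std::istringstream hdr(header);
    std::string space_name;
    uint32_t space_id;
    if (!(hdr >> space_name >> space_id))
      throw std::runtime_error("bad header line: " + header);
    std::istringstream body(pages);
    uint32_t page_no;
    while (body >> page_no)
      result[space_name].insert(page_no);
  }
  return result;
}

int main()
{
  // Example file contents (hypothetical):
  //   test/t1 5
  //   3 42
  for (const auto &space : read_corrupted_pages("innodb_corrupted_pages"))
    std::cout << space.first << ": " << space.second.size() << " page(s)\n";
}
```

In the actual prepare code the file is read from the base backup directory and, for incremental prepares, from the incremental directory as well, before zero_out_free_pages() decides which entries can be discarded.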
diff --git a/mysql-test/main/sp.result b/mysql-test/main/sp.result index 97fd48bc208..5ad68afb5ad 100644 --- a/mysql-test/main/sp.result +++ b/mysql-test/main/sp.result @@ -8469,8 +8469,25 @@ ERROR 22007: Incorrect integer value: 'y' for column ``.``.`a` at row 1 DROP TABLE t1; SET sql_mode=DEFAULT; # -# Start of 10.3 tests +# MDEV-24220: error when opening a table for the second call of SP # +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 VALUES (1,1),(2,2); +CREATE VIEW v1 AS SELECT MAX(a) as f FROM t1; +CREATE PROCEDURE p1() +BEGIN +SELECT * FROM v1; +END $ +CALL p1; +f +2 +ALTER TABLE t1 DROP a; +CALL p1; +ERROR HY000: View 'test.v1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them +DROP PROCEDURE p1; +DROP VIEW v1; +DROP TABLE t1; +#End of 10.2 tests # # MDEV-12007 Allow ROW variables as a cursor FETCH target # diff --git a/mysql-test/main/sp.test b/mysql-test/main/sp.test index 8cc1c352c4a..ed4d8d63524 100644 --- a/mysql-test/main/sp.test +++ b/mysql-test/main/sp.test @@ -10004,9 +10004,30 @@ DROP TABLE t1; SET sql_mode=DEFAULT; --echo # ---echo # Start of 10.3 tests +--echo # MDEV-24220: error when opening a table for the second call of SP --echo # +CREATE TABLE t1 (a INT, b INT); +INSERT INTO t1 VALUES (1,1),(2,2); +CREATE VIEW v1 AS SELECT MAX(a) as f FROM t1; +--delimiter $ +CREATE PROCEDURE p1() +BEGIN + SELECT * FROM v1; +END $ +--delimiter ; + +CALL p1; +ALTER TABLE t1 DROP a; +-- error ER_VIEW_INVALID +CALL p1; + +DROP PROCEDURE p1; +DROP VIEW v1; +DROP TABLE t1; + +--echo #End of 10.2 tests + --echo # --echo # MDEV-12007 Allow ROW variables as a cursor FETCH target --echo # diff --git a/mysql-test/main/xa.result b/mysql-test/main/xa.result index f37a3c36531..f02cb17b6ac 100644 --- a/mysql-test/main/xa.result +++ b/mysql-test/main/xa.result @@ -294,7 +294,7 @@ DROP TABLE t1; # # Bug#12352846 - TRANS_XA_START(THD*): # ASSERTION THD->TRANSACTION.XID_STATE.XID.IS_NULL() -# FAILED +# FAILED # CREATE TABLE t1 (a INT) ENGINE=InnoDB; CREATE TABLE t2 (a INT) ENGINE=InnoDB; @@ -345,6 +345,32 @@ connection default; XA END 'xid1'; XA ROLLBACK 'xid1'; DROP TABLE t1, t2, t3; +# +# MDEV 15532 XA: Assertion `!log->same_pk' failed in +# row_log_table_apply_delete +# +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1),(2); +connect con1,localhost,root,,test; +XA START 'xid'; +UPDATE t1 SET a = 5; +connection default; +SET innodb_lock_wait_timeout= 2, lock_wait_timeout= 2; +ALTER TABLE non_existing_table1; +ERROR 42S02: Table 'test.non_existing_table1' doesn't exist +ALTER TABLE t1 FORCE;; +connection con1; +ALTER TABLE non_existing_table2; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state +DELETE FROM t1 LIMIT 1; +connection default; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +connection con1; +XA END 'xid'; +XA ROLLBACK 'xid'; +DROP TABLE t1; +disconnect con1; +connection default; XA BEGIN 'xid'; CREATE TEMPORARY SEQUENCE s; ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state diff --git a/mysql-test/main/xa.test b/mysql-test/main/xa.test index ce8f3834b03..9b0b54d0405 100644 --- a/mysql-test/main/xa.test +++ b/mysql-test/main/xa.test @@ -390,7 +390,7 @@ DROP TABLE t1; --echo # --echo # Bug#12352846 - TRANS_XA_START(THD*): --echo # ASSERTION THD->TRANSACTION.XID_STATE.XID.IS_NULL() ---echo # FAILED +--echo # FAILED --echo # CREATE TABLE t1 (a INT) ENGINE=InnoDB; @@ -447,7 +447,7 @@ CREATE 
TABLE t1 (pk INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t2 (pk INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t2 VALUES (1),(2); CREATE TABLE t3 (i INT) ENGINE=InnoDB; - + XA BEGIN 'xid1'; REPLACE INTO t1 SELECT * FROM t2; @@ -476,6 +476,45 @@ XA END 'xid1'; XA ROLLBACK 'xid1'; DROP TABLE t1, t2, t3; +--echo # +--echo # MDEV 15532 XA: Assertion `!log->same_pk' failed in +--echo # row_log_table_apply_delete +--echo # + +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1),(2); + +--connect (con1,localhost,root,,test) + +XA START 'xid'; +UPDATE t1 SET a = 5; + +--connection default +SET innodb_lock_wait_timeout= 2, lock_wait_timeout= 2; + +--error ER_NO_SUCH_TABLE +ALTER TABLE non_existing_table1; + +--send ALTER TABLE t1 FORCE; + +--connection con1 +--error ER_XAER_RMFAIL + +ALTER TABLE non_existing_table2; +DELETE FROM t1 LIMIT 1; + +--connection default +--error ER_LOCK_WAIT_TIMEOUT +--reap + +# Cleanup +--connection con1 +XA END 'xid'; +XA ROLLBACK 'xid'; +DROP TABLE t1; +--disconnect con1 +connection default; + --source include/wait_until_count_sessions.inc # diff --git a/mysql-test/suite/galera/r/galera_as_slave_replay.result b/mysql-test/suite/galera/r/galera_as_slave_replay.result new file mode 100644 index 00000000000..760617be5f7 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_as_slave_replay.result @@ -0,0 +1,95 @@ +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2a; +connection node_1; +RESET MASTER; +connection node_2a; +START SLAVE; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb; +INSERT INTO t1 VALUES (1, 'a'); +INSERT INTO t1 VALUES (3, 'a'); +set binlog_format=STATEMENT; +SET AUTOCOMMIT=ON; +START TRANSACTION; +SELECT * FROM t1 FOR UPDATE; +f1 f2 +1 a +3 a +UPDATE t1 SET f2 = 'c' WHERE f1 > 1; +connection node_2a; +SET SESSION wsrep_sync_wait = 0; +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; +connection node_3; +SET SESSION wsrep_sync_wait = 0; +connection node_2a; +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; +connection node_3; +INSERT INTO test.t1 VALUES (2, 'b'); +connection node_1; +COMMIT; +connection node_2a; +SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; +SET GLOBAL debug_dbug = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +connection node_2a; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +connection node_1; +SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a'; +COUNT(*) = 1 +1 +SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c'; +COUNT(*) = 1 +1 +SELECT * FROM t1; +f1 f2 +1 a +3 c +connection node_2a; +set session wsrep_sync_wait=15; +set session wsrep_sync_wait=0; +wsrep_local_replays +1 +SELECT * FROM t1; +f1 f2 +1 a +2 b +3 c +SET DEBUG_SYNC = "RESET"; +# +# test phase with real abort +# +connection node_1; +set binlog_format=ROW; +insert into t1 values (4, 'd'); +SET AUTOCOMMIT=ON; +START TRANSACTION; +UPDATE t1 SET f2 = 'd' WHERE f1 = 3; +connection node_2a; +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; +connection node_3; +UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3; +connection node_1; +COMMIT; +connection node_2a; +SET GLOBAL debug_dbug = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +connection node_2a; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +SET 
DEBUG_SYNC = "RESET"; +connection node_2a; +set session wsrep_sync_wait=15; +SELECT COUNT(*) = 1 FROM test.t1 WHERE f2 = 'e'; +COUNT(*) = 1 +1 +set session wsrep_sync_wait=0; +STOP SLAVE; +RESET SLAVE; +DROP TABLE t1; +connection node_1; +DROP TABLE t1; +RESET MASTER; diff --git a/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result b/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result index 8a86dfd11e2..68b67977550 100644 --- a/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result +++ b/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result @@ -1,5 +1,5 @@ connection node_1; -CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) ENGINE=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; INSERT INTO t1 (f2) SELECT 1 FROM ten; diff --git a/mysql-test/suite/galera/t/galera_as_slave_replay.cnf b/mysql-test/suite/galera/t/galera_as_slave_replay.cnf new file mode 100644 index 00000000000..b1f9d7e9cbd --- /dev/null +++ b/mysql-test/suite/galera/t/galera_as_slave_replay.cnf @@ -0,0 +1,11 @@ +!include ../galera_2nodes_as_slave.cnf + +[mysqld] +binlog-format=row + +[mysqld.1] +wsrep_restart_slave=1 + +[mysqld.2] +wsrep_restart_slave=1 + diff --git a/mysql-test/suite/galera/t/galera_as_slave_replay.test b/mysql-test/suite/galera/t/galera_as_slave_replay.test new file mode 100644 index 00000000000..93f95349e6d --- /dev/null +++ b/mysql-test/suite/galera/t/galera_as_slave_replay.test @@ -0,0 +1,200 @@ +# +# This test tests the operation of transaction replay for async replication slave. +# If a potentially conflicting galera transaction arrives at +# just the right time during the commit and has lock conflict with async replication transaction +# applied by slave SQL thread, then the async replication transaction should either abort +# or rollback and replay (depending on the nature of lock conflict). +# + +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/galera_have_debug_sync.inc + +--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 + +--connection node_2a +--source include/galera_cluster.inc +#--source suite/galera/include/galera_have_debug_sync.inc + +# +# node 1 is native MariaDB server operating as async replication master +# +--connection node_1 +RESET MASTER; + +--connection node_2a +# +# count the number of wsrep replay's done in the node +# +--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` + + +# +# nodes 2 and 3 form a galera cluster, node 2 operates as slave for native MariaDB naster in node 1 +# +--disable_query_log +--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1; +--enable_query_log +START SLAVE; + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb; +INSERT INTO t1 VALUES (1, 'a'); +INSERT INTO t1 VALUES (3, 'a'); + +# +# use statement format replication to cause a false positive conflict with async replication transaction +# and galera replication. 
The conflict will be on GAP lock, and slave SQL thread should rollback +# and replay +# +set binlog_format=STATEMENT; + +SET AUTOCOMMIT=ON; +START TRANSACTION; + +SELECT * FROM t1 FOR UPDATE; +UPDATE t1 SET f2 = 'c' WHERE f1 > 1; + +--connection node_2a +# wait for create table and inserts to be replicated from master +SET SESSION wsrep_sync_wait = 0; +--let $wait_condition = SELECT COUNT(*) = 2 FROM test.t1; +--source include/wait_condition.inc + +# wait for create table and inserts to be replicated in cluster +--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 +--connection node_3 +SET SESSION wsrep_sync_wait = 0; +--let $wait_condition = SELECT COUNT(*) = 2 FROM test.t1; +--source include/wait_condition.inc + +--connection node_2a +# Block the future commit of async replication +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc + +# block also the applier before applying begins +SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; + +# +# now inject a conflicting insert from node 3, it will replicate with +# earlier seqno (than async transaction) and pause before applying in node 2 +# +--connection node_3 +INSERT INTO test.t1 VALUES (2, 'b'); + +# +# send the update from master, this will succeed here, beceuase of async replication. +# async replication will apply this in node 2 and pause before commit phase, +--connection node_1 +--error 0 +COMMIT; + +# Wait until async slave commit is blocked in node_2 +--connection node_2a +--source include/galera_wait_sync_point.inc + +# +# release the applier +# note: have to clear wsrep_apply_cb sync point first, as async replication will go for replay +# and as this sync point, after BF applier is released to progress +# +SET GLOBAL debug_dbug = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; + +# Unblock the async slave commit +--connection node_2a +--source include/galera_clear_sync_point.inc +--source include/galera_signal_sync_point.inc + +--connection node_1 + +SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a'; +SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c'; +SELECT * FROM t1; + +--connection node_2a + +# wsrep_local_replays has increased by 1 +set session wsrep_sync_wait=15; +--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` +set session wsrep_sync_wait=0; + +--disable_query_log +--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays; +--enable_query_log + +# +# replaying of async transaction should be effective, and row 3 having 'c' in f2 +# +SELECT * FROM t1; +SET DEBUG_SYNC = "RESET"; + +#******************************************************************************** +# test phase 2 +#******************************************************************************** + +--echo # +--echo # test phase with real abort +--echo # + +--connection node_1 + +set binlog_format=ROW; + +insert into t1 values (4, 'd'); + +SET AUTOCOMMIT=ON; +START TRANSACTION; + +UPDATE t1 SET f2 = 'd' WHERE f1 = 3; + +--connection node_2a +# wait for the last insert to be replicated from master +--let $wait_condition = SELECT COUNT(*) = 4 FROM test.t1; +--source include/wait_condition.inc + +# Block the commit +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc + +# block applier +SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; + +# Inject a conflicting update from node 3 +--connection node_3 +UPDATE test.t1 SET f2 = 'e' WHERE f1 = 3; + +# send 
the update from master +--connection node_1 +--error 0 +COMMIT; + +--connection node_2a + +# release the applier +SET GLOBAL debug_dbug = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; + + +# Unblock the async slave commit +--connection node_2a +--source include/galera_clear_sync_point.inc +--source include/galera_signal_sync_point.inc +SET DEBUG_SYNC = "RESET"; + +--connection node_2a + +set session wsrep_sync_wait=15; +SELECT COUNT(*) = 1 FROM test.t1 WHERE f2 = 'e'; +set session wsrep_sync_wait=0; + +STOP SLAVE; +RESET SLAVE; + +DROP TABLE t1; + +--connection node_1 +DROP TABLE t1; +RESET MASTER; diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test index 6f0de0a1f4a..a3e0dbcf36f 100644 --- a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test +++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test @@ -40,11 +40,19 @@ set wsrep_sync_wait=0; --let $wait_condition = SELECT COUNT(*) = 2 FROM child; --source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 2 FROM parent; +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 2 FROM grandparent; +--source include/wait_condition.inc DELETE FROM grandparent WHERE id = 1; --connection node_1 --let $wait_condition = SELECT COUNT(*) = 1 FROM child; --source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM parent; +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM grandparent; +--source include/wait_condition.inc SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1; SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1; diff --git a/mysql-test/suite/galera/t/galera_rsu_simple.test b/mysql-test/suite/galera/t/galera_rsu_simple.test index 5841dbd8006..aa6f25b6db6 100644 --- a/mysql-test/suite/galera/t/galera_rsu_simple.test +++ b/mysql-test/suite/galera/t/galera_rsu_simple.test @@ -8,6 +8,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1' +--source include/wait_condition.inc + SET SESSION wsrep_OSU_method = "RSU"; ALTER TABLE t1 ADD COLUMN f2 INTEGER; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; diff --git a/mysql-test/suite/galera/t/galera_toi_alter_auto_increment.test b/mysql-test/suite/galera/t/galera_toi_alter_auto_increment.test index 641d2101c80..793e87cb53e 100644 --- a/mysql-test/suite/galera/t/galera_toi_alter_auto_increment.test +++ b/mysql-test/suite/galera/t/galera_toi_alter_auto_increment.test @@ -7,7 +7,7 @@ --source include/have_innodb.inc --connection node_1 -CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB; +CREATE TABLE ten (f1 INTEGER NOT NULL PRIMARY KEY) ENGINE=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; @@ -83,6 +83,8 @@ SET GLOBAL auto_increment_offset = 1; CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; --connection node_2a +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1' +--source include/wait_condition.inc ALTER TABLE t1 AUTO_INCREMENT=100; diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result index 8c0678506f4..a01c70b2353 100644 --- a/mysql-test/suite/innodb/r/foreign_key.result +++ 
b/mysql-test/suite/innodb/r/foreign_key.result @@ -412,6 +412,7 @@ CREATE TABLE x AS SELECT * FROM t1; ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state connect con1,localhost,root,,test; SET foreign_key_checks= OFF, innodb_lock_wait_timeout= 1; +SET lock_wait_timeout=5; ALTER TABLE t1 ADD FOREIGN KEY f (a) REFERENCES t1 (pk), LOCK=EXCLUSIVE; ERROR HY000: Lock wait timeout exceeded; try restarting transaction disconnect con1; diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test index 380d4520d87..7756d48046f 100644 --- a/mysql-test/suite/innodb/t/foreign_key.test +++ b/mysql-test/suite/innodb/t/foreign_key.test @@ -414,6 +414,7 @@ INSERT INTO t1 VALUES (1,2); CREATE TABLE x AS SELECT * FROM t1; --connect (con1,localhost,root,,test) SET foreign_key_checks= OFF, innodb_lock_wait_timeout= 1; +SET lock_wait_timeout=5; --error ER_LOCK_WAIT_TIMEOUT ALTER TABLE t1 ADD FOREIGN KEY f (a) REFERENCES t1 (pk), LOCK=EXCLUSIVE;# Cleanup --disconnect con1 diff --git a/mysql-test/suite/mariabackup/include/corrupt-page.pl b/mysql-test/suite/mariabackup/include/corrupt-page.pl new file mode 100644 index 00000000000..d5c75dbde55 --- /dev/null +++ b/mysql-test/suite/mariabackup/include/corrupt-page.pl @@ -0,0 +1,146 @@ +use strict; +use warnings; +use Fcntl qw(:DEFAULT :seek); +do "$ENV{MTR_SUITE_DIR}/../innodb/include/crc32.pl"; + +sub corrupt_space_page_id { + my $file_name = shift; + my @pages_to_corrupt = @_; + + my $page_size = $ENV{INNODB_PAGE_SIZE}; + + sysopen my $ibd_file, $file_name, O_RDWR || die "Cannot open $file_name\n"; + sysread($ibd_file, $_, 38) || die "Cannot read $file_name\n"; + my $space = unpack("x[34]N", $_); + foreach my $page_no (@pages_to_corrupt) { + $space += 10; # generate wrong space id + sysseek($ibd_file, $page_size * $page_no, SEEK_SET) + || die "Cannot seek $file_name\n"; + + my $head = pack("Nx[18]", $page_no + 10); # generate wrong page number + my $body = chr(0) x ($page_size - 38 - 8); + + # Calculate innodb_checksum_algorithm=crc32 for the unencrypted page. 
+ # The following bytes are excluded: + # bytes 0..3 (the checksum is stored there) + # bytes 26..37 (encryption key version, post-encryption checksum, tablespace id) + # bytes $page_size-8..$page_size-1 (checksum, LSB of FIL_PAGE_LSN) + my $polynomial = 0x82f63b78; # CRC-32C + my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); + + my $page= pack("N",$ck).$head.pack("NNN",1,$ck,$space).$body.pack("Nx[4]",$ck); + die unless syswrite($ibd_file, $page, $page_size) == $page_size; + } + close $ibd_file; +} + +sub extend_space { + my $file_name = shift; + my $n_pages = shift; + + my $page_size = $ENV{INNODB_PAGE_SIZE}; + my $page; + + sysopen my $ibd_file, $file_name, O_RDWR || die "Cannot open $file_name\n"; + sysread($ibd_file, $page, $page_size) + || die "Cannot read $file_name\n"; + my $size = unpack("N", substr($page, 46, 4)); + my $packed_new_size = pack("N", $size + $n_pages); + substr($page, 46, 4, $packed_new_size); + + my $head = substr($page, 4, 22); + my $body = substr($page, 38, $page_size - 38 - 8); + my $polynomial = 0x82f63b78; # CRC-32C + my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); + my $packed_ck = pack("N", $ck); + substr($page, 0, 4, $packed_ck); + substr($page, $page_size - 8, 4, $packed_ck); + + sysseek($ibd_file, 0, SEEK_SET) + || die "Cannot seek $file_name\n"; + die unless syswrite($ibd_file, $page, $page_size) == $page_size; + + sysseek($ibd_file, 0, SEEK_END) + || die "Cannot seek $file_name\n"; + my $pages_size = $page_size*$n_pages; + my $pages = chr(0) x $pages_size; + die unless syswrite($ibd_file, $pages, $pages_size) == $pages_size; + close $ibd_file; + return $size; +} + +sub die_if_page_is_not_zero { + my $file_name = shift; + my @pages_to_check = @_; + + no locale; + my $page_size = $ENV{INNODB_PAGE_SIZE}; + my $zero_page = chr(0) x $page_size; + sysopen my $ibd_file, $file_name, O_RDWR || die "Cannot open $file_name\n"; + foreach my $page_no_to_check (@pages_to_check) { + sysseek($ibd_file, $page_size*$page_no_to_check, SEEK_SET) || + die "Cannot seek $file_name\n"; + sysread($ibd_file, my $read_page, $page_size) || + die "Cannot read $file_name\n"; + die "The page $page_no_to_check is not zero-filed in $file_name" + if ($read_page cmp $zero_page); + } + close $ibd_file; +} + +sub print_corrupted_pages_file { + my $file_in = shift; + my $file_out = shift; + open my $fh, '<', $file_in || die $!; + my $line_number = 0; + my $space = {}; + my @spaces; + while (my $line = <$fh>) { + ++$line_number; + if ($line_number & 1) { + my ($name, $id) = split(/ /, $line); + $space->{name} = $name; + } + else { + $space->{pages} = $line; + push (@spaces, $space); + $space = {}; + } + } + close $fh; + my @sorted_spaces = sort { $a->{name} cmp $b->{name} } @spaces; + open $fh, '>', $file_out || die $!; + foreach my $space (@sorted_spaces) { + print $fh $space->{name}; + print $fh "\n"; + print $fh $space->{pages}; + } + close $fh; +} + +sub append_corrupted_pages { + my $file_name = shift; + my $space_name = shift; + my $pages = shift; + open my $fh, '<', $file_name || die $!; + my $line_number = 0; + my $space_line; + while (my $line = <$fh>) { + ++$line_number; + if ($line_number & 1) { + my ($name, $id) = split(/ /, $line); + if ($name eq $space_name) { + $space_line = $line; + last; + } + } + } + close $fh; + if (not defined $space_line) { + die "Can't find requested space $space_name in file $file_name"; + } + open $fh, '>>', $file_name || die $!; + print $fh $space_line; + print $fh "$pages\n"; + close $fh; +} diff 
--git a/mysql-test/suite/mariabackup/incremental_ddl_during_backup.test b/mysql-test/suite/mariabackup/incremental_ddl_during_backup.test index 49e952eefea..1ee6038f072 100644 --- a/mysql-test/suite/mariabackup/incremental_ddl_during_backup.test +++ b/mysql-test/suite/mariabackup/incremental_ddl_during_backup.test @@ -22,7 +22,7 @@ INSERT into t1 values(1); --let after_copy_test_t2=DROP TABLE test.t2 --let after_copy_test_t3=CREATE INDEX a_i ON test.t3(i); --let before_copy_test_t10=DROP TABLE test.t10 ---let wait_innodb_redo_before_copy=test/t10 +--let wait_innodb_redo_before_copy_test_t10 = 1 # mariabackup should crash with assertion if MDEV-24026 is not fixed exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir --dbug=+d,mariabackup_events,mariabackup_inject_code; diff --git a/mysql-test/suite/mariabackup/log_page_corruption.opt b/mysql-test/suite/mariabackup/log_page_corruption.opt new file mode 100644 index 00000000000..c44c611ed60 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_page_corruption.opt @@ -0,0 +1 @@ +--innodb-checksum-algorithm=crc32 diff --git a/mysql-test/suite/mariabackup/log_page_corruption.result b/mysql-test/suite/mariabackup/log_page_corruption.result new file mode 100644 index 00000000000..13e373b2f70 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_page_corruption.result @@ -0,0 +1,141 @@ +######## +# Test for generating "innodb_corrupted_pages" file during full and +# incremental backup, including DDL processing +### + +CREATE TABLE t1_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t2_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t3(c INT) ENGINE INNODB; +CREATE TABLE t5_corrupted_to_rename(c INT) ENGINE INNODB; +CREATE TABLE t6_corrupted_to_drop(c INT) ENGINE INNODB; +CREATE TABLE t7_corrupted_to_alter(c INT) ENGINE INNODB; +CREATE TABLE t1_inc_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t2_inc_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t3_inc(c INT) ENGINE INNODB; +CREATE TABLE t5_inc_corrupted_to_rename(c INT) ENGINE INNODB; +CREATE TABLE t6_inc_corrupted_to_drop(c INT) ENGINE INNODB; +CREATE TABLE t7_inc_corrupted_to_alter(c INT) ENGINE INNODB; +INSERT INTO t1_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t2_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t3 VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t5_corrupted_to_rename VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t6_corrupted_to_drop VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t7_corrupted_to_alter VALUES (3), (4), (5), (6), (7), (8), (9); +# Corrupt tables +# Backup must fail due to page corruption +FOUND 1 /Database page corruption detected.*/ in backup.log +# "innodb_corrupted_pages" file must not exist +# Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option +FOUND 1 /Database page corruption detected.*/ in backup.log +--- "innodb_corrupted_pages" file content: --- +test/t1_corrupted +6 8 9 +test/t2_corrupted +7 8 10 +test/t4_corrupted_new +1 +test/t5_corrupted_to_rename_renamed +6 +test/t7_corrupted_to_alter +3 +------ +INSERT INTO t1_inc_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t2_inc_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t3_inc VALUES (3), (4), (5), (6), (7), (8), (9); +# Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option +--- "innodb_corrupted_pages" file content: --- 
+test/t1_corrupted +6 8 9 +test/t1_inc_corrupted +6 8 9 +test/t2_corrupted +7 8 10 +test/t2_inc_corrupted +7 8 10 +test/t4_inc_corrupted_new +1 +test/t5_corrupted_to_rename_renamed +6 +test/t5_inc_corrupted_to_rename_renamed +6 +test/t7_inc_corrupted_to_alter +3 +------ +# Check if corrupted pages were copied to delta files, and non-corrupted pages are not copied. +DROP TABLE t1_corrupted; +DROP TABLE t2_corrupted; +DROP TABLE t4_corrupted_new; +DROP TABLE t5_corrupted_to_rename_renamed; +DROP TABLE t7_corrupted_to_alter; +DROP TABLE t1_inc_corrupted; +DROP TABLE t2_inc_corrupted; +DROP TABLE t4_inc_corrupted_new; +DROP TABLE t5_inc_corrupted_to_rename_renamed; +DROP TABLE t7_inc_corrupted_to_alter; + +######## +# Test for --prepare with "innodb_corrupted_pages" file +### + +# Extend some tablespace and corrupt extended pages for full backup +# Full backup with --log-innodb-page-corruption +--- "innodb_corrupted_pages" file content: --- +test/t3 +6 8 +------ +# Extend some tablespace and corrupt extended pages for incremental backup +# Incremental backup --log-innodb-page-corruption +--- "innodb_corrupted_pages" file content: --- +test/t3 +6 8 +test/t3_inc +6 8 +------ +# Full backup prepare +# "innodb_corrupted_pages" file must not exist after successful prepare +FOUND 1 /was successfuly fixed.*/ in backup.log +# Check that fixed pages are zero-filled +# Incremental backup prepare +# "innodb_corrupted_pages" file must not exist after successful prepare +# do not remove "innodb_corrupted_pages" in incremental dir +FOUND 1 /was successfuly fixed.*/ in backup.log +# Check that fixed pages are zero-filled +# shutdown server +# remove datadir +# xtrabackup move back +# restart server +SELECT * FROM t3; +c +3 +4 +5 +6 +7 +8 +9 +SELECT * FROM t3_inc; +c +3 +4 +5 +6 +7 +8 +9 +# Test the case when not all corrupted pages are fixed + +# Add some fake corrupted pages +# Full backup prepare +FOUND 1 /Error: corrupted page.*/ in backup.log +--- "innodb_corrupted_pages" file content: --- +test/t3 +3 +------ +# Incremental backup prepare +FOUND 1 /Error: corrupted page.*/ in backup.log +--- "innodb_corrupted_pages" file content: --- +test/t3 +3 +------ +DROP TABLE t3; +DROP TABLE t3_inc; diff --git a/mysql-test/suite/mariabackup/log_page_corruption.test b/mysql-test/suite/mariabackup/log_page_corruption.test new file mode 100644 index 00000000000..e9419687288 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_page_corruption.test @@ -0,0 +1,426 @@ +--source include/have_debug.inc + +--echo ######## +--echo # Test for generating "innodb_corrupted_pages" file during full and +--echo # incremental backup, including DDL processing +--echo ### +--echo + +CREATE TABLE t1_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t2_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t3(c INT) ENGINE INNODB; +CREATE TABLE t5_corrupted_to_rename(c INT) ENGINE INNODB; +CREATE TABLE t6_corrupted_to_drop(c INT) ENGINE INNODB; +CREATE TABLE t7_corrupted_to_alter(c INT) ENGINE INNODB; + +CREATE TABLE t1_inc_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t2_inc_corrupted(c INT) ENGINE INNODB; +CREATE TABLE t3_inc(c INT) ENGINE INNODB; +CREATE TABLE t5_inc_corrupted_to_rename(c INT) ENGINE INNODB; +CREATE TABLE t6_inc_corrupted_to_drop(c INT) ENGINE INNODB; +CREATE TABLE t7_inc_corrupted_to_alter(c INT) ENGINE INNODB; + +# Fill tables with several pages +INSERT INTO t1_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t2_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t3 VALUES (3), (4), (5), (6), (7), 
(8), (9); +INSERT INTO t5_corrupted_to_rename VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t6_corrupted_to_drop VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t7_corrupted_to_alter VALUES (3), (4), (5), (6), (7), (8), (9); + +--let MYSQLD_DATADIR=`select @@datadir` +--let INNODB_PAGE_SIZE=`select @@innodb_page_size` + +--source include/shutdown_mysqld.inc +--echo # Corrupt tables +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +my $schema = "$ENV{MYSQLD_DATADIR}/test"; + +my $last_page_no = extend_space("$schema/t1_corrupted.ibd", 4); +corrupt_space_page_id("$schema/t1_corrupted.ibd", + $last_page_no, $last_page_no + 2, $last_page_no + 3); + +$last_page_no = extend_space("$schema/t2_corrupted.ibd", 5); +corrupt_space_page_id("$schema/t2_corrupted.ibd", + $last_page_no + 1, $last_page_no + 2, $last_page_no + 4); + +$last_page_no = extend_space("$schema/t5_corrupted_to_rename.ibd", 1); +corrupt_space_page_id("$schema/t5_corrupted_to_rename.ibd", $last_page_no); + +$last_page_no = extend_space("$schema/t6_corrupted_to_drop.ibd", ); +corrupt_space_page_id("$schema/t6_corrupted_to_drop.ibd", $last_page_no); +EOF +--source include/start_mysqld.inc + +--let targetdir=$MYSQLTEST_VARDIR/tmp/backup +--let $backuplog=$MYSQLTEST_VARDIR/tmp/backup.log +--let corrupted_pages_file = $targetdir/innodb_corrupted_pages +--let corrupted_pages_file_filt = $MYSQLTEST_VARDIR/tmp/innodb_corrupted_pages_filt +--let perl_result_file=$MYSQLTEST_VARDIR/tmp/perl_result + +--echo # Backup must fail due to page corruption +--disable_result_log +--error 1 +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir > $backuplog; +--enable_result_log + +--let SEARCH_PATTERN=Database page corruption detected.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc +--echo # "innodb_corrupted_pages" file must not exist +--error 1 +--file_exists $corrupted_pages_file +--rmdir $targetdir + +--let after_load_tablespaces=CREATE TABLE test.t4_corrupted_new ENGINE=INNODB SELECT UUID() from test.seq_1_to_10 +--let add_corrupted_page_for_test_t4_corrupted_new=1 +--let after_copy_test_t5_corrupted_to_rename=RENAME TABLE test.t5_corrupted_to_rename TO test.t5_corrupted_to_rename_renamed +--let after_copy_test_t6_corrupted_to_drop=DROP TABLE test.t6_corrupted_to_drop +--let after_copy_test_t7_corrupted_to_alter=ALTER TABLE test.t7_corrupted_to_alter ADD COLUMN (d INT) +--let add_corrupted_page_for_test_t7_corrupted_to_alter=3 + +--echo # Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option +--disable_result_log +--error 1 +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$targetdir --dbug=+d,mariabackup_events,mariabackup_inject_code > $backuplog +--enable_result_log + +--let SEARCH_PATTERN=Database page corruption detected.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc +--echo --- "innodb_corrupted_pages" file content: --- +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +print_corrupted_pages_file($ENV{corrupted_pages_file}, + $ENV{corrupted_pages_file_filt}); +EOF +--cat_file $corrupted_pages_file_filt +--echo ------ +--let after_load_tablespaces= +--let add_corrupted_page_for_test_t4_corrupted_new= +--let after_copy_test_t5_corrupted_to_rename= +--let after_copy_test_t6_corrupted_to_drop= +--let after_copy_test_t7_corrupted_to_alter= +--let add_corrupted_page_for_test_t7_corrupted_to_alter= +# 
Fill tables for incremental backup with several pages +INSERT INTO t1_inc_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t2_inc_corrupted VALUES (3), (4), (5), (6), (7), (8), (9); +INSERT INTO t3_inc VALUES (3), (4), (5), (6), (7), (8), (9); + +--source include/shutdown_mysqld.inc +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +my $schema="$ENV{MYSQLD_DATADIR}/test"; + +open(my $fh, '>', $ENV{perl_result_file}) or die $!; + +my $last_page_no = extend_space("$schema/t1_inc_corrupted.ibd", 4); +corrupt_space_page_id("$schema/t1_inc_corrupted.ibd", + $last_page_no, $last_page_no + 2, $last_page_no + 3); +print $fh "$last_page_no\n"; + +$last_page_no = extend_space("$schema/t2_inc_corrupted.ibd", 5); +corrupt_space_page_id("$schema/t2_inc_corrupted.ibd", + $last_page_no + 1, $last_page_no + 2, $last_page_no + 4); +print $fh "$last_page_no\n"; + +$last_page_no = extend_space("$schema/t5_inc_corrupted_to_rename.ibd", 1); +corrupt_space_page_id("$schema/t5_inc_corrupted_to_rename.ibd", $last_page_no); +print $fh "$last_page_no\n"; + +$last_page_no = extend_space("$schema/t6_inc_corrupted_to_drop.ibd", ); +corrupt_space_page_id("$schema/t6_inc_corrupted_to_drop.ibd", $last_page_no); + +close $fh; +EOF +--source include/start_mysqld.inc + +--let incdir=$MYSQLTEST_VARDIR/tmp/backup_inc + +--let after_load_tablespaces=CREATE TABLE test.t4_inc_corrupted_new ENGINE=INNODB SELECT UUID() from test.seq_1_to_10 +--let add_corrupted_page_for_test_t4_inc_corrupted_new=1 +--let after_copy_test_t5_inc_corrupted_to_rename=RENAME TABLE test.t5_inc_corrupted_to_rename TO test.t5_inc_corrupted_to_rename_renamed +--let after_copy_test_t6_inc_corrupted_to_drop=DROP TABLE test.t6_inc_corrupted_to_drop +--let after_copy_test_t7_inc_corrupted_to_alter=ALTER TABLE test.t7_inc_corrupted_to_alter ADD COLUMN (d INT) +--let add_corrupted_page_for_test_t7_inc_corrupted_to_alter=3 + +--echo # Backup must fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option +--disable_result_log +--error 1 +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$incdir --incremental-basedir=$targetdir --dbug=+d,mariabackup_events,mariabackup_inject_code > $backuplog +--disable_result_log + +--let after_load_tablespaces= +--let add_corrupted_page_for_test_t4_inc_corrupted_new= +--let after_copy_test_t5_inc_corrupted_to_rename= +--let after_copy_test_t6_inc_corrupted_to_drop= +--let after_copy_test_t7_inc_corrupted_to_alter= +--let add_corrupted_page_for_test_t7_inc_corrupted_to_alter= + +--let SEARCH_PATTERN=Database page corruption detected.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc +--let corrupted_pages_file = $incdir/innodb_corrupted_pages +--echo --- "innodb_corrupted_pages" file content: --- +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +print_corrupted_pages_file($ENV{corrupted_pages_file}, + $ENV{corrupted_pages_file_filt}); +EOF +--cat_file $corrupted_pages_file_filt +--echo ------ + +--echo # Check if corrupted pages were copied to delta files, and non-corrupted pages are not copied. 
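The Perl check that follows relies on the layout of the incremental .delta header page: 4-byte big-endian slots, with slot 0 holding a magic value, the following slots holding the numbers of the pages stored in the delta, and 0xFFFFFFFF marking the end of the list (an empty delta has it in slot 1). A minimal sketch under those assumptions, not part of the patch; list_delta_pages is a hypothetical helper:

use strict;
use warnings;

sub list_delta_pages {
    my ($delta_file, $page_size) = @_;
    open(my $fh, '<:raw', $delta_file) or die "Cannot open $delta_file: $!";
    (read($fh, my $header, $page_size) // 0) == $page_size
        or die "Short read on $delta_file\n";
    close $fh;
    my @slots = unpack("N*", $header);
    shift @slots;                        # slot 0 holds the magic value
    my @pages;
    for my $page_no (@slots) {
        last if $page_no == 0xFFFFFFFF;  # assumed end-of-list marker
        push @pages, $page_no;
    }
    return @pages;
}

# Hypothetical usage against one of the delta files checked below:
# my @pages = list_delta_pages("$ENV{incdir}/test/t1_corrupted.ibd.delta",
#                              $ENV{INNODB_PAGE_SIZE});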
+perl; +use strict; +use warnings; +my $schema = "$ENV{incdir}/test"; + +open(my $fh, '<', $ENV{perl_result_file}) or die $!; + +my $last_page_no = <$fh>; +die_if_no_pages("$schema/t1_corrupted.ibd.delta", + $last_page_no, $last_page_no + 2, $last_page_no + 3); + +$last_page_no = <$fh>; +die_if_no_pages("$schema/t2_corrupted.ibd.delta", + $last_page_no + 1, $last_page_no + 2, $last_page_no + 4); + +$last_page_no = <$fh>; +die_if_no_pages("$schema/t5_corrupted_to_rename_renamed.ibd.delta", + $last_page_no); + +close $fh; + +die_if_not_empty("$schema/t3.ibd.delta"); + +sub read_first_page_from_delta { + my $file_name = shift; + my $pages_count = shift; + + open my $file, '<:raw', $file_name || die "Cannot open $file_name\n"; + read $file, my $buffer, $pages_count*4 || die "Cannot read $file_name\n"; + close $file; + + return unpack("N[$pages_count]", $buffer); +} + +sub die_if_no_pages { + my $file_name = shift; + my @check_pages = @_; + my @read_pages = + read_first_page_from_delta($file_name, scalar(@check_pages) + 1); + for (my $i = 1; $i < @check_pages + 1; ++$i) { + my $check_page_no = $check_pages[$i - 1]; + die "Corrupted page $check_page_no was not copied to $file_name." + if ($i >= @read_pages || $read_pages[$i] != $check_page_no); + } +} + +sub die_if_not_empty { + my $file_name = shift; + my ($magic, $full) = read_first_page_from_delta($file_name, 2); + die "Delta $file_name must be empty." + if ($full != 0xFFFFFFFF); +} +EOF +--rmdir $incdir +--rmdir $targetdir + +DROP TABLE t1_corrupted; +DROP TABLE t2_corrupted; +DROP TABLE t4_corrupted_new; +DROP TABLE t5_corrupted_to_rename_renamed; +DROP TABLE t7_corrupted_to_alter; +DROP TABLE t1_inc_corrupted; +DROP TABLE t2_inc_corrupted; +DROP TABLE t4_inc_corrupted_new; +DROP TABLE t5_inc_corrupted_to_rename_renamed; +DROP TABLE t7_inc_corrupted_to_alter; + +--echo +--echo ######## +--echo # Test for --prepare with "innodb_corrupted_pages" file +--echo ### +--echo + +--echo # Extend some tablespace and corrupt extended pages for full backup +--source include/shutdown_mysqld.inc +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +my $schema="$ENV{MYSQLD_DATADIR}/test"; +my $last_page_no = extend_space("$schema/t3.ibd", 3); +corrupt_space_page_id("$schema/t3.ibd", $last_page_no, $last_page_no + 2); +open(my $fh, '>', $ENV{perl_result_file}) or die $!; +print $fh "$last_page_no\n"; +close $fh; +EOF +--source include/start_mysqld.inc + +--echo # Full backup with --log-innodb-page-corruption +--disable_result_log +--error 1 +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$targetdir +--enable_result_log +--let corrupted_pages_file = $targetdir/innodb_corrupted_pages +--echo --- "innodb_corrupted_pages" file content: --- +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +print_corrupted_pages_file($ENV{corrupted_pages_file}, + $ENV{corrupted_pages_file_filt}); +EOF +--cat_file $corrupted_pages_file_filt +--echo ------ + +--echo # Extend some tablespace and corrupt extended pages for incremental backup +--source include/shutdown_mysqld.inc +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +my $schema="$ENV{MYSQLD_DATADIR}/test"; +my $last_page_no = extend_space("$schema/t3_inc.ibd", 3); +corrupt_space_page_id("$schema/t3_inc.ibd", $last_page_no, $last_page_no + 2); +open(my $fh, '>>', $ENV{perl_result_file}) or die $!; +print $fh "$last_page_no"; +close $fh; +EOF +--source include/start_mysqld.inc + +--echo # Incremental backup --log-innodb-page-corruption 
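For reference, the innodb_corrupted_pages file produced by --log-innodb-page-corruption stores two lines per tablespace, which is the layout print_corrupted_pages_file and append_corrupted_pages in corrupt-page.pl assume: a "space_name space_id" line followed by a line with the corrupted page numbers. A minimal sketch that parses it under that assumption, not part of the patch; read_corrupted_pages is a hypothetical helper:

use strict;
use warnings;

sub read_corrupted_pages {
    my ($file_name) = @_;
    open(my $fh, '<', $file_name) or die "Cannot open $file_name: $!";
    my %spaces;
    while (my $name_line = <$fh>) {
        my $pages_line = <$fh>;
        last unless defined $pages_line;
        chomp($name_line, $pages_line);
        my ($name, $id) = split / /, $name_line;
        $spaces{$name} = { id => $id, pages => [ split / /, $pages_line ] };
    }
    close $fh;
    return \%spaces;
}

# Hypothetical usage:
# my $spaces = read_corrupted_pages("$ENV{targetdir}/innodb_corrupted_pages");
# print "@{$spaces->{'test/t3'}{pages}}\n" if exists $spaces->{'test/t3'};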
+--disable_result_log +--error 1 +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --log-innodb-page-corruption --target-dir=$incdir --incremental-basedir=$targetdir --dbug=+d,mariabackup_events,mariabackup_inject_code > $backuplog +--disable_result_log +--let corrupted_pages_file = $incdir/innodb_corrupted_pages +--echo --- "innodb_corrupted_pages" file content: --- +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +print_corrupted_pages_file($ENV{corrupted_pages_file}, + $ENV{corrupted_pages_file_filt}); +EOF +--cat_file $corrupted_pages_file_filt +--echo ------ + +--let targetdir2=$targetdir-2 +--let incdir2=$incdir-2 +perl; +use lib "lib"; +use My::Handles { suppress_init_messages => 1 }; +use My::File::Path; +copytree($ENV{'targetdir'}, $ENV{'targetdir2'}); +copytree($ENV{'incdir'}, $ENV{'incdir2'}); +EOF + +--echo # Full backup prepare +--disable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir > $backuplog; +--enable_result_log + +--echo # "innodb_corrupted_pages" file must not exist after successful prepare +--error 1 +--file_exists $targetdir/innodb_corrupted_pages +--let SEARCH_PATTERN=was successfuly fixed.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc + +--echo # Check that fixed pages are zero-filled +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +open(my $fh, '<', $ENV{perl_result_file}) or die $!; +my $last_page_no = <$fh>; +close $fh; +my $schema = "$ENV{targetdir}/test"; +die_if_page_is_not_zero("$schema/t3.ibd", $last_page_no, $last_page_no + 2); +EOF + +--echo # Incremental backup prepare +--disable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir --incremental-dir=$incdir > $backuplog; +--enable_result_log + +--echo # "innodb_corrupted_pages" file must not exist after successful prepare +--error 1 +--file_exists $targetdir/innodb_corrupted_pages +--echo # do not remove "innodb_corrupted_pages" in incremental dir +--file_exists $incdir/innodb_corrupted_pages +--let SEARCH_PATTERN=was successfuly fixed.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc + +--echo # Check that fixed pages are zero-filled +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +open(my $fh, '<', $ENV{perl_result_file}) or die $!; +my $last_page_no_full = <$fh>; +my $last_page_no_inc = <$fh>; +close $fh; +my $schema = "$ENV{targetdir}/test"; +die_if_page_is_not_zero("$schema/t3.ibd", + $last_page_no_full, $last_page_no_full + 2); +die_if_page_is_not_zero("$schema/t3_inc.ibd", + $last_page_no_inc, $last_page_no_inc + 2); +EOF + +--source include/restart_and_restore.inc + +SELECT * FROM t3; +SELECT * FROM t3_inc; + +--echo # Test the case when not all corrupted pages are fixed +--echo +--echo # Add some fake corrupted pages +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +append_corrupted_pages( + "$ENV{targetdir2}/innodb_corrupted_pages", 'test/t3', '3 4'); +append_corrupted_pages( + "$ENV{incdir2}/innodb_corrupted_pages", 'test/t3_inc', '4 5'); +EOF + +--echo # Full backup prepare +--disable_result_log +--error 1 +exec $XTRABACKUP --prepare --target-dir=$targetdir2 > $backuplog; +--enable_result_log + +--let SEARCH_PATTERN=Error: corrupted page.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc +--let corrupted_pages_file = $targetdir2/innodb_corrupted_pages +--echo --- "innodb_corrupted_pages" file content: --- +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +print_corrupted_pages_file($ENV{corrupted_pages_file}, + 
$ENV{corrupted_pages_file_filt}); +EOF +--cat_file $corrupted_pages_file_filt +--echo ------ + +--echo # Incremental backup prepare +--disable_result_log +--error 1 +exec $XTRABACKUP --prepare --target-dir=$targetdir2 --incremental-dir=$incdir2 > $backuplog; +--enable_result_log + +--let SEARCH_PATTERN=Error: corrupted page.* +--let SEARCH_FILE=$backuplog +--source include/search_pattern_in_file.inc +--let corrupted_pages_file = $targetdir2/innodb_corrupted_pages +--echo --- "innodb_corrupted_pages" file content: --- +perl; +do "$ENV{MTR_SUITE_DIR}/include/corrupt-page.pl"; +print_corrupted_pages_file($ENV{corrupted_pages_file}, + $ENV{corrupted_pages_file_filt}); +EOF +--cat_file $corrupted_pages_file_filt +--echo ------ + +DROP TABLE t3; +DROP TABLE t3_inc; +--remove_file $backuplog +--remove_file $perl_result_file +--remove_file $corrupted_pages_file_filt +--rmdir $targetdir +--rmdir $targetdir2 +--rmdir $incdir +--rmdir $incdir2 diff --git a/mysql-test/suite/roles/show_grants.result b/mysql-test/suite/roles/show_grants.result index 634fab34131..048397bb290 100644 --- a/mysql-test/suite/roles/show_grants.result +++ b/mysql-test/suite/roles/show_grants.result @@ -146,3 +146,18 @@ drop role test_role2; delete from mysql.roles_mapping where Role='test_role1'; delete from mysql.roles_mapping where Role='test_role2'; flush privileges; +# +# MDEV-24289: show grants missing with grant option +# +create role anel; +GRANT SELECT, UPDATE, DELETE, ALTER ON *.* TO 'anel'; +SHOW GRANTS for 'anel'; +Grants for anel +GRANT SELECT, UPDATE, DELETE, ALTER ON *.* TO `anel` +create role MariaDB_admin; +GRANT SELECT, UPDATE, DELETE, ALTER ON *.* TO 'MariaDB_admin' WITH GRANT OPTION; +SHOW GRANTS for 'MariaDB_admin'; +Grants for MariaDB_admin +GRANT SELECT, UPDATE, DELETE, ALTER ON *.* TO `MariaDB_admin` WITH GRANT OPTION +drop role MariaDB_admin; +drop role anel; diff --git a/mysql-test/suite/roles/show_grants.test b/mysql-test/suite/roles/show_grants.test index 9c15d8b8b2b..fc2165ac53b 100644 --- a/mysql-test/suite/roles/show_grants.test +++ b/mysql-test/suite/roles/show_grants.test @@ -88,3 +88,16 @@ drop role test_role2; delete from mysql.roles_mapping where Role='test_role1'; delete from mysql.roles_mapping where Role='test_role2'; flush privileges; + +--echo # +--echo # MDEV-24289: show grants missing with grant option +--echo # +create role anel; +GRANT SELECT, UPDATE, DELETE, ALTER ON *.* TO 'anel'; +SHOW GRANTS for 'anel'; + +create role MariaDB_admin; +GRANT SELECT, UPDATE, DELETE, ALTER ON *.* TO 'MariaDB_admin' WITH GRANT OPTION; +SHOW GRANTS for 'MariaDB_admin'; +drop role MariaDB_admin; +drop role anel; diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index 7638627c1ce..755ecc01bc1 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -399,6 +399,14 @@ then MODULE="rsync_sst" RSYNC_PID="$WSREP_SST_OPT_DATA/$MODULE.pid" + # give some time for lingering rsync from previous SST to complete + check_round=0 + while check_pid $RSYNC_PID && [ $check_round -lt 10 ] + do + wsrep_log_info "lingering rsync daemon found at startup, waiting for it to exit" + check_round=$(( check_round + 1 )) + sleep 1 + done if check_pid $RSYNC_PID then diff --git a/sql/item.h b/sql/item.h index c3675adb74f..f161b55e950 100644 --- a/sql/item.h +++ b/sql/item.h @@ -816,6 +816,11 @@ public: void set_name_no_truncate(THD *thd, const char *str, uint length, CHARSET_INFO *cs); void init_make_send_field(Send_field *tmp_field,enum enum_field_types type); + void share_name_with(const Item 
*item) + { + name= item->name; + is_autogenerated_name= item->is_autogenerated_name; + } virtual void cleanup(); virtual void make_send_field(THD *thd, Send_field *field); diff --git a/sql/log_event.cc b/sql/log_event.cc index 4792a2c9f0e..c7fc6a04f64 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -7528,10 +7528,10 @@ error: if (thd->transaction_rollback_request) { trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } else if (! thd->in_multi_stmt_transaction_mode()) - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); else thd->mdl_context.release_statement_locks(); @@ -9008,7 +9008,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) "COMMIT /* implicit, from Xid_log_event */"); thd->variables.option_bits&= ~OPTION_GTID_BEGIN; res= trans_commit(thd); /* Automatically rolls back on error. */ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); #ifdef WITH_WSREP if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); diff --git a/sql/mdl.cc b/sql/mdl.cc index 8d6780671d1..952648ce2d8 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -2845,6 +2845,9 @@ void MDL_context::rollback_to_savepoint(const MDL_savepoint &mdl_savepoint) void MDL_context::release_transactional_locks() { DBUG_ENTER("MDL_context::release_transactional_locks"); + /* Fail if there are active transactions */ + DBUG_ASSERT(!(current_thd->server_status & + (SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY))); release_locks_stored_before(MDL_STATEMENT, NULL); release_locks_stored_before(MDL_TRANSACTION, NULL); DBUG_VOID_RETURN; diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index a8b39d6d15b..8408025d389 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -433,7 +433,7 @@ rpl_slave_state::truncate_state_table(THD *thd) close_thread_tables(thd); ha_commit_trans(thd, TRUE); } - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } reenable_binlog(thd); @@ -864,7 +864,7 @@ end: } else { - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); #ifdef HAVE_REPLICATION rpl_group_info::pending_gtid_deletes_free(delete_list); #endif diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index 597a357e4e2..442d28fceb3 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -68,34 +68,34 @@ injector::transaction::~transaction() */ int injector::transaction::commit() { - DBUG_ENTER("injector::transaction::commit()"); - int error= m_thd->binlog_flush_pending_rows_event(true); - /* - Cluster replication does not preserve statement or - transaction boundaries of the master. Instead, a new - transaction on replication slave is started when a new GCI - (global checkpoint identifier) is issued, and is committed - when the last event of the check point has been received and - processed. This ensures consistency of each cluster in - cluster replication, and there is no requirement for stronger - consistency: MySQL replication is asynchronous with other - engines as well. - - A practical consequence of that is that row level replication - stream passed through the injector thread never contains - COMMIT events. - Here we should preserve the server invariant that there is no - outstanding statement transaction when the normal transaction - is committed by committing the statement transaction - explicitly. 
- */ - trans_commit_stmt(m_thd); - if (!trans_commit(m_thd)) - { - close_thread_tables(m_thd); - m_thd->mdl_context.release_transactional_locks(); - } - DBUG_RETURN(error); + DBUG_ENTER("injector::transaction::commit()"); + int error= m_thd->binlog_flush_pending_rows_event(true); + /* + Cluster replication does not preserve statement or + transaction boundaries of the master. Instead, a new + transaction on replication slave is started when a new GCI + (global checkpoint identifier) is issued, and is committed + when the last event of the check point has been received and + processed. This ensures consistency of each cluster in + cluster replication, and there is no requirement for stronger + consistency: MySQL replication is asynchronous with other + engines as well. + + A practical consequence of that is that row level replication + stream passed through the injector thread never contains + COMMIT events. + Here we should preserve the server invariant that there is no + outstanding statement transaction when the normal transaction + is committed by committing the statement transaction + explicitly. + */ + trans_commit_stmt(m_thd); + if (!trans_commit(m_thd)) + { + close_thread_tables(m_thd); + m_thd->release_transactional_locks(); + } + DBUG_RETURN(error); } diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 7bc38d28961..5e6837c32c7 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -1998,7 +1998,7 @@ end: ha_commit_trans(thd, FALSE); ha_commit_trans(thd, TRUE); close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } return err; @@ -2288,7 +2288,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error) if (unlikely(error)) { - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); if (thd == rli->sql_driver_thd) { @@ -2402,10 +2402,10 @@ void rpl_group_info::slave_close_thread_tables(THD *thd) if (thd->transaction_rollback_request) { trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } else if (! thd->in_multi_stmt_transaction_mode()) - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); else thd->mdl_context.release_statement_locks(); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 59a304deb99..bd41de78c9a 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2346,10 +2346,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (thd->transaction_rollback_request) { trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } else if (! thd->in_multi_stmt_transaction_mode()) - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); else thd->mdl_context.release_statement_locks(); } @@ -3457,10 +3457,10 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, if (thd->transaction_rollback_request) { trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } else if (! 
thd->in_multi_stmt_transaction_mode()) - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); else thd->mdl_context.release_statement_locks(); } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index c4b8d41d04c..205ab7f5974 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -8938,6 +8938,8 @@ static bool show_global_privileges(THD *thd, ACL_USER_BASE *acl_entry, add_user_parameters(thd, &global, (ACL_USER *)acl_entry, (want_access & GRANT_ACL)); + else if (want_access & GRANT_ACL) + global.append(STRING_WITH_LEN(" WITH GRANT OPTION")); protocol->prepare_for_resend(); protocol->store(global.ptr(),global.length(),global.charset()); if (protocol->write()) diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 8e956eb0f8c..b5c8514c302 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2010, 2015, Oracle and/or its affiliates. - Copyright (c) 2011, 2019, MariaDB + Copyright (c) 2011, 2020, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -43,7 +43,7 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list) trans_rollback_stmt(thd); trans_rollback(thd); close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); /* table_list->table has been closed and freed. Do not reference @@ -116,7 +116,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, acquire the exclusive lock to satisfy MDL asserts and avoid deadlocks. */ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); /* Attempt to do full-blown table open in mysql_admin_table() has failed. Let us try to open at least a .FRM for this table. @@ -285,7 +285,7 @@ end: } /* In case of a temporary table there will be no metadata lock. 
*/ if (unlikely(error) && has_mdl_lock) - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); DBUG_RETURN(error); } @@ -563,7 +563,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, trans_rollback(thd); close_thread_tables(thd); table->table= NULL; - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); table->mdl_request.init(MDL_key::TABLE, table->db.str, table->table_name.str, MDL_SHARED_NO_READ_WRITE, MDL_TRANSACTION); } @@ -623,7 +623,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, trans_rollback_stmt(thd); trans_rollback(thd); close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); DBUG_PRINT("admin", ("simple error, admin next table")); continue; case -1: // error, message could be written to net @@ -690,7 +690,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, trans_commit_stmt(thd); trans_commit(thd); close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); lex->reset_query_tables_list(FALSE); /* Restore Query_tables_list::sql_command value to make statement @@ -823,7 +823,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, thd->open_options|= extra_open_options; close_thread_tables(thd); table->table= NULL; - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); table->mdl_request.init(MDL_key::TABLE, table->db.str, table->table_name.str, MDL_SHARED_NO_READ_WRITE, MDL_TRANSACTION); table->mdl_request.set_type(MDL_SHARED_READ); @@ -1055,7 +1055,7 @@ send_result_message: trans_commit_stmt(thd); trans_commit(thd); close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); /* Clear references to TABLE and MDL_ticket after releasing them. */ table->mdl_request.ticket= NULL; @@ -1208,7 +1208,7 @@ send_result_message: goto err; } close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); /* If it is CHECK TABLE v1, v2, v3, and v1, v2, v3 are views, we will run @@ -1246,7 +1246,7 @@ err: table->table= 0; } close_thread_tables(thd); // Shouldn't be needed - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); thd->resume_subsequent_commits(suspended_wfc); DBUG_RETURN(TRUE); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index ada1a7cd36a..f7d93bec0df 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -8975,7 +8975,7 @@ close_mysql_tables(THD *thd) if (! 
thd->in_sub_stmt) trans_commit_stmt(thd); close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } /* diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 869ce72f861..a7057e3d5d4 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -4739,7 +4739,7 @@ void destroy_thd(MYSQL_THD thd) void reset_thd(MYSQL_THD thd) { close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); thd->free_items(); free_root(thd->mem_root, MYF(MY_KEEP_PREALLOC)); } diff --git a/sql/sql_class.h b/sql/sql_class.h index 537bfe037d4..33342b16aad 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -4469,6 +4469,13 @@ public: locked_tables_mode= mode_arg; } void leave_locked_tables_mode(); + /* Release transactional locks if there are no active transactions */ + void release_transactional_locks() + { + if (!(server_status & + (SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY))) + mdl_context.release_transactional_locks(); + } int decide_logging_format(TABLE_LIST *tables); /* In Some cases when decide_logging_format is called it does not have all diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 11d3a13b8b9..eb840e02a27 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -1271,7 +1271,8 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived) DBUG_RETURN(false); st_select_lex_unit *unit= derived->get_unit(); - st_select_lex *sl= unit->first_select(); + st_select_lex *first_sl= unit->first_select(); + st_select_lex *sl= first_sl; if (derived->prohibit_cond_pushdown) DBUG_RETURN(false); @@ -1419,7 +1420,24 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived) if (!extracted_cond_copy) continue; } - + + /* + Rename the columns of all non-first selects of a union to be compatible + by names with the columns of the first select. It will allow using copies + of the same expression pushed into having clauses of different selects. + */ + if (sl != first_sl) + { + DBUG_ASSERT(sl->item_list.elements == first_sl->item_list.elements); + List_iterator_fast<Item> it(sl->item_list); + List_iterator_fast<Item> nm_it(unit->types); + Item * item; + while((item= it++)) + { + item->share_name_with(nm_it++); + } + } + /* Transform the references to the 'derived' columns from the condition pushed into the having clause of sl to make them usable in the new context diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 34c4211070b..b203fffaa72 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3050,7 +3050,7 @@ pthread_handler_t handle_delayed_insert(void *arg) if (thd->mdl_context.clone_ticket(&di->grl_protection) || thd->mdl_context.clone_ticket(&di->table_list.mdl_request)) { - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); di->handler_thread_initialized= TRUE; goto err; } @@ -3246,7 +3246,7 @@ pthread_handler_t handle_delayed_insert(void *arg) thd->set_killed(KILL_CONNECTION_HARD); // If error close_thread_tables(thd); // Free the table - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); mysql_cond_broadcast(&di->cond_client); // Safety mysql_mutex_lock(&LOCK_delayed_create); // Because of delayed_get_table @@ -4722,7 +4722,8 @@ bool select_create::send_eof() WSREP_ERROR("Appending table key for CTAS failed: %s, %d", (wsrep_thd_query(thd)) ?
wsrep_thd_query(thd) : "void", rcode); - return true; + abort_result_set(); + DBUG_RETURN(true); } /* If commit fails, we should be able to reset the OK status. */ thd->get_stmt_da()->set_overwrite_status(TRUE); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 43bfa274537..c51a6f24aab 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2058,7 +2058,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, locks. */ trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } thd->cleanup_after_query(); @@ -2123,7 +2123,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ulonglong options= (ulonglong) (uchar) packet[0]; if (trans_commit_implicit(thd)) break; - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); if (check_global_access(thd,RELOAD_ACL)) break; general_log_print(thd, command, NullS); @@ -2156,7 +2156,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (trans_commit_implicit(thd)) break; close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); my_ok(thd); break; } @@ -2971,7 +2971,7 @@ err: /* Close tables and release metadata locks. */ close_thread_tables(thd); DBUG_ASSERT(!thd->locked_tables_mode); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); return TRUE; } @@ -3694,7 +3694,7 @@ mysql_execute_command(THD *thd) /* Commit the normal transaction if one is active. */ bool commit_failed= trans_commit_implicit(thd); /* Release metadata locks acquired in this transaction. */ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); if (commit_failed) { WSREP_DEBUG("implicit commit failed, MDL released: %lld", @@ -4938,7 +4938,7 @@ mysql_execute_command(THD *thd) { res= trans_commit_implicit(thd); thd->locked_tables_list.unlock_locked_tables(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); thd->variables.option_bits&= ~(OPTION_TABLE_LOCK); } if (thd->global_read_lock.is_acquired()) @@ -4952,7 +4952,7 @@ mysql_execute_command(THD *thd) res= trans_commit_implicit(thd); thd->locked_tables_list.unlock_locked_tables(thd); /* Release transactional metadata locks. 
*/ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); if (res) goto error; @@ -5533,7 +5533,7 @@ mysql_execute_command(THD *thd) DBUG_PRINT("info", ("Executing SQLCOM_BEGIN thd: %p", thd)); if (trans_begin(thd, lex->start_transaction_opt)) { - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); WSREP_DEBUG("BEGIN failed, MDL released: %lld", (longlong) thd->thread_id); goto error; @@ -5551,7 +5551,7 @@ mysql_execute_command(THD *thd) (thd->variables.completion_type == 2 && lex->tx_release != TVL_NO)); bool commit_failed= trans_commit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); if (commit_failed) { WSREP_DEBUG("COMMIT failed, MDL released: %lld", @@ -5602,7 +5602,7 @@ mysql_execute_command(THD *thd) (thd->variables.completion_type == 2 && lex->tx_release != TVL_NO)); bool rollback_failed= trans_rollback(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); if (rollback_failed) { @@ -5915,7 +5915,6 @@ mysql_execute_command(THD *thd) case SQLCOM_XA_COMMIT: { bool commit_failed= trans_xa_commit(thd); - thd->mdl_context.release_transactional_locks(); if (commit_failed) { WSREP_DEBUG("XA commit failed, MDL released: %lld", @@ -5933,7 +5932,6 @@ mysql_execute_command(THD *thd) case SQLCOM_XA_ROLLBACK: { bool rollback_failed= trans_xa_rollback(thd); - thd->mdl_context.release_transactional_locks(); if (rollback_failed) { WSREP_DEBUG("XA rollback failed, MDL released: %lld", @@ -6161,7 +6159,7 @@ finish: */ THD_STAGE_INFO(thd, stage_rollback_implicit); trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } else if (stmt_causes_implicit_commit(thd, CF_IMPLICIT_COMMIT_END)) { @@ -6175,7 +6173,7 @@ finish: /* Commit the normal transaction if one is active. */ trans_commit_implicit(thd); thd->get_stmt_da()->set_overwrite_status(false); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } } else if (! thd->in_sub_stmt && ! thd->in_multi_stmt_transaction_mode()) @@ -6190,7 +6188,7 @@ finish: - If in autocommit mode, or outside a transactional context, automatically release metadata locks of the current statement. */ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } else if (! 
thd->in_sub_stmt) { @@ -6214,7 +6212,7 @@ finish: { WSREP_DEBUG("Forcing release of transactional locks for thd: %lld", (longlong) thd->thread_id); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } #endif /* WITH_WSREP */ diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 0c35b2dd1d8..b8c7eae638c 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -4247,7 +4247,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) if (thd->transaction_rollback_request) { trans_rollback_implicit(thd); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); } /* Preserve CHANGE MASTER attributes */ diff --git a/sql/sql_union.cc b/sql/sql_union.cc index e73e55bd600..5fe9aea91ac 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -2059,6 +2059,7 @@ bool st_select_lex::cleanup() delete join; join= 0; } + leaf_tables.empty(); for (SELECT_LEX_UNIT *lex_unit= first_inner_unit(); lex_unit ; lex_unit= lex_unit->next_unit()) { diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 60308e4413c..70c0a65ca5d 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -4037,7 +4037,7 @@ static bool fix_autocommit(sys_var *self, THD *thd, enum_var_type type) if (trans_commit_stmt(thd) || trans_commit(thd)) { thd->variables.option_bits&= ~OPTION_AUTOCOMMIT; - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); WSREP_DEBUG("autocommit, MDL TRX lock released: %lld", (longlong) thd->thread_id); return true; diff --git a/sql/transaction.cc b/sql/transaction.cc index e2c120ffebb..7f94d98179e 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -197,7 +197,7 @@ bool trans_begin(THD *thd, uint flags) Release transactional metadata locks only after the transaction has been committed. */ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); // The RO/RW options are mutually exclusive. DBUG_ASSERT(!((flags & MYSQL_START_TRANS_OPT_READ_ONLY) && @@ -894,11 +894,13 @@ bool trans_xa_prepare(THD *thd) /** Commit and terminate the a XA transaction. + Transactional locks are released if transaction ended @param thd Current thread @retval FALSE Success @retval TRUE Failure + */ bool trans_xa_commit(THD *thd) @@ -989,6 +991,7 @@ bool trans_xa_commit(THD *thd) thd->transaction.xid_state.xa_state= XA_NOTR; trans_track_end_trx(thd); + thd->mdl_context.release_transactional_locks(); DBUG_RETURN(res); } @@ -996,6 +999,7 @@ bool trans_xa_commit(THD *thd) /** Roll back and terminate a XA transaction. + Transactional locks are released if transaction ended @param thd Current thread @@ -1046,6 +1050,7 @@ bool trans_xa_rollback(THD *thd) thd->transaction.xid_state.xa_state= XA_NOTR; trans_track_end_trx(thd); + thd->mdl_context.release_transactional_locks(); DBUG_RETURN(res); } diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc index 4e19e15680e..bc9b68400f4 100644 --- a/sql/wsrep_applier.cc +++ b/sql/wsrep_applier.cc @@ -189,7 +189,7 @@ static wsrep_cb_status_t wsrep_apply_events(THD* thd, trans_rollback(thd); thd->locked_tables_list.unlock_locked_tables(thd); /* Release transactional metadata locks. 
*/ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); thd->wsrep_conflict_state= NO_CONFLICT; DBUG_RETURN(WSREP_CB_FAILURE); } @@ -371,7 +371,7 @@ wsrep_cb_status_t wsrep_commit_cb(void* const ctx, /* Cleanup */ wsrep_set_apply_format(thd, NULL); - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); thd->reset_query(); /* Mutex protected */ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index d00183abc80..d2c62e47edf 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -75,7 +75,7 @@ void wsrep_client_rollback(THD *thd) } /* Release transactional metadata locks. */ - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); /* release explicit MDL locks */ thd->mdl_context.release_explicit_locks(); @@ -212,7 +212,7 @@ void wsrep_replay_sp_transaction(THD* thd) thd->locked_tables_list.unlock_locked_tables(thd); thd->variables.option_bits&= ~(OPTION_TABLE_LOCK); } - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); mysql_mutex_unlock(&thd->LOCK_thd_data); THD *replay_thd= new THD(true); @@ -351,7 +351,7 @@ void wsrep_replay_transaction(THD *thd) thd->locked_tables_list.unlock_locked_tables(thd); thd->variables.option_bits&= ~(OPTION_TABLE_LOCK); } - thd->mdl_context.release_transactional_locks(); + thd->release_transactional_locks(); /* Replaying will call MYSQL_START_STATEMENT when handling BEGIN Query_log_event so end statement must be called before diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index d6320e44dd8..8f5ad517eb9 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -2162,6 +2162,7 @@ dict_index_remove_from_cache_low( if (index->online_log) { ut_ad(index->online_status == ONLINE_INDEX_CREATION); row_log_free(index->online_log); + index->online_log = NULL; } /* Remove the index from the list of indexes of the table */ diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index e10bc70260d..d8931d35224 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -1981,7 +1981,7 @@ static index_stats_t dict_stats_analyze_index(dict_index_t* index) since it will be faster and will give better results. 
*/ if (root_level == 0 - || N_SAMPLE_PAGES(index) * n_uniq > index->stat_n_leaf_pages) { + || N_SAMPLE_PAGES(index) * n_uniq > result.n_leaf_pages) { if (root_level == 0) { DEBUG_PRINTF(" %s(): just one page," diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 672f322f609..008f3b7d3fc 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -5810,6 +5810,7 @@ error_handling_drop_uncached: if (ok && a == 1) { row_log_free( index->online_log); + index->online_log = NULL; ok = false; }); @@ -7273,6 +7274,7 @@ innobase_online_rebuild_log_free( == ONLINE_INDEX_CREATION); clust_index->online_status = ONLINE_INDEX_COMPLETE; row_log_free(clust_index->online_log); + clust_index->online_log = NULL; DEBUG_SYNC_C("innodb_online_rebuild_log_free_aborted"); } diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h index fac1b950e2e..232019aee9f 100644 --- a/storage/innobase/include/row0log.h +++ b/storage/innobase/include/row0log.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2020, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -69,7 +69,7 @@ Free the row log for an index that was being created online. */ void row_log_free( /*=========*/ - row_log_t*& log) /*!< in,own: row log */ + row_log_t* log) /*!< in,own: row log */ MY_ATTRIBUTE((nonnull)); /******************************************************//** diff --git a/storage/innobase/include/row0log.ic b/storage/innobase/include/row0log.ic index ba7eb7b025c..44d17bbcdf1 100644 --- a/storage/innobase/include/row0log.ic +++ b/storage/innobase/include/row0log.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2020, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,6 +39,7 @@ row_log_abort_sec( ut_ad(!dict_index_is_clust(index)); dict_index_set_online_status(index, ONLINE_INDEX_ABORTED); row_log_free(index->online_log); + index->online_log = NULL; } /******************************************************//** diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc index 2f22e8e0c16..2e364ba4945 100644 --- a/storage/innobase/mtr/mtr0mtr.cc +++ b/storage/innobase/mtr/mtr0mtr.cc @@ -736,7 +736,7 @@ inline lsn_t mtr_t::finish_write(ulint len) return start_lsn; } -/** Find out whether a block was X-latched by the mini-transaction */ +/** Find out whether a block was not X-latched by the mini-transaction */ struct FindBlockX { const buf_block_t █ @@ -746,7 +746,7 @@ struct FindBlockX /** @return whether the block was not found x-latched */ bool operator()(const mtr_memo_slot_t *slot) const { - return slot->object != &block || slot->type == MTR_MEMO_PAGE_X_FIX; + return slot->object != &block || slot->type != MTR_MEMO_PAGE_X_FIX; } }; diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index a3bf91a1c74..728902165b1 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -3235,7 +3235,6 @@ row_log_allocate( } dict_index_set_online_status(index, ONLINE_INDEX_CREATION); - index->online_log = log; if (log_tmp_is_encrypted()) { ulint size = srv_sort_buf_size; @@ -3248,6 +3247,7 @@ row_log_allocate( } } + index->online_log = log; /* While we might be holding an exclusive data dictionary lock here, in row_log_abort_sec() we will not always be holding it. Use atomic operations in both cases. */ @@ -3261,7 +3261,7 @@ Free the row log for an index that was being created online. */ void row_log_free( /*=========*/ - row_log_t*& log) /*!< in,own: row log */ + row_log_t* log) /*!< in,own: row log */ { MONITOR_ATOMIC_DEC(MONITOR_ONLINE_CREATE_INDEX); @@ -3281,7 +3281,6 @@ row_log_free( mutex_free(&log->mutex); ut_free(log); - log = NULL; } /******************************************************//** diff --git a/storage/oqgraph/CMakeLists.txt b/storage/oqgraph/CMakeLists.txt index 638ac00dc01..b36f965d725 100644 --- a/storage/oqgraph/CMakeLists.txt +++ b/storage/oqgraph/CMakeLists.txt @@ -39,6 +39,10 @@ int main() { return 0; } ENDIF() ENDFUNCTION() +IF(PLUGIN_OQGRAPH STREQUAL "NO") + RETURN() +ENDIF() + IF(NOT DEFINED OQGRAPH_OK) CHECK_OQGRAPH() IF (NOT OQGRAPH_OK) diff --git a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test index e5de6246f60..68fe02bbd86 100644 --- a/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test +++ b/storage/rocksdb/mysql-test/rocksdb/t/checkpoint.test @@ -87,7 +87,7 @@ let $checkpoint = $MYSQL_TMP_DIR/already-existing-directory; --mkdir $checkpoint let $succeeds = 0; --source set_checkpoint.inc ---exec rm -rf $checkpoint +rmdir $checkpoint; --disable_result_log truncate table t1; |