author     Marko Mäkelä <marko.makela@mariadb.com>  2022-03-30 09:34:07 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>  2022-03-30 09:34:07 +0300
commit     5c69e936308b9b636d3e58aff624d2716f289fbd (patch)
tree       c04c8a4d2a160977b027d0c1b3acc44a70f53301
parent     88ce8a3d8be0346b325bc4da75894cd15e255857 (diff)
parent     a4d753758fd5305853ba339a0cd57d1675d5aa8c (diff)
download   mariadb-git-5c69e936308b9b636d3e58aff624d2716f289fbd.tar.gz
Merge 10.7 into 10.8
-rw-r--r--  CREDITS | 2
-rw-r--r--  client/mysqlbinlog.cc | 1
-rw-r--r--  client/mysqltest.cc | 455
-rw-r--r--  cmake/submodules.cmake | 35
-rwxr-xr-x  debian/autobake-deb.sh | 7
-rw-r--r--  debian/mariadb-plugin-columnstore.install | 119
-rw-r--r--  debian/mariadb-plugin-columnstore.postinst | 8
-rw-r--r--  debian/mariadb-plugin-columnstore.postrm | 24
-rw-r--r--  debian/mariadb-plugin-columnstore.prerm | 8
-rw-r--r--  debian/mariadb-plugin-columnstore.triggers | 1
-rw-r--r--  extra/innochecksum.cc | 2
-rw-r--r--  include/my_atomic.h | 18
-rw-r--r--  mysql-test/main/contributors.result | 2
-rw-r--r--  mysql-test/main/create_or_replace.result | 45
-rw-r--r--  mysql-test/main/create_or_replace.test | 47
-rw-r--r--  mysql-test/main/ctype_utf32.result | 25
-rw-r--r--  mysql-test/main/ctype_utf32.test | 19
-rw-r--r--  mysql-test/main/multi_update_innodb.result | 15
-rw-r--r--  mysql-test/main/multi_update_innodb.test | 19
-rw-r--r--  mysql-test/main/opt_tvc.result | 27
-rw-r--r--  mysql-test/main/opt_tvc.test | 26
-rw-r--r--  mysql-test/main/processlist.result | 20
-rw-r--r--  mysql-test/main/processlist.test | 35
-rw-r--r--  mysql-test/main/sp-cursor.result | 63
-rw-r--r--  mysql-test/main/sp-cursor.test | 56
-rw-r--r--  mysql-test/main/view.result | 28
-rw-r--r--  mysql-test/main/view.test | 26
-rw-r--r--  mysql-test/suite/binlog/r/binlog_autocommit_off_no_hang.result | 6
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result | 7
-rw-r--r--  mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang-master.opt | 1
-rw-r--r--  mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang.test | 45
-rw-r--r--  mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test | 45
-rw-r--r--  mysql-test/suite/compat/oracle/r/sp-package.result | 308
-rw-r--r--  mysql-test/suite/compat/oracle/t/sp-package.test | 327
-rw-r--r--  mysql-test/suite/galera/disabled.def | 1
-rw-r--r--  mysql-test/suite/galera/include/kill_galera.inc | 2
-rw-r--r--  mysql-test/suite/galera/r/MDEV-24143.result | 23
-rw-r--r--  mysql-test/suite/galera/r/MDEV-27713.result | 46
-rw-r--r--  mysql-test/suite/galera/r/galera_bf_abort_ps_bind.result | 37
-rw-r--r--  mysql-test/suite/galera/r/galera_ist_restart_joiner.result | 1
-rw-r--r--  mysql-test/suite/galera/t/MDEV-24143.test | 20
-rw-r--r--  mysql-test/suite/galera/t/MDEV-27713.test | 67
-rw-r--r--  mysql-test/suite/galera/t/MW-44.test | 6
-rw-r--r--  mysql-test/suite/galera/t/galera_bf_abort_ps_bind.cnf | 7
-rw-r--r--  mysql-test/suite/galera/t/galera_bf_abort_ps_bind.test | 58
-rw-r--r--  mysql-test/suite/galera/t/galera_ist_restart_joiner.test | 14
-rw-r--r--  mysql-test/suite/galera_3nodes/r/galera_garbd_backup.result | 41
-rw-r--r--  mysql-test/suite/galera_3nodes/t/galera_garbd_backup.cnf | 13
-rw-r--r--  mysql-test/suite/galera_3nodes/t/galera_garbd_backup.test | 134
-rw-r--r--  mysql-test/suite/galera_sr/disabled.def | 1
-rw-r--r--  mysql-test/suite/galera_sr/r/MDEV-27553.result | 25
-rw-r--r--  mysql-test/suite/galera_sr/t/MDEV-27553.test | 65
-rw-r--r--  mysql-test/suite/innodb/r/row_format_redundant.result | 23
-rw-r--r--  mysql-test/suite/innodb/t/alter_crash_rebuild.test | 26
-rw-r--r--  mysql-test/suite/innodb/t/row_format_redundant.test | 27
-rw-r--r--  mysql-test/suite/innodb_zip/r/innochecksum.result | 3
-rw-r--r--  mysql-test/suite/innodb_zip/r/innochecksum_2.result | 3
-rw-r--r--  mysql-test/suite/innodb_zip/r/innochecksum_3.result | 1
-rw-r--r--  mysql-test/suite/innodb_zip/t/innochecksum.test | 8
-rw-r--r--  mysql-test/suite/rpl/r/mdev_24667.result | 30
-rw-r--r--  mysql-test/suite/rpl/t/mdev_24667.cnf | 8
-rw-r--r--  mysql-test/suite/rpl/t/mdev_24667.test | 56
-rw-r--r--  mysys/my_rename.c | 9
-rw-r--r--  plugin/server_audit/server_audit.c | 6
-rw-r--r--  scripts/CMakeLists.txt | 1
-rw-r--r--  scripts/mysql_install_db.sh | 2
-rw-r--r--  scripts/mysql_system_tables_fix.sql | 9
-rw-r--r--  scripts/wsrep_sst_backup.sh | 112
-rw-r--r--  sql/contributors.h | 2
-rw-r--r--  sql/handler.h | 4
-rw-r--r--  sql/item.cc | 22
-rw-r--r--  sql/item_cmpfunc.cc | 7
-rw-r--r--  sql/item_cmpfunc.h | 2
-rw-r--r--  sql/log.cc | 2
-rw-r--r--  sql/rpl_rli.cc | 2
-rw-r--r--  sql/semisync_master.cc | 1
-rw-r--r--  sql/sp_head.cc | 1
-rw-r--r--  sql/sql_class.cc | 1
-rw-r--r--  sql/sql_class.h | 26
-rw-r--r--  sql/sql_lex.cc | 87
-rw-r--r--  sql/sql_lex.h | 9
-rw-r--r--  sql/sql_parse.cc | 22
-rw-r--r--  sql/sql_prepare.cc | 12
-rw-r--r--  sql/sql_show.cc | 12
-rw-r--r--  sql/sql_table.cc | 49
-rw-r--r--  sql/sql_tvc.cc | 31
-rw-r--r--  sql/sql_update.cc | 5
-rw-r--r--  sql/sql_view.cc | 3
-rw-r--r--  sql/sql_yacc.yy | 50
-rw-r--r--  sql/table.cc | 22
-rw-r--r--  sql/unireg.cc | 11
-rw-r--r--  sql/wsrep_client_service.cc | 1
-rw-r--r--  sql/wsrep_high_priority_service.cc | 1
-rw-r--r--  sql/wsrep_mysqld.cc | 87
-rw-r--r--  sql/wsrep_mysqld.h | 1
-rw-r--r--  sql/wsrep_sst.cc | 81
-rw-r--r--  storage/innobase/btr/btr0btr.cc | 24
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 58
-rw-r--r--  storage/innobase/buf/buf0flu.cc | 10
-rw-r--r--  storage/innobase/dict/dict0crea.cc | 62
-rw-r--r--  storage/innobase/dict/dict0load.cc | 652
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 40
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 4
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 7
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 4
-rw-r--r--  storage/innobase/handler/i_s.cc | 17
-rw-r--r--  storage/innobase/include/dict0load.h | 7
-rw-r--r--  storage/innobase/include/fil0fil.h | 7
-rw-r--r--  storage/innobase/include/log0log.h | 8
-rw-r--r--  storage/innobase/include/mtr0mtr.h | 9
-rw-r--r--  storage/innobase/include/rem0rec.h | 8
-rw-r--r--  storage/innobase/log/log0log.cc | 18
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc | 59
-rw-r--r--  storage/innobase/rem/rem0rec.cc | 11
-rw-r--r--  storage/innobase/row/row0import.cc | 2
-rw-r--r--  storage/innobase/row/row0mysql.cc | 2
-rw-r--r--  storage/innobase/row/row0row.cc | 16
-rw-r--r--  storage/innobase/row/row0sel.cc | 43
-rw-r--r--  storage/innobase/srv/srv0start.cc | 4
-rw-r--r--  storage/maria/ma_open.c | 4
-rw-r--r--  storage/perfschema/unittest/stub_pfs_global.h | 14
-rw-r--r--  strings/decimal.c | 32
-rw-r--r--  tpool/aio_liburing.cc | 3
-rw-r--r--  tpool/aio_linux.cc | 3
-rw-r--r--  tpool/tpool.h | 12
-rw-r--r--  tpool/tpool_generic.cc | 15
126 files changed, 3585 insertions(+), 881 deletions(-)
diff --git a/CREDITS b/CREDITS
index f5e87e18752..35092602ccf 100644
--- a/CREDITS
+++ b/CREDITS
@@ -4,9 +4,11 @@ organization registered in the USA.
The current main sponsors of the MariaDB Foundation are:
Alibaba Cloud https://www.alibabacloud.com/ (2017)
+Intel https://www.intel.com (2022)
MariaDB Corporation https://www.mariadb.com (2013)
Microsoft https://microsoft.com/ (2017)
ServiceNow https://servicenow.com (2019)
+SIT https://sit.org (2022)
Tencent Cloud https://cloud.tencent.com (2017)
Development Bank of Singapore https://dbs.com (2016)
IBM https://www.ibm.com (2017)
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 600810d7dab..21980ca03d6 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -2893,6 +2893,7 @@ static Exit_status handle_event_raw_mode(PRINT_EVENT_INFO *print_event_info,
error("Could not write into log file '%s'", out_file_name);
DBUG_RETURN(ERROR_STOP);
}
+ fflush(result_file);
DBUG_RETURN(OK_CONTINUE);
}
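
For context on the one-line fflush() addition above: with --raw --stop-never the result file stays open while mariadb-binlog keeps streaming events, so stdio-buffered writes may not reach disk until the process exits (the symptom tracked by MDEV-14608, whose test appears later in this merge). A minimal sketch of the write-then-flush pattern, hypothetical and not taken from mysqlbinlog itself:

#include <cstdio>

// Hypothetical helper: append one raw event to an already open result file and
// make it visible to concurrent readers right away. Without the fflush(), the
// bytes can sit in the stdio buffer indefinitely while the writer keeps running.
static bool write_event_raw(FILE *out, const void *buf, size_t len)
{
  if (fwrite(buf, 1, len, out) != len)
    return false;               // short write: report the error to the caller
  return fflush(out) == 0;      // push buffered data to the operating system now
}
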
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 92a24154cbc..e411ff49df4 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
- Copyright (c) 2009, 2021, MariaDB
+ Copyright (c) 2009, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -319,6 +319,7 @@ struct st_connection
char *name;
size_t name_len;
MYSQL_STMT* stmt;
+ MYSQL_BIND *ps_params;
/* Set after send to disallow other queries before reap */
my_bool pending;
@@ -393,6 +394,10 @@ enum enum_commands {
Q_ENABLE_PREPARE_WARNINGS, Q_DISABLE_PREPARE_WARNINGS,
Q_RESET_CONNECTION,
Q_OPTIMIZER_TRACE,
+ Q_PS_PREPARE,
+ Q_PS_BIND,
+ Q_PS_EXECUTE,
+ Q_PS_CLOSE,
Q_UNKNOWN, /* Unknown command. */
Q_COMMENT, /* Comments, ignored. */
Q_COMMENT_WITH_COMMAND,
@@ -506,6 +511,10 @@ const char *command_names[]=
"disable_prepare_warnings",
"reset_connection",
"optimizer_trace",
+ "PS_prepare",
+ "PS_bind",
+ "PS_execute",
+ "PS_close",
0
};
@@ -7958,6 +7967,15 @@ static void handle_no_active_connection(struct st_command *command,
var_set_errno(2006);
}
+/* handler functions to execute prepared statement calls in client C API */
+void run_prepare_stmt(struct st_connection *cn, struct st_command *command, const char *query,
+ size_t query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings);
+void run_bind_stmt(struct st_connection *cn, struct st_command *command, const char *query,
+ size_t query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings);
+void run_execute_stmt(struct st_connection *cn, struct st_command *command, const char *query,
+ size_t query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings);
+void run_close_stmt(struct st_connection *cn, struct st_command *command, const char *query,
+ size_t query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings);
/*
Run query using MySQL C API
@@ -7989,6 +8007,32 @@ void run_query_normal(struct st_connection *cn, struct st_command *command,
DBUG_VOID_RETURN;
}
+ /* handle prepared statement commands */
+ switch (command->type) {
+ case Q_PS_PREPARE:
+ run_prepare_stmt(cn, command, query, query_len, ds, ds_warnings);
+ flags &= ~QUERY_SEND_FLAG;
+ goto end;
+ break;
+ case Q_PS_BIND:
+ run_bind_stmt(cn, command, query, query_len, ds, ds_warnings);
+ flags &= ~QUERY_SEND_FLAG;
+ goto end;
+ break;
+ case Q_PS_EXECUTE:
+ run_execute_stmt(cn, command, query, query_len, ds, ds_warnings);
+ flags &= ~QUERY_SEND_FLAG;
+ goto end;
+ break;
+ case Q_PS_CLOSE:
+ run_close_stmt(cn, command, query, query_len, ds, ds_warnings);
+ flags &= ~QUERY_SEND_FLAG;
+ goto end;
+ break;
+ default: /* not a prepared statement command */
+ break;
+ }
+
if (flags & QUERY_SEND_FLAG)
{
/*
@@ -8562,6 +8606,411 @@ end:
DBUG_VOID_RETURN;
}
+/*
+ prepare query using prepared statement C API
+
+ SYNOPSIS
+ run_prepare_stmt
+ mysql - mysql handle
+ command - current command pointer
+ query - query string to execute
+ query_len - length of the query string to execute
+ ds - output buffer where to store result from query
+
+ RETURN VALUE
+ error - function will not return
+*/
+
+void run_prepare_stmt(struct st_connection *cn, struct st_command *command, const char *query, size_t query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings)
+{
+
+ MYSQL *mysql= cn->mysql;
+ MYSQL_STMT *stmt;
+ DYNAMIC_STRING ds_prepare_warnings;
+ DBUG_ENTER("run_prepare_stmt");
+ DBUG_PRINT("query", ("'%-.60s'", query));
+
+ /*
+ Init a new stmt if one has not already been created for this connection
+ */
+ if(!(stmt= cn->stmt))
+ {
+ if (!(stmt= mysql_stmt_init(mysql)))
+ die("unable to init stmt structure");
+ cn->stmt= stmt;
+ }
+
+ /* Init dynamic strings for warnings */
+ if (!disable_warnings)
+ {
+ init_dynamic_string(&ds_prepare_warnings, NULL, 0, 256);
+ }
+
+ /*
+ Prepare the query
+ */
+ char* PS_query= command->first_argument;
+ size_t PS_query_len= command->end - command->first_argument;
+ if (do_stmt_prepare(cn, PS_query, PS_query_len))
+ {
+ handle_error(command, mysql_stmt_errno(stmt),
+ mysql_stmt_error(stmt), mysql_stmt_sqlstate(stmt), ds);
+ goto end;
+ }
+
+ /*
+ Get the warnings from mysql_stmt_prepare and keep them in a
+ separate string
+ */
+ if (!disable_warnings)
+ append_warnings(&ds_prepare_warnings, mysql);
+ end:
+ DBUG_VOID_RETURN;
+}
+
+/*
+ bind parameters for a prepared statement C API
+
+ SYNOPSIS
+ run_bind_stmt
+ mysql - mysql handle
+ command - current command pointer
+ query - query string to execute
+ query_len - length of the query string to execute
+ ds - output buffer where to store result from query
+
+ RETURN VALUE
+ error - function will not return
+*/
+
+void run_bind_stmt(struct st_connection *cn, struct st_command *command,
+ const char *query, size_t query_len, DYNAMIC_STRING *ds,
+ DYNAMIC_STRING *ds_warnings
+ )
+{
+ MYSQL_STMT *stmt= cn->stmt;
+ DBUG_ENTER("run_bind_stmt");
+ DBUG_PRINT("query", ("'%-.60s'", query));
+ MYSQL_BIND *ps_params= cn->ps_params;
+ if (ps_params)
+ {
+ for (size_t i=0; i<stmt->param_count; i++)
+ {
+ my_free(ps_params[i].buffer);
+ ps_params[i].buffer= NULL;
+ }
+ my_free(ps_params);
+ ps_params= NULL;
+ }
+
+ /* Init PS-parameters. */
+ cn->ps_params= ps_params = (MYSQL_BIND*)my_malloc(PSI_NOT_INSTRUMENTED,
+ sizeof(MYSQL_BIND) *
+ stmt->param_count,
+ MYF(MY_WME));
+ bzero((char *) ps_params, sizeof(MYSQL_BIND) * stmt->param_count);
+
+ int i=0;
+ char *c;
+ long *l;
+ double *d;
+
+ char *p= strtok((char*)command->first_argument, " ");
+ while (p != nullptr)
+ {
+ (void)strtol(p, &c, 10);
+ if (!*c)
+ {
+ ps_params[i].buffer_type= MYSQL_TYPE_LONG;
+ l= (long*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(long), MYF(MY_WME));
+ *l= strtol(p, &c, 10);
+ ps_params[i].buffer= (void*)l;
+ ps_params[i].buffer_length= 8;
+ }
+ else
+ {
+ (void)strtod(p, &c);
+ if (!*c)
+ {
+ ps_params[i].buffer_type= MYSQL_TYPE_DECIMAL;
+ d= (double*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(double),
+ MYF(MY_WME));
+ *d= strtod(p, &c);
+ ps_params[i].buffer= (void*)d;
+ ps_params[i].buffer_length= 8;
+ }
+ else
+ {
+ ps_params[i].buffer_type= MYSQL_TYPE_STRING;
+ ps_params[i].buffer= strdup(p);
+ ps_params[i].buffer_length= (unsigned long)strlen(p);
+ }
+ }
+
+ p= strtok(nullptr, " ");
+ i++;
+ }
+
+ int rc= mysql_stmt_bind_param(stmt, ps_params);
+ if (rc)
+ {
+ die("mysql_stmt_bind_param() failed': %d %s",
+ mysql_stmt_errno(stmt), mysql_stmt_error(stmt));
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+/*
+ execute query using prepared statement C API
+
+ SYNOPSIS
+ run_execute_stmt
+ mysql - mysql handle
+ command - current command pointer
+ query - query string to execute
+ query_len - length of the query string to execute
+ ds - output buffer where to store result from query
+
+ RETURN VALUE
+ error - function will not return
+*/
+
+void run_execute_stmt(struct st_connection *cn, struct st_command *command,
+ const char *query, size_t query_len, DYNAMIC_STRING *ds,
+ DYNAMIC_STRING *ds_warnings
+ )
+{
+ MYSQL_RES *res= NULL; /* Note that here 'res' is meta data result set */
+ MYSQL *mysql= cn->mysql;
+ MYSQL_STMT *stmt= cn->stmt;
+ DYNAMIC_STRING ds_execute_warnings;
+ DBUG_ENTER("run_execute_stmt");
+ DBUG_PRINT("query", ("'%-.60s'", query));
+
+ /* Init dynamic strings for warnings */
+ if (!disable_warnings)
+ {
+ init_dynamic_string(&ds_execute_warnings, NULL, 0, 256);
+ }
+
+#if MYSQL_VERSION_ID >= 50000
+ if (cursor_protocol_enabled)
+ {
+ /*
+ Use cursor when retrieving result
+ */
+ ulong type= CURSOR_TYPE_READ_ONLY;
+ if (mysql_stmt_attr_set(stmt, STMT_ATTR_CURSOR_TYPE, (void*) &type))
+ die("mysql_stmt_attr_set(STMT_ATTR_CURSOR_TYPE) failed': %d %s",
+ mysql_stmt_errno(stmt), mysql_stmt_error(stmt));
+ }
+#endif
+
+ /*
+ Execute the query
+ */
+ if (do_stmt_execute(cn))
+ {
+ handle_error(command, mysql_stmt_errno(stmt),
+ mysql_stmt_error(stmt), mysql_stmt_sqlstate(stmt), ds);
+ goto end;
+ }
+
+ /*
+ When running in cursor_protocol get the warnings from execute here
+ and keep them in a separate string for later.
+ */
+ if (cursor_protocol_enabled && !disable_warnings)
+ append_warnings(&ds_execute_warnings, mysql);
+
+ /*
+ We instruct that we want to update the "max_length" field in
+ mysql_stmt_store_result(), this is our only way to know how much
+ buffer to allocate for result data
+ */
+ {
+ my_bool one= 1;
+ if (mysql_stmt_attr_set(stmt, STMT_ATTR_UPDATE_MAX_LENGTH, (void*) &one))
+ die("mysql_stmt_attr_set(STMT_ATTR_UPDATE_MAX_LENGTH) failed': %d %s",
+ mysql_stmt_errno(stmt), mysql_stmt_error(stmt));
+ }
+
+ /*
+ If we got here the statement succeeded and was expected to do so,
+ get data. Note that this can still give errors found during execution!
+ Store the result of the query if it will return any fields
+ */
+ if (mysql_stmt_field_count(stmt) && mysql_stmt_store_result(stmt))
+ {
+ handle_error(command, mysql_stmt_errno(stmt),
+ mysql_stmt_error(stmt), mysql_stmt_sqlstate(stmt), ds);
+ goto end;
+ }
+
+ /* If we got here the statement was both executed and read successfully */
+ handle_no_error(command);
+ if (!disable_result_log)
+ {
+ /*
+ Not all statements create a result set. If there is one we can
+ now create another normal result set that contains the meta
+ data. This set can be handled almost like any other non prepared
+ statement result set.
+ */
+ if ((res= mysql_stmt_result_metadata(stmt)) != NULL)
+ {
+ /* Take the column count from meta info */
+ MYSQL_FIELD *fields= mysql_fetch_fields(res);
+ uint num_fields= mysql_num_fields(res);
+
+ if (display_metadata)
+ append_metadata(ds, fields, num_fields);
+
+ if (!display_result_vertically)
+ append_table_headings(ds, fields, num_fields);
+
+ append_stmt_result(ds, stmt, fields, num_fields);
+
+ mysql_free_result(res); /* Free normal result set with meta data */
+
+ /*
+ Normally, if there is a result set, we do not show warnings from the
+ prepare phase. This is because some warnings are generated both during
+ prepare and execute; this would generate different warning output
+ between normal and ps-protocol test runs.
+
+ The --enable_prepare_warnings command can be used to change this so
+ that warnings from both the prepare and execute phase are shown.
+ */
+ }
+ else
+ {
+ /*
+ This is a query without resultset
+ */
+ }
+
+ /*
+ Fetch info before fetching warnings, since it will be reset
+ otherwise.
+ */
+ if (!disable_info)
+ append_info(ds, mysql_stmt_affected_rows(stmt), mysql_info(mysql));
+
+ if (display_session_track_info)
+ append_session_track_info(ds, mysql);
+
+
+ if (!disable_warnings)
+ {
+ /* Get the warnings from execute */
+
+ /* Append warnings to ds - if there are any */
+ if (append_warnings(&ds_execute_warnings, mysql) ||
+ ds_execute_warnings.length ||
+ ds_warnings->length)
+ {
+ dynstr_append_mem(ds, "Warnings:\n", 10);
+ if (ds_warnings->length)
+ dynstr_append_mem(ds, ds_warnings->str,
+ ds_warnings->length);
+ if (ds_execute_warnings.length)
+ dynstr_append_mem(ds, ds_execute_warnings.str,
+ ds_execute_warnings.length);
+ }
+ }
+ }
+
+end:
+ if (!disable_warnings)
+ {
+ dynstr_free(&ds_execute_warnings);
+ }
+
+ /*
+ We save the return code (mysql_stmt_errno(stmt)) from the last call sent
+ to the server into the mysqltest builtin variable $mysql_errno. This
+ variable then can be used from the test case itself.
+ */
+
+ var_set_errno(mysql_stmt_errno(stmt));
+
+ revert_properties();
+
+ /* Close the statement if reconnect is enabled; a new prepare will be needed */
+ {
+#ifndef EMBEDDED_LIBRARY
+ my_bool reconnect;
+ mysql_get_option(mysql, MYSQL_OPT_RECONNECT, &reconnect);
+ if (reconnect)
+#else
+ if (mysql->reconnect)
+#endif
+ {
+ if (cn->ps_params)
+ {
+ for (size_t i=0; i<stmt->param_count; i++)
+ {
+ my_free(cn->ps_params[i].buffer);
+ cn->ps_params[i].buffer= NULL;
+ }
+ my_free(cn->ps_params);
+ }
+ mysql_stmt_close(stmt);
+ cn->stmt= NULL;
+ cn->ps_params= NULL;
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*
+ close a prepared statement C API
+
+ SYNOPSIS
+ run_close_stmt
+ mysql - mysql handle
+ command - current command pointer
+ query - query string to execute
+ query_len - length of the query string to execute
+ ds - output buffer where to store result from query
+
+ RETURN VALUE
+ error - function will not return
+*/
+
+void run_close_stmt(struct st_connection *cn, struct st_command *command,
+ const char *query, size_t query_len, DYNAMIC_STRING *ds,
+ DYNAMIC_STRING *ds_warnings
+ )
+{
+ MYSQL_STMT *stmt= cn->stmt;
+ DBUG_ENTER("run_close_stmt");
+ DBUG_PRINT("query", ("'%-.60s'", query));
+
+ if (cn->ps_params)
+ {
+
+ for (size_t i=0; i<stmt->param_count; i++)
+ {
+ my_free(cn->ps_params[i].buffer);
+ cn->ps_params[i].buffer= NULL;
+ }
+ my_free(cn->ps_params);
+ }
+
+ /* Close the statement */
+ if (stmt)
+ {
+ mysql_stmt_close(stmt);
+ cn->stmt= NULL;
+ }
+ cn->ps_params= NULL;
+
+ DBUG_VOID_RETURN;
+}
+
/*
@@ -9622,6 +10071,10 @@ int main(int argc, char **argv)
/* fall through */
case Q_QUERY:
case Q_REAP:
+ case Q_PS_PREPARE:
+ case Q_PS_BIND:
+ case Q_PS_EXECUTE:
+ case Q_PS_CLOSE:
{
my_bool old_display_result_vertically= display_result_vertically;
/* Default is full query, both reap and send */
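
The four commands registered above (PS_prepare, PS_bind, PS_execute, PS_close) let a test drive the prepared-statement client API step by step instead of going through the usual one-shot --ps-protocol path. As a rough orientation only, the C API sequence they correspond to looks like the sketch below; the table name and parameter value are invented for illustration, and error handling is reduced to a boolean:

#include <mysql.h>
#include <cstring>

// Hypothetical end-to-end round trip over an existing connection. Each step is
// annotated with the mysqltest command that exposes it in a .test file.
static bool ps_roundtrip(MYSQL *mysql)
{
  MYSQL_STMT *stmt= mysql_stmt_init(mysql);                     // PS_prepare: allocate a handle ...
  if (!stmt)
    return false;

  const char *query= "SELECT * FROM t1 WHERE a = ?";            // assumed test table
  bool ok= mysql_stmt_prepare(stmt, query, strlen(query)) == 0; // ... and prepare the statement

  int value= 42;                                                // assumed parameter value
  MYSQL_BIND param;
  memset(&param, 0, sizeof(param));
  param.buffer_type= MYSQL_TYPE_LONG;                           // PS_bind: bind one integer parameter
  param.buffer= &value;
  ok= ok && mysql_stmt_bind_param(stmt, &param) == 0;

  ok= ok && mysql_stmt_execute(stmt) == 0;                      // PS_execute: run the statement and
  ok= ok && mysql_stmt_store_result(stmt) == 0;                 // buffer the result set client-side

  mysql_stmt_close(stmt);                                       // PS_close: free the statement handle
  return ok;
}
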
diff --git a/cmake/submodules.cmake b/cmake/submodules.cmake
index 91f9f9e487a..34dcfbea77a 100644
--- a/cmake/submodules.cmake
+++ b/cmake/submodules.cmake
@@ -17,20 +17,29 @@ IF(GIT_EXECUTABLE AND EXISTS "${CMAKE_SOURCE_DIR}/.git")
${GIT_EXECUTABLE} config cmake.update-submodules yes")
ELSEIF(git_config_get_result EQUAL 128)
SET(update_result 0)
- ELSEIF (cmake_update_submodules MATCHES force)
- MESSAGE(STATUS "Updating submodules (forced)")
- EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --force --recursive --depth=1
- WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
- RESULT_VARIABLE update_result)
- ELSEIF (cmake_update_submodules MATCHES yes)
- EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --recursive --depth=1
- WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
- RESULT_VARIABLE update_result)
ELSE()
- MESSAGE(STATUS "Updating submodules")
- EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --recursive --depth=1
- WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
- RESULT_VARIABLE update_result)
+ SET(UPDATE_SUBMODULES_COMMAND
+ "${GIT_EXECUTABLE}" submodule update --init --recursive)
+ # Old Git may not work with "--depth 1".
+ # See also: https://github.com/git/git/commit/fb43e31f2b43076e7a30c9cd00d0241cb8cf97eb
+ IF(NOT GIT_VERSION_STRING VERSION_LESS "2.8.0")
+ SET(UPDATE_SUBMODULES_COMMAND ${UPDATE_SUBMODULES_COMMAND} --depth 1)
+ ENDIF()
+ IF(cmake_update_submodules MATCHES force)
+ MESSAGE(STATUS "Updating submodules (forced)")
+ EXECUTE_PROCESS(COMMAND ${UPDATE_SUBMODULES_COMMAND} --force
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+ RESULT_VARIABLE update_result)
+ ELSEIF(cmake_update_submodules MATCHES yes)
+ EXECUTE_PROCESS(COMMAND ${UPDATE_SUBMODULES_COMMAND}
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+ RESULT_VARIABLE update_result)
+ ELSE()
+ MESSAGE(STATUS "Updating submodules")
+ EXECUTE_PROCESS(COMMAND ${UPDATE_SUBMODULES_COMMAND}
+ WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
+ RESULT_VARIABLE update_result)
+ ENDIF()
ENDIF()
ENDIF()
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh
index 706f4655570..60532dd8e0b 100755
--- a/debian/autobake-deb.sh
+++ b/debian/autobake-deb.sh
@@ -151,13 +151,6 @@ dch -b -D "${CODENAME}" -v "${VERSION}" "Automatic build with ${LOGSTRING}." --c
echo "Creating package version ${VERSION} ... "
-# On Gitlab-CI, use -b to build binary only packages as there is
-# no need to waste time on generating the source package.
-if [[ $GITLAB_CI ]]
-then
- BUILDPACKAGE_FLAGS="-b"
-fi
-
# Use eatmydata if available to build faster with less I/O, skipping fsync()
# during the entire build process (safe because a build can always be restarted)
if which eatmydata > /dev/null
diff --git a/debian/mariadb-plugin-columnstore.install b/debian/mariadb-plugin-columnstore.install
deleted file mode 100644
index 39f4e8c7279..00000000000
--- a/debian/mariadb-plugin-columnstore.install
+++ /dev/null
@@ -1,119 +0,0 @@
-etc/columnstore/Columnstore.xml
-etc/columnstore/ErrorMessage.txt
-etc/columnstore/MessageFile.txt
-etc/columnstore/storagemanager.cnf
-etc/mysql/mariadb.conf.d/columnstore.cnf
-usr/bin/mcsRebuildEM
-usr/bin/DDLProc
-usr/bin/DMLProc
-usr/bin/ExeMgr
-usr/bin/PrimProc
-usr/bin/StorageManager
-usr/bin/WriteEngineServer
-usr/bin/clearShm
-usr/bin/cleartablelock
-usr/bin/columnstore-post-install
-usr/bin/columnstore-pre-uninstall
-usr/bin/columnstoreDBWrite
-usr/bin/columnstoreSyslogSetup.sh
-usr/bin/columnstore_run.sh
-usr/bin/colxml
-usr/bin/controllernode
-usr/bin/cpimport
-usr/bin/cpimport.bin
-usr/bin/cplogger
-usr/bin/cspasswd
-usr/bin/cskeys
-usr/bin/dbbuilder
-usr/bin/dbrmctl
-usr/bin/ddlcleanup
-usr/bin/editem
-usr/bin/idbmeminfo
-usr/bin/load_brm
-usr/bin/mariadb-columnstore-start.sh
-usr/bin/mariadb-columnstore-stop.sh
-usr/bin/mcs-savebrm.py
-usr/bin/mcs-loadbrm.py
-usr/bin/mcs-stop-controllernode.sh
-usr/bin/mcsGetConfig
-usr/bin/mcsSetConfig
-usr/bin/mycnfUpgrade
-usr/bin/post-mysql-install
-usr/bin/post-mysqld-install
-usr/bin/reset_locks
-usr/bin/rollback
-usr/bin/save_brm
-usr/bin/smcat
-usr/bin/smls
-usr/bin/smput
-usr/bin/smrm
-usr/bin/testS3Connection
-usr/bin/viewtablelock
-usr/bin/workernode
-usr/lib/*/libbatchloader.so
-usr/lib/*/libbrm.so
-usr/lib/*/libcacheutils.so
-usr/lib/*/libcloudio.so
-usr/lib/*/libcommon.so
-usr/lib/*/libcompress.so
-usr/lib/*/libconfigcpp.so
-usr/lib/*/libdataconvert.so
-usr/lib/*/libddlcleanuputil.so
-usr/lib/*/libddlpackage.so
-usr/lib/*/libddlpackageproc.so
-usr/lib/*/libdmlpackage.so
-usr/lib/*/libdmlpackageproc.so
-usr/lib/*/libexecplan.so
-usr/lib/*/libfuncexp.so
-usr/lib/*/libidbdatafile.so
-usr/lib/*/libjoblist.so
-usr/lib/*/libjoiner.so
-usr/lib/*/liblibmysql_client.so
-usr/lib/*/libloggingcpp.so
-usr/lib/*/libmarias3.so
-usr/lib/*/libmessageqcpp.so
-usr/lib/*/liboamcpp.so
-usr/lib/*/libquerystats.so
-usr/lib/*/libquerytele.so
-usr/lib/*/libregr.so
-usr/lib/*/librowgroup.so
-usr/lib/*/librwlock.so
-usr/lib/*/libstoragemanager.so
-usr/lib/*/libthreadpool.so
-usr/lib/*/libthrift.so
-usr/lib/*/libudfsdk.so
-usr/lib/*/libwindowfunction.so
-usr/lib/*/libwriteengine.so
-usr/lib/*/libwriteengineclient.so
-usr/lib/*/libwriteengineredistribute.so
-usr/lib/*/libdatatypes.so
-usr/lib/mysql/plugin/ha_columnstore.so
-usr/lib/mysql/plugin/libregr_mysql.so
-usr/lib/mysql/plugin/libudf_mysql.so
-usr/sbin/install_mcs_mysql.sh
-usr/share/columnstore/calremoveuserpriority.sql
-usr/share/columnstore/calsetuserpriority.sql
-usr/share/columnstore/calshowprocesslist.sql
-usr/share/columnstore/columnstoreAlias
-usr/share/columnstore/columnstoreLogRotate
-usr/share/columnstore/columnstoreSyslog
-usr/share/columnstore/columnstoreSyslog-ng
-usr/share/columnstore/columnstoreSyslog7
-usr/share/columnstore/columnstore_info.sql
-usr/share/columnstore/dumpcat_mysql.sql
-usr/share/columnstore/gitversionEngine
-usr/share/columnstore/mariadb-columnstore.service
-usr/share/columnstore/mcs-controllernode.service
-usr/share/columnstore/mcs-ddlproc.service
-usr/share/columnstore/mcs-dmlproc.service
-usr/share/columnstore/mcs-exemgr.service
-usr/share/columnstore/mcs-loadbrm.service
-usr/share/columnstore/mcs-primproc.service
-usr/share/columnstore/mcs-storagemanager.service
-usr/share/columnstore/mcs-workernode.service
-usr/share/columnstore/mcs-writeengineserver.service
-usr/share/columnstore/myCnf-exclude-args.text
-usr/share/columnstore/myCnf-include-args.text
-usr/share/columnstore/releasenum
-usr/share/columnstore/syscatalog_mysql.sql
-var/lib/columnstore/local/module
diff --git a/debian/mariadb-plugin-columnstore.postinst b/debian/mariadb-plugin-columnstore.postinst
deleted file mode 100644
index 97a1bba22c8..00000000000
--- a/debian/mariadb-plugin-columnstore.postinst
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# Install ColumnStore
-columnstore-post-install install
-
-#DEBHELPER#
diff --git a/debian/mariadb-plugin-columnstore.postrm b/debian/mariadb-plugin-columnstore.postrm
deleted file mode 100644
index b2e19774ec1..00000000000
--- a/debian/mariadb-plugin-columnstore.postrm
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -e
-
-if [ "$1" = "purge" ]; then
- rm -rf /var/lib/columnstore
- rm -rf /etc/columnstore
- rm -f /etc/mysql/mariadb.conf.d/columnstore.cnf.rpmsave
-fi
-
-# Automatically restart MariaDB after ColumnStore plugin has been removed
-case "$1" in
- purge|remove|disappear)
- if [ -d /run/systemd/system ]; then
- # If systemd
- deb-systemd-invoke restart mariadb.service >/dev/null
- elif [ -x "/etc/init.d/mariadb" ]; then
- # Fall-back to SysV init
- invoke-rc.d mariadb restart || exit $?
- fi
- ;;
-esac
-
-#DEBHELPER#
diff --git a/debian/mariadb-plugin-columnstore.prerm b/debian/mariadb-plugin-columnstore.prerm
deleted file mode 100644
index f6a27f14499..00000000000
--- a/debian/mariadb-plugin-columnstore.prerm
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -e
-
-columnstore-pre-uninstall
-
-
-#DEBHELPER#
diff --git a/debian/mariadb-plugin-columnstore.triggers b/debian/mariadb-plugin-columnstore.triggers
deleted file mode 100644
index dd866036784..00000000000
--- a/debian/mariadb-plugin-columnstore.triggers
+++ /dev/null
@@ -1 +0,0 @@
-activate-noawait ldconfig
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index a60f38cb7b1..ce40051f1a8 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -1188,6 +1188,8 @@ static struct my_option innochecksum_options[] = {
{"allow-mismatches", 'a', "Maximum checksum mismatch allowed.",
&allow_mismatches, &allow_mismatches, 0,
GET_ULL, REQUIRED_ARG, 0, 0, ULLONG_MAX, 0, 1, 0},
+ {"write", 'w', "Rewrite the checksum.",
+ &do_write, &do_write, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"page-type-summary", 'S', "Display a count of each page type "
"in a tablespace.", &page_type_summary, &page_type_summary, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
diff --git a/include/my_atomic.h b/include/my_atomic.h
index 81da9e35cf9..270134a6caf 100644
--- a/include/my_atomic.h
+++ b/include/my_atomic.h
@@ -2,7 +2,7 @@
#define MY_ATOMIC_INCLUDED
/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
- Copyright (c) 2018, 2020, MariaDB
+ Copyright (c) 2018, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -115,22 +115,6 @@
#include "atomic/gcc_builtins.h"
#endif
-#if SIZEOF_LONG == 4
-#define my_atomic_addlong(A,B) my_atomic_add32((int32*) (A), (B))
-#define my_atomic_loadlong(A) my_atomic_load32((int32*) (A))
-#define my_atomic_loadlong_explicit(A,O) my_atomic_load32_explicit((int32*) (A), (O))
-#define my_atomic_storelong(A,B) my_atomic_store32((int32*) (A), (B))
-#define my_atomic_faslong(A,B) my_atomic_fas32((int32*) (A), (B))
-#define my_atomic_caslong(A,B,C) my_atomic_cas32((int32*) (A), (int32*) (B), (C))
-#else
-#define my_atomic_addlong(A,B) my_atomic_add64((int64*) (A), (B))
-#define my_atomic_loadlong(A) my_atomic_load64((int64*) (A))
-#define my_atomic_loadlong_explicit(A,O) my_atomic_load64_explicit((int64*) (A), (O))
-#define my_atomic_storelong(A,B) my_atomic_store64((int64*) (A), (B))
-#define my_atomic_faslong(A,B) my_atomic_fas64((int64*) (A), (B))
-#define my_atomic_caslong(A,B,C) my_atomic_cas64((int64*) (A), (int64*) (B), (C))
-#endif
-
#ifndef MY_MEMORY_ORDER_SEQ_CST
#define MY_MEMORY_ORDER_RELAXED
#define MY_MEMORY_ORDER_CONSUME
diff --git a/mysql-test/main/contributors.result b/mysql-test/main/contributors.result
index 0c7ca03a2c5..8d72373696c 100644
--- a/mysql-test/main/contributors.result
+++ b/mysql-test/main/contributors.result
@@ -5,6 +5,8 @@ Tencent Cloud https://cloud.tencent.com Platinum Sponsor of the MariaDB Foundati
Microsoft https://microsoft.com/ Platinum Sponsor of the MariaDB Foundation
MariaDB Corporation https://mariadb.com Founding member, Platinum Sponsor of the MariaDB Foundation
ServiceNow https://servicenow.com Platinum Sponsor of the MariaDB Foundation
+Intel https://www.intel.com Platinum Sponsor of the MariaDB Foundation
+SIT https://sit.org Platinum Sponsor of the MariaDB Foundation
Visma https://visma.com Gold Sponsor of the MariaDB Foundation
DBS https://dbs.com Gold Sponsor of the MariaDB Foundation
IBM https://www.ibm.com Gold Sponsor of the MariaDB Foundation
diff --git a/mysql-test/main/create_or_replace.result b/mysql-test/main/create_or_replace.result
index 294b0623fc1..178b7182666 100644
--- a/mysql-test/main/create_or_replace.result
+++ b/mysql-test/main/create_or_replace.result
@@ -1,3 +1,5 @@
+SET @save_persistent=@@GLOBAL.innodb_stats_persistent;
+SET GLOBAL innodb_stats_persistent=OFF;
CREATE TABLE t2 (a int);
INSERT INTO t2 VALUES(1),(2),(3);
#
@@ -258,8 +260,7 @@ Note 1051 Unknown table 'test.t1,mysqltest2.t2'
create table test.t1 (i int) engine=myisam;
create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -272,8 +273,7 @@ ERROR 42000: A table must have at least 1 column
show tables;
Tables_in_test
t2
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -282,16 +282,14 @@ THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
create or replace table mysqltest2.t2;
ERROR 42000: A table must have at least 1 column
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
create table t1 (i int);
drop table t1;
create table test.t1 (i int);
create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -304,8 +302,7 @@ ERROR 42S21: Duplicate column name 'a'
show tables;
Tables_in_test
t2
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -314,16 +311,14 @@ THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
create or replace table mysqltest2.t2 (a int) select 1 as 'a', 2 as 'a';
ERROR 42S21: Duplicate column name 'a'
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
create table t1 (i int);
drop table t1;
create table test.t1 (i int) engine=innodb;
create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
@@ -335,8 +330,7 @@ drop table test.t1,mysqltest2.t2;
create table test.t1 (i int) engine=aria transactional=1 checksum=1;
create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
@@ -353,8 +347,7 @@ drop table test.t1;
#
create table t1 (i int);
lock table t1 write;
-select * from information_schema.metadata_lock_info
-where table_schema!='mysql' or table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -365,8 +358,7 @@ ERROR 22001: Data too long for column 'a' at row 1
show tables;
Tables_in_test
t2
-select * from information_schema.metadata_lock_info
-where table_schema!='mysql' or table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
create table t1 (i int);
drop table t1;
@@ -454,8 +446,7 @@ drop view t1;
#
create table t1 (a int);
lock table t1 write, t2 read;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -463,8 +454,7 @@ THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
create or replace table t1 (i int);
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -472,8 +462,7 @@ THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
create or replace table t1 like t2;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -481,8 +470,7 @@ THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
create or replace table t1 select 1 as f1;
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
# MDL_BACKUP_DDL NULL Backup lock
# MDL_BACKUP_DML NULL Backup lock
@@ -580,3 +568,4 @@ ERROR HY000: Table 't3' was not locked with LOCK TABLES
UNLOCK TABLES;
DROP TABLE t3;
# End of 10.4 tests
+SET GLOBAL innodb_stats_persistent=@save_persistent;
diff --git a/mysql-test/main/create_or_replace.test b/mysql-test/main/create_or_replace.test
index 7fa08d13847..573e0e177c2 100644
--- a/mysql-test/main/create_or_replace.test
+++ b/mysql-test/main/create_or_replace.test
@@ -5,6 +5,9 @@
--source include/have_innodb.inc
--source include/have_metadata_lock_info.inc
+SET @save_persistent=@@GLOBAL.innodb_stats_persistent;
+SET GLOBAL innodb_stats_persistent=OFF;
+
#
# Create help table
#
@@ -212,21 +215,18 @@ create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
--error ER_TABLE_MUST_HAVE_COLUMNS
create or replace table test.t1;
show tables;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
--error ER_TABLE_MUST_HAVE_COLUMNS
create or replace table mysqltest2.t2;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
create table t1 (i int);
drop table t1;
@@ -235,21 +235,18 @@ create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
--error ER_DUP_FIELDNAME
create or replace table test.t1 (a int) select 1 as 'a', 2 as 'a';
show tables;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
--error ER_DUP_FIELDNAME
create or replace table mysqltest2.t2 (a int) select 1 as 'a', 2 as 'a';
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
create table t1 (i int);
drop table t1;
@@ -258,8 +255,7 @@ create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
unlock tables;
drop table test.t1,mysqltest2.t2;
@@ -268,8 +264,7 @@ create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
unlock tables;
drop table t1;
@@ -285,15 +280,13 @@ create table t1 (i int);
lock table t1 write;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_schema!='mysql' or table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
--error ER_DATA_TOO_LONG
create or replace table t1 (a char(1)) engine=Innodb select 'foo' as a;
show tables;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_schema!='mysql' or table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
create table t1 (i int);
drop table t1;
@@ -371,24 +364,20 @@ create table t1 (a int);
lock table t1 write, t2 read;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
create or replace table t1 (i int);
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
create or replace table t1 like t2;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
create or replace table t1 select 1 as f1;
--replace_column 1 #
--sorted_result
-select * from information_schema.metadata_lock_info
-where table_name not like 'innodb_%_stats';
+select * from information_schema.metadata_lock_info;
drop table t1;
unlock tables;
@@ -520,3 +509,5 @@ UNLOCK TABLES;
DROP TABLE t3;
--echo # End of 10.4 tests
+
+SET GLOBAL innodb_stats_persistent=@save_persistent;
diff --git a/mysql-test/main/ctype_utf32.result b/mysql-test/main/ctype_utf32.result
index cf9db875290..069e4174c9d 100644
--- a/mysql-test/main/ctype_utf32.result
+++ b/mysql-test/main/ctype_utf32.result
@@ -2913,5 +2913,30 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
SET NAMES utf8;
#
+# MDEV-28078 Garbage on multiple equal ENUMs with tricky character sets
+#
+CREATE TABLE t1 (
+c1 ENUM ('a','b') CHARACTER SET utf32 DEFAULT 'a',
+c2 ENUM ('a','b') CHARACTER SET utf32 DEFAULT 'a'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` enum('a','b') CHARACTER SET utf32 DEFAULT 'a',
+ `c2` enum('a','b') CHARACTER SET utf32 DEFAULT 'a'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 (
+c1 ENUM ('00000061','00000062') DEFAULT '00000061' COLLATE latin1_bin,
+c2 ENUM ('a','b') DEFAULT 'a' COLLATE utf32_general_ci
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` enum('00000061','00000062') CHARACTER SET latin1 COLLATE latin1_bin DEFAULT '00000061',
+ `c2` enum('a','b') CHARACTER SET utf32 DEFAULT 'a'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+#
# End of 10.2 tests
#
diff --git a/mysql-test/main/ctype_utf32.test b/mysql-test/main/ctype_utf32.test
index 6944fdb30be..0e50405f871 100644
--- a/mysql-test/main/ctype_utf32.test
+++ b/mysql-test/main/ctype_utf32.test
@@ -1068,5 +1068,24 @@ SET NAMES utf8;
--echo #
+--echo # MDEV-28078 Garbage on multiple equal ENUMs with tricky character sets
+--echo #
+
+CREATE TABLE t1 (
+ c1 ENUM ('a','b') CHARACTER SET utf32 DEFAULT 'a',
+ c2 ENUM ('a','b') CHARACTER SET utf32 DEFAULT 'a'
+);
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (
+ c1 ENUM ('00000061','00000062') DEFAULT '00000061' COLLATE latin1_bin,
+ c2 ENUM ('a','b') DEFAULT 'a' COLLATE utf32_general_ci
+);
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/multi_update_innodb.result b/mysql-test/main/multi_update_innodb.result
index 2ec7eb3065e..52bbece4fa0 100644
--- a/mysql-test/main/multi_update_innodb.result
+++ b/mysql-test/main/multi_update_innodb.result
@@ -207,4 +207,19 @@ ERROR 23000: Duplicate entry '0000-00-00 00:00:00' for key 'f2k'
DROP VIEW v1;
DROP TABLE t3,t4;
SET @@sql_mode=@save_sql_mode;
+#
# End of 10.2 tests
+#
+#
+# MDEV-28095 crash in multi-update and implicit grouping
+#
+CREATE TABLE t1 (a int) engine=innodb;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b int);
+INSERT INTO t2 VALUES (1),(2);
+UPDATE t1 NATURAL JOIN t2 SET a = 1 ORDER BY AVG (a) ;
+ERROR HY000: Invalid use of group function
+DROP TABLE t1, t2;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/multi_update_innodb.test b/mysql-test/main/multi_update_innodb.test
index 04736482011..02f6a7a3316 100644
--- a/mysql-test/main/multi_update_innodb.test
+++ b/mysql-test/main/multi_update_innodb.test
@@ -243,4 +243,23 @@ DROP VIEW v1;
DROP TABLE t3,t4;
SET @@sql_mode=@save_sql_mode;
+--echo #
--echo # End of 10.2 tests
+--echo #
+
+--echo #
+--echo # MDEV-28095 crash in multi-update and implicit grouping
+--echo #
+CREATE TABLE t1 (a int) engine=innodb;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b int);
+INSERT INTO t2 VALUES (1),(2);
+--error ER_INVALID_GROUP_FUNC_USE
+UPDATE t1 NATURAL JOIN t2 SET a = 1 ORDER BY AVG (a) ;
+DROP TABLE t1, t2;
+
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
+
diff --git a/mysql-test/main/opt_tvc.result b/mysql-test/main/opt_tvc.result
index 9752aa71bfb..9b6d97492cd 100644
--- a/mysql-test/main/opt_tvc.result
+++ b/mysql-test/main/opt_tvc.result
@@ -732,3 +732,30 @@ a b
4 4
drop table t1;
SET @@in_predicate_conversion_threshold= default;
+#
+# MDEV-27937: Prepared statement with ? in the list of IN predicate
+#
+set in_predicate_conversion_threshold=2;
+create table t1 (id int, a int, b int);
+insert into t1 values (1,3,30), (2,7,70), (3,1,10);
+prepare stmt from "
+select * from t1 where a in (7, ?, 5, 1);
+";
+execute stmt using 3;
+id a b
+1 3 30
+2 7 70
+3 1 10
+deallocate prepare stmt;
+prepare stmt from "
+select * from t1 where (a,b) in ((7,70), (3,?), (5,50), (1,10));
+";
+execute stmt using 30;
+id a b
+1 3 30
+2 7 70
+3 1 10
+deallocate prepare stmt;
+drop table t1;
+set in_predicate_conversion_threshold=default;
+# End of 10.3 tests
diff --git a/mysql-test/main/opt_tvc.test b/mysql-test/main/opt_tvc.test
index e4e8c6d7919..f8469f22aa1 100644
--- a/mysql-test/main/opt_tvc.test
+++ b/mysql-test/main/opt_tvc.test
@@ -428,3 +428,29 @@ eval $query;
drop table t1;
SET @@in_predicate_conversion_threshold= default;
+--echo #
+--echo # MDEV-27937: Prepared statement with ? in the list of IN predicate
+--echo #
+
+set in_predicate_conversion_threshold=2;
+
+create table t1 (id int, a int, b int);
+insert into t1 values (1,3,30), (2,7,70), (3,1,10);
+
+prepare stmt from "
+select * from t1 where a in (7, ?, 5, 1);
+";
+execute stmt using 3;
+deallocate prepare stmt;
+
+prepare stmt from "
+select * from t1 where (a,b) in ((7,70), (3,?), (5,50), (1,10));
+";
+execute stmt using 30;
+deallocate prepare stmt;
+
+drop table t1;
+
+set in_predicate_conversion_threshold=default;
+
+--echo # End of 10.3 tests
diff --git a/mysql-test/main/processlist.result b/mysql-test/main/processlist.result
index 2d3228a6d91..d99160f5c74 100644
--- a/mysql-test/main/processlist.result
+++ b/mysql-test/main/processlist.result
@@ -40,3 +40,23 @@ utf8mb4_string xxx😎yyy
#
# End of 10.1 tests
#
+#
+# Start of 10.3 tests
+#
+#
+# MDEV-28131 Unexpected warning while selecting from information_schema.processlist
+#
+connect conn1, localhost, root,,;
+connection conn1;
+SELECT SLEEP(1000);
+connection default;
+SELECT progress FROM information_schema.processlist WHERE info='SELECT SLEEP(1000)';
+progress
+0.000
+connection conn1;
+Got one of the listed errors
+connection default;
+disconnect conn1;
+#
+# End of 10.3 tests
+#
diff --git a/mysql-test/main/processlist.test b/mysql-test/main/processlist.test
index 8e98701459a..f419f57ea2f 100644
--- a/mysql-test/main/processlist.test
+++ b/mysql-test/main/processlist.test
@@ -70,3 +70,38 @@ SELECT INFO, INFO_BINARY, 'xxx😎yyy' AS utf8mb4_string FROM INFORMATION_SCHEMA
--echo #
--echo # End of 10.1 tests
--echo #
+
+--echo #
+--echo # Start of 10.3 tests
+--echo #
+
+--echo #
+--echo # MDEV-28131 Unexpected warning while selecting from information_schema.processlist
+--echo #
+
+connect (conn1, localhost, root,,);
+connection conn1;
+let $ID= `select connection_id()`;
+send SELECT SLEEP(1000);
+connection default;
+let $wait_timeout= 10;
+let $wait_condition=select count(*)=1 from information_schema.processlist
+where state='User sleep' and info='SELECT SLEEP(1000)';
+--source include/wait_condition.inc
+SELECT progress FROM information_schema.processlist WHERE info='SELECT SLEEP(1000)';
+disable_query_log;
+eval kill $ID;
+enable_query_log;
+let $wait_timeout= 10;
+let $wait_condition=select count(*)=0 from information_schema.processlist
+where state='User sleep' and info='SELECT SLEEP(1000)';
+--source include/wait_condition.inc
+connection conn1;
+--error 2013,ER_CONNECTION_KILLED
+reap;
+connection default;
+disconnect conn1;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/sp-cursor.result b/mysql-test/main/sp-cursor.result
index dc38ad64069..2218f8c36fe 100644
--- a/mysql-test/main/sp-cursor.result
+++ b/mysql-test/main/sp-cursor.result
@@ -738,6 +738,69 @@ c
DROP PROCEDURE p1;
DROP TABLE t1;
#
+# MDEV-26009: Server crash when calling twice procedure using FOR-loop
+#
+CREATE TABLE t1 ( id int, name varchar(24));
+INSERT INTO t1 values (1, 'x'), (2, 'y'), (3, 'z');
+create function get_name(_id int) returns varchar(24)
+return (select name from t1 where id = _id);
+select get_name(id) from t1;
+get_name(id)
+x
+y
+z
+create procedure test_proc()
+begin
+declare _cur cursor for select get_name(id) from t1;
+for row in _cur do select 1; end for;
+end;
+^^
+call test_proc();
+1
+1
+1
+1
+1
+1
+call test_proc();
+1
+1
+1
+1
+1
+1
+drop procedure test_proc;
+drop function get_name;
+drop table t1;
+CREATE TABLE t1 (id int, name varchar(24));
+INSERT INTO t1 (id, name) VALUES (1, 'x'),(2, 'y'),(3, 'z');
+create function get_name(_id int) returns varchar(24)
+return (select name from t1 where id = _id);
+create view v1 as select get_name(id) from t1;
+create procedure test_proc()
+begin
+declare _cur cursor for select 1 from v1;
+for row in _cur do select 1; end for;
+end$$
+call test_proc();
+1
+1
+1
+1
+1
+1
+call test_proc();
+1
+1
+1
+1
+1
+1
+drop procedure test_proc;
+drop view v1;
+drop function get_name;
+drop table t1;
+#
# Start of 10.8 tests
#
#
diff --git a/mysql-test/main/sp-cursor.test b/mysql-test/main/sp-cursor.test
index f86721f41e8..44fe6ba51e8 100644
--- a/mysql-test/main/sp-cursor.test
+++ b/mysql-test/main/sp-cursor.test
@@ -747,6 +747,62 @@ DROP TABLE t1;
--echo #
+--echo # MDEV-26009: Server crash when calling twice procedure using FOR-loop
+--echo #
+
+
+CREATE TABLE t1 ( id int, name varchar(24));
+INSERT INTO t1 values (1, 'x'), (2, 'y'), (3, 'z');
+
+create function get_name(_id int) returns varchar(24)
+ return (select name from t1 where id = _id);
+
+select get_name(id) from t1;
+
+delimiter ^^;
+
+create procedure test_proc()
+begin
+ declare _cur cursor for select get_name(id) from t1;
+ for row in _cur do select 1; end for;
+end;
+^^
+delimiter ;^^
+
+call test_proc();
+call test_proc();
+
+drop procedure test_proc;
+drop function get_name;
+drop table t1;
+
+
+CREATE TABLE t1 (id int, name varchar(24));
+INSERT INTO t1 (id, name) VALUES (1, 'x'),(2, 'y'),(3, 'z');
+
+create function get_name(_id int) returns varchar(24)
+ return (select name from t1 where id = _id);
+
+create view v1 as select get_name(id) from t1;
+
+delimiter $$;
+create procedure test_proc()
+begin
+ declare _cur cursor for select 1 from v1;
+ for row in _cur do select 1; end for;
+end$$
+delimiter ;$$
+
+call test_proc();
+call test_proc();
+
+drop procedure test_proc;
+drop view v1;
+drop function get_name;
+drop table t1;
+
+
+--echo #
--echo # Start of 10.8 tests
--echo #
diff --git a/mysql-test/main/view.result b/mysql-test/main/view.result
index 075e50e1b0a..35471454cb4 100644
--- a/mysql-test/main/view.result
+++ b/mysql-test/main/view.result
@@ -6846,6 +6846,34 @@ id bar
Drop View v1;
Drop table t1;
#
+# MDEV-24281: Execution of PREPARE from CREATE VIEW statement
+#
+create table t1 (s1 int);
+insert into t1 values (3), (7), (1);
+prepare stmt from "
+create view v1 as select 's1', s1, 1 as My_exp_s1 from t1;
+";
+execute stmt;
+deallocate prepare stmt;
+show create view v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 's1' AS `My_exp_1_s1`,`t1`.`s1` AS `s1`,1 AS `My_exp_s1` from `t1` latin1 latin1_swedish_ci
+select * from v1;
+My_exp_1_s1 s1 My_exp_s1
+s1 3 1
+s1 7 1
+s1 1 1
+drop view v1;
+prepare stmt from "
+create view v1 as select 's1', s1, 1 as My_exp_s1 from t1;
+";
+execute stmt;
+execute stmt;
+ERROR 42S01: Table 'v1' already exists
+deallocate prepare stmt;
+drop view v1;
+drop table t1;
+#
# End of 10.3 tests
#
#
diff --git a/mysql-test/main/view.test b/mysql-test/main/view.test
index 825274756a8..49b339c9f4c 100644
--- a/mysql-test/main/view.test
+++ b/mysql-test/main/view.test
@@ -6577,6 +6577,32 @@ Drop View v1;
Drop table t1;
--echo #
+--echo # MDEV-24281: Execution of PREPARE from CREATE VIEW statement
+--echo #
+
+create table t1 (s1 int);
+insert into t1 values (3), (7), (1);
+
+prepare stmt from "
+create view v1 as select 's1', s1, 1 as My_exp_s1 from t1;
+";
+execute stmt;
+deallocate prepare stmt;
+show create view v1;
+select * from v1;
+drop view v1;
+
+prepare stmt from "
+create view v1 as select 's1', s1, 1 as My_exp_s1 from t1;
+";
+execute stmt;
+--error ER_TABLE_EXISTS_ERROR
+execute stmt;
+deallocate prepare stmt;
+drop view v1;
+drop table t1;
+
+--echo #
--echo # End of 10.3 tests
--echo #
diff --git a/mysql-test/suite/binlog/r/binlog_autocommit_off_no_hang.result b/mysql-test/suite/binlog/r/binlog_autocommit_off_no_hang.result
new file mode 100644
index 00000000000..71eecd881ca
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_autocommit_off_no_hang.result
@@ -0,0 +1,6 @@
+ALTER TABLE mysql.gtid_slave_pos ENGINE=innodb;
+# Restart the server so mysqld reads the gtid_slave_pos using innodb
+# Set gtid_slave_pos should not hang
+SET GLOBAL gtid_slave_pos=@@gtid_binlog_pos;
+COMMIT;
+RESET MASTER;
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result
new file mode 100644
index 00000000000..9148f0e8c2b
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_raw_flush.result
@@ -0,0 +1,7 @@
+CREATE TABLE t1 (a int);
+FLUSH LOGS;
+INSERT INTO t1 VALUES (1);
+# timeout TIMEOUT MYSQL_BINLOG --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=MASTER_MYPORT --stop-never --result-file=MYSQLTEST_VARDIR/tmp/ master-bin.000001
+# MYSQL_BINLOG MYSQLTEST_VARDIR/tmp/master-bin.000002 > MYSQLTEST_VARDIR/tmp/local-bin.000002.out
+FOUND 1 /GTID 0-1-2/ in local-bin.000002.out
+DROP TABLE t1;
diff --git a/mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang-master.opt b/mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang-master.opt
new file mode 100644
index 00000000000..e0fa81e6eeb
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang-master.opt
@@ -0,0 +1 @@
+--autocommit=0
diff --git a/mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang.test b/mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang.test
new file mode 100644
index 00000000000..8f1dbb2a2dd
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_autocommit_off_no_hang.test
@@ -0,0 +1,45 @@
+#
+# Purpose:
+# When the mysql.gtid_slave_pos table uses the InnoDB engine, and mysqld
+# starts, it reads the table and begins a transaction. After mysqld reads the
+# value, it should end the transaction and release all associated locks.
+# The bug reported in DBAAS-7828 shows that when autocommit is off, the locks
+# are not released, resulting in indefinite hangs on future attempts to change
+# gtid_slave_pos. This test ensures the fix works, i.e. that the locks are
+# properly released.
+#
+# References:
+# DBAAS-7828: Primary/replica: configuration change of "autocommit=0" can
+# not be applied
+#
+
+--source include/have_innodb.inc
+--source include/have_log_bin.inc
+
+# Reading the gtid_slave_pos table is binlog-format independent, so run only
+# one format to reduce test time
+--source include/have_binlog_format_row.inc
+
+--let old_slave_pos_engine= query_get_value(SHOW TABLE STATUS FROM mysql LIKE 'gtid_slave_pos', Engine, 1)
+
+# Use a transactional engine
+ALTER TABLE mysql.gtid_slave_pos ENGINE=innodb;
+
+--echo # Restart the server so mysqld reads the gtid_slave_pos using innodb
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--shutdown_server
+--source include/wait_until_disconnected.inc
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+--echo # Set gtid_slave_pos should not hang
+SET GLOBAL gtid_slave_pos=@@gtid_binlog_pos;
+COMMIT;
+
+# Revert table type
+--disable_query_log
+--eval ALTER TABLE mysql.gtid_slave_pos ENGINE=$old_slave_pos_engine
+--enable_query_log
+
+RESET MASTER;
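The purpose comment above reduces to a short interactive check. A minimal sketch, assuming a server started with --autocommit=0 and binary logging enabled (illustrative only, not part of this commit):

  ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
  # restart mysqld (still with --autocommit=0), then in a fresh session:
  # the next statement hung before the fix and returns promptly after it
  SET GLOBAL gtid_slave_pos = @@gtid_binlog_pos;
  COMMIT;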
diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test
new file mode 100644
index 00000000000..f95fc0137a2
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_raw_flush.test
@@ -0,0 +1,45 @@
+#
+# Purpose:
+# When using mariadb-binlog with the --raw and --stop-never options, events
+# from the master's currently active log file should be written to the
+# corresponding log file specified by --result-file and be visible on disk.
+# This test ensures that the log files on disk, created by mariadb-binlog,
+# contain the most up-to-date events from the master.
+#
+# Methodology:
+# On the master, rotate to a newly active binlog file and write an event to
+# it. Read the master's binlog using mariadb-binlog with --raw and --stop-never
+# and write the data to an intermediary binlog file (a timeout is used on this
+# command to ensure it exits). Read the local intermediary binlog file to ensure
+# that the master's most recent event exists in the local file.
+#
+# References:
+# MDEV-14608: mysqlbinlog lastest backupfile size is 0
+#
+
+--source include/linux.inc
+--source include/have_log_bin.inc
+
+# Create newly active log
+CREATE TABLE t1 (a int);
+FLUSH LOGS;
+INSERT INTO t1 VALUES (1);
+
+# Read binlog data from master to intermediary result file
+--let TIMEOUT=1
+--echo # timeout TIMEOUT MYSQL_BINLOG --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=MASTER_MYPORT --stop-never --result-file=MYSQLTEST_VARDIR/tmp/ master-bin.000001
+--error 124 # Error 124 means timeout was reached
+--exec timeout $TIMEOUT $MYSQL_BINLOG --raw --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --stop-never --result-file=$MYSQLTEST_VARDIR/tmp/ master-bin.000001
+
+# Ensure the binlog output has the most recent events from the master
+--echo # MYSQL_BINLOG MYSQLTEST_VARDIR/tmp/master-bin.000002 > MYSQLTEST_VARDIR/tmp/local-bin.000002.out
+--exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/tmp/master-bin.000002 > $MYSQLTEST_VARDIR/tmp/local-bin.000002.out
+--let SEARCH_PATTERN= GTID 0-1-2
+--let SEARCH_FILE= $MYSQLTEST_VARDIR/tmp/local-bin.000002.out
+--source include/search_pattern_in_file.inc
+
+# Cleanup
+DROP TABLE t1;
+--remove_file $MYSQLTEST_VARDIR/tmp/master-bin.000001
+--remove_file $MYSQLTEST_VARDIR/tmp/master-bin.000002
+--remove_file $MYSQLTEST_VARDIR/tmp/local-bin.000002.out
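As the methodology above notes, the INSERT issued after FLUSH LOGS is expected to land in the second binlog file as GTID 0-1-2. A hypothetical server-side sanity check before comparing the raw copy could look like this (illustrative only; include names as commonly found under mysql-test/include):

  --let $binlog_file = query_get_value(SHOW MASTER STATUS, File, 1)
  --echo # listing events in $binlog_file
  --source include/show_binlog_events.inc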
diff --git a/mysql-test/suite/compat/oracle/r/sp-package.result b/mysql-test/suite/compat/oracle/r/sp-package.result
index 273c2f3ea7d..daa244a3c5a 100644
--- a/mysql-test/suite/compat/oracle/r/sp-package.result
+++ b/mysql-test/suite/compat/oracle/r/sp-package.result
@@ -2960,3 +2960,311 @@ END $$
CALL xyz.xyz123(17,18,@R);
DROP PACKAGE xyz;
DROP TABLE t1;
+#
+# MDEV-28166 sql_mode=ORACLE: fully qualified package function calls do not work: db.pkg.func()
+#
+SELECT `db `.pkg.func();
+ERROR 42000: Incorrect database name 'db '
+SELECT db.`pkg `.func();
+ERROR 42000: Incorrect routine name 'pkg '
+SELECT db.pkg.`func `();
+ERROR 42000: Incorrect routine name 'func '
+CREATE DATABASE db1;
+USE db1;
+CREATE PACKAGE pkg1 AS
+FUNCTION f1 RETURN TEXT;
+FUNCTION f2_db1_pkg1_f1 RETURN TEXT;
+FUNCTION f2_pkg1_f1 RETURN TEXT;
+FUNCTION f2_f1 RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY pkg1
+AS
+FUNCTION f1 RETURN TEXT IS
+BEGIN
+RETURN 'This is db1.pkg1.f1';
+END;
+FUNCTION f2_db1_pkg1_f1 RETURN TEXT IS
+BEGIN
+RETURN db1.pkg1.f1();
+END;
+FUNCTION f2_pkg1_f1 RETURN TEXT IS
+BEGIN
+RETURN pkg1.f1();
+END;
+FUNCTION f2_f1 RETURN TEXT IS
+BEGIN
+RETURN f1();
+END;
+END;
+$$
+USE db1;
+SELECT pkg1.f2_db1_pkg1_f1();
+pkg1.f2_db1_pkg1_f1()
+This is db1.pkg1.f1
+SELECT pkg1.f2_pkg1_f1();
+pkg1.f2_pkg1_f1()
+This is db1.pkg1.f1
+SELECT pkg1.f2_f1();
+pkg1.f2_f1()
+This is db1.pkg1.f1
+SELECT db1.pkg1.f2_db1_pkg1_f1();
+db1.pkg1.f2_db1_pkg1_f1()
+This is db1.pkg1.f1
+SELECT db1.pkg1.f2_pkg1_f1();
+db1.pkg1.f2_pkg1_f1()
+This is db1.pkg1.f1
+SELECT db1.pkg1.f2_f1();
+db1.pkg1.f2_f1()
+This is db1.pkg1.f1
+USE test;
+SELECT db1.pkg1.f2_db1_pkg1_f1();
+db1.pkg1.f2_db1_pkg1_f1()
+This is db1.pkg1.f1
+SELECT db1.pkg1.f2_pkg1_f1();
+db1.pkg1.f2_pkg1_f1()
+This is db1.pkg1.f1
+SELECT db1.pkg1.f2_f1();
+db1.pkg1.f2_f1()
+This is db1.pkg1.f1
+DROP DATABASE db1;
+CREATE DATABASE db1;
+CREATE DATABASE db2;
+CREATE PACKAGE db1.pkg1 AS
+FUNCTION f1 RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY db1.pkg1 AS
+FUNCTION f1 RETURN TEXT AS
+BEGIN
+RETURN 'This is db1.pkg1.f1';
+END;
+END;
+$$
+CREATE PACKAGE db2.pkg1 AS
+FUNCTION f1 RETURN TEXT;
+FUNCTION var1 RETURN TEXT;
+FUNCTION var2 RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY db2.pkg1 AS
+m_var1 TEXT;
+m_var2 TEXT;
+FUNCTION f1 RETURN TEXT AS
+BEGIN
+RETURN 'This is db2.pkg1.f1';
+END;
+FUNCTION var1 RETURN TEXT AS
+BEGIN
+RETURN m_var1;
+END;
+FUNCTION var2 RETURN TEXT AS
+BEGIN
+RETURN m_var2;
+END;
+BEGIN
+m_var1:= db1.pkg1.f1();
+m_var2:= db2.pkg1.f1();
+END;
+$$
+SELECT db2.pkg1.var1(), db2.pkg1.var2();
+db2.pkg1.var1() db2.pkg1.var2()
+This is db1.pkg1.f1 This is db2.pkg1.f1
+DROP DATABASE db1;
+DROP DATABASE db2;
+CREATE PACKAGE pkg1 AS
+FUNCTION f1(a TEXT) RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY pkg1 AS
+FUNCTION f1(a TEXT) RETURN TEXT AS
+BEGIN
+RETURN a;
+END;
+END;
+$$
+SELECT test.pkg1.f1('xxx');
+test.pkg1.f1('xxx')
+xxx
+SELECT test.pkg1.f1('xxx' AS a);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'AS a)' at line 1
+DROP PACKAGE pkg1;
+#
+# MDEV-19328 sql_mode=ORACLE: Package function in VIEW
+#
+SET sql_mode=ORACLE;
+CREATE PACKAGE test1 AS
+FUNCTION f_test RETURN number;
+END test1;
+$$
+CREATE PACKAGE BODY test1
+AS
+FUNCTION f_test RETURN NUMBER IS
+BEGIN
+RETURN 1;
+END;
+END test1;
+$$
+SET sql_mode=ORACLE;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test1.f_test();
+SELECT * FROM v_test;
+c1
+1
+SHOW CREATE VIEW v_test;
+View v_test
+Create View CREATE VIEW "v_test" AS select 1 AS "c1" from DUAL where 1 = "test"."test1"."f_test"()
+character_set_client latin1
+collation_connection latin1_swedish_ci
+SET sql_mode=DEFAULT;
+SELECT * FROM v_test;
+c1
+1
+SHOW CREATE VIEW v_test;
+View v_test
+Create View CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v_test` AS select 1 AS `c1` from DUAL where 1 = `test`.`test1`.`f_test`()
+character_set_client latin1
+collation_connection latin1_swedish_ci
+DROP VIEW v_test;
+SET sql_mode=DEFAULT;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test1.f_test();
+ERROR 42000: FUNCTION test1.f_test does not exist
+SET sql_mode=ORACLE;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test.test1.f_test();
+SELECT * FROM v_test;
+c1
+1
+SHOW CREATE VIEW v_test;
+View v_test
+Create View CREATE VIEW "v_test" AS select 1 AS "c1" from DUAL where 1 = "test"."test1"."f_test"()
+character_set_client latin1
+collation_connection latin1_swedish_ci
+SET sql_mode=DEFAULT;
+SELECT * FROM v_test;
+c1
+1
+SHOW CREATE VIEW v_test;
+View v_test
+Create View CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v_test` AS select 1 AS `c1` from DUAL where 1 = `test`.`test1`.`f_test`()
+character_set_client latin1
+collation_connection latin1_swedish_ci
+DROP VIEW v_test;
+SET sql_mode=DEFAULT;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test.test1.f_test();
+SELECT * FROM v_test;
+c1
+1
+SHOW CREATE VIEW v_test;
+View v_test
+Create View CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v_test` AS select 1 AS `c1` from DUAL where 1 = `test`.`test1`.`f_test`()
+character_set_client latin1
+collation_connection latin1_swedish_ci
+SET sql_mode=ORACLE;
+SELECT * FROM v_test;
+c1
+1
+SHOW CREATE VIEW v_test;
+View v_test
+Create View CREATE VIEW "v_test" AS select 1 AS "c1" from DUAL where 1 = "test"."test1"."f_test"()
+character_set_client latin1
+collation_connection latin1_swedish_ci
+DROP VIEW v_test;
+SET sql_mode=ORACLE;
+DROP PACKAGE test1;
+#
+# MDEV-19804 sql_mode=ORACLE: call procedure in packages
+#
+CALL `db1 `.pkg.p;
+ERROR 42000: Incorrect database name 'db1 '
+CALL db1.`pkg `.p;
+ERROR 42000: Incorrect routine name 'pkg '
+CALL db1.pkg.`p `;
+ERROR 42000: Incorrect routine name 'p '
+SET sql_mode=ORACLE;
+CREATE PACKAGE pkg1 as
+PROCEDURE p1();
+END;
+$$
+CREATE PACKAGE BODY pkg1 as
+PROCEDURE p1() as
+BEGIN
+SELECT 'test-function' AS c1;
+END;
+END;
+$$
+CALL pkg1.p1;
+c1
+test-function
+CALL test.pkg1.p1;
+c1
+test-function
+SET sql_mode=DEFAULT;
+CALL test.pkg1.p1;
+c1
+test-function
+SET sql_mode=ORACLE;
+BEGIN
+CALL pkg1.p1;
+CALL test.pkg1.p1;
+END
+$$
+c1
+test-function
+c1
+test-function
+BEGIN
+pkg1.p1;
+test.pkg1.p1;
+END
+$$
+c1
+test-function
+c1
+test-function
+DROP PACKAGE pkg1;
+CREATE DATABASE db1;
+CREATE PACKAGE db1.pkg1 AS
+PROCEDURE p1(a OUT TEXT);
+END;
+$$
+CREATE PACKAGE BODY db1.pkg1 AS
+PROCEDURE p1(a OUT TEXT) AS
+BEGIN
+a:= 'This is db1.pkg1.p1';
+END;
+END;
+$$
+CREATE DATABASE db2;
+CREATE PACKAGE db2.pkg1 AS
+FUNCTION var1 RETURN TEXT;
+PROCEDURE p1(a OUT TEXT);
+PROCEDURE p2_db1_pkg1_p1;
+END;
+$$
+CREATE PACKAGE BODY db2.pkg1 AS
+m_var1 TEXT;
+FUNCTION var1 RETURN TEXT AS
+BEGIN
+RETURN m_var1;
+END;
+PROCEDURE p1(a OUT TEXT) AS
+BEGIN
+a:= 'This is db2.pkg1.p1';
+END;
+PROCEDURE p2_db1_pkg1_p1 AS
+a TEXT;
+BEGIN
+db1.pkg1.p1(a);
+SELECT a;
+END;
+BEGIN
+db1.pkg1.p1(m_var1);
+END;
+$$
+SELECT db2.pkg1.var1();
+db2.pkg1.var1()
+This is db1.pkg1.p1
+CALL db2.pkg1.p2_db1_pkg1_p1;
+a
+This is db1.pkg1.p1
+DROP DATABASE db1;
+DROP DATABASE db2;
diff --git a/mysql-test/suite/compat/oracle/t/sp-package.test b/mysql-test/suite/compat/oracle/t/sp-package.test
index edad90e547f..615ce51e195 100644
--- a/mysql-test/suite/compat/oracle/t/sp-package.test
+++ b/mysql-test/suite/compat/oracle/t/sp-package.test
@@ -2689,3 +2689,330 @@ CALL xyz.xyz123(17,18,@R);
DROP PACKAGE xyz;
DROP TABLE t1;
--disable_prepare_warnings
+
+
+--echo #
+--echo # MDEV-28166 sql_mode=ORACLE: fully qualified package function calls do not work: db.pkg.func()
+--echo #
+
+--error ER_WRONG_DB_NAME
+SELECT `db `.pkg.func();
+--error ER_SP_WRONG_NAME
+SELECT db.`pkg `.func();
+--error ER_SP_WRONG_NAME
+SELECT db.pkg.`func `();
+
+
+CREATE DATABASE db1;
+USE db1;
+
+DELIMITER $$;
+CREATE PACKAGE pkg1 AS
+ FUNCTION f1 RETURN TEXT;
+ FUNCTION f2_db1_pkg1_f1 RETURN TEXT;
+ FUNCTION f2_pkg1_f1 RETURN TEXT;
+ FUNCTION f2_f1 RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY pkg1
+AS
+ FUNCTION f1 RETURN TEXT IS
+ BEGIN
+ RETURN 'This is db1.pkg1.f1';
+ END;
+ FUNCTION f2_db1_pkg1_f1 RETURN TEXT IS
+ BEGIN
+ RETURN db1.pkg1.f1();
+ END;
+ FUNCTION f2_pkg1_f1 RETURN TEXT IS
+ BEGIN
+ RETURN pkg1.f1();
+ END;
+ FUNCTION f2_f1 RETURN TEXT IS
+ BEGIN
+ RETURN f1();
+ END;
+END;
+$$
+DELIMITER ;$$
+
+USE db1;
+SELECT pkg1.f2_db1_pkg1_f1();
+SELECT pkg1.f2_pkg1_f1();
+SELECT pkg1.f2_f1();
+
+SELECT db1.pkg1.f2_db1_pkg1_f1();
+SELECT db1.pkg1.f2_pkg1_f1();
+SELECT db1.pkg1.f2_f1();
+
+USE test;
+SELECT db1.pkg1.f2_db1_pkg1_f1();
+SELECT db1.pkg1.f2_pkg1_f1();
+SELECT db1.pkg1.f2_f1();
+
+DROP DATABASE db1;
+
+
+#
+# Testing db.pkg.func() in the package initialization section
+#
+
+CREATE DATABASE db1;
+CREATE DATABASE db2;
+
+DELIMITER $$;
+CREATE PACKAGE db1.pkg1 AS
+ FUNCTION f1 RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY db1.pkg1 AS
+ FUNCTION f1 RETURN TEXT AS
+ BEGIN
+ RETURN 'This is db1.pkg1.f1';
+ END;
+END;
+$$
+DELIMITER ;$$
+
+
+DELIMITER $$;
+CREATE PACKAGE db2.pkg1 AS
+ FUNCTION f1 RETURN TEXT;
+ FUNCTION var1 RETURN TEXT;
+ FUNCTION var2 RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY db2.pkg1 AS
+ m_var1 TEXT;
+ m_var2 TEXT;
+ FUNCTION f1 RETURN TEXT AS
+ BEGIN
+ RETURN 'This is db2.pkg1.f1';
+ END;
+ FUNCTION var1 RETURN TEXT AS
+ BEGIN
+ RETURN m_var1;
+ END;
+ FUNCTION var2 RETURN TEXT AS
+ BEGIN
+ RETURN m_var2;
+ END;
+BEGIN
+ m_var1:= db1.pkg1.f1();
+ m_var2:= db2.pkg1.f1();
+END;
+$$
+DELIMITER ;$$
+
+SELECT db2.pkg1.var1(), db2.pkg1.var2();
+
+DROP DATABASE db1;
+DROP DATABASE db2;
+
+#
+# Make sure fully qualified package function call does not support AS syntax:
+# SELECT db.pkg.func(10 AS a);
+#
+
+DELIMITER $$;
+CREATE PACKAGE pkg1 AS
+ FUNCTION f1(a TEXT) RETURN TEXT;
+END;
+$$
+CREATE PACKAGE BODY pkg1 AS
+ FUNCTION f1(a TEXT) RETURN TEXT AS
+ BEGIN
+ RETURN a;
+ END;
+END;
+$$
+DELIMITER ;$$
+SELECT test.pkg1.f1('xxx');
+--error ER_PARSE_ERROR
+SELECT test.pkg1.f1('xxx' AS a);
+DROP PACKAGE pkg1;
+
+
+--echo #
+--echo # MDEV-19328 sql_mode=ORACLE: Package function in VIEW
+--echo #
+
+SET sql_mode=ORACLE;
+DELIMITER $$;
+CREATE PACKAGE test1 AS
+ FUNCTION f_test RETURN number;
+END test1;
+$$
+CREATE PACKAGE BODY test1
+AS
+ FUNCTION f_test RETURN NUMBER IS
+ BEGIN
+ RETURN 1;
+ END;
+END test1;
+$$
+DELIMITER ;$$
+
+
+SET sql_mode=ORACLE;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test1.f_test();
+SELECT * FROM v_test;
+--vertical_results
+SHOW CREATE VIEW v_test;
+--horizontal_results
+SET sql_mode=DEFAULT;
+SELECT * FROM v_test;
+--vertical_results
+SHOW CREATE VIEW v_test;
+--horizontal_results
+DROP VIEW v_test;
+
+
+SET sql_mode=DEFAULT;
+--error ER_SP_DOES_NOT_EXIST
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test1.f_test();
+
+
+SET sql_mode=ORACLE;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test.test1.f_test();
+SELECT * FROM v_test;
+--vertical_results
+SHOW CREATE VIEW v_test;
+--horizontal_results
+SET sql_mode=DEFAULT;
+SELECT * FROM v_test;
+--vertical_results
+SHOW CREATE VIEW v_test;
+--horizontal_results
+DROP VIEW v_test;
+
+
+SET sql_mode=DEFAULT;
+CREATE VIEW v_test AS SELECT 1 AS c1 FROM DUAL WHERE 1=test.test1.f_test();
+SELECT * FROM v_test;
+--vertical_results
+SHOW CREATE VIEW v_test;
+--horizontal_results
+SET sql_mode=ORACLE;
+SELECT * FROM v_test;
+--vertical_results
+SHOW CREATE VIEW v_test;
+--horizontal_results
+DROP VIEW v_test;
+
+SET sql_mode=ORACLE;
+DROP PACKAGE test1;
+
+
+--echo #
+--echo # MDEV-19804 sql_mode=ORACLE: call procedure in packages
+--echo #
+
+--error ER_WRONG_DB_NAME
+CALL `db1 `.pkg.p;
+--error ER_SP_WRONG_NAME
+CALL db1.`pkg `.p;
+--error ER_SP_WRONG_NAME
+CALL db1.pkg.`p `;
+
+
+SET sql_mode=ORACLE;
+DELIMITER $$;
+CREATE PACKAGE pkg1 as
+ PROCEDURE p1();
+END;
+$$
+CREATE PACKAGE BODY pkg1 as
+ PROCEDURE p1() as
+ BEGIN
+ SELECT 'test-function' AS c1;
+ END;
+END;
+$$
+DELIMITER ;$$
+
+CALL pkg1.p1;
+CALL test.pkg1.p1;
+
+# In sql_mode=DEFAULT we support fully qualified package function names
+# (this is needed for VIEWs). Let's make sure we also support fully
+# qualified package procedure names, for symmetry
+
+SET sql_mode=DEFAULT;
+CALL test.pkg1.p1;
+SET sql_mode=ORACLE;
+
+DELIMITER $$;
+BEGIN
+ CALL pkg1.p1;
+ CALL test.pkg1.p1;
+END
+$$
+DELIMITER ;$$
+
+DELIMITER $$;
+BEGIN
+ pkg1.p1;
+ test.pkg1.p1;
+END
+$$
+DELIMITER ;$$
+
+DROP PACKAGE pkg1;
+
+
+#
+# Testing packages in different databases calling each other
+# in routines and in the initialization section.
+#
+
+CREATE DATABASE db1;
+DELIMITER $$;
+CREATE PACKAGE db1.pkg1 AS
+ PROCEDURE p1(a OUT TEXT);
+END;
+$$
+CREATE PACKAGE BODY db1.pkg1 AS
+ PROCEDURE p1(a OUT TEXT) AS
+ BEGIN
+ a:= 'This is db1.pkg1.p1';
+ END;
+END;
+$$
+DELIMITER ;$$
+
+CREATE DATABASE db2;
+DELIMITER $$;
+CREATE PACKAGE db2.pkg1 AS
+ FUNCTION var1 RETURN TEXT;
+ PROCEDURE p1(a OUT TEXT);
+ PROCEDURE p2_db1_pkg1_p1;
+END;
+$$
+CREATE PACKAGE BODY db2.pkg1 AS
+ m_var1 TEXT;
+ FUNCTION var1 RETURN TEXT AS
+ BEGIN
+ RETURN m_var1;
+ END;
+ PROCEDURE p1(a OUT TEXT) AS
+ BEGIN
+ a:= 'This is db2.pkg1.p1';
+ END;
+ PROCEDURE p2_db1_pkg1_p1 AS
+ a TEXT;
+ BEGIN
+ db1.pkg1.p1(a);
+ SELECT a;
+ END;
+BEGIN
+ db1.pkg1.p1(m_var1);
+END;
+$$
+DELIMITER ;$$
+
+SELECT db2.pkg1.var1();
+CALL db2.pkg1.p2_db1_pkg1_p1;
+
+DROP DATABASE db1;
+DROP DATABASE db2;
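The MDEV-28166 and MDEV-19804 blocks above walk through the accepted qualification forms one case at a time. A compact summary sketch, assuming a package db1.pkg1 with a function f1 and a procedure p1(a OUT TEXT) as in the blocks above (illustrative only, not part of this commit):

  -- fully qualified, works from any database; this is also the form views store
  SELECT db1.pkg1.f1();
  -- package-qualified, resolved against the current database
  USE db1;
  SELECT pkg1.f1();
  -- fully qualified procedure call, OUT argument via a user variable
  CALL db1.pkg1.p1(@a);
  SELECT @a;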
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index eb1aba72162..76992e9d59b 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -44,3 +44,4 @@ pxc-421: wsrep_provider is read-only for security reasons
query_cache: MDEV-15805 Test failure on galera.query_cache
versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch
galera_bf_abort_at_after_statement : Unstable
+galera_bf_abort_ps_bind : MDEV-28193 Galera test failure on galera_bf_abort_ps_bind
diff --git a/mysql-test/suite/galera/include/kill_galera.inc b/mysql-test/suite/galera/include/kill_galera.inc
index 98ebf4ff35d..56118df84f9 100644
--- a/mysql-test/suite/galera/include/kill_galera.inc
+++ b/mysql-test/suite/galera/include/kill_galera.inc
@@ -2,7 +2,7 @@
if (!$kill_signal)
{
---let $kill_signal = 9
+--let $kill_signal = KILL
}
# Write file to make mysql-test-run.pl expect the crash, but don't start it
diff --git a/mysql-test/suite/galera/r/MDEV-24143.result b/mysql-test/suite/galera/r/MDEV-24143.result
new file mode 100644
index 00000000000..860d8a35834
--- /dev/null
+++ b/mysql-test/suite/galera/r/MDEV-24143.result
@@ -0,0 +1,23 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (c1 BIGINT NOT NULL PRIMARY KEY, c2 BINARY (10), c3 DATETIME);
+SELECT get_lock ('test2', 0);
+get_lock ('test2', 0)
+1
+DROP TABLE t1;
+CREATE TABLE t1 (c1 SMALLINT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES (1);
+SET SESSION wsrep_trx_fragment_size=10;
+SET SESSION autocommit=0;
+SELECT * FROM t1 WHERE c1 <=0 ORDER BY c1 DESC;
+c1
+INSERT INTO t1 VALUES (4),(3),(1),(2);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+CREATE TABLE t1 (pk INT PRIMARY KEY, b INT) ENGINE=SEQUENCE;
+ERROR 42S01: Table 't1' already exists
+ALTER TABLE t1 DROP COLUMN c2;
+ERROR 42000: Can't DROP COLUMN `c2`; check that it exists
+SELECT get_lock ('test', 1.5);
+get_lock ('test', 1.5)
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/MDEV-27713.result b/mysql-test/suite/galera/r/MDEV-27713.result
new file mode 100644
index 00000000000..14575cb484d
--- /dev/null
+++ b/mysql-test/suite/galera/r/MDEV-27713.result
@@ -0,0 +1,46 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (
+f1 INT,
+f2 VARCHAR(255) PRIMARY KEY
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+INSERT INTO t1 VALUES(1, 'abc');
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (2,'def');
+connection node_2;
+SET GLOBAL event_scheduler=ON;
+CREATE PROCEDURE update_table()
+BEGIN
+SET AUTOCOMMIT=OFF;
+DO GET_LOCK('local_lock', 0);
+SET DEBUG_SYNC = 'innodb_row_update_for_mysql_begin SIGNAL blocked WAIT_FOR continue';
+UPDATE t1 SET f2 = 'jkl' WHERE f1 != 2;
+DO RELEASE_LOCK('local_lock');
+END|
+CREATE DEFINER=current_user
+EVENT event
+ON SCHEDULE AT CURRENT_TIMESTAMP
+ON COMPLETION PRESERVE
+ENABLE
+DO CALL update_table();
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+SET DEBUG_SYNC = 'now WAIT_FOR blocked';
+connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
+connection node_1;
+COMMIT;
+connection node_2b;
+SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+connection node_2a;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_2;
+SET GLOBAL event_scheduler=default;
+DROP PROCEDURE update_table;
+DROP EVENT event;
+SET DEBUG_SYNC='reset';
+SET GLOBAL debug_dbug = DEFAULT;
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ps_bind.result b/mysql-test/suite/galera/r/galera_bf_abort_ps_bind.result
new file mode 100644
index 00000000000..adc7da58eae
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_bf_abort_ps_bind.result
@@ -0,0 +1,37 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t (i int primary key auto_increment, j varchar(20) character set utf8);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+connection node_1;
+insert into t values (1, 'first');
+PS_prepare INSERT INTO t(j) VALUES (?);;
+PS_bind node1;
+PS_execute;
+PS_execute;
+select * from t;
+i j
+1 first
+3 node1
+5 node1
+PS_close;
+PS_prepare INSERT INTO t(j) VALUES (?);;
+PS_bind node1;
+begin;
+update t set j='node1' where i=1;
+connection node_2;
+update t set j='node2' where i=1;
+connection node_1a;
+connection node_1;
+PS_execute;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+PS_execute;
+commit;
+select * from t;
+i j
+1 node2
+3 node1
+5 node1
+7 node1
+drop table t;
diff --git a/mysql-test/suite/galera/r/galera_ist_restart_joiner.result b/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
index 7cb6d90840e..2f78c475762 100644
--- a/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
+++ b/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
@@ -18,6 +18,7 @@ SET SESSION wsrep_on=ON;
connection node_1;
UPDATE t1 SET f2 = 'd' WHERE f1 > 3;
connection node_2;
+Killing server ...
connection node_1;
UPDATE t1 SET f2 = 'e' WHERE f1 > 4;
connection node_2;
diff --git a/mysql-test/suite/galera/t/MDEV-24143.test b/mysql-test/suite/galera/t/MDEV-24143.test
new file mode 100644
index 00000000000..e58f147cb7c
--- /dev/null
+++ b/mysql-test/suite/galera/t/MDEV-24143.test
@@ -0,0 +1,20 @@
+--source include/galera_cluster.inc
+--source include/have_sequence.inc
+
+CREATE TABLE t1 (c1 BIGINT NOT NULL PRIMARY KEY, c2 BINARY (10), c3 DATETIME);
+SELECT get_lock ('test2', 0);
+DROP TABLE t1;
+CREATE TABLE t1 (c1 SMALLINT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+INSERT INTO t1 VALUES (1);
+SET SESSION wsrep_trx_fragment_size=10;
+SET SESSION autocommit=0;
+SELECT * FROM t1 WHERE c1 <=0 ORDER BY c1 DESC;
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (4),(3),(1),(2);
+--error ER_TABLE_EXISTS_ERROR
+CREATE TABLE t1 (pk INT PRIMARY KEY, b INT) ENGINE=SEQUENCE;
+--error ER_CANT_DROP_FIELD_OR_KEY
+ALTER TABLE t1 DROP COLUMN c2;
+SELECT get_lock ('test', 1.5);
+DROP TABLE t1;
+
diff --git a/mysql-test/suite/galera/t/MDEV-27713.test b/mysql-test/suite/galera/t/MDEV-27713.test
new file mode 100644
index 00000000000..4bfcd7e3d50
--- /dev/null
+++ b/mysql-test/suite/galera/t/MDEV-27713.test
@@ -0,0 +1,67 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/big_test.inc
+
+CREATE TABLE t1 (
+ f1 INT,
+ f2 VARCHAR(255) PRIMARY KEY
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+INSERT INTO t1 VALUES(1, 'abc');
+
+--connection node_1
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (2,'def');
+
+--connection node_2
+
+SET GLOBAL event_scheduler=ON;
+
+DELIMITER |;
+CREATE PROCEDURE update_table()
+BEGIN
+ SET AUTOCOMMIT=OFF;
+ DO GET_LOCK('local_lock', 0);
+ SET DEBUG_SYNC = 'innodb_row_update_for_mysql_begin SIGNAL blocked WAIT_FOR continue';
+ UPDATE t1 SET f2 = 'jkl' WHERE f1 != 2;
+ DO RELEASE_LOCK('local_lock');
+END|
+DELIMITER ;|
+
+CREATE DEFINER=current_user
+ EVENT event
+ ON SCHEDULE AT CURRENT_TIMESTAMP
+ ON COMPLETION PRESERVE
+ ENABLE
+ DO CALL update_table();
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+SET DEBUG_SYNC = 'now WAIT_FOR blocked';
+
+# Applier control thread
+--connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
+
+--connection node_1
+COMMIT;
+
+# Applier control thread
+--connection node_2b
+SET DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+
+--connection node_2a
+SET DEBUG_SYNC = 'now SIGNAL continue';
+
+--connection node_2
+SET GLOBAL event_scheduler=default;
+DROP PROCEDURE update_table;
+DROP EVENT event;
+SET DEBUG_SYNC='reset';
+SET GLOBAL debug_dbug = DEFAULT;
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test
index a2acfc57f6c..7b479d45844 100644
--- a/mysql-test/suite/galera/t/MW-44.test
+++ b/mysql-test/suite/galera/t/MW-44.test
@@ -19,7 +19,11 @@ SET SESSION wsrep_osu_method=RSU;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
SET SESSION wsrep_osu_method=TOI;
---let $wait_condition = SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE "CREATE%" OR argument LIKE "ALTER%"
+--let $wait_condition = SELECT COUNT(*) = 1 FROM mysql.general_log WHERE argument LIKE "CREATE%" AND command_type != 'Prepare'
+--let $wait_condition_on_error_output = SELECT * FROM mysql.general_log
+--source include/wait_condition_with_debug.inc
+
+--let $wait_condition = SELECT COUNT(*) = 1 FROM mysql.general_log WHERE argument LIKE "ALTER%" AND command_type != 'Prepare'
--let $wait_condition_on_error_output = SELECT * FROM mysql.general_log
--source include/wait_condition_with_debug.inc
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps_bind.cnf b/mysql-test/suite/galera/t/galera_bf_abort_ps_bind.cnf
new file mode 100644
index 00000000000..62cf1854032
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_ps_bind.cnf
@@ -0,0 +1,7 @@
+!include ../galera_2nodes.cnf
+
+[mysqld.1]
+wsrep-debug=1
+
+[mysqld.2]
+wsrep-debug=1
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_ps_bind.test b/mysql-test/suite/galera/t/galera_bf_abort_ps_bind.test
new file mode 100644
index 00000000000..a840f612a82
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_ps_bind.test
@@ -0,0 +1,58 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t (i int primary key auto_increment, j varchar(20) character set utf8);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_1
+insert into t values (1, 'first');
+
+# prepare a statement for inserting rows into table t
+--PS_prepare INSERT INTO t(j) VALUES (?);
+
+# bind the parameter so that inserts set column j to 'node1'
+--PS_bind node1
+
+# insert two rows with the PS
+# this is for showing that two execute commands can follow a bind command
+--PS_execute
+--PS_execute
+select * from t;
+
+# close the prepared statement, and prepare a new PS,
+# this happens to be the same as the first PS
+# also bind a parameter for the PS
+--PS_close
+--PS_prepare INSERT INTO t(j) VALUES (?);
+--PS_bind node1
+
+# start a transaction and make one update
+# leaving the transaction open
+begin;
+update t set j='node1' where i=1;
+
+# replicate a transaction from node2, which BF aborts the open
+# transaction in node1
+--connection node_2
+update t set j='node2' where i=1;
+
+# wait until the BF abort has completed and the update from node_2 has committed
+--connection node_1a
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t WHERE j='node2'
+--source include/wait_condition.inc
+
+# continue the open transaction, trying to insert a third row; a deadlock is now observed
+--connection node_1
+--error ER_LOCK_DEADLOCK
+--PS_execute
+
+# try to insert one more row
+--PS_execute
+commit;
+
+select * from t;
+
+drop table t;
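The PS_prepare/PS_bind/PS_execute commands above appear intended to drive the binary (prepared statement) client API, which is the code path this test needs to cover around a BF abort. For contrast, the text-protocol equivalent, which would not exercise the same path, is roughly (illustrative sketch only):

  PREPARE ins FROM 'INSERT INTO t(j) VALUES (?)';
  SET @j = 'node1';
  EXECUTE ins USING @j;
  DEALLOCATE PREPARE ins;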
diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
index f56d0e657bd..c535ac455b9 100644
--- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
+++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
@@ -61,19 +61,7 @@ UPDATE t1 SET f2 = 'd' WHERE f1 > 3;
# Kill node #2 while IST is in progress
--connection node_2
-
-# Kill the connected server
---disable_reconnect
-
---perl
- my $pid_filename = $ENV{'KILL_NODE_PIDFILE'};
- my $mysqld_pid = `cat $pid_filename`;
- chomp($mysqld_pid);
- system("kill -9 $mysqld_pid");
- exit(0);
-EOF
-
---source include/wait_until_disconnected.inc
+--source include/kill_galera.inc
--connection node_1
--source include/wait_until_connected_again.inc
diff --git a/mysql-test/suite/galera_3nodes/r/galera_garbd_backup.result b/mysql-test/suite/galera_3nodes/r/galera_garbd_backup.result
new file mode 100644
index 00000000000..f176ef1dd7f
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/r/galera_garbd_backup.result
@@ -0,0 +1,41 @@
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+SET GLOBAL innodb_max_dirty_pages_pct=99;
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=99;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER, f2 varchar(1024)) Engine=InnoDB;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+INSERT INTO t1 (f2) SELECT REPEAT('x', 1024) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+connection node_2;
+Killing node #3 to free ports for garbd ...
+connection node_3;
+connection node_1;
+SET GLOBAL debug_dbug = "+d,sync.wsrep_donor_state";
+Starting garbd ...
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_donor_state_reached";
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=0;
+SET GLOBAL innodb_max_dirty_pages_pct=0;
+SET SESSION debug_sync = "now SIGNAL signal.wsrep_donor_state";
+SET GLOBAL debug_dbug = "";
+SET debug_sync='RESET';
+connection node_2;
+Killing garbd ...
+connection node_1;
+connection node_2;
+DROP TABLE t1;
+DROP TABLE ten;
+Restarting node #3 to satisfy MTR's end-of-test checks
+connection node_3;
+connection node_1;
+SET GLOBAL innodb_max_dirty_pages_pct = 75.000000;
+SET GLOBAL innodb_max_dirty_pages_pct_lwm = 0.000000;
+connection node_1;
+CALL mtr.add_suppression("WSREP: Protocol violation\. JOIN message sender 1\.0 \(.*\) is not in state transfer \(SYNCED\)");
+connection node_2;
+CALL mtr.add_suppression("WSREP: Protocol violation\. JOIN message sender 1\.0 \(.*\) is not in state transfer \(SYNCED\)");
+connection node_3;
+CALL mtr.add_suppression("WSREP: Protocol violation\. JOIN message sender 1\.0 \(.*\) is not in state transfer \(SYNCED\)");
diff --git a/mysql-test/suite/galera_3nodes/t/galera_garbd_backup.cnf b/mysql-test/suite/galera_3nodes/t/galera_garbd_backup.cnf
new file mode 100644
index 00000000000..8b7cb948a87
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_garbd_backup.cnf
@@ -0,0 +1,13 @@
+!include ../galera_3nodes.cnf
+
+[mysqld]
+wsrep_sst_method=rsync
+
+[mysqld.1]
+wsrep_node_name=node1
+
+[mysqld.2]
+wsrep_node_name=node2
+
+[mysqld.3]
+wsrep_node_name=node3
diff --git a/mysql-test/suite/galera_3nodes/t/galera_garbd_backup.test b/mysql-test/suite/galera_3nodes/t/galera_garbd_backup.test
new file mode 100644
index 00000000000..302bf430dde
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_garbd_backup.test
@@ -0,0 +1,134 @@
+#
+# A very basic test for the galera arbitrator. We shut down node #3 and use its port allocation to start garbd.
+# As MTR does not allow multiple servers to be down at the same time, we are limited as to what we can test.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_garbd.inc
+--source include/big_test.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+
+--connection node_1
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+
+--let $galera_connection_name = node_3
+--let $galera_server_number = 3
+--source include/galera_connect.inc
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_3 = $_NODE_GALERAPORT
+
+--source ../galera/include/auto_increment_offset_save.inc
+
+# Save galera ports
+--connection node_1
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
+--let $datadir= `SELECT @@datadir`
+
+--let $innodb_max_dirty_pages_pct = `SELECT @@innodb_max_dirty_pages_pct`
+--let $innodb_max_dirty_pages_pct_lwm = `SELECT @@innodb_max_dirty_pages_pct_lwm`
+
+SET GLOBAL innodb_max_dirty_pages_pct=99;
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=99;
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER, f2 varchar(1024)) Engine=InnoDB;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+INSERT INTO t1 (f2) SELECT REPEAT('x', 1024) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_2
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
+
+--echo Killing node #3 to free ports for garbd ...
+--connection node_3
+--source include/shutdown_mysqld.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+# stop the SST donor thread while the node is in the donor state
+SET GLOBAL debug_dbug = "+d,sync.wsrep_donor_state";
+
+--echo Starting garbd ...
+--exec $MTR_GARBD_EXE --address "gcomm://127.0.0.1:$NODE_GALERAPORT_1" --group my_wsrep_cluster --donor node1 --sst backup --options 'base_port=$NODE_GALERAPORT_3' > $MYSQL_TMP_DIR/garbd.log 2>&1 &
+
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_donor_state_reached";
+
+#
+# get hash of data directory contents before BP dirty page flushing
+#
+--exec find $datadir -type f ! -name tables_flushed ! -name backup_sst_complete -exec md5sum {} \; | md5sum >$MYSQLTEST_VARDIR/tmp/innodb_before
+
+# this should force buffer pool flushing, if not already done by donor state change transfer
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=0;
+SET GLOBAL innodb_max_dirty_pages_pct=0;
+
+--disable_query_log
+--disable_result_log
+select f1 from t1;
+select * from ten;
+--enable_result_log
+--enable_query_log
+
+#
+#
+# record the hash of data directory contents after BP dirty page flushing
+#
+--exec find $datadir -type f ! -name tables_flushed ! -name backup_sst_complete -exec md5sum {} \; | md5sum >$MYSQLTEST_VARDIR/tmp/innodb_after
+
+# there should be no disk writes
+--diff_files $MYSQLTEST_VARDIR/tmp/innodb_before $MYSQLTEST_VARDIR/tmp/innodb_after
+
+SET SESSION debug_sync = "now SIGNAL signal.wsrep_donor_state";
+SET GLOBAL debug_dbug = "";
+SET debug_sync='RESET';
+
+--connection node_2
+
+#
+# garbd will die automatically because of the backup SST script,
+# but just to be sure, send an explicit kill here as well
+#
+--echo Killing garbd ...
+# FreeBSD's /bin/pkill only supports short versions of the options:
+# -o Select only the oldest (least recently started)
+# -f Match against full argument lists
+--error 0,1
+--exec pkill -o -f garbd.*$NODE_GALERAPORT_3
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_2
+
+DROP TABLE t1;
+DROP TABLE ten;
+
+--echo Restarting node #3 to satisfy MTR's end-of-test checks
+--connection node_3
+let $restart_noprint=2;
+--source include/start_mysqld.inc
+
+--connection node_1
+--eval SET GLOBAL innodb_max_dirty_pages_pct = $innodb_max_dirty_pages_pct
+--eval SET GLOBAL innodb_max_dirty_pages_pct_lwm = $innodb_max_dirty_pages_pct_lwm
+
+--source ../galera/include/auto_increment_offset_restore.inc
+
+--connection node_1
+CALL mtr.add_suppression("WSREP: Protocol violation\. JOIN message sender 1\.0 \(.*\) is not in state transfer \(SYNCED\)");
+
+--connection node_2
+CALL mtr.add_suppression("WSREP: Protocol violation\. JOIN message sender 1\.0 \(.*\) is not in state transfer \(SYNCED\)");
+
+--connection node_3
+CALL mtr.add_suppression("WSREP: Protocol violation\. JOIN message sender 1\.0 \(.*\) is not in state transfer \(SYNCED\)");
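The methodology above relies on the donor being held in the donor state by the sync.wsrep_donor_state debug point while the datadir hashes are compared. A hypothetical extra guard, waiting for the state Galera typically reports for a donor, could be added before taking the first hash (not part of the committed test):

  --let $wait_condition = SELECT variable_value = 'Donor/Desynced' FROM information_schema.global_status WHERE variable_name = 'wsrep_local_state_comment'
  --source include/wait_condition.inc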
diff --git a/mysql-test/suite/galera_sr/disabled.def b/mysql-test/suite/galera_sr/disabled.def
index 9f6ae2a51ef..e45118f6f0a 100644
--- a/mysql-test/suite/galera_sr/disabled.def
+++ b/mysql-test/suite/galera_sr/disabled.def
@@ -14,4 +14,5 @@ GCF-1018B : MDEV-18534 wsrep::transaction::adopt(): Assertion `transaction.is_st
GCF-1060 : MDEV-20848 galera_sr.GCF_1060
GCF-585 : MDEV-24698 galera_sr.GCF-585 MTR failed with SIGABRT: no such a transition REPLICATING -> APPLYING
galera-features#56 : MDEV-24896
+GCF-1060 : MDEV-26528 wrong usage of mutex LOCK_thd_kill and LOCK_thd_kill
galera_sr_shutdown_master : MDEV-23612: galera_sr.galera_sr_shutdown_master MTR failed: WSREP_SST: [ERROR] Possible timeout in receving first data from donor in gtid stage
diff --git a/mysql-test/suite/galera_sr/r/MDEV-27553.result b/mysql-test/suite/galera_sr/r/MDEV-27553.result
index f6f81bd13f1..5a6a5bd4956 100644
--- a/mysql-test/suite/galera_sr/r/MDEV-27553.result
+++ b/mysql-test/suite/galera_sr/r/MDEV-27553.result
@@ -1,23 +1,36 @@
connection node_2;
connection node_1;
-CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
connection node_1;
+connection node_2;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+connection node_2;
SET SESSION wsrep_trx_fragment_size=1;
START TRANSACTION;
INSERT INTO t1 VALUES (1);
+SELECT COUNT(*) `Expect 1` FROM mysql.wsrep_streaming_log;
+Expect 1
+1
SET @@global.debug_dbug="+d,ha_index_init_fail";
ROLLBACK;
-connection node_2;
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
SELECT COUNT(*) `Expect 0` FROM mysql.wsrep_streaming_log;
Expect 0
0
-connection node_1;
+connection node_2;
SET @@global.debug_dbug="";
+SET SESSION wsrep_sync_wait = 0;
SELECT COUNT(*) `Expect 1` FROM mysql.wsrep_streaming_log;
Expect 1
1
-SET SESSION wsrep_on=OFF;
-DELETE FROM mysql.wsrep_streaming_log;
-SET SESSION wsrep_on=ON;
+connection node_2;
+SET GLOBAL wsrep_on=OFF;
+# restart
+SELECT COUNT(*) `Expect 0` FROM mysql.wsrep_streaming_log;
+Expect 0
+0
DROP TABLE t1;
CALL mtr.add_suppression("WSREP: Failed to init table for index scan");
+CALL mtr.add_suppression("WSREP: Failed to apply write set");
+CALL mtr.add_suppression("Failed to report last committed");
diff --git a/mysql-test/suite/galera_sr/t/MDEV-27553.test b/mysql-test/suite/galera_sr/t/MDEV-27553.test
index d17af175512..5c557db9201 100644
--- a/mysql-test/suite/galera_sr/t/MDEV-27553.test
+++ b/mysql-test/suite/galera_sr/t/MDEV-27553.test
@@ -5,29 +5,76 @@
--source include/galera_cluster.inc
--source include/have_debug.inc
+--let $node_1=node_1
+--let $node_2=node_2
+--source suite/galera/include/auto_increment_offset_save.inc
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
---connection node_1
---let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address`
+--connection node_2
SET SESSION wsrep_trx_fragment_size=1;
START TRANSACTION;
INSERT INTO t1 VALUES (1);
-# This will result in failure to remove fragments
-# from streaming log, in the following ROLLBACK.
+SELECT COUNT(*) `Expect 1` FROM mysql.wsrep_streaming_log;
+
+#
+# Issue ROLLBACK and make sure it fails to clean up
+# the streaming log. Failure to remove fragments
+# results in apply failure of the rollback fragment.
+# The node should disconnect from the cluster.
+#
SET @@global.debug_dbug="+d,ha_index_init_fail";
ROLLBACK;
---connection node_2
+
+#
+# Expect the cluster to shrink
+#
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+#
+# ROLLBACK should clean up the streaming log just fine in node 1
+#
SELECT COUNT(*) `Expect 0` FROM mysql.wsrep_streaming_log;
---connection node_1
+#
+# Expect the failure on ROLLBACK to leave an entry in the streaming log
+#
+--connection node_2
SET @@global.debug_dbug="";
+SET SESSION wsrep_sync_wait = 0;
+# Expect node to be disconnected
+--let wait_condition = SELECT VARIABLE_VALUE = 'Disconnected' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+--source include/wait_condition.inc
+
SELECT COUNT(*) `Expect 1` FROM mysql.wsrep_streaming_log;
-SET SESSION wsrep_on=OFF;
-DELETE FROM mysql.wsrep_streaming_log;
-SET SESSION wsrep_on=ON;
+
+#
+# Restart node 2, so that it joins the cluster back
+#
+--connection node_2
+SET GLOBAL wsrep_on=OFF;
+--source include/restart_mysqld.inc
+
+#
+# After restart, the streaming log is empty in node 2
+#
+SELECT COUNT(*) `Expect 0` FROM mysql.wsrep_streaming_log;
+
+#
+# Cleanup
+#
DROP TABLE t1;
CALL mtr.add_suppression("WSREP: Failed to init table for index scan");
+CALL mtr.add_suppression("WSREP: Failed to apply write set");
+CALL mtr.add_suppression("Failed to report last committed");
+
+--source suite/galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result
index 8a629d06dd8..b798832e96f 100644
--- a/mysql-test/suite/innodb/r/row_format_redundant.result
+++ b/mysql-test/suite/innodb/r/row_format_redundant.result
@@ -1,3 +1,4 @@
+SET GLOBAL innodb_fast_shutdown=0;
# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
SET GLOBAL innodb_file_per_table=1;
#
@@ -8,25 +9,17 @@ SET GLOBAL innodb_file_per_table=ON;
create table t1 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156)) engine=InnoDB
row_format=redundant;
-insert into t1 values(123, 'abcdef', 'jghikl', 'mnop');
-insert into t1 values(456, 'abcdef', 'jghikl', 'mnop');
-insert into t1 values(789, 'abcdef', 'jghikl', 'mnop');
-insert into t1 values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
+create temporary table t like t1;
+insert into t values(123, 'abcdef', 'jghikl', 'mnop');
+insert into t values(456, 'abcdef', 'jghikl', 'mnop');
+insert into t values(789, 'abcdef', 'jghikl', 'mnop');
+insert into t values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
+insert into t1 select a,d,b,c from t, seq_1_to_1024;
SET GLOBAL innodb_file_per_table=OFF;
create table t2 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156), fulltext ftsic(c)) engine=InnoDB
row_format=redundant;
-insert into t2 select * from t1;
+insert into t2 select a,d,b,c from t, seq_1_to_1024;
create table t3 (a int not null, d varchar(15) not null, b varchar(198),
c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB
row_format=redundant;
diff --git a/mysql-test/suite/innodb/t/alter_crash_rebuild.test b/mysql-test/suite/innodb/t/alter_crash_rebuild.test
new file mode 100644
index 00000000000..500cd28e5c5
--- /dev/null
+++ b/mysql-test/suite/innodb/t/alter_crash_rebuild.test
@@ -0,0 +1,26 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+
+CREATE TABLE t1 (a INT NOT NULL) ENGINE=InnoDB STATS_PERSISTENT=0;
+
+connect ddl,localhost,root;
+SET DEBUG_SYNC='after_trx_committed_in_memory SIGNAL stuck WAIT_FOR ever EXECUTE 2';
+send ALTER TABLE t1 ADD PRIMARY KEY(a);
+
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR stuck';
+SET DEBUG_SYNC='now SIGNAL ever';
+SET DEBUG_SYNC='now WAIT_FOR stuck';
+
+SET GLOBAL innodb_log_checkpoint_now=ON;
+
+--let $shutdown_timeout=0
+--source include/restart_mysqld.inc
+
+disconnect ddl;
+
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+--source include/wait_all_purged.inc
diff --git a/mysql-test/suite/innodb/t/row_format_redundant.test b/mysql-test/suite/innodb/t/row_format_redundant.test
index aadbfd2b773..6de7597e983 100644
--- a/mysql-test/suite/innodb/t/row_format_redundant.test
+++ b/mysql-test/suite/innodb/t/row_format_redundant.test
@@ -1,6 +1,7 @@
--source include/have_innodb.inc
# Embedded mode doesn't allow restarting
--source include/not_embedded.inc
+--source include/have_sequence.inc
--disable_query_log
call mtr.add_suppression("InnoDB: Table `mysql`\\.`innodb_table_stats` not found");
@@ -21,6 +22,8 @@ let bugdir= $MYSQLTEST_VARDIR/tmp/row_format_redundant;
--let $d=$d --innodb-data-file-path=ibdata1:1M:autoextend
--let $d=$d --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
--let $restart_parameters= $d
+# Ensure that any DDL records from previous tests have been purged.
+SET GLOBAL innodb_fast_shutdown=0;
--source include/restart_mysqld.inc
SET GLOBAL innodb_file_per_table=1;
@@ -35,27 +38,21 @@ create table t1 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156)) engine=InnoDB
row_format=redundant;
-insert into t1 values(123, 'abcdef', 'jghikl', 'mnop');
-insert into t1 values(456, 'abcdef', 'jghikl', 'mnop');
-insert into t1 values(789, 'abcdef', 'jghikl', 'mnop');
-insert into t1 values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
-insert into t1 select * from t1;
+create temporary table t like t1;
+
+insert into t values(123, 'abcdef', 'jghikl', 'mnop');
+insert into t values(456, 'abcdef', 'jghikl', 'mnop');
+insert into t values(789, 'abcdef', 'jghikl', 'mnop');
+insert into t values(134, 'kasdfsdsadf', 'adfjlasdkfjasd', 'adfsadflkasdasdfljasdf');
+
+insert into t1 select a,d,b,c from t, seq_1_to_1024;
SET GLOBAL innodb_file_per_table=OFF;
create table t2 (a int not null, d varchar(15) not null, b
varchar(198) not null, c char(156), fulltext ftsic(c)) engine=InnoDB
row_format=redundant;
-insert into t2 select * from t1;
+insert into t2 select a,d,b,c from t, seq_1_to_1024;
create table t3 (a int not null, d varchar(15) not null, b varchar(198),
c varchar(150), index k1(c(99), b(56)), index k2(b(5), c(10))) engine=InnoDB
diff --git a/mysql-test/suite/innodb_zip/r/innochecksum.result b/mysql-test/suite/innodb_zip/r/innochecksum.result
index e6525af4b52..bb94de7a369 100644
--- a/mysql-test/suite/innodb_zip/r/innochecksum.result
+++ b/mysql-test/suite/innodb_zip/r/innochecksum.result
@@ -14,7 +14,8 @@ FOUND 1 /Error: --no-check must be associated with --write option./ in my_restar
FOUND 1 /unknown variable 'strict-check=innodb'/ in my_restart.err
[7]: check the innochecksum with short form strict-check & no-check , an error is expected
FOUND 1 /unknown option '-C'/ in my_restart.err
-FOUND 1 /unknown variable 'write=crc32'/ in my_restart.err
+FOUND 1 /ignoring option '--write' due to invalid value 'crc32'/ in my_restart.err
+FOUND 1 /Error: --no-check must be associated with --write option/ in my_restart.err
# restart
SELECT * FROM tab1;
c1 c2
diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_2.result b/mysql-test/suite/innodb_zip/r/innochecksum_2.result
index 7be6f6f9af3..681d8e1f4c7 100644
--- a/mysql-test/suite/innodb_zip/r/innochecksum_2.result
+++ b/mysql-test/suite/innodb_zip/r/innochecksum_2.result
@@ -27,6 +27,7 @@ end-page 0
page 0
no-check FALSE
allow-mismatches 0
+write FALSE
page-type-summary FALSE
page-type-dump MYSQLTEST_VARDIR/tmp/dump.txt
per-page-details FALSE
@@ -54,6 +55,7 @@ See https://mariadb.com/kb/en/library/innochecksum/ for usage hints.
-n, --no-check Ignore the checksum verification.
-a, --allow-mismatches=#
Maximum checksum mismatch allowed.
+ -w, --write Rewrite the checksum.
-S, --page-type-summary
Display a count of each page type in a tablespace.
-D, --page-type-dump=name
@@ -75,6 +77,7 @@ end-page 0
page 0
no-check FALSE
allow-mismatches 0
+write FALSE
page-type-summary FALSE
page-type-dump (No default value)
per-page-details FALSE
diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_3.result b/mysql-test/suite/innodb_zip/r/innochecksum_3.result
index 280528f4200..03a31194c63 100644
--- a/mysql-test/suite/innodb_zip/r/innochecksum_3.result
+++ b/mysql-test/suite/innodb_zip/r/innochecksum_3.result
@@ -133,6 +133,7 @@ end-page 0
page 0
no-check FALSE
allow-mismatches 0
+write FALSE
page-type-summary FALSE
page-type-dump MYSQLTEST_VARDIR/tmp/dump.txt
per-page-details FALSE
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum.test b/mysql-test/suite/innodb_zip/t/innochecksum.test
index 60860adeac8..b78cd4329de 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum.test
@@ -57,9 +57,15 @@ let SEARCH_PATTERN= unknown option '-C';
--error 1
--exec $INNOCHECKSUM --no-check --write=crc32 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE
-let SEARCH_PATTERN= unknown variable 'write=crc32';
+--let SEARCH_PATTERN= ignoring option '--write' due to invalid value 'crc32'
--source include/search_pattern_in_file.inc
+--error 1
+--exec $INNOCHECKSUM --no-check $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE
+--let SEARCH_PATTERN= Error: --no-check must be associated with --write option
+--source include/search_pattern_in_file.inc
+
+--exec $INNOCHECKSUM --no-check --write $MYSQLD_DATADIR/test/tab1.ibd
--source include/start_mysqld.inc
SELECT * FROM tab1;
diff --git a/mysql-test/suite/rpl/r/mdev_24667.result b/mysql-test/suite/rpl/r/mdev_24667.result
new file mode 100644
index 00000000000..7c7342d63d6
--- /dev/null
+++ b/mysql-test/suite/rpl/r/mdev_24667.result
@@ -0,0 +1,30 @@
+include/rpl_init.inc [topology=1->2->3]
+call mtr.add_suppression('Unsafe statement written to the binary log using ');
+connection server_1;
+set binlog_format=statement;
+#first bug
+create table t1 (a int);
+create temporary table tmp like t1;
+load data local infile 'MYSQLTEST_VARDIR/load_data' INTO TABLE tmp;
+insert into t1 select * from tmp;
+#second bug
+create table t2 (a int);
+create temporary table tmp2 like t2;
+insert into tmp2 values(10);
+update tmp2 set a = 20 limit 1;
+Warnings:
+Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted
+insert into t2 select * from tmp2;
+connection server_2;
+connection server_3;
+#t1 should have 2 rows
+select count(*) = 2 from t1;
+count(*) = 2
+1
+#t2 should have 1 row with a = 20
+select * from t2;
+a
+20
+connection server_1;
+drop table t1, t2, tmp, tmp2;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/mdev_24667.cnf b/mysql-test/suite/rpl/t/mdev_24667.cnf
new file mode 100644
index 00000000000..58b605ad928
--- /dev/null
+++ b/mysql-test/suite/rpl/t/mdev_24667.cnf
@@ -0,0 +1,8 @@
+!include ../my.cnf
+
+[mysqld.3]
+log-slave-updates
+
+[ENV]
+SERVER_MYPORT_3= @mysqld.3.port
+SERVER_MYSOCK_3= @mysqld.3.socket
diff --git a/mysql-test/suite/rpl/t/mdev_24667.test b/mysql-test/suite/rpl/t/mdev_24667.test
new file mode 100644
index 00000000000..d8490b335db
--- /dev/null
+++ b/mysql-test/suite/rpl/t/mdev_24667.test
@@ -0,0 +1,56 @@
+#
+# MDEV-24667 LOAD DATA INFILE/inserted rows not written to binlog
+#
+# In this test we use a replication topology of 1->2->3:
+# server 1 uses statement format,
+# servers 2 and 3 use mixed format.
+# We make some unsafe updates on a temporary table, so server 2 must log
+# these queries in row format. Since they are on a temporary table they are
+# not logged, so the next query, which copies the data from the temporary
+# table to a normal table, must be logged in row format. Instead of checking
+# the binlog directly, we compare the results on server 3: if no binlog
+# events are lost (i.e. they are logged in row format), there is no data loss.
+--let $rpl_topology=1->2->3
+--source include/rpl_init.inc
+--source include/have_binlog_format_mixed.inc
+call mtr.add_suppression('Unsafe statement written to the binary log using ');
+--connection server_1
+
+set binlog_format=statement;
+--echo #first bug
+create table t1 (a int);
+create temporary table tmp like t1;
+--write_file $MYSQLTEST_VARDIR/load_data
+1
+2
+EOF
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval load data local infile '$MYSQLTEST_VARDIR/load_data' INTO TABLE tmp;
+insert into t1 select * from tmp;
+
+--echo #second bug
+create table t2 (a int);
+#insert into t2 values(10);
+create temporary table tmp2 like t2;
+insert into tmp2 values(10);
+update tmp2 set a = 20 limit 1;
+insert into t2 select * from tmp2;
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+--save_master_pos
+
+--connection server_3
+--sync_with_master
+--echo #t1 should have 2 rows
+select count(*) = 2 from t1;
+--echo #t2 should have 1 row with a = 20
+select * from t2;
+
+
+# cleanup
+--connection server_1
+drop table t1, t2, tmp, tmp2;
+--remove_file $MYSQLTEST_VARDIR/load_data
+--source include/rpl_end.inc
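The comment at the top of this test explains that correctness is verified indirectly, by comparing the data that reaches server 3, rather than by reading server 2's binlog. A hypothetical direct inspection of server 2's binlog could be added as well (illustrative only; include names as commonly found under mysql-test/include):

  --connection server_2
  --let $binlog_file = query_get_value(SHOW MASTER STATUS, File, 1)
  --source include/show_binlog_events.inc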
diff --git a/mysys/my_rename.c b/mysys/my_rename.c
index 93a59342b6c..73fc2fbe47c 100644
--- a/mysys/my_rename.c
+++ b/mysys/my_rename.c
@@ -46,12 +46,15 @@ static BOOL win_rename_with_retries(const char *from, const char *to)
for (int retry= RENAME_MAX_RETRIES; retry--;)
{
- DWORD ret = MoveFileEx(from, to,
+ BOOL ret= MoveFileEx(from, to,
MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING);
- DBUG_ASSERT(fp == NULL || (ret == FALSE && GetLastError() == ERROR_SHARING_VIOLATION));
+ if (ret)
+ return ret;
- if (!ret && (GetLastError() == ERROR_SHARING_VIOLATION))
+ DWORD last_error= GetLastError();
+ if (last_error == ERROR_SHARING_VIOLATION ||
+ last_error == ERROR_ACCESS_DENIED)
{
#ifndef DBUG_OFF
/*
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index eac7f77fb6b..e8eacc2bd5d 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -2368,6 +2368,9 @@ int get_db_mysql57(MYSQL_THD thd, char **name, size_t *len)
#ifdef __x86_64__
db_off= 608;
db_len_off= 616;
+#elif __aarch64__
+ db_off= 632;
+ db_len_off= 640;
#else
db_off= 0;
db_len_off= 0;
@@ -2378,6 +2381,9 @@ int get_db_mysql57(MYSQL_THD thd, char **name, size_t *len)
#ifdef __x86_64__
db_off= 536;
db_len_off= 544;
+#elif __aarch64__
+ db_off= 552;
+ db_len_off= 560;
#else
db_off= 0;
db_len_off= 0;
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index 92611b18267..aca78f38322 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -297,6 +297,7 @@ ELSE()
wsrep_sst_mysqldump
wsrep_sst_rsync
wsrep_sst_mariabackup
+ wsrep_sst_backup
)
# The following script is sourced from other SST scripts, so it should
# not be made executable.
diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh
index 4f499d8e577..face88c7e29 100644
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@ -554,7 +554,7 @@ mysqld_install_cmd_line()
{
"$mysqld_bootstrap" $defaults $defaults_group_suffix "$mysqld_opt" --bootstrap $silent_startup\
"--basedir=$basedir" "--datadir=$ldata" --log-warnings=0 --enforce-storage-engine="" \
- "--plugin-dir=${plugindir}" --loose-disable-plugin-file-key-management \
+ "--plugin-dir=${plugindir}" \
$args --max_allowed_packet=8M \
--net_buffer_length=16K
}
diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql
index 172fe46ae7e..4a29b94e6f5 100644
--- a/scripts/mysql_system_tables_fix.sql
+++ b/scripts/mysql_system_tables_fix.sql
@@ -1,5 +1,5 @@
-- Copyright (C) 2003, 2013 Oracle and/or its affiliates.
--- Copyright (C) 2010, 2018 MariaDB Corporation
+-- Copyright (C) 2010, 2022, MariaDB Corporation
--
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
@@ -31,6 +31,13 @@ set alter_algorithm=DEFAULT;
set @have_innodb= (select count(engine) from information_schema.engines where engine='INNODB' and support != 'NO');
+# MDEV-21873: 10.2 to 10.3 upgrade doesn't remove semi-sync reference from
+# mysql.plugin table.
+# As per suggested fix, check INFORMATION_SCHEMA.PLUGINS
+# and if semisync plugins aren't there, delete them from mysql.plugin.
+DELETE FROM mysql.plugin WHERE name="rpl_semi_sync_master" AND NOT EXISTS (SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME="rpl_semi_sync_master");
+DELETE FROM mysql.plugin WHERE name="rpl_semi_sync_slave" AND NOT EXISTS (SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME="rpl_semi_sync_slave");
+
--
-- Ensure that all tables are of type Aria and transactional
--
diff --git a/scripts/wsrep_sst_backup.sh b/scripts/wsrep_sst_backup.sh
new file mode 100644
index 00000000000..55e11ddffc0
--- /dev/null
+++ b/scripts/wsrep_sst_backup.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+set -ue
+
+# Copyright (C) 2017-2021 MariaDB
+# Copyright (C) 2010-2014 Codership Oy
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to the
+# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston
+# MA 02110-1335 USA.
+
+# This is a reference script for backup-based state snapshot transfer
+
+RSYNC_REAL_PID=0 # rsync process id
+STUNNEL_REAL_PID=0 # stunnel process id
+
+OS="$(uname)"
+[ "$OS" = 'Darwin' ] && export -n LD_LIBRARY_PATH
+
+# Setting the path for lsof on CentOS
+export PATH="/usr/sbin:/sbin:$PATH"
+
+. $(dirname "$0")/wsrep_sst_common
+
+MAGIC_FILE="$WSREP_SST_OPT_DATA/backup_sst_complete"
+rm -rf "$MAGIC_FILE"
+
+WSREP_LOG_DIR=${WSREP_LOG_DIR:-""}
+# if WSREP_LOG_DIR env. variable is not set, try to get it from my.cnf
+if [ -z "$WSREP_LOG_DIR" ]; then
+ WSREP_LOG_DIR=$(parse_cnf mysqld innodb-log-group-home-dir '')
+fi
+
+if [ -n "$WSREP_LOG_DIR" ]; then
+ # handle both relative and absolute paths
+ WSREP_LOG_DIR=$(cd $WSREP_SST_OPT_DATA; mkdir -p "$WSREP_LOG_DIR"; cd $WSREP_LOG_DIR; pwd -P)
+else
+ # default to datadir
+ WSREP_LOG_DIR=$(cd $WSREP_SST_OPT_DATA; pwd -P)
+fi
+
+if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]
+then
+
+ [ -f "$MAGIC_FILE" ] && rm -f "$MAGIC_FILE"
+
+ RC=0
+
+ if [ $WSREP_SST_OPT_BYPASS -eq 0 ]; then
+
+ FLUSHED="$WSREP_SST_OPT_DATA/tables_flushed"
+ ERROR="$WSREP_SST_OPT_DATA/sst_error"
+
+ [ -f "$FLUSHED" ] && rm -f "$FLUSHED"
+ [ -f "$ERROR" ] && rm -f "$ERROR"
+
+ echo "flush tables"
+
+ # Wait for:
+ # (a) tables to be flushed, AND
+ # (b) the cluster state ID & wsrep_gtid_domain_id to be written to the file, OR
+ # (c) an ERROR file, in case the FLUSH TABLES operation failed.
+
+ while [ ! -r "$FLUSHED" ] && \
+ ! grep -q -F ':' '--' "$FLUSHED" >/dev/null 2>&1
+ do
+ # Check whether ERROR file exists.
+ if [ -f "$ERROR" ]; then
+ # Flush tables operation failed.
+ rm -f "$ERROR"
+ exit 255
+ fi
+ sleep 0.2
+ done
+
+ STATE=$(cat "$FLUSHED")
+ rm -f "$FLUSHED"
+
+
+ else # BYPASS
+
+ wsrep_log_info "Bypassing state dump."
+ fi
+
+ echo 'continue' # now server can resume updating data
+
+ echo "$STATE" > "$MAGIC_FILE"
+
+ echo "done $STATE"
+
+elif [ "$WSREP_SST_OPT_ROLE" = 'joiner' ]
+then
+ wsrep_log_error "Unrecognized role: '$WSREP_SST_OPT_ROLE'"
+ exit 22 # EINVAL
+
+
+else
+ wsrep_log_error "Unrecognized role: '$WSREP_SST_OPT_ROLE'"
+ exit 22 # EINVAL
+fi
+
+exit 0
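
The donor branch above coordinates with the server purely through files in the datadir: it waits until either the tables_flushed file contains the cluster state or an sst_error file appears, then records that state in the magic file. A minimal C++ sketch of the same polling handshake, kept separate from the script itself; the file names and the 200 ms poll interval follow the script, everything else is illustrative:

#include <chrono>
#include <filesystem>
#include <fstream>
#include <optional>
#include <string>
#include <thread>

// Illustrative only: poll for the "tables flushed" marker or the error marker,
// mirroring the donor loop of wsrep_sst_backup.sh.
std::optional<std::string> wait_for_flush(const std::filesystem::path &datadir)
{
  const auto flushed = datadir / "tables_flushed";
  const auto error   = datadir / "sst_error";
  for (;;)
  {
    if (std::filesystem::exists(error))          // FLUSH TABLES failed
      return std::nullopt;
    if (std::ifstream in{flushed})               // state file is readable
    {
      std::string state;
      std::getline(in, state);
      if (state.find(':') != std::string::npos)  // "uuid:seqno ..." is complete
        return state;                            // caller writes it to the magic file
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
  }
}
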
diff --git a/sql/contributors.h b/sql/contributors.h
index e16448ee985..bc8ba4eabbb 100644
--- a/sql/contributors.h
+++ b/sql/contributors.h
@@ -42,6 +42,8 @@ struct show_table_contributors_st show_table_contributors[]= {
{"Microsoft", "https://microsoft.com/", "Platinum Sponsor of the MariaDB Foundation"},
{"MariaDB Corporation", "https://mariadb.com", "Founding member, Platinum Sponsor of the MariaDB Foundation"},
{"ServiceNow", "https://servicenow.com", "Platinum Sponsor of the MariaDB Foundation"},
+ {"Intel", "https://www.intel.com", "Platinum Sponsor of the MariaDB Foundation"},
+ {"SIT", "https://sit.org", "Platinum Sponsor of the MariaDB Foundation"},
{"Visma", "https://visma.com", "Gold Sponsor of the MariaDB Foundation"},
{"DBS", "https://dbs.com", "Gold Sponsor of the MariaDB Foundation"},
{"IBM", "https://www.ibm.com", "Gold Sponsor of the MariaDB Foundation"},
diff --git a/sql/handler.h b/sql/handler.h
index 56e828e5465..1f96c35ed6b 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2,7 +2,7 @@
#define HANDLER_INCLUDED
/*
Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2009, 2021, MariaDB
+ Copyright (c) 2009, 2022, MariaDB
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -5253,7 +5253,7 @@ static inline const char *ha_resolve_storage_engine_name(const handlerton *db_ty
static inline bool ha_check_storage_engine_flag(const handlerton *db_type, uint32 flag)
{
- return db_type == NULL ? FALSE : MY_TEST(db_type->flags & flag);
+ return db_type && (db_type->flags & flag);
}
static inline bool ha_storage_engine_is_enabled(const handlerton *db_type)
diff --git a/sql/item.cc b/sql/item.cc
index 09c83a341c6..f515dd3924d 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2010, 2021, MariaDB Corporation.
+ Copyright (c) 2010, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2758,9 +2758,11 @@ LEX_CSTRING Item_sp::func_name_cstring(THD *thd) const
/* Calculate length to avoid reallocation of string for sure */
size_t len= (((m_name->m_explicit_name ? m_name->m_db.length : 0) +
m_name->m_name.length)*2 + //characters*quoting
- 2 + // ` and `
+ 2 + // quotes for the function name
+ 2 + // quotes for the package name
(m_name->m_explicit_name ?
3 : 0) + // '`', '`' and '.' for the db
+ 1 + // '.' between package and function
1 + // end of string
ALIGN_SIZE(1)); // to avoid String reallocation
String qname((char *)alloc_root(thd->mem_root, len), len,
@@ -2772,7 +2774,21 @@ LEX_CSTRING Item_sp::func_name_cstring(THD *thd) const
append_identifier(thd, &qname, &m_name->m_db);
qname.append('.');
}
- append_identifier(thd, &qname, &m_name->m_name);
+ if (m_sp && m_sp->m_handler == &sp_handler_package_function)
+ {
+ /*
+ In case of a package function, split `pkg.func` and print
+ quoted `pkg` and `func` separately, so the entire result looks like:
+ `db`.`pkg`.`func`
+ */
+ Database_qualified_name tmp= Database_qualified_name::split(m_name->m_name);
+ DBUG_ASSERT(tmp.m_db.length);
+ append_identifier(thd, &qname, &tmp.m_db);
+ qname.append('.');
+ append_identifier(thd, &qname, &tmp.m_name);
+ }
+ else
+ append_identifier(thd, &qname, &m_name->m_name);
return { qname.c_ptr_safe(), qname.length() };
}
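
The new branch prints a package function as `db`.`pkg`.`func` by splitting the stored "pkg.func" name at its first dot and quoting each part. A stripped-down sketch of that step, using std::string instead of the server's String and append_identifier() machinery; the quoting here is simplified and does not escape backticks inside names:

#include <string>

// Illustrative sketch of the split-and-quote step, analogous to
// Database_qualified_name::split() followed by append_identifier().
static std::string quote(const std::string &ident)
{
  return "`" + ident + "`";                      // simplified: no escaping of '`'
}

std::string qualified_name(const std::string &db, const std::string &pkg_dot_func)
{
  std::string out = quote(db) + ".";
  const auto dot = pkg_dot_func.find('.');
  if (dot == std::string::npos)                  // ordinary function, no package part
    return out + quote(pkg_dot_func);
  return out + quote(pkg_dot_func.substr(0, dot)) + "." +
         quote(pkg_dot_func.substr(dot + 1));    // `db`.`pkg`.`func`
}
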
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 0d22465af6b..034833a0ba2 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -4711,10 +4711,11 @@ void Item_func_in::mark_as_condition_AND_part(TABLE_LIST *embedding)
Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
- if (to_be_transformed_into_in_subq(thd))
+ if (!transform_into_subq_checked)
{
- transform_into_subq= true;
- thd->lex->current_select->in_funcs.push_back(this, thd->mem_root);
+ if ((transform_into_subq= to_be_transformed_into_in_subq(thd)))
+ thd->lex->current_select->in_funcs.push_back(this, thd->mem_root);
+ transform_into_subq_checked= true;
}
if (arena)
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 3767c2172e8..fe55f524f89 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -2490,6 +2490,7 @@ protected:
SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param,
Field *field, Item *value) override;
bool transform_into_subq;
+ bool transform_into_subq_checked;
public:
/// An array of values, created when the bisection lookup method is used
in_vector *array;
@@ -2512,6 +2513,7 @@ public:
Item_func_opt_neg(thd, list),
Predicant_to_list_comparator(thd, arg_count - 1),
transform_into_subq(false),
+ transform_into_subq_checked(false),
array(0), have_null(0),
arg_types_compatible(FALSE), emb_on_expr_nest(0)
{ }
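
transform_into_subq_checked makes the IN-to-subquery decision a compute-once value: the predicate runs the first time mark_as_condition_AND_part() is reached and the cached answer is reused on statement re-execution. A minimal sketch of the same pattern, detached from Item_func_in; class and member names are illustrative:

// Illustrative compute-once flag, mirroring transform_into_subq /
// transform_into_subq_checked in Item_func_in.
class ConversionDecision
{
  bool decided_ = false;
  bool convert_ = false;
public:
  template <typename Predicate>
  bool should_convert(Predicate &&expensive_check)
  {
    if (!decided_)
    {
      convert_ = expensive_check();   // run the costly analysis only once
      decided_ = true;
    }
    return convert_;                  // re-executions reuse the cached answer
  }
};
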
diff --git a/sql/log.cc b/sql/log.cc
index 5b5f42ffee2..61c400a647c 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -5999,6 +5999,8 @@ THD::binlog_start_trans_and_stmt()
}
Gtid_log_event gtid_event(this, seqno, domain_id, true,
LOG_EVENT_SUPPRESS_USE_F, true, 0);
+ // Replicated events in the writeset don't have a checksum
+ gtid_event.checksum_alg= BINLOG_CHECKSUM_ALG_OFF;
gtid_event.server_id= server_id;
writer.write(&gtid_event);
wsrep_write_cache_buf(&tmp_io_cache, &buf, &len);
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index 94b1db74637..88a0e346245 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -1673,7 +1673,7 @@ end:
{
table->file->ha_index_or_rnd_end();
ha_commit_trans(thd, FALSE);
- ha_commit_trans(thd, TRUE);
+ trans_commit(thd);
}
if (table_opened)
{
diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc
index 2480eebf8d7..cfe29824328 100644
--- a/sql/semisync_master.cc
+++ b/sql/semisync_master.cc
@@ -1229,6 +1229,7 @@ int Repl_semi_sync_master::flush_net(THD *thd,
net_clear(net, 0);
net->pkt_nr++;
+ net->compress_pkt_nr++;
result = 0;
rpl_semi_sync_master_net_wait_num++;
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 88c1a38aa45..baf4d7d1d82 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -3608,6 +3608,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
lex_query_tables_own_last= m_lex->query_tables_own_last;
prelocking_tables= *lex_query_tables_own_last;
*lex_query_tables_own_last= NULL;
+ m_lex->query_tables_last= m_lex->query_tables_own_last;
m_lex->mark_as_requiring_prelocking(NULL);
}
thd->rollback_item_tree_changes();
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index bf6efd9fd52..27875e3837b 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -707,6 +707,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
wsrep_was_on(false),
wsrep_ignore_table(false),
wsrep_aborter(0),
+ wsrep_delayed_BF_abort(false),
/* wsrep-lib */
m_wsrep_next_trx_id(WSREP_UNDEFINED_TRX_ID),
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 829c938d077..f7f646e8310 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -4617,13 +4617,13 @@ public:
*/
DBUG_PRINT("debug",
("temporary_tables: %s, in_sub_stmt: %s, system_thread: %s",
- YESNO(has_thd_temporary_tables()), YESNO(in_sub_stmt),
+ YESNO(has_temporary_tables()), YESNO(in_sub_stmt),
show_system_thread(system_thread)));
if (in_sub_stmt == 0)
{
if (wsrep_binlog_format() == BINLOG_FORMAT_ROW)
set_current_stmt_binlog_format_row();
- else if (!has_thd_temporary_tables())
+ else if (!has_temporary_tables())
set_current_stmt_binlog_format_stmt();
}
DBUG_VOID_RETURN;
@@ -5300,6 +5300,10 @@ public:
/* thread who has started kill for this THD protected by LOCK_thd_data*/
my_thread_id wsrep_aborter;
+ /* true if a BF abort is observed in do_command() right after reading the
+ client's packet, and the client has sent a PS execute command. */
+ bool wsrep_delayed_BF_abort;
+
/*
Transaction id:
* m_wsrep_next_trx_id is assigned on the first query after
@@ -5331,7 +5335,10 @@ public:
{
return m_wsrep_next_trx_id;
}
-
+ /*
+ If the node is an async slave with parallel execution, wait for prior commits.
+ */
+ bool wsrep_parallel_slave_wait_for_prior_commit();
private:
wsrep_trx_id_t m_wsrep_next_trx_id; /* cast from query_id_t */
/* wsrep-lib */
@@ -7545,6 +7552,19 @@ public:
}
void copy(MEM_ROOT *mem_root, const LEX_CSTRING &db,
const LEX_CSTRING &name);
+
+ static Database_qualified_name split(const LEX_CSTRING &txt)
+ {
+ DBUG_ASSERT(txt.str[txt.length] == '\0'); // Expect 0-terminated input
+ const char *dot= strchr(txt.str, '.');
+ if (!dot)
+ return Database_qualified_name(NULL, 0, txt.str, txt.length);
+ size_t dblen= dot - txt.str;
+ Lex_cstring db(txt.str, dblen);
+ Lex_cstring name(txt.str + dblen + 1, txt.length - dblen - 1);
+ return Database_qualified_name(db, name);
+ }
+
// Export db and name as a qualified name string: 'db.name'
size_t make_qname(char *dst, size_t dstlen) const
{
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index e1fd7b5ae2b..4aab5b57582 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -9269,6 +9269,43 @@ bool LEX::call_statement_start(THD *thd, const Lex_ident_sys_st *name1,
}
+bool LEX::call_statement_start(THD *thd,
+ const Lex_ident_sys_st *db,
+ const Lex_ident_sys_st *pkg,
+ const Lex_ident_sys_st *proc)
+{
+ Database_qualified_name q_db_pkg(db, pkg);
+ Database_qualified_name q_pkg_proc(pkg, proc);
+ sp_name *spname;
+
+ sql_command= SQLCOM_CALL;
+
+ if (check_db_name(reinterpret_cast<LEX_STRING*>
+ (const_cast<LEX_CSTRING*>
+ (static_cast<const LEX_CSTRING*>(db)))))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), db->str);
+ return NULL;
+ }
+ if (check_routine_name(pkg) ||
+ check_routine_name(proc))
+ return NULL;
+
+ // Concatenate `pkg` and `proc` into `pkg.proc`
+ LEX_CSTRING pkg_dot_proc;
+ if (q_pkg_proc.make_qname(thd->mem_root, &pkg_dot_proc) ||
+ check_ident_length(&pkg_dot_proc) ||
+ !(spname= new (thd->mem_root) sp_name(db, &pkg_dot_proc, true)))
+ return NULL;
+
+ sp_handler_package_function.add_used_routine(thd->lex, thd, spname);
+ sp_handler_package_body.add_used_routine(thd->lex, thd, &q_db_pkg);
+
+ return !(m_sql_cmd= new (thd->mem_root) Sql_cmd_call(spname,
+ &sp_handler_package_procedure));
+}
+
+
sp_package *LEX::get_sp_package() const
{
return sphead ? sphead->get_package() : NULL;
@@ -9543,6 +9580,56 @@ Item *LEX::make_item_func_call_generic(THD *thd, Lex_ident_cli_st *cdb,
}
+/*
+ Create a three-part qualified function call.
+ Currently this is possible only for package routines, e.g.:
+ SELECT db.pkg.func();
+*/
+Item *LEX::make_item_func_call_generic(THD *thd,
+ Lex_ident_cli_st *cdb,
+ Lex_ident_cli_st *cpkg,
+ Lex_ident_cli_st *cfunc,
+ List<Item> *args)
+{
+ static Lex_cstring dot(".", 1);
+ Lex_ident_sys db(thd, cdb), pkg(thd, cpkg), func(thd, cfunc);
+ Database_qualified_name q_db_pkg(db, pkg);
+ Database_qualified_name q_pkg_func(pkg, func);
+ sp_name *qname;
+
+ if (db.is_null() || pkg.is_null() || func.is_null())
+ return NULL; // EOM
+
+ if (check_db_name((LEX_STRING*) static_cast<LEX_CSTRING*>(&db)))
+ {
+ my_error(ER_WRONG_DB_NAME, MYF(0), db.str);
+ return NULL;
+ }
+ if (check_routine_name(&pkg) ||
+ check_routine_name(&func))
+ return NULL;
+
+ // Concatenate `pkg` and `func` into `pkg.func`
+ LEX_CSTRING pkg_dot_func;
+ if (q_pkg_func.make_qname(thd->mem_root, &pkg_dot_func) ||
+ check_ident_length(&pkg_dot_func) ||
+ !(qname= new (thd->mem_root) sp_name(&db, &pkg_dot_func, true)))
+ return NULL;
+
+ sp_handler_package_function.add_used_routine(thd->lex, thd, qname);
+ sp_handler_package_body.add_used_routine(thd->lex, thd, &q_db_pkg);
+
+ thd->lex->safe_to_cache_query= 0;
+
+ if (args && args->elements > 0)
+ return new (thd->mem_root) Item_func_sp(thd, thd->lex->current_context(),
+ qname, &sp_handler_package_function,
+ *args);
+ return new (thd->mem_root) Item_func_sp(thd, thd->lex->current_context(),
+ qname, &sp_handler_package_function);
+}
+
+
Item *LEX::make_item_func_call_native_or_parse_error(THD *thd,
Lex_ident_cli_st &name,
List<Item> *args)
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index e8bac90fe5a..b692f33bc66 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -3866,6 +3866,10 @@ public:
bool call_statement_start(THD *thd, const Lex_ident_sys_st *name);
bool call_statement_start(THD *thd, const Lex_ident_sys_st *name1,
const Lex_ident_sys_st *name2);
+ bool call_statement_start(THD *thd,
+ const Lex_ident_sys_st *db,
+ const Lex_ident_sys_st *pkg,
+ const Lex_ident_sys_st *proc);
sp_variable *find_variable(const LEX_CSTRING *name,
sp_pcontext **ctx,
const Sp_rcontext_handler **rh) const;
@@ -4110,6 +4114,11 @@ public:
Item *make_item_func_sysdate(THD *thd, uint fsp);
Item *make_item_func_call_generic(THD *thd, Lex_ident_cli_st *db,
Lex_ident_cli_st *name, List<Item> *args);
+ Item *make_item_func_call_generic(THD *thd,
+ Lex_ident_cli_st *db,
+ Lex_ident_cli_st *pkg,
+ Lex_ident_cli_st *name,
+ List<Item> *args);
Item *make_item_func_call_native_or_parse_error(THD *thd,
Lex_ident_cli_st &name,
List<Item> *args);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 77c55e3daf3..1ea44a28f1d 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1141,8 +1141,7 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables)
static bool wsrep_command_no_result(char command)
{
- return (command == COM_STMT_PREPARE ||
- command == COM_STMT_FETCH ||
+ return (command == COM_STMT_FETCH ||
command == COM_STMT_SEND_LONG_DATA ||
command == COM_STMT_CLOSE);
}
@@ -1340,7 +1339,13 @@ dispatch_command_return do_command(THD *thd, bool blocking)
DBUG_ASSERT(!thd->mdl_context.has_locks());
DBUG_ASSERT(!thd->get_stmt_da()->is_set());
/* We let COM_QUIT and COM_STMT_CLOSE to execute even if wsrep aborted. */
- if (command != COM_STMT_CLOSE &&
+ if (command == COM_STMT_EXECUTE)
+ {
+ WSREP_DEBUG("PS BF aborted at do_command");
+ thd->wsrep_delayed_BF_abort= true;
+ }
+ if (command != COM_STMT_CLOSE &&
+ command != COM_STMT_EXECUTE &&
command != COM_QUIT)
{
my_error(ER_LOCK_DEADLOCK, MYF(0));
@@ -1423,6 +1428,17 @@ out:
if (unlikely(wsrep_service_started))
wsrep_after_command_after_result(thd);
}
+
+ if (thd->wsrep_delayed_BF_abort)
+ {
+ my_error(ER_LOCK_DEADLOCK, MYF(0));
+ WSREP_DEBUG("Deadlock error for PS query: %s", thd->query());
+ thd->reset_killed();
+ thd->mysys_var->abort = 0;
+ thd->wsrep_retry_counter = 0;
+
+ thd->wsrep_delayed_BF_abort= false;
+ }
#endif /* WITH_WSREP */
DBUG_RETURN(return_value);
}
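
For a brute-force-aborted COM_STMT_EXECUTE the deadlock error is no longer raised inside do_command(); a per-command flag is set, the statement is allowed to run its normal result path, and the error plus the killed-state reset are applied only after the result has been handled. A condensed sketch of that deferred-error shape; the types and names below are illustrative, not the server's:

#include <cstdio>

// Illustrative sketch of the "delayed BF abort" flow for COM_STMT_EXECUTE.
struct Session
{
  bool bf_aborted = false;            // set asynchronously by the replication layer
  bool delayed_bf_abort = false;      // mirrors THD::wsrep_delayed_BF_abort
};

enum class Command { StmtExecute, Query, Quit };

void dispatch(Session &s, Command cmd)
{
  if (s.bf_aborted && cmd == Command::StmtExecute)
    s.delayed_bf_abort = true;        // defer the error, keep the PS protocol in sync
  // ... execute the command and send its result as usual ...
  if (s.delayed_bf_abort)
  {
    std::puts("ER_LOCK_DEADLOCK");    // report only after the result phase
    s.delayed_bf_abort = false;       // and clear the per-command state
  }
}
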
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index e025147c71e..83c064c63c6 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2463,6 +2463,10 @@ static bool check_prepared_statement(Prepared_statement *stmt)
goto error;
}
+#ifdef WITH_WSREP
+ if (wsrep_sync_wait(thd, sql_command))
+ goto error;
+#endif
switch (sql_command) {
case SQLCOM_REPLACE:
case SQLCOM_INSERT:
@@ -4612,7 +4616,13 @@ Prepared_statement::execute_loop(String *expanded_query,
if (set_parameters(expanded_query, packet, packet_end))
return TRUE;
-
+#ifdef WITH_WSREP
+ if (thd->wsrep_delayed_BF_abort)
+ {
+ WSREP_DEBUG("delayed BF abort, quitting execute_loop, stmt: %d", id);
+ return TRUE;
+ }
+#endif /* WITH_WSREP */
reexecute:
// Make sure that reprepare() did not create any new Items.
DBUG_ASSERT(thd->free_list == NULL);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 2fec59fcbf3..0625705c48e 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2009, 2021, MariaDB
+ Copyright (c) 2009, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -3331,6 +3331,16 @@ static my_bool processlist_callback(THD *tmp, processlist_callback_arg *arg)
arg->table->field[11]->store((double) tmp->progress.counter /
(double) max_counter*100.0);
}
+ else
+ {
+ /*
+ This is a DECIMAL column without a DEFAULT.
+ restore_record() fills its Field::ptr with pack_length()
+ zero bytes, but an array of zero bytes is not a valid
+ decimal value. Set it explicitly to 0.
+ */
+ arg->table->field[11]->store((longlong) 0, true);
+ }
mysql_mutex_unlock(&tmp->LOCK_thd_data);
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 55f732669af..44fa8709b1d 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -9759,7 +9759,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
{
bool engine_changed, error, frm_is_created= false, error_handler_pushed= false;
bool no_ha_table= true; /* We have not created table in storage engine yet */
- TABLE *table, *new_table;
+ TABLE *table, *new_table= nullptr;
DDL_LOG_STATE ddl_log_state;
Turn_errors_to_warnings_handler errors_to_warnings;
@@ -9793,7 +9793,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
bool varchar= create_info->varchar, table_creation_was_logged= 0;
bool binlog_as_create_select= 0, log_if_exists= 0;
uint tables_opened;
- handlerton *new_db_type, *old_db_type= nullptr;
+ handlerton *new_db_type= create_info->db_type, *old_db_type;
ha_rows copied=0, deleted=0;
LEX_CUSTRING frm= {0,0};
LEX_CSTRING backup_name;
@@ -10144,22 +10144,24 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT;
}
+ old_db_type= table->s->db_type();
+ new_db_type= create_info->db_type;
+
DBUG_PRINT("info", ("old type: %s new type: %s",
- ha_resolve_storage_engine_name(table->s->db_type()),
- ha_resolve_storage_engine_name(create_info->db_type)));
- if (ha_check_storage_engine_flag(table->s->db_type(), HTON_ALTER_NOT_SUPPORTED))
+ ha_resolve_storage_engine_name(old_db_type),
+ ha_resolve_storage_engine_name(new_db_type)));
+ if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED))
{
DBUG_PRINT("info", ("doesn't support alter"));
- my_error(ER_ILLEGAL_HA, MYF(0), hton_name(table->s->db_type())->str,
+ my_error(ER_ILLEGAL_HA, MYF(0), hton_name(old_db_type)->str,
alter_ctx.db.str, alter_ctx.table_name.str);
DBUG_RETURN(true);
}
- if (ha_check_storage_engine_flag(create_info->db_type,
- HTON_ALTER_NOT_SUPPORTED))
+ if (ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED))
{
DBUG_PRINT("info", ("doesn't support alter"));
- my_error(ER_ILLEGAL_HA, MYF(0), hton_name(create_info->db_type)->str,
+ my_error(ER_ILLEGAL_HA, MYF(0), hton_name(new_db_type)->str,
alter_ctx.new_db.str, alter_ctx.new_name.str);
DBUG_RETURN(true);
}
@@ -10323,6 +10325,17 @@ do_continue:;
if (parse_engine_part_options(thd, table))
DBUG_RETURN(true);
}
+ /*
+ If the old table had partitions and we are doing ALTER TABLE ...
+ engine= <new_engine>, the new table must preserve the original
+ partitioning. This means that the new engine is still the
+ partitioning engine, not the engine specified in the parser.
+ This is discovered in prep_alter_part_table, which in such case
+ updates create_info->db_type.
+ It's therefore important that the assignment below is done
+ after prep_alter_part_table.
+ */
+ new_db_type= create_info->db_type;
#endif
if (mysql_prepare_alter_table(thd, table, create_info, alter_info,
@@ -10401,7 +10414,7 @@ do_continue:;
Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
|| is_inplace_alter_impossible(table, create_info, alter_info)
|| IF_PARTITIONING((partition_changed &&
- !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)), 0))
+ !(old_db_type->partition_flags() & HA_USE_AUTO_PARTITION)), 0))
{
if (alter_info->algorithm(thd) ==
Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
@@ -10419,25 +10432,11 @@ do_continue:;
request table rebuild. Set ALTER_RECREATE flag to force table
rebuild.
*/
- if (create_info->db_type == table->s->db_type() &&
+ if (new_db_type == old_db_type &&
create_info->used_fields & HA_CREATE_USED_ENGINE)
alter_info->flags|= ALTER_RECREATE;
/*
- If the old table had partitions and we are doing ALTER TABLE ...
- engine= <new_engine>, the new table must preserve the original
- partitioning. This means that the new engine is still the
- partitioning engine, not the engine specified in the parser.
- This is discovered in prep_alter_part_table, which in such case
- updates create_info->db_type.
- It's therefore important that the assignment below is done
- after prep_alter_part_table.
- */
- new_db_type= create_info->db_type;
- old_db_type= table->s->db_type();
- new_table= NULL;
-
- /*
Handling of symlinked tables:
If no rename:
Create new data file and index file on the same disk as the
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index bdaf6829fbd..6efdf5f9471 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, 2020, MariaDB
+/* Copyright (c) 2017, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -929,13 +929,11 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
{
if (!transform_into_subq)
return this;
-
+
Json_writer_object trace_wrapper(thd);
Json_writer_object trace_conv(thd, "in_to_subquery_conversion");
trace_conv.add("item", this);
- transform_into_subq= false;
-
List<List_item> values;
LEX *lex= thd->lex;
@@ -1109,15 +1107,38 @@ uint32 Item_func_in::max_length_of_left_expr()
bool Item_func_in::to_be_transformed_into_in_subq(THD *thd)
{
+ bool is_row_list= args[1]->type() == Item::ROW_ITEM;
uint values_count= arg_count-1;
- if (args[1]->type() == Item::ROW_ITEM)
+ if (is_row_list)
values_count*= ((Item_row *)(args[1]))->cols();
if (thd->variables.in_subquery_conversion_threshold == 0 ||
thd->variables.in_subquery_conversion_threshold > values_count)
return false;
+ if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE))
+ return true;
+
+ /* Occurrence of '?' in the IN list is checked only for PREPARE <stmt> commands */
+ for (uint i=1; i < arg_count; i++)
+ {
+ if (!is_row_list)
+ {
+ if (args[i]->type() == Item::PARAM_ITEM)
+ return false;
+ }
+ else
+ {
+ Item_row *row_list= (Item_row *)(args[i]);
+ for (uint j=0; j < row_list->cols(); j++)
+ {
+ if (row_list->element_index(j)->type() == Item::PARAM_ITEM)
+ return false;
+ }
+ }
+ }
+
return true;
}
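
At PREPARE time the IN list may still contain '?' placeholders whose values are unknown, so the rewrite into an IN subquery is refused if any element, in either the flat or the row-list form, is a parameter marker. A simplified standalone version of that scan; the data types are illustrative stand-ins for Item:

#include <vector>

// Illustrative stand-in for the PREPARE-time check in
// to_be_transformed_into_in_subq(): refuse the rewrite if any IN-list
// element is a '?' parameter marker.
struct InElement
{
  bool is_param = false;              // a '?' placeholder (flat list form)
  std::vector<bool> row_params;       // per-column flags (row list form)
};

bool safe_to_rewrite(const std::vector<InElement> &in_list, bool is_row_list)
{
  for (const auto &elem : in_list)
  {
    if (!is_row_list)
    {
      if (elem.is_param)
        return false;                 // value unknown until EXECUTE
    }
    else
    {
      for (bool col_is_param : elem.row_params)
        if (col_is_param)
          return false;
    }
  }
  return true;
}
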
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 6a5f1f39e6d..843966f13c8 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -2296,6 +2296,11 @@ multi_update::initialize_tables(JOIN *join)
if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
error_if_full_join(join)))
DBUG_RETURN(1);
+ if (join->implicit_grouping)
+ {
+ my_error(ER_INVALID_GROUP_FUNC_USE, MYF(0));
+ DBUG_RETURN(1);
+ }
main_table=join->join_tab->table;
table_to_update= 0;
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index e5bea13896a..9f9ef6335f3 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -100,7 +100,8 @@ static void make_unique_view_field_name(THD *thd, Item *target,
itc.rewind();
}
- target->orig_name= target->name.str;
+ if (!target->orig_name)
+ target->orig_name= target->name.str;
target->set_name(thd, buff, name_len, system_charset_info);
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index ea1eebc1236..ce319ff880c 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2010, 2020, 2021, MariaDB
+ Copyright (c) 2010, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2989,9 +2989,29 @@ sp_suid:
;
call:
- CALL_SYM sp_name
+ CALL_SYM ident
{
- if (unlikely(Lex->call_statement_start(thd, $2)))
+ if (unlikely(Lex->call_statement_start(thd, &$2)))
+ MYSQL_YYABORT;
+ }
+ opt_sp_cparam_list
+ {
+ if (Lex->check_cte_dependencies_and_resolve_references())
+ MYSQL_YYABORT;
+ }
+ | CALL_SYM ident '.' ident
+ {
+ if (unlikely(Lex->call_statement_start(thd, &$2, &$4)))
+ MYSQL_YYABORT;
+ }
+ opt_sp_cparam_list
+ {
+ if (Lex->check_cte_dependencies_and_resolve_references())
+ MYSQL_YYABORT;
+ }
+ | CALL_SYM ident '.' ident '.' ident
+ {
+ if (unlikely(Lex->call_statement_start(thd, &$2, &$4, &$6)))
MYSQL_YYABORT;
}
opt_sp_cparam_list
@@ -10443,6 +10463,11 @@ function_call_generic:
if (unlikely(!($$= Lex->make_item_func_call_generic(thd, &$1, &$3, $5))))
MYSQL_YYABORT;
}
+ | ident_cli '.' ident_cli '.' ident_cli '(' opt_expr_list ')'
+ {
+ if (unlikely(!($$= Lex->make_item_func_call_generic(thd, &$1, &$3, &$5, $7))))
+ MYSQL_YYABORT;
+ }
;
fulltext_options:
@@ -18298,6 +18323,10 @@ sp_statement:
MYSQL_YYABORT;
}
opt_sp_cparam_list
+ {
+ if (Lex->check_cte_dependencies_and_resolve_references())
+ MYSQL_YYABORT;
+ }
| ident_cli_directly_assignable '.' ident
{
Lex_ident_sys tmp(thd, &$1);
@@ -18306,6 +18335,21 @@ sp_statement:
MYSQL_YYABORT;
}
opt_sp_cparam_list
+ {
+ if (Lex->check_cte_dependencies_and_resolve_references())
+ MYSQL_YYABORT;
+ }
+ | ident_cli_directly_assignable '.' ident '.' ident
+ {
+ Lex_ident_sys tmp(thd, &$1);
+ if (unlikely(Lex->call_statement_start(thd, &tmp, &$3, &$5)))
+ MYSQL_YYABORT;
+ }
+ opt_sp_cparam_list
+ {
+ if (Lex->check_cte_dependencies_and_resolve_references())
+ MYSQL_YYABORT;
+ }
;
sp_if_then_statements:
diff --git a/sql/table.cc b/sql/table.cc
index 481baf96eeb..474299e059e 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2008, 2021, MariaDB
+ Copyright (c) 2008, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1780,6 +1780,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
Field_data_type_info_array field_data_type_info_array;
MEM_ROOT *old_root= thd->mem_root;
Virtual_column_info **table_check_constraints;
+ bool *interval_unescaped= NULL;
extra2_fields extra2;
bool extra_index_flags_present= FALSE;
DBUG_ENTER("TABLE_SHARE::init_from_binary_frm_image");
@@ -2242,6 +2243,13 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
goto err;
+ if (interval_count)
+ {
+ if (!(interval_unescaped= (bool*) my_alloca(interval_count * sizeof(bool))))
+ goto err;
+ bzero(interval_unescaped, interval_count * sizeof(bool));
+ }
+
field_ptr= share->field;
table_check_constraints= share->check_constraints;
read_length=(uint) (share->fields * field_pack_length +
@@ -2596,11 +2604,17 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (share->mysql_version < 100200)
attr.pack_flag&= ~FIELDFLAG_LONG_DECIMAL;
- if (interval_nr && attr.charset->mbminlen > 1)
+ if (interval_nr && attr.charset->mbminlen > 1 &&
+ !interval_unescaped[interval_nr - 1])
{
- /* Unescape UCS2 intervals from HEX notation */
+ /*
+ Unescape UCS2/UTF16/UTF32 intervals from HEX notation.
+ Note: ENUM/SET columns with equal value lists share a single
+ copy of TYPELIB. Unescape each TYPELIB only once.
+ */
TYPELIB *interval= share->intervals + interval_nr - 1;
unhex_type2(interval);
+ interval_unescaped[interval_nr - 1]= true;
}
#ifndef TO_BE_DELETED_ON_PRODUCTION
@@ -3348,6 +3362,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->error= OPEN_FRM_OK;
thd->status_var.opened_shares++;
thd->mem_root= old_root;
+ my_afree(interval_unescaped);
DBUG_RETURN(0);
err:
@@ -3375,6 +3390,7 @@ err:
open_table_error(share, OPEN_FRM_CORRUPTED, share->open_errno);
thd->mem_root= old_root;
+ my_afree(interval_unescaped);
DBUG_RETURN(HA_ERR_NOT_A_TABLE);
}
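
Because ENUM/SET columns with identical value lists share one TYPELIB, unhex_type2() must not run twice on the same interval; the interval_unescaped array remembers which value lists have already been converted from HEX. A reduced sketch of that bookkeeping, with std::vector in place of my_alloca; the names are illustrative:

#include <vector>

// Illustrative sketch of the interval_unescaped bookkeeping: unescape each
// shared value list exactly once, even when several columns reference it.
struct ValueList { bool hex_encoded = true; /* value names omitted */ };

void unescape_once(std::vector<ValueList> &intervals,
                   const std::vector<size_t> &column_interval_nr)  // 1-based, 0 = none
{
  std::vector<bool> unescaped(intervals.size(), false);
  for (size_t nr : column_interval_nr)
  {
    if (nr == 0 || unescaped[nr - 1])
      continue;                       // no value list, or already converted
    intervals[nr - 1].hex_encoded = false;  // stands in for unhex_type2()
    unescaped[nr - 1] = true;
  }
}
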
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 7f120af73d3..e6f52cd953e 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -988,7 +988,16 @@ static uint get_interval_id(uint *int_count,List<Create_field> &create_fields,
while ((field=it++) != last_field)
{
- if (field->interval_id && field->interval->count == interval->count)
+ /*
+ ENUM/SET columns with equal value lists share a single
+ copy of the underlying TYPELIB.
+ Fields with different mbminlen can't reuse TYPELIBs, because:
+ - mbminlen==1 value lists are written to the FRM as is
+ - mbminlen>1 value lists are written to the FRM in hex-encoded format
+ */
+ if (field->interval_id &&
+ field->interval->count == interval->count &&
+ field->charset->mbminlen == last_field->charset->mbminlen)
{
const char **a,**b;
for (a=field->interval->type_names, b=interval->type_names ;
diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc
index 6ab2834a219..ab746dfa1fb 100644
--- a/sql/wsrep_client_service.cc
+++ b/sql/wsrep_client_service.cc
@@ -340,6 +340,7 @@ int Wsrep_client_service::bf_rollback()
m_thd->global_read_lock.unlock_global_read_lock(m_thd);
}
m_thd->release_transactional_locks();
+ mysql_ull_cleanup(m_thd);
m_thd->mdl_context.release_explicit_locks();
DBUG_RETURN(ret);
diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc
index 1f6537a1351..db6ba43edec 100644
--- a/sql/wsrep_high_priority_service.cc
+++ b/sql/wsrep_high_priority_service.cc
@@ -380,6 +380,7 @@ int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle,
}
int ret= (trans_rollback_stmt(m_thd) || trans_rollback(m_thd));
m_thd->release_transactional_locks();
+ mysql_ull_cleanup(m_thd);
m_thd->mdl_context.release_explicit_locks();
free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC));
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index a2a9ef68e96..eef1fde8c7d 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -1594,6 +1594,73 @@ wsrep_sync_wait_upto (THD* thd,
return ret;
}
+bool wsrep_is_show_query(enum enum_sql_command command)
+{
+ DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
+ return (sql_command_flags[command] & CF_STATUS_COMMAND) != 0;
+}
+
+static bool wsrep_is_diagnostic_query(enum enum_sql_command command)
+{
+ assert(command >= 0 && command <= SQLCOM_END);
+ return (sql_command_flags[command] & CF_DIAGNOSTIC_STMT) != 0;
+}
+
+static enum enum_wsrep_sync_wait
+wsrep_sync_wait_mask_for_command(enum enum_sql_command command)
+{
+ switch (command)
+ {
+ case SQLCOM_SELECT:
+ case SQLCOM_CHECKSUM:
+ return WSREP_SYNC_WAIT_BEFORE_READ;
+ case SQLCOM_DELETE:
+ case SQLCOM_DELETE_MULTI:
+ case SQLCOM_UPDATE:
+ case SQLCOM_UPDATE_MULTI:
+ return WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE;
+ case SQLCOM_REPLACE:
+ case SQLCOM_INSERT:
+ case SQLCOM_REPLACE_SELECT:
+ case SQLCOM_INSERT_SELECT:
+ return WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE;
+ default:
+ if (wsrep_is_diagnostic_query(command))
+ {
+ return WSREP_SYNC_WAIT_NONE;
+ }
+ if (wsrep_is_show_query(command))
+ {
+ switch (command)
+ {
+ case SQLCOM_SHOW_PROFILE:
+ case SQLCOM_SHOW_PROFILES:
+ case SQLCOM_SHOW_SLAVE_HOSTS:
+ case SQLCOM_SHOW_RELAYLOG_EVENTS:
+ case SQLCOM_SHOW_SLAVE_STAT:
+ case SQLCOM_SHOW_BINLOG_STAT:
+ case SQLCOM_SHOW_ENGINE_STATUS:
+ case SQLCOM_SHOW_ENGINE_MUTEX:
+ case SQLCOM_SHOW_ENGINE_LOGS:
+ case SQLCOM_SHOW_PROCESSLIST:
+ case SQLCOM_SHOW_PRIVILEGES:
+ return WSREP_SYNC_WAIT_NONE;
+ default:
+ return WSREP_SYNC_WAIT_BEFORE_SHOW;
+ }
+ }
+ }
+ return WSREP_SYNC_WAIT_NONE;
+}
+
+bool wsrep_sync_wait(THD* thd, enum enum_sql_command command)
+{
+ bool res = false;
+ if (WSREP_CLIENT(thd) && thd->variables.wsrep_sync_wait)
+ res = wsrep_sync_wait(thd, wsrep_sync_wait_mask_for_command(command));
+ return res;
+}
+
void wsrep_keys_free(wsrep_key_arr_t* key_arr)
{
for (size_t i= 0; i < key_arr->keys_len; ++i)
@@ -2789,6 +2856,12 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
return 0;
}
+ if (thd->wsrep_parallel_slave_wait_for_prior_commit())
+ {
+ WSREP_WARN("TOI: wait_for_prior_commit() returned error.");
+ return -1;
+ }
+
int ret= 0;
mysql_mutex_lock(&thd->LOCK_thd_data);
@@ -3319,11 +3392,6 @@ extern bool wsrep_thd_ignore_table(THD *thd)
return thd->wsrep_ignore_table;
}
-bool wsrep_is_show_query(enum enum_sql_command command)
-{
- DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
- return (sql_command_flags[command] & CF_STATUS_COMMAND) != 0;
-}
bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
TABLE_LIST* src_table,
HA_CREATE_INFO *create_info)
@@ -3594,6 +3662,15 @@ enum wsrep::streaming_context::fragment_unit wsrep_fragment_unit(ulong unit)
}
}
+bool THD::wsrep_parallel_slave_wait_for_prior_commit()
+{
+ if (rgi_slave && rgi_slave->is_parallel_exec && wait_for_prior_commit())
+ {
+ return 1;
+ }
+ return 0;
+}
+
/***** callbacks for wsrep service ************/
my_bool get_wsrep_recovery()
diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h
index 9cbe186aedf..88c0e84a1ee 100644
--- a/sql/wsrep_mysqld.h
+++ b/sql/wsrep_mysqld.h
@@ -225,6 +225,7 @@ extern bool wsrep_check_mode_after_open_table (THD *thd, const handlerton *hton,
extern bool wsrep_check_mode_before_cmd_execute (THD *thd);
extern bool wsrep_must_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ);
extern bool wsrep_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ);
+extern bool wsrep_sync_wait (THD* thd, enum enum_sql_command command);
extern enum wsrep::provider::status
wsrep_sync_wait_upto (THD* thd, wsrep_gtid_t* upto, int timeout);
extern int wsrep_check_opts();
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index 786d8b9bbf5..fd2f6d592f4 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -1,4 +1,4 @@
-/* Copyright 2008-2020 Codership Oy <http://www.codership.com>
+/* Copyright 2008-2022 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -33,6 +33,7 @@
#include <cstdio>
#include <cstdlib>
+#include "debug_sync.h"
#include <my_service_manager.h>
@@ -1510,6 +1511,33 @@ static int run_sql_command(THD *thd, const char *query)
return 0;
}
+static void sst_disallow_writes (THD* thd, bool yes)
+{
+ char query_str[64]= { 0, };
+ ssize_t const query_max= sizeof(query_str) - 1;
+ CHARSET_INFO *current_charset;
+
+ current_charset= thd->variables.character_set_client;
+
+ if (!is_supported_parser_charset(current_charset))
+ {
+ /* Do not use non-supported parser character sets */
+ WSREP_WARN("Current client character set is non-supported parser character set: %s", current_charset->cs_name.str);
+ thd->variables.character_set_client= &my_charset_latin1;
+ WSREP_WARN("For SST temporally setting character set to : %s",
+ my_charset_latin1.cs_name.str);
+ }
+
+ snprintf (query_str, query_max, "SET GLOBAL innodb_disallow_writes=%d",
+ yes ? 1 : 0);
+
+ if (run_sql_command(thd, query_str))
+ {
+ WSREP_ERROR("Failed to disallow InnoDB writes");
+ }
+ thd->variables.character_set_client= current_charset;
+}
+
static int sst_flush_tables(THD* thd)
{
@@ -1571,6 +1599,11 @@ static int sst_flush_tables(THD* thd)
else
{
WSREP_INFO("Tables flushed.");
+
+ /* disable further disk IO */
+ sst_disallow_writes(thd, true);
+ WSREP_INFO("Disabled further disk IO.");
+
/*
Tables have been flushed. Create a file with cluster state ID and
wsrep_gtid_domain_id.
@@ -1580,6 +1613,9 @@ static int sst_flush_tables(THD* thd)
(long long)wsrep_locked_seqno, wsrep_gtid_server.domain_id);
err= sst_create_file(flush_success, content);
+ if (err)
+ WSREP_INFO("Creating file for flush_success failed %d",err);
+
const char base_name[]= "tables_flushed";
ssize_t const full_len= strlen(mysql_real_data_home) + strlen(base_name)+2;
char *real_name= (char*) malloc(full_len);
@@ -1619,34 +1655,6 @@ static int sst_flush_tables(THD* thd)
return err;
}
-
-static void sst_disallow_writes (THD* thd, bool yes)
-{
- char query_str[64]= { 0, };
- ssize_t const query_max= sizeof(query_str) - 1;
- CHARSET_INFO *current_charset;
-
- current_charset= thd->variables.character_set_client;
-
- if (!is_supported_parser_charset(current_charset))
- {
- /* Do not use non-supported parser character sets */
- WSREP_WARN("Current client character set is non-supported parser character set: %s", current_charset->cs_name.str);
- thd->variables.character_set_client= &my_charset_latin1;
- WSREP_WARN("For SST temporally setting character set to : %s",
- my_charset_latin1.cs_name.str);
- }
-
- snprintf (query_str, query_max, "SET GLOBAL innodb_disallow_writes=%d",
- yes ? 1 : 0);
-
- if (run_sql_command(thd, query_str))
- {
- WSREP_ERROR("Failed to disallow InnoDB writes");
- }
- thd->variables.character_set_client= current_charset;
-}
-
static void* sst_donor_thread (void* a)
{
sst_thread_arg* arg= (sst_thread_arg*)a;
@@ -1694,8 +1702,7 @@ wait_signal:
err= sst_flush_tables (thd.ptr);
if (!err)
{
- sst_disallow_writes (thd.ptr, true);
- /*
+ /*
Lets also keep statements that modify binary logs (like RESET LOGS,
RESET MASTER) from proceeding until the files have been transferred
to the joiner node.
@@ -1706,6 +1713,18 @@ wait_signal:
}
locked= true;
+
+ WSREP_INFO("Donor state reached");
+
+ DBUG_EXECUTE_IF("sync.wsrep_donor_state",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL sync.wsrep_donor_state_reached "
+ "WAIT_FOR signal.wsrep_donor_state";
+ assert(!debug_sync_set_action(thd.ptr,
+ STRING_WITH_LEN(act)));
+ };);
goto wait_signal;
}
}
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 08a0ae55793..9998fe52fc4 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -2,7 +2,7 @@
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2014, 2021, MariaDB Corporation.
+Copyright (c) 2014, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1334,11 +1334,23 @@ static void btr_page_reorganize_low(page_cur_t *cursor, dict_index_t *index,
else
ut_ad(cursor->rec == page_get_infimum_rec(block->page.frame));
- if (block->page.id().page_no() == index->page &&
- fil_page_get_type(old->page.frame) == FIL_PAGE_TYPE_INSTANT)
+ mtr->set_log_mode(log_mode);
+
+ if (block->page.id().page_no() != index->page ||
+ fil_page_get_type(old->page.frame) != FIL_PAGE_TYPE_INSTANT)
+ ut_ad(!memcmp(old->page.frame, block->page.frame, PAGE_HEADER));
+ else if (!index->is_instant())
+ {
+ ut_ad(!memcmp(old->page.frame, block->page.frame, FIL_PAGE_TYPE));
+ ut_ad(!memcmp(old->page.frame + FIL_PAGE_TYPE + 2,
+ block->page.frame + FIL_PAGE_TYPE + 2,
+ PAGE_HEADER - FIL_PAGE_TYPE - 2));
+ mtr->write<2,mtr_t::FORCED>(*block, FIL_PAGE_TYPE + block->page.frame,
+ FIL_PAGE_INDEX);
+ }
+ else
{
/* Preserve the PAGE_INSTANT information. */
- ut_ad(index->is_instant());
memcpy_aligned<2>(FIL_PAGE_TYPE + block->page.frame,
FIL_PAGE_TYPE + old->page.frame, 2);
memcpy_aligned<2>(PAGE_HEADER + PAGE_INSTANT + block->page.frame,
@@ -1358,9 +1370,10 @@ static void btr_page_reorganize_low(page_cur_t *cursor, dict_index_t *index,
memcpy(PAGE_OLD_SUPREMUM + block->page.frame,
PAGE_OLD_SUPREMUM + old->page.frame, 8);
}
+
+ ut_ad(!memcmp(old->page.frame, block->page.frame, PAGE_HEADER));
}
- ut_ad(!memcmp(old->page.frame, block->page.frame, PAGE_HEADER));
ut_ad(!memcmp(old->page.frame + PAGE_MAX_TRX_ID + PAGE_HEADER,
block->page.frame + PAGE_MAX_TRX_ID + PAGE_HEADER,
PAGE_DATA - (PAGE_MAX_TRX_ID + PAGE_HEADER)));
@@ -1369,7 +1382,6 @@ static void btr_page_reorganize_low(page_cur_t *cursor, dict_index_t *index,
lock_move_reorganize_page(block, old);
/* Write log for the changes, if needed. */
- mtr->set_log_mode(log_mode);
if (log_mode == MTR_LOG_ALL)
{
/* Check and log the changes in the page header. */
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index f0cde7a4055..f9ee51e466b 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -630,10 +630,6 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
return false;
}
-#ifndef UNIV_INNOCHECKSUM
- uint32_t crc32 = 0;
- bool crc32_inited = false;
-#endif /* !UNIV_INNOCHECKSUM */
const ulint zip_size = fil_space_t::zip_size(fsp_flags);
const uint16_t page_type = fil_page_get_type(read_buf);
@@ -728,6 +724,8 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
return false;
}
+ const uint32_t crc32 = buf_calc_page_crc32(read_buf);
+
/* Very old versions of InnoDB only stored 8 byte lsn to the
start and the end of the page. */
@@ -738,18 +736,14 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
!= mach_read_from_4(read_buf + FIL_PAGE_LSN)
&& checksum_field2 != BUF_NO_CHECKSUM_MAGIC) {
- crc32 = buf_calc_page_crc32(read_buf);
- crc32_inited = true;
-
DBUG_EXECUTE_IF(
"page_intermittent_checksum_mismatch", {
- static int page_counter;
- if (page_counter++ == 2) {
- crc32++;
- }
- });
+ static int page_counter;
+ if (page_counter++ == 2) return true;
+ });
- if (checksum_field2 != crc32
+ if ((checksum_field1 != crc32
+ || checksum_field2 != crc32)
&& checksum_field2
!= buf_calc_page_old_checksum(read_buf)) {
return true;
@@ -759,25 +753,11 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
switch (checksum_field1) {
case 0:
case BUF_NO_CHECKSUM_MAGIC:
- break;
- default:
- if (!crc32_inited) {
- crc32 = buf_calc_page_crc32(read_buf);
- crc32_inited = true;
- }
-
- if (checksum_field1 != crc32
- && checksum_field1
- != buf_calc_page_new_checksum(read_buf)) {
- return true;
- }
+ return false;
}
-
- return crc32_inited
- && ((checksum_field1 == crc32
- && checksum_field2 != crc32)
- || (checksum_field1 != crc32
- && checksum_field2 == crc32));
+ return (checksum_field1 != crc32 || checksum_field2 != crc32)
+ && checksum_field1
+ != buf_calc_page_new_checksum(read_buf);
}
#endif /* !UNIV_INNOCHECKSUM */
}
@@ -2150,17 +2130,21 @@ void buf_pool_t::watch_unset(const page_id_t id, buf_pool_t::hash_chain &chain)
buf_page_t *w;
{
transactional_lock_guard<page_hash_latch> g{page_hash.lock_get(chain)};
- /* The page must exist because watch_set() increments buf_fix_count. */
+ /* The page must exist because watch_set() invoked fix(). */
w= page_hash.get(id, chain);
- const auto state= w->state();
- ut_ad(state >= buf_page_t::UNFIXED);
- ut_ad(~buf_page_t::LRU_MASK & state);
ut_ad(w->in_page_hash);
- if (state != buf_page_t::UNFIXED + 1 || !watch_is_sentinel(*w))
+ if (!watch_is_sentinel(*w))
{
- w->unfix();
+ no_watch:
+ ut_d(const auto s=) w->unfix();
+ ut_ad(~buf_page_t::LRU_MASK & s);
w= nullptr;
}
+ const auto state= w->state();
+ ut_ad(~buf_page_t::LRU_MASK & state);
+ ut_ad(state >= buf_page_t::UNFIXED);
+ if (state != buf_page_t::UNFIXED + 1)
+ goto no_watch;
}
if (!w)
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 928e64cbba4..de0890411ba 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1765,17 +1765,19 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept
else
#endif
{
- n_pending_checkpoint_writes++;
+ ut_ad(!checkpoint_pending);
+ checkpoint_pending= true;
latch.wr_unlock();
/* FIXME: issue an asynchronous write */
log.write(offset, {c, get_block_size()});
if (srv_file_flush_method != SRV_O_DSYNC)
ut_a(log.flush());
latch.wr_lock(SRW_LOCK_CALL);
- n_pending_checkpoint_writes--;
+ ut_ad(checkpoint_pending);
+ checkpoint_pending= false;
}
- ut_ad(!n_pending_checkpoint_writes);
+ ut_ad(!checkpoint_pending);
next_checkpoint_no++;
last_checkpoint_lsn= next_checkpoint_lsn;
@@ -1833,7 +1835,7 @@ static bool log_checkpoint_low(lsn_t oldest_lsn, lsn_t end_lsn)
ut_ad(log_sys.get_flushed_lsn() >= flush_lsn);
- if (log_sys.n_pending_checkpoint_writes)
+ if (log_sys.checkpoint_pending)
{
/* A checkpoint write is running */
log_sys.latch.wr_unlock();
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index 68bb2c44d3f..47e9d9cff0c 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -849,7 +849,7 @@ uint32_t dict_drop_index_tree(btr_pcur_t *pcur, trx_t *trx, mtr_t *mtr)
len > DICT_NUM_FIELDS__SYS_INDEXES)
{
rec_corrupted:
- ib::error() << "Corrupted SYS_INDEXES record";
+ sql_print_error("InnoDB: Corrupted SYS_INDEXES record");
return 0;
}
@@ -1331,7 +1331,7 @@ bool dict_sys_t::load_sys_tables()
{
sys_foreign= nullptr;
mismatch= true;
- ib::error() << "Invalid definition of SYS_FOREIGN";
+ sql_print_error("InnoDB: Invalid definition of SYS_FOREIGN");
}
if (!(sys_foreign_cols= load_table(SYS_TABLE[SYS_FOREIGN_COLS],
DICT_ERR_IGNORE_FK_NOKEY)));
@@ -1343,7 +1343,7 @@ bool dict_sys_t::load_sys_tables()
{
sys_foreign_cols= nullptr;
mismatch= true;
- ib::error() << "Invalid definition of SYS_FOREIGN_COLS";
+ sql_print_error("InnoDB: Invalid definition of SYS_FOREIGN_COLS");
}
if (!(sys_virtual= load_table(SYS_TABLE[SYS_VIRTUAL],
DICT_ERR_IGNORE_FK_NOKEY)));
@@ -1354,7 +1354,7 @@ bool dict_sys_t::load_sys_tables()
{
sys_virtual= nullptr;
mismatch= true;
- ib::error() << "Invalid definition of SYS_VIRTUAL";
+ sql_print_error("InnoDB: Invalid definition of SYS_VIRTUAL");
}
unlock();
return mismatch;
@@ -1370,8 +1370,8 @@ dberr_t dict_sys_t::create_or_check_sys_tables()
if (load_sys_tables())
{
- ib::info() << "Set innodb_read_only=1 or innodb_force_recovery=3"
- " to start up";
+ sql_print_information("InnoDB: Set innodb_read_only=1 "
+ "or innodb_force_recovery=3 to start up");
return DB_CORRUPTION;
}
@@ -1403,7 +1403,7 @@ dberr_t dict_sys_t::create_or_check_sys_tables()
const auto srv_file_per_table_backup= srv_file_per_table;
srv_file_per_table= 0;
dberr_t error;
- const char *tablename;
+ span<const char> tablename;
if (!sys_foreign)
{
@@ -1421,9 +1421,11 @@ dberr_t dict_sys_t::create_or_check_sys_tables()
"END;\n", trx);
if (UNIV_UNLIKELY(error != DB_SUCCESS))
{
- tablename= SYS_TABLE[SYS_FOREIGN].data();
+ tablename= SYS_TABLE[SYS_FOREIGN];
err_exit:
- ib::error() << "Creation of " << tablename << " failed: " << error;
+ sql_print_error("InnoDB: Creation of %.*s failed: %s",
+ int(tablename.size()), tablename.data(),
+ ut_strerr(error));
trx->rollback();
row_mysql_unlock_data_dictionary(trx);
trx->free();
@@ -1443,7 +1445,7 @@ err_exit:
"END;\n", trx);
if (UNIV_UNLIKELY(error != DB_SUCCESS))
{
- tablename= SYS_TABLE[SYS_FOREIGN_COLS].data();
+ tablename= SYS_TABLE[SYS_FOREIGN_COLS];
goto err_exit;
}
}
@@ -1458,7 +1460,7 @@ err_exit:
"END;\n", trx);
if (UNIV_UNLIKELY(error != DB_SUCCESS))
{
- tablename= SYS_TABLE[SYS_VIRTUAL].data();
+ tablename= SYS_TABLE[SYS_VIRTUAL];
goto err_exit;
}
}
@@ -1472,10 +1474,11 @@ err_exit:
if (sys_foreign);
else if (!(sys_foreign= load_table(SYS_TABLE[SYS_FOREIGN])))
{
- tablename= SYS_TABLE[SYS_FOREIGN].data();
+ tablename= SYS_TABLE[SYS_FOREIGN];
load_fail:
unlock();
- ib::error() << "Failed to CREATE TABLE " << tablename;
+ sql_print_error("InnoDB: Failed to CREATE TABLE %.*s",
+ int(tablename.size()), tablename.data());
return DB_TABLE_NOT_FOUND;
}
else
@@ -1484,7 +1487,7 @@ load_fail:
if (sys_foreign_cols);
else if (!(sys_foreign_cols= load_table(SYS_TABLE[SYS_FOREIGN_COLS])))
{
- tablename= SYS_TABLE[SYS_FOREIGN_COLS].data();
+ tablename= SYS_TABLE[SYS_FOREIGN_COLS];
goto load_fail;
}
else
@@ -1493,7 +1496,7 @@ load_fail:
if (sys_virtual);
else if (!(sys_virtual= load_table(SYS_TABLE[SYS_VIRTUAL])))
{
- tablename= SYS_TABLE[SYS_VIRTUAL].data();
+ tablename= SYS_TABLE[SYS_VIRTUAL];
goto load_fail;
}
else
@@ -1516,12 +1519,14 @@ dict_foreign_eval_sql(
const char* id, /*!< in: foreign key id */
trx_t* trx) /*!< in/out: transaction */
{
- dberr_t error;
FILE* ef = dict_foreign_err_file;
- error = que_eval_sql(info, sql, trx);
+ dberr_t error = que_eval_sql(info, sql, trx);
- if (error == DB_DUPLICATE_KEY) {
+ switch (error) {
+ case DB_SUCCESS:
+ break;
+ case DB_DUPLICATE_KEY:
mysql_mutex_lock(&dict_foreign_err_mutex);
rewind(ef);
ut_print_timestamp(ef);
@@ -1541,15 +1546,11 @@ dict_foreign_eval_sql(
"names can occur. Workaround: name your constraints\n"
"explicitly with unique names.\n",
ef);
-
- mysql_mutex_unlock(&dict_foreign_err_mutex);
-
- return(error);
- }
-
- if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
- ib::error() << "Foreign key constraint creation failed: "
- << error;
+ goto release;
+ default:
+ sql_print_error("InnoDB: "
+ "Foreign key constraint creation failed: %s",
+ ut_strerr(error));
mysql_mutex_lock(&dict_foreign_err_mutex);
ut_print_timestamp(ef);
@@ -1559,12 +1560,11 @@ dict_foreign_eval_sql(
fputs(".\n"
"See the MariaDB .err log in the datadir"
" for more information.\n", ef);
+release:
mysql_mutex_unlock(&dict_foreign_err_mutex);
-
- return(error);
}
- return(DB_SUCCESS);
+ return error;
}
/********************************************************************//**
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 58b219b452a..3d5e2434978 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2021, MariaDB Corporation.
+Copyright (c) 2016, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -43,6 +43,7 @@ Created 4/24/1996 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
#include "fts0opt.h"
+#include "row0vers.h"
/** Loads a table definition and also all its index definitions.
@@ -65,22 +66,20 @@ static dict_table_t *dict_load_table_one(const span<const char> &name,
dict_names_t &fk_tables);
/** Load an index definition from a SYS_INDEXES record to dict_index_t.
-If allocate=TRUE, we will create a dict_index_t structure and fill it
-accordingly. If allocated=FALSE, the dict_index_t will be supplied by
-the caller and filled with information read from the record.
@return error message
@retval NULL on success */
static
const char*
dict_load_index_low(
byte* table_id, /*!< in/out: table id (8 bytes),
- an "in" value if allocate=TRUE
- and "out" when allocate=FALSE */
+ an "in" value if mtr
+ and "out" when !mtr */
mem_heap_t* heap, /*!< in/out: temporary memory heap */
const rec_t* rec, /*!< in: SYS_INDEXES record */
- ibool allocate, /*!< in: TRUE=allocate *index,
- FALSE=fill in a pre-allocated
- *index */
+ mtr_t* mtr, /*!< in/out: mini-transaction,
+ or nullptr if a pre-allocated
+ *index is to be filled in */
+ dict_table_t* table, /*!< in/out: table, or NULL */
dict_index_t** index); /*!< out,own: index, or NULL */
/** Load a table column definition from a SYS_COLUMNS record to dict_table_t.
@@ -100,6 +99,7 @@ dict_load_column_low(
table_id_t* table_id, /*!< out: table id */
const char** col_name, /*!< out: column name */
const rec_t* rec, /*!< in: SYS_COLUMNS record */
+ mtr_t* mtr, /*!< in/out: mini-transaction */
ulint* nth_v_col); /*!< out: if not NULL, this
records the "n" of "nth" virtual
column */
@@ -143,6 +143,7 @@ dict_load_field_low(
byte* last_index_id, /*!< in: last index id */
mem_heap_t* heap, /*!< in/out: memory heap
for temporary storage */
+ mtr_t* mtr, /*!< in/out: mini-transaction */
const rec_t* rec); /*!< in: SYS_FIELDS record */
#ifdef UNIV_DEBUG
@@ -179,7 +180,7 @@ dict_getnext_system_low(
{
rec_t* rec = NULL;
- while (!rec || rec_get_deleted_flag(rec, 0)) {
+ while (!rec) {
btr_pcur_move_to_next_user_rec(pcur, mtr);
rec = btr_pcur_get_rec(pcur);
@@ -209,9 +210,13 @@ dict_startscan_system(
mtr_t* mtr, /*!< in: the mini-transaction */
dict_table_t* table) /*!< in: system table */
{
- btr_pcur_open_at_index_side(true, table->indexes.start,
- BTR_SEARCH_LEAF, pcur, true, 0, mtr);
- return dict_getnext_system_low(pcur, mtr);
+ btr_pcur_open_at_index_side(true, table->indexes.start, BTR_SEARCH_LEAF,
+ pcur, true, 0, mtr);
+ const rec_t *rec;
+ do
+ rec= dict_getnext_system_low(pcur, mtr);
+ while (rec && rec_get_deleted_flag(rec, 0));
+ return rec;
}
/********************************************************************//**
@@ -230,7 +235,9 @@ dict_getnext_system(
pcur->restore_position(BTR_SEARCH_LEAF, mtr);
/* Get the next record */
- rec = dict_getnext_system_low(pcur, mtr);
+ do {
+ rec = dict_getnext_system_low(pcur, mtr);
+ } while (rec && rec_get_deleted_flag(rec, 0));
return(rec);
}
@@ -249,14 +256,13 @@ dict_process_sys_indexes_rec(
table_id_t* table_id) /*!< out: index table id */
{
const char* err_msg;
- byte* buf;
+ byte buf[8];
ut_d(index->is_dummy = true);
ut_d(index->in_instant_init = false);
- buf = static_cast<byte*>(mem_heap_alloc(heap, 8));
/* Parse the record, and get "dict_index_t" struct filled */
- err_msg = dict_load_index_low(buf, heap, rec, FALSE, &index);
+ err_msg = dict_load_index_low(buf, heap, rec, nullptr, nullptr, &index);
*table_id = mach_read_from_8(buf);
@@ -282,7 +288,8 @@ dict_process_sys_columns_rec(
/* Parse the record, and get "dict_col_t" struct filled */
err_msg = dict_load_column_low(NULL, heap, column,
- table_id, col_name, rec, nth_v_col);
+ table_id, col_name, rec, nullptr,
+ nth_v_col);
return(err_msg);
}
@@ -301,13 +308,7 @@ dict_process_sys_virtual_rec(
ulint* pos,
ulint* base_pos)
{
- const char* err_msg;
-
- /* Parse the record, and get "dict_col_t" struct filled */
- err_msg = dict_load_virtual_low(NULL, NULL, table_id,
- pos, base_pos, rec);
-
- return(err_msg);
+ return dict_load_virtual_low(nullptr, nullptr, table_id, pos, base_pos, rec);
}
/********************************************************************//**
@@ -325,17 +326,14 @@ dict_process_sys_fields_rec(
index_id_t* index_id, /*!< out: current index id */
index_id_t last_id) /*!< in: previous index id */
{
- byte* buf;
- byte* last_index_id;
+ byte buf[8];
+ byte last_index_id[8];
const char* err_msg;
- buf = static_cast<byte*>(mem_heap_alloc(heap, 8));
-
- last_index_id = static_cast<byte*>(mem_heap_alloc(heap, 8));
mach_write_to_8(last_index_id, last_id);
err_msg = dict_load_field_low(buf, NULL, sys_field,
- pos, last_index_id, heap, rec);
+ pos, last_index_id, heap, nullptr, rec);
*index_id = mach_read_from_8(buf);
@@ -507,10 +505,6 @@ dict_sys_tables_rec_check(
ut_ad(dict_sys.locked());
- if (rec_get_deleted_flag(rec, 0)) {
- return("delete-marked record in SYS_TABLES");
- }
-
if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_TABLES) {
return("wrong number of columns in SYS_TABLES record");
}
@@ -636,29 +630,77 @@ uint32_t dict_sys_tables_type_to_tf(uint32_t type, bool not_redundant)
return(flags);
}
+/** Outcome of dict_sys_tables_rec_read() */
+enum table_read_status { READ_OK= 0, READ_ERROR, READ_NOT_FOUND };
+
/** Read and return 5 integer fields from a SYS_TABLES record.
@param[in] rec A record of SYS_TABLES
-@param[in] name SYS_TABLES.NAME
+@param[in] mtr mini-transaction
@param[out] table_id Pointer to the table_id for this table
@param[out] space_id Pointer to the space_id for this table
@param[out] n_cols Pointer to number of columns for this table.
@param[out] flags Pointer to table flags
@param[out] flags2 Pointer to table flags2
-@return true if the record was read correctly, false if not. */
+@param[out] trx_id DB_TRX_ID of the committed SYS_TABLES record,
+ or nullptr to perform READ UNCOMMITTED
+@return whether the record was read correctly */
MY_ATTRIBUTE((warn_unused_result))
static
-bool
+table_read_status
dict_sys_tables_rec_read(
const rec_t* rec,
- const span<const char>& name,
+ mtr_t* mtr,
table_id_t* table_id,
uint32_t* space_id,
uint32_t* n_cols,
uint32_t* flags,
- uint32_t* flags2)
+ uint32_t* flags2,
+ trx_id_t* trx_id)
{
const byte* field;
ulint len;
+ mem_heap_t* heap = nullptr;
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_TABLES__DB_TRX_ID, &len);
+ ut_ad(len == 6 || len == UNIV_SQL_NULL);
+ trx_id_t id = len == 6 ? trx_read_trx_id(field) : 0;
+ if (id && trx_sys.find(nullptr, id, false)) {
+ const auto savepoint = mtr->get_savepoint();
+ heap = mem_heap_create(1024);
+ dict_index_t* index = UT_LIST_GET_FIRST(
+ dict_sys.sys_tables->indexes);
+ rec_offs* offsets = rec_get_offsets(
+ rec, index, nullptr, true, ULINT_UNDEFINED, &heap);
+ const rec_t* old_vers;
+ row_vers_build_for_semi_consistent_read(
+ nullptr, rec, mtr, index, &offsets, &heap,
+ heap, &old_vers, nullptr);
+ mtr->rollback_to_savepoint(savepoint);
+ rec = old_vers;
+ if (!rec) {
+ mem_heap_free(heap);
+ return READ_NOT_FOUND;
+ }
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_TABLES__DB_TRX_ID, &len);
+ if (UNIV_UNLIKELY(len != 6)) {
+ mem_heap_free(heap);
+ return READ_ERROR;
+ }
+ id = trx_read_trx_id(field);
+ }
+
+ if (rec_get_deleted_flag(rec, 0)) {
+ ut_ad(id);
+ if (trx_id) {
+ return READ_NOT_FOUND;
+ }
+ }
+
+ if (trx_id) {
+ *trx_id = id;
+ }
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_TABLES__ID, &len);
@@ -767,8 +809,13 @@ dict_sys_tables_rec_read(
" data dictionary contains invalid flags."
" SYS_TABLES.TYPE=" UINT32PF
" SYS_TABLES.N_COLS=" UINT32PF,
- int(name.size()), name.data(), type, *n_cols);
- return(false);
+ int(rec_get_field_start_offs(rec, 1)), rec,
+ type, *n_cols);
+err_exit:
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+ return READ_ERROR;
}
*flags = dict_sys_tables_type_to_tf(type, not_redundant);
@@ -792,9 +839,10 @@ dict_sys_tables_rec_read(
" contains invalid flags."
" SYS_TABLES.TYPE=" UINT32PF
" SYS_TABLES.MIX_LEN=" UINT32PF,
- int(name.size()), name.data(),
+ int(rec_get_field_start_offs(rec, 1)),
+ rec,
type, *flags2);
- return(false);
+ goto err_exit;
}
/* DICT_TF2_FTS will be set when indexes are being loaded */
@@ -806,7 +854,11 @@ dict_sys_tables_rec_read(
*flags2 = 0;
}
- return(true);
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+
+ return READ_OK;
}
/** Load and check each non-predefined tablespace mentioned in SYS_TABLES.
@@ -842,7 +894,6 @@ static uint32_t dict_check_sys_tables()
continue;
}
- /* Copy the table name from rec */
const char *field = reinterpret_cast<const char*>(
rec_get_nth_field_old(rec, DICT_FLD__SYS_TABLES__NAME,
&len));
@@ -850,10 +901,9 @@ static uint32_t dict_check_sys_tables()
DBUG_PRINT("dict_check_sys_tables",
("name: %*.s", static_cast<int>(len), field));
- const span<const char> name{field, len};
-
- if (!dict_sys_tables_rec_read(rec, name, &table_id, &space_id,
- &n_cols, &flags, &flags2)
+ if (dict_sys_tables_rec_read(rec, &mtr, &table_id, &space_id,
+ &n_cols, &flags, &flags2, nullptr)
+ != READ_OK
|| space_id == TRX_SYS_SPACE) {
continue;
}
@@ -878,13 +928,18 @@ static uint32_t dict_check_sys_tables()
continue;
}
+ const span<const char> name{field, len};
+
char* filepath = fil_make_filepath(nullptr, name,
IBD, false);
+ const bool not_dropped{!rec_get_deleted_flag(rec, 0)};
+
/* Check that the .ibd file exists. */
- if (fil_ibd_open(false, FIL_TYPE_TABLESPACE,
+ if (fil_ibd_open(not_dropped, FIL_TYPE_TABLESPACE,
space_id, dict_tf_to_fsp_flags(flags),
name, filepath)) {
+ } else if (!not_dropped) {
} else if (srv_operation == SRV_OPERATION_NORMAL
&& srv_start_after_restore
&& srv_force_recovery < SRV_FORCE_NO_BACKGROUND
@@ -897,8 +952,7 @@ static uint32_t dict_check_sys_tables()
sql_print_warning("InnoDB: Ignoring tablespace for"
" %.*s because it"
" could not be opened.",
- static_cast<int>(name.size()),
- name.data());
+ static_cast<int>(len), field);
}
max_space_id = ut_max(max_space_id, space_id);
@@ -970,6 +1024,7 @@ dict_load_column_low(
table_id_t* table_id, /*!< out: table id */
const char** col_name, /*!< out: column name */
const rec_t* rec, /*!< in: SYS_COLUMNS record */
+ mtr_t* mtr, /*!< in/out: mini-transaction */
ulint* nth_v_col) /*!< out: if not NULL, this
records the "n" of "nth" virtual
column */
@@ -985,10 +1040,6 @@ dict_load_column_low(
ut_ad(!table == !!column);
- if (rec_get_deleted_flag(rec, 0)) {
- return(dict_load_column_del);
- }
-
if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_COLUMNS) {
return("wrong number of columns in SYS_COLUMNS record");
}
@@ -1020,7 +1071,30 @@ err_len:
goto err_len;
}
- const trx_id_t trx_id = mach_read_from_6(field);
+ const trx_id_t trx_id = trx_read_trx_id(field);
+
+ if (trx_id && mtr && trx_sys.find(nullptr, trx_id, false)) {
+ const auto savepoint = mtr->get_savepoint();
+ dict_index_t* index = UT_LIST_GET_FIRST(
+ dict_sys.sys_columns->indexes);
+ rec_offs* offsets = rec_get_offsets(
+ rec, index, nullptr, true, ULINT_UNDEFINED, &heap);
+ const rec_t* old_vers;
+ row_vers_build_for_semi_consistent_read(
+ nullptr, rec, mtr, index, &offsets, &heap,
+ heap, &old_vers, nullptr);
+ mtr->rollback_to_savepoint(savepoint);
+ rec = old_vers;
+ if (!old_vers) {
+ return dict_load_column_none;
+ }
+ ut_ad(!rec_get_deleted_flag(rec, 0));
+ }
+
+ if (rec_get_deleted_flag(rec, 0)) {
+ ut_ad(trx_id);
+ return dict_load_column_del;
+ }
rec_get_nth_field_offs_old(
rec, DICT_FLD__SYS_COLUMNS__DB_ROLL_PTR, &len);
@@ -1034,11 +1108,7 @@ err_len:
goto err_len;
}
- name = mem_heap_strdupl(heap, (const char*) field, len);
-
- if (col_name) {
- *col_name = name;
- }
+ *col_name = name = mem_heap_strdupl(heap, (const char*) field, len);
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_COLUMNS__MTYPE, &len);
@@ -1153,10 +1223,6 @@ dict_load_virtual_low(
ulint len;
ulint base;
- if (rec_get_deleted_flag(rec, 0)) {
- return(dict_load_virtual_del);
- }
-
if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_VIRTUAL) {
return("wrong number of columns in SYS_VIRTUAL record");
}
@@ -1196,7 +1262,7 @@ err_len:
*base_pos = base;
}
- rec_get_nth_field_offs_old(
+ field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_VIRTUAL__DB_TRX_ID, &len);
if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) {
goto err_len;
@@ -1208,6 +1274,17 @@ err_len:
goto err_len;
}
+ const trx_id_t trx_id = trx_read_trx_id(field);
+
+ if (trx_id && column && trx_sys.find(nullptr, trx_id, false)) {
+ if (!rec_get_deleted_flag(rec, 0)) {
+ return dict_load_virtual_none;
+ }
+ } else if (rec_get_deleted_flag(rec, 0)) {
+ ut_ad(trx_id != 0);
+ return dict_load_virtual_del;
+ }
+
if (column != NULL) {
*column = dict_table_get_nth_col(table, base);
}
@@ -1272,7 +1349,7 @@ dict_load_columns(
err_msg = btr_pcur_is_on_user_rec(&pcur)
? dict_load_column_low(table, heap, NULL, NULL,
- &name, rec, &nth_v_col)
+ &name, rec, &mtr, &nth_v_col)
: dict_load_column_none;
if (!err_msg) {
@@ -1280,8 +1357,8 @@ dict_load_columns(
n_skipped++;
goto next_rec;
} else if (err_msg == dict_load_column_none
- && strstr(table->name.m_name,
- "/" TEMP_FILE_PREFIX_INNODB)) {
+ && strstr(table->name.m_name,
+ "/" TEMP_FILE_PREFIX_INNODB)) {
break;
} else {
ib::fatal() << err_msg << " for table " << table->name;
@@ -1461,6 +1538,7 @@ dict_load_field_low(
byte* last_index_id, /*!< in: last index id */
mem_heap_t* heap, /*!< in/out: memory heap
for temporary storage */
+ mtr_t* mtr, /*!< in/out: mini-transaction */
const rec_t* rec) /*!< in: SYS_FIELDS record */
{
const byte* field;
@@ -1472,11 +1550,8 @@ dict_load_field_low(
ulint position;
/* Either index or sys_field is supplied, not both */
- ut_a((!index) || (!sys_field));
-
- if (rec_get_deleted_flag(rec, 0)) {
- return(dict_load_field_del);
- }
+ ut_ad((!index) != (!sys_field));
+ ut_ad((!index) == !mtr);
if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_FIELDS) {
return("wrong number of columns in SYS_FIELDS record");
@@ -1532,7 +1607,7 @@ err_len:
position = pos_and_prefix_len & 0xFFFFUL;
}
- rec_get_nth_field_offs_old(
+ field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FIELDS__DB_TRX_ID, &len);
if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) {
goto err_len;
@@ -1543,6 +1618,31 @@ err_len:
goto err_len;
}
+ const trx_id_t trx_id = trx_read_trx_id(field);
+
+ if (!trx_id) {
+ ut_ad(!rec_get_deleted_flag(rec, 0));
+ } else if (mtr && trx_sys.find(nullptr, trx_id, false)) {
+ const auto savepoint = mtr->get_savepoint();
+ dict_index_t* sys_field = UT_LIST_GET_FIRST(
+ dict_sys.sys_fields->indexes);
+ rec_offs* offsets = rec_get_offsets(
+ rec, sys_field, nullptr, true, ULINT_UNDEFINED, &heap);
+ const rec_t* old_vers;
+ row_vers_build_for_semi_consistent_read(
+ nullptr, rec, mtr, sys_field, &offsets, &heap,
+ heap, &old_vers, nullptr);
+ mtr->rollback_to_savepoint(savepoint);
+ rec = old_vers;
+ if (!old_vers || rec_get_deleted_flag(rec, 0)) {
+ return dict_load_field_none;
+ }
+ }
+
+ if (rec_get_deleted_flag(rec, 0)) {
+ return(dict_load_field_del);
+ }
+
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FIELDS__COL_NAME, &len);
if (len == 0 || len == UNIV_SQL_NULL) {
@@ -1554,9 +1654,6 @@ err_len:
index, mem_heap_strdupl(heap, (const char*) field, len),
prefix_len, descending);
} else {
- ut_a(sys_field);
- ut_a(pos);
-
sys_field->name = mem_heap_strdupl(
heap, (const char*) field, len);
sys_field->prefix_len = prefix_len & ((1U << 12) - 1);
@@ -1608,7 +1705,8 @@ dict_load_fields(
for (i = 0; i < index->n_fields; i++) {
const char *err_msg = btr_pcur_is_on_user_rec(&pcur)
? dict_load_field_low(buf, index, NULL, NULL, NULL,
- heap, btr_pcur_get_rec(&pcur))
+ heap, &mtr,
+ btr_pcur_get_rec(&pcur))
: dict_load_field_none;
if (!err_msg) {
@@ -1647,36 +1745,30 @@ static const char *dict_load_index_none= "SYS_INDEXES record not found";
static const char *dict_load_table_flags= "incorrect flags in SYS_TABLES";
/** Load an index definition from a SYS_INDEXES record to dict_index_t.
-If allocate=TRUE, we will create a dict_index_t structure and fill it
-accordingly. If allocated=FALSE, the dict_index_t will be supplied by
-the caller and filled with information read from the record.
@return error message
@retval NULL on success */
static
const char*
dict_load_index_low(
byte* table_id, /*!< in/out: table id (8 bytes),
- an "in" value if allocate=TRUE
- and "out" when allocate=FALSE */
+ an "in" value if mtr
+ and "out" when !mtr */
mem_heap_t* heap, /*!< in/out: temporary memory heap */
const rec_t* rec, /*!< in: SYS_INDEXES record */
- ibool allocate, /*!< in: TRUE=allocate *index,
- FALSE=fill in a pre-allocated
- *index */
+ mtr_t* mtr, /*!< in/out: mini-transaction,
+ or nullptr if a pre-allocated
+ *index is to be filled in */
+ dict_table_t* table, /*!< in/out: table, or NULL */
dict_index_t** index) /*!< out,own: index, or NULL */
{
const byte* field;
ulint len;
- ulint name_len;
- char* name_buf;
index_id_t id;
ulint n_fields;
ulint type;
unsigned merge_threshold;
- if (allocate) {
- /* If allocate=TRUE, no dict_index_t will
- be supplied. Initialize "*index" to NULL */
+ if (mtr) {
*index = NULL;
}
@@ -1711,7 +1803,7 @@ err_len:
return("incorrect column length in SYS_INDEXES");
}
- if (!allocate) {
+ if (!mtr) {
/* We are reading a SYS_INDEXES record. Copy the table_id */
memcpy(table_id, (const char*) field, 8);
} else if (memcmp(field, table_id, 8)) {
@@ -1728,7 +1820,7 @@ err_len:
id = mach_read_from_8(field);
- rec_get_nth_field_offs_old(
+ field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_INDEXES__DB_TRX_ID, &len);
if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) {
goto err_len;
@@ -1739,15 +1831,31 @@ err_len:
goto err_len;
}
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_INDEXES__NAME, &name_len);
- if (name_len == 0 || name_len == UNIV_SQL_NULL) {
- goto err_len;
+ const trx_id_t trx_id = trx_read_trx_id(field);
+ if (!trx_id) {
+ ut_ad(!rec_get_deleted_flag(rec, 0));
+ } else if (!mtr) {
+ } else if (trx_sys.find(nullptr, trx_id, false)) {
+ const auto savepoint = mtr->get_savepoint();
+ dict_index_t* sys_index = UT_LIST_GET_FIRST(
+ dict_sys.sys_indexes->indexes);
+ rec_offs* offsets = rec_get_offsets(
+ rec, sys_index, nullptr, true, ULINT_UNDEFINED, &heap);
+ const rec_t* old_vers;
+ row_vers_build_for_semi_consistent_read(
+ nullptr, rec, mtr, sys_index, &offsets, &heap,
+ heap, &old_vers, nullptr);
+ mtr->rollback_to_savepoint(savepoint);
+ rec = old_vers;
+ if (!old_vers || rec_get_deleted_flag(rec, 0)) {
+ return dict_load_index_none;
+ }
+ } else if (rec_get_deleted_flag(rec, 0)
+ && rec[8 + 8 + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN]
+ != static_cast<byte>(*TEMP_INDEX_PREFIX_STR)
+ && table->def_trx_id < trx_id) {
+ table->def_trx_id = trx_id;
}
- ut_ad(field == &rec[8 + 8 + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN]);
-
- name_buf = mem_heap_strdupl(heap, (const char*) field,
- name_len);
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_INDEXES__N_FIELDS, &len);
@@ -1772,16 +1880,27 @@ err_len:
goto err_len;
}
+ ut_d(const auto name_offs =)
+ rec_get_nth_field_offs_old(rec, DICT_FLD__SYS_INDEXES__NAME, &len);
+ ut_ad(name_offs == 8 + 8 + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
+
+ if (len == 0 || len == UNIV_SQL_NULL) {
+ goto err_len;
+ }
+
if (rec_get_deleted_flag(rec, 0)) {
- return(dict_load_index_del);
+ return dict_load_index_del;
}
- if (allocate) {
- *index = dict_mem_index_create(NULL, name_buf, type, n_fields);
- } else {
- ut_a(*index);
+ char* name = mem_heap_strdupl(heap, reinterpret_cast<const char*>(rec)
+ + (8 + 8 + DATA_TRX_ID_LEN
+ + DATA_ROLL_PTR_LEN),
+ len);
- dict_mem_fill_index_struct(*index, NULL, name_buf,
+ if (mtr) {
+ *index = dict_mem_index_create(table, name, type, n_fields);
+ } else {
+ dict_mem_fill_index_struct(*index, nullptr, name,
type, n_fields);
}
@@ -1813,7 +1932,7 @@ dict_load_indexes(
dtuple_t* tuple;
dfield_t* dfield;
const rec_t* rec;
- byte* buf;
+ byte buf[8];
mtr_t mtr;
dberr_t error = DB_SUCCESS;
@@ -1831,7 +1950,6 @@ dict_load_indexes(
tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
- buf = static_cast<byte*>(mem_heap_alloc(heap, 8));
mach_write_to_8(buf, table->id);
dfield_set_data(dfield, buf, 8);
@@ -1867,7 +1985,8 @@ dict_load_indexes(
}
}
- err_msg = dict_load_index_low(buf, heap, rec, TRUE, &index);
+ err_msg = dict_load_index_low(buf, heap, rec, &mtr, table,
+ &index);
ut_ad(!index == !!err_msg);
if (err_msg == dict_load_index_none) {
@@ -1877,10 +1996,6 @@ dict_load_indexes(
}
if (err_msg == dict_load_index_del) {
- const trx_id_t id = mach_read_from_6(rec + 8 + 8);
- if (id > table->def_trx_id) {
- table->def_trx_id = id;
- }
goto next_rec;
} else if (err_msg) {
ib::error() << err_msg;
@@ -1893,7 +2008,7 @@ dict_load_indexes(
== static_cast<byte>(*TEMP_INDEX_PREFIX_STR)) {
goto next_rec;
} else {
- const trx_id_t id = mach_read_from_6(rec + 8 + 8);
+ const trx_id_t id = trx_read_trx_id(rec + 8 + 8);
if (id > table->def_trx_id) {
table->def_trx_id = id;
}
@@ -1955,7 +2070,6 @@ corrupted:
dictionary cache for such metadata corruption,
since we would always be able to set it
when loading the dictionary cache */
- index->table = table;
dict_set_corrupted_index_cache_only(index);
} else if (!dict_index_is_clust(index)
&& NULL == dict_table_get_first_index(table)) {
@@ -1974,7 +2088,6 @@ corrupted:
of the database server */
dict_mem_index_free(index);
} else {
- index->table = table;
dict_load_fields(index, heap);
/* The data dictionary tables should never contain
@@ -2036,43 +2149,41 @@ func_exit:
/** Load a table definition from a SYS_TABLES record to dict_table_t.
Do not load any columns or indexes.
-@param[in] name Table name
+@param[in,out] mtr mini-transaction
@param[in] rec SYS_TABLES record
@param[out,own] table table, or nullptr
@return error message
@retval nullptr on success */
-const char *dict_load_table_low(const span<const char> &name,
+const char *dict_load_table_low(mtr_t *mtr,
const rec_t *rec, dict_table_t **table)
{
table_id_t table_id;
uint32_t space_id, t_num, flags, flags2;
ulint n_cols, n_v_col;
+ trx_id_t trx_id;
if (const char* error_text = dict_sys_tables_rec_check(rec)) {
*table = NULL;
return(error_text);
}
- if (!dict_sys_tables_rec_read(rec, name, &table_id, &space_id,
- &t_num, &flags, &flags2)) {
+ if (auto r = dict_sys_tables_rec_read(rec, mtr, &table_id, &space_id,
+ &t_num, &flags, &flags2,
+ &trx_id)) {
*table = NULL;
- return(dict_load_table_flags);
+ return r == READ_ERROR ? dict_load_table_flags : nullptr;
}
dict_table_decode_n_col(t_num, &n_cols, &n_v_col);
- *table = dict_table_t::create(name, nullptr, n_cols + n_v_col,
- n_v_col, flags, flags2);
+ *table = dict_table_t::create(
+ span<const char>(reinterpret_cast<const char*>(rec),
+ rec_get_field_start_offs(rec, 1)),
+ nullptr, n_cols + n_v_col, n_v_col, flags, flags2);
(*table)->space_id = space_id;
(*table)->id = table_id;
(*table)->file_unreadable = !!(flags2 & DICT_TF2_DISCARDED);
-
- ulint len;
- (*table)->def_trx_id = mach_read_from_6(
- rec_get_nth_field_old(rec, DICT_FLD__SYS_TABLES__DB_TRX_ID,
- &len));
- ut_ad(len == DATA_TRX_ID_LEN);
- static_assert(DATA_TRX_ID_LEN == 6, "compatibility");
+ (*table)->def_trx_id = trx_id;
return(NULL);
}
@@ -2158,7 +2269,7 @@ dict_load_tablespace(
}
table->space = fil_ibd_open(
- true, FIL_TYPE_TABLESPACE, table->space_id,
+ 2, FIL_TYPE_TABLESPACE, table->space_id,
dict_tf_to_fsp_flags(table->flags),
{table->name.m_name, strlen(table->name.m_name)}, filepath);
@@ -2196,8 +2307,6 @@ static dict_table_t *dict_load_table_one(const span<const char> &name,
mem_heap_t* heap;
dfield_t* dfield;
const rec_t* rec;
- const byte* field;
- ulint len;
mtr_t mtr;
DBUG_ENTER("dict_load_table_one");
@@ -2233,8 +2342,7 @@ static dict_table_t *dict_load_table_one(const span<const char> &name,
BTR_SEARCH_LEAF, &pcur, &mtr);
rec = btr_pcur_get_rec(&pcur);
- if (!btr_pcur_is_on_user_rec(&pcur)
- || rec_get_deleted_flag(rec, 0)) {
+ if (!btr_pcur_is_on_user_rec(&pcur)) {
/* Not found */
err_exit:
btr_pcur_close(&pcur);
@@ -2244,21 +2352,22 @@ err_exit:
DBUG_RETURN(NULL);
}
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_TABLES__NAME, &len);
-
/* Check if the table name in record is the searched one */
- if (len != name.size() || memcmp(name.data(), field, len)) {
+ if (rec_get_field_start_offs(rec, 1) != name.size()
+ || memcmp(name.data(), rec, name.size())) {
goto err_exit;
}
dict_table_t* table;
- if (const char* err_msg = dict_load_table_low(name, rec, &table)) {
+ if (const char* err_msg = dict_load_table_low(&mtr, rec, &table)) {
if (err_msg != dict_load_table_flags) {
ib::error() << err_msg;
}
goto err_exit;
}
+ if (!table) {
+ goto err_exit;
+ }
btr_pcur_close(&pcur);
mtr_commit(&mtr);
@@ -2291,7 +2400,7 @@ err_exit:
err = dict_load_indexes(table, heap, index_load_err);
- if (err == DB_INDEX_CORRUPT) {
+ if (err == DB_INDEX_CORRUPT || !UT_LIST_GET_FIRST(table->indexes)) {
/* Refuse to load the table if the table has a corrupted
cluster index */
ut_ad(index_load_err != DICT_ERR_IGNORE_DROP);
@@ -2362,9 +2471,8 @@ corrupted:
if (!table->is_readable()) {
/* Don't attempt to load the indexes from disk. */
} else if (err == DB_SUCCESS) {
- err = dict_load_foreigns(table->name.m_name, NULL,
- true, true,
- ignore_err, fk_tables);
+ err = dict_load_foreigns(table->name.m_name, nullptr,
+ 0, true, ignore_err, fk_tables);
if (err != DB_SUCCESS) {
ib::warn() << "Load table " << table->name
@@ -2486,11 +2594,16 @@ check_rec:
/* Check if the table id in record is the one searched for */
if (table_id == mach_read_from_8(field)) {
- if (rec_get_deleted_flag(rec, 0)) {
- /* Until purge has completed, there
- may be delete-marked duplicate records
- for the same SYS_TABLES.ID, but different
- SYS_TABLES.NAME. */
+ field = rec_get_nth_field_old(rec,
+ DICT_FLD__SYS_TABLE_IDS__NAME, &len);
+ table = dict_sys.load_table(
+ {reinterpret_cast<const char*>(field),
+ len}, ignore_err);
+ if (table && table->id != table_id) {
+ ut_ad(rec_get_deleted_flag(rec, 0));
+ table = nullptr;
+ }
+ if (!table) {
while (btr_pcur_move_to_next(&pcur, &mtr)) {
rec = btr_pcur_get_rec(&pcur);
@@ -2498,13 +2611,6 @@ check_rec:
goto check_rec;
}
}
- } else {
- /* Now we get the table name from the record */
- field = rec_get_nth_field_old(rec,
- DICT_FLD__SYS_TABLE_IDS__NAME, &len);
- table = dict_sys.load_table(
- {reinterpret_cast<const char*>(field),
- len}, ignore_err);
}
}
}
@@ -2545,11 +2651,7 @@ Members that will be created and set by this function:
foreign->foreign_col_names[i]
foreign->referenced_col_names[i]
(for i=0..foreign->n_fields-1) */
-static
-void
-dict_load_foreign_cols(
-/*===================*/
- dict_foreign_t* foreign)/*!< in/out: foreign constraint object */
+static void dict_load_foreign_cols(dict_foreign_t *foreign, trx_id_t trx_id)
{
btr_pcur_t pcur;
dtuple_t* tuple;
@@ -2584,14 +2686,47 @@ dict_load_foreign_cols(
dfield_set_data(dfield, foreign->id, id_len);
dict_index_copy_types(tuple, sys_index, 1);
+ mem_heap_t* heap = nullptr;
btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
for (i = 0; i < foreign->n_fields; i++) {
+retry:
+ ut_a(btr_pcur_is_on_user_rec(&pcur));
rec = btr_pcur_get_rec(&pcur);
- ut_a(btr_pcur_is_on_user_rec(&pcur));
- ut_a(!rec_get_deleted_flag(rec, 0));
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_FOREIGN_COLS__DB_TRX_ID, &len);
+ ut_a(len == DATA_TRX_ID_LEN);
+
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_empty(heap);
+ }
+
+ const trx_id_t id = trx_read_trx_id(field);
+ if (!id) {
+ } else if (id != trx_id && trx_sys.find(nullptr, id, false)) {
+ const auto savepoint = mtr.get_savepoint();
+ rec_offs* offsets = rec_get_offsets(
+ rec, sys_index, nullptr, true, ULINT_UNDEFINED,
+ &heap);
+ const rec_t* old_vers;
+ row_vers_build_for_semi_consistent_read(
+ nullptr, rec, &mtr, sys_index, &offsets, &heap,
+ heap, &old_vers, nullptr);
+ mtr.rollback_to_savepoint(savepoint);
+ rec = old_vers;
+ if (!rec || rec_get_deleted_flag(rec, 0)) {
+ goto next;
+ }
+ }
+
+ if (rec_get_deleted_flag(rec, 0)) {
+ ut_ad(id);
+next:
+ btr_pcur_move_to_next_user_rec(&pcur, &mtr);
+ goto retry;
+ }
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_COLS__ID, &len);
@@ -2652,23 +2787,26 @@ dict_load_foreign_cols(
}
btr_pcur_close(&pcur);
- mtr_commit(&mtr);
+ mtr.commit();
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
}
/***********************************************************************//**
Loads a foreign key constraint to the dictionary cache. If the referenced
table is not yet loaded, it is added in the output parameter (fk_tables).
@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull(1), warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
dberr_t
dict_load_foreign(
/*==============*/
- const char* id,
- /*!< in: foreign constraint id, must be
- '\0'-terminated */
+ const char* table_name, /*!< in: table name */
const char** col_names,
/*!< in: column names, or NULL
to use foreign->foreign_table->col_names */
+ trx_id_t trx_id,
+ /*!< in: current transaction id, or 0 */
bool check_recursive,
/*!< in: whether to record the foreign table
parent count to avoid unlimited recursive
@@ -2676,6 +2814,8 @@ dict_load_foreign(
bool check_charsets,
/*!< in: whether to check charset
compatibility */
+ span<const char> id,
+ /*!< in: foreign constraint id */
dict_err_ignore_t ignore_err,
/*!< in: error to be ignored */
dict_names_t& fk_tables)
@@ -2688,81 +2828,82 @@ dict_load_foreign(
{
dict_foreign_t* foreign;
btr_pcur_t pcur;
- dtuple_t* tuple;
- mem_heap_t* heap2;
- dfield_t* dfield;
- const rec_t* rec;
const byte* field;
ulint len;
mtr_t mtr;
dict_table_t* for_table;
dict_table_t* ref_table;
- size_t id_len;
+ byte dtuple_buf[DTUPLE_EST_ALLOC(1)];
DBUG_ENTER("dict_load_foreign");
DBUG_PRINT("dict_load_foreign",
- ("id: '%s', check_recursive: %d", id, check_recursive));
+ ("id: '%.*s', check_recursive: %d",
+ int(id.size()), id.data(), check_recursive));
ut_ad(dict_sys.locked());
- id_len = strlen(id);
-
- heap2 = mem_heap_create(1000);
-
- mtr_start(&mtr);
-
dict_index_t* sys_index = dict_sys.sys_foreign->indexes.start;
ut_ad(!dict_sys.sys_foreign->not_redundant());
- tuple = dtuple_create(heap2, 1);
- dfield = dtuple_get_nth_field(tuple, 0);
-
- dfield_set_data(dfield, id, id_len);
+ dtuple_t* tuple = dtuple_create_from_mem(dtuple_buf, sizeof dtuple_buf,
+ 1, 0);
+ dfield_set_data(dtuple_get_nth_field(tuple, 0), id.data(), id.size());
dict_index_copy_types(tuple, sys_index, 1);
+ mtr.start();
+
btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
- rec = btr_pcur_get_rec(&pcur);
-
- if (!btr_pcur_is_on_user_rec(&pcur)
- || rec_get_deleted_flag(rec, 0)) {
- /* Not found */
-
- ib::error() << "Cannot load foreign constraint " << id
- << ": could not find the relevant record in "
- "SYS_FOREIGN";
+ const rec_t* rec = btr_pcur_get_rec(&pcur);
+ mem_heap_t* heap = nullptr;
+ if (!btr_pcur_is_on_user_rec(&pcur)) {
+ not_found:
btr_pcur_close(&pcur);
- mtr_commit(&mtr);
- mem_heap_free(heap2);
-
- DBUG_RETURN(DB_ERROR);
+ mtr.commit();
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
+ DBUG_RETURN(DB_NOT_FOUND);
}
+ static_assert(DICT_FLD__SYS_FOREIGN__ID == 0, "compatibility");
field = rec_get_nth_field_old(rec, DICT_FLD__SYS_FOREIGN__ID, &len);
/* Check if the id in record is the searched one */
- if (len != id_len || memcmp(id, field, len)) {
- {
- ib::error err;
- err << "Cannot load foreign constraint " << id
- << ": found ";
- err.write(field, len);
- err << " instead in SYS_FOREIGN";
- }
+ if (len != id.size() || memcmp(id.data(), field, id.size())) {
+ goto not_found;
+ }
- btr_pcur_close(&pcur);
- mtr_commit(&mtr);
- mem_heap_free(heap2);
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_FOREIGN__DB_TRX_ID, &len);
+ ut_a(len == DATA_TRX_ID_LEN);
+
+ const trx_id_t tid = trx_read_trx_id(field);
+
+ if (tid && tid != trx_id && trx_sys.find(nullptr, tid, false)) {
+ const auto savepoint = mtr.get_savepoint();
+ rec_offs* offsets = rec_get_offsets(
+ rec, sys_index, nullptr, true, ULINT_UNDEFINED, &heap);
+ const rec_t* old_vers;
+ row_vers_build_for_semi_consistent_read(
+ nullptr, rec, &mtr, sys_index, &offsets, &heap,
+ heap, &old_vers, nullptr);
+ mtr.rollback_to_savepoint(savepoint);
+ rec = old_vers;
+ if (!rec) {
+ goto not_found;
+ }
+ }
- DBUG_RETURN(DB_ERROR);
+ if (rec_get_deleted_flag(rec, 0)) {
+ ut_ad(tid);
+ goto not_found;
}
/* Read the table names and the number of columns associated
with the constraint */
- mem_heap_free(heap2);
-
foreign = dict_mem_foreign_create();
uint32_t n_fields_and_type = mach_read_from_4(
@@ -2776,7 +2917,7 @@ dict_load_foreign(
foreign->type = (n_fields_and_type >> 24) & ((1U << 6) - 1);
foreign->n_fields = n_fields_and_type & dict_index_t::MAX_N_FIELDS;
- foreign->id = mem_heap_strdupl(foreign->heap, id, id_len);
+ foreign->id = mem_heap_strdupl(foreign->heap, id.data(), id.size());
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN__FOR_NAME, &len);
@@ -2785,18 +2926,34 @@ dict_load_foreign(
foreign->heap, (char*) field, len);
dict_mem_foreign_table_name_lookup_set(foreign, TRUE);
- const ulint foreign_table_name_len = len;
+ const size_t foreign_table_name_len = len;
+ const size_t table_name_len = strlen(table_name);
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN__REF_NAME, &len);
+
+ if (!my_charset_latin1.strnncoll(table_name, table_name_len,
+ foreign->foreign_table_name,
+ foreign_table_name_len)) {
+ } else if (!check_recursive
+ && !my_charset_latin1.strnncoll(table_name, table_name_len,
+ (const char*) field, len)) {
+ } else {
+ dict_foreign_free(foreign);
+ goto not_found;
+ }
+
foreign->referenced_table_name = mem_heap_strdupl(
- foreign->heap, (char*) field, len);
+ foreign->heap, (const char*) field, len);
dict_mem_referenced_table_name_lookup_set(foreign, TRUE);
btr_pcur_close(&pcur);
- mtr_commit(&mtr);
+ mtr.commit();
+ if (UNIV_LIKELY_NULL(heap)) {
+ mem_heap_free(heap);
+ }
- dict_load_foreign_cols(foreign);
+ dict_load_foreign_cols(foreign, trx_id);
ref_table = dict_sys.find_table(
{foreign->referenced_table_name_lookup,
@@ -2851,7 +3008,8 @@ dict_load_foreigns(
const char* table_name, /*!< in: table name */
const char** col_names, /*!< in: column names, or NULL
to use table->col_names */
- bool check_recursive,/*!< in: Whether to check
+ trx_id_t trx_id, /*!< in: DDL transaction id,
+ or 0 to check
recursive load of tables
chained by FK */
bool check_charsets, /*!< in: whether to check
@@ -2868,10 +3026,6 @@ dict_load_foreigns(
btr_pcur_t pcur;
dtuple_t* tuple;
dfield_t* dfield;
- const rec_t* rec;
- const byte* field;
- ulint len;
- dberr_t err;
mtr_t mtr;
DBUG_ENTER("dict_load_foreigns");
@@ -2888,12 +3042,14 @@ dict_load_foreigns(
}
ut_ad(!dict_sys.sys_foreign->not_redundant());
- mtr_start(&mtr);
dict_index_t *sec_index = dict_table_get_next_index(
dict_table_get_first_index(dict_sys.sys_foreign));
ut_ad(!strcmp(sec_index->fields[0].name, "FOR_NAME"));
+ bool check_recursive = !trx_id;
+
start_load:
+ mtr.start();
tuple = dtuple_create_from_mem(tuple_buf, sizeof(tuple_buf), 1, 0);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -2904,7 +3060,9 @@ start_load:
btr_pcur_open_on_user_rec(sec_index, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
loop:
- rec = btr_pcur_get_rec(&pcur);
+ const rec_t* rec = btr_pcur_get_rec(&pcur);
+ const byte* field;
+ const auto maybe_deleted = rec_get_deleted_flag(rec, 0);
if (!btr_pcur_is_on_user_rec(&pcur)) {
/* End of index */
@@ -2915,6 +3073,7 @@ loop:
/* Now we have the record in the secondary index containing a table
name and a foreign constraint ID */
+ ulint len;
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_FOR_NAME__NAME, &len);
@@ -2938,10 +3097,6 @@ loop:
may not be the same case, but the previous comparison showed that they
match with no-case. */
- if (rec_get_deleted_flag(rec, 0)) {
- goto next_rec;
- }
-
if (lower_case_table_names != 2 && memcmp(field, table_name, len)) {
goto next_rec;
}
@@ -2956,26 +3111,33 @@ loop:
ut_a(len <= MAX_TABLE_NAME_LEN);
memcpy(fk_id, field, len);
- fk_id[len] = '\0';
btr_pcur_store_position(&pcur, &mtr);
- mtr_commit(&mtr);
+ mtr.commit();
/* Load the foreign constraint definition to the dictionary cache */
- err = dict_load_foreign(fk_id, col_names,
- check_recursive, check_charsets, ignore_err,
- fk_tables);
-
- if (err != DB_SUCCESS) {
+ switch (dberr_t err
+ = dict_load_foreign(table_name, col_names, trx_id,
+ check_recursive, check_charsets,
+ {fk_id, len}, ignore_err, fk_tables)) {
+ case DB_SUCCESS:
+ break;
+ case DB_NOT_FOUND:
+ if (maybe_deleted) {
+ break;
+ }
+ sql_print_error("InnoDB: Cannot load foreign constraint %.*s:"
+ " could not find the relevant record in "
+ "SYS_FOREIGN", int(len), fk_id);
+ /* fall through */
+ default:
btr_pcur_close(&pcur);
-
DBUG_RETURN(err);
}
- mtr_start(&mtr);
-
+ mtr.start();
pcur.restore_position(BTR_SEARCH_LEAF, &mtr);
next_rec:
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
@@ -2988,15 +3150,11 @@ load_next_index:
sec_index = dict_table_get_next_index(sec_index);
- if (sec_index != NULL) {
-
- mtr_start(&mtr);
-
+ if (sec_index) {
/* Switch to scan index on REF_NAME, fk_max_recusive_level
already been updated when scanning FOR_NAME index, no need to
update again */
- check_recursive = FALSE;
-
+ check_recursive = false;
goto start_load;
}
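
The dict0load.cc hunks above all repeat one pattern: when the DB_TRX_ID of a data dictionary record belongs to a still-active transaction, the latest committed version of the record is rebuilt, and the page latches acquired while rebuilding it are released again via the new mtr_t::rollback_to_savepoint(). A minimal sketch of that shared pattern, written as a hypothetical helper (the function name and the trx_id_field parameter are inventions for illustration; the individual calls mirror the hunks above):

/* Hypothetical helper, not part of this patch: return the record version
to load, or nullptr if only an uncommitted version exists. */
static const rec_t*
dict_rec_read_committed(const rec_t* rec, dict_index_t* index,
			ulint trx_id_field, mtr_t* mtr, mem_heap_t** heap)
{
	ulint		len;
	const byte*	field = rec_get_nth_field_old(rec, trx_id_field, &len);
	ut_ad(len == DATA_TRX_ID_LEN);
	const trx_id_t	id = trx_read_trx_id(field);

	if (!id || !trx_sys.find(nullptr, id, false)) {
		/* Not written by an active transaction: use rec as is. */
		return rec;
	}

	if (!*heap) {
		*heap = mem_heap_create(1024);
	}

	/* Build the latest committed version, then release the latches
	that were acquired while doing so. */
	const auto	savepoint = mtr->get_savepoint();
	rec_offs*	offsets = rec_get_offsets(rec, index, nullptr, true,
						  ULINT_UNDEFINED, heap);
	const rec_t*	old_vers;
	row_vers_build_for_semi_consistent_read(nullptr, rec, mtr, index,
						&offsets, heap, *heap,
						&old_vers, nullptr);
	mtr->rollback_to_savepoint(savepoint);
	return old_vers;
}
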
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 329d2bdd179..b9f653b4f09 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1718,9 +1718,10 @@ char* fil_make_filepath(const char *path, const fil_space_t::name_type &name,
if (path != NULL) {
memcpy(full_name, path, path_len);
len = path_len;
- full_name[len] = '\0';
}
+ full_name[len] = '\0';
+
if (trim_name) {
/* Find the offset of the last DIR separator and set it to
null in order to strip off the old basename from this path. */
@@ -2078,7 +2079,7 @@ a remote tablespace is found it will be changed to true.
If the fix_dict boolean is set, then it is safe to use an internal SQL
statement to update the dictionary tables if they are incorrect.
-@param[in] validate true if we should validate the tablespace
+@param[in] validate 0=maybe missing, 1=do not validate, 2=validate
@param[in] purpose FIL_TYPE_TABLESPACE or FIL_TYPE_TEMPORARY
@param[in] id tablespace ID
@param[in] flags expected FSP_SPACE_FLAGS
@@ -2090,7 +2091,7 @@ If file-per-table, it is the table name in the databasename/tablename format
@retval NULL if the tablespace could not be opened */
fil_space_t*
fil_ibd_open(
- bool validate,
+ unsigned validate,
fil_type_t purpose,
uint32_t id,
uint32_t flags,
@@ -2102,7 +2103,7 @@ fil_ibd_open(
fil_space_t* space = fil_space_get_by_id(id);
mysql_mutex_unlock(&fil_system.mutex);
if (space) {
- if (validate && !srv_read_only_mode) {
+ if (validate > 1 && !srv_read_only_mode) {
fsp_flags_try_adjust(space,
flags & ~FSP_FLAGS_MEM_MASK);
}
@@ -2139,8 +2140,9 @@ func_exit:
/* Look for a filepath embedded in an ISL where the default file
would be. */
- if (df_remote.open_link_file(name)) {
- validate = true;
+ bool must_validate = df_remote.open_link_file(name);
+
+ if (must_validate) {
if (df_remote.open_read_only(true) == DB_SUCCESS) {
ut_ad(df_remote.is_open());
++tablespaces_found;
@@ -2153,15 +2155,12 @@ func_exit:
<< df_remote.filepath()
<< "' could not be opened read-only.";
}
- }
-
- /* Attempt to open the tablespace at the dictionary filepath. */
- if (path_in) {
- if (!df_default.same_filepath_as(path_in)) {
- /* Dict path is not the default path. Always validate
- remote files. If default is opened, it was moved. */
- validate = true;
- }
+ } else if (path_in && !df_default.same_filepath_as(path_in)) {
+ /* Dict path is not the default path. Always validate
+ remote files. If default is opened, it was moved. */
+ must_validate = true;
+ } else if (validate > 1) {
+ must_validate = true;
}
/* Always look for a file at the default location. But don't log
@@ -2173,7 +2172,7 @@ func_exit:
the first server startup. The tables ought to be dropped by
drop_garbage_tables_after_restore() a little later. */
- const bool strict = !tablespaces_found
+ const bool strict = validate && !tablespaces_found
&& !(srv_operation == SRV_OPERATION_NORMAL
&& srv_start_after_restore
&& srv_force_recovery < SRV_FORCE_NO_BACKGROUND
@@ -2199,7 +2198,7 @@ func_exit:
normal, we only found 1. */
/* For encrypted tablespace, we need to check the
encryption in header of first page. */
- if (!validate && tablespaces_found == 1) {
+ if (!must_validate && tablespaces_found == 1) {
goto skip_validate;
}
@@ -2215,7 +2214,8 @@ func_exit:
First, bail out if no tablespace files were found. */
if (valid_tablespaces_found == 0) {
if (!strict
- && IF_WIN(GetLastError() == ERROR_FILE_NOT_FOUND,
+ && IF_WIN(GetLastError() == ERROR_FILE_NOT_FOUND
+ || GetLastError() == ERROR_PATH_NOT_FOUND,
errno == ENOENT)) {
/* Suppress a message about a missing file. */
goto corrupted;
@@ -2228,7 +2228,7 @@ func_exit:
TROUBLESHOOT_DATADICT_MSG);
goto corrupted;
}
- if (!validate) {
+ if (!must_validate) {
goto skip_validate;
}
@@ -2311,7 +2311,7 @@ skip_validate:
df_remote.is_open() ? df_remote.filepath() :
df_default.filepath(), OS_FILE_CLOSED, 0, false, true);
- if (validate && !srv_read_only_mode) {
+ if (must_validate && !srv_read_only_mode) {
df_remote.close();
df_default.close();
if (space->acquire()) {
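
The validate parameter of fil_ibd_open() is now a tri-state rather than a boolean. A hedged sketch of what the three literal values used in this diff stand for (the enum and its names are made up; the patch itself passes plain 0, 1 and 2):

/* Hypothetical names for the values accepted by fil_ibd_open(). */
enum fil_ibd_validate_mode : unsigned {
	/* 0: the tablespace may be missing, e.g. for a delete-marked
	SYS_TABLES record in dict_check_sys_tables(); stay quiet if the
	.ibd file cannot be found */
	FIL_IBD_MAYBE_MISSING = 0,
	/* 1: the file is expected to exist, but the first page need not
	be validated */
	FIL_IBD_NO_VALIDATE = 1,
	/* 2: read and validate the first page, as dict_load_tablespace()
	and row_import_for_mysql() do */
	FIL_IBD_VALIDATE = 2
};
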
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index b77623bc5e1..31c97ffcf42 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -2219,9 +2219,7 @@ fts_trx_table_create(
fts_trx_table_t* ftt;
ftt = static_cast<fts_trx_table_t*>(
- mem_heap_alloc(fts_trx->heap, sizeof(*ftt)));
-
- memset(ftt, 0x0, sizeof(*ftt));
+ mem_heap_zalloc(fts_trx->heap, sizeof *ftt));
ftt->table = table;
ftt->fts_trx = fts_trx;
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index c9e67bde430..60b90aecedc 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -12729,7 +12729,8 @@ int create_table_info_t::create_table(bool create_fk)
if (err == DB_SUCCESS) {
/* Check that also referencing constraints are ok */
dict_names_t fk_tables;
- err = dict_load_foreigns(m_table_name, NULL, false, true,
+ err = dict_load_foreigns(m_table_name, nullptr,
+ m_trx->id, true,
DICT_ERR_IGNORE_NONE, fk_tables);
while (err == DB_SUCCESS && !fk_tables.empty()) {
dict_sys.load_table(
@@ -13181,9 +13182,7 @@ ha_innobase::create(
}
if (error) {
- /* Drop the being-created table before rollback,
- so that rollback can possibly rename back a table
- that could have been renamed before the failed creation. */
+ /* Rollback will drop the being-created table. */
trx_rollback_for_mysql(trx);
row_mysql_unlock_data_dictionary(trx);
} else {
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index d4c31f06616..ab2e837d5f0 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -9749,7 +9749,7 @@ innobase_update_foreign_cache(
dict_names_t fk_tables;
err = dict_load_foreigns(user_table->name.m_name,
- ctx->col_names, false, true,
+ ctx->col_names, 1, true,
DICT_ERR_IGNORE_NONE,
fk_tables);
@@ -9760,7 +9760,7 @@ innobase_update_foreign_cache(
loaded with "foreign_key checks" off,
so let's retry the loading with charset_check is off */
err = dict_load_foreigns(user_table->name.m_name,
- ctx->col_names, false, false,
+ ctx->col_names, 1, false,
DICT_ERR_IGNORE_NONE,
fk_tables);
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 175b07551b0..97f7013465c 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -4808,12 +4808,13 @@ i_s_dict_fill_sys_tables(
/** Convert one SYS_TABLES record to dict_table_t.
@param pcur persistent cursor position on SYS_TABLES record
+@param mtr mini-transaction (nullptr=use the dict_sys cache)
@param rec record to read from (nullptr=use the dict_sys cache)
@param table the converted dict_table_t
@return error message
@retval nullptr on success */
-static const char *i_s_sys_tables_rec(const btr_pcur_t &pcur, const rec_t *rec,
- dict_table_t **table)
+static const char *i_s_sys_tables_rec(const btr_pcur_t &pcur, mtr_t *mtr,
+ const rec_t *rec, dict_table_t **table)
{
static_assert(DICT_FLD__SYS_TABLES__NAME == 0, "compatibility");
size_t len;
@@ -4831,12 +4832,11 @@ static const char *i_s_sys_tables_rec(const btr_pcur_t &pcur, const rec_t *rec,
return "corrupted SYS_TABLES.NAME";
}
- const span<const char>name{reinterpret_cast<const char*>(pcur.old_rec), len};
-
if (rec)
- return dict_load_table_low(name, rec, table);
+ return dict_load_table_low(mtr, rec, table);
- *table= dict_sys.load_table(name);
+ *table= dict_sys.load_table
+ (span<const char>{reinterpret_cast<const char*>(pcur.old_rec), len});
return *table ? nullptr : "Table not found in cache";
}
@@ -4878,7 +4878,7 @@ i_s_sys_tables_fill_table(
/* Create and populate a dict_table_t structure with
information from SYS_TABLES row */
- err_msg = i_s_sys_tables_rec(pcur, rec, &table_rec);
+ err_msg = i_s_sys_tables_rec(pcur, &mtr, rec, &table_rec);
mtr.commit();
dict_sys.unlock();
@@ -5116,7 +5116,8 @@ i_s_sys_tables_fill_table_stats(
mtr.commit();
/* Fetch the dict_table_t structure corresponding to
this SYS_TABLES record */
- err_msg = i_s_sys_tables_rec(pcur, nullptr, &table_rec);
+ err_msg = i_s_sys_tables_rec(pcur, nullptr, nullptr,
+ &table_rec);
if (UNIV_LIKELY(!err_msg)) {
bool evictable = dict_sys.prevent_eviction(table_rec);
diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h
index 43e732263fd..33095eb8dbc 100644
--- a/storage/innobase/include/dict0load.h
+++ b/storage/innobase/include/dict0load.h
@@ -89,7 +89,8 @@ dict_load_foreigns(
const char* table_name, /*!< in: table name */
const char** col_names, /*!< in: column names, or NULL
to use table->col_names */
- bool check_recursive,/*!< in: Whether to check
+ trx_id_t trx_id, /*!< in: DDL transaction id,
+ or 0 to check
recursive load of tables
chained by FK */
bool check_charsets, /*!< in: whether to check
@@ -123,12 +124,12 @@ dict_getnext_system(
/** Load a table definition from a SYS_TABLES record to dict_table_t.
Do not load any columns or indexes.
-@param[in] name Table name
+@param[in,out] mtr mini-transaction
@param[in] rec SYS_TABLES record
@param[out,own] table table, or nullptr
@return error message
@retval nullptr on success */
-const char *dict_load_table_low(const span<const char> &name,
+const char *dict_load_table_low(mtr_t *mtr,
const rec_t *rec, dict_table_t **table)
MY_ATTRIBUTE((nonnull, warn_unused_result));
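
The third argument of dict_load_foreigns() now carries a DDL transaction id instead of the old check_recursive flag. A sketch of the calling convention as used by the callers changed later in this diff (the wrapper function is hypothetical; only dict_load_foreigns() and its arguments are real):

/* Hypothetical wrapper for illustration. */
static dberr_t load_foreigns_example(const char* name, trx_id_t ddl_trx_id,
				     dict_err_ignore_t ignore_err,
				     dict_names_t& fk_tables)
{
	/* ddl_trx_id == 0: normal table load; recursive FK chains are
	checked (the old check_recursive=true).
	ddl_trx_id != 0: SYS_FOREIGN records written by that still-open
	DDL transaction are read as-is, and the recursion check is skipped;
	innobase_update_foreign_cache() passes the literal 1 for this. */
	return dict_load_foreigns(name, nullptr, ddl_trx_id, true,
				  ignore_err, fk_tables);
}
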
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index a05485696f6..8a959402c0b 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -1661,10 +1661,7 @@ file inode probably is much faster (the OS caches them) than accessing
the first page of the file. This boolean may be initially false, but if
a remote tablespace is found it will be changed to true.
-If the fix_dict boolean is set, then it is safe to use an internal SQL
-statement to update the dictionary tables if they are incorrect.
-
-@param[in] validate true if we should validate the tablespace
+@param[in] validate 0=maybe missing, 1=do not validate, 2=validate
@param[in] purpose FIL_TYPE_TABLESPACE or FIL_TYPE_TEMPORARY
@param[in] id tablespace ID
@param[in] flags expected FSP_SPACE_FLAGS
@@ -1676,7 +1673,7 @@ If file-per-table, it is the table name in the databasename/tablename format
@retval NULL if the tablespace could not be opened */
fil_space_t*
fil_ibd_open(
- bool validate,
+ unsigned validate,
fil_type_t purpose,
uint32_t id,
uint32_t flags,
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index ab76980af16..4d9ad3ddfd8 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -268,12 +268,12 @@ public:
new query step is started */
/** latest completed checkpoint (protected by latch.wr_lock()) */
Atomic_relaxed<lsn_t> last_checkpoint_lsn;
- lsn_t next_checkpoint_lsn;
- /*!< next checkpoint lsn */
+  /** next checkpoint LSN (protected by latch.wr_lock()) */
+ lsn_t next_checkpoint_lsn;
/** next checkpoint number (protected by latch.wr_lock()) */
ulint next_checkpoint_no;
- /** number of pending checkpoint writes */
- ulint n_pending_checkpoint_writes;
+ /** whether a checkpoint is pending */
+ Atomic_relaxed<bool> checkpoint_pending;
/** buffer for checkpoint header */
byte *checkpoint_buf;
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index 3707a693648..d595b58ad6f 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -100,6 +100,15 @@ struct mtr_t {
/** Commit the mini-transaction. */
void commit();
+ /** Release latches till savepoint. To simplify the code only
+ MTR_MEMO_S_LOCK and MTR_MEMO_PAGE_S_FIX slot types are allowed to be
+  released, otherwise it would be necessary to add one more argument in the
+ function to point out what slot types are allowed for rollback, and this
+ would be overengineering as currently the function is used only in one place
+ in the code.
+ @param savepoint savepoint, can be obtained with get_savepoint */
+ void rollback_to_savepoint(ulint savepoint);
+
/** Commit a mini-transaction that is shrinking a tablespace.
@param space tablespace that is being shrunk */
ATTRIBUTE_COLD void commit_shrink(fil_space_t &space);
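
A minimal usage sketch of the new member function (the caller below is hypothetical; row0sel.cc later in this diff uses the same pattern to drop the extra clustered index latches):

/* Hypothetical caller of the new mtr_t savepoint interface. */
static void release_extra_latches_example(mtr_t& mtr)
{
	const ulint savepoint = mtr.get_savepoint();

	/* ... acquire further MTR_MEMO_PAGE_S_FIX / MTR_MEMO_S_LOCK slots,
	for example while fetching a clustered index record ... */

	/* Release only the latches registered after the savepoint; the
	mini-transaction itself stays open. */
	mtr.rollback_to_savepoint(savepoint);
}
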
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index c2ebad91ecd..7faf0ca06bd 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -727,11 +727,9 @@ in the clustered index for instant ADD COLUMN or ALTER TABLE.
@param[in] rec leaf page record
@param[in] index index of the record
@return whether the record is the metadata pseudo-record */
-inline bool rec_is_metadata(const rec_t* rec, const dict_index_t& index)
+inline bool rec_is_metadata(const rec_t *rec, const dict_index_t &index)
{
- bool is = rec_is_metadata(rec, dict_table_is_comp(index.table));
- ut_ad(!is || index.is_instant());
- return is;
+ return rec_is_metadata(rec, index.table->not_redundant());
}
/** Determine if the record is the metadata pseudo-record
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index d8eb5882878..279e8b63468 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -131,7 +131,7 @@ void log_t::create()
max_modified_age_async= 0;
max_checkpoint_age= 0;
next_checkpoint_lsn= 0;
- n_pending_checkpoint_writes= 0;
+ checkpoint_pending= false;
buf_free= 0;
@@ -914,22 +914,6 @@ wait_suspend_loop:
buf_flush_buffer_pool();
}
- if (log_sys.is_initialised()) {
- log_sys.latch.rd_lock(SRW_LOCK_CALL);
- const ulint n_write = log_sys.n_pending_checkpoint_writes;
- log_sys.latch.rd_unlock();
-
- if (n_write) {
- if (srv_print_verbose_log && count > 600) {
- sql_print_information(
- "InnoDB: Pending checkpoint writes: "
- ULINTPF, n_write);
- count = 0;
- }
- goto loop;
- }
- }
-
if (srv_fast_shutdown == 2 || !srv_was_started) {
if (!srv_read_only_mode && srv_was_started) {
ib::info() << "Executing innodb_fast_shutdown=2."
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 518814f25d8..c53e4dc81eb 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -296,6 +296,50 @@ struct ReleaseAll {
}
};
+/** Stops iteration if savepoint is reached */
+template <typename Functor> struct TillSavepoint
+{
+
+ /** Constructor
+ @param[in] functor functor which is called if savepoint is not reached
+ @param[in] savepoint savepoint value to rollback
+ @param[in] used current position in slots container */
+ TillSavepoint(const Functor &functor, ulint savepoint, ulint used)
+ : functor(functor),
+ m_slots_count((used - savepoint) / sizeof(mtr_memo_slot_t))
+ {
+ ut_ad(savepoint);
+ ut_ad(used >= savepoint);
+ }
+
+ /** @return true if savepoint is not reached, false otherwise */
+ bool operator()(mtr_memo_slot_t *slot)
+ {
+#ifdef UNIV_DEBUG
+ /** This check is added because the code is invoked only from
+ row_search_mvcc() to release latches acquired during clustered index search
+ for secondary index record. To make it more universal we could add one more
+ member in this functor for debug build to pass only certain slot types,
+ but this is currently not necessary. */
+ switch (slot->type)
+ {
+ case MTR_MEMO_S_LOCK:
+ case MTR_MEMO_PAGE_S_FIX:
+ break;
+ default:
+ ut_a(false);
+ }
+#endif
+ return m_slots_count-- && functor(slot);
+ }
+
+private:
+ /** functor to invoke */
+ const Functor &functor;
+ /** slots count left till savepoint */
+ ulint m_slots_count;
+};
+
#ifdef UNIV_DEBUG
/** Check that all slots have been handled. */
struct DebugCheck {
@@ -488,6 +532,21 @@ void mtr_t::commit()
release_resources();
}
+/** Release latches till savepoint. To simplify the code only
+MTR_MEMO_S_LOCK and MTR_MEMO_PAGE_S_FIX slot types are allowed to be
+released, otherwise it would be necessary to add one more argument in the
+function to point out what slot types are allowed for rollback, and this
+would be overengineering as currently the function is used only in one place
+in the code.
+@param savepoint savepoint, can be obtained with get_savepoint */
+void mtr_t::rollback_to_savepoint(ulint savepoint)
+{
+ Iterate<TillSavepoint<ReleaseLatches>> iteration(
+ TillSavepoint<ReleaseLatches>(ReleaseLatches(), savepoint,
+ get_savepoint()));
+ m_memo.for_each_block_in_reverse(iteration);
+}
+
/** Shrink a tablespace. */
struct Shrink
{
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index 902f3f2d5ca..bd572372aca 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2021, MariaDB Corporation.
+Copyright (c) 2017, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -478,7 +478,7 @@ rec_offs_make_valid(
{
const bool is_alter_metadata = leaf
&& rec_is_alter_metadata(rec, *index);
- ut_ad(is_alter_metadata
+ ut_ad((leaf && rec_is_metadata(rec, *index))
|| index->is_dummy || index->is_ibuf()
|| (leaf
? rec_offs_n_fields(offsets)
@@ -572,7 +572,8 @@ rec_offs_validate(
}
/* index->n_def == 0 for dummy indexes if !comp */
ut_ad(!comp || index->n_def);
- ut_ad(!index->n_def || i <= max_n_fields);
+ ut_ad(!index->n_def || i <= max_n_fields
+ || rec_is_metadata(rec, *index));
}
while (i--) {
ulint curr = get_value(rec_offs_base(offsets)[1 + i]);
@@ -897,9 +898,7 @@ rec_get_offsets_func(
ut_ad(!is_user_rec || !n_core || index->is_dummy
|| dict_index_is_ibuf(index)
|| n == n_fields /* btr_pcur_restore_position() */
- || (n + (index->id == DICT_INDEXES_ID)
- >= n_core && n <= index->n_fields
- + unsigned(rec_is_alter_metadata(rec, false))));
+ || (n + (index->id == DICT_INDEXES_ID) >= n_core));
if (is_user_rec && n_core && n < index->n_fields) {
ut_ad(!index->is_dummy);
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index a45cc3946b4..5eedb6a0ea7 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -4497,7 +4497,7 @@ row_import_for_mysql(
fil_space_t::set_imported() to declare it a persistent tablespace. */
table->space = fil_ibd_open(
- true, FIL_TYPE_IMPORT, table->space_id,
+ 2, FIL_TYPE_IMPORT, table->space_id,
dict_tf_to_fsp_flags(table->flags), name, filepath, &err);
ut_ad((table->space == NULL) == (err != DB_SUCCESS));
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index f064f39f7df..db586e8f266 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2912,7 +2912,7 @@ row_rename_table_for_mysql(
dict_names_t fk_tables;
err = dict_load_foreigns(
- new_name, NULL, false,
+ new_name, nullptr, trx->id,
!old_is_tmp || trx->check_foreigns,
use_fk
? DICT_ERR_IGNORE_NONE
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 19870906df8..4cd1c3a4d26 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, 2021, MariaDB Corporation.
+Copyright (c) 2018, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -531,7 +531,11 @@ row_build_low(
continue;
}
- ut_ad(ind_field < &index->fields[index->n_fields]);
+ if (UNIV_UNLIKELY(ind_field
+ >= &index->fields[index->n_fields])) {
+ ut_ad(rec_is_metadata(rec, *index));
+ continue;
+ }
const dict_col_t* col = dict_field_get_col(ind_field);
@@ -745,11 +749,15 @@ row_rec_to_index_entry_impl(
if (mblob == 2) {
ut_ad(info_bits == REC_INFO_METADATA_ALTER
|| info_bits == REC_INFO_METADATA_ADD);
- ut_ad(rec_len <= ulint(index->n_fields + got));
if (pad) {
+ ut_ad(rec_len <= ulint(index->n_fields + got));
rec_len = ulint(index->n_fields)
+ (info_bits == REC_INFO_METADATA_ALTER);
- } else if (!got && info_bits == REC_INFO_METADATA_ALTER) {
+ } else if (got) {
+ rec_len = std::min(rec_len,
+ ulint(index->n_fields + got));
+ } else if (info_bits == REC_INFO_METADATA_ALTER) {
+ ut_ad(rec_len <= index->n_fields);
rec_len++;
}
} else {
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index a599c387e60..52f9efa3957 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
record with the same ordering prefix in the B-tree index
@param[in] latch_mode latch mode wished in restoration
@param[in] pcur cursor whose position has been stored
@param[in] moves_up true if the cursor moves up in the index
-@param[in] mtr mtr; CAUTION: may commit mtr temporarily!
-@param[in] select_lock_type select lock type
+@param[in,out] mtr mtr; CAUTION: may commit mtr temporarily!
@return true if we may need to process the record the cursor is now
positioned on (i.e. we should not go to the next record yet) */
static bool sel_restore_position_for_mysql(bool *same_user_rec,
ulint latch_mode, btr_pcur_t *pcur,
- bool moves_up, mtr_t *mtr,
- lock_mode select_lock_type)
+ bool moves_up, mtr_t *mtr)
{
auto status = pcur->restore_position(latch_mode, mtr);
@@ -3631,8 +3629,7 @@ static bool sel_restore_position_for_mysql(bool *same_user_rec,
switch (pcur->rel_pos) {
case BTR_PCUR_ON:
if (!*same_user_rec && moves_up) {
- if (status == btr_pcur_t::SAME_UNIQ
- && select_lock_type != LOCK_NONE)
+ if (status == btr_pcur_t::SAME_UNIQ)
return true;
next:
if (btr_pcur_move_to_next(pcur, mtr)
@@ -4325,7 +4322,7 @@ row_search_mvcc(
const rec_t* clust_rec;
Row_sel_get_clust_rec_for_mysql row_sel_get_clust_rec_for_mysql;
ibool unique_search = FALSE;
- ibool mtr_has_extra_clust_latch = FALSE;
+ ulint mtr_extra_clust_savepoint = 0;
bool moves_up = false;
/* if the returned record was locked and we did a semi-consistent
read (fetch the newest committed version), then this is set to
@@ -4697,7 +4694,7 @@ wait_table_again:
bool need_to_process = sel_restore_position_for_mysql(
&same_user_rec, BTR_SEARCH_LEAF,
- pcur, moves_up, &mtr, prebuilt->select_lock_type);
+ pcur, moves_up, &mtr);
if (UNIV_UNLIKELY(need_to_process)) {
if (UNIV_UNLIKELY(prebuilt->row_read_type
@@ -5445,7 +5442,7 @@ requires_clust_rec:
/* It was a non-clustered index and we must fetch also the
clustered index record */
- mtr_has_extra_clust_latch = TRUE;
+ mtr_extra_clust_savepoint = mtr.get_savepoint();
ut_ad(!vrow);
/* The following call returns 'offsets' associated with
@@ -5744,27 +5741,15 @@ next_rec:
/* No need to do store/restore for R-tree */
mtr.commit();
mtr.start();
- mtr_has_extra_clust_latch = FALSE;
- } else if (mtr_has_extra_clust_latch) {
- /* If we have extra cluster latch, we must commit
- mtr if we are moving to the next non-clustered
+ mtr_extra_clust_savepoint = 0;
+ } else if (mtr_extra_clust_savepoint) {
+ /* We must release any clustered index latches
+ if we are moving to the next non-clustered
index record, because we could break the latching
order if we accessed a different clustered
index page right away without releasing the previous one. */
-
- btr_pcur_store_position(pcur, &mtr);
- mtr.commit();
- mtr_has_extra_clust_latch = FALSE;
-
- mtr.start();
-
- if (sel_restore_position_for_mysql(&same_user_rec,
- BTR_SEARCH_LEAF,
- pcur, moves_up, &mtr,
- prebuilt->select_lock_type)
- ) {
- goto rec_loop;
- }
+ mtr.rollback_to_savepoint(mtr_extra_clust_savepoint);
+ mtr_extra_clust_savepoint = 0;
}
if (moves_up) {
@@ -5824,7 +5809,7 @@ page_read_error:
lock_table_wait:
mtr.commit();
- mtr_has_extra_clust_latch = FALSE;
+ mtr_extra_clust_savepoint = 0;
trx->error_state = err;
thr->lock_state = QUE_THR_LOCK_ROW;
@@ -5846,7 +5831,7 @@ lock_table_wait:
if (!dict_index_is_spatial(index)) {
sel_restore_position_for_mysql(
&same_user_rec, BTR_SEARCH_LEAF, pcur,
- moves_up, &mtr, prebuilt->select_lock_type);
+ moves_up, &mtr);
}
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
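
The row_search_mvcc() hunks above replace the commit/store-position/restart dance with a mini-transaction savepoint: mtr.get_savepoint() is taken before the clustered record is fetched, and mtr.rollback_to_savepoint() later releases only the latches acquired after that point, so the secondary-index cursor never has to be repositioned. A sketch of the savepoint idea with hypothetical types (this is not the mtr_t interface):

#include <cstddef>
#include <functional>
#include <vector>

class LatchStack
{
public:
  using Release = std::function<void()>;

  std::size_t get_savepoint() const { return held_.size(); }

  void acquire(Release release_fn) { held_.push_back(std::move(release_fn)); }

  // Release everything acquired after the savepoint, newest first,
  // leaving the older latches untouched.
  void rollback_to_savepoint(std::size_t savepoint)
  {
    while (held_.size() > savepoint)
    {
      held_.back()();                // run the release action
      held_.pop_back();
    }
  }

private:
  std::vector<Release> held_;
};
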
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index cb908c9de35..01a2c5cea62 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1045,13 +1045,13 @@ dberr_t srv_start(bool create_new_db)
recv_sys.create();
lock_sys.create(srv_lock_table_size);
+ srv_startup_is_before_trx_rollback_phase = true;
+
if (!srv_read_only_mode) {
buf_flush_page_cleaner_init();
ut_ad(buf_page_cleaner_is_active);
}
- srv_startup_is_before_trx_rollback_phase = true;
-
/* Check if undo tablespaces and redo log files exist before creating
a new system tablespace */
if (create_new_db) {
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 981f32370b3..08579bd233f 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -291,9 +291,9 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags,
#ifndef WITH_S3_STORAGE_ENGINE
DBUG_ASSERT(!s3);
-#endif /* WITH_S3_STORAGE_ENGINE */
-
+#else
if (!s3)
+#endif /* WITH_S3_STORAGE_ENGINE */
{
realpath_err= my_realpath(name_buff, fn_format(org_name, name, "",
MARIA_NAME_IEXT,
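
The maria_open() hunk above reshuffles the preprocessor so that, without the S3 engine, the block is entered unconditionally behind an assertion, while with S3 the same block sits behind the run-time check. A sketch of that conditional-compilation pattern, using an invented feature macro:

#include <cassert>

struct Handle { bool remote; };

void open_local_part(const Handle& h)
{
#ifndef WITH_REMOTE_BACKEND
  assert(!h.remote);   /* feature compiled out: remote handles cannot exist */
#else
  if (!h.remote)       /* feature compiled in: only local handles take this path */
#endif
  {
    /* ... resolve the local path and open the file ... */
  }
}
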
diff --git a/storage/perfschema/unittest/stub_pfs_global.h b/storage/perfschema/unittest/stub_pfs_global.h
index 4371523b014..6d10e29161d 100644
--- a/storage/perfschema/unittest/stub_pfs_global.h
+++ b/storage/perfschema/unittest/stub_pfs_global.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2008, 2021, Oracle and/or its affiliates.
+ Copyright (c) 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
@@ -24,6 +25,9 @@
#include <my_sys.h>
#include <pfs_global.h>
#include <string.h>
+#ifdef HAVE_MEMALIGN
+# include <malloc.h>
+#endif
bool pfs_initialized= false;
size_t pfs_allocated_memory_size= 0;
@@ -45,7 +49,17 @@ void *pfs_malloc(PFS_builtin_memory_class *klass, size_t size, myf)
if (--stub_alloc_fails_after_count <= 0)
return NULL;
+#ifndef PFS_ALIGNEMENT
void *ptr= malloc(size);
+#elif defined HAVE_MEMALIGN
+ void *ptr= memalign(PFS_ALIGNEMENT, size);
+#elif defined HAVE_ALIGNED_MALLOC
+ void *ptr= _aligned_malloc(size, PFS_ALIGNEMENT);
+#else
+ void *ptr;
+ if (posix_memalign(&ptr, PFS_ALIGNEMENT, size))
+ ptr= NULL;
+#endif
if (ptr != NULL)
memset(ptr, 0, size);
return ptr;
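
pfs_malloc() above picks an aligned allocator depending on what the platform provides: memalign() where available, _aligned_malloc() on Windows, posix_memalign() otherwise, and plain malloc() when no alignment is requested. A self-contained sketch of the same fallback idea (illustrative function name; alignment must be a power of two and, for posix_memalign(), a multiple of sizeof(void*)):

#include <cstdlib>
#include <cstring>
#if defined(_WIN32)
# include <malloc.h>
#endif

void* alloc_aligned_zeroed(std::size_t alignment, std::size_t size)
{
  void* ptr = nullptr;
#if defined(_WIN32)
  ptr = _aligned_malloc(size, alignment);           // release with _aligned_free()
#else
  if (posix_memalign(&ptr, alignment, size) != 0)   // release with free()
    ptr = nullptr;
#endif
  if (ptr != nullptr)
    std::memset(ptr, 0, size);                      // pfs_malloc() zero-fills, too
  return ptr;
}
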
diff --git a/strings/decimal.c b/strings/decimal.c
index b5ac9f4f069..7d4e183ef63 100644
--- a/strings/decimal.c
+++ b/strings/decimal.c
@@ -1136,13 +1136,21 @@ int decimal2ulonglong(const decimal_t *from, ulonglong *to)
for (intg=from->intg; intg > 0; intg-=DIG_PER_DEC1)
{
- ulonglong y=x;
- x=x*DIG_BASE + *buf++;
- if (unlikely(y > ((ulonglong) ULONGLONG_MAX/DIG_BASE) || x < y))
+ /*
+ Check whether the decimal is bigger than any possible integer.
+ Do this before the x*DIG_BASE multiplication to avoid integer
+ overflow.
+ */
+ if (unlikely (
+ x >= ULONGLONG_MAX/DIG_BASE &&
+ (x > ULONGLONG_MAX/DIG_BASE ||
+ *buf > (dec1) (ULONGLONG_MAX%DIG_BASE))))
{
*to=ULONGLONG_MAX;
return E_DEC_OVERFLOW;
}
+
+ x=x*DIG_BASE + *buf++;
}
*to=x;
for (frac=from->frac; unlikely(frac > 0); frac-=DIG_PER_DEC1)
@@ -1159,23 +1167,29 @@ int decimal2longlong(const decimal_t *from, longlong *to)
for (intg=from->intg; intg > 0; intg-=DIG_PER_DEC1)
{
- longlong y=x;
/*
+ Check whether the decimal is less than any possible integer.
+ Do this before the x*DIG_BASE multiplication to avoid integer
+ overflow.
Attention: trick!
we're calculating -|from| instead of |from| here
because |LONGLONG_MIN| > LONGLONG_MAX
- so we can convert -9223372036854775808 correctly
+ so we can convert -9223372036854775808 correctly.
*/
- x=x*DIG_BASE - *buf++;
- if (unlikely(y < (LONGLONG_MIN/DIG_BASE) || x > y))
+ if (unlikely (
+ x <= LONGLONG_MIN/DIG_BASE &&
+ (x < LONGLONG_MIN/DIG_BASE ||
+ *buf > (dec1) (-(LONGLONG_MIN%DIG_BASE)))))
{
/*
- the decimal is bigger than any possible integer
- return border integer depending on the sign
+ the decimal is bigger than any possible integer
+ return border integer depending on the sign
*/
*to= from->sign ? LONGLONG_MIN : LONGLONG_MAX;
return E_DEC_OVERFLOW;
}
+
+ x=x*DIG_BASE - *buf++;
}
/* boundary case: 9223372036854775808 */
if (unlikely(from->sign==0 && x == LONGLONG_MIN))
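
The decimal2ulonglong() and decimal2longlong() changes above move the overflow test in front of the multiplication, because once x*DIG_BASE has wrapped, the result can no longer be told apart from a valid value. A standalone sketch of the unsigned guard (BASE standing in for DIG_BASE; digit_word is assumed to be below BASE, as in the caller):

#include <cstdint>
#include <limits>

constexpr std::uint64_t BASE = 1000000000;   // one decimal word of DIG_PER_DEC1 digits

// Returns false if x*BASE + digit_word would exceed UINT64_MAX,
// otherwise updates x and returns true.
bool accumulate(std::uint64_t& x, std::uint64_t digit_word)
{
  constexpr std::uint64_t MAX = std::numeric_limits<std::uint64_t>::max();
  if (x >= MAX / BASE &&
      (x > MAX / BASE || digit_word > MAX % BASE))
    return false;                 // the next step would overflow
  x = x * BASE + digit_word;      // provably within range now
  return true;
}
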
diff --git a/tpool/aio_liburing.cc b/tpool/aio_liburing.cc
index b8666482193..8192a5b7fed 100644
--- a/tpool/aio_liburing.cc
+++ b/tpool/aio_liburing.cc
@@ -161,8 +161,7 @@ private:
}
io_uring_cqe_seen(&aio->uring_, cqe);
- if (iocb->m_ret_len != iocb->m_len && !iocb->m_err)
- finish_synchronous(iocb);
+ finish_synchronous(iocb);
// If we need to resubmit the IO operation, but the ring is full,
// we will follow the same path as for any other error codes.
diff --git a/tpool/aio_linux.cc b/tpool/aio_linux.cc
index fc6e5b53e1a..5d01c588a88 100644
--- a/tpool/aio_linux.cc
+++ b/tpool/aio_linux.cc
@@ -128,8 +128,7 @@ class aio_linux final : public aio
{
iocb->m_ret_len= event.res;
iocb->m_err= 0;
- if (iocb->m_ret_len != iocb->m_len)
- finish_synchronous(iocb);
+ finish_synchronous(iocb);
}
iocb->m_internal_task.m_func= iocb->m_callback;
iocb->m_internal_task.m_arg= iocb;
diff --git a/tpool/tpool.h b/tpool/tpool.h
index 2c61c2d62b2..87a0122adce 100644
--- a/tpool/tpool.h
+++ b/tpool/tpool.h
@@ -173,7 +173,17 @@ public:
protected:
static void synchronous(aiocb *cb);
/** finish a partial read/write callback synchronously */
- static void finish_synchronous(aiocb *cb);
+ static inline void finish_synchronous(aiocb *cb)
+ {
+ if (!cb->m_err && cb->m_ret_len != cb->m_len)
+ {
+ /* partial read/write */
+ cb->m_buffer= (char *) cb->m_buffer + cb->m_ret_len;
+ cb->m_len-= (unsigned int) cb->m_ret_len;
+ cb->m_offset+= cb->m_ret_len;
+ synchronous(cb);
+ }
+ }
};
class timer
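
finish_synchronous(), now inlined in tpool.h above, continues a partial transfer by advancing the buffer, shrinking the remaining length, moving the file offset and reissuing the request; aio::synchronous() keeps invoking it until the I/O is complete or an error is reported. The same loop expressed against plain POSIX pread(), not the tpool aiocb interface:

#include <cerrno>
#include <cstddef>
#include <sys/types.h>
#include <unistd.h>

// Read exactly len bytes at the given offset.
// Returns 0 on success, an errno value on failure.
int read_fully(int fd, void* buffer, std::size_t len, off_t offset)
{
  char* buf = static_cast<char*>(buffer);
  while (len > 0)
  {
    ssize_t n = pread(fd, buf, len, offset);
    if (n < 0)
    {
      if (errno == EINTR)
        continue;                          // interrupted: retry the same range
      return errno;                        // genuine error
    }
    if (n == 0)
      return EIO;                          // unexpected end of file
    buf += n;                              // partial transfer: advance the buffer,
    len -= static_cast<std::size_t>(n);    // shrink what is left,
    offset += n;                           // move the file offset, and retry
  }
  return 0;
}
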
diff --git a/tpool/tpool_generic.cc b/tpool/tpool_generic.cc
index a1b9a3ce945..5720c5b48aa 100644
--- a/tpool/tpool_generic.cc
+++ b/tpool/tpool_generic.cc
@@ -85,25 +85,12 @@ void aio::synchronous(aiocb *cb)
#endif
cb->m_ret_len = ret_len;
cb->m_err = err;
- if (!err && cb->m_ret_len != cb->m_len)
+ if (ret_len)
finish_synchronous(cb);
}
/**
- A partial read/write has occured, continue synchronously.
-*/
-void aio::finish_synchronous(aiocb *cb)
-{
- assert(cb->m_ret_len != (unsigned int) cb->m_len && !cb->m_err);
- /* partial read/write */
- cb->m_buffer= (char *) cb->m_buffer + cb->m_ret_len;
- cb->m_len-= (unsigned int) cb->m_ret_len;
- cb->m_offset+= cb->m_ret_len;
- synchronous(cb);
-}
-
-/**
Implementation of generic threadpool.
This threadpool consists of the following components