author     Vladislav Vaintroub <wlad@mariadb.com>    2017-04-18 18:43:20 +0000
committer  Sergei Golubchik <serg@mariadb.org>       2017-04-27 19:12:39 +0200
commit     d7714308e014a7335e1c0f60dc0afbf5eba5ff99 (patch)
tree       fcbbd1a871d35460389114be874b935e74b670df
parent     9c4b7cad279fde927c192a8928aa43cb751b2116 (diff)
download   mariadb-git-d7714308e014a7335e1c0f60dc0afbf5eba5ff99.tar.gz
MDEV-9566 Add Percona Xtrabackup 2.3.7
-rw-r--r--  extra/mariabackup/CMakeLists.txt  173
-rw-r--r--  extra/mariabackup/backup_copy.cc  2047
-rw-r--r--  extra/mariabackup/backup_copy.h  51
-rw-r--r--  extra/mariabackup/backup_mysql.cc  1756
-rw-r--r--  extra/mariabackup/backup_mysql.h  92
-rw-r--r--  extra/mariabackup/changed_page_bitmap.cc  1018
-rw-r--r--  extra/mariabackup/changed_page_bitmap.h  85
-rw-r--r--  extra/mariabackup/common.h  134
-rw-r--r--  extra/mariabackup/compact.cc  1059
-rw-r--r--  extra/mariabackup/compact.h  44
-rw-r--r--  extra/mariabackup/datasink.c  130
-rw-r--r--  extra/mariabackup/datasink.h  98
-rw-r--r--  extra/mariabackup/ds_archive.c  275
-rw-r--r--  extra/mariabackup/ds_archive.h  28
-rw-r--r--  extra/mariabackup/ds_buffer.c  189
-rw-r--r--  extra/mariabackup/ds_buffer.h  39
-rw-r--r--  extra/mariabackup/ds_compress.c  462
-rw-r--r--  extra/mariabackup/ds_compress.h  28
-rw-r--r--  extra/mariabackup/ds_encrypt.c  617
-rw-r--r--  extra/mariabackup/ds_encrypt.h  28
-rw-r--r--  extra/mariabackup/ds_local.c  151
-rw-r--r--  extra/mariabackup/ds_local.h  28
-rw-r--r--  extra/mariabackup/ds_stdout.c  121
-rw-r--r--  extra/mariabackup/ds_stdout.h  28
-rw-r--r--  extra/mariabackup/ds_tmpfile.c  248
-rw-r--r--  extra/mariabackup/ds_tmpfile.h  30
-rw-r--r--  extra/mariabackup/ds_xbstream.c  223
-rw-r--r--  extra/mariabackup/ds_xbstream.h  28
-rw-r--r--  extra/mariabackup/fil_cur.cc  402
-rw-r--r--  extra/mariabackup/fil_cur.h  123
-rw-r--r--  extra/mariabackup/innobackupex.cc  1153
-rw-r--r--  extra/mariabackup/innobackupex.h  45
-rw-r--r--  extra/mariabackup/quicklz/quicklz.c  848
-rw-r--r--  extra/mariabackup/quicklz/quicklz.h  144
-rw-r--r--  extra/mariabackup/read_filt.cc  206
-rw-r--r--  extra/mariabackup/read_filt.h  62
-rw-r--r--  extra/mariabackup/version_check.pl  1373
-rw-r--r--  extra/mariabackup/write_filt.cc  219
-rw-r--r--  extra/mariabackup/write_filt.h  61
-rw-r--r--  extra/mariabackup/wsrep.cc  219
-rw-r--r--  extra/mariabackup/wsrep.h  32
-rw-r--r--  extra/mariabackup/xb_regex.h  71
-rw-r--r--  extra/mariabackup/xbcloud.cc  2721
-rw-r--r--  extra/mariabackup/xbcrypt.c  694
-rw-r--r--  extra/mariabackup/xbcrypt.h  84
-rw-r--r--  extra/mariabackup/xbcrypt_common.c  60
-rw-r--r--  extra/mariabackup/xbcrypt_read.c  251
-rw-r--r--  extra/mariabackup/xbcrypt_write.c  104
-rw-r--r--  extra/mariabackup/xbstream.c  456
-rw-r--r--  extra/mariabackup/xbstream.h  103
-rw-r--r--  extra/mariabackup/xbstream_read.c  227
-rw-r--r--  extra/mariabackup/xbstream_write.c  280
-rw-r--r--  extra/mariabackup/xtrabackup.cc  7220
-rw-r--r--  extra/mariabackup/xtrabackup.h  232
-rw-r--r--  extra/mariabackup/xtrabackup_version.h.in  27
55 files changed, 26627 insertions, 0 deletions
diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt
new file mode 100644
index 00000000000..a84ac304486
--- /dev/null
+++ b/extra/mariabackup/CMakeLists.txt
@@ -0,0 +1,173 @@
+# Copyright (c) 2013 Percona LLC and/or its affiliates.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+INCLUDE(gcrypt)
+INCLUDE(curl)
+INCLUDE(libev)
+
+ADD_SUBDIRECTORY(libarchive)
+ADD_SUBDIRECTORY(jsmn)
+
+FIND_GCRYPT()
+FIND_CURL()
+FIND_EV()
+
+# xxd is needed to embed version_check script
+FIND_PROGRAM(XXD_PATH xxd)
+
+IF(NOT XXD_PATH)
+ MESSAGE(FATAL_ERROR "xxd not found. Try to install vim-common.")
+ENDIF(NOT XXD_PATH)
+
+INCLUDE_DIRECTORIES(
+ ${CMAKE_SOURCE_DIR}/include
+ ${CMAKE_SOURCE_DIR}/storage/innobase/include
+ ${CMAKE_SOURCE_DIR}/sql
+ ${CMAKE_SOURCE_DIR}/storage/innobase/xtrabackup/src/libarchive/libarchive
+ ${CMAKE_SOURCE_DIR}/storage/innobase/xtrabackup/src/quicklz
+ ${CMAKE_SOURCE_DIR}/storage/innobase/xtrabackup/src/jsmn
+ ${GCRYPT_INCLUDE_DIR}
+ ${CURL_INCLUDE_DIRS}
+ ${LIBEV_INCLUDE_DIRS}
+ ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ADD_DEFINITIONS(${SSL_DEFINES})
+
+########################################################################
+# xtrabackup binary
+########################################################################
+CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/xtrabackup_version.h.in
+ ${CMAKE_CURRENT_BINARY_DIR}/xtrabackup_version.h )
+
+ADD_CUSTOM_COMMAND(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/version_check_pl.h
+ COMMAND ${XXD_PATH} --include version_check.pl
+ ${CMAKE_CURRENT_BINARY_DIR}/version_check_pl.h
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+
+ADD_CUSTOM_TARGET(GenVersionCheck
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/version_check_pl.h)
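For reference, "xxd --include version_check.pl" writes a C header of roughly the following form into version_check_pl.h (the symbol names are derived from the input file name; the byte contents are abbreviated here). backup_copy.cc later includes this header and pipes the embedded script to perl:

	unsigned char version_check_pl[] = {
		/* ... the raw bytes of version_check.pl ... */
	};
	unsigned int version_check_pl_len = sizeof(version_check_pl); /* xxd emits the literal byte count here */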
+
+SET_SOURCE_FILES_PROPERTIES(
+ xtrabackup.cc
+ backup_mysql.cc
+ PROPERTIES COMPILE_FLAGS -DMYSQL_CLIENT)
+
+MYSQL_ADD_EXECUTABLE(xtrabackup
+ xtrabackup.cc
+ innobackupex.cc
+ changed_page_bitmap.cc
+ compact.cc
+ datasink.c
+ ds_archive.c
+ ds_buffer.c
+ ds_compress.c
+ ds_encrypt.c
+ ds_local.c
+ ds_stdout.c
+ ds_tmpfile.c
+ ds_xbstream.c
+ fil_cur.cc
+ quicklz/quicklz.c
+ read_filt.cc
+ write_filt.cc
+ wsrep.cc
+ xbcrypt_common.c
+ xbcrypt_write.c
+ xbstream_write.c
+ backup_mysql.cc
+ backup_copy.cc
+ ../../../../sql-common/client_authentication.cc
+ )
+
+SET_TARGET_PROPERTIES(xtrabackup PROPERTIES ENABLE_EXPORTS TRUE)
+
+TARGET_LINK_LIBRARIES(xtrabackup
+ mysqlserver
+ ${GCRYPT_LIBS}
+ archive_static
+ )
+
+ADD_DEPENDENCIES(xtrabackup GenVersionCheck)
+
+########################################################################
+# innobackupex symlink
+########################################################################
+ADD_CUSTOM_COMMAND(TARGET xtrabackup
+ COMMAND ${CMAKE_COMMAND} ARGS -E create_symlink
+ xtrabackup innobackupex)
+INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/innobackupex DESTINATION bin)
+
+########################################################################
+# xbstream binary
+########################################################################
+MYSQL_ADD_EXECUTABLE(xbstream
+ ds_buffer.c
+ ds_local.c
+ ds_stdout.c
+ datasink.c
+ xbstream.c
+ xbstream_read.c
+ xbstream_write.c
+ )
+
+SET_TARGET_PROPERTIES(xbstream
+ PROPERTIES LINKER_LANGUAGE CXX
+ )
+
+TARGET_LINK_LIBRARIES(xbstream
+ mysys
+ mysys_ssl
+ )
+
+########################################################################
+# xbcrypt binary
+########################################################################
+MYSQL_ADD_EXECUTABLE(xbcrypt
+ xbcrypt.c
+ xbcrypt_common.c
+ xbcrypt_read.c
+ xbcrypt_write.c
+ )
+
+SET_TARGET_PROPERTIES(xbcrypt
+ PROPERTIES LINKER_LANGUAGE CXX
+ )
+
+TARGET_LINK_LIBRARIES(xbcrypt
+ ${GCRYPT_LIBS}
+ mysys
+ mysys_ssl
+ )
+
+########################################################################
+# xbcloud binary
+########################################################################
+MYSQL_ADD_EXECUTABLE(xbcloud
+ xbcloud.cc
+ )
+
+SET_TARGET_PROPERTIES(xbcloud
+ PROPERTIES LINKER_LANGUAGE CXX
+ )
+
+TARGET_LINK_LIBRARIES(xbcloud
+ ${GCRYPT_LIBS}
+ ${LIBEV_LIBRARIES}
+ ${CURL_LIBRARIES}
+ mysys
+ mysys_ssl
+ jsmn
+ )
diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc
new file mode 100644
index 00000000000..a051c64ee3b
--- /dev/null
+++ b/extra/mariabackup/backup_copy.cc
@@ -0,0 +1,2047 @@
+/******************************************************
+hot backup tool for InnoDB
+(c) 2009-2015 Percona LLC and/or its affiliates
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************
+
+This file incorporates work covered by the following copyright and
+permission notice:
+
+Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA
+
+*******************************************************/
+
+#include <my_global.h>
+#include <os0file.h>
+#include <my_dir.h>
+#include <ut0mem.h>
+#include <srv0start.h>
+#include <fil0fil.h>
+#include <set>
+#include <string>
+#include <mysqld.h>
+#include <version_check_pl.h>
+#include <sstream>
+#include "fil_cur.h"
+#include "xtrabackup.h"
+#include "common.h"
+#include "backup_copy.h"
+#include "backup_mysql.h"
+
+
+/* list of files to sync for --rsync mode */
+static std::set<std::string> rsync_list;
+/* locations of tablespaces read from .isl files */
+static std::map<std::string, std::string> tablespace_locations;
+
+/* Whether LOCK BINLOG FOR BACKUP has been issued during backup */
+bool binlog_locked;
+
+/************************************************************************
+Struct that represents a file or a directory. */
+struct datadir_node_t {
+ ulint dbpath_len;
+ char *filepath;
+ ulint filepath_len;
+ char *filepath_rel;
+ ulint filepath_rel_len;
+ bool is_empty_dir;
+ bool is_file;
+};
+
+/************************************************************************
+Holds the state needed to enumerate files in MySQL data directory. */
+struct datadir_iter_t {
+ char *datadir_path;
+ char *dbpath;
+ ulint dbpath_len;
+ char *filepath;
+ ulint filepath_len;
+ char *filepath_rel;
+ ulint filepath_rel_len;
+ os_ib_mutex_t mutex;
+ os_file_dir_t dir;
+ os_file_dir_t dbdir;
+ os_file_stat_t dbinfo;
+ os_file_stat_t fileinfo;
+ dberr_t err;
+ bool is_empty_dir;
+ bool is_file;
+ bool skip_first_level;
+};
+
+
+/************************************************************************
+Represents the context of the thread processing MySQL data directory. */
+struct datadir_thread_ctxt_t {
+ datadir_iter_t *it;
+ uint n_thread;
+ uint *count;
+ os_ib_mutex_t count_mutex;
+ os_thread_id_t id;
+ bool ret;
+};
+
+
+/************************************************************************
+Return true if the character is a path separator */
+bool
+is_path_separator(char c)
+{
+ return(c == FN_LIBCHAR || c == FN_LIBCHAR2);
+}
+
+
+/************************************************************************
+Fill the node struct. Memory for the node needs to be allocated and
+freed by the caller. It is the caller's responsibility to initialize
+the node with datadir_node_init and to clean up the memory with
+datadir_node_free. A node cannot be shared between threads. */
+static
+void
+datadir_node_fill(datadir_node_t *node, datadir_iter_t *it)
+{
+ if (node->filepath_len < it->filepath_len) {
+ free(node->filepath);
+ node->filepath = (char*)(ut_malloc(it->filepath_len));
+ node->filepath_len = it->filepath_len;
+ }
+ if (node->filepath_rel_len < it->filepath_rel_len) {
+ free(node->filepath_rel);
+ node->filepath_rel = (char*)(ut_malloc(it->filepath_rel_len));
+ node->filepath_rel_len = it->filepath_rel_len;
+ }
+
+ strcpy(node->filepath, it->filepath);
+ strcpy(node->filepath_rel, it->filepath_rel);
+ node->is_empty_dir = it->is_empty_dir;
+ node->is_file = it->is_file;
+}
+
+static
+void
+datadir_node_free(datadir_node_t *node)
+{
+ ut_free(node->filepath);
+ ut_free(node->filepath_rel);
+ memset(node, 0, sizeof(datadir_node_t));
+}
+
+static
+void
+datadir_node_init(datadir_node_t *node)
+{
+ memset(node, 0, sizeof(datadir_node_t));
+}
+
+
+/************************************************************************
+Create the MySQL data directory iterator. Memory needs to be released
+with datadir_iter_free. The position is advanced with
+datadir_iter_next. The iterator can be shared between multiple
+threads. It is guaranteed that each thread receives a unique file from
+the data directory into its local node struct. */
+static
+datadir_iter_t *
+datadir_iter_new(const char *path, bool skip_first_level = true)
+{
+ datadir_iter_t *it;
+
+ it = static_cast<datadir_iter_t *>(ut_malloc(sizeof(datadir_iter_t)));
+ memset(it, 0, sizeof(datadir_iter_t));
+
+ it->mutex = os_mutex_create();
+ it->datadir_path = strdup(path);
+
+ it->dir = os_file_opendir(it->datadir_path, TRUE);
+
+ if (it->dir == NULL) {
+
+ goto error;
+ }
+
+ it->err = DB_SUCCESS;
+
+ it->dbpath_len = FN_REFLEN;
+ it->dbpath = static_cast<char*>(ut_malloc(it->dbpath_len));
+
+ it->filepath_len = FN_REFLEN;
+ it->filepath = static_cast<char*>(ut_malloc(it->filepath_len));
+
+ it->filepath_rel_len = FN_REFLEN;
+ it->filepath_rel = static_cast<char*>(ut_malloc(it->filepath_rel_len));
+
+ it->skip_first_level = skip_first_level;
+
+ return(it);
+
+error:
+ ut_free(it);
+
+ return(NULL);
+}
+
+static
+bool
+datadir_iter_next_database(datadir_iter_t *it)
+{
+ if (it->dbdir != NULL) {
+ if (os_file_closedir(it->dbdir) != 0) {
+
+ msg("Warning: could not"
+ " close database directory %s\n", it->dbpath);
+
+ it->err = DB_ERROR;
+
+ }
+ it->dbdir = NULL;
+ }
+
+ while (fil_file_readdir_next_file(&it->err, it->datadir_path,
+ it->dir, &it->dbinfo) == 0) {
+ ulint len;
+
+ if ((it->dbinfo.type == OS_FILE_TYPE_FILE
+ && it->skip_first_level)
+ || it->dbinfo.type == OS_FILE_TYPE_UNKNOWN) {
+
+ continue;
+ }
+
+ /* We found a symlink or a directory; try opening it to see
+ if a symlink is a directory */
+
+ len = strlen(it->datadir_path)
+ + strlen (it->dbinfo.name) + 2;
+ if (len > it->dbpath_len) {
+ it->dbpath_len = len;
+
+ if (it->dbpath) {
+
+ ut_free(it->dbpath);
+ }
+
+ it->dbpath = static_cast<char*>
+ (ut_malloc(it->dbpath_len));
+ }
+ ut_snprintf(it->dbpath, it->dbpath_len,
+ "%s/%s", it->datadir_path,
+ it->dbinfo.name);
+ srv_normalize_path_for_win(it->dbpath);
+
+ if (it->dbinfo.type == OS_FILE_TYPE_FILE) {
+ it->is_file = true;
+ return(true);
+ }
+
+ /* We want wrong directory permissions to be a fatal error for
+ XtraBackup. */
+ it->dbdir = os_file_opendir(it->dbpath, TRUE);
+
+ if (it->dbdir != NULL) {
+
+ it->is_file = false;
+ return(true);
+ }
+
+ }
+
+ return(false);
+}
+
+/************************************************************************
+Concatenate n parts into single path */
+static
+void
+make_path_n(int n, char **path, ulint *path_len, ...)
+{
+ ulint len_needed = n + 1;
+ char *p;
+ int i;
+ va_list vl;
+
+ ut_ad(n > 0);
+
+ va_start(vl, path_len);
+ for (i = 0; i < n; i++) {
+ p = va_arg(vl, char*);
+ len_needed += strlen(p);
+ }
+ va_end(vl);
+
+ if (len_needed < *path_len) {
+ ut_free(*path);
+ *path = static_cast<char*>(ut_malloc(len_needed));
+ }
+
+ va_start(vl, path_len);
+ p = va_arg(vl, char*);
+ strcpy(*path, p);
+ for (i = 1; i < n; i++) {
+ size_t plen;
+ p = va_arg(vl, char*);
+ plen = strlen(*path);
+ if (!is_path_separator((*path)[plen - 1])) {
+ (*path)[plen] = FN_LIBCHAR;
+ (*path)[plen + 1] = 0;
+ }
+ strcat(*path + plen, p);
+ }
+ va_end(vl);
+}
+
+static
+bool
+datadir_iter_next_file(datadir_iter_t *it)
+{
+ if (it->is_file && it->dbpath) {
+ make_path_n(2, &it->filepath, &it->filepath_len,
+ it->datadir_path, it->dbinfo.name);
+
+ make_path_n(1, &it->filepath_rel, &it->filepath_rel_len,
+ it->dbinfo.name);
+
+ it->is_empty_dir = false;
+ it->is_file = false;
+
+ return(true);
+ }
+
+ if (!it->dbpath || !it->dbdir) {
+
+ return(false);
+ }
+
+ while (fil_file_readdir_next_file(&it->err, it->dbpath, it->dbdir,
+ &it->fileinfo) == 0) {
+
+ if (it->fileinfo.type == OS_FILE_TYPE_DIR) {
+
+ continue;
+ }
+
+ /* We found a symlink or a file */
+ make_path_n(3, &it->filepath, &it->filepath_len,
+ it->datadir_path, it->dbinfo.name,
+ it->fileinfo.name);
+
+ make_path_n(2, &it->filepath_rel, &it->filepath_rel_len,
+ it->dbinfo.name, it->fileinfo.name);
+
+ it->is_empty_dir = false;
+
+ return(true);
+ }
+
+ return(false);
+}
+
+static
+bool
+datadir_iter_next(datadir_iter_t *it, datadir_node_t *node)
+{
+ bool ret = true;
+
+ os_mutex_enter(it->mutex);
+
+ if (datadir_iter_next_file(it)) {
+
+ datadir_node_fill(node, it);
+
+ goto done;
+ }
+
+ while (datadir_iter_next_database(it)) {
+
+ if (datadir_iter_next_file(it)) {
+
+ datadir_node_fill(node, it);
+
+ goto done;
+ }
+
+ make_path_n(2, &it->filepath, &it->filepath_len,
+ it->datadir_path, it->dbinfo.name);
+
+ make_path_n(1, &it->filepath_rel, &it->filepath_rel_len,
+ it->dbinfo.name);
+
+ it->is_empty_dir = true;
+
+ datadir_node_fill(node, it);
+
+ goto done;
+ }
+
+ /* nothing found */
+ ret = false;
+
+done:
+ os_mutex_exit(it->mutex);
+
+ return(ret);
+}
+
+/************************************************************************
+Free the data directory iterator. */
+static
+void
+datadir_iter_free(datadir_iter_t *it)
+{
+ os_mutex_free(it->mutex);
+
+ if (it->dbdir) {
+
+ os_file_closedir(it->dbdir);
+ }
+
+ if (it->dir) {
+
+ os_file_closedir(it->dir);
+ }
+
+ ut_free(it->dbpath);
+ ut_free(it->filepath);
+ ut_free(it->filepath_rel);
+ free(it->datadir_path);
+ ut_free(it);
+}
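A minimal usage sketch of the iterator API above (illustration only; it mirrors how backup_files() and copy_back() below drive the iterator):

	datadir_node_t node;
	datadir_node_init(&node);

	datadir_iter_t *it = datadir_iter_new(fil_path_to_mysql_datadir);
	if (it != NULL) {
		while (datadir_iter_next(it, &node)) {
			/* node.filepath is the absolute path,
			node.filepath_rel is relative to the datadir,
			node.is_empty_dir marks an empty database directory */
		}
		datadir_iter_free(it);
	}
	datadir_node_free(&node);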
+
+
+/************************************************************************
+Holds the state needed to copy a single data file. The file is read
+sequentially: open it with datafile_open to get a cursor, advance with
+datafile_read, and close the cursor with datafile_close. A cursor
+cannot be shared between multiple threads. */
+struct datafile_cur_t {
+ os_file_t file;
+ char rel_path[FN_REFLEN];
+ char abs_path[FN_REFLEN];
+ MY_STAT statinfo;
+ uint thread_n;
+ byte* orig_buf;
+ byte* buf;
+ ib_int64_t buf_size;
+ ib_int64_t buf_read;
+ ib_int64_t buf_offset;
+};
+
+static
+void
+datafile_close(datafile_cur_t *cursor)
+{
+ if (cursor->file != 0) {
+ os_file_close(cursor->file);
+ }
+ ut_free(cursor->buf);
+}
+
+static
+bool
+datafile_open(const char *file, datafile_cur_t *cursor, uint thread_n)
+{
+ ulint success;
+
+ memset(cursor, 0, sizeof(datafile_cur_t));
+
+ strncpy(cursor->abs_path, file, sizeof(cursor->abs_path));
+
+ /* Get the relative path for the destination tablespace name, i.e. the
+ one that can be appended to the backup root directory. Non-system
+ tablespaces may have absolute paths for remote tablespaces in MySQL
+ 5.6+. We want to make "local" copies for the backup. */
+ strncpy(cursor->rel_path,
+ xb_get_relative_path(cursor->abs_path, FALSE),
+ sizeof(cursor->rel_path));
+
+ cursor->file = os_file_create_simple_no_error_handling(0,
+ cursor->abs_path,
+ OS_FILE_OPEN,
+ OS_FILE_READ_ONLY,
+ &success);
+ if (!success) {
+ /* The following call prints an error message */
+ os_file_get_last_error(TRUE);
+
+ msg("[%02u] error: cannot open "
+ "file %s\n",
+ thread_n, cursor->abs_path);
+
+ return(false);
+ }
+
+ if (my_fstat(cursor->file, &cursor->statinfo, MYF(MY_WME))) {
+ msg("[%02u] error: cannot stat %s\n",
+ thread_n, cursor->abs_path);
+
+ datafile_close(cursor);
+
+ return(false);
+ }
+
+ posix_fadvise(cursor->file, 0, 0, POSIX_FADV_SEQUENTIAL);
+
+ cursor->buf_size = 10 * 1024 * 1024;
+ cursor->buf = static_cast<byte *>(ut_malloc(cursor->buf_size));
+
+ return(true);
+}
+
+
+static
+xb_fil_cur_result_t
+datafile_read(datafile_cur_t *cursor)
+{
+ ulint success;
+ ulint to_read;
+
+ xtrabackup_io_throttling();
+
+ to_read = min(cursor->statinfo.st_size - cursor->buf_offset,
+ cursor->buf_size);
+
+ if (to_read == 0) {
+ return(XB_FIL_CUR_EOF);
+ }
+
+ success = os_file_read(cursor->file, cursor->buf, cursor->buf_offset,
+ to_read);
+ if (!success) {
+ return(XB_FIL_CUR_ERROR);
+ }
+
+ posix_fadvise(cursor->file, cursor->buf_offset, to_read,
+ POSIX_FADV_DONTNEED);
+
+ cursor->buf_read = to_read;
+ cursor->buf_offset += to_read;
+
+ return(XB_FIL_CUR_SUCCESS);
+}
+
+
+
+/************************************************************************
+Check to see if a file exists.
+Takes the name of the file to check.
+@return true if the file exists. */
+static
+bool
+file_exists(const char *filename)
+{
+ MY_STAT stat_arg;
+
+ if (!my_stat(filename, &stat_arg, MYF(0))) {
+
+ return(false);
+ }
+
+ return(true);
+}
+
+/************************************************************************
+Trim leading path separators and "./" prefixes from a path so it
+becomes relative */
+static
+const char *
+trim_dotslash(const char *path)
+{
+ while (*path) {
+ if (is_path_separator(*path)) {
+ ++path;
+ continue;
+ }
+ if (*path == '.' && is_path_separator(path[1])) {
+ path += 2;
+ continue;
+ }
+ break;
+ }
+
+ return(path);
+}
+
+
+
+/************************************************************************
+Check if string ends with given suffix.
+@return true if string ends with given suffix. */
+static
+bool
+ends_with(const char *str, const char *suffix)
+{
+ size_t suffix_len = strlen(suffix);
+ size_t str_len = strlen(str);
+ return(str_len >= suffix_len
+ && strcmp(str + str_len - suffix_len, suffix) == 0);
+}
+
+/************************************************************************
+Create directories recursively.
+@return 0 if directories created successfully. */
+static
+int
+mkdirp(const char *pathname, int Flags, myf MyFlags)
+{
+ char parent[PATH_MAX], *p;
+
+ /* make a parent directory path */
+ strncpy(parent, pathname, sizeof(parent));
+ parent[sizeof(parent) - 1] = 0;
+
+ for (p = parent + strlen(parent);
+ !is_path_separator(*p) && p != parent; p--);
+
+ *p = 0;
+
+ /* try to make parent directory */
+ if (p != parent && mkdirp(parent, Flags, MyFlags) != 0) {
+ return(-1);
+ }
+
+ /* make this one if parent has been made */
+ if (my_mkdir(pathname, Flags, MyFlags) == 0) {
+ return(0);
+ }
+
+ /* if it already exists that is fine */
+ if (errno == EEXIST) {
+ return(0);
+ }
+
+ return(-1);
+}
+
+/************************************************************************
+Return true if first and second arguments are the same path. */
+bool
+equal_paths(const char *first, const char *second)
+{
+ char real_first[PATH_MAX];
+ char real_second[PATH_MAX];
+
+ if (realpath(first, real_first) == NULL) {
+ return false;
+ }
+ if (realpath(second, real_second) == NULL) {
+ return false;
+ }
+
+ return (strcmp(real_first, real_second) == 0);
+}
+
+/************************************************************************
+Check if a directory exists, and optionally create it if it does not.
+@return true if the directory exists or was created successfully. */
+bool
+directory_exists(const char *dir, bool create)
+{
+ os_file_dir_t os_dir;
+ MY_STAT stat_arg;
+ char errbuf[MYSYS_STRERROR_SIZE];
+
+ if (my_stat(dir, &stat_arg, MYF(0)) == NULL) {
+
+ if (!create) {
+ return(false);
+ }
+
+ if (mkdirp(dir, 0777, MYF(0)) < 0) {
+
+ msg("Can not create directory %s: %s\n", dir,
+ my_strerror(errbuf, sizeof(errbuf), my_errno));
+
+ return(false);
+
+ }
+ }
+
+ /* could be symlink */
+ os_dir = os_file_opendir(dir, FALSE);
+
+ if (os_dir == NULL) {
+
+ msg("Can not open directory %s: %s\n", dir,
+ my_strerror(errbuf, sizeof(errbuf), my_errno));
+
+ return(false);
+ }
+
+ os_file_closedir(os_dir);
+
+ return(true);
+}
+
+/************************************************************************
+Check that a directory exists and is empty. */
+static
+bool
+directory_exists_and_empty(const char *dir, const char *comment)
+{
+ os_file_dir_t os_dir;
+ dberr_t err;
+ os_file_stat_t info;
+ bool empty;
+
+ if (!directory_exists(dir, true)) {
+ return(false);
+ }
+
+ os_dir = os_file_opendir(dir, FALSE);
+
+ if (os_dir == NULL) {
+ msg("%s can not open directory %s\n", comment, dir);
+ return(false);
+ }
+
+ empty = (fil_file_readdir_next_file(&err, dir, os_dir, &info) != 0);
+
+ os_file_closedir(os_dir);
+
+ if (!empty) {
+ msg("%s directory %s is not empty!\n", comment, dir);
+ }
+
+ return(empty);
+}
+
+
+/************************************************************************
+Check if file name ends with given set of suffixes.
+@return true if it does. */
+static
+bool
+filename_matches(const char *filename, const char **ext_list)
+{
+ const char **ext;
+
+ for (ext = ext_list; *ext; ext++) {
+ if (ends_with(filename, *ext)) {
+ return(true);
+ }
+ }
+
+ return(false);
+}
+
+
+/************************************************************************
+Copy a data file for backup. Also check whether it is allowed to be
+copied by comparing its name to the list of known data file types and
+checking whether it passes the rules for partial backups.
+@return true if the file was backed up or skipped successfully. */
+static
+bool
+datafile_copy_backup(const char *filepath, uint thread_n)
+{
+ const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
+ "MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
+ NULL};
+
+ /* Get the name and the path for the tablespace. node->name always
+ contains the path (which may be absolute for remote tablespaces in
+ 5.6+). space->name contains the tablespace name in the form
+ "./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a
+ multi-node shared tablespace, space->name contains the name of the first
+ node, but that's irrelevant, since we only need node_name to match them
+ against filters, and the shared tablespace is always copied regardless
+ of the filters value. */
+
+ if (check_if_skip_table(filepath)) {
+ msg_ts("[%02u] Skipping %s.\n", thread_n, filepath);
+ return(true);
+ }
+
+ if (filename_matches(filepath, ext_list)) {
+ return copy_file(ds_data, filepath, filepath, thread_n);
+ }
+
+ return(true);
+}
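The practical effect of the extension filter above, illustrated with hypothetical file names (.ibd files do not match ext_list, so they are left to xtrabackup's InnoDB copy path rather than this function):

	/* illustration only */
	filename_matches("db/t1.frm", ext_list);	/* true  -> copied here  */
	filename_matches("db/t1.MYD", ext_list);	/* true  -> copied here  */
	filename_matches("db/t1.ibd", ext_list);	/* false -> skipped here */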
+
+
+/************************************************************************
+Same as datafile_copy_backup, but puts the file name into the list for
+the rsync command. */
+static
+bool
+datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f)
+{
+ const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
+ "MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
+ NULL};
+
+ /* Get the name and the path for the tablespace. node->name always
+ contains the path (which may be absolute for remote tablespaces in
+ 5.6+). space->name contains the tablespace name in the form
+ "./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a
+ multi-node shared tablespace, space->name contains the name of the first
+ node, but that's irrelevant, since we only need node_name to match them
+ against filters, and the shared tablespace is always copied regardless
+ of the filters value. */
+
+ if (check_if_skip_table(filepath)) {
+ return(true);
+ }
+
+ if (filename_matches(filepath, ext_list)) {
+ fprintf(f, "%s\n", filepath);
+ if (save_to_list) {
+ rsync_list.insert(filepath);
+ }
+ }
+
+ return(true);
+}
+
+
+static
+bool
+backup_file_vprintf(const char *filename, const char *fmt, va_list ap)
+{
+ ds_file_t *dstfile = NULL;
+	MY_STAT stat; /* only st_size and st_mtime are filled in below */
+ char *buf = 0;
+ int buf_len;
+ const char *action;
+
+ memset(&stat, 0, sizeof(stat));
+
+ buf_len = vasprintf(&buf, fmt, ap);
+
+ stat.st_size = buf_len;
+ stat.st_mtime = my_time(0);
+
+ dstfile = ds_open(ds_data, filename, &stat);
+ if (dstfile == NULL) {
+ msg("[%02u] error: "
+ "cannot open the destination stream for %s\n",
+ 0, filename);
+ goto error;
+ }
+
+ action = xb_get_copy_action("Writing");
+ msg_ts("[%02u] %s %s\n", 0, action, filename);
+
+ if (buf_len == -1) {
+ goto error;
+ }
+
+ if (ds_write(dstfile, buf, buf_len)) {
+ goto error;
+ }
+
+ /* close */
+ msg_ts("[%02u] ...done\n", 0);
+ free(buf);
+
+ if (ds_close(dstfile)) {
+ goto error_close;
+ }
+
+ return(true);
+
+error:
+ free(buf);
+ if (dstfile != NULL) {
+ ds_close(dstfile);
+ }
+
+error_close:
+ msg("[%02u] Error: backup file failed.\n", 0);
+ return(false); /*ERROR*/
+}
+
+
+bool
+backup_file_printf(const char *filename, const char *fmt, ...)
+{
+ bool result;
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ result = backup_file_vprintf(filename, fmt, ap);
+
+ va_end(ap);
+
+ return(result);
+}
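A brief usage sketch of backup_file_printf() (the file names here are hypothetical; backup_files() below uses the same call to create an empty db.opt inside empty database directories):

	backup_file_printf("db1/db.opt", "%s", "");
	backup_file_printf("my_notes.txt", "backup of %s\n", "db1");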
+
+static
+bool
+run_data_threads(datadir_iter_t *it, os_thread_func_t func, uint n)
+{
+ datadir_thread_ctxt_t *data_threads;
+ uint i, count;
+ os_ib_mutex_t count_mutex;
+ bool ret;
+
+ data_threads = (datadir_thread_ctxt_t*)
+ (ut_malloc(sizeof(datadir_thread_ctxt_t) * n));
+
+ count_mutex = os_mutex_create();
+ count = n;
+
+ for (i = 0; i < n; i++) {
+ data_threads[i].it = it;
+ data_threads[i].n_thread = i + 1;
+ data_threads[i].count = &count;
+ data_threads[i].count_mutex = count_mutex;
+ os_thread_create(func, data_threads + i, &data_threads[i].id);
+ }
+
+ /* Wait for threads to exit */
+ while (1) {
+ os_thread_sleep(100000);
+ os_mutex_enter(count_mutex);
+ if (count == 0) {
+ os_mutex_exit(count_mutex);
+ break;
+ }
+ os_mutex_exit(count_mutex);
+ }
+
+ os_mutex_free(count_mutex);
+
+ ret = true;
+ for (i = 0; i < n; i++) {
+ ret = data_threads[i].ret && ret;
+ if (!data_threads[i].ret) {
+ msg("Error: thread %u failed.\n", i);
+ }
+ }
+
+ ut_free(data_threads);
+
+ return(ret);
+}
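run_data_threads() expects each worker function to drain the shared iterator, record its status in ctxt->ret, and decrement *ctxt->count under ctxt->count_mutex before exiting; decrypt_decompress_thread_func() near the end of this file follows this pattern. A minimal worker sketch, assuming the types defined above (the worker name is hypothetical):

	static
	os_thread_ret_t
	example_worker(void *arg)	/* hypothetical worker, illustration only */
	{
		datadir_thread_ctxt_t	*ctxt = (datadir_thread_ctxt_t *) arg;
		datadir_node_t		node;

		datadir_node_init(&node);
		ctxt->ret = true;

		while (datadir_iter_next(ctxt->it, &node)) {
			/* process node.filepath / node.filepath_rel here */
		}

		datadir_node_free(&node);

		/* signal completion so run_data_threads() stops waiting */
		os_mutex_enter(ctxt->count_mutex);
		--(*ctxt->count);
		os_mutex_exit(ctxt->count_mutex);

		os_thread_exit(NULL);
		OS_THREAD_DUMMY_RETURN;
	}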
+
+
+/************************************************************************
+Copy file for backup/restore.
+@return true in case of success. */
+bool
+copy_file(ds_ctxt_t *datasink,
+ const char *src_file_path,
+ const char *dst_file_path,
+ uint thread_n)
+{
+ char dst_name[FN_REFLEN];
+ ds_file_t *dstfile = NULL;
+ datafile_cur_t cursor;
+ xb_fil_cur_result_t res;
+ const char *action;
+
+ if (!datafile_open(src_file_path, &cursor, thread_n)) {
+ goto error;
+ }
+
+ strncpy(dst_name, cursor.rel_path, sizeof(dst_name));
+
+ dstfile = ds_open(datasink, trim_dotslash(dst_file_path),
+ &cursor.statinfo);
+ if (dstfile == NULL) {
+ msg("[%02u] error: "
+ "cannot open the destination stream for %s\n",
+ thread_n, dst_name);
+ goto error;
+ }
+
+ action = xb_get_copy_action();
+ msg_ts("[%02u] %s %s to %s\n",
+ thread_n, action, src_file_path, dstfile->path);
+
+ /* The main copy loop */
+ while ((res = datafile_read(&cursor)) == XB_FIL_CUR_SUCCESS) {
+
+ if (ds_write(dstfile, cursor.buf, cursor.buf_read)) {
+ goto error;
+ }
+ }
+
+ if (res == XB_FIL_CUR_ERROR) {
+ goto error;
+ }
+
+ /* close */
+ msg_ts("[%02u] ...done\n", thread_n);
+ datafile_close(&cursor);
+ if (ds_close(dstfile)) {
+ goto error_close;
+ }
+ return(true);
+
+error:
+ datafile_close(&cursor);
+ if (dstfile != NULL) {
+ ds_close(dstfile);
+ }
+
+error_close:
+ msg("[%02u] Error: copy_file() failed.\n", thread_n);
+ return(false); /*ERROR*/
+}
+
+
+/************************************************************************
+Try to move a file by renaming it. If the source and destination are on
+different devices, fall back to copy and unlink.
+@return true in case of success. */
+static
+bool
+move_file(ds_ctxt_t *datasink,
+ const char *src_file_path,
+ const char *dst_file_path,
+ const char *dst_dir, uint thread_n)
+{
+ char errbuf[MYSYS_STRERROR_SIZE];
+ char dst_file_path_abs[FN_REFLEN];
+ char dst_dir_abs[FN_REFLEN];
+ size_t dirname_length;
+
+ ut_snprintf(dst_file_path_abs, sizeof(dst_file_path_abs),
+ "%s/%s", dst_dir, dst_file_path);
+
+ dirname_part(dst_dir_abs, dst_file_path_abs, &dirname_length);
+
+ if (!directory_exists(dst_dir_abs, true)) {
+ return(false);
+ }
+
+ if (file_exists(dst_file_path_abs)) {
+ msg("Error: Move file %s to %s failed: Destination "
+ "file exists\n",
+ src_file_path, dst_file_path_abs);
+ return(false);
+ }
+
+ msg_ts("[%02u] Moving %s to %s\n",
+ thread_n, src_file_path, dst_file_path_abs);
+
+ if (my_rename(src_file_path, dst_file_path_abs, MYF(0)) != 0) {
+ if (my_errno == EXDEV) {
+ bool ret;
+ ret = copy_file(datasink, src_file_path,
+ dst_file_path, thread_n);
+ msg_ts("[%02u] Removing %s\n", thread_n, src_file_path);
+ if (unlink(src_file_path) != 0) {
+ msg("Error: unlink %s failed: %s\n",
+ src_file_path,
+ my_strerror(errbuf,
+ sizeof(errbuf), errno));
+ }
+ return(ret);
+ }
+ msg("Can not move file %s to %s: %s\n",
+ src_file_path, dst_file_path_abs,
+ my_strerror(errbuf, sizeof(errbuf), my_errno));
+ return(false);
+ }
+
+ msg_ts("[%02u] ...done\n", thread_n);
+
+ return(true);
+}
+
+
+/************************************************************************
+Read the link from the .isl file, if any, and store it in the global
+map associated with the given tablespace. */
+static
+void
+read_link_file(const char *ibd_filepath, const char *link_filepath)
+{
+ char *filepath= NULL;
+
+ FILE *file = fopen(link_filepath, "r+b");
+ if (file) {
+ filepath = static_cast<char*>(malloc(OS_FILE_MAX_PATH));
+
+ os_file_read_string(file, filepath, OS_FILE_MAX_PATH);
+ fclose(file);
+
+ if (strlen(filepath)) {
+ /* Trim whitespace from end of filepath */
+ ulint lastch = strlen(filepath) - 1;
+ while (lastch > 4 && filepath[lastch] <= 0x20) {
+ filepath[lastch--] = 0x00;
+ }
+ srv_normalize_path_for_win(filepath);
+ }
+
+ tablespace_locations[ibd_filepath] = filepath;
+ }
+ free(filepath);
+}
+
+
+/************************************************************************
+Return the location of given .ibd if it was previously read
+from .isl file.
+@return NULL or destination .ibd file path. */
+static
+const char *
+tablespace_filepath(const char *ibd_filepath)
+{
+ std::map<std::string, std::string>::iterator it;
+
+ it = tablespace_locations.find(ibd_filepath);
+
+ if (it != tablespace_locations.end()) {
+ return it->second.c_str();
+ }
+
+ return NULL;
+}
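For reference, an .isl file is a plain-text file containing the absolute path of a remote tablespace. A hypothetical example of the mapping that these two helpers maintain (the paths are made up for illustration):

	/* db/t1.isl contains a single line such as "/mnt/fast/db/t1.ibd" */
	read_link_file("db/t1.ibd", "db/t1.isl");

	/* later, e.g. during copy-back */
	const char *real_path = tablespace_filepath("db/t1.ibd");
	/* real_path is now "/mnt/fast/db/t1.ibd", or NULL if no .isl was read */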
+
+
+/************************************************************************
+Copy or move file depending on current mode.
+@return true in case of success. */
+static
+bool
+copy_or_move_file(const char *src_file_path,
+ const char *dst_file_path,
+ const char *dst_dir,
+ uint thread_n)
+{
+ ds_ctxt_t *datasink = ds_data; /* copy to datadir by default */
+ char filedir[FN_REFLEN];
+ size_t filedir_len;
+ bool ret;
+
+ /* read the link from .isl file */
+ if (ends_with(src_file_path, ".isl")) {
+ char *ibd_filepath;
+
+ ibd_filepath = strdup(src_file_path);
+ strcpy(ibd_filepath + strlen(ibd_filepath) - 3, "ibd");
+
+ read_link_file(ibd_filepath, src_file_path);
+
+ free(ibd_filepath);
+ }
+
+ /* check if there is .isl file */
+ if (ends_with(src_file_path, ".ibd")) {
+ char *link_filepath;
+ const char *filepath;
+
+ link_filepath = strdup(src_file_path);
+ strcpy(link_filepath + strlen(link_filepath) - 3, "isl");
+
+ read_link_file(src_file_path, link_filepath);
+
+ filepath = tablespace_filepath(src_file_path);
+
+ if (filepath != NULL) {
+ dirname_part(filedir, filepath, &filedir_len);
+
+ dst_file_path = filepath + filedir_len;
+ dst_dir = filedir;
+
+ if (!directory_exists(dst_dir, true)) {
+ ret = false;
+ goto cleanup;
+ }
+
+ datasink = ds_create(dst_dir, DS_TYPE_LOCAL);
+ }
+
+ free(link_filepath);
+ }
+
+ ret = (xtrabackup_copy_back ?
+ copy_file(datasink, src_file_path, dst_file_path, thread_n) :
+ move_file(datasink, src_file_path, dst_file_path,
+ dst_dir, thread_n));
+
+cleanup:
+
+ if (datasink != ds_data) {
+ ds_destroy(datasink);
+ }
+
+ return(ret);
+}
+
+
+
+
+bool
+backup_files(const char *from, bool prep_mode)
+{
+ char rsync_tmpfile_name[FN_REFLEN];
+ FILE *rsync_tmpfile = NULL;
+ datadir_iter_t *it;
+ datadir_node_t node;
+ bool ret = true;
+
+ if (prep_mode && !opt_rsync) {
+ return(true);
+ }
+
+ if (opt_rsync) {
+ snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name),
+ "%s/%s%d", opt_mysql_tmpdir,
+ "xtrabackup_rsyncfiles_pass",
+ prep_mode ? 1 : 2);
+ rsync_tmpfile = fopen(rsync_tmpfile_name, "w");
+ if (rsync_tmpfile == NULL) {
+ msg("Error: can't create file %s\n",
+ rsync_tmpfile_name);
+ return(false);
+ }
+ }
+
+ msg_ts("Starting %s non-InnoDB tables and files\n",
+ prep_mode ? "prep copy of" : "to backup");
+
+ datadir_node_init(&node);
+ it = datadir_iter_new(from);
+
+ while (datadir_iter_next(it, &node)) {
+
+ if (!node.is_empty_dir) {
+ if (opt_rsync) {
+ ret = datafile_rsync_backup(node.filepath,
+ !prep_mode, rsync_tmpfile);
+ } else {
+ ret = datafile_copy_backup(node.filepath, 1);
+ }
+ if (!ret) {
+ msg("Failed to copy file %s\n", node.filepath);
+ goto out;
+ }
+ } else if (!prep_mode) {
+ /* backup fake file into empty directory */
+ char path[FN_REFLEN];
+ ut_snprintf(path, sizeof(path),
+ "%s/db.opt", node.filepath);
+ if (!(ret = backup_file_printf(
+ trim_dotslash(path), "%s", ""))) {
+ msg("Failed to create file %s\n", path);
+ goto out;
+ }
+ }
+ }
+
+ if (opt_rsync) {
+ std::stringstream cmd;
+ int err;
+
+ if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
+ fprintf(rsync_tmpfile, "%s\n", buffer_pool_filename);
+ rsync_list.insert(buffer_pool_filename);
+ }
+ if (file_exists("ib_lru_dump")) {
+ fprintf(rsync_tmpfile, "%s\n", "ib_lru_dump");
+ rsync_list.insert("ib_lru_dump");
+ }
+
+ fclose(rsync_tmpfile);
+ rsync_tmpfile = NULL;
+
+ cmd << "rsync -t . --files-from=" << rsync_tmpfile_name
+ << " " << xtrabackup_target_dir;
+
+ msg_ts("Starting rsync as: %s\n", cmd.str().c_str());
+ if ((err = system(cmd.str().c_str()) && !prep_mode) != 0) {
+ msg_ts("Error: rsync failed with error code %d\n", err);
+ ret = false;
+ goto out;
+ }
+ msg_ts("rsync finished successfully.\n");
+
+ if (!prep_mode && !opt_no_lock) {
+ char path[FN_REFLEN];
+ char dst_path[FN_REFLEN];
+ char *newline;
+
+ /* Remove files that have been removed between first and
+ second passes. Cannot use "rsync --delete" because it
+ does not work with --files-from. */
+ snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name),
+ "%s/%s", opt_mysql_tmpdir,
+ "xtrabackup_rsyncfiles_pass1");
+
+ rsync_tmpfile = fopen(rsync_tmpfile_name, "r");
+ if (rsync_tmpfile == NULL) {
+ msg("Error: can't open file %s\n",
+ rsync_tmpfile_name);
+ return(false);
+ }
+
+ while (fgets(path, sizeof(path), rsync_tmpfile)) {
+
+ newline = strchr(path, '\n');
+ if (newline) {
+ *newline = 0;
+ }
+ if (rsync_list.count(path) < 1) {
+ snprintf(dst_path, sizeof(dst_path),
+ "%s/%s", xtrabackup_target_dir,
+ path);
+ msg_ts("Removing %s\n", dst_path);
+ unlink(dst_path);
+ }
+ }
+
+ fclose(rsync_tmpfile);
+ rsync_tmpfile = NULL;
+ }
+ }
+
+ msg_ts("Finished %s non-InnoDB tables and files\n",
+ prep_mode ? "a prep copy of" : "backing up");
+
+out:
+ datadir_iter_free(it);
+ datadir_node_free(&node);
+
+ if (rsync_tmpfile != NULL) {
+ fclose(rsync_tmpfile);
+ }
+
+ return(ret);
+}
+
+bool
+backup_start()
+{
+ if (!opt_no_lock) {
+ if (opt_safe_slave_backup) {
+ if (!wait_for_safe_slave(mysql_connection)) {
+ return(false);
+ }
+ }
+
+ if (!backup_files(fil_path_to_mysql_datadir, true)) {
+ return(false);
+ }
+
+ history_lock_time = time(NULL);
+
+ if (!lock_tables(mysql_connection)) {
+ return(false);
+ }
+ }
+
+ if (!backup_files(fil_path_to_mysql_datadir, false)) {
+ return(false);
+ }
+
+	// There is no need to stop the slave thread before copying non-InnoDB
+	// data when the --no-lock option is used, because --no-lock requires
+	// that no DDL or DML against non-transactional tables can occur.
+ if (opt_no_lock) {
+ if (opt_safe_slave_backup) {
+ if (!wait_for_safe_slave(mysql_connection)) {
+ return(false);
+ }
+ }
+ }
+
+ if (opt_slave_info) {
+ lock_binlog_maybe(mysql_connection);
+
+ if (!write_slave_info(mysql_connection)) {
+ return(false);
+ }
+ }
+
+ /* The only reason why Galera/binlog info is written before
+ wait_for_ibbackup_log_copy_finish() is that after that call the xtrabackup
+	binary will start streaming a temporary copy of the redo log to stdout and
+ thus, any streaming from innobackupex would interfere. The only way to
+ avoid that is to have a single process, i.e. merge innobackupex and
+ xtrabackup. */
+ if (opt_galera_info) {
+ if (!write_galera_info(mysql_connection)) {
+ return(false);
+ }
+ write_current_binlog_file(mysql_connection);
+ }
+
+ if (opt_binlog_info == BINLOG_INFO_ON) {
+
+ lock_binlog_maybe(mysql_connection);
+ write_binlog_info(mysql_connection);
+ }
+
+ if (have_flush_engine_logs) {
+ msg_ts("Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS...\n");
+ xb_mysql_query(mysql_connection,
+ "FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false);
+ }
+
+ return(true);
+}
+
+
+bool
+backup_finish()
+{
+ /* release all locks */
+ if (!opt_no_lock) {
+ unlock_all(mysql_connection);
+ history_lock_time = 0;
+ } else {
+ history_lock_time = time(NULL) - history_lock_time;
+ }
+
+ if (opt_safe_slave_backup && sql_thread_started) {
+ msg("Starting slave SQL thread\n");
+ xb_mysql_query(mysql_connection,
+ "START SLAVE SQL_THREAD", false);
+ }
+
+ /* Copy buffer pool dump or LRU dump */
+ if (!opt_rsync) {
+ if (buffer_pool_filename && file_exists(buffer_pool_filename)) {
+ const char *dst_name;
+
+ dst_name = trim_dotslash(buffer_pool_filename);
+ copy_file(ds_data, buffer_pool_filename, dst_name, 0);
+ }
+ if (file_exists("ib_lru_dump")) {
+ copy_file(ds_data, "ib_lru_dump", "ib_lru_dump", 0);
+ }
+ }
+
+ msg_ts("Backup created in directory '%s'\n", xtrabackup_target_dir);
+ if (mysql_binlog_position != NULL) {
+ msg("MySQL binlog position: %s\n", mysql_binlog_position);
+ }
+ if (mysql_slave_position && opt_slave_info) {
+ msg("MySQL slave binlog position: %s\n",
+ mysql_slave_position);
+ }
+
+ if (!write_backup_config_file()) {
+ return(false);
+ }
+
+ if (!write_xtrabackup_info(mysql_connection)) {
+ return(false);
+ }
+
+
+
+ return(true);
+}
+
+bool
+ibx_copy_incremental_over_full()
+{
+ const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
+ "MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par",
+ NULL};
+ const char *sup_files[] = {"xtrabackup_binlog_info",
+ "xtrabackup_galera_info",
+ "xtrabackup_slave_info",
+ "xtrabackup_info",
+ "ib_lru_dump",
+ NULL};
+ datadir_iter_t *it = NULL;
+ datadir_node_t node;
+ bool ret = true;
+ char path[FN_REFLEN];
+ int i;
+
+ datadir_node_init(&node);
+
+ /* If we were applying an incremental change set, we need to make
+ sure non-InnoDB files and xtrabackup_* metainfo files are copied
+ to the full backup directory. */
+
+ if (xtrabackup_incremental) {
+
+ ds_data = ds_create(xtrabackup_target_dir, DS_TYPE_LOCAL);
+
+ it = datadir_iter_new(xtrabackup_incremental_dir);
+
+ while (datadir_iter_next(it, &node)) {
+
+ /* copy only non-innodb files */
+
+ if (node.is_empty_dir
+ || !filename_matches(node.filepath, ext_list)) {
+ continue;
+ }
+
+ if (file_exists(node.filepath_rel)) {
+ unlink(node.filepath_rel);
+ }
+
+ if (!(ret = copy_file(ds_data, node.filepath,
+ node.filepath_rel, 1))) {
+ msg("Failed to copy file %s\n",
+ node.filepath);
+ goto cleanup;
+ }
+ }
+
+ /* copy buffer pool dump */
+ if (innobase_buffer_pool_filename) {
+ const char *src_name;
+
+ src_name = trim_dotslash(innobase_buffer_pool_filename);
+
+ snprintf(path, sizeof(path), "%s/%s",
+ xtrabackup_incremental_dir,
+ src_name);
+
+ if (file_exists(path)) {
+ copy_file(ds_data, path,
+ innobase_buffer_pool_filename, 0);
+ }
+ }
+
+ /* copy supplementary files */
+
+ for (i = 0; sup_files[i]; i++) {
+ snprintf(path, sizeof(path), "%s/%s",
+ xtrabackup_incremental_dir,
+ sup_files[i]);
+
+ if (file_exists(path))
+ {
+ if (file_exists(sup_files[i])) {
+ unlink(sup_files[i]);
+ }
+ copy_file(ds_data, path, sup_files[i], 0);
+ }
+ }
+
+ }
+
+cleanup:
+ if (it != NULL) {
+ datadir_iter_free(it);
+ }
+
+ if (ds_data != NULL) {
+ ds_destroy(ds_data);
+ }
+
+ datadir_node_free(&node);
+
+ return(ret);
+}
+
+bool
+ibx_cleanup_full_backup()
+{
+ const char *ext_list[] = {"delta", "meta", "ibd", NULL};
+ datadir_iter_t *it = NULL;
+ datadir_node_t node;
+ bool ret = true;
+
+ datadir_node_init(&node);
+
+ /* If we are applying an incremental change set, we need to make
+ sure non-InnoDB files are cleaned up from full backup dir before
+ we copy files from incremental dir. */
+
+ it = datadir_iter_new(xtrabackup_target_dir);
+
+ while (datadir_iter_next(it, &node)) {
+
+ if (node.is_empty_dir) {
+ rmdir(node.filepath);
+ }
+
+ if (xtrabackup_incremental && !node.is_empty_dir
+ && !filename_matches(node.filepath, ext_list)) {
+ unlink(node.filepath);
+ }
+ }
+
+ datadir_iter_free(it);
+
+ datadir_node_free(&node);
+
+ return(ret);
+}
+
+bool
+apply_log_finish()
+{
+ if (!ibx_cleanup_full_backup()
+ || !ibx_copy_incremental_over_full()) {
+ return(false);
+ }
+
+ return(true);
+}
+
+bool
+copy_back()
+{
+ char *innobase_data_file_path_copy;
+ ulint i;
+ bool ret;
+ datadir_iter_t *it = NULL;
+ datadir_node_t node;
+ char *dst_dir;
+
+ memset(&node, 0, sizeof(node));
+
+ if (!opt_force_non_empty_dirs) {
+ if (!directory_exists_and_empty(mysql_data_home,
+ "Original data")) {
+ return(false);
+ }
+ } else {
+ if (!directory_exists(mysql_data_home, true)) {
+ return(false);
+ }
+ }
+ if (srv_undo_dir && *srv_undo_dir
+ && !directory_exists(srv_undo_dir, true)) {
+ return(false);
+ }
+ if (innobase_data_home_dir && *innobase_data_home_dir
+ && !directory_exists(innobase_data_home_dir, true)) {
+ return(false);
+ }
+ if (srv_log_group_home_dir && *srv_log_group_home_dir
+ && !directory_exists(srv_log_group_home_dir, true)) {
+ return(false);
+ }
+
+ /* cd to backup directory */
+ if (my_setwd(xtrabackup_target_dir, MYF(MY_WME)))
+ {
+ msg("cannot my_setwd %s\n", xtrabackup_target_dir);
+ return(false);
+ }
+
+ /* parse data file path */
+
+ if (!innobase_data_file_path) {
+ innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
+ }
+ innobase_data_file_path_copy = strdup(innobase_data_file_path);
+
+ if (!(ret = srv_parse_data_file_paths_and_sizes(
+ innobase_data_file_path_copy))) {
+ msg("syntax error in innodb_data_file_path\n");
+ return(false);
+ }
+
+ srv_max_n_threads = 1000;
+ os_sync_mutex = NULL;
+ ut_mem_init();
+	/* temporary dummy value to avoid a crash */
+ srv_page_size_shift = 14;
+ srv_page_size = (1 << srv_page_size_shift);
+ os_sync_init();
+ sync_init();
+ os_io_init_simple();
+ mem_init(srv_mem_pool_size);
+ ut_crc32_init();
+
+ /* copy undo tablespaces */
+ if (srv_undo_tablespaces > 0) {
+
+ dst_dir = (srv_undo_dir && *srv_undo_dir)
+ ? srv_undo_dir : mysql_data_home;
+
+ ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);
+
+ for (i = 1; i <= srv_undo_tablespaces; i++) {
+ char filename[20];
+ sprintf(filename, "undo%03lu", i);
+ if (!(ret = copy_or_move_file(filename, filename,
+ dst_dir, 1))) {
+ goto cleanup;
+ }
+ }
+
+ ds_destroy(ds_data);
+ ds_data = NULL;
+ }
+
+ /* copy redo logs */
+
+ dst_dir = (srv_log_group_home_dir && *srv_log_group_home_dir)
+ ? srv_log_group_home_dir : mysql_data_home;
+
+ ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);
+
+ for (i = 0; i < (ulong)innobase_log_files_in_group; i++) {
+ char filename[20];
+ sprintf(filename, "ib_logfile%lu", i);
+
+ if (!file_exists(filename)) {
+ continue;
+ }
+
+ if (!(ret = copy_or_move_file(filename, filename,
+ dst_dir, 1))) {
+ goto cleanup;
+ }
+ }
+
+ ds_destroy(ds_data);
+ ds_data = NULL;
+
+ /* copy innodb system tablespace(s) */
+
+ dst_dir = (innobase_data_home_dir && *innobase_data_home_dir)
+ ? innobase_data_home_dir : mysql_data_home;
+
+ ds_data = ds_create(dst_dir, DS_TYPE_LOCAL);
+
+ for (i = 0; i < srv_n_data_files; i++) {
+ const char *filename = base_name(srv_data_file_names[i]);
+
+ if (!(ret = copy_or_move_file(filename, srv_data_file_names[i],
+ dst_dir, 1))) {
+ goto cleanup;
+ }
+ }
+
+ ds_destroy(ds_data);
+ ds_data = NULL;
+
+ /* copy the rest of tablespaces */
+ ds_data = ds_create(mysql_data_home, DS_TYPE_LOCAL);
+
+ it = datadir_iter_new(".", false);
+
+ datadir_node_init(&node);
+
+ while (datadir_iter_next(it, &node)) {
+ const char *ext_list[] = {"backup-my.cnf", "xtrabackup_logfile",
+ "xtrabackup_binary", "xtrabackup_binlog_info",
+ "xtrabackup_checkpoints", ".qp", ".pmap", ".tmp",
+ ".xbcrypt", NULL};
+ const char *filename;
+ char c_tmp;
+ int i_tmp;
+ bool is_ibdata_file;
+
+ /* create empty directories */
+ if (node.is_empty_dir) {
+ char path[FN_REFLEN];
+
+ snprintf(path, sizeof(path), "%s/%s",
+ mysql_data_home, node.filepath_rel);
+
+ msg_ts("[%02u] Creating directory %s\n", 1, path);
+
+ if (mkdirp(path, 0777, MYF(0)) < 0) {
+ char errbuf[MYSYS_STRERROR_SIZE];
+
+ msg("Can not create directory %s: %s\n",
+ path, my_strerror(errbuf,
+ sizeof(errbuf), my_errno));
+ ret = false;
+
+ goto cleanup;
+
+ }
+
+ msg_ts("[%02u] ...done.", 1);
+
+ continue;
+ }
+
+ filename = base_name(node.filepath);
+
+ /* skip .qp and .xbcrypt files */
+ if (filename_matches(filename, ext_list)) {
+ continue;
+ }
+
+ /* skip undo tablespaces */
+ if (sscanf(filename, "undo%d%c", &i_tmp, &c_tmp) == 1) {
+ continue;
+ }
+
+ /* skip redo logs */
+ if (sscanf(filename, "ib_logfile%d%c", &i_tmp, &c_tmp) == 1) {
+ continue;
+ }
+
+ /* skip innodb data files */
+ is_ibdata_file = false;
+ for (i = 0; i < srv_n_data_files; i++) {
+ const char *ibfile;
+
+ ibfile = base_name(srv_data_file_names[i]);
+
+ if (strcmp(ibfile, filename) == 0) {
+ is_ibdata_file = true;
+ continue;
+ }
+ }
+ if (is_ibdata_file) {
+ continue;
+ }
+
+ if (!(ret = copy_or_move_file(node.filepath, node.filepath_rel,
+ mysql_data_home, 1))) {
+ goto cleanup;
+ }
+ }
+
+	/* copy buffer pool dump */
+
+ if (innobase_buffer_pool_filename) {
+ const char *src_name;
+ char path[FN_REFLEN];
+
+ src_name = trim_dotslash(innobase_buffer_pool_filename);
+
+ snprintf(path, sizeof(path), "%s/%s",
+ mysql_data_home,
+ src_name);
+
+ /* could be already copied with other files
+ from data directory */
+ if (file_exists(src_name) &&
+ !file_exists(innobase_buffer_pool_filename)) {
+ copy_or_move_file(src_name,
+ innobase_buffer_pool_filename,
+ mysql_data_home, 0);
+ }
+ }
+
+cleanup:
+ if (it != NULL) {
+ datadir_iter_free(it);
+ }
+
+ datadir_node_free(&node);
+
+ free(innobase_data_file_path_copy);
+
+ if (ds_data != NULL) {
+ ds_destroy(ds_data);
+ }
+
+ ds_data = NULL;
+
+ sync_close();
+ sync_initialized = FALSE;
+ os_sync_free();
+ mem_close();
+ os_sync_mutex = NULL;
+ ut_free_all_mem();
+
+ return(ret);
+}
+
+bool
+decrypt_decompress_file(const char *filepath, uint thread_n)
+{
+ std::stringstream cmd, message;
+ char *dest_filepath = strdup(filepath);
+ bool needs_action = false;
+
+ cmd << "cat " << filepath;
+
+ if (ends_with(filepath, ".xbcrypt") && opt_decrypt) {
+ cmd << " | xbcrypt --decrypt --encrypt-algo="
+ << xtrabackup_encrypt_algo_names[opt_decrypt_algo];
+ if (xtrabackup_encrypt_key) {
+ cmd << " --encrypt-key=" << xtrabackup_encrypt_key;
+ } else {
+ cmd << " --encrypt-key-file="
+ << xtrabackup_encrypt_key_file;
+ }
+ dest_filepath[strlen(dest_filepath) - 8] = 0;
+ message << "decrypting";
+ needs_action = true;
+ }
+
+ if (opt_decompress
+ && (ends_with(filepath, ".qp")
+ || (ends_with(filepath, ".qp.xbcrypt")
+ && opt_decrypt))) {
+ cmd << " | qpress -dio ";
+ dest_filepath[strlen(dest_filepath) - 3] = 0;
+ if (needs_action) {
+ message << " and ";
+ }
+ message << "decompressing";
+ needs_action = true;
+ }
+
+ cmd << " > " << dest_filepath;
+ message << " " << filepath;
+
+ free(dest_filepath);
+
+ if (needs_action) {
+
+ msg_ts("[%02u] %s\n", thread_n, message.str().c_str());
+
+ if (system(cmd.str().c_str()) != 0) {
+ return(false);
+ }
+
+ if (opt_remove_original) {
+ msg_ts("[%02u] removing %s\n", thread_n, filepath);
+ if (my_delete(filepath, MYF(MY_WME)) != 0) {
+ return(false);
+ }
+ }
+ }
+
+ return(true);
+}
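To illustrate, for a hypothetical file db/t1.ibd.qp.xbcrypt with both --decrypt and --decompress given, the function above composes and runs a pipeline of roughly this shape (the algorithm and key file name are illustrative):

	cat db/t1.ibd.qp.xbcrypt | xbcrypt --decrypt --encrypt-algo=AES256 --encrypt-key-file=keyfile | qpress -dio > db/t1.ibd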
+
+static
+os_thread_ret_t
+decrypt_decompress_thread_func(void *arg)
+{
+ bool ret = true;
+ datadir_node_t node;
+ datadir_thread_ctxt_t *ctxt = (datadir_thread_ctxt_t *)(arg);
+
+ datadir_node_init(&node);
+
+ while (datadir_iter_next(ctxt->it, &node)) {
+
+ /* skip empty directories in backup */
+ if (node.is_empty_dir) {
+ continue;
+ }
+
+ if (!ends_with(node.filepath, ".qp")
+ && !ends_with(node.filepath, ".xbcrypt")) {
+ continue;
+ }
+
+ if (!(ret = decrypt_decompress_file(node.filepath,
+ ctxt->n_thread))) {
+ goto cleanup;
+ }
+ }
+
+cleanup:
+
+ datadir_node_free(&node);
+
+ os_mutex_enter(ctxt->count_mutex);
+ --(*ctxt->count);
+ os_mutex_exit(ctxt->count_mutex);
+
+ ctxt->ret = ret;
+
+ os_thread_exit(NULL);
+ OS_THREAD_DUMMY_RETURN;
+}
+
+bool
+decrypt_decompress()
+{
+ bool ret;
+ datadir_iter_t *it = NULL;
+
+ srv_max_n_threads = 1000;
+ os_sync_mutex = NULL;
+ ut_mem_init();
+ os_sync_init();
+ sync_init();
+
+ /* cd to backup directory */
+ if (my_setwd(xtrabackup_target_dir, MYF(MY_WME)))
+ {
+ msg("cannot my_setwd %s\n", xtrabackup_target_dir);
+ return(false);
+ }
+
+ /* copy the rest of tablespaces */
+ ds_data = ds_create(".", DS_TYPE_LOCAL);
+
+ it = datadir_iter_new(".", false);
+
+ ut_a(xtrabackup_parallel >= 0);
+
+ ret = run_data_threads(it, decrypt_decompress_thread_func,
+ xtrabackup_parallel ? xtrabackup_parallel : 1);
+
+ if (it != NULL) {
+ datadir_iter_free(it);
+ }
+
+ if (ds_data != NULL) {
+ ds_destroy(ds_data);
+ }
+
+ ds_data = NULL;
+
+ sync_close();
+ sync_initialized = FALSE;
+ os_sync_free();
+ os_sync_mutex = NULL;
+ ut_free_all_mem();
+
+ return(ret);
+}
+
+void
+version_check()
+{
+ if (opt_password != NULL) {
+ setenv("option_mysql_password", opt_password, 1);
+ }
+ if (opt_user != NULL) {
+ setenv("option_mysql_user", opt_user, 1);
+ }
+ if (opt_host != NULL) {
+ setenv("option_mysql_host", opt_host, 1);
+ }
+ if (opt_socket != NULL) {
+ setenv("option_mysql_socket", opt_socket, 1);
+ }
+ if (opt_port != 0) {
+ char port[20];
+ snprintf(port, sizeof(port), "%u", opt_port);
+ setenv("option_mysql_port", port, 1);
+ }
+
+ FILE *pipe = popen("perl", "w");
+ if (pipe == NULL) {
+ return;
+ }
+
+ fputs((const char *)version_check_pl, pipe);
+
+ pclose(pipe);
+}
diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h
new file mode 100644
index 00000000000..c8fc5fc8ba9
--- /dev/null
+++ b/extra/mariabackup/backup_copy.h
@@ -0,0 +1,51 @@
+
+#ifndef XTRABACKUP_BACKUP_COPY_H
+#define XTRABACKUP_BACKUP_COPY_H
+
+#include <my_global.h>
+#include "datasink.h"
+
+/* special files */
+#define XTRABACKUP_SLAVE_INFO "xtrabackup_slave_info"
+#define XTRABACKUP_GALERA_INFO "xtrabackup_galera_info"
+#define XTRABACKUP_BINLOG_INFO "xtrabackup_binlog_info"
+#define XTRABACKUP_INFO "xtrabackup_info"
+
+extern bool binlog_locked;
+
+bool
+backup_file_printf(const char *filename, const char *fmt, ...)
+ ATTRIBUTE_FORMAT(printf, 2, 0);
+
+/************************************************************************
+Return true if first and second arguments are the same path. */
+bool
+equal_paths(const char *first, const char *second);
+
+/************************************************************************
+Copy file for backup/restore.
+@return true in case of success. */
+bool
+copy_file(ds_ctxt_t *datasink,
+ const char *src_file_path,
+ const char *dst_file_path,
+ uint thread_n);
+
+bool
+backup_start();
+bool
+backup_finish();
+bool
+apply_log_finish();
+bool
+copy_back();
+bool
+decrypt_decompress();
+void
+version_check();
+bool
+is_path_separator(char);
+bool
+directory_exists(const char *dir, bool create);
+
+#endif
diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc
new file mode 100644
index 00000000000..1ae8d10053e
--- /dev/null
+++ b/extra/mariabackup/backup_mysql.cc
@@ -0,0 +1,1756 @@
+/******************************************************
+hot backup tool for InnoDB
+(c) 2009-2015 Percona LLC and/or its affiliates
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************
+
+This file incorporates work covered by the following copyright and
+permission notice:
+
+Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA
+
+*******************************************************/
+
+#include <my_global.h>
+#include <mysql.h>
+#include <mysqld.h>
+#include <my_sys.h>
+#include <string.h>
+#include <limits>
+#include "common.h"
+#include "xtrabackup.h"
+#include "xtrabackup_version.h"
+#include "backup_copy.h"
+#include "backup_mysql.h"
+#include "mysqld.h"
+
+
+char *tool_name;
+char tool_args[2048];
+
+/* mysql flavor and version */
+mysql_flavor_t server_flavor = FLAVOR_UNKNOWN;
+unsigned long mysql_server_version = 0;
+
+/* server capabilities */
+bool have_changed_page_bitmaps = false;
+bool have_backup_locks = false;
+bool have_backup_safe_binlog_info = false;
+bool have_lock_wait_timeout = false;
+bool have_galera_enabled = false;
+bool have_flush_engine_logs = false;
+bool have_multi_threaded_slave = false;
+bool have_gtid_slave = false;
+
+/* Kill long selects */
+os_thread_id_t kill_query_thread_id;
+os_event_t kill_query_thread_started;
+os_event_t kill_query_thread_stopped;
+os_event_t kill_query_thread_stop;
+
+bool sql_thread_started = false;
+char *mysql_slave_position = NULL;
+char *mysql_binlog_position = NULL;
+char *buffer_pool_filename = NULL;
+
+/* History on server */
+time_t history_start_time;
+time_t history_end_time;
+time_t history_lock_time;
+
+MYSQL *mysql_connection;
+
+extern "C" {
+MYSQL * STDCALL
+cli_mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
+ const char *passwd, const char *db,
+ uint port, const char *unix_socket,ulong client_flag);
+}
+
+#define mysql_real_connect cli_mysql_real_connect
+
+
+MYSQL *
+xb_mysql_connect()
+{
+ MYSQL *connection = mysql_init(NULL);
+ char mysql_port_str[std::numeric_limits<int>::digits10 + 3];
+
+ sprintf(mysql_port_str, "%d", opt_port);
+
+ if (connection == NULL) {
+		msg("Failed to init MySQL struct.\n");
+ return(NULL);
+ }
+
+ if (!opt_secure_auth) {
+ mysql_options(connection, MYSQL_SECURE_AUTH,
+ (char *) &opt_secure_auth);
+ }
+
+ msg_ts("Connecting to MySQL server host: %s, user: %s, password: %s, "
+ "port: %s, socket: %s\n", opt_host ? opt_host : "localhost",
+ opt_user ? opt_user : "not set",
+ opt_password ? "set" : "not set",
+ opt_port != 0 ? mysql_port_str : "not set",
+ opt_socket ? opt_socket : "not set");
+
+#ifdef HAVE_OPENSSL
+ if (opt_use_ssl)
+ {
+ mysql_ssl_set(connection, opt_ssl_key, opt_ssl_cert,
+ opt_ssl_ca, opt_ssl_capath,
+ opt_ssl_cipher);
+ mysql_options(connection, MYSQL_OPT_SSL_CRL, opt_ssl_crl);
+ mysql_options(connection, MYSQL_OPT_SSL_CRLPATH,
+ opt_ssl_crlpath);
+ }
+ mysql_options(connection,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
+ (char*)&opt_ssl_verify_server_cert);
+#if !defined(HAVE_YASSL)
+ if (opt_server_public_key && *opt_server_public_key)
+ mysql_options(connection, MYSQL_SERVER_PUBLIC_KEY,
+ opt_server_public_key);
+#endif
+#endif
+
+ if (!mysql_real_connect(connection,
+ opt_host ? opt_host : "localhost",
+ opt_user,
+ opt_password,
+ "" /*database*/, opt_port,
+ opt_socket, 0)) {
+ msg("Failed to connect to MySQL server: %s.\n",
+ mysql_error(connection));
+ mysql_close(connection);
+ return(NULL);
+ }
+
+ xb_mysql_query(connection, "SET SESSION wait_timeout=2147483",
+ false, true);
+
+ return(connection);
+}
+
+/*********************************************************************//**
+Execute mysql query. */
+MYSQL_RES *
+xb_mysql_query(MYSQL *connection, const char *query, bool use_result,
+ bool die_on_error)
+{
+ MYSQL_RES *mysql_result = NULL;
+
+ if (mysql_query(connection, query)) {
+ msg("Error: failed to execute query %s: %s\n", query,
+ mysql_error(connection));
+ if (die_on_error) {
+ exit(EXIT_FAILURE);
+ }
+ return(NULL);
+ }
+
+ /* store result set on client if there is a result */
+ if (mysql_field_count(connection) > 0) {
+ if ((mysql_result = mysql_store_result(connection)) == NULL) {
+ msg("Error: failed to fetch query result %s: %s\n",
+ query, mysql_error(connection));
+ exit(EXIT_FAILURE);
+ }
+
+ if (!use_result) {
+ mysql_free_result(mysql_result);
+ }
+ }
+
+ return mysql_result;
+}
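+
+/* Illustrative usage of xb_mysql_query() (mirrors the call sites below):
+callers that need the rows pass use_result = true and must free the
+result themselves, e.g.
+
+	MYSQL_RES *res = xb_mysql_query(connection, "SHOW STATUS", true);
+	MYSQL_ROW row;
+	while ((row = mysql_fetch_row(res)) != NULL) {
+		... process row ...
+	}
+	mysql_free_result(res);
+
+With use_result = false any result set is fetched and freed internally,
+so the return value must not be used in that case. */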
+
+
+struct mysql_variable {
+ const char *name;
+ char **value;
+};
+
+
+static
+void
+read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars,
+ bool vertical_result)
+{
+ MYSQL_RES *mysql_result;
+ MYSQL_ROW row;
+ mysql_variable *var;
+
+ mysql_result = xb_mysql_query(connection, query, true);
+
+ ut_ad(!vertical_result || mysql_num_fields(mysql_result) == 2);
+
+ if (vertical_result) {
+ while ((row = mysql_fetch_row(mysql_result))) {
+ char *name = row[0];
+ char *value = row[1];
+ for (var = vars; var->name; var++) {
+ if (strcmp(var->name, name) == 0
+ && value != NULL) {
+ *(var->value) = strdup(value);
+ }
+ }
+ }
+ } else {
+ MYSQL_FIELD *field;
+
+ if ((row = mysql_fetch_row(mysql_result)) != NULL) {
+ int i = 0;
+ while ((field = mysql_fetch_field(mysql_result))
+ != NULL) {
+ char *name = field->name;
+ char *value = row[i];
+ for (var = vars; var->name; var++) {
+ if (strcmp(var->name, name) == 0
+ && value != NULL) {
+ *(var->value) = strdup(value);
+ }
+ }
+ ++i;
+ }
+ }
+ }
+
+ mysql_free_result(mysql_result);
+}
+
+
+static
+void
+free_mysql_variables(mysql_variable *vars)
+{
+ mysql_variable *var;
+
+ for (var = vars; var->name; var++) {
+ free(*(var->value));
+ }
+}
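+
+/* Illustrative pattern for the two helpers above (mirrors the call sites
+below): a mysql_variable array maps names to output string pointers and
+must be terminated with a {NULL, NULL} entry; every matched value is
+strdup()ed and must eventually be released with free_mysql_variables().
+
+	char *version = NULL;
+	mysql_variable vars[] = {{"version", &version}, {NULL, NULL}};
+	read_mysql_variables(connection, "SHOW VARIABLES", vars, true);
+	... use version ...
+	free_mysql_variables(vars);
+*/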
+
+
+static
+char *
+read_mysql_one_value(MYSQL *connection, const char *query)
+{
+ MYSQL_RES *mysql_result;
+ MYSQL_ROW row;
+ char *result = NULL;
+
+ mysql_result = xb_mysql_query(connection, query, true);
+
+ ut_ad(mysql_num_fields(mysql_result) == 1);
+
+ if ((row = mysql_fetch_row(mysql_result))) {
+ result = strdup(row[0]);
+ }
+
+ mysql_free_result(mysql_result);
+
+ return(result);
+}
+
+static
+bool
+check_server_version(unsigned long version_number,
+ const char *version_string,
+ const char *version_comment,
+ const char *innodb_version)
+{
+ bool version_supported = false;
+ bool mysql51 = false;
+
+ mysql_server_version = version_number;
+
+ server_flavor = FLAVOR_UNKNOWN;
+ if (strstr(version_comment, "Percona") != NULL) {
+ server_flavor = FLAVOR_PERCONA_SERVER;
+ } else if (strstr(version_comment, "MariaDB") != NULL ||
+ strstr(version_string, "MariaDB") != NULL) {
+ server_flavor = FLAVOR_MARIADB;
+ } else if (strstr(version_comment, "MySQL") != NULL) {
+ server_flavor = FLAVOR_MYSQL;
+ }
+
+ mysql51 = version_number > 50100 && version_number < 50500;
+ version_supported = version_supported
+ || (mysql51 && innodb_version != NULL);
+ version_supported = version_supported
+ || (version_number > 50500 && version_number < 50700);
+ version_supported = version_supported
+ || ((version_number > 100000 && version_number < 100300)
+ && server_flavor == FLAVOR_MARIADB);
+
+ if (mysql51 && innodb_version == NULL) {
+ msg("Error: Built-in InnoDB in MySQL 5.1 is not "
+ "supported in this release. You can either use "
+ "Percona XtraBackup 2.0, or upgrade to InnoDB "
+ "plugin.\n");
+ } else if (!version_supported) {
+ msg("Error: Unsupported server version: '%s'. Please "
+ "report a bug at "
+ "https://bugs.launchpad.net/percona-xtrabackup\n",
+ version_string);
+ }
+
+ return(version_supported);
+}
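+
+/* As coded above, the supported servers are: MySQL 5.1 with the InnoDB
+plugin, MySQL/Percona Server 5.5 and 5.6, and MariaDB versions from 10.0
+up to (but not including) 10.3. */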
+
+/*********************************************************************//**
+Retrieve options important for XtraBackup from the MySQL server.
+@return true on success. */
+bool
+get_mysql_vars(MYSQL *connection)
+{
+ char *gtid_mode_var = NULL;
+ char *version_var = NULL;
+ char *version_comment_var = NULL;
+ char *innodb_version_var = NULL;
+ char *have_backup_locks_var = NULL;
+ char *have_backup_safe_binlog_info_var = NULL;
+ char *log_bin_var = NULL;
+ char *lock_wait_timeout_var= NULL;
+ char *wsrep_on_var = NULL;
+ char *slave_parallel_workers_var = NULL;
+ char *gtid_slave_pos_var = NULL;
+ char *innodb_buffer_pool_filename_var = NULL;
+ char *datadir_var = NULL;
+ char *innodb_log_group_home_dir_var = NULL;
+ char *innodb_log_file_size_var = NULL;
+ char *innodb_log_files_in_group_var = NULL;
+ char *innodb_data_file_path_var = NULL;
+ char *innodb_data_home_dir_var = NULL;
+ char *innodb_undo_directory_var = NULL;
+ char *innodb_page_size_var = NULL;
+
+ unsigned long server_version = mysql_get_server_version(connection);
+
+ bool ret = true;
+
+ mysql_variable mysql_vars[] = {
+ {"have_backup_locks", &have_backup_locks_var},
+ {"have_backup_safe_binlog_info",
+ &have_backup_safe_binlog_info_var},
+ {"log_bin", &log_bin_var},
+ {"lock_wait_timeout", &lock_wait_timeout_var},
+ {"gtid_mode", &gtid_mode_var},
+ {"version", &version_var},
+ {"version_comment", &version_comment_var},
+ {"innodb_version", &innodb_version_var},
+ {"wsrep_on", &wsrep_on_var},
+ {"slave_parallel_workers", &slave_parallel_workers_var},
+ {"gtid_slave_pos", &gtid_slave_pos_var},
+ {"innodb_buffer_pool_filename",
+ &innodb_buffer_pool_filename_var},
+ {"datadir", &datadir_var},
+ {"innodb_log_group_home_dir", &innodb_log_group_home_dir_var},
+ {"innodb_log_file_size", &innodb_log_file_size_var},
+ {"innodb_log_files_in_group", &innodb_log_files_in_group_var},
+ {"innodb_data_file_path", &innodb_data_file_path_var},
+ {"innodb_data_home_dir", &innodb_data_home_dir_var},
+ {"innodb_undo_directory", &innodb_undo_directory_var},
+ {"innodb_page_size", &innodb_page_size_var},
+ {NULL, NULL}
+ };
+
+ read_mysql_variables(connection, "SHOW VARIABLES",
+ mysql_vars, true);
+
+ if (have_backup_locks_var != NULL && !opt_no_backup_locks) {
+ have_backup_locks = true;
+ }
+
+ if (opt_binlog_info == BINLOG_INFO_AUTO) {
+
+ if (have_backup_safe_binlog_info_var != NULL)
+ opt_binlog_info = BINLOG_INFO_LOCKLESS;
+ else if (log_bin_var != NULL && !strcmp(log_bin_var, "ON"))
+ opt_binlog_info = BINLOG_INFO_ON;
+ else
+ opt_binlog_info = BINLOG_INFO_OFF;
+ }
+
+ if (have_backup_safe_binlog_info_var == NULL &&
+ opt_binlog_info == BINLOG_INFO_LOCKLESS) {
+
+ msg("Error: --binlog-info=LOCKLESS is not supported by the "
+ "server\n");
+ return(false);
+ }
+
+ if (lock_wait_timeout_var != NULL) {
+ have_lock_wait_timeout = true;
+ }
+
+ if (wsrep_on_var != NULL) {
+ have_galera_enabled = true;
+ }
+
+ /* Check server version compatibility and detect server flavor */
+
+ if (!(ret = check_server_version(server_version, version_var,
+ version_comment_var,
+ innodb_version_var))) {
+ goto out;
+ }
+
+ if (server_version > 50500) {
+ have_flush_engine_logs = true;
+ }
+
+ if (slave_parallel_workers_var != NULL
+ && atoi(slave_parallel_workers_var) > 0) {
+ have_multi_threaded_slave = true;
+ }
+
+ if (innodb_buffer_pool_filename_var != NULL) {
+ buffer_pool_filename = strdup(innodb_buffer_pool_filename_var);
+ }
+
+ if ((gtid_mode_var && strcmp(gtid_mode_var, "ON") == 0) ||
+ (gtid_slave_pos_var && *gtid_slave_pos_var)) {
+ have_gtid_slave = true;
+ }
+
+ msg("Using server version %s\n", version_var);
+
+ if (!(ret = detect_mysql_capabilities_for_backup())) {
+ goto out;
+ }
+
+ /* make sure datadir value is the same in configuration file */
+ if (check_if_param_set("datadir")) {
+ if (!directory_exists(mysql_data_home, false)) {
+ msg("Warning: option 'datadir' points to "
+ "nonexistent directory '%s'\n", mysql_data_home);
+ }
+ if (!directory_exists(datadir_var, false)) {
+ msg("Warning: MySQL variable 'datadir' points to "
+ "nonexistent directory '%s'\n", datadir_var);
+ }
+ if (!equal_paths(mysql_data_home, datadir_var)) {
+ msg("Warning: option 'datadir' has different "
+ "values:\n"
+ " '%s' in defaults file\n"
+ " '%s' in SHOW VARIABLES\n",
+ mysql_data_home, datadir_var);
+ }
+ }
+
+	/* get some default values if they are missing from my.cnf */
+ if (!check_if_param_set("datadir") && datadir_var && *datadir_var) {
+ strmake(mysql_real_data_home, datadir_var, FN_REFLEN - 1);
+ mysql_data_home= mysql_real_data_home;
+ }
+
+ if (!check_if_param_set("innodb_data_file_path")
+ && innodb_data_file_path_var && *innodb_data_file_path_var) {
+ innobase_data_file_path = my_strdup(
+ innodb_data_file_path_var, MYF(MY_FAE));
+ }
+
+ if (!check_if_param_set("innodb_data_home_dir")
+ && innodb_data_home_dir_var && *innodb_data_home_dir_var) {
+ innobase_data_home_dir = my_strdup(
+ innodb_data_home_dir_var, MYF(MY_FAE));
+ }
+
+ if (!check_if_param_set("innodb_log_group_home_dir")
+ && innodb_log_group_home_dir_var
+ && *innodb_log_group_home_dir_var) {
+ srv_log_group_home_dir = my_strdup(
+ innodb_log_group_home_dir_var, MYF(MY_FAE));
+ }
+
+ if (!check_if_param_set("innodb_undo_directory")
+ && innodb_undo_directory_var && *innodb_undo_directory_var) {
+ srv_undo_dir = my_strdup(
+ innodb_undo_directory_var, MYF(MY_FAE));
+ }
+
+ if (!check_if_param_set("innodb_log_files_in_group")
+ && innodb_log_files_in_group_var) {
+ char *endptr;
+
+ innobase_log_files_in_group = strtol(
+ innodb_log_files_in_group_var, &endptr, 10);
+ ut_ad(*endptr == 0);
+ }
+
+ if (!check_if_param_set("innodb_log_file_size")
+ && innodb_log_file_size_var) {
+ char *endptr;
+
+ innobase_log_file_size = strtoll(
+ innodb_log_file_size_var, &endptr, 10);
+ ut_ad(*endptr == 0);
+ }
+
+ if (!check_if_param_set("innodb_page_size") && innodb_page_size_var) {
+ char *endptr;
+
+ innobase_page_size = strtoll(
+ innodb_page_size_var, &endptr, 10);
+ ut_ad(*endptr == 0);
+ }
+
+out:
+ free_mysql_variables(mysql_vars);
+
+ return(ret);
+}
+
+/*********************************************************************//**
+Query the server to find out what backup capabilities it supports.
+@return true on success. */
+bool
+detect_mysql_capabilities_for_backup()
+{
+ const char *query = "SELECT 'INNODB_CHANGED_PAGES', COUNT(*) FROM "
+ "INFORMATION_SCHEMA.PLUGINS "
+ "WHERE PLUGIN_NAME LIKE 'INNODB_CHANGED_PAGES'";
+ char *innodb_changed_pages = NULL;
+ mysql_variable vars[] = {
+ {"INNODB_CHANGED_PAGES", &innodb_changed_pages}, {NULL, NULL}};
+
+ if (xtrabackup_incremental) {
+
+ read_mysql_variables(mysql_connection, query, vars, true);
+
+ ut_ad(innodb_changed_pages != NULL);
+
+ have_changed_page_bitmaps = (atoi(innodb_changed_pages) == 1);
+
+ /* INNODB_CHANGED_PAGES are listed in
+ INFORMATION_SCHEMA.PLUGINS in MariaDB, but
+ FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS
+ is not supported for versions below 10.1.6
+ (see MDEV-7472) */
+ if (server_flavor == FLAVOR_MARIADB &&
+ mysql_server_version < 100106) {
+ have_changed_page_bitmaps = false;
+ }
+
+ free_mysql_variables(vars);
+ }
+
+ /* do some sanity checks */
+ if (opt_galera_info && !have_galera_enabled) {
+ msg("--galera-info is specified on the command "
+ "line, but the server does not support Galera "
+ "replication. Ignoring the option.\n");
+ opt_galera_info = false;
+ }
+
+ if (opt_slave_info && have_multi_threaded_slave &&
+ !have_gtid_slave) {
+ msg("The --slave-info option requires GTID enabled for a "
+ "multi-threaded slave.\n");
+ return(false);
+ }
+
+ return(true);
+}
+
+static
+bool
+select_incremental_lsn_from_history(lsn_t *incremental_lsn)
+{
+ MYSQL_RES *mysql_result;
+ MYSQL_ROW row;
+ char query[1000];
+ char buf[100];
+
+ if (opt_incremental_history_name) {
+ mysql_real_escape_string(mysql_connection, buf,
+ opt_incremental_history_name,
+ strlen(opt_incremental_history_name));
+ ut_snprintf(query, sizeof(query),
+ "SELECT innodb_to_lsn "
+ "FROM PERCONA_SCHEMA.xtrabackup_history "
+ "WHERE name = '%s' "
+ "AND innodb_to_lsn IS NOT NULL "
+ "ORDER BY innodb_to_lsn DESC LIMIT 1",
+ buf);
+ }
+
+ if (opt_incremental_history_uuid) {
+ mysql_real_escape_string(mysql_connection, buf,
+ opt_incremental_history_uuid,
+ strlen(opt_incremental_history_uuid));
+ ut_snprintf(query, sizeof(query),
+ "SELECT innodb_to_lsn "
+ "FROM PERCONA_SCHEMA.xtrabackup_history "
+ "WHERE uuid = '%s' "
+ "AND innodb_to_lsn IS NOT NULL "
+ "ORDER BY innodb_to_lsn DESC LIMIT 1",
+ buf);
+ }
+
+ mysql_result = xb_mysql_query(mysql_connection, query, true);
+
+ ut_ad(mysql_num_fields(mysql_result) == 1);
+ if (!(row = mysql_fetch_row(mysql_result))) {
+ msg("Error while attempting to find history record "
+ "for %s %s\n",
+ opt_incremental_history_uuid ? "uuid" : "name",
+ opt_incremental_history_uuid ?
+ opt_incremental_history_uuid :
+ opt_incremental_history_name);
+ return(false);
+ }
+
+ *incremental_lsn = strtoull(row[0], NULL, 10);
+
+ mysql_free_result(mysql_result);
+
+ msg("Found and using lsn: " LSN_PF " for %s %s\n", *incremental_lsn,
+ opt_incremental_history_uuid ? "uuid" : "name",
+ opt_incremental_history_uuid ?
+ opt_incremental_history_uuid :
+ opt_incremental_history_name);
+
+ return(true);
+}
+
+static
+const char *
+eat_sql_whitespace(const char *query)
+{
+ bool comment = false;
+
+ while (*query) {
+ if (comment) {
+ if (query[0] == '*' && query[1] == '/') {
+ query += 2;
+ comment = false;
+ continue;
+ }
+ ++query;
+ continue;
+ }
+ if (query[0] == '/' && query[1] == '*') {
+ query += 2;
+ comment = true;
+ continue;
+ }
+ if (strchr("\t\n\r (", query[0])) {
+ ++query;
+ continue;
+ }
+ break;
+ }
+
+ return(query);
+}
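+
+/* eat_sql_whitespace() above skips leading whitespace, C-style SQL
+comments and opening parentheses, so the is_*_query() helpers below also
+classify statements written e.g. as "  (SELECT ...". */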
+
+static
+bool
+is_query_from_list(const char *query, const char **list)
+{
+ const char **item;
+
+ query = eat_sql_whitespace(query);
+
+ item = list;
+ while (*item) {
+ if (strncasecmp(query, *item, strlen(*item)) == 0) {
+ return(true);
+ }
+ ++item;
+ }
+
+ return(false);
+}
+
+static
+bool
+is_query(const char *query)
+{
+ const char *query_list[] = {"insert", "update", "delete", "replace",
+ "alter", "load", "select", "do", "handler", "call", "execute",
+ "begin", NULL};
+
+ return is_query_from_list(query, query_list);
+}
+
+static
+bool
+is_select_query(const char *query)
+{
+ const char *query_list[] = {"select", NULL};
+
+ return is_query_from_list(query, query_list);
+}
+
+static
+bool
+is_update_query(const char *query)
+{
+ const char *query_list[] = {"insert", "update", "delete", "replace",
+ "alter", "load", NULL};
+
+ return is_query_from_list(query, query_list);
+}
+
+static
+bool
+have_queries_to_wait_for(MYSQL *connection, uint threshold)
+{
+ MYSQL_RES *result;
+ MYSQL_ROW row;
+ bool all_queries;
+
+ result = xb_mysql_query(connection, "SHOW FULL PROCESSLIST", true);
+
+ all_queries = (opt_lock_wait_query_type == QUERY_TYPE_ALL);
+ while ((row = mysql_fetch_row(result)) != NULL) {
+ const char *info = row[7];
+ int duration = atoi(row[5]);
+ char *id = row[0];
+
+ if (info != NULL
+ && duration >= (int)threshold
+ && ((all_queries && is_query(info))
+ || is_update_query(info))) {
+			msg_ts("Waiting for query %s (duration %d sec): %s\n",
+ id, duration, info);
+ return(true);
+ }
+ }
+
+ return(false);
+}
+
+static
+void
+kill_long_queries(MYSQL *connection, uint timeout)
+{
+ MYSQL_RES *result;
+ MYSQL_ROW row;
+ bool all_queries;
+ char kill_stmt[100];
+
+ result = xb_mysql_query(connection, "SHOW FULL PROCESSLIST", true);
+
+ all_queries = (opt_kill_long_query_type == QUERY_TYPE_ALL);
+ while ((row = mysql_fetch_row(result)) != NULL) {
+ const char *info = row[7];
+ int duration = atoi(row[5]);
+ char *id = row[0];
+
+ if (info != NULL &&
+ duration >= (int)timeout &&
+ ((all_queries && is_query(info)) ||
+ is_select_query(info))) {
+ msg_ts("Killing query %s (duration %d sec): %s\n",
+ id, duration, info);
+ ut_snprintf(kill_stmt, sizeof(kill_stmt),
+ "KILL %s", id);
+ xb_mysql_query(connection, kill_stmt, false, false);
+ }
+ }
+}
+
+static
+bool
+wait_for_no_updates(MYSQL *connection, uint timeout, uint threshold)
+{
+ time_t start_time;
+
+ start_time = time(NULL);
+
+ msg_ts("Waiting %u seconds for queries running longer than %u seconds "
+ "to finish\n", timeout, threshold);
+
+ while (time(NULL) <= (time_t)(start_time + timeout)) {
+ if (!have_queries_to_wait_for(connection, threshold)) {
+ return(true);
+ }
+ os_thread_sleep(1000000);
+ }
+
+	msg_ts("Unable to obtain lock. Please try again later.\n");
+
+ return(false);
+}
+
+static
+os_thread_ret_t
+kill_query_thread(
+/*===============*/
+ void *arg __attribute__((unused)))
+{
+ MYSQL *mysql;
+ time_t start_time;
+
+ start_time = time(NULL);
+
+ os_event_set(kill_query_thread_started);
+
+ msg_ts("Kill query timeout %d seconds.\n",
+ opt_kill_long_queries_timeout);
+
+ while (time(NULL) - start_time <
+ (time_t)opt_kill_long_queries_timeout) {
+ if (os_event_wait_time(kill_query_thread_stop, 1000) !=
+ OS_SYNC_TIME_EXCEEDED) {
+ goto stop_thread;
+ }
+ }
+
+ if ((mysql = xb_mysql_connect()) == NULL) {
+ msg("Error: kill query thread failed\n");
+ goto stop_thread;
+ }
+
+ while (true) {
+ kill_long_queries(mysql, time(NULL) - start_time);
+ if (os_event_wait_time(kill_query_thread_stop, 1000) !=
+ OS_SYNC_TIME_EXCEEDED) {
+ break;
+ }
+ }
+
+ mysql_close(mysql);
+
+stop_thread:
+ msg_ts("Kill query thread stopped\n");
+
+ os_event_set(kill_query_thread_stopped);
+
+ os_thread_exit(NULL);
+ OS_THREAD_DUMMY_RETURN;
+}
+
+
+static
+void
+start_query_killer()
+{
+ kill_query_thread_stop = os_event_create();
+ kill_query_thread_started = os_event_create();
+ kill_query_thread_stopped = os_event_create();
+
+ os_thread_create(kill_query_thread, NULL, &kill_query_thread_id);
+
+ os_event_wait(kill_query_thread_started);
+}
+
+static
+void
+stop_query_killer()
+{
+ os_event_set(kill_query_thread_stop);
+ os_event_wait_time(kill_query_thread_stopped, 60000);
+}
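+
+/* Query killer life cycle, as implemented above: start_query_killer() is
+called right before FLUSH TABLES WITH READ LOCK; the thread waits until
+opt_kill_long_queries_timeout seconds have passed (exiting early if
+stopped), opens its own connection and then repeatedly kills SELECTs (or
+all queries, depending on opt_kill_long_query_type) that have been
+running longer than the time elapsed since the thread started, until
+stop_query_killer() signals it once the lock has been acquired. */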
+
+/*********************************************************************//**
+Function acquires either a backup tables lock, if supported
+by the server, or a global read lock (FLUSH TABLES WITH READ LOCK)
+otherwise.
+@returns true if lock acquired */
+bool
+lock_tables(MYSQL *connection)
+{
+ if (have_lock_wait_timeout) {
+ /* Set the maximum supported session value for
+ lock_wait_timeout to prevent unnecessary timeouts when the
+ global value is changed from the default */
+ xb_mysql_query(connection,
+ "SET SESSION lock_wait_timeout=31536000", false);
+ }
+
+ if (have_backup_locks) {
+ msg_ts("Executing LOCK TABLES FOR BACKUP...\n");
+ xb_mysql_query(connection, "LOCK TABLES FOR BACKUP", false);
+ return(true);
+ }
+
+ if (!opt_lock_wait_timeout && !opt_kill_long_queries_timeout) {
+
+ /* We do first a FLUSH TABLES. If a long update is running, the
+ FLUSH TABLES will wait but will not stall the whole mysqld, and
+ when the long update is done the FLUSH TABLES WITH READ LOCK
+ will start and succeed quickly. So, FLUSH TABLES is to lower
+ the probability of a stage where both mysqldump and most client
+ connections are stalled. Of course, if a second long update
+ starts between the two FLUSHes, we have that bad stall.
+
+		Option lock_wait_timeout serves the same purpose and is not
+ compatible with this trick.
+ */
+
+ msg_ts("Executing FLUSH NO_WRITE_TO_BINLOG TABLES...\n");
+
+ xb_mysql_query(connection,
+ "FLUSH NO_WRITE_TO_BINLOG TABLES", false);
+ }
+
+ if (opt_lock_wait_timeout) {
+ if (!wait_for_no_updates(connection, opt_lock_wait_timeout,
+ opt_lock_wait_threshold)) {
+ return(false);
+ }
+ }
+
+ msg_ts("Executing FLUSH TABLES WITH READ LOCK...\n");
+
+ if (opt_kill_long_queries_timeout) {
+ start_query_killer();
+ }
+
+ if (have_galera_enabled) {
+ xb_mysql_query(connection,
+ "SET SESSION wsrep_causal_reads=0", false);
+ }
+
+ xb_mysql_query(connection, "FLUSH TABLES WITH READ LOCK", false);
+
+ if (opt_kill_long_queries_timeout) {
+ stop_query_killer();
+ }
+
+ return(true);
+}
+
+
+/*********************************************************************//**
+If backup locks are used, execute LOCK BINLOG FOR BACKUP provided that we are
+not in the --no-lock mode and the lock has not been acquired already.
+@returns true if lock acquired */
+bool
+lock_binlog_maybe(MYSQL *connection)
+{
+ if (have_backup_locks && !opt_no_lock && !binlog_locked) {
+ msg_ts("Executing LOCK BINLOG FOR BACKUP...\n");
+ xb_mysql_query(connection, "LOCK BINLOG FOR BACKUP", false);
+ binlog_locked = true;
+
+ return(true);
+ }
+
+ return(false);
+}
+
+
+/*********************************************************************//**
+Releases the global read lock acquired with FTWRL and/or the binlog
+lock acquired with LOCK BINLOG FOR BACKUP, depending on
+the locking strategy being used */
+void
+unlock_all(MYSQL *connection)
+{
+ if (opt_debug_sleep_before_unlock) {
+ msg_ts("Debug sleep for %u seconds\n",
+ opt_debug_sleep_before_unlock);
+ os_thread_sleep(opt_debug_sleep_before_unlock * 1000);
+ }
+
+ if (binlog_locked) {
+ msg_ts("Executing UNLOCK BINLOG\n");
+ xb_mysql_query(connection, "UNLOCK BINLOG", false);
+ }
+
+ msg_ts("Executing UNLOCK TABLES\n");
+ xb_mysql_query(connection, "UNLOCK TABLES", false);
+
+ msg_ts("All tables unlocked\n");
+}
+
+
+static
+int
+get_open_temp_tables(MYSQL *connection)
+{
+ char *slave_open_temp_tables = NULL;
+ mysql_variable status[] = {
+ {"Slave_open_temp_tables", &slave_open_temp_tables},
+ {NULL, NULL}
+ };
+ int result = false;
+
+ read_mysql_variables(connection,
+ "SHOW STATUS LIKE 'slave_open_temp_tables'", status, true);
+
+ result = slave_open_temp_tables ? atoi(slave_open_temp_tables) : 0;
+
+ free_mysql_variables(status);
+
+ return(result);
+}
+
+/*********************************************************************//**
+Wait until it's safe to back up a slave. Returns immediately if
+the host isn't a slave. Currently there's only one check:
+Slave_open_temp_tables has to be zero. Returns false on timeout. */
+bool
+wait_for_safe_slave(MYSQL *connection)
+{
+ char *read_master_log_pos = NULL;
+ char *slave_sql_running = NULL;
+ int n_attempts = 1;
+ const int sleep_time = 3;
+ int open_temp_tables = 0;
+ bool result = true;
+
+ mysql_variable status[] = {
+ {"Read_Master_Log_Pos", &read_master_log_pos},
+ {"Slave_SQL_Running", &slave_sql_running},
+ {NULL, NULL}
+ };
+
+ sql_thread_started = false;
+
+ read_mysql_variables(connection, "SHOW SLAVE STATUS", status, false);
+
+ if (!(read_master_log_pos && slave_sql_running)) {
+ msg("Not checking slave open temp tables for "
+ "--safe-slave-backup because host is not a slave\n");
+ goto cleanup;
+ }
+
+ if (strcmp(slave_sql_running, "Yes") == 0) {
+ sql_thread_started = true;
+ xb_mysql_query(connection, "STOP SLAVE SQL_THREAD", false);
+ }
+
+ if (opt_safe_slave_backup_timeout > 0) {
+ n_attempts = opt_safe_slave_backup_timeout / sleep_time;
+ }
+
+ open_temp_tables = get_open_temp_tables(connection);
+ msg_ts("Slave open temp tables: %d\n", open_temp_tables);
+
+ while (open_temp_tables && n_attempts--) {
+ msg_ts("Starting slave SQL thread, waiting %d seconds, then "
+ "checking Slave_open_temp_tables again (%d attempts "
+ "remaining)...\n", sleep_time, n_attempts);
+
+ xb_mysql_query(connection, "START SLAVE SQL_THREAD", false);
+ os_thread_sleep(sleep_time * 1000000);
+ xb_mysql_query(connection, "STOP SLAVE SQL_THREAD", false);
+
+ open_temp_tables = get_open_temp_tables(connection);
+ msg_ts("Slave open temp tables: %d\n", open_temp_tables);
+ }
+
+ /* Restart the slave if it was running at start */
+ if (open_temp_tables == 0) {
+ msg_ts("Slave is safe to backup\n");
+ goto cleanup;
+ }
+
+ result = false;
+
+ if (sql_thread_started) {
+ msg_ts("Restarting slave SQL thread.\n");
+ xb_mysql_query(connection, "START SLAVE SQL_THREAD", false);
+ }
+
+ msg_ts("Slave_open_temp_tables did not become zero after "
+ "%d seconds\n", opt_safe_slave_backup_timeout);
+
+cleanup:
+ free_mysql_variables(status);
+
+ return(result);
+}
+
+
+/*********************************************************************//**
+Retrieves MySQL binlog position of the master server in a replication
+setup and saves it in a file. It also saves it in the
+mysql_slave_position variable. */
+bool
+write_slave_info(MYSQL *connection)
+{
+ char *master = NULL;
+ char *filename = NULL;
+ char *gtid_executed = NULL;
+ char *position = NULL;
+ char *gtid_slave_pos = NULL;
+ char *ptr;
+ bool result = false;
+
+ mysql_variable status[] = {
+ {"Master_Host", &master},
+ {"Relay_Master_Log_File", &filename},
+ {"Exec_Master_Log_Pos", &position},
+ {"Executed_Gtid_Set", &gtid_executed},
+ {NULL, NULL}
+ };
+
+ mysql_variable variables[] = {
+ {"gtid_slave_pos", &gtid_slave_pos},
+ {NULL, NULL}
+ };
+
+ read_mysql_variables(connection, "SHOW SLAVE STATUS", status, false);
+ read_mysql_variables(connection, "SHOW VARIABLES", variables, true);
+
+ if (master == NULL || filename == NULL || position == NULL) {
+ msg("Failed to get master binlog coordinates "
+ "from SHOW SLAVE STATUS\n");
+ msg("This means that the server is not a "
+ "replication slave. Ignoring the --slave-info "
+ "option\n");
+ /* we still want to continue the backup */
+ result = true;
+ goto cleanup;
+ }
+
+ /* Print slave status to a file.
+ If GTID mode is used, construct a CHANGE MASTER statement with
+	MASTER_AUTO_POSITION and the correct gtid_purged value. */
+ if (gtid_executed != NULL && *gtid_executed) {
+ /* MySQL >= 5.6 with GTID enabled */
+
+ for (ptr = strchr(gtid_executed, '\n');
+ ptr;
+ ptr = strchr(ptr, '\n')) {
+ *ptr = ' ';
+ }
+
+ result = backup_file_printf(XTRABACKUP_SLAVE_INFO,
+ "SET GLOBAL gtid_purged='%s';\n"
+ "CHANGE MASTER TO MASTER_AUTO_POSITION=1\n",
+ gtid_executed);
+
+ ut_a(asprintf(&mysql_slave_position,
+ "master host '%s', purge list '%s'",
+ master, gtid_executed) != -1);
+ } else if (gtid_slave_pos && *gtid_slave_pos) {
+ /* MariaDB >= 10.0 with GTID enabled */
+ result = backup_file_printf(XTRABACKUP_SLAVE_INFO,
+ "SET GLOBAL gtid_slave_pos = '%s';\n"
+ "CHANGE MASTER TO master_use_gtid = slave_pos\n",
+ gtid_slave_pos);
+ ut_a(asprintf(&mysql_slave_position,
+ "master host '%s', gtid_slave_pos %s",
+ master, gtid_slave_pos) != -1);
+ } else {
+ result = backup_file_printf(XTRABACKUP_SLAVE_INFO,
+ "CHANGE MASTER TO MASTER_LOG_FILE='%s', "
+ "MASTER_LOG_POS=%s\n", filename, position);
+ ut_a(asprintf(&mysql_slave_position,
+ "master host '%s', filename '%s', position '%s'",
+ master, filename, position) != -1);
+ }
+
+cleanup:
+ free_mysql_variables(status);
+ free_mysql_variables(variables);
+
+ return(result);
+}
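+
+/* For illustration (placeholder values), the xtrabackup_slave_info file
+written above contains one of:
+
+	SET GLOBAL gtid_purged='<gtid set>';
+	CHANGE MASTER TO MASTER_AUTO_POSITION=1
+
+	SET GLOBAL gtid_slave_pos = '<gtid pos>';
+	CHANGE MASTER TO master_use_gtid = slave_pos
+
+	CHANGE MASTER TO MASTER_LOG_FILE='<file>', MASTER_LOG_POS=<position>
+
+depending on whether MySQL GTID, MariaDB GTID or plain binlog coordinates
+are in use. */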
+
+
+/*********************************************************************//**
+Retrieves the Galera replication state (wsrep_local_state_uuid and
+wsrep_last_committed) and saves it in the xtrabackup_galera_info file. */
+bool
+write_galera_info(MYSQL *connection)
+{
+ char *state_uuid = NULL, *state_uuid55 = NULL;
+ char *last_committed = NULL, *last_committed55 = NULL;
+ bool result;
+
+ mysql_variable status[] = {
+ {"Wsrep_local_state_uuid", &state_uuid},
+ {"wsrep_local_state_uuid", &state_uuid55},
+ {"Wsrep_last_committed", &last_committed},
+ {"wsrep_last_committed", &last_committed55},
+ {NULL, NULL}
+ };
+
+ /* When backup locks are supported by the server, we should skip
+ creating xtrabackup_galera_info file on the backup stage, because
+ wsrep_local_state_uuid and wsrep_last_committed will be inconsistent
+ without blocking commits. The state file will be created on the prepare
+ stage using the WSREP recovery procedure. */
+ if (have_backup_locks) {
+ return(true);
+ }
+
+ read_mysql_variables(connection, "SHOW STATUS", status, true);
+
+ if ((state_uuid == NULL && state_uuid55 == NULL)
+ || (last_committed == NULL && last_committed55 == NULL)) {
+ msg("Failed to get master wsrep state from SHOW STATUS.\n");
+ result = false;
+ goto cleanup;
+ }
+
+ result = backup_file_printf(XTRABACKUP_GALERA_INFO,
+ "%s:%s\n", state_uuid ? state_uuid : state_uuid55,
+ last_committed ? last_committed : last_committed55);
+
+cleanup:
+ free_mysql_variables(status);
+
+ return(result);
+}
+
+
+/*********************************************************************//**
+Flush and copy the current binary log file into the backup,
+if GTID is enabled */
+bool
+write_current_binlog_file(MYSQL *connection)
+{
+ char *executed_gtid_set = NULL;
+ char *gtid_binlog_state = NULL;
+ char *log_bin_file = NULL;
+ char *log_bin_dir = NULL;
+ bool gtid_exists;
+ bool result = true;
+ char filepath[FN_REFLEN];
+
+ mysql_variable status[] = {
+ {"Executed_Gtid_Set", &executed_gtid_set},
+ {NULL, NULL}
+ };
+
+ mysql_variable status_after_flush[] = {
+ {"File", &log_bin_file},
+ {NULL, NULL}
+ };
+
+ mysql_variable vars[] = {
+ {"gtid_binlog_state", &gtid_binlog_state},
+ {"log_bin_basename", &log_bin_dir},
+ {NULL, NULL}
+ };
+
+ read_mysql_variables(connection, "SHOW MASTER STATUS", status, false);
+ read_mysql_variables(connection, "SHOW VARIABLES", vars, true);
+
+ gtid_exists = (executed_gtid_set && *executed_gtid_set)
+ || (gtid_binlog_state && *gtid_binlog_state);
+
+ if (gtid_exists) {
+ size_t log_bin_dir_length;
+
+ lock_binlog_maybe(connection);
+
+ xb_mysql_query(connection, "FLUSH BINARY LOGS", false);
+
+ read_mysql_variables(connection, "SHOW MASTER STATUS",
+ status_after_flush, false);
+
+ if (opt_log_bin != NULL && strchr(opt_log_bin, FN_LIBCHAR)) {
+ /* If log_bin is set, it has priority */
+ if (log_bin_dir) {
+ free(log_bin_dir);
+ }
+ log_bin_dir = strdup(opt_log_bin);
+ } else if (log_bin_dir == NULL) {
+ /* Default location is MySQL datadir */
+ log_bin_dir = strdup("./");
+ }
+
+ dirname_part(log_bin_dir, log_bin_dir, &log_bin_dir_length);
+
+ /* strip final slash if it is not the only path component */
+ if (log_bin_dir_length > 1 &&
+ log_bin_dir[log_bin_dir_length - 1] == FN_LIBCHAR) {
+ log_bin_dir[log_bin_dir_length - 1] = 0;
+ }
+
+ if (log_bin_dir == NULL || log_bin_file == NULL) {
+ msg("Failed to get master binlog coordinates from "
+ "SHOW MASTER STATUS");
+ result = false;
+ goto cleanup;
+ }
+
+ ut_snprintf(filepath, sizeof(filepath), "%s%c%s",
+ log_bin_dir, FN_LIBCHAR, log_bin_file);
+ result = copy_file(ds_data, filepath, log_bin_file, 0);
+ }
+
+cleanup:
+ free_mysql_variables(status_after_flush);
+ free_mysql_variables(status);
+ free_mysql_variables(vars);
+
+ return(result);
+}
+
+
+/*********************************************************************//**
+Retrieves the MySQL binlog position and saves it in the
+xtrabackup_binlog_info file. It is also saved in the
+mysql_binlog_position variable. */
+bool
+write_binlog_info(MYSQL *connection)
+{
+ char *filename = NULL;
+ char *position = NULL;
+ char *gtid_mode = NULL;
+ char *gtid_current_pos = NULL;
+ char *gtid_executed = NULL;
+ char *gtid = NULL;
+ bool result;
+ bool mysql_gtid;
+ bool mariadb_gtid;
+
+ mysql_variable status[] = {
+ {"File", &filename},
+ {"Position", &position},
+ {"Executed_Gtid_Set", &gtid_executed},
+ {NULL, NULL}
+ };
+
+ mysql_variable vars[] = {
+ {"gtid_mode", &gtid_mode},
+ {"gtid_current_pos", &gtid_current_pos},
+ {NULL, NULL}
+ };
+
+ read_mysql_variables(connection, "SHOW MASTER STATUS", status, false);
+ read_mysql_variables(connection, "SHOW VARIABLES", vars, true);
+
+ if (filename == NULL || position == NULL) {
+ /* Do not create xtrabackup_binlog_info if binary
+ log is disabled */
+ result = true;
+ goto cleanup;
+ }
+
+ mysql_gtid = ((gtid_mode != NULL) && (strcmp(gtid_mode, "ON") == 0));
+ mariadb_gtid = (gtid_current_pos != NULL);
+
+ gtid = (gtid_executed != NULL ? gtid_executed : gtid_current_pos);
+
+ if (mariadb_gtid || mysql_gtid) {
+ ut_a(asprintf(&mysql_binlog_position,
+ "filename '%s', position '%s', "
+ "GTID of the last change '%s'",
+ filename, position, gtid) != -1);
+ result = backup_file_printf(XTRABACKUP_BINLOG_INFO,
+ "%s\t%s\t%s\n", filename, position,
+ gtid);
+ } else {
+ ut_a(asprintf(&mysql_binlog_position,
+ "filename '%s', position '%s'",
+ filename, position) != -1);
+ result = backup_file_printf(XTRABACKUP_BINLOG_INFO,
+ "%s\t%s\n", filename, position);
+ }
+
+cleanup:
+ free_mysql_variables(status);
+ free_mysql_variables(vars);
+
+ return(result);
+}
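+
+/* The xtrabackup_binlog_info file written above is tab-separated:
+"<binlog file>\t<position>", with an extra "\t<GTID of the last change>"
+column when GTID is enabled. */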
+
+
+
+/*********************************************************************//**
+Writes the xtrabackup_info file and, if backup history is enabled, creates
+PERCONA_SCHEMA.xtrabackup_history and writes a new history record to the
+table containing all the history info particular to the just completed
+backup. */
+bool
+write_xtrabackup_info(MYSQL *connection)
+{
+ MYSQL_STMT *stmt;
+ MYSQL_BIND bind[19];
+ char *uuid = NULL;
+ char *server_version = NULL;
+ char buf_start_time[100];
+ char buf_end_time[100];
+ int idx;
+ tm tm;
+ my_bool null = TRUE;
+
+ const char *xb_stream_name[] = {"file", "tar", "xbstream"};
+ const char *ins_query = "insert into PERCONA_SCHEMA.xtrabackup_history("
+ "uuid, name, tool_name, tool_command, tool_version, "
+ "ibbackup_version, server_version, start_time, end_time, "
+ "lock_time, binlog_pos, innodb_from_lsn, innodb_to_lsn, "
+ "partial, incremental, format, compact, compressed, "
+ "encrypted) "
+ "values(?,?,?,?,?,?,?,from_unixtime(?),from_unixtime(?),"
+ "?,?,?,?,?,?,?,?,?,?)";
+
+ ut_ad(xtrabackup_stream_fmt < 3);
+
+ uuid = read_mysql_one_value(connection, "SELECT UUID()");
+ server_version = read_mysql_one_value(connection, "SELECT VERSION()");
+ localtime_r(&history_start_time, &tm);
+ strftime(buf_start_time, sizeof(buf_start_time),
+ "%Y-%m-%d %H:%M:%S", &tm);
+ history_end_time = time(NULL);
+ localtime_r(&history_end_time, &tm);
+ strftime(buf_end_time, sizeof(buf_end_time),
+ "%Y-%m-%d %H:%M:%S", &tm);
+ backup_file_printf(XTRABACKUP_INFO,
+ "uuid = %s\n"
+ "name = %s\n"
+ "tool_name = %s\n"
+ "tool_command = %s\n"
+ "tool_version = %s\n"
+ "ibbackup_version = %s\n"
+ "server_version = %s\n"
+ "start_time = %s\n"
+ "end_time = %s\n"
+ "lock_time = %d\n"
+ "binlog_pos = %s\n"
+ "innodb_from_lsn = %llu\n"
+ "innodb_to_lsn = %llu\n"
+ "partial = %s\n"
+ "incremental = %s\n"
+ "format = %s\n"
+ "compact = %s\n"
+ "compressed = %s\n"
+ "encrypted = %s\n",
+ uuid, /* uuid */
+ opt_history ? opt_history : "", /* name */
+ tool_name, /* tool_name */
+ tool_args, /* tool_command */
+ XTRABACKUP_VERSION, /* tool_version */
+ XTRABACKUP_VERSION, /* ibbackup_version */
+ server_version, /* server_version */
+ buf_start_time, /* start_time */
+ buf_end_time, /* end_time */
+ history_lock_time, /* lock_time */
+ mysql_binlog_position ?
+ mysql_binlog_position : "", /* binlog_pos */
+ incremental_lsn, /* innodb_from_lsn */
+ metadata_to_lsn, /* innodb_to_lsn */
+ (xtrabackup_tables /* partial */
+ || xtrabackup_tables_file
+ || xtrabackup_databases
+ || xtrabackup_databases_file) ? "Y" : "N",
+ xtrabackup_incremental ? "Y" : "N", /* incremental */
+ xb_stream_name[xtrabackup_stream_fmt], /* format */
+ xtrabackup_compact ? "Y" : "N", /* compact */
+ xtrabackup_compress ? "compressed" : "N", /* compressed */
+ xtrabackup_encrypt ? "Y" : "N"); /* encrypted */
+
+ if (!opt_history) {
+ goto cleanup;
+ }
+
+ xb_mysql_query(connection,
+ "CREATE DATABASE IF NOT EXISTS PERCONA_SCHEMA", false);
+ xb_mysql_query(connection,
+ "CREATE TABLE IF NOT EXISTS PERCONA_SCHEMA.xtrabackup_history("
+ "uuid VARCHAR(40) NOT NULL PRIMARY KEY,"
+ "name VARCHAR(255) DEFAULT NULL,"
+ "tool_name VARCHAR(255) DEFAULT NULL,"
+ "tool_command TEXT DEFAULT NULL,"
+ "tool_version VARCHAR(255) DEFAULT NULL,"
+ "ibbackup_version VARCHAR(255) DEFAULT NULL,"
+ "server_version VARCHAR(255) DEFAULT NULL,"
+ "start_time TIMESTAMP NULL DEFAULT NULL,"
+ "end_time TIMESTAMP NULL DEFAULT NULL,"
+ "lock_time BIGINT UNSIGNED DEFAULT NULL,"
+ "binlog_pos VARCHAR(128) DEFAULT NULL,"
+ "innodb_from_lsn BIGINT UNSIGNED DEFAULT NULL,"
+ "innodb_to_lsn BIGINT UNSIGNED DEFAULT NULL,"
+ "partial ENUM('Y', 'N') DEFAULT NULL,"
+ "incremental ENUM('Y', 'N') DEFAULT NULL,"
+ "format ENUM('file', 'tar', 'xbstream') DEFAULT NULL,"
+ "compact ENUM('Y', 'N') DEFAULT NULL,"
+ "compressed ENUM('Y', 'N') DEFAULT NULL,"
+ "encrypted ENUM('Y', 'N') DEFAULT NULL"
+ ") CHARACTER SET utf8 ENGINE=innodb", false);
+
+ stmt = mysql_stmt_init(connection);
+
+ mysql_stmt_prepare(stmt, ins_query, strlen(ins_query));
+
+ memset(bind, 0, sizeof(bind));
+ idx = 0;
+
+ /* uuid */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = uuid;
+ bind[idx].buffer_length = strlen(uuid);
+ ++idx;
+
+ /* name */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(opt_history);
+ bind[idx].buffer_length = strlen(opt_history);
+ if (!(opt_history && *opt_history)) {
+ bind[idx].is_null = &null;
+ }
+ ++idx;
+
+ /* tool_name */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = tool_name;
+ bind[idx].buffer_length = strlen(tool_name);
+ ++idx;
+
+ /* tool_command */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = tool_args;
+ bind[idx].buffer_length = strlen(tool_args);
+ ++idx;
+
+ /* tool_version */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(XTRABACKUP_VERSION);
+ bind[idx].buffer_length = strlen(XTRABACKUP_VERSION);
+ ++idx;
+
+ /* ibbackup_version */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(XTRABACKUP_VERSION);
+ bind[idx].buffer_length = strlen(XTRABACKUP_VERSION);
+ ++idx;
+
+ /* server_version */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = server_version;
+ bind[idx].buffer_length = strlen(server_version);
+ ++idx;
+
+ /* start_time */
+ bind[idx].buffer_type = MYSQL_TYPE_LONG;
+ bind[idx].buffer = &history_start_time;
+ ++idx;
+
+ /* end_time */
+ bind[idx].buffer_type = MYSQL_TYPE_LONG;
+ bind[idx].buffer = &history_end_time;
+ ++idx;
+
+ /* lock_time */
+ bind[idx].buffer_type = MYSQL_TYPE_LONG;
+ bind[idx].buffer = &history_lock_time;
+ ++idx;
+
+ /* binlog_pos */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = mysql_binlog_position;
+ if (mysql_binlog_position != NULL) {
+ bind[idx].buffer_length = strlen(mysql_binlog_position);
+ } else {
+ bind[idx].is_null = &null;
+ }
+ ++idx;
+
+ /* innodb_from_lsn */
+ bind[idx].buffer_type = MYSQL_TYPE_LONGLONG;
+ bind[idx].buffer = (char*)(&incremental_lsn);
+ ++idx;
+
+ /* innodb_to_lsn */
+ bind[idx].buffer_type = MYSQL_TYPE_LONGLONG;
+ bind[idx].buffer = (char*)(&metadata_to_lsn);
+ ++idx;
+
+ /* partial (Y | N) */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)((xtrabackup_tables
+ || xtrabackup_tables_file
+ || xtrabackup_databases
+ || xtrabackup_databases_file) ? "Y" : "N");
+ bind[idx].buffer_length = 1;
+ ++idx;
+
+ /* incremental (Y | N) */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(
+ (xtrabackup_incremental
+ || xtrabackup_incremental_basedir
+ || opt_incremental_history_name
+ || opt_incremental_history_uuid) ? "Y" : "N");
+ bind[idx].buffer_length = 1;
+ ++idx;
+
+ /* format (file | tar | xbstream) */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(xb_stream_name[xtrabackup_stream_fmt]);
+ bind[idx].buffer_length = strlen(xb_stream_name[xtrabackup_stream_fmt]);
+ ++idx;
+
+ /* compact (Y | N) */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(xtrabackup_compact ? "Y" : "N");
+ bind[idx].buffer_length = 1;
+ ++idx;
+
+ /* compressed (Y | N) */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(xtrabackup_compress ? "Y" : "N");
+ bind[idx].buffer_length = 1;
+ ++idx;
+
+ /* encrypted (Y | N) */
+ bind[idx].buffer_type = MYSQL_TYPE_STRING;
+ bind[idx].buffer = (char*)(xtrabackup_encrypt ? "Y" : "N");
+ bind[idx].buffer_length = 1;
+ ++idx;
+
+ ut_ad(idx == 19);
+
+ mysql_stmt_bind_param(stmt, bind);
+
+ mysql_stmt_execute(stmt);
+ mysql_stmt_close(stmt);
+
+cleanup:
+
+ free(uuid);
+ free(server_version);
+
+ return(true);
+}
+
+bool
+write_backup_config_file()
+{
+ return backup_file_printf("backup-my.cnf",
+ "# This MySQL options file was generated by innobackupex.\n\n"
+ "# The MySQL server\n"
+ "[mysqld]\n"
+ "innodb_checksum_algorithm=%s\n"
+ "innodb_log_checksum_algorithm=%s\n"
+ "innodb_data_file_path=%s\n"
+ "innodb_log_files_in_group=%lu\n"
+ "innodb_log_file_size=%lld\n"
+ "innodb_fast_checksum=%s\n"
+ "innodb_page_size=%lu\n"
+ "innodb_log_block_size=%lu\n"
+ "innodb_undo_directory=%s\n"
+ "innodb_undo_tablespaces=%lu\n"
+ "%s%s\n"
+ "%s%s\n",
+ innodb_checksum_algorithm_names[srv_checksum_algorithm],
+ innodb_checksum_algorithm_names[srv_log_checksum_algorithm],
+ innobase_data_file_path,
+ srv_n_log_files,
+ innobase_log_file_size,
+ srv_fast_checksum ? "true" : "false",
+ srv_page_size,
+ srv_log_block_size,
+ srv_undo_dir,
+ srv_undo_tablespaces,
+ innobase_doublewrite_file ? "innodb_doublewrite_file=" : "",
+ innobase_doublewrite_file ? innobase_doublewrite_file : "",
+ innobase_buffer_pool_filename ?
+ "innodb_buffer_pool_filename=" : "",
+ innobase_buffer_pool_filename ?
+ innobase_buffer_pool_filename : "");
+}
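+
+/* For illustration (placeholder values only), the generated backup-my.cnf
+looks like:
+
+	# This MySQL options file was generated by innobackupex.
+
+	# The MySQL server
+	[mysqld]
+	innodb_checksum_algorithm=innodb
+	innodb_log_checksum_algorithm=innodb
+	innodb_data_file_path=ibdata1:12M:autoextend
+	innodb_log_files_in_group=2
+	innodb_log_file_size=50331648
+	innodb_page_size=16384
+	...
+*/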
+
+
+static
+char *make_argv(char *buf, size_t len, int argc, char **argv)
+{
+ size_t left= len;
+ const char *arg;
+
+ buf[0]= 0;
+ ++argv; --argc;
+ while (argc > 0 && left > 0)
+ {
+ arg = *argv;
+ if (strncmp(*argv, "--password", strlen("--password")) == 0) {
+ arg = "--password=...";
+ }
+ if (strncmp(*argv, "--encrypt-key",
+ strlen("--encrypt-key")) == 0) {
+ arg = "--encrypt-key=...";
+ }
+ if (strncmp(*argv, "--encrypt_key",
+ strlen("--encrypt_key")) == 0) {
+ arg = "--encrypt_key=...";
+ }
+ left-= ut_snprintf(buf + len - left, left,
+ "%s%c", arg, argc > 1 ? ' ' : 0);
+ ++argv; --argc;
+ }
+
+ return buf;
+}
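+
+/* Note that make_argv() masks the values of --password, --encrypt-key and
+--encrypt_key as "..." so that they do not end up in xtrabackup_info or in
+the PERCONA_SCHEMA.xtrabackup_history table via tool_args. */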
+
+void
+capture_tool_command(int argc, char **argv)
+{
+	/* capture the tool name and tool args */
+ tool_name = strrchr(argv[0], '/');
+ tool_name = tool_name ? tool_name + 1 : argv[0];
+
+ make_argv(tool_args, sizeof(tool_args), argc, argv);
+}
+
+
+bool
+select_history()
+{
+ if (opt_incremental_history_name || opt_incremental_history_uuid) {
+ if (!select_incremental_lsn_from_history(
+ &incremental_lsn)) {
+ return(false);
+ }
+ }
+ return(true);
+}
+
+bool
+flush_changed_page_bitmaps()
+{
+ if (xtrabackup_incremental && have_changed_page_bitmaps &&
+ !xtrabackup_incremental_force_scan) {
+ xb_mysql_query(mysql_connection,
+ "FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS", false);
+ }
+ return(true);
+}
+
+
+/*********************************************************************//**
+Deallocate memory, disconnect from the MySQL server, etc. */
+void
+backup_cleanup()
+{
+ free(mysql_slave_position);
+ free(mysql_binlog_position);
+ free(buffer_pool_filename);
+
+ if (mysql_connection) {
+ mysql_close(mysql_connection);
+ }
+}
diff --git a/extra/mariabackup/backup_mysql.h b/extra/mariabackup/backup_mysql.h
new file mode 100644
index 00000000000..3ccd7bdb613
--- /dev/null
+++ b/extra/mariabackup/backup_mysql.h
@@ -0,0 +1,92 @@
+#ifndef XTRABACKUP_BACKUP_MYSQL_H
+#define XTRABACKUP_BACKUP_MYSQL_H
+
+#include <mysql.h>
+
+/* mysql flavor and version */
+enum mysql_flavor_t { FLAVOR_UNKNOWN, FLAVOR_MYSQL,
+ FLAVOR_PERCONA_SERVER, FLAVOR_MARIADB };
+extern mysql_flavor_t server_flavor;
+extern unsigned long mysql_server_version;
+
+/* server capabilities */
+extern bool have_changed_page_bitmaps;
+extern bool have_backup_locks;
+extern bool have_lock_wait_timeout;
+extern bool have_galera_enabled;
+extern bool have_flush_engine_logs;
+extern bool have_multi_threaded_slave;
+extern bool have_gtid_slave;
+
+
+/* History on server */
+extern time_t history_start_time;
+extern time_t history_end_time;
+extern time_t history_lock_time;
+
+
+extern bool sql_thread_started;
+extern char *mysql_slave_position;
+extern char *mysql_binlog_position;
+extern char *buffer_pool_filename;
+
+/** connection to mysql server */
+extern MYSQL *mysql_connection;
+
+void
+capture_tool_command(int argc, char **argv);
+
+bool
+select_history();
+
+bool
+flush_changed_page_bitmaps();
+
+void
+backup_cleanup();
+
+bool
+get_mysql_vars(MYSQL *connection);
+
+bool
+detect_mysql_capabilities_for_backup();
+
+MYSQL *
+xb_mysql_connect();
+
+MYSQL_RES *
+xb_mysql_query(MYSQL *connection, const char *query, bool use_result,
+ bool die_on_error = true);
+
+void
+unlock_all(MYSQL *connection);
+
+bool
+write_current_binlog_file(MYSQL *connection);
+
+bool
+write_binlog_info(MYSQL *connection);
+
+bool
+write_xtrabackup_info(MYSQL *connection);
+
+bool
+write_backup_config_file();
+
+bool
+lock_binlog_maybe(MYSQL *connection);
+
+bool
+lock_tables(MYSQL *connection);
+
+bool
+wait_for_safe_slave(MYSQL *connection);
+
+bool
+write_galera_info(MYSQL *connection);
+
+bool
+write_slave_info(MYSQL *connection);
+
+
+#endif
diff --git a/extra/mariabackup/changed_page_bitmap.cc b/extra/mariabackup/changed_page_bitmap.cc
new file mode 100644
index 00000000000..e385474c7aa
--- /dev/null
+++ b/extra/mariabackup/changed_page_bitmap.cc
@@ -0,0 +1,1018 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2012 Percona Inc.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Changed page bitmap implementation */
+
+#include "changed_page_bitmap.h"
+
+#include "common.h"
+#include "xtrabackup.h"
+
+/* TODO: copy-pasted shared definitions from the XtraDB bitmap write code.
+Remove these on the first opportunity, i.e. single-binary XtraBackup. */
+
+/* log0online.h */
+
+/** Single bitmap file information */
+struct log_online_bitmap_file_t {
+ char name[FN_REFLEN]; /*!< Name with full path */
+ os_file_t file; /*!< Handle to opened file */
+ ib_uint64_t size; /*!< Size of the file */
+ ib_uint64_t offset; /*!< Offset of the next read,
+ or count of already-read bytes
+ */
+};
+
+/** A set of bitmap files containing some LSN range */
+struct log_online_bitmap_file_range_t {
+ size_t count; /*!< Number of files */
+ /*!< Dynamically-allocated array of info about individual files */
+ struct files_t {
+ char name[FN_REFLEN];/*!< Name of a file */
+ lsn_t start_lsn; /*!< Starting LSN of data in this
+ file */
+ ulong seq_num; /*!< Sequence number of this file */
+ } *files;
+};
+
+/* log0online.c */
+
+/** File name stem for bitmap files. */
+static const char* bmp_file_name_stem = "ib_modified_log_";
+
+/** The bitmap file block size in bytes. All writes will be multiples of this.
+ */
+enum {
+ MODIFIED_PAGE_BLOCK_SIZE = 4096
+};
+
+/** Offsets in a file bitmap block */
+enum {
+ MODIFIED_PAGE_IS_LAST_BLOCK = 0,/* 1 if last block in the current
+ write, 0 otherwise. */
+ MODIFIED_PAGE_START_LSN = 4, /* The starting tracked LSN of this and
+ other blocks in the same write */
+ MODIFIED_PAGE_END_LSN = 12, /* The ending tracked LSN of this and
+ other blocks in the same write */
+ MODIFIED_PAGE_SPACE_ID = 20, /* The space ID of tracked pages in
+ this block */
+ MODIFIED_PAGE_1ST_PAGE_ID = 24, /* The page ID of the first tracked
+ page in this block */
+ MODIFIED_PAGE_BLOCK_UNUSED_1 = 28,/* Unused in order to align the start
+ of bitmap at 8 byte boundary */
+ MODIFIED_PAGE_BLOCK_BITMAP = 32,/* Start of the bitmap itself */
+ MODIFIED_PAGE_BLOCK_UNUSED_2 = MODIFIED_PAGE_BLOCK_SIZE - 8,
+ /* Unused in order to align the end of
+ bitmap at 8 byte boundary */
+ MODIFIED_PAGE_BLOCK_CHECKSUM = MODIFIED_PAGE_BLOCK_SIZE - 4
+ /* The checksum of the current block */
+};
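+
+/* Thus a 4096-byte bitmap block is laid out as: a 4-byte "is last block"
+flag, 8-byte start LSN, 8-byte end LSN, 4-byte space id, 4-byte first page
+id, 4 unused alignment bytes, 4056 bytes of bitmap data (one bit per page,
+i.e. 32448 page ids), 4 unused alignment bytes and a 4-byte checksum. */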
+
+/** Length of the bitmap data in a block */
+enum { MODIFIED_PAGE_BLOCK_BITMAP_LEN
+ = MODIFIED_PAGE_BLOCK_UNUSED_2 - MODIFIED_PAGE_BLOCK_BITMAP };
+
+/** Length of the bitmap data in a block in page ids */
+enum { MODIFIED_PAGE_BLOCK_ID_COUNT = MODIFIED_PAGE_BLOCK_BITMAP_LEN * 8 };
+
+typedef ib_uint64_t bitmap_word_t;
+
+/****************************************************************//**
+Calculate a bitmap block checksum. Algorithm borrowed from
+log_block_calc_checksum.
+@return checksum */
+UNIV_INLINE
+ulint
+log_online_calc_checksum(
+/*=====================*/
+ const byte* block); /*!<in: bitmap block */
+
+/****************************************************************//**
+Provide a comparison function for the RB-tree of (space,
+block_start_page) pairs. The actual implementation does not matter as
+long as the ordering is total.
+@return -1 if p1 < p2, 0 if p1 == p2, 1 if p1 > p2
+*/
+static
+int
+log_online_compare_bmp_keys(
+/*========================*/
+ const void* p1, /*!<in: 1st key to compare */
+ const void* p2) /*!<in: 2nd key to compare */
+{
+ const byte *k1 = (const byte *)p1;
+ const byte *k2 = (const byte *)p2;
+
+ ulint k1_space = mach_read_from_4(k1 + MODIFIED_PAGE_SPACE_ID);
+ ulint k2_space = mach_read_from_4(k2 + MODIFIED_PAGE_SPACE_ID);
+ if (k1_space == k2_space) {
+
+ ulint k1_start_page
+ = mach_read_from_4(k1 + MODIFIED_PAGE_1ST_PAGE_ID);
+ ulint k2_start_page
+ = mach_read_from_4(k2 + MODIFIED_PAGE_1ST_PAGE_ID);
+ return k1_start_page < k2_start_page
+ ? -1 : k1_start_page > k2_start_page ? 1 : 0;
+ }
+ return k1_space < k2_space ? -1 : 1;
+}
+
+/****************************************************************//**
+Calculate a bitmap block checksum. Algorithm borrowed from
+log_block_calc_checksum.
+@return checksum */
+UNIV_INLINE
+ulint
+log_online_calc_checksum(
+/*=====================*/
+ const byte* block) /*!<in: bitmap block */
+{
+ ulint sum;
+ ulint sh;
+ ulint i;
+
+ sum = 1;
+ sh = 0;
+
+ for (i = 0; i < MODIFIED_PAGE_BLOCK_CHECKSUM; i++) {
+
+ ulint b = block[i];
+ sum &= 0x7FFFFFFFUL;
+ sum += b;
+ sum += b << sh;
+ sh++;
+ if (sh > 24) {
+
+ sh = 0;
+ }
+ }
+
+ return sum;
+}
+
+/****************************************************************//**
+Read one bitmap data page and check it for corruption.
+
+@return TRUE if page read OK, FALSE if I/O error */
+static
+ibool
+log_online_read_bitmap_page(
+/*========================*/
+ log_online_bitmap_file_t *bitmap_file, /*!<in/out: bitmap
+ file */
+ byte *page, /*!<out: read page. Must be at
+ least MODIFIED_PAGE_BLOCK_SIZE
+ bytes long */
+ ibool *checksum_ok) /*!<out: TRUE if page
+ checksum OK */
+{
+ ulint checksum;
+ ulint actual_checksum;
+ ibool success;
+
+ ut_a(bitmap_file->size >= MODIFIED_PAGE_BLOCK_SIZE);
+ ut_a(bitmap_file->offset
+ <= bitmap_file->size - MODIFIED_PAGE_BLOCK_SIZE);
+ ut_a(bitmap_file->offset % MODIFIED_PAGE_BLOCK_SIZE == 0);
+
+ success = os_file_read(bitmap_file->file, page, bitmap_file->offset,
+ MODIFIED_PAGE_BLOCK_SIZE);
+
+ if (UNIV_UNLIKELY(!success)) {
+
+ /* The following call prints an error message */
+ os_file_get_last_error(TRUE);
+ msg("InnoDB: Warning: failed reading changed page bitmap "
+ "file \'%s\'\n", bitmap_file->name);
+ return FALSE;
+ }
+
+ bitmap_file->offset += MODIFIED_PAGE_BLOCK_SIZE;
+ ut_ad(bitmap_file->offset <= bitmap_file->size);
+
+ checksum = mach_read_from_4(page + MODIFIED_PAGE_BLOCK_CHECKSUM);
+ actual_checksum = log_online_calc_checksum(page);
+ *checksum_ok = (checksum == actual_checksum);
+
+ return TRUE;
+}
+
+/*********************************************************************//**
+Check whether a given file is a changed page bitmap file and, if it is,
+return the file sequence number and start LSN name components. If it is
+not, the values of the output parameters are undefined.
+
+@return TRUE if a given file is a changed page bitmap file. */
+static
+ibool
+log_online_is_bitmap_file(
+/*======================*/
+ const os_file_stat_t* file_info, /*!<in: file to
+ check */
+ ulong* bitmap_file_seq_num, /*!<out: bitmap file
+ sequence number */
+ lsn_t* bitmap_file_start_lsn) /*!<out: bitmap file
+ start LSN */
+{
+ char stem[FN_REFLEN];
+
+ ut_ad (strlen(file_info->name) < OS_FILE_MAX_PATH);
+
+ return ((file_info->type == OS_FILE_TYPE_FILE
+ || file_info->type == OS_FILE_TYPE_LINK)
+ && (sscanf(file_info->name, "%[a-z_]%lu_" LSN_PF ".xdb", stem,
+ bitmap_file_seq_num, bitmap_file_start_lsn) == 3)
+ && (!strcmp(stem, bmp_file_name_stem)));
+}
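+
+/* Bitmap file names thus have the form
+ib_modified_log_<seq>_<start LSN>.xdb, e.g. ib_modified_log_1_0.xdb
+(illustrative example). */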
+
+/*********************************************************************//**
+List the bitmap files in srv_data_home and set up their range that contains the
+specified LSN interval. This range, if non-empty, will start with a file that
+has the greatest LSN equal to or less than the start LSN and will include all
+the files up to the one with the greatest LSN less than the end LSN. The caller
+must free bitmap_files->files when done if bitmap_files->files is set to
+non-NULL and this function returned TRUE. Field bitmap_files->count might be
+set to a larger value than the actual count of the files, and space for the
+unused array slots will be allocated but cleared to zeroes.
+
+@return TRUE if succeeded
+*/
+static
+ibool
+log_online_setup_bitmap_file_range(
+/*===============================*/
+ log_online_bitmap_file_range_t *bitmap_files, /*!<in/out: bitmap file
+ range */
+ lsn_t range_start, /*!<in: start LSN */
+ lsn_t range_end) /*!<in: end LSN */
+{
+ os_file_dir_t bitmap_dir;
+ os_file_stat_t bitmap_dir_file_info;
+ ulong first_file_seq_num = ULONG_MAX;
+ ulong last_file_seq_num = 0;
+ lsn_t first_file_start_lsn = LSN_MAX;
+
+ xb_ad(range_end >= range_start);
+
+ bitmap_files->count = 0;
+ bitmap_files->files = NULL;
+
+ /* 1st pass: size the info array */
+
+ bitmap_dir = os_file_opendir(srv_data_home, FALSE);
+ if (UNIV_UNLIKELY(!bitmap_dir)) {
+
+ msg("InnoDB: Error: failed to open bitmap directory \'%s\'\n",
+ srv_data_home);
+ return FALSE;
+ }
+
+ while (!os_file_readdir_next_file(srv_data_home, bitmap_dir,
+ &bitmap_dir_file_info)) {
+
+ ulong file_seq_num;
+ lsn_t file_start_lsn;
+
+ if (!log_online_is_bitmap_file(&bitmap_dir_file_info,
+ &file_seq_num,
+ &file_start_lsn)
+ || file_start_lsn >= range_end) {
+
+ continue;
+ }
+
+ if (file_seq_num > last_file_seq_num) {
+
+ last_file_seq_num = file_seq_num;
+ }
+
+ if (file_start_lsn >= range_start
+ || file_start_lsn == first_file_start_lsn
+ || first_file_start_lsn > range_start) {
+
+ /* A file that falls into the range */
+
+ if (file_start_lsn < first_file_start_lsn) {
+
+ first_file_start_lsn = file_start_lsn;
+ }
+ if (file_seq_num < first_file_seq_num) {
+
+ first_file_seq_num = file_seq_num;
+ }
+ } else if (file_start_lsn > first_file_start_lsn) {
+
+ /* A file that has LSN closer to the range start
+ but smaller than it, replacing another such file */
+ first_file_start_lsn = file_start_lsn;
+ first_file_seq_num = file_seq_num;
+ }
+ }
+
+ if (UNIV_UNLIKELY(os_file_closedir(bitmap_dir))) {
+
+ os_file_get_last_error(TRUE);
+ msg("InnoDB: Error: cannot close \'%s\'\n",srv_data_home);
+ return FALSE;
+ }
+
+ if (first_file_seq_num == ULONG_MAX && last_file_seq_num == 0) {
+
+ bitmap_files->count = 0;
+ return TRUE;
+ }
+
+ bitmap_files->count = last_file_seq_num - first_file_seq_num + 1;
+
+ /* 2nd pass: get the file names in the file_seq_num order */
+
+ bitmap_dir = os_file_opendir(srv_data_home, FALSE);
+ if (UNIV_UNLIKELY(!bitmap_dir)) {
+
+ msg("InnoDB: Error: failed to open bitmap directory \'%s\'\n",
+ srv_data_home);
+ return FALSE;
+ }
+
+ bitmap_files->files =
+ static_cast<log_online_bitmap_file_range_t::files_t *>
+ (ut_malloc(bitmap_files->count
+ * sizeof(bitmap_files->files[0])));
+ memset(bitmap_files->files, 0,
+ bitmap_files->count * sizeof(bitmap_files->files[0]));
+
+ while (!os_file_readdir_next_file(srv_data_home, bitmap_dir,
+ &bitmap_dir_file_info)) {
+
+ ulong file_seq_num;
+ lsn_t file_start_lsn;
+ size_t array_pos;
+
+ if (!log_online_is_bitmap_file(&bitmap_dir_file_info,
+ &file_seq_num,
+ &file_start_lsn)
+ || file_start_lsn >= range_end
+ || file_start_lsn < first_file_start_lsn) {
+
+ continue;
+ }
+
+ array_pos = file_seq_num - first_file_seq_num;
+ if (UNIV_UNLIKELY(array_pos >= bitmap_files->count)) {
+
+ msg("InnoDB: Error: inconsistent bitmap file "
+ "directory\n");
+ free(bitmap_files->files);
+ return FALSE;
+ }
+
+ if (file_seq_num > bitmap_files->files[array_pos].seq_num) {
+
+ bitmap_files->files[array_pos].seq_num = file_seq_num;
+ strncpy(bitmap_files->files[array_pos].name,
+ bitmap_dir_file_info.name, FN_REFLEN);
+ bitmap_files->files[array_pos].name[FN_REFLEN - 1]
+ = '\0';
+ bitmap_files->files[array_pos].start_lsn
+ = file_start_lsn;
+ }
+ }
+
+ if (UNIV_UNLIKELY(os_file_closedir(bitmap_dir))) {
+
+ os_file_get_last_error(TRUE);
+ msg("InnoDB: Error: cannot close \'%s\'\n", srv_data_home);
+ free(bitmap_files->files);
+ return FALSE;
+ }
+
+#ifdef UNIV_DEBUG
+ ut_ad(bitmap_files->files[0].seq_num == first_file_seq_num);
+
+ for (size_t i = 1; i < bitmap_files->count; i++) {
+ if (!bitmap_files->files[i].seq_num) {
+
+ break;
+ }
+ ut_ad(bitmap_files->files[i].seq_num
+ > bitmap_files->files[i - 1].seq_num);
+ ut_ad(bitmap_files->files[i].start_lsn
+ >= bitmap_files->files[i - 1].start_lsn);
+ }
+#endif
+
+ return TRUE;
+}
+
+/****************************************************************//**
+Open a bitmap file for reading.
+
+@return TRUE if opened successfully */
+static
+ibool
+log_online_open_bitmap_file_read_only(
+/*==================================*/
+ const char* name, /*!<in: bitmap file
+ name without directory,
+ which is assumed to be
+ srv_data_home */
+ log_online_bitmap_file_t* bitmap_file) /*!<out: opened bitmap
+ file */
+{
+ ibool success = FALSE;
+
+ xb_ad(name[0] != '\0');
+
+ ut_snprintf(bitmap_file->name, FN_REFLEN, "%s%s", srv_data_home, name);
+ bitmap_file->file
+ = os_file_create_simple_no_error_handling(0, bitmap_file->name,
+ OS_FILE_OPEN,
+ OS_FILE_READ_ONLY,
+ &success);
+ if (UNIV_UNLIKELY(!success)) {
+
+ /* Here and below assume that bitmap file names do not
+ contain apostrophes, thus no need for ut_print_filename(). */
+ msg("InnoDB: Warning: error opening the changed page "
+ "bitmap \'%s\'\n", bitmap_file->name);
+ return FALSE;
+ }
+
+ bitmap_file->size = os_file_get_size(bitmap_file->file);
+ bitmap_file->offset = 0;
+
+#ifdef UNIV_LINUX
+ posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_SEQUENTIAL);
+ posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_NOREUSE);
+#endif
+
+ return TRUE;
+}
+
+/****************************************************************//**
+Diagnose one or both of the following situations if we read close to
+the end of bitmap file:
+1) Warn if the remainder of the file is less than one page.
+2) Error if we cannot read any more full pages but the last read page
+did not have the last-in-run flag set.
+
+@return FALSE for the error */
+static
+ibool
+log_online_diagnose_bitmap_eof(
+/*===========================*/
+ const log_online_bitmap_file_t* bitmap_file, /*!< in: bitmap file */
+ ibool last_page_in_run)/*!< in: "last page in
+ run" flag value in the
+ last read page */
+{
+ /* Check if we are too close to EOF to read a full page */
+ if ((bitmap_file->size < MODIFIED_PAGE_BLOCK_SIZE)
+ || (bitmap_file->offset
+ > bitmap_file->size - MODIFIED_PAGE_BLOCK_SIZE)) {
+
+ if (UNIV_UNLIKELY(bitmap_file->offset != bitmap_file->size)) {
+
+ /* If we are not at EOF and we have less than one page
+ to read, it's junk. This error is not fatal in
+ itself. */
+
+ msg("InnoDB: Warning: junk at the end of changed "
+ "page bitmap file \'%s\'.\n", bitmap_file->name);
+ }
+
+ if (UNIV_UNLIKELY(!last_page_in_run)) {
+
+ /* We are at EOF but the last read page did not finish
+ a run */
+ /* It's a "Warning" here because it's not a fatal error
+ for the whole server */
+ msg("InnoDB: Warning: changed page bitmap "
+ "file \'%s\' does not contain a complete run "
+ "at the end.\n", bitmap_file->name);
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/* End of copy-pasted definitions */
+
+/** Iterator structure over changed page bitmap */
+struct xb_page_bitmap_range_struct {
+ const xb_page_bitmap *bitmap; /* Bitmap with data */
+ ulint space_id; /* Space id for this
+ iterator */
+ ulint bit_i; /* Bit index of the iterator
+ position in the current page */
+ const ib_rbt_node_t *bitmap_node; /* Current bitmap tree node */
+ const byte *bitmap_page; /* Current bitmap page */
+ ulint current_page_id;/* Current page id */
+};
+
+/****************************************************************//**
+Print a diagnostic message on missing bitmap data for an LSN range. */
+static
+void
+xb_msg_missing_lsn_data(
+/*====================*/
+ lsn_t missing_interval_start, /*!<in: interval start */
+ lsn_t missing_interval_end) /*!<in: interval end */
+{
+ msg("xtrabackup: warning: changed page data missing for LSNs between "
+ LSN_PF " and " LSN_PF "\n", missing_interval_start,
+ missing_interval_end);
+}
+
+/****************************************************************//**
+Scan a bitmap file until data for a desired LSN or EOF is found and check that
+the page before the starting one is not corrupted to ensure that the found page
+indeed contains the very start of the desired LSN data. The caller must check
+the page LSN values to determine if the bitmap file was scanned until the data
+was found or until EOF. Page must be at least MODIFIED_PAGE_BLOCK_SIZE big.
+
+@return TRUE if the scan successful without corruption detected
+*/
+static
+ibool
+xb_find_lsn_in_bitmap_file(
+/*=======================*/
+ log_online_bitmap_file_t *bitmap_file, /*!<in/out: bitmap
+ file */
+ byte *page, /*!<in/out: last read
+ bitmap page */
+ lsn_t *page_end_lsn, /*!<out: end LSN of the
+ last read page */
+ lsn_t lsn) /*!<in: LSN to find */
+{
+ ibool last_page_ok = TRUE;
+ ibool next_to_last_page_ok = TRUE;
+
+ xb_ad (bitmap_file->size >= MODIFIED_PAGE_BLOCK_SIZE);
+
+ *page_end_lsn = 0;
+
+ while ((*page_end_lsn <= lsn)
+ && (bitmap_file->offset
+ <= bitmap_file->size - MODIFIED_PAGE_BLOCK_SIZE)) {
+
+ next_to_last_page_ok = last_page_ok;
+ if (!log_online_read_bitmap_page(bitmap_file, page,
+ &last_page_ok)) {
+
+ return FALSE;
+ }
+
+ *page_end_lsn = mach_read_from_8(page + MODIFIED_PAGE_END_LSN);
+ }
+
+ /* We check two pages here because the last read page already contains
+	the required LSN data. If the next-to-last page is corrupted, then we
+	have no way of telling whether that page contained the required LSN
+ range data too */
+ return last_page_ok && next_to_last_page_ok;
+}
+
+/****************************************************************//**
+Read the disk bitmap and build the changed page bitmap tree for the
+LSN interval incremental_lsn to checkpoint_lsn_start.
+
+@return the built bitmap tree or NULL if unable to read the full interval for
+any reason. */
+xb_page_bitmap*
+xb_page_bitmap_init(void)
+/*=====================*/
+{
+ log_online_bitmap_file_t bitmap_file;
+ lsn_t bmp_start_lsn = incremental_lsn;
+ lsn_t bmp_end_lsn = checkpoint_lsn_start;
+ byte page[MODIFIED_PAGE_BLOCK_SIZE];
+ lsn_t current_page_end_lsn;
+ xb_page_bitmap *result;
+ ibool last_page_in_run= FALSE;
+ log_online_bitmap_file_range_t bitmap_files;
+ size_t bmp_i;
+ ibool last_page_ok = TRUE;
+
+ if (UNIV_UNLIKELY(bmp_start_lsn > bmp_end_lsn)) {
+
+ msg("xtrabackup: incremental backup LSN " LSN_PF
+		    " is larger than the last checkpoint LSN " LSN_PF
+ "\n", bmp_start_lsn, bmp_end_lsn);
+ return NULL;
+ }
+
+ if (!log_online_setup_bitmap_file_range(&bitmap_files, bmp_start_lsn,
+ bmp_end_lsn)) {
+
+ return NULL;
+ }
+
+ /* Only accept no bitmap files returned if start LSN == end LSN */
+ if (bitmap_files.count == 0 && bmp_end_lsn != bmp_start_lsn) {
+
+ return NULL;
+ }
+
+ result = rbt_create(MODIFIED_PAGE_BLOCK_SIZE,
+ log_online_compare_bmp_keys);
+
+ if (bmp_start_lsn == bmp_end_lsn) {
+
+ /* Empty range - empty bitmap */
+ return result;
+ }
+
+ bmp_i = 0;
+
+ if (UNIV_UNLIKELY(bitmap_files.files[bmp_i].start_lsn
+ > bmp_start_lsn)) {
+
+ /* The 1st file does not have the starting LSN data */
+ xb_msg_missing_lsn_data(bmp_start_lsn,
+ bitmap_files.files[bmp_i].start_lsn);
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+
+ /* Skip any zero-sized files at the start */
+ while ((bmp_i < bitmap_files.count - 1)
+ && (bitmap_files.files[bmp_i].start_lsn
+ == bitmap_files.files[bmp_i + 1].start_lsn)) {
+
+ bmp_i++;
+ }
+
+ /* Is the 1st bitmap file missing? */
+ if (UNIV_UNLIKELY(bitmap_files.files[bmp_i].name[0] == '\0')) {
+
+ /* TODO: this is not the exact missing range */
+ xb_msg_missing_lsn_data(bmp_start_lsn, bmp_end_lsn);
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+
+ /* Open the 1st bitmap file */
+ if (UNIV_UNLIKELY(!log_online_open_bitmap_file_read_only(
+ bitmap_files.files[bmp_i].name,
+ &bitmap_file))) {
+
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+
+	/* If the 1st file is truncated, there is no data. Not merged with the
+	case below because a zero-length file indicates not corruption but
+	missing subsequent files instead. */
+ if (UNIV_UNLIKELY(bitmap_file.size < MODIFIED_PAGE_BLOCK_SIZE)) {
+
+ xb_msg_missing_lsn_data(bmp_start_lsn, bmp_end_lsn);
+ rbt_free(result);
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+ return NULL;
+ }
+
+ /* Find the start of the required LSN range in the file */
+ if (UNIV_UNLIKELY(!xb_find_lsn_in_bitmap_file(&bitmap_file, page,
+ &current_page_end_lsn,
+ bmp_start_lsn))) {
+
+ msg("xtrabackup: Warning: changed page bitmap file "
+ "\'%s\' corrupted\n", bitmap_file.name);
+ rbt_free(result);
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+ return NULL;
+ }
+
+ last_page_in_run
+ = mach_read_from_4(page + MODIFIED_PAGE_IS_LAST_BLOCK);
+
+ if (UNIV_UNLIKELY(!log_online_diagnose_bitmap_eof(&bitmap_file,
+ last_page_in_run))) {
+
+ rbt_free(result);
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+ return NULL;
+ }
+
+ if (UNIV_UNLIKELY(current_page_end_lsn < bmp_start_lsn)) {
+
+ xb_msg_missing_lsn_data(current_page_end_lsn, bmp_start_lsn);
+ rbt_free(result);
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+ return NULL;
+ }
+
+ /* 1st bitmap page found, add it to the tree. */
+ rbt_insert(result, page, page);
+
+ /* Read next pages/files until all required data is read */
+ while (last_page_ok
+ && (current_page_end_lsn < bmp_end_lsn
+ || (current_page_end_lsn == bmp_end_lsn
+ && !last_page_in_run))) {
+
+ ib_rbt_bound_t tree_search_pos;
+
+ /* If EOF, advance the file skipping over any empty files */
+ while (bitmap_file.size < MODIFIED_PAGE_BLOCK_SIZE
+ || (bitmap_file.offset
+ > bitmap_file.size - MODIFIED_PAGE_BLOCK_SIZE)) {
+
+ os_file_close(bitmap_file.file);
+
+ if (UNIV_UNLIKELY(
+ !log_online_diagnose_bitmap_eof(
+ &bitmap_file, last_page_in_run))) {
+
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+
+ bmp_i++;
+
+ if (UNIV_UNLIKELY(bmp_i == bitmap_files.count
+ || (bitmap_files.files[bmp_i].seq_num
+ == 0))) {
+
+ xb_msg_missing_lsn_data(current_page_end_lsn,
+ bmp_end_lsn);
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+
+ /* Is the next file missing? */
+ if (UNIV_UNLIKELY(bitmap_files.files[bmp_i].name[0]
+ == '\0')) {
+
+ /* TODO: this is not the exact missing range */
+ xb_msg_missing_lsn_data(bitmap_files.files
+ [bmp_i - 1].start_lsn,
+ bmp_end_lsn);
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+
+ if (UNIV_UNLIKELY(
+ !log_online_open_bitmap_file_read_only(
+ bitmap_files.files[bmp_i].name,
+ &bitmap_file))) {
+
+ rbt_free(result);
+ free(bitmap_files.files);
+ return NULL;
+ }
+ }
+
+ if (UNIV_UNLIKELY(
+ !log_online_read_bitmap_page(&bitmap_file, page,
+ &last_page_ok))) {
+
+ rbt_free(result);
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+ return NULL;
+ }
+
+ if (UNIV_UNLIKELY(!last_page_ok)) {
+
+ msg("xtrabackup: warning: changed page bitmap file "
+ "\'%s\' corrupted.\n", bitmap_file.name);
+ rbt_free(result);
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+ return NULL;
+ }
+
+ /* Merge the current page with an existing page or insert a new
+ page into the tree */
+
+ if (!rbt_search(result, &tree_search_pos, page)) {
+
+ /* Merge the bitmap pages */
+ byte *existing_page
+ = rbt_value(byte, tree_search_pos.last);
+ bitmap_word_t *bmp_word_1 = (bitmap_word_t *)
+ (existing_page + MODIFIED_PAGE_BLOCK_BITMAP);
+ bitmap_word_t *bmp_end = (bitmap_word_t *)
+ (existing_page + MODIFIED_PAGE_BLOCK_UNUSED_2);
+ bitmap_word_t *bmp_word_2 = (bitmap_word_t *)
+ (page + MODIFIED_PAGE_BLOCK_BITMAP);
+ while (bmp_word_1 < bmp_end) {
+
+ *bmp_word_1++ |= *bmp_word_2++;
+ }
+ xb_a (bmp_word_1 == bmp_end);
+ } else {
+
+ /* Add a new page */
+ rbt_add_node(result, &tree_search_pos, page);
+ }
+
+ current_page_end_lsn
+ = mach_read_from_8(page + MODIFIED_PAGE_END_LSN);
+ last_page_in_run
+ = mach_read_from_4(page + MODIFIED_PAGE_IS_LAST_BLOCK);
+ }
+
+ xb_a (current_page_end_lsn >= bmp_end_lsn);
+
+ free(bitmap_files.files);
+ os_file_close(bitmap_file.file);
+
+ return result;
+}
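For illustration only (not part of this file), the merge step inside the loop above reduces to OR-ing the 64-bit bitmap words of two blocks that share the same (space id, first page id) key, so a page marked as changed in either block stays marked in the merged result:

#include <stdint.h>
#include <stddef.h>

/* dst_words and src_words would correspond to the
MODIFIED_PAGE_BLOCK_BITMAP areas of the existing and the newly read
block. */
static void
merge_bitmap_blocks(uint64_t *dst_words, const uint64_t *src_words,
		    size_t n_words)
{
	size_t	i;

	for (i = 0; i < n_words; i++) {
		dst_words[i] |= src_words[i];
	}
}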
+
+/****************************************************************//**
+Free the bitmap tree. */
+void
+xb_page_bitmap_deinit(
+/*==================*/
+ xb_page_bitmap* bitmap) /*!<in/out: bitmap tree */
+{
+ if (bitmap) {
+
+ rbt_free(bitmap);
+ }
+}
+
+/****************************************************************//**
+Advance to the next bitmap page or set up the first bitmap page for the
+given bitmap range. Assumes that bitmap_range->bitmap_node has already
+been found/bumped by rbt_search()/rbt_next().
+
+@return FALSE if no more bitmap data for the range space ID */
+static
+ibool
+xb_page_bitmap_setup_next_page(
+/*===========================*/
+ xb_page_bitmap_range* bitmap_range) /*!<in/out: the bitmap range */
+{
+ ulint new_space_id;
+ ulint new_1st_page_id;
+
+ if (bitmap_range->bitmap_node == NULL) {
+
+ bitmap_range->current_page_id = ULINT_UNDEFINED;
+ return FALSE;
+ }
+
+ bitmap_range->bitmap_page = rbt_value(byte, bitmap_range->bitmap_node);
+
+ new_space_id = mach_read_from_4(bitmap_range->bitmap_page
+ + MODIFIED_PAGE_SPACE_ID);
+ if (new_space_id != bitmap_range->space_id) {
+
+ /* No more data for the current page id. */
+ xb_a(new_space_id > bitmap_range->space_id);
+ bitmap_range->current_page_id = ULINT_UNDEFINED;
+ return FALSE;
+ }
+
+ new_1st_page_id = mach_read_from_4(bitmap_range->bitmap_page +
+ MODIFIED_PAGE_1ST_PAGE_ID);
+ xb_a (new_1st_page_id >= bitmap_range->current_page_id
+ || bitmap_range->current_page_id == ULINT_UNDEFINED);
+
+ bitmap_range->current_page_id = new_1st_page_id;
+ bitmap_range->bit_i = 0;
+
+ return TRUE;
+}
+
+/****************************************************************//**
+Set up a new bitmap range iterator over the changed pages of a given
+space id in a given bitmap.
+
+@return bitmap range iterator */
+xb_page_bitmap_range*
+xb_page_bitmap_range_init(
+/*======================*/
+ xb_page_bitmap* bitmap, /*!< in: bitmap to iterate over */
+ ulint space_id) /*!< in: space id */
+{
+ byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
+ xb_page_bitmap_range *result
+ = static_cast<xb_page_bitmap_range *>
+ (ut_malloc(sizeof(*result)));
+
+ memset(result, 0, sizeof(*result));
+ result->bitmap = bitmap;
+ result->space_id = space_id;
+ result->current_page_id = ULINT_UNDEFINED;
+
+ /* Search for the 1st page for the given space id */
+ /* This also sets MODIFIED_PAGE_1ST_PAGE_ID to 0, which is what we
+ want. */
+ memset(search_page, 0, MODIFIED_PAGE_BLOCK_SIZE);
+ mach_write_to_4(search_page + MODIFIED_PAGE_SPACE_ID, space_id);
+
+ result->bitmap_node = rbt_lower_bound(result->bitmap, search_page);
+
+ xb_page_bitmap_setup_next_page(result);
+
+ return result;
+}
+
+/****************************************************************//**
+Get the value of the bitmap_range->bit_i bitmap bit
+
+@return the current bit value */
+static inline
+ibool
+is_bit_set(
+/*=======*/
+ const xb_page_bitmap_range* bitmap_range) /*!< in: bitmap
+ range */
+{
+ return ((*(((bitmap_word_t *)(bitmap_range->bitmap_page
+ + MODIFIED_PAGE_BLOCK_BITMAP))
+ + (bitmap_range->bit_i >> 6)))
+ & (1ULL << (bitmap_range->bit_i & 0x3F))) ? TRUE : FALSE;
+}
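For illustration only (not part of this file), the lookup above selects 64-bit word bit_i / 64 within the block's bitmap area and tests bit bit_i % 64 inside it; a standalone equivalent:

#include <stdint.h>

static int
bitmap_bit_is_set(const uint64_t *bitmap_words, unsigned bit_i)
{
	return (bitmap_words[bit_i >> 6] & (1ULL << (bit_i & 0x3F))) != 0;
}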
+
+/****************************************************************//**
+Get the next page id that has its bit set or cleared, i.e. equal to
+bit_value.
+
+@return page id */
+ulint
+xb_page_bitmap_range_get_next_bit(
+/*==============================*/
+ xb_page_bitmap_range* bitmap_range, /*!< in/out: bitmap range */
+ ibool bit_value) /*!< in: bit value */
+{
+ if (UNIV_UNLIKELY(bitmap_range->current_page_id
+ == ULINT_UNDEFINED)) {
+
+ return ULINT_UNDEFINED;
+ }
+
+ do {
+ while (bitmap_range->bit_i < MODIFIED_PAGE_BLOCK_ID_COUNT) {
+
+ while (is_bit_set(bitmap_range) != bit_value
+ && (bitmap_range->bit_i
+ < MODIFIED_PAGE_BLOCK_ID_COUNT)) {
+
+ bitmap_range->current_page_id++;
+ bitmap_range->bit_i++;
+ }
+
+ if (bitmap_range->bit_i
+ < MODIFIED_PAGE_BLOCK_ID_COUNT) {
+
+ ulint result = bitmap_range->current_page_id;
+ bitmap_range->current_page_id++;
+ bitmap_range->bit_i++;
+ return result;
+ }
+ }
+
+ bitmap_range->bitmap_node
+ = rbt_next(bitmap_range->bitmap,
+ bitmap_range->bitmap_node);
+
+ } while (xb_page_bitmap_setup_next_page(bitmap_range));
+
+ return ULINT_UNDEFINED;
+}
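A usage sketch of the iterator API (hypothetical caller, not part of this file); space id 5 is an arbitrary example value and the bitmap is assumed to have been built by xb_page_bitmap_init():

static void
print_changed_pages(xb_page_bitmap *bitmap)
{
	xb_page_bitmap_range	*range;
	ulint			page_id;

	range = xb_page_bitmap_range_init(bitmap, 5);

	while ((page_id = xb_page_bitmap_range_get_next_bit(range, TRUE))
	       != ULINT_UNDEFINED) {

		msg("changed page %lu\n", page_id);
	}

	xb_page_bitmap_range_deinit(range);
}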
+
+/****************************************************************//**
+Free the bitmap range iterator. */
+void
+xb_page_bitmap_range_deinit(
+/*========================*/
+	xb_page_bitmap_range* bitmap_range) /*!<in/out: bitmap range */
+{
+ ut_free(bitmap_range);
+}
diff --git a/extra/mariabackup/changed_page_bitmap.h b/extra/mariabackup/changed_page_bitmap.h
new file mode 100644
index 00000000000..6f549f47400
--- /dev/null
+++ b/extra/mariabackup/changed_page_bitmap.h
@@ -0,0 +1,85 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2012 Percona Inc.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Changed page bitmap interface */
+
+#ifndef XB_CHANGED_PAGE_BITMAP_H
+#define XB_CHANGED_PAGE_BITMAP_H
+
+#include <ut0rbt.h>
+#include <fil0fil.h>
+
+/* The changed page bitmap structure */
+typedef ib_rbt_t xb_page_bitmap;
+
+struct xb_page_bitmap_range_struct;
+
+/* The bitmap range iterator over one space id */
+typedef struct xb_page_bitmap_range_struct xb_page_bitmap_range;
+
+/****************************************************************//**
+Read the disk bitmap and build the changed page bitmap tree for the
+LSN interval incremental_lsn to checkpoint_lsn_start.
+
+@return the built bitmap tree */
+xb_page_bitmap*
+xb_page_bitmap_init(void);
+/*=====================*/
+
+/****************************************************************//**
+Free the bitmap tree. */
+void
+xb_page_bitmap_deinit(
+/*==================*/
+ xb_page_bitmap* bitmap); /*!<in/out: bitmap tree */
+
+
+/****************************************************************//**
+Set up a new bitmap range iterator over the changed pages of a given
+space id in a given bitmap.
+
+@return bitmap range iterator */
+xb_page_bitmap_range*
+xb_page_bitmap_range_init(
+/*======================*/
+ xb_page_bitmap* bitmap, /*!< in: bitmap to iterate over */
+ ulint space_id); /*!< in: space id */
+
+/****************************************************************//**
+Get the next page id that has its bit set or cleared, i.e. equal to
+bit_value.
+
+@return page id */
+ulint
+xb_page_bitmap_range_get_next_bit(
+/*==============================*/
+ xb_page_bitmap_range* bitmap_range, /*!< in/out: bitmap range */
+ ibool bit_value); /*!< in: bit value */
+
+/****************************************************************//**
+Free the bitmap range iterator. */
+void
+xb_page_bitmap_range_deinit(
+/*========================*/
+	xb_page_bitmap_range* bitmap_range); /*!<in/out: bitmap range */
+
+#endif
diff --git a/extra/mariabackup/common.h b/extra/mariabackup/common.h
new file mode 100644
index 00000000000..abbc4b41a85
--- /dev/null
+++ b/extra/mariabackup/common.h
@@ -0,0 +1,134 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Common declarations for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XB_COMMON_H
+#define XB_COMMON_H
+
+#include <my_global.h>
+#include <mysql_version.h>
+#include <fcntl.h>
+#include <stdarg.h>
+
+#define xb_a(expr) \
+ do { \
+ if (!(expr)) { \
+ msg("Assertion \"%s\" failed at %s:%lu\n", \
+ #expr, __FILE__, (ulong) __LINE__); \
+ abort(); \
+ } \
+ } while (0);
+
+#ifdef XB_DEBUG
+#define xb_ad(expr) xb_a(expr)
+#else
+#define xb_ad(expr)
+#endif
+
+#define XB_DELTA_INFO_SUFFIX ".meta"
+
+static inline int msg(const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
+static inline int msg(const char *fmt, ...)
+{
+ int result;
+ va_list args;
+
+ va_start(args, fmt);
+ result = vfprintf(stderr, fmt, args);
+ va_end(args);
+
+ return result;
+}
+
+static inline int msg_ts(const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
+static inline int msg_ts(const char *fmt, ...)
+{
+ int result;
+ time_t t = time(NULL);
+ char date[100];
+ char *line;
+ va_list args;
+
+ strftime(date, sizeof(date), "%y%m%d %H:%M:%S", localtime(&t));
+
+ va_start(args, fmt);
+ result = vasprintf(&line, fmt, args);
+ va_end(args);
+
+ if (result != -1) {
+ result = fprintf(stderr, "%s %s", date, line);
+ free(line);
+ }
+
+ return result;
+}
+
+/* Use posix_fadvise() when POSIX_FADV_NORMAL is available, otherwise make it a no-op */
+
+#ifdef POSIX_FADV_NORMAL
+# define USE_POSIX_FADVISE
+#else
+# define POSIX_FADV_NORMAL
+# define POSIX_FADV_SEQUENTIAL
+# define POSIX_FADV_DONTNEED
+# define posix_fadvise(a,b,c,d) do {} while(0)
+#endif
+
+/***********************************************************************
+Computes bit shift for a given value. If the argument is not a power
+of 2, returns 0.*/
+static inline ulong
+get_bit_shift(ulong value)
+{
+ ulong shift;
+
+ if (value == 0)
+ return 0;
+
+ for (shift = 0; !(value & 1UL); shift++) {
+ value >>= 1;
+ }
+ return (value >> 1) ? 0 : shift;
+}
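For example, get_bit_shift(4096) returns 12, while get_bit_shift(12) returns 0 because 12 is not a power of two. A hypothetical caller (page_size is an assumed variable) could derive a page-size shift like this:

	ulong	shift = get_bit_shift(page_size);	/* 12 for page_size == 4096 */

	if (shift == 0) {
		/* page_size is zero or not a power of two */
	}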
+
+/****************************************************************************
+Read 'len' bytes from 'fd'. It is identical to my_read(..., MYF(MY_FULL_IO)),
+i.e. tries to combine partial reads into a single block of size 'len', except
+that it bails out on EOF or error, and returns the number of successfully read
+bytes instead. */
+static inline size_t
+xb_read_full(File fd, uchar *buf, size_t len)
+{
+ size_t tlen = 0;
+ size_t tbytes;
+
+ while (tlen < len) {
+ tbytes = my_read(fd, buf, len - tlen, MYF(MY_WME));
+ if (tbytes == 0 || tbytes == MY_FILE_ERROR) {
+ break;
+ }
+
+ buf += tbytes;
+ tlen += tbytes;
+ }
+
+ return tlen;
+}
+
+#endif
diff --git a/extra/mariabackup/compact.cc b/extra/mariabackup/compact.cc
new file mode 100644
index 00000000000..5d08a6e02b2
--- /dev/null
+++ b/extra/mariabackup/compact.cc
@@ -0,0 +1,1059 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2014 Percona LLC and/or its affiliates.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Compact backups implementation */
+
+#include <my_base.h>
+#include <table.h>
+
+#include <univ.i>
+#include <dict0mem.h>
+#include <dict0priv.h>
+#include <fsp0fsp.h>
+#include <handler0alter.h>
+#include <ibuf0ibuf.h>
+#include <page0page.h>
+#include <row0merge.h>
+#include "common.h"
+#include "write_filt.h"
+#include "fil_cur.h"
+#include "xtrabackup.h"
+#include "ds_buffer.h"
+#include "xb0xb.h"
+
+/* Number of the first primary key page in an .ibd file */
+#define XB_FIRST_CLUSTERED_INDEX_PAGE_NO 3
+
+/* Suffix for page map files */
+#define XB_PAGE_MAP_SUFFIX ".pmap"
+#define XB_TMPFILE_SUFFIX ".tmp"
+
+/* Page range */
+struct page_range_t {
+ ulint from; /*!< range start */
+ ulint to; /*!< range end */
+};
+
+/* Cursor in a page map file */
+struct page_map_cursor_t {
+ File fd; /*!< file descriptor */
+ IO_CACHE cache; /*!< IO_CACHE associated with fd */
+};
+
+/* Table descriptor for the index rebuild operation */
+struct index_rebuild_table_t {
+ char* name; /* table name */
+ ulint space_id; /* space ID */
+ UT_LIST_NODE_T(index_rebuild_table_t) list; /* list node */
+};
+
+/* Thread descriptor for the index rebuild operation */
+struct index_rebuild_thread_t {
+ ulint num; /* thread number */
+ pthread_t id; /* thread ID */
+};
+
+/* Empty page used to replace skipped pages in the data files */
+static byte empty_page[UNIV_PAGE_SIZE_MAX];
+static const char compacted_page_magic[] = "COMPACTP";
+static const size_t compacted_page_magic_size =
+ sizeof(compacted_page_magic) - 1;
+static const ulint compacted_page_magic_offset = FIL_PAGE_DATA;
+
+/* Mutex protecting table_list */
+static pthread_mutex_t table_list_mutex;
+/* List of tablespaces to process by the index rebuild operation */
+static UT_LIST_BASE_NODE_T(index_rebuild_table_t) table_list;
+
+
+/************************************************************************
+Compact page filter. */
+static my_bool wf_compact_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+ xb_fil_cur_t *cursor);
+static my_bool wf_compact_process(xb_write_filt_ctxt_t *ctxt,
+ ds_file_t *dstfile);
+static my_bool wf_compact_finalize(xb_write_filt_ctxt_t *ctxt,
+ ds_file_t *dstfile);
+xb_write_filt_t wf_compact = {
+ &wf_compact_init,
+ &wf_compact_process,
+ &wf_compact_finalize,
+ NULL
+};
+
+/************************************************************************
+Initialize the compact page filter.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_compact_init(xb_write_filt_ctxt_t *ctxt,
+ char *dst_name __attribute__((unused)), xb_fil_cur_t *cursor)
+{
+ xb_wf_compact_ctxt_t *cp = &(ctxt->u.wf_compact_ctxt);
+ char page_map_name[FN_REFLEN];
+ MY_STAT mystat;
+
+ ctxt->cursor = cursor;
+ cp->clustered_index_found = FALSE;
+ cp->inside_skipped_range = FALSE;
+ cp->free_limit = 0;
+
+ /* Don't compact the system table space */
+ cp->skip = cursor->is_system;
+ if (cp->skip) {
+ return(TRUE);
+ }
+
+ snprintf(page_map_name, sizeof(page_map_name), "%s%s", dst_name,
+ XB_PAGE_MAP_SUFFIX);
+
+ cp->ds_buffer = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER);
+ if (cp->ds_buffer == NULL) {
+ return(FALSE);
+ }
+
+ ds_set_pipe(cp->ds_buffer, ds_meta);
+
+ memset(&mystat, 0, sizeof(mystat));
+ mystat.st_mtime = my_time(0);
+ cp->buffer = ds_open(cp->ds_buffer, page_map_name, &mystat);
+ if (cp->buffer == NULL) {
+ msg("xtrabackup: Error: cannot open output stream for %s\n",
+ page_map_name);
+ return(FALSE);
+ }
+
+ return(TRUE);
+}
+
+/************************************************************************
+Check if the specified page should be skipped. We currently skip all
+non-clustered index pages for compact backups.
+
+@return TRUE if the page should be skipped. */
+static my_bool
+check_if_skip_page(xb_wf_compact_ctxt_t *cp, xb_fil_cur_t *cursor, ulint offset)
+{
+ byte *page;
+ ulint page_no;
+ ulint page_type;
+ index_id_t index_id;
+
+
+ xb_ad(cursor->is_system == FALSE);
+
+ page = cursor->buf + cursor->page_size * offset;
+ page_no = cursor->buf_page_no + offset;
+ page_type = fil_page_get_type(page);
+
+ if (UNIV_UNLIKELY(page_no == 0)) {
+
+ cp->free_limit = mach_read_from_4(page + FSP_HEADER_OFFSET +
+ FSP_FREE_LIMIT);
+ } else if (UNIV_UNLIKELY(page_no == XB_FIRST_CLUSTERED_INDEX_PAGE_NO)) {
+
+ xb_ad(cp->clustered_index_found == FALSE);
+
+ if (page_type != FIL_PAGE_INDEX) {
+
+ /* Uninitialized clustered index root page, there's
+ nothing we can do to compact the space.*/
+
+ msg("[%02u] Uninitialized page type value (%lu) in the "
+ "clustered index root page of tablespace %s. "
+ "Will not be compacted.\n",
+ cursor->thread_n,
+ page_type, cursor->rel_path);
+
+ cp->skip = TRUE;
+
+ return(FALSE);
+ }
+
+ cp->clustered_index =
+ mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID);
+ cp->clustered_index_found = TRUE;
+ } else if (UNIV_UNLIKELY(page_no >= cp->free_limit)) {
+
+ /* Skip unused pages above free limit, if that value is set in
+ the FSP header.*/
+
+ return(cp->free_limit > 0);
+ } else if (cp->clustered_index_found && page_type == FIL_PAGE_INDEX) {
+
+ index_id = mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID);
+ if (index_id != cp->clustered_index) {
+
+ ulint fseg_hdr_space =
+ mach_read_from_4(page + PAGE_HEADER +
+ PAGE_BTR_SEG_TOP);
+ ulint fseg_hdr_page_no =
+ mach_read_from_4(page + PAGE_HEADER +
+ PAGE_BTR_SEG_TOP + 4);
+ ulint fseg_hdr_offset =
+ mach_read_from_2(page + PAGE_HEADER +
+ PAGE_BTR_SEG_TOP + 8);
+
+ /* Don't skip root index pages, i.e. the ones where the
+ above fields are defined. We need root index pages to be
+ able to correctly drop the indexes later, as they
+ contain fseg inode pointers. */
+
+ return(fseg_hdr_space == 0 &&
+ fseg_hdr_page_no == 0 &&
+ fseg_hdr_offset == 0);
+ }
+ }
+
+ return(FALSE);
+}
+
+/************************************************************************
+Run the next batch of pages through the compact page filter.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_compact_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
+{
+ xb_fil_cur_t *cursor = ctxt->cursor;
+ ulint page_size = cursor->page_size;
+ byte *page;
+ byte *buf_end;
+ byte *write_from;
+ xb_wf_compact_ctxt_t *cp = &(ctxt->u.wf_compact_ctxt);
+ ulint i;
+ ulint page_no;
+ byte tmp[4];
+
+ if (cp->skip) {
+ return(!ds_write(dstfile, cursor->buf, cursor->buf_read));
+ }
+
+ write_from = NULL;
+ buf_end = cursor->buf + cursor->buf_read;
+ for (i = 0, page = cursor->buf; page < buf_end;
+ i++, page += page_size) {
+
+ page_no = cursor->buf_page_no + i;
+
+ if (!check_if_skip_page(cp, cursor, i)) {
+
+ if (write_from == NULL) {
+ write_from = page;
+ }
+
+ if (cp->inside_skipped_range) {
+ cp->inside_skipped_range = FALSE;
+
+ /* Write the last range endpoint to the
+ skipped pages map */
+
+ xb_ad(page_no > 0);
+ mach_write_to_4(tmp, page_no - 1);
+ if (ds_write(cp->buffer, tmp, sizeof(tmp))) {
+ return(FALSE);
+ }
+ }
+ continue;
+ }
+
+ if (write_from != NULL) {
+
+ /* The first skipped page in this block, write the
+ non-skipped ones to the data file */
+
+ if (ds_write(dstfile, write_from, page - write_from)) {
+ return(FALSE);
+ }
+
+ write_from = NULL;
+ }
+
+ if (!cp->inside_skipped_range) {
+
+ /* The first skipped page in range, write the first
+ range endpoint to the skipped pages map */
+
+ cp->inside_skipped_range = TRUE;
+
+ mach_write_to_4(tmp, page_no);
+ if (ds_write(cp->buffer, tmp, sizeof(tmp))) {
+ return(FALSE);
+ }
+ }
+ }
+
+ /* Write the remaining pages in the buffer, if any */
+ if (write_from != NULL &&
+ ds_write(dstfile, write_from, buf_end - write_from)) {
+ return(FALSE);
+ }
+
+ return(TRUE);
+}
+
+/************************************************************************
+Close the compact filter's page map stream.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_compact_finalize(xb_write_filt_ctxt_t *ctxt,
+ ds_file_t *dstfile __attribute__((unused)))
+{
+ xb_fil_cur_t *cursor = ctxt->cursor;
+ xb_wf_compact_ctxt_t *cp = &(ctxt->u.wf_compact_ctxt);
+ my_bool rc = TRUE;
+
+ /* Write the last endpoint of the current range, if the last pages of
+ the space have been skipped. */
+ if (cp->inside_skipped_range) {
+ byte tmp[4];
+
+ mach_write_to_4(tmp, cursor->space_size - 1);
+ if (ds_write(cp->buffer, tmp, sizeof(tmp))) {
+ return(FALSE);
+ }
+
+ cp->inside_skipped_range = FALSE;
+ }
+
+ if (cp->buffer) {
+ if (ds_close(cp->buffer)) {
+ rc = FALSE;
+ }
+ }
+ if (cp->ds_buffer) {
+ ds_destroy(cp->ds_buffer);
+ }
+
+ return(rc);
+}
+
+/************************************************************************
+Open a page map file and return a cursor.
+
+@return page map cursor, or NULL if the file doesn't exist. */
+static page_map_cursor_t *
+page_map_file_open(const char *path)
+{
+ MY_STAT statinfo;
+ page_map_cursor_t *pmap_cur;
+ int rc;
+
+ if (my_stat(path, &statinfo, MYF(0)) == NULL) {
+
+ return(NULL);
+ }
+
+ /* The maximum possible page map file corresponds to a 64 TB tablespace
+ and the worst case when every other page was skipped. That is, 2^32/2
+ page ranges = 16 GB. */
+ xb_a(statinfo.st_size < (off_t) 16 * 1024 * 1024 * 1024);
+
+ /* Must be a series of 8-byte tuples */
+ xb_a(statinfo.st_size % 8 == 0);
+
+ pmap_cur = (page_map_cursor_t *) my_malloc(sizeof(page_map_cursor_t),
+ MYF(MY_FAE));
+
+ pmap_cur->fd = my_open(path, O_RDONLY, MYF(MY_WME));
+ xb_a(pmap_cur->fd != 0);
+
+ rc = init_io_cache(&pmap_cur->cache, pmap_cur->fd, 0, READ_CACHE,
+ 0, 0, MYF(MY_WME));
+ xb_a(rc == 0);
+
+ return(pmap_cur);
+}
+
+/************************************************************************
+Read the next range from a page map file and update the cursor.
+
+@return TRUE on success, FALSE on end-of-file. */
+static ibool
+page_map_file_next(page_map_cursor_t *pmap_cur, page_range_t *range)
+{
+ byte buf[8];
+
+ xb_ad(pmap_cur != NULL);
+
+ if (my_b_read(&pmap_cur->cache, buf, sizeof(buf))) {
+ return(FALSE);
+ }
+
+ range->from = mach_read_from_4(buf);
+ range->to = mach_read_from_4(buf + 4);
+
+ return(TRUE);
+}
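For illustration only (not part of this file), the .pmap format read above is a flat sequence of 8-byte records, each holding two big-endian 4-byte page numbers that delimit an inclusive range of skipped pages; a standalone reader using plain stdio:

#include <stdio.h>

static unsigned long
read_be32(const unsigned char *b)
{
	return ((unsigned long) b[0] << 24) | ((unsigned long) b[1] << 16)
		| ((unsigned long) b[2] << 8) | (unsigned long) b[3];
}

static int
next_skipped_range(FILE *f, unsigned long *from, unsigned long *to)
{
	unsigned char	buf[8];

	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		return 0;	/* EOF or short read */
	}

	*from = read_be32(buf);
	*to = read_be32(buf + 4);

	return 1;
}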
+
+/************************************************************************
+Close the page map cursor.*/
+static void
+page_map_file_close(page_map_cursor_t *pmap_cur)
+{
+ int rc;
+
+ xb_ad(pmap_cur != NULL);
+
+ rc = end_io_cache(&pmap_cur->cache);
+ xb_a(rc == 0);
+
+ posix_fadvise(pmap_cur->fd, 0, 0, POSIX_FADV_DONTNEED);
+
+ rc = my_close(pmap_cur->fd, MY_WME);
+ xb_a(rc == 0);
+
+ my_free(pmap_cur);
+}
+
+/****************************************************************************
+Expand a single data file according to the skipped pages maps created by
+--compact.
+
+@return TRUE on success, FALSE on failure. */
+static my_bool
+xb_expand_file(fil_node_t *node)
+{
+ char pmapfile_path[FN_REFLEN];
+ char tmpfile_path[FN_REFLEN];
+ xb_fil_cur_t cursor;
+ xb_fil_cur_result_t res;
+ ds_ctxt_t *ds_local;
+ ds_ctxt_t *ds_buffer;
+ ds_file_t *tmpfile;
+ my_bool success = FALSE;
+ ulint i;
+ byte *page;
+ ulint page_expected_no;
+ page_map_cursor_t *pmap_cur;
+ ibool have_next_range;
+ page_range_t pmap_range;
+
+ xb_ad(trx_sys_sys_space(node->space->id) == FALSE);
+
+ snprintf(pmapfile_path, sizeof(pmapfile_path), "%s%s",
+ node->name, XB_PAGE_MAP_SUFFIX);
+
+ /* Skip files that don't have a corresponding page map file */
+
+ if (!(pmap_cur = page_map_file_open(pmapfile_path))) {
+
+ msg("Not expanding %s\n", node->name);
+
+ return(FALSE);
+ }
+
+ msg("Expanding %s\n", node->name);
+
+ ds_local = ds_create(".", DS_TYPE_LOCAL);
+ ds_buffer = ds_create(".", DS_TYPE_BUFFER);
+
+ xb_a(ds_local != NULL && ds_buffer != NULL);
+
+ ds_buffer_set_size(ds_buffer, FSP_EXTENT_SIZE * UNIV_PAGE_SIZE_MAX);
+
+ ds_set_pipe(ds_buffer, ds_local);
+
+ res = xb_fil_cur_open(&cursor, &rf_pass_through, node, 1);
+ xb_a(res == XB_FIL_CUR_SUCCESS);
+
+ snprintf(tmpfile_path, sizeof(tmpfile_path), "%s%s",
+ node->name, XB_TMPFILE_SUFFIX);
+
+ tmpfile = ds_open(ds_buffer, tmpfile_path, &cursor.statinfo);
+ if (tmpfile == NULL) {
+
+ msg("Could not open temporary file '%s'\n", tmpfile_path);
+ goto error;
+ }
+
+ have_next_range = page_map_file_next(pmap_cur, &pmap_range);
+
+ page_expected_no = 0;
+
+ /* Initialize and mark the empty page which is used to replace
+ skipped pages. */
+ memset(empty_page, 0, cursor.page_size);
+ memcpy(empty_page + compacted_page_magic_offset,
+ compacted_page_magic, compacted_page_magic_size);
+ mach_write_to_4(empty_page + FIL_PAGE_SPACE_OR_CHKSUM,
+ BUF_NO_CHECKSUM_MAGIC);
+ mach_write_to_4(empty_page + cursor.page_size -
+ FIL_PAGE_END_LSN_OLD_CHKSUM,
+ BUF_NO_CHECKSUM_MAGIC);
+
+
+ /* Main copy loop */
+
+ while ((res = xb_fil_cur_read(&cursor)) == XB_FIL_CUR_SUCCESS) {
+
+ for (i = 0, page = cursor.buf; i < cursor.buf_npages;
+ i++, page += cursor.page_size) {
+
+ ulint page_read_no;
+
+ page_read_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
+ xb_a(!page_read_no || page_expected_no <= page_read_no);
+
+ if (have_next_range &&
+ page_expected_no == pmap_range.from) {
+
+ xb_a(pmap_range.from <= pmap_range.to);
+
+ /* Write empty pages instead of skipped ones, if
+ necessary. */
+
+ while (page_expected_no <= pmap_range.to) {
+
+ if (ds_write(tmpfile, empty_page,
+ cursor.page_size)) {
+
+ goto write_error;
+ }
+
+ page_expected_no++;
+ }
+
+ have_next_range =
+ page_map_file_next(pmap_cur,
+ &pmap_range);
+ }
+
+ /* Write the current page */
+
+ if (ds_write(tmpfile, page, cursor.page_size)) {
+
+ goto write_error;
+ }
+
+ page_expected_no++;
+ }
+ }
+
+ if (res != XB_FIL_CUR_EOF) {
+
+ goto error;
+ }
+
+ /* Write empty pages instead of trailing skipped ones, if any */
+
+ if (have_next_range) {
+
+ xb_a(page_expected_no == pmap_range.from);
+ xb_a(pmap_range.from <= pmap_range.to);
+
+ while (page_expected_no <= pmap_range.to) {
+
+ if (ds_write(tmpfile, empty_page,
+ cursor.page_size)) {
+
+ goto write_error;
+ }
+
+ page_expected_no++;
+ }
+
+ xb_a(!page_map_file_next(pmap_cur, &pmap_range));
+ }
+
+ /* Replace the original .ibd file with the expanded file */
+ if (my_rename(tmpfile_path, node->name, MYF(MY_WME))) {
+
+ msg("Failed to rename '%s' to '%s'\n",
+ tmpfile_path, node->name);
+ goto error;
+ }
+
+ my_delete(pmapfile_path, MYF(MY_WME));
+
+ if (!ds_close(tmpfile)) {
+ success = TRUE;
+ }
+ tmpfile = NULL;
+
+ goto end;
+
+write_error:
+ msg("Write to '%s' failed\n", tmpfile_path);
+
+error:
+ if (tmpfile != NULL) {
+
+ ds_close(tmpfile);
+ my_delete(tmpfile_path, MYF(MY_WME));
+ }
+
+end:
+ ds_destroy(ds_buffer);
+ ds_destroy(ds_local);
+
+ xb_fil_cur_close(&cursor);
+
+ page_map_file_close(pmap_cur);
+
+ return(success);
+}
+
+/******************************************************************************
+Expand the data files according to the skipped pages maps created by --compact.
+@return TRUE on success, FALSE on failure. */
+my_bool
+xb_expand_datafiles(void)
+/*=====================*/
+{
+ ulint nfiles;
+ datafiles_iter_t *it = NULL;
+ fil_node_t *node;
+ fil_space_t *space;
+
+ msg("Starting to expand compacted .ibd files.\n");
+
+ /* Initialize the tablespace cache */
+ if (xb_data_files_init() != DB_SUCCESS) {
+ return(FALSE);
+ }
+
+ nfiles = UT_LIST_GET_LEN(fil_system->space_list);
+ xb_a(nfiles > 0);
+
+ it = datafiles_iter_new(fil_system);
+ if (it == NULL) {
+ msg("xtrabackup: error: datafiles_iter_new() failed.\n");
+ goto error;
+ }
+
+ while ((node = datafiles_iter_next(it)) != NULL) {
+
+ space = node->space;
+
+ /* System tablespace cannot be compacted */
+ if (!fil_is_user_tablespace_id(space->id)) {
+
+ continue;
+ }
+
+ if (!xb_expand_file(node)) {
+
+ goto error;
+ }
+ }
+
+ datafiles_iter_free(it);
+ xb_data_files_close();
+
+ return(TRUE);
+
+error:
+ if (it != NULL) {
+ datafiles_iter_free(it);
+ }
+
+ xb_data_files_close();
+
+ return(FALSE);
+}
+
+/******************************************************************************
+Callback used in buf_page_io_complete() to detect compacted pages.
+@return TRUE if the page is marked as compacted, FALSE otherwise. */
+ibool
+buf_page_is_compacted(
+/*==================*/
+ const byte* page) /*!< in: a database page */
+{
+ return !memcmp(page + compacted_page_magic_offset,
+ compacted_page_magic, compacted_page_magic_size);
+}
+
+/*****************************************************************************
+Builds an index definition corresponding to an index object. It is roughly
+similar to innobase_create_index_def() / innobase_create_index_field_def() and
+the opposite to dict_mem_index_create() / dict_mem_index_add_field(). */
+static
+void
+xb_build_index_def(
+/*=======================*/
+ mem_heap_t* heap, /*!< in: heap */
+ const dict_index_t* index, /*!< in: index */
+ index_def_t* index_def) /*!< out: index definition */
+{
+ index_field_t* fields;
+ ulint n_fields;
+ ulint i;
+
+ ut_a(index->n_fields);
+ ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
+
+ /* Use n_user_defined_cols instead of n_fields, as the index will
+ contain a part of the primary key after n_user_defined_cols, and those
+ columns will be created automatically in
+ dict_index_build_internal_clust(). */
+ n_fields = index->n_user_defined_cols;
+
+ memset(index_def, 0, sizeof(*index_def));
+
+ index_def->name = mem_heap_strdup(heap, index->name);
+ index_def->ind_type = index->type;
+
+ fields = static_cast<index_field_t *>
+ (mem_heap_alloc(heap, n_fields * sizeof(*fields)));
+
+ for (i = 0; i < n_fields; i++) {
+ dict_field_t* field;
+
+ field = dict_index_get_nth_field(index, i);
+ fields[i].col_no = dict_col_get_no(field->col);
+ fields[i].prefix_len = field->prefix_len;
+ }
+
+ index_def->fields = fields;
+ index_def->n_fields = n_fields;
+}
+
+/* A dummy auto_inc sequence for row_merge_build_indexes(). */
+static ib_sequence_t null_seq(NULL, 0, 0);
+/* A dummy table share and table for row_merge_build_indexes() error reporting.
+Assumes that no errors are going to be reported. */
+static struct TABLE_SHARE dummy_table_share;
+static struct TABLE dummy_table;
+
+/********************************************************************//**
+Rebuild secondary indexes for a given table. */
+static
+void
+xb_rebuild_indexes_for_table(
+/*=========================*/
+ dict_table_t* table, /*!< in: table */
+ trx_t* trx, /*!< in: transaction handle */
+ ulint thread_n) /*!< in: thread number */
+{
+ dict_index_t* index;
+ dict_index_t** indexes;
+ ulint n_indexes;
+ index_def_t* index_defs;
+ ulint i;
+ mem_heap_t* heap;
+ ulint error;
+ ulint* add_key_nums;
+
+ ut_ad(!mutex_own(&(dict_sys->mutex)));
+ ut_ad(table);
+
+ ut_a(UT_LIST_GET_LEN(table->indexes) > 0);
+
+ n_indexes = UT_LIST_GET_LEN(table->indexes) - 1;
+ if (!n_indexes) {
+ /* Only the primary key, nothing to do. */
+ return;
+ }
+
+ heap = mem_heap_create(1024);
+
+ indexes = (dict_index_t**) mem_heap_alloc(heap,
+ n_indexes * sizeof(*indexes));
+ index_defs = (index_def_t*) mem_heap_alloc(heap, n_indexes *
+ sizeof(*index_defs));
+ add_key_nums = static_cast<ulint *>
+ (mem_heap_alloc(heap, n_indexes * sizeof(*add_key_nums)));
+
+ /* Skip the primary key. */
+ index = dict_table_get_first_index(table);
+ ut_a(dict_index_is_clust(index));
+
+ row_mysql_lock_data_dictionary(trx);
+
+ for (i = 0; (index = dict_table_get_next_index(index)); i++) {
+
+ msg("[%02lu] Found index %s\n", thread_n, index->name);
+
+ /* Pretend that it's the current trx that created this index.
+ Required to avoid 5.6+ debug assertions. */
+ index->trx_id = trx->id;
+
+ xb_build_index_def(heap, index, &index_defs[i]);
+
+ /* In 5.6+, row_merge_drop_indexes() drops all the indexes on
+ the table that have the temp index prefix. It does not accept
+ an array of indexes to drop as in 5.5-. */
+ row_merge_rename_index_to_drop(trx, table->id, index->id);
+ }
+
+ ut_ad(i == n_indexes);
+
+ row_merge_drop_indexes(trx, table, TRUE);
+
+ index = dict_table_get_first_index(table);
+ ut_a(dict_index_is_clust(index));
+ index = dict_table_get_next_index(index);
+ while (index) {
+
+ /* In 5.6+, row_merge_drop_indexes() does not remove the
+ indexes from the dictionary cache nor from any foreign key
+ list. This may cause invalid dereferences as we try to access
+ the dropped indexes from other tables as FKs. */
+
+ dict_index_t* next_index = dict_table_get_next_index(index);
+ index->to_be_dropped = 1;
+
+ /* Patch up any FK referencing this index with NULL */
+ dict_foreign_replace_index(table, NULL, index);
+
+ dict_index_remove_from_cache(table, index);
+
+ index = next_index;
+ }
+
+ msg("[%02lu] Rebuilding %lu index(es).\n", thread_n, n_indexes);
+
+ error = row_merge_lock_table(trx, table, LOCK_X);
+ xb_a(error == DB_SUCCESS);
+
+ for (i = 0; i < n_indexes; i++) {
+ indexes[i] = row_merge_create_index(trx, table,
+ &index_defs[i]);
+ add_key_nums[i] = index_defs[i].key_number;
+ }
+
+ /* Commit trx to release latches on system tables */
+ trx_commit_for_mysql(trx);
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ /* Reacquire table lock for row_merge_build_indexes() */
+ error = row_merge_lock_table(trx, table, LOCK_X);
+ xb_a(error == DB_SUCCESS);
+
+ error = row_merge_build_indexes(trx, table, table, FALSE, indexes,
+ add_key_nums, n_indexes, &dummy_table,
+ NULL, NULL, ULINT_UNDEFINED, null_seq);
+ ut_a(error == DB_SUCCESS);
+
+ mem_heap_free(heap);
+
+ trx_commit_for_mysql(trx);
+
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+}
+
+/**************************************************************************
+Worker thread function for index rebuild. */
+static
+void *
+xb_rebuild_indexes_thread_func(
+/*===========================*/
+ void* arg) /* thread context */
+{
+ dict_table_t* table;
+ index_rebuild_table_t* rebuild_table;
+ index_rebuild_thread_t* thread;
+ trx_t* trx;
+
+ thread = (index_rebuild_thread_t *) arg;
+
+ trx = trx_allocate_for_mysql();
+
+ /* Suppress foreign key checks, as we are going to drop and recreate all
+ secondary keys. */
+ trx->check_foreigns = FALSE;
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+ /* Loop until there are no more tables in tables list */
+ for (;;) {
+ pthread_mutex_lock(&table_list_mutex);
+
+ rebuild_table = UT_LIST_GET_FIRST(table_list);
+
+ if (rebuild_table == NULL) {
+
+ pthread_mutex_unlock(&table_list_mutex);
+ break;
+ }
+
+ UT_LIST_REMOVE(list, table_list, rebuild_table);
+
+ pthread_mutex_unlock(&table_list_mutex);
+
+ ut_ad(rebuild_table->name);
+ ut_ad(fil_is_user_tablespace_id(rebuild_table->space_id));
+
+ row_mysql_lock_data_dictionary(trx);
+
+ table = dict_table_get_low(rebuild_table->name);
+
+ ut_d(table->n_ref_count++);
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ ut_a(table != NULL);
+ ut_a(table->space == rebuild_table->space_id);
+
+ /* Discard change buffer entries for this space */
+ ibuf_delete_for_discarded_space(rebuild_table->space_id);
+
+ msg("[%02lu] Checking if there are indexes to rebuild in table "
+ "%s (space id: %lu)\n",
+ thread->num,
+ rebuild_table->name, rebuild_table->space_id);
+
+ xb_rebuild_indexes_for_table(table, trx, thread->num);
+
+ ut_d(table->n_ref_count--);
+
+ mem_free(rebuild_table->name);
+ mem_free(rebuild_table);
+ }
+
+ trx_commit_for_mysql(trx);
+
+ trx_free_for_mysql(trx);
+
+ return(NULL);
+}
+
+/******************************************************************************
+Rebuild all secondary indexes in all tables in separate spaces. Called from
+innobase_start_or_create_for_mysql(). */
+void
+xb_compact_rebuild_indexes(void)
+/*=============================*/
+{
+ dict_table_t* sys_tables;
+ dict_index_t* sys_index;
+ btr_pcur_t pcur;
+ const rec_t* rec;
+ mtr_t mtr;
+ const byte* field;
+ ulint len;
+ ulint space_id;
+ trx_t* trx;
+ index_rebuild_table_t* rebuild_table;
+ index_rebuild_thread_t* threads;
+ ulint i;
+
+ /* Set up the dummy table for the index rebuild error reporting */
+ dummy_table_share.fields = 0;
+ dummy_table.s = &dummy_table_share;
+
+ /* Iterate all tables that are not in the system tablespace and add them
+ to the list of tables to be rebuilt later. */
+
+ trx = trx_allocate_for_mysql();
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+ row_mysql_lock_data_dictionary(trx);
+
+ /* Enlarge the fatal lock wait timeout during index rebuild
+ operation. */
+ os_increment_counter_by_amount(server_mutex,
+ srv_fatal_semaphore_wait_threshold,
+ 7200);
+
+ mtr_start(&mtr);
+
+ sys_tables = dict_table_get_low("SYS_TABLES");
+ sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
+ ut_a(!dict_table_is_comp(sys_tables));
+
+ pthread_mutex_init(&table_list_mutex, NULL);
+ UT_LIST_INIT(table_list);
+
+ btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
+ TRUE, 0, &mtr);
+ for (;;) {
+ btr_pcur_move_to_next_user_rec(&pcur, &mtr);
+
+ rec = btr_pcur_get_rec(&pcur);
+
+ if (!btr_pcur_is_on_user_rec(&pcur)) {
+ /* end of index */
+
+ break;
+ }
+
+ if (rec_get_deleted_flag(rec, 0)) {
+ continue;
+ }
+
+ field = rec_get_nth_field_old(rec, 9, &len);
+ ut_a(len == 4);
+
+ space_id = mach_read_from_4(field);
+
+ /* Don't touch tables in the system tablespace */
+ if (!fil_is_user_tablespace_id(space_id)) {
+
+ continue;
+ }
+
+ field = rec_get_nth_field_old(rec, 0, &len);
+
+ rebuild_table = static_cast<index_rebuild_table_t *>
+ (mem_alloc(sizeof(*rebuild_table)));
+ rebuild_table->name = mem_strdupl((char*) field, len);
+ rebuild_table->space_id = space_id;
+
+ UT_LIST_ADD_LAST(list, table_list, rebuild_table);
+ }
+
+ btr_pcur_close(&pcur);
+ mtr_commit(&mtr);
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ trx_commit_for_mysql(trx);
+
+ trx_free_for_mysql(trx);
+
+ /* Start worker threads for the index rebuild operation */
+ ut_ad(xtrabackup_rebuild_threads > 0);
+
+ if (xtrabackup_rebuild_threads > 1) {
+ msg("Starting %lu threads to rebuild indexes.\n",
+ xtrabackup_rebuild_threads);
+ }
+
+ threads = (index_rebuild_thread_t *)
+ mem_alloc(sizeof(*threads) *
+ xtrabackup_rebuild_threads);
+
+ for (i = 0; i < xtrabackup_rebuild_threads; i++) {
+
+ threads[i].num = i+1;
+ if (pthread_create(&threads[i].id, NULL,
+ xb_rebuild_indexes_thread_func,
+ &threads[i])) {
+
+ msg("error: pthread_create() failed: errno = %d\n",
+ errno);
+ ut_a(0);
+ }
+ }
+
+ /* Wait for worker threads to finish */
+ for (i = 0; i < xtrabackup_rebuild_threads; i++) {
+ pthread_join(threads[i].id, NULL);
+ }
+
+ mem_free(threads);
+}
diff --git a/extra/mariabackup/compact.h b/extra/mariabackup/compact.h
new file mode 100644
index 00000000000..d0d9840f66d
--- /dev/null
+++ b/extra/mariabackup/compact.h
@@ -0,0 +1,44 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2013 Percona LLC and/or its affiliates.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XB_COMPACT_H
+#define XB_COMPACT_H
+
+#include "write_filt.h"
+
+/* Compact page filter context */
+typedef struct {
+ my_bool skip;
+ ds_ctxt_t *ds_buffer;
+ ds_file_t *buffer;
+ index_id_t clustered_index;
+ my_bool clustered_index_found;
+ my_bool inside_skipped_range;
+ ulint free_limit;
+} xb_wf_compact_ctxt_t;
+
+/******************************************************************************
+Expand the data files according to the skipped pages maps created by --compact.
+@return TRUE on success, FALSE on failure. */
+my_bool xb_expand_datafiles(void);
+
+#endif
diff --git a/extra/mariabackup/datasink.c b/extra/mariabackup/datasink.c
new file mode 100644
index 00000000000..2f4233ddc98
--- /dev/null
+++ b/extra/mariabackup/datasink.c
@@ -0,0 +1,130 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Data sink interface.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <my_base.h>
+#include "common.h"
+#include "datasink.h"
+#include "ds_compress.h"
+#include "ds_archive.h"
+#include "ds_xbstream.h"
+#include "ds_local.h"
+#include "ds_stdout.h"
+#include "ds_tmpfile.h"
+#include "ds_encrypt.h"
+#include "ds_buffer.h"
+
+/************************************************************************
+Create a datasink of the specified type */
+ds_ctxt_t *
+ds_create(const char *root, ds_type_t type)
+{
+ datasink_t *ds;
+ ds_ctxt_t *ctxt;
+
+ switch (type) {
+ case DS_TYPE_STDOUT:
+ ds = &datasink_stdout;
+ break;
+ case DS_TYPE_LOCAL:
+ ds = &datasink_local;
+ break;
+ case DS_TYPE_ARCHIVE:
+ ds = &datasink_archive;
+ break;
+ case DS_TYPE_XBSTREAM:
+ ds = &datasink_xbstream;
+ break;
+ case DS_TYPE_COMPRESS:
+ ds = &datasink_compress;
+ break;
+ case DS_TYPE_ENCRYPT:
+ ds = &datasink_encrypt;
+ break;
+ case DS_TYPE_TMPFILE:
+ ds = &datasink_tmpfile;
+ break;
+ case DS_TYPE_BUFFER:
+ ds = &datasink_buffer;
+ break;
+ default:
+ msg("Unknown datasink type: %d\n", type);
+ xb_ad(0);
+ return NULL;
+ }
+
+ ctxt = ds->init(root);
+ if (ctxt != NULL) {
+ ctxt->datasink = ds;
+ } else {
+ msg("Error: failed to initialize datasink.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ctxt;
+}
+
+/************************************************************************
+Open a datasink file */
+ds_file_t *
+ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat)
+{
+ ds_file_t *file;
+
+ file = ctxt->datasink->open(ctxt, path, stat);
+ if (file != NULL) {
+ file->datasink = ctxt->datasink;
+ }
+
+ return file;
+}
+
+/************************************************************************
+Write to a datasink file.
+@return 0 on success, 1 on error. */
+int
+ds_write(ds_file_t *file, const void *buf, size_t len)
+{
+ return file->datasink->write(file, buf, len);
+}
+
+/************************************************************************
+Close a datasink file.
+@return 0 on success, 1 on error. */
+int
+ds_close(ds_file_t *file)
+{
+ return file->datasink->close(file);
+}
+
+/************************************************************************
+Destroy a datasink handle */
+void
+ds_destroy(ds_ctxt_t *ctxt)
+{
+ ctxt->datasink->deinit(ctxt);
+}
+
+/************************************************************************
+Set the destination pipe for a datasink (only makes sense for datasinks that
+forward their output, such as compress, encrypt and tmpfile). */
+void ds_set_pipe(ds_ctxt_t *ctxt, ds_ctxt_t *pipe_ctxt)
+{
+ ctxt->pipe_ctxt = pipe_ctxt;
+}
diff --git a/extra/mariabackup/datasink.h b/extra/mariabackup/datasink.h
new file mode 100644
index 00000000000..36a3d564a57
--- /dev/null
+++ b/extra/mariabackup/datasink.h
@@ -0,0 +1,98 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Data sink interface.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XB_DATASINK_H
+#define XB_DATASINK_H
+
+#include <my_global.h>
+#include <my_dir.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct datasink_struct;
+typedef struct datasink_struct datasink_t;
+
+typedef struct ds_ctxt {
+ datasink_t *datasink;
+ char *root;
+ void *ptr;
+ struct ds_ctxt *pipe_ctxt;
+} ds_ctxt_t;
+
+typedef struct {
+ void *ptr;
+ char *path;
+ datasink_t *datasink;
+} ds_file_t;
+
+struct datasink_struct {
+ ds_ctxt_t *(*init)(const char *root);
+ ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
+ int (*write)(ds_file_t *file, const void *buf, size_t len);
+ int (*close)(ds_file_t *file);
+ void (*deinit)(ds_ctxt_t *ctxt);
+};
+
+/* Supported datasink types */
+typedef enum {
+ DS_TYPE_STDOUT,
+ DS_TYPE_LOCAL,
+ DS_TYPE_ARCHIVE,
+ DS_TYPE_XBSTREAM,
+ DS_TYPE_COMPRESS,
+ DS_TYPE_ENCRYPT,
+ DS_TYPE_TMPFILE,
+ DS_TYPE_BUFFER
+} ds_type_t;
+
+/************************************************************************
+Create a datasink of the specified type */
+ds_ctxt_t *ds_create(const char *root, ds_type_t type);
+
+/************************************************************************
+Open a datasink file */
+ds_file_t *ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat);
+
+/************************************************************************
+Write to a datasink file.
+@return 0 on success, 1 on error. */
+int ds_write(ds_file_t *file, const void *buf, size_t len);
+
+/************************************************************************
+Close a datasink file.
+@return 0 on success, 1 on error. */
+int ds_close(ds_file_t *file);
+
+/************************************************************************
+Destroy a datasink handle */
+void ds_destroy(ds_ctxt_t *ctxt);
+
+/************************************************************************
+Set the destination pipe for a datasink (only makes sense for datasinks that
+forward their output, such as compress, encrypt and tmpfile). */
+void ds_set_pipe(ds_ctxt_t *ctxt, ds_ctxt_t *pipe_ctxt);
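+
+/************************************************************************
+Illustrative usage sketch (assumed caller code; the target path, statinfo,
+buf and buf_len below are placeholders, not part of the original sources).
+A compress datasink is piped into a local one, so everything written
+through it ends up as .qp files under the target directory:
+
+	ds_ctxt_t *local    = ds_create("/backup/target", DS_TYPE_LOCAL);
+	ds_ctxt_t *compress = ds_create("/backup/target", DS_TYPE_COMPRESS);
+	ds_file_t *file;
+
+	ds_set_pipe(compress, local);
+	file = ds_open(compress, "test/t1.ibd", &statinfo);
+	ds_write(file, buf, buf_len);
+	ds_close(file);
+	ds_destroy(compress);
+	ds_destroy(local);
+*/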
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* XB_DATASINK_H */
diff --git a/extra/mariabackup/ds_archive.c b/extra/mariabackup/ds_archive.c
new file mode 100644
index 00000000000..aa38b2f9530
--- /dev/null
+++ b/extra/mariabackup/ds_archive.c
@@ -0,0 +1,275 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Streaming implementation for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <my_base.h>
+#include <archive.h>
+#include <archive_entry.h>
+#include "common.h"
+#include "datasink.h"
+
+typedef struct {
+ struct archive *archive;
+ ds_file_t *dest_file;
+ pthread_mutex_t mutex;
+} ds_archive_ctxt_t;
+
+typedef struct {
+ struct archive_entry *entry;
+ ds_archive_ctxt_t *archive_ctxt;
+} ds_archive_file_t;
+
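+/* All files opened through this datasink are appended to one shared archive
+stream: the destination file is created lazily on the first ds_open() call
+and reused (under the mutex) by every subsequent one. */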
+
+/***********************************************************************
+General archive interface */
+
+static ds_ctxt_t *archive_init(const char *root);
+static ds_file_t *archive_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int archive_write(ds_file_t *file, const void *buf, size_t len);
+static int archive_close(ds_file_t *file);
+static void archive_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_archive = {
+ &archive_init,
+ &archive_open,
+ &archive_write,
+ &archive_close,
+ &archive_deinit
+};
+
+static
+int
+my_archive_open_callback(struct archive *a __attribute__((unused)),
+ void *data __attribute__((unused)))
+{
+ return ARCHIVE_OK;
+}
+
+static
+ssize_t
+my_archive_write_callback(struct archive *a __attribute__((unused)),
+ void *data, const void *buffer, size_t length)
+{
+ ds_archive_ctxt_t *archive_ctxt;
+
+ archive_ctxt = (ds_archive_ctxt_t *) data;
+
+ xb_ad(archive_ctxt != NULL);
+ xb_ad(archive_ctxt->dest_file != NULL);
+
+ if (!ds_write(archive_ctxt->dest_file, buffer, length)) {
+ return length;
+ }
+ return -1;
+}
+
+static
+int
+my_archive_close_callback(struct archive *a __attribute__((unused)),
+ void *data __attribute__((unused)))
+{
+ return ARCHIVE_OK;
+}
+
+static
+ds_ctxt_t *
+archive_init(const char *root __attribute__((unused)))
+{
+ ds_ctxt_t *ctxt;
+ ds_archive_ctxt_t *archive_ctxt;
+ struct archive *a;
+
+ ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_archive_ctxt_t),
+ MYF(MY_FAE));
+ archive_ctxt = (ds_archive_ctxt_t *)(ctxt + 1);
+
+ if (pthread_mutex_init(&archive_ctxt->mutex, NULL)) {
+ msg("archive_init: pthread_mutex_init() failed.\n");
+ goto err;
+ }
+
+ a = archive_write_new();
+ if (a == NULL) {
+ msg("archive_write_new() failed.\n");
+ goto err;
+ }
+
+ archive_ctxt->archive = a;
+ archive_ctxt->dest_file = NULL;
+
+ if (archive_write_set_compression_none(a) != ARCHIVE_OK ||
+ archive_write_set_format_pax_restricted(a) != ARCHIVE_OK ||
+ /* disable internal buffering so we don't have to flush the
+ output in xtrabackup */
+ archive_write_set_bytes_per_block(a, 0) != ARCHIVE_OK) {
+ msg("failed to set libarchive archive options: %s\n",
+ archive_error_string(a));
+ archive_write_finish(a);
+ goto err;
+ }
+
+ if (archive_write_open(a, archive_ctxt, my_archive_open_callback,
+ my_archive_write_callback,
+ my_archive_close_callback) != ARCHIVE_OK) {
+ msg("cannot open output archive.\n");
+ return NULL;
+ }
+
+ ctxt->ptr = archive_ctxt;
+
+ return ctxt;
+
+err:
+ my_free(ctxt);
+ return NULL;
+}
+
+static
+ds_file_t *
+archive_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
+{
+ ds_archive_ctxt_t *archive_ctxt;
+ ds_ctxt_t *dest_ctxt;
+ ds_file_t *file;
+ ds_archive_file_t *archive_file;
+
+ struct archive *a;
+ struct archive_entry *entry;
+
+ xb_ad(ctxt->pipe_ctxt != NULL);
+ dest_ctxt = ctxt->pipe_ctxt;
+
+ archive_ctxt = (ds_archive_ctxt_t *) ctxt->ptr;
+
+ pthread_mutex_lock(&archive_ctxt->mutex);
+ if (archive_ctxt->dest_file == NULL) {
+ archive_ctxt->dest_file = ds_open(dest_ctxt, path, mystat);
+		archive_ctxt->dest_file = ds_open(dest_ctxt, path, mystat);
+		if (archive_ctxt->dest_file == NULL) {
+			pthread_mutex_unlock(&archive_ctxt->mutex);
+			return NULL;
+		}
+ }
+ pthread_mutex_unlock(&archive_ctxt->mutex);
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_archive_file_t),
+ MYF(MY_FAE));
+
+ archive_file = (ds_archive_file_t *) (file + 1);
+
+ a = archive_ctxt->archive;
+
+ entry = archive_entry_new();
+ if (entry == NULL) {
+ msg("archive_entry_new() failed.\n");
+ goto err;
+ }
+
+ archive_entry_set_size(entry, mystat->st_size);
+ archive_entry_set_mode(entry, 0660);
+ archive_entry_set_filetype(entry, AE_IFREG);
+ archive_entry_set_pathname(entry, path);
+ archive_entry_set_mtime(entry, mystat->st_mtime, 0);
+
+ archive_file->entry = entry;
+ archive_file->archive_ctxt = archive_ctxt;
+
+ if (archive_write_header(a, entry) != ARCHIVE_OK) {
+ msg("archive_write_header() failed.\n");
+ archive_entry_free(entry);
+ goto err;
+ }
+
+ file->ptr = archive_file;
+ file->path = archive_ctxt->dest_file->path;
+
+ return file;
+
+err:
+ if (archive_ctxt->dest_file) {
+ ds_close(archive_ctxt->dest_file);
+ archive_ctxt->dest_file = NULL;
+ }
+ my_free(file);
+
+ return NULL;
+}
+
+static
+int
+archive_write(ds_file_t *file, const void *buf, size_t len)
+{
+ ds_archive_file_t *archive_file;
+ struct archive *a;
+
+ archive_file = (ds_archive_file_t *) file->ptr;
+
+ a = archive_file->archive_ctxt->archive;
+
+ xb_ad(archive_file->archive_ctxt->dest_file != NULL);
+ if (archive_write_data(a, buf, len) < 0) {
+ msg("archive_write_data() failed: %s (errno = %d)\n",
+ archive_error_string(a), archive_errno(a));
+ return 1;
+ }
+
+ return 0;
+}
+
+static
+int
+archive_close(ds_file_t *file)
+{
+ ds_archive_file_t *archive_file;
+ int rc = 0;
+
+ archive_file = (ds_archive_file_t *)file->ptr;
+
+ archive_entry_free(archive_file->entry);
+
+ my_free(file);
+
+ return rc;
+}
+
+static
+void
+archive_deinit(ds_ctxt_t *ctxt)
+{
+ struct archive *a;
+ ds_archive_ctxt_t *archive_ctxt;
+
+ archive_ctxt = (ds_archive_ctxt_t *) ctxt->ptr;
+
+ a = archive_ctxt->archive;
+
+ if (archive_write_close(a) != ARCHIVE_OK) {
+ msg("archive_write_close() failed.\n");
+ }
+ archive_write_finish(a);
+
+ if (archive_ctxt->dest_file) {
+ ds_close(archive_ctxt->dest_file);
+ archive_ctxt->dest_file = NULL;
+ }
+
+ pthread_mutex_destroy(&archive_ctxt->mutex);
+
+ my_free(ctxt);
+}
diff --git a/extra/mariabackup/ds_archive.h b/extra/mariabackup/ds_archive.h
new file mode 100644
index 00000000000..3f4e4463c58
--- /dev/null
+++ b/extra/mariabackup/ds_archive.h
@@ -0,0 +1,28 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Streaming interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_ARCHIVE_H
+#define DS_ARCHIVE_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_archive;
+
+#endif
diff --git a/extra/mariabackup/ds_buffer.c b/extra/mariabackup/ds_buffer.c
new file mode 100644
index 00000000000..4bb314c0f50
--- /dev/null
+++ b/extra/mariabackup/ds_buffer.c
@@ -0,0 +1,189 @@
+/******************************************************
+Copyright (c) 2012-2013 Percona LLC and/or its affiliates.
+
+buffer datasink for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Does buffered output to a destination datasink set with ds_set_pipe().
+Writes to the destination datasink are guaranteed to not be smaller than a
+specified buffer size (DS_DEFAULT_BUFFER_SIZE by default), with the only
+exception for the last write for a file. */
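+
+/* Illustrative chaining sketch (assumed caller code, not part of this file):
+
+	ds_ctxt_t *stream = ds_create(root, DS_TYPE_XBSTREAM);
+	ds_ctxt_t *buffer = ds_create(root, DS_TYPE_BUFFER);
+
+	ds_buffer_set_size(buffer, 10 * 1024 * 1024);
+	ds_set_pipe(buffer, stream);
+
+Everything written through "buffer" now reaches "stream" in pieces of at
+least 10 MB, except for the final piece of each file. */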
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include "ds_buffer.h"
+#include "common.h"
+#include "datasink.h"
+
+#define DS_DEFAULT_BUFFER_SIZE (64 * 1024)
+
+typedef struct {
+ ds_file_t *dst_file;
+ char *buf;
+ size_t pos;
+ size_t size;
+} ds_buffer_file_t;
+
+typedef struct {
+ size_t buffer_size;
+} ds_buffer_ctxt_t;
+
+static ds_ctxt_t *buffer_init(const char *root);
+static ds_file_t *buffer_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int buffer_write(ds_file_t *file, const void *buf, size_t len);
+static int buffer_close(ds_file_t *file);
+static void buffer_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_buffer = {
+ &buffer_init,
+ &buffer_open,
+ &buffer_write,
+ &buffer_close,
+ &buffer_deinit
+};
+
+/* Change the default buffer size */
+void ds_buffer_set_size(ds_ctxt_t *ctxt, size_t size)
+{
+ ds_buffer_ctxt_t *buffer_ctxt = (ds_buffer_ctxt_t *) ctxt->ptr;
+
+ buffer_ctxt->buffer_size = size;
+}
+
+static ds_ctxt_t *
+buffer_init(const char *root)
+{
+ ds_ctxt_t *ctxt;
+ ds_buffer_ctxt_t *buffer_ctxt;
+
+ ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_buffer_ctxt_t),
+ MYF(MY_FAE));
+ buffer_ctxt = (ds_buffer_ctxt_t *) (ctxt + 1);
+ buffer_ctxt->buffer_size = DS_DEFAULT_BUFFER_SIZE;
+
+ ctxt->ptr = buffer_ctxt;
+ ctxt->root = my_strdup(root, MYF(MY_FAE));
+
+ return ctxt;
+}
+
+static ds_file_t *
+buffer_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
+{
+ ds_buffer_ctxt_t *buffer_ctxt;
+ ds_ctxt_t *pipe_ctxt;
+ ds_file_t *dst_file;
+ ds_file_t *file;
+ ds_buffer_file_t *buffer_file;
+
+ pipe_ctxt = ctxt->pipe_ctxt;
+ xb_a(pipe_ctxt != NULL);
+
+ dst_file = ds_open(pipe_ctxt, path, mystat);
+ if (dst_file == NULL) {
+ exit(EXIT_FAILURE);
+ }
+
+ buffer_ctxt = (ds_buffer_ctxt_t *) ctxt->ptr;
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_buffer_file_t) +
+ buffer_ctxt->buffer_size,
+ MYF(MY_FAE));
+
+ buffer_file = (ds_buffer_file_t *) (file + 1);
+ buffer_file->dst_file = dst_file;
+ buffer_file->buf = (char *) (buffer_file + 1);
+ buffer_file->size = buffer_ctxt->buffer_size;
+ buffer_file->pos = 0;
+
+ file->path = dst_file->path;
+ file->ptr = buffer_file;
+
+ return file;
+}
+
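+/* Accumulate incoming data in the per-file buffer. When an incoming piece
+would overflow the buffer, the buffer is topped up and flushed as one
+full-size write; when the buffer is empty and the piece alone exceeds the
+buffer size, it is written through directly. Only the final flush in
+buffer_close() may therefore be smaller than the buffer size. */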
+static int
+buffer_write(ds_file_t *file, const void *buf, size_t len)
+{
+ ds_buffer_file_t *buffer_file;
+
+ buffer_file = (ds_buffer_file_t *) file->ptr;
+
+ while (len > 0) {
+ if (buffer_file->pos + len > buffer_file->size) {
+ if (buffer_file->pos > 0) {
+ size_t bytes;
+
+ bytes = buffer_file->size - buffer_file->pos;
+ memcpy(buffer_file->buf + buffer_file->pos, buf,
+ bytes);
+
+ if (ds_write(buffer_file->dst_file,
+ buffer_file->buf,
+ buffer_file->size)) {
+ return 1;
+ }
+
+ buffer_file->pos = 0;
+
+ buf = (const char *) buf + bytes;
+ len -= bytes;
+ } else {
+				/* We don't have any buffered bytes; just
+				write the entire source buffer */
+ if (ds_write(buffer_file->dst_file, buf, len)) {
+ return 1;
+ }
+ break;
+ }
+ } else {
+ memcpy(buffer_file->buf + buffer_file->pos, buf, len);
+ buffer_file->pos += len;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+buffer_close(ds_file_t *file)
+{
+ ds_buffer_file_t *buffer_file;
+ int ret;
+
+ buffer_file = (ds_buffer_file_t *) file->ptr;
+ if (buffer_file->pos > 0) {
+ ds_write(buffer_file->dst_file, buffer_file->buf,
+ buffer_file->pos);
+ }
+
+ ret = ds_close(buffer_file->dst_file);
+
+ my_free(file);
+
+ return ret;
+}
+
+static void
+buffer_deinit(ds_ctxt_t *ctxt)
+{
+ my_free(ctxt->root);
+ my_free(ctxt);
+}
diff --git a/extra/mariabackup/ds_buffer.h b/extra/mariabackup/ds_buffer.h
new file mode 100644
index 00000000000..f8d2d63267d
--- /dev/null
+++ b/extra/mariabackup/ds_buffer.h
@@ -0,0 +1,39 @@
+/******************************************************
+Copyright (c) 2012-2013 Percona LLC and/or its affiliates.
+
+buffer datasink for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_BUFFER_H
+#define DS_BUFFER_H
+
+#include "datasink.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern datasink_t datasink_buffer;
+
+/* Change the default buffer size */
+void ds_buffer_set_size(ds_ctxt_t *ctxt, size_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/extra/mariabackup/ds_compress.c b/extra/mariabackup/ds_compress.c
new file mode 100644
index 00000000000..c49fceaf7fa
--- /dev/null
+++ b/extra/mariabackup/ds_compress.c
@@ -0,0 +1,462 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Compressing datasink implementation for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <quicklz.h>
+#include <zlib.h>
+#include "common.h"
+#include "datasink.h"
+
+#define COMPRESS_CHUNK_SIZE ((size_t) (xtrabackup_compress_chunk_size))
+#define MY_QLZ_COMPRESS_OVERHEAD 400
+
+typedef struct {
+ pthread_t id;
+ uint num;
+ pthread_mutex_t ctrl_mutex;
+ pthread_cond_t ctrl_cond;
+ pthread_mutex_t data_mutex;
+ pthread_cond_t data_cond;
+ my_bool started;
+ my_bool data_avail;
+ my_bool cancelled;
+ const char *from;
+ size_t from_len;
+ char *to;
+ size_t to_len;
+ qlz_state_compress state;
+ ulong adler;
+} comp_thread_ctxt_t;
+
+typedef struct {
+ comp_thread_ctxt_t *threads;
+ uint nthreads;
+} ds_compress_ctxt_t;
+
+typedef struct {
+ ds_file_t *dest_file;
+ ds_compress_ctxt_t *comp_ctxt;
+ size_t bytes_processed;
+} ds_compress_file_t;
+
+/* Compression options */
+extern char *xtrabackup_compress_alg;
+extern uint xtrabackup_compress_threads;
+extern ulonglong xtrabackup_compress_chunk_size;
+
+static ds_ctxt_t *compress_init(const char *root);
+static ds_file_t *compress_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int compress_write(ds_file_t *file, const void *buf, size_t len);
+static int compress_close(ds_file_t *file);
+static void compress_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_compress = {
+ &compress_init,
+ &compress_open,
+ &compress_write,
+ &compress_close,
+ &compress_deinit
+};
+
+static inline int write_uint32_le(ds_file_t *file, ulong n);
+static inline int write_uint64_le(ds_file_t *file, ulonglong n);
+
+static comp_thread_ctxt_t *create_worker_threads(uint n);
+static void destroy_worker_threads(comp_thread_ctxt_t *threads, uint n);
+static void *compress_worker_thread_func(void *arg);
+
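+/*
+Threading model (summary of the code below): compress_write() slices its
+input into COMPRESS_CHUNK_SIZE pieces and hands them out round-robin to the
+worker threads, which compress the pieces in parallel with qlz_compress().
+The results are then reaped and written out in submission order, so the
+output stream is deterministic regardless of the number of threads.
+*/
+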
+static
+ds_ctxt_t *
+compress_init(const char *root)
+{
+ ds_ctxt_t *ctxt;
+ ds_compress_ctxt_t *compress_ctxt;
+ comp_thread_ctxt_t *threads;
+
+ /* Create and initialize the worker threads */
+ threads = create_worker_threads(xtrabackup_compress_threads);
+ if (threads == NULL) {
+ msg("compress: failed to create worker threads.\n");
+ return NULL;
+ }
+
+ ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) +
+ sizeof(ds_compress_ctxt_t),
+ MYF(MY_FAE));
+
+ compress_ctxt = (ds_compress_ctxt_t *) (ctxt + 1);
+ compress_ctxt->threads = threads;
+ compress_ctxt->nthreads = xtrabackup_compress_threads;
+
+ ctxt->ptr = compress_ctxt;
+ ctxt->root = my_strdup(root, MYF(MY_FAE));
+
+ return ctxt;
+}
+
+static
+ds_file_t *
+compress_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
+{
+ ds_compress_ctxt_t *comp_ctxt;
+ ds_ctxt_t *dest_ctxt;
+ ds_file_t *dest_file;
+ char new_name[FN_REFLEN];
+ size_t name_len;
+ ds_file_t *file;
+ ds_compress_file_t *comp_file;
+
+ xb_ad(ctxt->pipe_ctxt != NULL);
+ dest_ctxt = ctxt->pipe_ctxt;
+
+ comp_ctxt = (ds_compress_ctxt_t *) ctxt->ptr;
+
+ /* Append the .qp extension to the filename */
+ fn_format(new_name, path, "", ".qp", MYF(MY_APPEND_EXT));
+
+ dest_file = ds_open(dest_ctxt, new_name, mystat);
+ if (dest_file == NULL) {
+ return NULL;
+ }
+
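+	/* The overall layout of the .qp stream written by this datasink
+	(header here, one block per chunk in compress_write(), trailer in
+	compress_close()) is:
+
+	  "qpress10" + 8-byte chunk size                  archive header
+	  'F' + 4-byte name length + name + '\0'          file header
+	  "NEWBNEWB" + 8-byte offset + 4-byte Adler-32
+	            + compressed chunk data               repeated per chunk
+	  "ENDSENDS" + 8 zero bytes                       trailer */
+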
+ /* Write the qpress archive header */
+ if (ds_write(dest_file, "qpress10", 8) ||
+ write_uint64_le(dest_file, COMPRESS_CHUNK_SIZE)) {
+ goto err;
+ }
+
+ /* We are going to create a one-file "flat" (i.e. with no
+ subdirectories) archive. So strip the directory part from the path and
+ remove the '.qp' suffix. */
+ fn_format(new_name, path, "", "", MYF(MY_REPLACE_DIR));
+
+ /* Write the qpress file header */
+ name_len = strlen(new_name);
+ if (ds_write(dest_file, "F", 1) ||
+ write_uint32_le(dest_file, name_len) ||
+ /* we want to write the terminating \0 as well */
+ ds_write(dest_file, new_name, name_len + 1)) {
+ goto err;
+ }
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_compress_file_t),
+ MYF(MY_FAE));
+ comp_file = (ds_compress_file_t *) (file + 1);
+ comp_file->dest_file = dest_file;
+ comp_file->comp_ctxt = comp_ctxt;
+ comp_file->bytes_processed = 0;
+
+ file->ptr = comp_file;
+ file->path = dest_file->path;
+
+ return file;
+
+err:
+ ds_close(dest_file);
+ return NULL;
+}
+
+static
+int
+compress_write(ds_file_t *file, const void *buf, size_t len)
+{
+ ds_compress_file_t *comp_file;
+ ds_compress_ctxt_t *comp_ctxt;
+ comp_thread_ctxt_t *threads;
+ comp_thread_ctxt_t *thd;
+ uint nthreads;
+ uint i;
+ const char *ptr;
+ ds_file_t *dest_file;
+
+ comp_file = (ds_compress_file_t *) file->ptr;
+ comp_ctxt = comp_file->comp_ctxt;
+ dest_file = comp_file->dest_file;
+
+ threads = comp_ctxt->threads;
+ nthreads = comp_ctxt->nthreads;
+
+ ptr = (const char *) buf;
+ while (len > 0) {
+ uint max_thread;
+
+ /* Send data to worker threads for compression */
+ for (i = 0; i < nthreads; i++) {
+ size_t chunk_len;
+
+ thd = threads + i;
+
+ pthread_mutex_lock(&thd->ctrl_mutex);
+
+ chunk_len = (len > COMPRESS_CHUNK_SIZE) ?
+ COMPRESS_CHUNK_SIZE : len;
+ thd->from = ptr;
+ thd->from_len = chunk_len;
+
+ pthread_mutex_lock(&thd->data_mutex);
+ thd->data_avail = TRUE;
+ pthread_cond_signal(&thd->data_cond);
+ pthread_mutex_unlock(&thd->data_mutex);
+
+ len -= chunk_len;
+ if (len == 0) {
+ break;
+ }
+ ptr += chunk_len;
+ }
+
+ max_thread = (i < nthreads) ? i : nthreads - 1;
+
+ /* Reap and stream the compressed data */
+ for (i = 0; i <= max_thread; i++) {
+ thd = threads + i;
+
+ pthread_mutex_lock(&thd->data_mutex);
+ while (thd->data_avail == TRUE) {
+ pthread_cond_wait(&thd->data_cond,
+ &thd->data_mutex);
+ }
+
+ xb_a(threads[i].to_len > 0);
+
+ if (ds_write(dest_file, "NEWBNEWB", 8) ||
+ write_uint64_le(dest_file,
+ comp_file->bytes_processed)) {
+ msg("compress: write to the destination stream "
+ "failed.\n");
+ return 1;
+ }
+
+ comp_file->bytes_processed += threads[i].from_len;
+
+ if (write_uint32_le(dest_file, threads[i].adler) ||
+ ds_write(dest_file, threads[i].to,
+ threads[i].to_len)) {
+ msg("compress: write to the destination stream "
+ "failed.\n");
+ return 1;
+ }
+
+ pthread_mutex_unlock(&threads[i].data_mutex);
+ pthread_mutex_unlock(&threads[i].ctrl_mutex);
+ }
+ }
+
+ return 0;
+}
+
+static
+int
+compress_close(ds_file_t *file)
+{
+ ds_compress_file_t *comp_file;
+ ds_file_t *dest_file;
+ int rc;
+
+ comp_file = (ds_compress_file_t *) file->ptr;
+ dest_file = comp_file->dest_file;
+
+ /* Write the qpress file trailer */
+ ds_write(dest_file, "ENDSENDS", 8);
+
+ /* Supposedly the number of written bytes should be written as a
+ "recovery information" in the file trailer, but in reality qpress
+ always writes 8 zeros here. Let's do the same */
+
+ write_uint64_le(dest_file, 0);
+
+ rc = ds_close(dest_file);
+
+ my_free(file);
+
+ return rc;
+}
+
+static
+void
+compress_deinit(ds_ctxt_t *ctxt)
+{
+ ds_compress_ctxt_t *comp_ctxt;
+
+ xb_ad(ctxt->pipe_ctxt != NULL);
+
+	comp_ctxt = (ds_compress_ctxt_t *) ctxt->ptr;
+
+ destroy_worker_threads(comp_ctxt->threads, comp_ctxt->nthreads);
+
+ my_free(ctxt->root);
+ my_free(ctxt);
+}
+
+static inline
+int
+write_uint32_le(ds_file_t *file, ulong n)
+{
+ char tmp[4];
+
+ int4store(tmp, n);
+ return ds_write(file, tmp, sizeof(tmp));
+}
+
+static inline
+int
+write_uint64_le(ds_file_t *file, ulonglong n)
+{
+ char tmp[8];
+
+ int8store(tmp, n);
+ return ds_write(file, tmp, sizeof(tmp));
+}
+
+static
+comp_thread_ctxt_t *
+create_worker_threads(uint n)
+{
+ comp_thread_ctxt_t *threads;
+ uint i;
+
+ threads = (comp_thread_ctxt_t *)
+ my_malloc(sizeof(comp_thread_ctxt_t) * n, MYF(MY_FAE));
+
+ for (i = 0; i < n; i++) {
+ comp_thread_ctxt_t *thd = threads + i;
+
+ thd->num = i + 1;
+ thd->started = FALSE;
+ thd->cancelled = FALSE;
+ thd->data_avail = FALSE;
+
+ thd->to = (char *) my_malloc(COMPRESS_CHUNK_SIZE +
+ MY_QLZ_COMPRESS_OVERHEAD,
+ MYF(MY_FAE));
+
+ /* Initialize the control mutex and condition var */
+ if (pthread_mutex_init(&thd->ctrl_mutex, NULL) ||
+ pthread_cond_init(&thd->ctrl_cond, NULL)) {
+ goto err;
+ }
+
+		/* Initialize the data mutex and condition var */
+ if (pthread_mutex_init(&thd->data_mutex, NULL) ||
+ pthread_cond_init(&thd->data_cond, NULL)) {
+ goto err;
+ }
+
+ pthread_mutex_lock(&thd->ctrl_mutex);
+
+ if (pthread_create(&thd->id, NULL, compress_worker_thread_func,
+ thd)) {
+ msg("compress: pthread_create() failed: "
+ "errno = %d\n", errno);
+ goto err;
+ }
+ }
+
+ /* Wait for the threads to start */
+ for (i = 0; i < n; i++) {
+ comp_thread_ctxt_t *thd = threads + i;
+
+ while (thd->started == FALSE)
+ pthread_cond_wait(&thd->ctrl_cond, &thd->ctrl_mutex);
+ pthread_mutex_unlock(&thd->ctrl_mutex);
+ }
+
+ return threads;
+
+err:
+ return NULL;
+}
+
+static
+void
+destroy_worker_threads(comp_thread_ctxt_t *threads, uint n)
+{
+ uint i;
+
+ for (i = 0; i < n; i++) {
+ comp_thread_ctxt_t *thd = threads + i;
+
+ pthread_mutex_lock(&thd->data_mutex);
+ threads[i].cancelled = TRUE;
+ pthread_cond_signal(&thd->data_cond);
+ pthread_mutex_unlock(&thd->data_mutex);
+
+ pthread_join(thd->id, NULL);
+
+ pthread_cond_destroy(&thd->data_cond);
+ pthread_mutex_destroy(&thd->data_mutex);
+ pthread_cond_destroy(&thd->ctrl_cond);
+ pthread_mutex_destroy(&thd->ctrl_mutex);
+
+ my_free(thd->to);
+ }
+
+ my_free(threads);
+}
+
+static
+void *
+compress_worker_thread_func(void *arg)
+{
+ comp_thread_ctxt_t *thd = (comp_thread_ctxt_t *) arg;
+
+ pthread_mutex_lock(&thd->ctrl_mutex);
+
+ pthread_mutex_lock(&thd->data_mutex);
+
+ thd->started = TRUE;
+ pthread_cond_signal(&thd->ctrl_cond);
+
+ pthread_mutex_unlock(&thd->ctrl_mutex);
+
+ while (1) {
+ thd->data_avail = FALSE;
+ pthread_cond_signal(&thd->data_cond);
+
+ while (!thd->data_avail && !thd->cancelled) {
+ pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
+ }
+
+ if (thd->cancelled)
+ break;
+
+ thd->to_len = qlz_compress(thd->from, thd->to, thd->from_len,
+ &thd->state);
+
+ /* qpress uses 0x00010000 as the initial value, but its own
+ Adler-32 implementation treats the value differently:
+ 1. higher order bits are the sum of all bytes in the sequence
+ 2. lower order bits are the sum of resulting values at every
+ step.
+		So it is the other way around compared to zlib's adler32().
+		That's why 0x00000001 is passed here, to stay compatible with
+		the qpress implementation. */
+
+ thd->adler = adler32(0x00000001, (uchar *) thd->to,
+ thd->to_len);
+ }
+
+ pthread_mutex_unlock(&thd->data_mutex);
+
+ return NULL;
+}
diff --git a/extra/mariabackup/ds_compress.h b/extra/mariabackup/ds_compress.h
new file mode 100644
index 00000000000..8498c965e13
--- /dev/null
+++ b/extra/mariabackup/ds_compress.h
@@ -0,0 +1,28 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Compression interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_COMPRESS_H
+#define DS_COMPRESS_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_compress;
+
+#endif
diff --git a/extra/mariabackup/ds_encrypt.c b/extra/mariabackup/ds_encrypt.c
new file mode 100644
index 00000000000..f8d62a03e13
--- /dev/null
+++ b/extra/mariabackup/ds_encrypt.c
@@ -0,0 +1,617 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Encryption datasink implementation for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+
+#include <my_base.h>
+#include "common.h"
+#include "datasink.h"
+
+#if GCC_VERSION >= 4002
+/* Workaround to avoid "gcry_ac_* is deprecated" warnings in gcrypt.h */
+# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include <gcrypt.h>
+
+#if GCC_VERSION >= 4002
+# pragma GCC diagnostic warning "-Wdeprecated-declarations"
+#endif
+
+#include "xbcrypt.h"
+
+#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
+GCRY_THREAD_OPTION_PTHREAD_IMPL;
+#endif
+
+#define XB_CRYPT_CHUNK_SIZE ((size_t) (xtrabackup_encrypt_chunk_size))
+
+typedef struct {
+ pthread_t id;
+ uint num;
+ pthread_mutex_t ctrl_mutex;
+ pthread_cond_t ctrl_cond;
+ pthread_mutex_t data_mutex;
+ pthread_cond_t data_cond;
+ my_bool started;
+ my_bool data_avail;
+ my_bool cancelled;
+ const char *from;
+ size_t from_len;
+ char *to;
+ char *iv;
+ size_t to_len;
+ gcry_cipher_hd_t cipher_handle;
+} crypt_thread_ctxt_t;
+
+typedef struct {
+ crypt_thread_ctxt_t *threads;
+ uint nthreads;
+} ds_encrypt_ctxt_t;
+
+typedef struct {
+ xb_wcrypt_t *xbcrypt_file;
+ ds_encrypt_ctxt_t *crypt_ctxt;
+ size_t bytes_processed;
+ ds_file_t *dest_file;
+} ds_encrypt_file_t;
+
+/* Encryption options */
+extern ulong xtrabackup_encrypt_algo;
+extern char *xtrabackup_encrypt_key;
+extern char *xtrabackup_encrypt_key_file;
+extern uint xtrabackup_encrypt_threads;
+extern ulonglong xtrabackup_encrypt_chunk_size;
+
+static ds_ctxt_t *encrypt_init(const char *root);
+static ds_file_t *encrypt_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int encrypt_write(ds_file_t *file, const void *buf, size_t len);
+static int encrypt_close(ds_file_t *file);
+static void encrypt_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_encrypt = {
+ &encrypt_init,
+ &encrypt_open,
+ &encrypt_write,
+ &encrypt_close,
+ &encrypt_deinit
+};
+
+static crypt_thread_ctxt_t *create_worker_threads(uint n);
+static void destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n);
+static void *encrypt_worker_thread_func(void *arg);
+
+static uint encrypt_algos[] = { GCRY_CIPHER_NONE, GCRY_CIPHER_AES128,
+ GCRY_CIPHER_AES192, GCRY_CIPHER_AES256 };
+static uint encrypt_algo;
+static const uint encrypt_mode = GCRY_CIPHER_MODE_CTR;
+static uint encrypt_key_len = 0;
+static size_t encrypt_iv_len = 0;
+
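+/*
+Per-chunk processing (summary of encrypt_worker_thread_func() below): the
+plaintext chunk is copied into the output buffer, an XB_CRYPT_HASH digest of
+it is appended, and, unless the algorithm is GCRY_CIPHER_NONE, the buffer is
+encrypted in CTR mode with a freshly generated IV. encrypt_write() then
+streams each result out through xb_crypt_write_chunk() together with the IV.
+*/
+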
+static
+ssize_t
+my_xb_crypt_write_callback(void *userdata, const void *buf, size_t len)
+{
+ ds_encrypt_file_t *encrypt_file;
+
+ encrypt_file = (ds_encrypt_file_t *) userdata;
+
+ xb_ad(encrypt_file != NULL);
+ xb_ad(encrypt_file->dest_file != NULL);
+
+ if (!ds_write(encrypt_file->dest_file, buf, len)) {
+ return len;
+ }
+ return -1;
+}
+
+static
+ds_ctxt_t *
+encrypt_init(const char *root)
+{
+ ds_ctxt_t *ctxt;
+ ds_encrypt_ctxt_t *encrypt_ctxt;
+ crypt_thread_ctxt_t *threads;
+ gcry_error_t gcry_error;
+
+	/* According to the gcrypt docs (and my testing), setting up the
+	threading callbacks must be done first, so let's give it a shot */
+#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
+ gcry_error = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
+ if (gcry_error) {
+ msg("encrypt: unable to set libgcrypt thread cbs - "
+ "%s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ return NULL;
+ }
+#endif
+
+ /* Version check should be the very next call because it
+	makes sure that important subsystems are initialized. */
+ if (!gcry_control(GCRYCTL_ANY_INITIALIZATION_P)) {
+ const char *gcrypt_version;
+ gcrypt_version = gcry_check_version(NULL);
+ /* No other library has already initialized libgcrypt. */
+ if (!gcrypt_version) {
+ msg("encrypt: failed to initialize libgcrypt\n");
+ return NULL;
+ } else {
+ msg("encrypt: using gcrypt %s\n", gcrypt_version);
+ }
+ }
+
+ /* Disable the gcry secure memory, not dealing with this for now */
+ gcry_error = gcry_control(GCRYCTL_DISABLE_SECMEM, 0);
+ if (gcry_error) {
+ msg("encrypt: unable to disable libgcrypt secmem - "
+ "%s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ return NULL;
+ }
+
+ /* Finalize gcry initialization. */
+ gcry_error = gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
+ if (gcry_error) {
+ msg("encrypt: unable to finish libgcrypt initialization - "
+ "%s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ return NULL;
+ }
+
+ /* Determine the algorithm */
+ encrypt_algo = encrypt_algos[xtrabackup_encrypt_algo];
+
+ /* Set up the iv length */
+ encrypt_iv_len = gcry_cipher_get_algo_blklen(encrypt_algo);
+ xb_a(encrypt_iv_len > 0);
+
+ /* Now set up the key */
+ if (xtrabackup_encrypt_key == NULL &&
+ xtrabackup_encrypt_key_file == NULL) {
+ msg("encrypt: no encryption key or key file specified.\n");
+ return NULL;
+ } else if (xtrabackup_encrypt_key && xtrabackup_encrypt_key_file) {
+ msg("encrypt: both encryption key and key file specified.\n");
+ return NULL;
+ } else if (xtrabackup_encrypt_key_file) {
+ if (!xb_crypt_read_key_file(xtrabackup_encrypt_key_file,
+ (void**)&xtrabackup_encrypt_key,
+ &encrypt_key_len)) {
+ msg("encrypt: unable to read encryption key file"
+ " \"%s\".\n", xtrabackup_encrypt_key_file);
+ return NULL;
+ }
+ } else if (xtrabackup_encrypt_key) {
+ encrypt_key_len = strlen(xtrabackup_encrypt_key);
+ } else {
+ msg("encrypt: no encryption key or key file specified.\n");
+ return NULL;
+ }
+
+ /* Create and initialize the worker threads */
+ threads = create_worker_threads(xtrabackup_encrypt_threads);
+ if (threads == NULL) {
+ msg("encrypt: failed to create worker threads.\n");
+ return NULL;
+ }
+
+ ctxt = (ds_ctxt_t *) my_malloc(sizeof(ds_ctxt_t) +
+ sizeof(ds_encrypt_ctxt_t),
+ MYF(MY_FAE));
+
+ encrypt_ctxt = (ds_encrypt_ctxt_t *) (ctxt + 1);
+ encrypt_ctxt->threads = threads;
+ encrypt_ctxt->nthreads = xtrabackup_encrypt_threads;
+
+ ctxt->ptr = encrypt_ctxt;
+ ctxt->root = my_strdup(root, MYF(MY_FAE));
+
+ return ctxt;
+}
+
+static
+ds_file_t *
+encrypt_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
+{
+ ds_ctxt_t *dest_ctxt;
+
+ ds_encrypt_ctxt_t *crypt_ctxt;
+ ds_encrypt_file_t *crypt_file;
+
+ char new_name[FN_REFLEN];
+ ds_file_t *file;
+
+ xb_ad(ctxt->pipe_ctxt != NULL);
+ dest_ctxt = ctxt->pipe_ctxt;
+
+ crypt_ctxt = (ds_encrypt_ctxt_t *) ctxt->ptr;
+
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_encrypt_file_t),
+ MYF(MY_FAE|MY_ZEROFILL));
+
+ crypt_file = (ds_encrypt_file_t *) (file + 1);
+
+ /* Append the .xbcrypt extension to the filename */
+ fn_format(new_name, path, "", ".xbcrypt", MYF(MY_APPEND_EXT));
+ crypt_file->dest_file = ds_open(dest_ctxt, new_name, mystat);
+ if (crypt_file->dest_file == NULL) {
+ msg("encrypt: ds_open(\"%s\") failed.\n", new_name);
+ goto err;
+ }
+
+ crypt_file->crypt_ctxt = crypt_ctxt;
+ crypt_file->xbcrypt_file = xb_crypt_write_open(crypt_file,
+ my_xb_crypt_write_callback);
+
+ if (crypt_file->xbcrypt_file == NULL) {
+ msg("encrypt: xb_crypt_write_open() failed.\n");
+ goto err;
+ }
+
+
+ file->ptr = crypt_file;
+ file->path = crypt_file->dest_file->path;
+
+ return file;
+
+err:
+ if (crypt_file->dest_file) {
+ ds_close(crypt_file->dest_file);
+ }
+ my_free(file);
+ return NULL;
+}
+
+static
+int
+encrypt_write(ds_file_t *file, const void *buf, size_t len)
+{
+ ds_encrypt_file_t *crypt_file;
+ ds_encrypt_ctxt_t *crypt_ctxt;
+ crypt_thread_ctxt_t *threads;
+ crypt_thread_ctxt_t *thd;
+ uint nthreads;
+ uint i;
+ const char *ptr;
+
+ crypt_file = (ds_encrypt_file_t *) file->ptr;
+ crypt_ctxt = crypt_file->crypt_ctxt;
+
+ threads = crypt_ctxt->threads;
+ nthreads = crypt_ctxt->nthreads;
+
+ ptr = (const char *) buf;
+ while (len > 0) {
+ uint max_thread;
+
+ /* Send data to worker threads for encryption */
+ for (i = 0; i < nthreads; i++) {
+ size_t chunk_len;
+
+ thd = threads + i;
+
+ pthread_mutex_lock(&thd->ctrl_mutex);
+
+ chunk_len = (len > XB_CRYPT_CHUNK_SIZE) ?
+ XB_CRYPT_CHUNK_SIZE : len;
+ thd->from = ptr;
+ thd->from_len = chunk_len;
+
+ pthread_mutex_lock(&thd->data_mutex);
+ thd->data_avail = TRUE;
+ pthread_cond_signal(&thd->data_cond);
+ pthread_mutex_unlock(&thd->data_mutex);
+
+ len -= chunk_len;
+ if (len == 0) {
+ break;
+ }
+ ptr += chunk_len;
+ }
+
+ max_thread = (i < nthreads) ? i : nthreads - 1;
+
+ /* Reap and stream the encrypted data */
+ for (i = 0; i <= max_thread; i++) {
+ thd = threads + i;
+
+ pthread_mutex_lock(&thd->data_mutex);
+ while (thd->data_avail == TRUE) {
+ pthread_cond_wait(&thd->data_cond,
+ &thd->data_mutex);
+ }
+
+ xb_a(threads[i].to_len > 0);
+
+ if (xb_crypt_write_chunk(crypt_file->xbcrypt_file,
+ threads[i].to,
+ threads[i].from_len +
+ XB_CRYPT_HASH_LEN,
+ threads[i].to_len,
+ threads[i].iv,
+ encrypt_iv_len)) {
+ msg("encrypt: write to the destination file "
+ "failed.\n");
+ return 1;
+ }
+
+ crypt_file->bytes_processed += threads[i].from_len;
+
+ pthread_mutex_unlock(&threads[i].data_mutex);
+ pthread_mutex_unlock(&threads[i].ctrl_mutex);
+ }
+ }
+
+ return 0;
+}
+
+static
+int
+encrypt_close(ds_file_t *file)
+{
+ ds_encrypt_file_t *crypt_file;
+ ds_file_t *dest_file;
+ int rc = 0;
+
+ crypt_file = (ds_encrypt_file_t *) file->ptr;
+ dest_file = crypt_file->dest_file;
+
+ rc = xb_crypt_write_close(crypt_file->xbcrypt_file);
+
+ if (ds_close(dest_file)) {
+ rc = 1;
+ }
+
+ my_free(file);
+
+ return rc;
+}
+
+static
+void
+encrypt_deinit(ds_ctxt_t *ctxt)
+{
+ ds_encrypt_ctxt_t *crypt_ctxt;
+
+ xb_ad(ctxt->pipe_ctxt != NULL);
+
+ crypt_ctxt = (ds_encrypt_ctxt_t *) ctxt->ptr;
+
+ destroy_worker_threads(crypt_ctxt->threads, crypt_ctxt->nthreads);
+
+ my_free(ctxt->root);
+ my_free(ctxt);
+ if (xtrabackup_encrypt_key)
+ my_free(xtrabackup_encrypt_key);
+ if (xtrabackup_encrypt_key_file)
+ my_free(xtrabackup_encrypt_key_file);
+}
+
+static
+crypt_thread_ctxt_t *
+create_worker_threads(uint n)
+{
+ crypt_thread_ctxt_t *threads;
+ uint i;
+
+ threads = (crypt_thread_ctxt_t *)
+ my_malloc(sizeof(crypt_thread_ctxt_t) * n, MYF(MY_FAE));
+
+ for (i = 0; i < n; i++) {
+ crypt_thread_ctxt_t *thd = threads + i;
+
+ thd->num = i + 1;
+ thd->started = FALSE;
+ thd->cancelled = FALSE;
+ thd->data_avail = FALSE;
+
+ thd->to = (char *) my_malloc(XB_CRYPT_CHUNK_SIZE +
+ XB_CRYPT_HASH_LEN, MYF(MY_FAE));
+
+ thd->iv = (char *) my_malloc(encrypt_iv_len,
+ MYF(MY_FAE));
+
+ /* Initialize the control mutex and condition var */
+ if (pthread_mutex_init(&thd->ctrl_mutex, NULL) ||
+ pthread_cond_init(&thd->ctrl_cond, NULL)) {
+ goto err;
+ }
+
+		/* Initialize the data mutex and condition var */
+ if (pthread_mutex_init(&thd->data_mutex, NULL) ||
+ pthread_cond_init(&thd->data_cond, NULL)) {
+ goto err;
+ }
+
+ if (encrypt_algo != GCRY_CIPHER_NONE) {
+ gcry_error_t gcry_error;
+
+ gcry_error = gcry_cipher_open(&thd->cipher_handle,
+ encrypt_algo,
+ encrypt_mode, 0);
+ if (gcry_error) {
+ msg("encrypt: unable to open libgcrypt"
+ " cipher - %s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ gcry_cipher_close(thd->cipher_handle);
+ goto err;
+ }
+
+ gcry_error = gcry_cipher_setkey(thd->cipher_handle,
+ xtrabackup_encrypt_key,
+ encrypt_key_len);
+ if (gcry_error) {
+ msg("encrypt: unable to set libgcrypt"
+ " cipher key - %s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ gcry_cipher_close(thd->cipher_handle);
+ goto err;
+ }
+ }
+
+ pthread_mutex_lock(&thd->ctrl_mutex);
+
+ if (pthread_create(&thd->id, NULL, encrypt_worker_thread_func,
+ thd)) {
+ msg("encrypt: pthread_create() failed: "
+ "errno = %d\n", errno);
+ goto err;
+ }
+ }
+
+ /* Wait for the threads to start */
+ for (i = 0; i < n; i++) {
+ crypt_thread_ctxt_t *thd = threads + i;
+
+ while (thd->started == FALSE)
+ pthread_cond_wait(&thd->ctrl_cond, &thd->ctrl_mutex);
+ pthread_mutex_unlock(&thd->ctrl_mutex);
+ }
+
+ return threads;
+
+err:
+ return NULL;
+}
+
+static
+void
+destroy_worker_threads(crypt_thread_ctxt_t *threads, uint n)
+{
+ uint i;
+
+ for (i = 0; i < n; i++) {
+ crypt_thread_ctxt_t *thd = threads + i;
+
+ pthread_mutex_lock(&thd->data_mutex);
+ threads[i].cancelled = TRUE;
+ pthread_cond_signal(&thd->data_cond);
+ pthread_mutex_unlock(&thd->data_mutex);
+
+ pthread_join(thd->id, NULL);
+
+ pthread_cond_destroy(&thd->data_cond);
+ pthread_mutex_destroy(&thd->data_mutex);
+ pthread_cond_destroy(&thd->ctrl_cond);
+ pthread_mutex_destroy(&thd->ctrl_mutex);
+
+ if (encrypt_algo != GCRY_CIPHER_NONE)
+ gcry_cipher_close(thd->cipher_handle);
+
+ my_free(thd->to);
+ my_free(thd->iv);
+ }
+
+ my_free(threads);
+}
+
+static
+void *
+encrypt_worker_thread_func(void *arg)
+{
+ crypt_thread_ctxt_t *thd = (crypt_thread_ctxt_t *) arg;
+
+ pthread_mutex_lock(&thd->ctrl_mutex);
+
+ pthread_mutex_lock(&thd->data_mutex);
+
+ thd->started = TRUE;
+ pthread_cond_signal(&thd->ctrl_cond);
+
+ pthread_mutex_unlock(&thd->ctrl_mutex);
+
+ while (1) {
+ thd->data_avail = FALSE;
+ pthread_cond_signal(&thd->data_cond);
+
+ while (!thd->data_avail && !thd->cancelled) {
+ pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
+ }
+
+ if (thd->cancelled)
+ break;
+
+		/* ensure that XB_CRYPT_HASH_LEN matches the output length of
+		the XB_CRYPT_HASH hashing algorithm */
+ assert(gcry_md_get_algo_dlen(XB_CRYPT_HASH) ==
+ XB_CRYPT_HASH_LEN);
+
+ memcpy(thd->to, thd->from, thd->from_len);
+ gcry_md_hash_buffer(XB_CRYPT_HASH, thd->to + thd->from_len,
+ thd->from, thd->from_len);
+ thd->to_len = thd->from_len;
+
+ if (encrypt_algo != GCRY_CIPHER_NONE) {
+ gcry_error_t gcry_error;
+
+ gcry_error = gcry_cipher_reset(thd->cipher_handle);
+ if (gcry_error) {
+ msg("encrypt: unable to reset cipher - "
+ "%s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ thd->to_len = 0;
+ continue;
+ }
+
+ xb_crypt_create_iv(thd->iv, encrypt_iv_len);
+ gcry_error = gcry_cipher_setctr(thd->cipher_handle,
+ thd->iv,
+ encrypt_iv_len);
+ if (gcry_error) {
+ msg("encrypt: unable to set cipher ctr - "
+ "%s : %s\n",
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ thd->to_len = 0;
+ continue;
+ }
+
+ gcry_error = gcry_cipher_encrypt(thd->cipher_handle,
+ thd->to,
+ thd->to_len +
+ XB_CRYPT_HASH_LEN,
+ thd->to,
+ thd->from_len +
+ XB_CRYPT_HASH_LEN);
+ if (gcry_error) {
+ msg("encrypt: unable to encrypt buffer - "
+ "%s : %s\n", gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ thd->to_len = 0;
+ }
+ } else {
+ memcpy(thd->to, thd->from,
+ thd->from_len + XB_CRYPT_HASH_LEN);
+ }
+ thd->to_len += XB_CRYPT_HASH_LEN;
+ }
+
+ pthread_mutex_unlock(&thd->data_mutex);
+
+ return NULL;
+}
diff --git a/extra/mariabackup/ds_encrypt.h b/extra/mariabackup/ds_encrypt.h
new file mode 100644
index 00000000000..ed869747d79
--- /dev/null
+++ b/extra/mariabackup/ds_encrypt.h
@@ -0,0 +1,28 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Encryption interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_ENCRYPT_H
+#define DS_ENCRYPT_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_encrypt;
+
+#endif
diff --git a/extra/mariabackup/ds_local.c b/extra/mariabackup/ds_local.c
new file mode 100644
index 00000000000..dc13ed7595e
--- /dev/null
+++ b/extra/mariabackup/ds_local.c
@@ -0,0 +1,151 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Local datasink implementation for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <mysys_err.h>
+#include "common.h"
+#include "datasink.h"
+
+typedef struct {
+ File fd;
+} ds_local_file_t;
+
+static ds_ctxt_t *local_init(const char *root);
+static ds_file_t *local_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int local_write(ds_file_t *file, const void *buf, size_t len);
+static int local_close(ds_file_t *file);
+static void local_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_local = {
+ &local_init,
+ &local_open,
+ &local_write,
+ &local_close,
+ &local_deinit
+};
+
+static
+ds_ctxt_t *
+local_init(const char *root)
+{
+ ds_ctxt_t *ctxt;
+
+ if (my_mkdir(root, 0777, MYF(0)) < 0
+ && my_errno != EEXIST && my_errno != EISDIR)
+ {
+ char errbuf[MYSYS_STRERROR_SIZE];
+ my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG),
+ root, my_errno, my_strerror(errbuf, sizeof(errbuf),
+ my_errno));
+ return NULL;
+ }
+
+ ctxt = my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE));
+
+ ctxt->root = my_strdup(root, MYF(MY_FAE));
+
+ return ctxt;
+}
+
+static
+ds_file_t *
+local_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat __attribute__((unused)))
+{
+ char fullpath[FN_REFLEN];
+ char dirpath[FN_REFLEN];
+ size_t dirpath_len;
+ size_t path_len;
+ ds_local_file_t *local_file;
+ ds_file_t *file;
+ File fd;
+
+ fn_format(fullpath, path, ctxt->root, "", MYF(MY_RELATIVE_PATH));
+
+ /* Create the directory if needed */
+ dirname_part(dirpath, fullpath, &dirpath_len);
+ if (my_mkdir(dirpath, 0777, MYF(0)) < 0 && my_errno != EEXIST) {
+ char errbuf[MYSYS_STRERROR_SIZE];
+ my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG),
+ dirpath, my_errno, my_strerror(errbuf, sizeof(errbuf),
+ my_errno));
+ return NULL;
+ }
+
+ fd = my_create(fullpath, 0, O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
+ MYF(MY_WME));
+ if (fd < 0) {
+ return NULL;
+ }
+
+ path_len = strlen(fullpath) + 1; /* terminating '\0' */
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_local_file_t) +
+ path_len,
+ MYF(MY_FAE));
+ local_file = (ds_local_file_t *) (file + 1);
+
+ local_file->fd = fd;
+
+ file->path = (char *) local_file + sizeof(ds_local_file_t);
+ memcpy(file->path, fullpath, path_len);
+
+ file->ptr = local_file;
+
+ return file;
+}
+
+static
+int
+local_write(ds_file_t *file, const void *buf, size_t len)
+{
+ File fd = ((ds_local_file_t *) file->ptr)->fd;
+
+ if (!my_write(fd, buf, len, MYF(MY_WME | MY_NABP))) {
+ posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
+ return 0;
+ }
+
+ return 1;
+}
+
+static
+int
+local_close(ds_file_t *file)
+{
+ File fd = ((ds_local_file_t *) file->ptr)->fd;
+
+ my_free(file);
+
+ my_sync(fd, MYF(MY_WME));
+
+ return my_close(fd, MYF(MY_WME));
+}
+
+static
+void
+local_deinit(ds_ctxt_t *ctxt)
+{
+ my_free(ctxt->root);
+ my_free(ctxt);
+}
diff --git a/extra/mariabackup/ds_local.h b/extra/mariabackup/ds_local.h
new file mode 100644
index 00000000000..b0f0f04030c
--- /dev/null
+++ b/extra/mariabackup/ds_local.h
@@ -0,0 +1,28 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Local datasink interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_LOCAL_H
+#define DS_LOCAL_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_local;
+
+#endif
diff --git a/extra/mariabackup/ds_stdout.c b/extra/mariabackup/ds_stdout.c
new file mode 100644
index 00000000000..616bcbd831e
--- /dev/null
+++ b/extra/mariabackup/ds_stdout.c
@@ -0,0 +1,121 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Stdout datasink implementation for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <my_base.h>
+#include <mysys_err.h>
+#include "common.h"
+#include "datasink.h"
+
+typedef struct {
+ File fd;
+} ds_stdout_file_t;
+
+static ds_ctxt_t *stdout_init(const char *root);
+static ds_file_t *stdout_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int stdout_write(ds_file_t *file, const void *buf, size_t len);
+static int stdout_close(ds_file_t *file);
+static void stdout_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_stdout = {
+ &stdout_init,
+ &stdout_open,
+ &stdout_write,
+ &stdout_close,
+ &stdout_deinit
+};
+
+static
+ds_ctxt_t *
+stdout_init(const char *root)
+{
+ ds_ctxt_t *ctxt;
+
+ ctxt = my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE));
+
+ ctxt->root = my_strdup(root, MYF(MY_FAE));
+
+ return ctxt;
+}
+
+static
+ds_file_t *
+stdout_open(ds_ctxt_t *ctxt __attribute__((unused)),
+ const char *path __attribute__((unused)),
+ MY_STAT *mystat __attribute__((unused)))
+{
+ ds_stdout_file_t *stdout_file;
+ ds_file_t *file;
+ size_t pathlen;
+ const char *fullpath = "<STDOUT>";
+
+ pathlen = strlen(fullpath) + 1;
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_stdout_file_t) +
+ pathlen,
+ MYF(MY_FAE));
+ stdout_file = (ds_stdout_file_t *) (file + 1);
+
+
+#ifdef __WIN__
+ setmode(fileno(stdout), _O_BINARY);
+#endif
+
+ stdout_file->fd = fileno(stdout);
+
+ file->path = (char *) stdout_file + sizeof(ds_stdout_file_t);
+ memcpy(file->path, fullpath, pathlen);
+
+ file->ptr = stdout_file;
+
+ return file;
+}
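/* Layout of the single allocation made in stdout_open() above:

	[ ds_file_t ][ ds_stdout_file_t ][ "<STDOUT>\0" ]

file->ptr points at the ds_stdout_file_t part and file->path at the
trailing copy of the name.  The other sinks in this patch use the same
trick: one my_malloc() holds the public ds_file_t, the sink-private
struct and, where needed, a copy of the path. */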
+
+static
+int
+stdout_write(ds_file_t *file, const void *buf, size_t len)
+{
+ File fd = ((ds_stdout_file_t *) file->ptr)->fd;
+
+ if (!my_write(fd, buf, len, MYF(MY_WME | MY_NABP))) {
+ posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
+ return 0;
+ }
+
+ return 1;
+}
+
+static
+int
+stdout_close(ds_file_t *file)
+{
+ my_free(file);
+
+ return 1;
+}
+
+static
+void
+stdout_deinit(ds_ctxt_t *ctxt)
+{
+ my_free(ctxt->root);
+ my_free(ctxt);
+}
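
Every sink in this patch fills the same five-slot datasink_t table in the order shown in the datasink_stdout initializer: init, open, write, close, deinit. The sketch below is not part of the patch; the field usage is copied from ds_stdout.c above, and the name datasink_null is hypothetical, shown only to illustrate the minimal contract a sink has to satisfy.

/* Hypothetical "null" sink that discards everything written to it. */
static ds_ctxt_t *null_init(const char *root)
{
	ds_ctxt_t *ctxt = my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE));

	ctxt->root = my_strdup(root, MYF(MY_FAE));
	return ctxt;
}

static ds_file_t *null_open(ds_ctxt_t *ctxt __attribute__((unused)),
			    const char *path __attribute__((unused)),
			    MY_STAT *mystat __attribute__((unused)))
{
	/* A sink with no real destination can leave path and ptr unset */
	ds_file_t *file = my_malloc(sizeof(ds_file_t), MYF(MY_FAE));

	file->path = NULL;
	file->ptr = NULL;
	return file;
}

static int null_write(ds_file_t *file __attribute__((unused)),
		      const void *buf __attribute__((unused)),
		      size_t len __attribute__((unused)))
{
	return 0;	/* 0 = success, same convention as stdout_write() */
}

static int null_close(ds_file_t *file)
{
	my_free(file);
	return 0;
}

static void null_deinit(ds_ctxt_t *ctxt)
{
	my_free(ctxt->root);
	my_free(ctxt);
}

datasink_t datasink_null = {
	&null_init,
	&null_open,
	&null_write,
	&null_close,
	&null_deinit
};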
diff --git a/extra/mariabackup/ds_stdout.h b/extra/mariabackup/ds_stdout.h
new file mode 100644
index 00000000000..58940264fef
--- /dev/null
+++ b/extra/mariabackup/ds_stdout.h
@@ -0,0 +1,28 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Local datasink interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_STDOUT_H
+#define DS_STDOUT_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_stdout;
+
+#endif
diff --git a/extra/mariabackup/ds_tmpfile.c b/extra/mariabackup/ds_tmpfile.c
new file mode 100644
index 00000000000..915191dcdae
--- /dev/null
+++ b/extra/mariabackup/ds_tmpfile.c
@@ -0,0 +1,248 @@
+/******************************************************
+Copyright (c) 2012 Percona LLC and/or its affiliates.
+
+tmpfile datasink for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Do all writes to temporary files first, then pipe them to the specified
+datasink in a serialized way in deinit(). */
+
+#include <my_base.h>
+#include "common.h"
+#include "datasink.h"
+
+typedef struct {
+ pthread_mutex_t mutex;
+ LIST *file_list;
+} ds_tmpfile_ctxt_t;
+
+typedef struct {
+ LIST list;
+ File fd;
+ char *orig_path;
+ MY_STAT mystat;
+ ds_file_t *file;
+} ds_tmp_file_t;
+
+static ds_ctxt_t *tmpfile_init(const char *root);
+static ds_file_t *tmpfile_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int tmpfile_write(ds_file_t *file, const void *buf, size_t len);
+static int tmpfile_close(ds_file_t *file);
+static void tmpfile_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_tmpfile = {
+ &tmpfile_init,
+ &tmpfile_open,
+ &tmpfile_write,
+ &tmpfile_close,
+ &tmpfile_deinit
+};
+
+MY_TMPDIR mysql_tmpdir_list;
+
+static ds_ctxt_t *
+tmpfile_init(const char *root)
+{
+ ds_ctxt_t *ctxt;
+ ds_tmpfile_ctxt_t *tmpfile_ctxt;
+
+ ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_tmpfile_ctxt_t),
+ MYF(MY_FAE));
+ tmpfile_ctxt = (ds_tmpfile_ctxt_t *) (ctxt + 1);
+ tmpfile_ctxt->file_list = NULL;
+ if (pthread_mutex_init(&tmpfile_ctxt->mutex, NULL)) {
+
+ my_free(ctxt);
+ return NULL;
+ }
+
+ ctxt->ptr = tmpfile_ctxt;
+ ctxt->root = my_strdup(root, MYF(MY_FAE));
+
+ return ctxt;
+}
+
+static ds_file_t *
+tmpfile_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat)
+{
+ ds_tmpfile_ctxt_t *tmpfile_ctxt;
+ char tmp_path[FN_REFLEN];
+ ds_tmp_file_t *tmp_file;
+ ds_file_t *file;
+ size_t path_len;
+ File fd;
+
+ /* Create a temporary file in tmpdir. The file will be automatically
+ removed on close. Code copied from mysql_tmpfile(). */
+ fd = create_temp_file(tmp_path, my_tmpdir(&mysql_tmpdir_list),
+ "xbtemp",
+#ifdef __WIN__
+ O_BINARY | O_TRUNC | O_SEQUENTIAL |
+ O_TEMPORARY | O_SHORT_LIVED |
+#endif /* __WIN__ */
+ O_CREAT | O_EXCL | O_RDWR,
+ MYF(MY_WME));
+
+#ifndef __WIN__
+ if (fd >= 0) {
+ /* On UNIX, unlink the file immediately so that it is removed
+ automatically when closed. On Windows open files cannot be
+ removed, but the O_TEMPORARY flag passed above has the same
+ effect ("delete on close"). */

+ unlink(tmp_path);
+ }
+#endif /* !__WIN__ */
+
+ if (fd < 0) {
+ return NULL;
+ }
+
+ path_len = strlen(path) + 1; /* terminating '\0' */
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_tmp_file_t) + path_len,
+ MYF(MY_FAE));
+
+ tmp_file = (ds_tmp_file_t *) (file + 1);
+ tmp_file->file = file;
+ memcpy(&tmp_file->mystat, mystat, sizeof(MY_STAT));
+ /* Save a copy of 'path', since it may not be accessible later */
+ tmp_file->orig_path = (char *) tmp_file + sizeof(ds_tmp_file_t);
+
+ tmp_file->fd = fd;
+ memcpy(tmp_file->orig_path, path, path_len);
+
+ /* Store the real temporary file name in file->path */
+ file->path = my_strdup(tmp_path, MYF(MY_FAE));
+ file->ptr = tmp_file;
+
+ /* Store the file object in the list to be piped later */
+ tmpfile_ctxt = (ds_tmpfile_ctxt_t *) ctxt->ptr;
+ tmp_file->list.data = tmp_file;
+
+ pthread_mutex_lock(&tmpfile_ctxt->mutex);
+ tmpfile_ctxt->file_list = list_add(tmpfile_ctxt->file_list,
+ &tmp_file->list);
+ pthread_mutex_unlock(&tmpfile_ctxt->mutex);
+
+ return file;
+}
+
+static int
+tmpfile_write(ds_file_t *file, const void *buf, size_t len)
+{
+ File fd = ((ds_tmp_file_t *) file->ptr)->fd;
+
+ if (!my_write(fd, buf, len, MYF(MY_WME | MY_NABP))) {
+ posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+tmpfile_close(ds_file_t *file)
+{
+ /* Do nothing -- we will close (and thus remove) the file after piping
+ it to the destination datasink in tmpfile_deinit(). */
+
+ my_free(file->path);
+
+ return 0;
+}
+
+static void
+tmpfile_deinit(ds_ctxt_t *ctxt)
+{
+ LIST *list;
+ ds_tmpfile_ctxt_t *tmpfile_ctxt;
+ MY_STAT mystat;
+ ds_tmp_file_t *tmp_file;
+ ds_file_t *dst_file;
+ ds_ctxt_t *pipe_ctxt;
+ void *buf = NULL;
+ const size_t buf_size = 10 * 1024 * 1024;
+ size_t bytes;
+ size_t offset;
+
+ pipe_ctxt = ctxt->pipe_ctxt;
+ xb_a(pipe_ctxt != NULL);
+
+ buf = my_malloc(buf_size, MYF(MY_FAE));
+
+ tmpfile_ctxt = (ds_tmpfile_ctxt_t *) ctxt->ptr;
+ list = tmpfile_ctxt->file_list;
+
+ /* Walk the files in the order they have been added */
+ list = list_reverse(list);
+ while (list != NULL) {
+ tmp_file = list->data;
+ /* Stat the file to get the current size and mtime into the
+ original mystat struct */
+ if (my_fstat(tmp_file->fd, &mystat, MYF(0))) {
+ msg("error: my_fstat() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ tmp_file->mystat.st_size = mystat.st_size;
+ tmp_file->mystat.st_mtime = mystat.st_mtime;
+
+ dst_file = ds_open(pipe_ctxt, tmp_file->orig_path,
+ &tmp_file->mystat);
+ if (dst_file == NULL) {
+ msg("error: could not stream a temporary file to "
+ "'%s'\n", tmp_file->orig_path);
+ exit(EXIT_FAILURE);
+ }
+
+ /* copy to the destination datasink */
+ posix_fadvise(tmp_file->fd, 0, 0, POSIX_FADV_SEQUENTIAL);
+ if (my_seek(tmp_file->fd, 0, SEEK_SET, MYF(0)) ==
+ MY_FILEPOS_ERROR) {
+ msg("error: my_seek() failed for '%s', errno = %d.\n",
+ tmp_file->file->path, my_errno);
+ exit(EXIT_FAILURE);
+ }
+ offset = 0;
+ while ((bytes = my_read(tmp_file->fd, buf, buf_size,
+ MYF(MY_WME))) > 0) {
+ posix_fadvise(tmp_file->fd, offset, buf_size, POSIX_FADV_DONTNEED);
+ offset += buf_size;
+ if (ds_write(dst_file, buf, bytes)) {
+ msg("error: cannot write to stream for '%s'.\n",
+ tmp_file->orig_path);
+ exit(EXIT_FAILURE);
+ }
+ }
+ if (bytes == (size_t) -1) {
+ exit(EXIT_FAILURE);
+ }
+
+ my_close(tmp_file->fd, MYF(MY_WME));
+ ds_close(dst_file);
+
+ list = list_rest(list);
+ my_free(tmp_file->file);
+ }
+
+ pthread_mutex_destroy(&tmpfile_ctxt->mutex);
+
+ my_free(buf);
+ my_free(ctxt->root);
+ my_free(ctxt);
+}
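
The comment at the top of ds_tmpfile.c summarizes the whole design: writes land in unlinked temporary files, and only tmpfile_deinit() serializes them, in insertion order, into whatever sink is attached as ctxt->pipe_ctxt. The rough usage sketch below is illustrative only; it assumes the ds_create()/ds_set_pipe()/ds_destroy() helpers and the DS_TYPE_* constants declared in datasink.h (not shown in this excerpt), and the buffer contents are placeholders.

/* Chain a tmpfile sink in front of an xbstream sink: everything written
   during the backup is buffered on disk first and only streamed out,
   one file at a time, when the tmpfile sink is destroyed.
   (<string.h> assumed for memset().) */
static void
buffer_then_stream_example(const void *chunk, size_t chunk_len)
{
	ds_ctxt_t	*dest;		/* where the physical stream ends up */
	ds_ctxt_t	*stream;
	ds_ctxt_t	*buffer;
	ds_file_t	*file;
	MY_STAT		stat_info;

	memset(&stat_info, 0, sizeof(stat_info));   /* real code fills this with my_stat() */

	dest   = ds_create("/backups/2017-04-18", DS_TYPE_STDOUT);
	stream = ds_create("/backups/2017-04-18", DS_TYPE_XBSTREAM);
	buffer = ds_create("/backups/2017-04-18", DS_TYPE_TMPFILE);

	ds_set_pipe(stream, dest);	/* xbstream_open() writes its single dest_file here */
	ds_set_pipe(buffer, stream);	/* tmpfile_deinit() replays buffered files here */

	file = ds_open(buffer, "xtrabackup_logfile", &stat_info);
	ds_write(file, chunk, chunk_len);	/* lands in an unlinked temporary file */
	ds_close(file);				/* temp file is kept, see tmpfile_close() */

	ds_destroy(buffer);	/* pipes every buffered file into 'stream' */
	ds_destroy(stream);
	ds_destroy(dest);
}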
diff --git a/extra/mariabackup/ds_tmpfile.h b/extra/mariabackup/ds_tmpfile.h
new file mode 100644
index 00000000000..c21f1a3f0b5
--- /dev/null
+++ b/extra/mariabackup/ds_tmpfile.h
@@ -0,0 +1,30 @@
+/******************************************************
+Copyright (c) 2012 Percona LLC and/or its affiliates.
+
+tmpfile datasink for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_TMPFILE_H
+#define DS_TMPFILE_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_tmpfile;
+
+extern MY_TMPDIR mysql_tmpdir_list;
+
+#endif
diff --git a/extra/mariabackup/ds_xbstream.c b/extra/mariabackup/ds_xbstream.c
new file mode 100644
index 00000000000..42924a72d7f
--- /dev/null
+++ b/extra/mariabackup/ds_xbstream.c
@@ -0,0 +1,223 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Streaming implementation for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include "common.h"
+#include "datasink.h"
+#include "xbstream.h"
+
+typedef struct {
+ xb_wstream_t *xbstream;
+ ds_file_t *dest_file;
+ pthread_mutex_t mutex;
+} ds_stream_ctxt_t;
+
+typedef struct {
+ xb_wstream_file_t *xbstream_file;
+ ds_stream_ctxt_t *stream_ctxt;
+} ds_stream_file_t;
+
+/***********************************************************************
+General streaming interface */
+
+static ds_ctxt_t *xbstream_init(const char *root);
+static ds_file_t *xbstream_open(ds_ctxt_t *ctxt, const char *path,
+ MY_STAT *mystat);
+static int xbstream_write(ds_file_t *file, const void *buf, size_t len);
+static int xbstream_close(ds_file_t *file);
+static void xbstream_deinit(ds_ctxt_t *ctxt);
+
+datasink_t datasink_xbstream = {
+ &xbstream_init,
+ &xbstream_open,
+ &xbstream_write,
+ &xbstream_close,
+ &xbstream_deinit
+};
+
+static
+ssize_t
+my_xbstream_write_callback(xb_wstream_file_t *f __attribute__((unused)),
+ void *userdata, const void *buf, size_t len)
+{
+ ds_stream_ctxt_t *stream_ctxt;
+
+ stream_ctxt = (ds_stream_ctxt_t *) userdata;
+
+ xb_ad(stream_ctxt != NULL);
+ xb_ad(stream_ctxt->dest_file != NULL);
+
+ if (!ds_write(stream_ctxt->dest_file, buf, len)) {
+ return len;
+ }
+ return -1;
+}
+
+static
+ds_ctxt_t *
+xbstream_init(const char *root __attribute__((unused)))
+{
+ ds_ctxt_t *ctxt;
+ ds_stream_ctxt_t *stream_ctxt;
+ xb_wstream_t *xbstream;
+
+ ctxt = my_malloc(sizeof(ds_ctxt_t) + sizeof(ds_stream_ctxt_t),
+ MYF(MY_FAE));
+ stream_ctxt = (ds_stream_ctxt_t *)(ctxt + 1);
+
+ if (pthread_mutex_init(&stream_ctxt->mutex, NULL)) {
+ msg("xbstream_init: pthread_mutex_init() failed.\n");
+ goto err;
+ }
+
+ xbstream = xb_stream_write_new();
+ if (xbstream == NULL) {
+ msg("xb_stream_write_new() failed.\n");
+ goto err;
+ }
+ stream_ctxt->xbstream = xbstream;
+ stream_ctxt->dest_file = NULL;
+
+ ctxt->ptr = stream_ctxt;
+
+ return ctxt;
+
+err:
+ my_free(ctxt);
+ return NULL;
+}
+
+static
+ds_file_t *
+xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat)
+{
+ ds_file_t *file;
+ ds_stream_file_t *stream_file;
+ ds_stream_ctxt_t *stream_ctxt;
+ ds_ctxt_t *dest_ctxt;
+ xb_wstream_t *xbstream;
+ xb_wstream_file_t *xbstream_file;
+
+
+ xb_ad(ctxt->pipe_ctxt != NULL);
+ dest_ctxt = ctxt->pipe_ctxt;
+
+ stream_ctxt = (ds_stream_ctxt_t *) ctxt->ptr;
+
+ pthread_mutex_lock(&stream_ctxt->mutex);
+ if (stream_ctxt->dest_file == NULL) {
+ stream_ctxt->dest_file = ds_open(dest_ctxt, path, mystat);
+ if (stream_ctxt->dest_file == NULL) {
+ pthread_mutex_unlock(&stream_ctxt->mutex);
+ return NULL;
+ }
+ }
+ pthread_mutex_unlock(&stream_ctxt->mutex);
+
+ file = (ds_file_t *) my_malloc(sizeof(ds_file_t) +
+ sizeof(ds_stream_file_t),
+ MYF(MY_FAE));
+ stream_file = (ds_stream_file_t *) (file + 1);
+
+ xbstream = stream_ctxt->xbstream;
+
+ xbstream_file = xb_stream_write_open(xbstream, path, mystat,
+ stream_ctxt,
+ my_xbstream_write_callback);
+
+ if (xbstream_file == NULL) {
+ msg("xb_stream_write_open() failed.\n");
+ goto err;
+ }
+
+ stream_file->xbstream_file = xbstream_file;
+ stream_file->stream_ctxt = stream_ctxt;
+ file->ptr = stream_file;
+ file->path = stream_ctxt->dest_file->path;
+
+ return file;
+
+err:
+ if (stream_ctxt->dest_file) {
+ ds_close(stream_ctxt->dest_file);
+ stream_ctxt->dest_file = NULL;
+ }
+ my_free(file);
+
+ return NULL;
+}
+
+static
+int
+xbstream_write(ds_file_t *file, const void *buf, size_t len)
+{
+ ds_stream_file_t *stream_file;
+ xb_wstream_file_t *xbstream_file;
+
+
+ stream_file = (ds_stream_file_t *) file->ptr;
+
+ xbstream_file = stream_file->xbstream_file;
+
+ if (xb_stream_write_data(xbstream_file, buf, len)) {
+ msg("xb_stream_write_data() failed.\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static
+int
+xbstream_close(ds_file_t *file)
+{
+ ds_stream_file_t *stream_file;
+ int rc = 0;
+
+ stream_file = (ds_stream_file_t *)file->ptr;
+
+ rc = xb_stream_write_close(stream_file->xbstream_file);
+
+ my_free(file);
+
+ return rc;
+}
+
+static
+void
+xbstream_deinit(ds_ctxt_t *ctxt)
+{
+ ds_stream_ctxt_t *stream_ctxt;
+
+ stream_ctxt = (ds_stream_ctxt_t *) ctxt->ptr;
+
+ if (xb_stream_write_done(stream_ctxt->xbstream)) {
+ msg("xb_stream_done() failed.\n");
+ }
+
+ if (stream_ctxt->dest_file) {
+ ds_close(stream_ctxt->dest_file);
+ stream_ctxt->dest_file = NULL;
+ }
+
+ pthread_mutex_destroy(&stream_ctxt->mutex);
+
+ my_free(ctxt);
+}
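
All files opened through this sink are funneled into a single dest_file: the first xbstream_open() creates it in the pipe_ctxt, and every logical file is then chunked into it by the xb_wstream writer through the callback above. The writer can also be driven directly; the sketch below is illustrative only, with a hypothetical stdout callback, a zero-filled MY_STAT, and exactly the xb_stream_write_* calls that appear in this file (<stdio.h>/<string.h> assumed).

/* Emit a single in-memory buffer as one file in xbstream format,
   sending the chunked output to stdout. */
static ssize_t
write_chunk_to_stdout(xb_wstream_file_t *f __attribute__((unused)),
		      void *userdata __attribute__((unused)),
		      const void *buf, size_t len)
{
	return fwrite(buf, 1, len, stdout) == len ? (ssize_t) len : -1;
}

static int
stream_one_file(const char *name, const void *data, size_t size)
{
	xb_wstream_t		*stream;
	xb_wstream_file_t	*file;
	MY_STAT			stat_info;

	memset(&stat_info, 0, sizeof(stat_info));

	stream = xb_stream_write_new();
	if (stream == NULL) {
		return 1;
	}

	file = xb_stream_write_open(stream, name, &stat_info, NULL,
				    write_chunk_to_stdout);
	if (file == NULL) {
		return 1;
	}

	if (xb_stream_write_data(file, data, size)) {
		return 1;
	}

	xb_stream_write_close(file);	/* flush the last chunk for this file */
	xb_stream_write_done(stream);	/* finalize the stream */

	return 0;
}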
diff --git a/extra/mariabackup/ds_xbstream.h b/extra/mariabackup/ds_xbstream.h
new file mode 100644
index 00000000000..30f34ac8318
--- /dev/null
+++ b/extra/mariabackup/ds_xbstream.h
@@ -0,0 +1,28 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+Streaming interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef DS_XBSTREAM_H
+#define DS_XBSTREAM_H
+
+#include "datasink.h"
+
+extern datasink_t datasink_xbstream;
+
+#endif
diff --git a/extra/mariabackup/fil_cur.cc b/extra/mariabackup/fil_cur.cc
new file mode 100644
index 00000000000..22ebdb90215
--- /dev/null
+++ b/extra/mariabackup/fil_cur.cc
@@ -0,0 +1,402 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2013 Percona LLC and/or its affiliates.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Source file cursor implementation */
+
+#include <my_base.h>
+
+#include <univ.i>
+#include <fil0fil.h>
+#include <srv0start.h>
+#include <trx0sys.h>
+
+#include "fil_cur.h"
+#include "common.h"
+#include "read_filt.h"
+#include "xtrabackup.h"
+
+/* Size of read buffer in pages (640 pages = 10M for 16K sized pages) */
+#define XB_FIL_CUR_PAGES 640
+
+/***********************************************************************
+Extracts the relative path ("database/table.ibd") of a tablespace from a
+specified possibly absolute path.
+
+For user tablespaces both "./database/table.ibd" and
+"/remote/dir/database/table.ibd" result in "database/table.ibd".
+
+For system tablespaces (i.e. when is_system is TRUE) both "/remote/dir/ibdata1"
+and "./ibdata1" yield "ibdata1" in the output. */
+const char *
+xb_get_relative_path(
+/*=================*/
+ const char* path, /*!< in: tablespace path (either
+ relative or absolute) */
+ ibool is_system) /*!< in: TRUE for system tablespaces,
+ i.e. when only the filename must be
+ returned. */
+{
+ const char *next;
+ const char *cur;
+ const char *prev;
+
+ prev = NULL;
+ cur = path;
+
+ while ((next = strchr(cur, SRV_PATH_SEPARATOR)) != NULL) {
+
+ prev = cur;
+ cur = next + 1;
+ }
+
+ if (is_system) {
+
+ return(cur);
+ } else {
+
+ return((prev == NULL) ? cur : prev);
+ }
+
+}
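/* Worked examples of the mapping described above (assuming
SRV_PATH_SEPARATOR is '/'):

	xb_get_relative_path("./sakila/actor.ibd", FALSE)
		-> "sakila/actor.ibd"
	xb_get_relative_path("/data/ext/sakila/actor.ibd", FALSE)
		-> "sakila/actor.ibd"
	xb_get_relative_path("./ibdata1", TRUE)
		-> "ibdata1"
	xb_get_relative_path("/data/sysdir/ibdata1", TRUE)
		-> "ibdata1"

The paths are illustrative; the function only ever returns a pointer
into the last one or two components of its argument. */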
+
+/**********************************************************************//**
+Closes a file. */
+static
+void
+xb_fil_node_close_file(
+/*===================*/
+ fil_node_t* node) /*!< in: file node */
+{
+ ibool ret;
+
+ mutex_enter(&fil_system->mutex);
+
+ ut_ad(node);
+ ut_a(node->n_pending == 0);
+ ut_a(node->n_pending_flushes == 0);
+ ut_a(!node->being_extended);
+
+ if (!node->open) {
+
+ mutex_exit(&fil_system->mutex);
+
+ return;
+ }
+
+ ret = os_file_close(node->handle);
+ ut_a(ret);
+
+ node->open = FALSE;
+
+ ut_a(fil_system->n_open > 0);
+ fil_system->n_open--;
+ fil_n_file_opened--;
+
+ if (node->space->purpose == FIL_TABLESPACE &&
+ fil_is_user_tablespace_id(node->space->id)) {
+
+ ut_a(UT_LIST_GET_LEN(fil_system->LRU) > 0);
+
+ /* The node is in the LRU list, remove it */
+ UT_LIST_REMOVE(LRU, fil_system->LRU, node);
+ }
+
+ mutex_exit(&fil_system->mutex);
+}
+
+/************************************************************************
+Open a source file cursor and initialize the associated read filter.
+
+@return XB_FIL_CUR_SUCCESS on success, XB_FIL_CUR_SKIP if the source file must
+be skipped and XB_FIL_CUR_ERROR on error. */
+xb_fil_cur_result_t
+xb_fil_cur_open(
+/*============*/
+ xb_fil_cur_t* cursor, /*!< out: source file cursor */
+ xb_read_filt_t* read_filter, /*!< in/out: the read filter */
+ fil_node_t* node, /*!< in: source tablespace node */
+ uint thread_n) /*!< thread number for diagnostics */
+{
+ ulint page_size;
+ ulint page_size_shift;
+ ulint zip_size;
+ ibool success;
+
+ /* Initialize these first so xb_fil_cur_close() handles them correctly
+ in case of error */
+ cursor->orig_buf = NULL;
+ cursor->node = NULL;
+
+ cursor->space_id = node->space->id;
+ cursor->is_system = !fil_is_user_tablespace_id(node->space->id);
+
+ strncpy(cursor->abs_path, node->name, sizeof(cursor->abs_path));
+
+ /* Get the relative path for the destination tablespace name, i.e. the
+ one that can be appended to the backup root directory. Non-system
+ tablespaces may have absolute paths for remote tablespaces in MySQL
+ 5.6+. We want to make "local" copies for the backup. */
+ strncpy(cursor->rel_path,
+ xb_get_relative_path(cursor->abs_path, cursor->is_system),
+ sizeof(cursor->rel_path));
+
+ /* In the backup mode we should already have a tablespace handle created
+ by fil_load_single_table_tablespace() unless it is a system
+ tablespace. Otherwise we open the file here. */
+ if (cursor->is_system || !srv_backup_mode || srv_close_files) {
+ node->handle =
+ os_file_create_simple_no_error_handling(0, node->name,
+ OS_FILE_OPEN,
+ OS_FILE_READ_ONLY,
+ &success);
+ if (!success) {
+ /* The following call prints an error message */
+ os_file_get_last_error(TRUE);
+
+ msg("[%02u] xtrabackup: error: cannot open "
+ "tablespace %s\n",
+ thread_n, cursor->abs_path);
+
+ return(XB_FIL_CUR_ERROR);
+ }
+ mutex_enter(&fil_system->mutex);
+
+ node->open = TRUE;
+
+ fil_system->n_open++;
+ fil_n_file_opened++;
+
+ if (node->space->purpose == FIL_TABLESPACE &&
+ fil_is_user_tablespace_id(node->space->id)) {
+
+ /* Put the node to the LRU list */
+ UT_LIST_ADD_FIRST(LRU, fil_system->LRU, node);
+ }
+
+ mutex_exit(&fil_system->mutex);
+ }
+
+ ut_ad(node->open);
+
+ cursor->node = node;
+ cursor->file = node->handle;
+
+ if (my_fstat(cursor->file, &cursor->statinfo, MYF(MY_WME))) {
+ msg("[%02u] xtrabackup: error: cannot stat %s\n",
+ thread_n, cursor->abs_path);
+
+ xb_fil_cur_close(cursor);
+
+ return(XB_FIL_CUR_ERROR);
+ }
+
+ if (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
+ || srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC) {
+
+ os_file_set_nocache(cursor->file, node->name, "OPEN");
+ }
+
+ posix_fadvise(cursor->file, 0, 0, POSIX_FADV_SEQUENTIAL);
+
+ /* Determine the page size */
+ zip_size = xb_get_zip_size(cursor->file);
+ if (zip_size == ULINT_UNDEFINED) {
+ xb_fil_cur_close(cursor);
+ return(XB_FIL_CUR_SKIP);
+ } else if (zip_size) {
+ page_size = zip_size;
+ page_size_shift = get_bit_shift(page_size);
+ msg("[%02u] %s is compressed with page size = "
+ "%lu bytes\n", thread_n, node->name, page_size);
+ if (page_size_shift < 10 || page_size_shift > 14) {
+ msg("[%02u] xtrabackup: Error: Invalid "
+ "page size: %lu.\n", thread_n, page_size);
+ ut_error;
+ }
+ } else {
+ page_size = UNIV_PAGE_SIZE;
+ page_size_shift = UNIV_PAGE_SIZE_SHIFT;
+ }
+ cursor->page_size = page_size;
+ cursor->page_size_shift = page_size_shift;
+ cursor->zip_size = zip_size;
+
+ /* Allocate read buffer */
+ cursor->buf_size = XB_FIL_CUR_PAGES * page_size;
+ cursor->orig_buf = static_cast<byte *>
+ (ut_malloc(cursor->buf_size + UNIV_PAGE_SIZE));
+ cursor->buf = static_cast<byte *>
+ (ut_align(cursor->orig_buf, UNIV_PAGE_SIZE));
+
+ cursor->buf_read = 0;
+ cursor->buf_npages = 0;
+ cursor->buf_offset = 0;
+ cursor->buf_page_no = 0;
+ cursor->thread_n = thread_n;
+
+ cursor->space_size = cursor->statinfo.st_size / page_size;
+
+ cursor->read_filter = read_filter;
+ cursor->read_filter->init(&cursor->read_filter_ctxt, cursor,
+ node->space->id);
+
+ return(XB_FIL_CUR_SUCCESS);
+}
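/* Example of the page size determination above: a table created with
KEY_BLOCK_SIZE=8 has zip_size = 8192, so page_size = 8192 and
page_size_shift = 13, while an uncompressed tablespace uses
UNIV_PAGE_SIZE (16384 by default) with shift 14.  The 10..14 range
check in xb_fil_cur_open() therefore accepts compressed page sizes
from 1 KiB to 16 KiB. */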
+
+/************************************************************************
+Reads and verifies the next block of pages from the source
+file. Positions the cursor after the last read non-corrupted page.
+
+@return XB_FIL_CUR_SUCCESS if some pages have been read successfully, XB_FIL_CUR_EOF
+if there are no more pages to read and XB_FIL_CUR_ERROR on error. */
+xb_fil_cur_result_t
+xb_fil_cur_read(
+/*============*/
+ xb_fil_cur_t* cursor) /*!< in/out: source file cursor */
+{
+ ibool success;
+ byte* page;
+ ulint i;
+ ulint npages;
+ ulint retry_count;
+ xb_fil_cur_result_t ret;
+ ib_int64_t offset;
+ ib_int64_t to_read;
+
+ cursor->read_filter->get_next_batch(&cursor->read_filter_ctxt,
+ &offset, &to_read);
+
+ if (to_read == 0LL) {
+ return(XB_FIL_CUR_EOF);
+ }
+
+ if (to_read > (ib_int64_t) cursor->buf_size) {
+ to_read = (ib_int64_t) cursor->buf_size;
+ }
+
+ xb_a(to_read > 0 && to_read <= 0xFFFFFFFFLL);
+
+ if (to_read % cursor->page_size != 0 &&
+ offset + to_read == cursor->statinfo.st_size) {
+
+ if (to_read < (ib_int64_t) cursor->page_size) {
+ msg("[%02u] xtrabackup: Warning: junk at the end of "
+ "%s:\n", cursor->thread_n, cursor->abs_path);
+ msg("[%02u] xtrabackup: Warning: offset = %llu, "
+ "to_read = %llu\n",
+ cursor->thread_n,
+ (unsigned long long) offset,
+ (unsigned long long) to_read);
+
+ return(XB_FIL_CUR_EOF);
+ }
+
+ to_read = (ib_int64_t) (((ulint) to_read) &
+ ~(cursor->page_size - 1));
+ }
+
+ xb_a(to_read % cursor->page_size == 0);
+
+ npages = (ulint) (to_read >> cursor->page_size_shift);
+
+ retry_count = 10;
+ ret = XB_FIL_CUR_SUCCESS;
+
+read_retry:
+ xtrabackup_io_throttling();
+
+ cursor->buf_read = 0;
+ cursor->buf_npages = 0;
+ cursor->buf_offset = offset;
+ cursor->buf_page_no = (ulint) (offset >> cursor->page_size_shift);
+
+ success = os_file_read(cursor->file, cursor->buf, offset,
+ to_read);
+ if (!success) {
+ return(XB_FIL_CUR_ERROR);
+ }
+
+ /* Check pages for corruption and re-read the batch if necessary,
+ e.g. in case of partially written pages */
+ for (page = cursor->buf, i = 0; i < npages;
+ page += cursor->page_size, i++) {
+
+ if (buf_page_is_corrupted(TRUE, page, cursor->zip_size)) {
+
+ ulint page_no = cursor->buf_page_no + i;
+
+ if (cursor->is_system &&
+ page_no >= FSP_EXTENT_SIZE &&
+ page_no < FSP_EXTENT_SIZE * 3) {
+ /* skip doublewrite buffer pages */
+ xb_a(cursor->page_size == UNIV_PAGE_SIZE);
+ msg("[%02u] xtrabackup: "
+ "Page %lu is a doublewrite buffer page, "
+ "skipping.\n", cursor->thread_n, page_no);
+ } else {
+ retry_count--;
+ if (retry_count == 0) {
+ msg("[%02u] xtrabackup: "
+ "Error: failed to read page after "
+ "10 retries. File %s seems to be "
+ "corrupted.\n", cursor->thread_n,
+ cursor->abs_path);
+ ret = XB_FIL_CUR_ERROR;
+ break;
+ }
+ msg("[%02u] xtrabackup: "
+ "Database page corruption detected at page "
+ "%lu, retrying...\n", cursor->thread_n,
+ page_no);
+
+ os_thread_sleep(100000);
+
+ goto read_retry;
+ }
+ }
+ cursor->buf_read += cursor->page_size;
+ cursor->buf_npages++;
+ }
+
+ posix_fadvise(cursor->file, offset, to_read, POSIX_FADV_DONTNEED);
+
+ return(ret);
+}
+
+/************************************************************************
+Close the source file cursor opened with xb_fil_cur_open() and its
+associated read filter. */
+void
+xb_fil_cur_close(
+/*=============*/
+ xb_fil_cur_t *cursor) /*!< in/out: source file cursor */
+{
+ cursor->read_filter->deinit(&cursor->read_filter_ctxt);
+
+ if (cursor->orig_buf != NULL) {
+ ut_free(cursor->orig_buf);
+ }
+ if (cursor->node != NULL) {
+ xb_fil_node_close_file(cursor->node);
+ cursor->file = XB_FILE_UNDEFINED;
+ }
+}
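
The three functions above are meant to be driven as an open / read-loop / close sequence, which is roughly how the copy threads in xtrabackup.cc use them. The sketch below is illustrative only: process_batch() and the node/filter/thread_n arguments are placeholders for whatever the caller provides.

/* Copy one tablespace through the cursor, handing each verified batch
   of pages to a hypothetical process_batch() consumer. */
static ibool
copy_one_tablespace(fil_node_t *node, xb_read_filt_t *filter, uint thread_n)
{
	xb_fil_cur_t		cursor;
	xb_fil_cur_result_t	res;

	res = xb_fil_cur_open(&cursor, filter, node, thread_n);
	if (res == XB_FIL_CUR_SKIP) {
		return(TRUE);		/* nothing to copy, not an error */
	}
	if (res == XB_FIL_CUR_ERROR) {
		return(FALSE);
	}

	while ((res = xb_fil_cur_read(&cursor)) == XB_FIL_CUR_SUCCESS) {
		/* cursor.buf now holds cursor.buf_npages verified pages
		(cursor.buf_read bytes) read from file offset
		cursor.buf_offset */
		if (!process_batch(cursor.buf, cursor.buf_read,
				   cursor.buf_offset)) {
			break;
		}
	}

	xb_fil_cur_close(&cursor);

	return(res == XB_FIL_CUR_EOF);
}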
diff --git a/extra/mariabackup/fil_cur.h b/extra/mariabackup/fil_cur.h
new file mode 100644
index 00000000000..2057765dab5
--- /dev/null
+++ b/extra/mariabackup/fil_cur.h
@@ -0,0 +1,123 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2013 Percona LLC and/or its affiliates.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Source file cursor interface */
+
+#ifndef FIL_CUR_H
+#define FIL_CUR_H
+
+#include <my_dir.h>
+#include "read_filt.h"
+
+struct xb_fil_cur_t {
+ os_file_t file; /*!< source file handle */
+ fil_node_t* node; /*!< source tablespace node */
+ char rel_path[FN_REFLEN];
+ /*!< normalized file path */
+ char abs_path[FN_REFLEN];
+ /*!< absolute file path */
+ MY_STAT statinfo; /*!< information about the file */
+ ulint zip_size; /*!< compressed page size in bytes or 0
+ for uncompressed pages */
+ ulint page_size; /*!< = zip_size for compressed pages or
+ UNIV_PAGE_SIZE for uncompressed ones */
+ ulint page_size_shift;/*!< bit shift corresponding to
+ page_size */
+ my_bool is_system; /*!< TRUE for system tablespace, FALSE
+ otherwise */
+ xb_read_filt_t* read_filter; /*!< read filter */
+ xb_read_filt_ctxt_t read_filter_ctxt;
+ /*!< read filter context */
+ byte* orig_buf; /*!< read buffer */
+ byte* buf; /*!< aligned pointer for orig_buf */
+ ulint buf_size; /*!< buffer size in bytes */
+ ulint buf_read; /*!< number of read bytes in buffer
+ after the last cursor read */
+ ulint buf_npages; /*!< number of pages in buffer after the
+ last cursor read */
+ ib_int64_t buf_offset; /*!< file offset of the first page in
+ buffer */
+ ulint buf_page_no; /*!< number of the first page in
+ buffer */
+ uint thread_n; /*!< thread number for diagnostics */
+ ulint space_id; /*!< ID of tablespace */
+ ulint space_size; /*!< space size in pages */
+};
+
+typedef enum {
+ XB_FIL_CUR_SUCCESS,
+ XB_FIL_CUR_SKIP,
+ XB_FIL_CUR_ERROR,
+ XB_FIL_CUR_EOF
+} xb_fil_cur_result_t;
+
+/************************************************************************
+Open a source file cursor and initialize the associated read filter.
+
+@return XB_FIL_CUR_SUCCESS on success, XB_FIL_CUR_SKIP if the source file must
+be skipped and XB_FIL_CUR_ERROR on error. */
+xb_fil_cur_result_t
+xb_fil_cur_open(
+/*============*/
+ xb_fil_cur_t* cursor, /*!< out: source file cursor */
+ xb_read_filt_t* read_filter, /*!< in/out: the read filter */
+ fil_node_t* node, /*!< in: source tablespace node */
+ uint thread_n); /*!< thread number for diagnostics */
+
+/************************************************************************
+Reads and verifies the next block of pages from the source
+file. Positions the cursor after the last read non-corrupted page.
+
+@return XB_FIL_CUR_SUCCESS if some pages have been read successfully, XB_FIL_CUR_EOF
+if there are no more pages to read and XB_FIL_CUR_ERROR on error. */
+xb_fil_cur_result_t
+xb_fil_cur_read(
+/*============*/
+ xb_fil_cur_t* cursor); /*!< in/out: source file cursor */
+
+/************************************************************************
+Close the source file cursor opened with xb_fil_cur_open() and its
+associated read filter. */
+void
+xb_fil_cur_close(
+/*=============*/
+ xb_fil_cur_t *cursor); /*!< in/out: source file cursor */
+
+/***********************************************************************
+Extracts the relative path ("database/table.ibd") of a tablespace from a
+specified possibly absolute path.
+
+For user tablespaces both "./database/table.ibd" and
+"/remote/dir/database/table.ibd" result in "database/table.ibd".
+
+For system tablespaces (i.e. when is_system is TRUE) both "/remote/dir/ibdata1"
+and "./ibdata1" yield "ibdata1" in the output. */
+const char *
+xb_get_relative_path(
+/*=================*/
+ const char* path, /*!< in: tablespace path (either
+ relative or absolute) */
+ ibool is_system); /*!< in: TRUE for system tablespaces,
+ i.e. when only the filename must be
+ returned. */
+
+#endif
diff --git a/extra/mariabackup/innobackupex.cc b/extra/mariabackup/innobackupex.cc
new file mode 100644
index 00000000000..ec697bb56ff
--- /dev/null
+++ b/extra/mariabackup/innobackupex.cc
@@ -0,0 +1,1153 @@
+/******************************************************
+hot backup tool for InnoDB
+(c) 2009-2015 Percona LLC and/or its affiliates
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************
+
+This file incorporates work covered by the following copyright and
+permission notice:
+
+Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA
+
+*******************************************************/
+
+#include <my_global.h>
+#include <stdio.h>
+#include <string.h>
+#include <mysql.h>
+#include <my_dir.h>
+#include <ut0mem.h>
+#include <os0sync.h>
+#include <os0file.h>
+#include <srv0start.h>
+#include <algorithm>
+#include <mysqld.h>
+#include <my_default.h>
+#include <my_getopt.h>
+#include <strings.h>
+#include <string>
+#include <sstream>
+#include <set>
+#include "common.h"
+#include "innobackupex.h"
+#include "xtrabackup.h"
+#include "xtrabackup_version.h"
+#include "xbstream.h"
+#include "fil_cur.h"
+#include "write_filt.h"
+#include "backup_copy.h"
+
+using std::min;
+using std::max;
+
+/* options */
+my_bool opt_ibx_version = FALSE;
+my_bool opt_ibx_help = FALSE;
+my_bool opt_ibx_apply_log = FALSE;
+my_bool opt_ibx_redo_only = FALSE;
+my_bool opt_ibx_incremental = FALSE;
+my_bool opt_ibx_notimestamp = FALSE;
+
+my_bool opt_ibx_copy_back = FALSE;
+my_bool opt_ibx_move_back = FALSE;
+my_bool opt_ibx_galera_info = FALSE;
+my_bool opt_ibx_slave_info = FALSE;
+my_bool opt_ibx_no_lock = FALSE;
+my_bool opt_ibx_safe_slave_backup = FALSE;
+my_bool opt_ibx_rsync = FALSE;
+my_bool opt_ibx_force_non_empty_dirs = FALSE;
+my_bool opt_ibx_noversioncheck = FALSE;
+my_bool opt_ibx_no_backup_locks = FALSE;
+my_bool opt_ibx_decompress = FALSE;
+
+char *opt_ibx_incremental_history_name = NULL;
+char *opt_ibx_incremental_history_uuid = NULL;
+
+char *opt_ibx_user = NULL;
+char *opt_ibx_password = NULL;
+char *opt_ibx_host = NULL;
+char *opt_ibx_defaults_group = NULL;
+char *opt_ibx_socket = NULL;
+uint opt_ibx_port = 0;
+char *opt_ibx_login_path = NULL;
+
+
+ulong opt_ibx_lock_wait_query_type;
+ulong opt_ibx_kill_long_query_type;
+
+ulong opt_ibx_decrypt_algo = 0;
+
+uint opt_ibx_kill_long_queries_timeout = 0;
+uint opt_ibx_lock_wait_timeout = 0;
+uint opt_ibx_lock_wait_threshold = 0;
+uint opt_ibx_debug_sleep_before_unlock = 0;
+uint opt_ibx_safe_slave_backup_timeout = 0;
+
+const char *opt_ibx_history = NULL;
+bool opt_ibx_decrypt = false;
+
+char *opt_ibx_include = NULL;
+char *opt_ibx_databases = NULL;
+bool ibx_partial_backup = false;
+
+char *ibx_position_arg = NULL;
+char *ibx_backup_directory = NULL;
+
+/* copy of proxied xtrabackup options */
+my_bool ibx_xb_close_files;
+my_bool ibx_xtrabackup_compact;
+const char *ibx_xtrabackup_compress_alg;
+uint ibx_xtrabackup_compress_threads;
+ulonglong ibx_xtrabackup_compress_chunk_size;
+ulong ibx_xtrabackup_encrypt_algo;
+char *ibx_xtrabackup_encrypt_key;
+char *ibx_xtrabackup_encrypt_key_file;
+uint ibx_xtrabackup_encrypt_threads;
+ulonglong ibx_xtrabackup_encrypt_chunk_size;
+my_bool ibx_xtrabackup_export;
+char *ibx_xtrabackup_extra_lsndir;
+char *ibx_xtrabackup_incremental_basedir;
+char *ibx_xtrabackup_incremental_dir;
+my_bool ibx_xtrabackup_incremental_force_scan;
+ulint ibx_xtrabackup_log_copy_interval;
+char *ibx_xtrabackup_incremental;
+int ibx_xtrabackup_parallel;
+my_bool ibx_xtrabackup_rebuild_indexes;
+ulint ibx_xtrabackup_rebuild_threads;
+char *ibx_xtrabackup_stream_str;
+char *ibx_xtrabackup_tables_file;
+long ibx_xtrabackup_throttle;
+char *ibx_opt_mysql_tmpdir;
+longlong ibx_xtrabackup_use_memory;
+
+
+static inline int ibx_msg(const char *fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2);
+static inline int ibx_msg(const char *fmt, ...)
+{
+ int result;
+ time_t t = time(NULL);
+ char date[100];
+ char *line;
+ va_list args;
+
+ strftime(date, sizeof(date), "%y%m%d %H:%M:%S", localtime(&t));
+
+ va_start(args, fmt);
+
+ result = vasprintf(&line, fmt, args);
+
+ va_end(args);
+
+ if (result != -1) {
+ result = fprintf(stderr, "%s %s: %s",
+ date, INNOBACKUPEX_BIN_NAME, line);
+ free(line);
+ }
+
+ return result;
+}
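/* ibx_msg() is used like printf(); assuming INNOBACKUPEX_BIN_NAME
expands to "innobackupex", a call such as (illustrative message text)

	ibx_msg("Starting the apply-log operation\n");

comes out on stderr as

	170418 18:43:20 innobackupex: Starting the apply-log operation
*/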
+
+enum innobackupex_options
+{
+ OPT_APPLY_LOG = 256,
+ OPT_COPY_BACK,
+ OPT_MOVE_BACK,
+ OPT_REDO_ONLY,
+ OPT_GALERA_INFO,
+ OPT_SLAVE_INFO,
+ OPT_INCREMENTAL,
+ OPT_INCREMENTAL_HISTORY_NAME,
+ OPT_INCREMENTAL_HISTORY_UUID,
+ OPT_LOCK_WAIT_QUERY_TYPE,
+ OPT_KILL_LONG_QUERY_TYPE,
+ OPT_KILL_LONG_QUERIES_TIMEOUT,
+ OPT_LOCK_WAIT_TIMEOUT,
+ OPT_LOCK_WAIT_THRESHOLD,
+ OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
+ OPT_NO_LOCK,
+ OPT_SAFE_SLAVE_BACKUP,
+ OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
+ OPT_RSYNC,
+ OPT_HISTORY,
+ OPT_INCLUDE,
+ OPT_FORCE_NON_EMPTY_DIRS,
+ OPT_NO_TIMESTAMP,
+ OPT_NO_VERSION_CHECK,
+ OPT_NO_BACKUP_LOCKS,
+ OPT_DATABASES,
+ OPT_DECRYPT,
+ OPT_DECOMPRESS,
+
+ /* options which are passed directly to xtrabackup */
+ OPT_CLOSE_FILES,
+ OPT_COMPACT,
+ OPT_COMPRESS,
+ OPT_COMPRESS_THREADS,
+ OPT_COMPRESS_CHUNK_SIZE,
+ OPT_ENCRYPT,
+ OPT_ENCRYPT_KEY,
+ OPT_ENCRYPT_KEY_FILE,
+ OPT_ENCRYPT_THREADS,
+ OPT_ENCRYPT_CHUNK_SIZE,
+ OPT_EXPORT,
+ OPT_EXTRA_LSNDIR,
+ OPT_INCREMENTAL_BASEDIR,
+ OPT_INCREMENTAL_DIR,
+ OPT_INCREMENTAL_FORCE_SCAN,
+ OPT_LOG_COPY_INTERVAL,
+ OPT_PARALLEL,
+ OPT_REBUILD_INDEXES,
+ OPT_REBUILD_THREADS,
+ OPT_STREAM,
+ OPT_TABLES_FILE,
+ OPT_THROTTLE,
+ OPT_USE_MEMORY
+};
+
+ibx_mode_t ibx_mode = IBX_MODE_BACKUP;
+
+static struct my_option ibx_long_options[] =
+{
+ {"version", 'v', "print xtrabackup version information",
+ (uchar *) &opt_ibx_version, (uchar *) &opt_ibx_version, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"help", '?', "This option displays a help screen and exits.",
+ (uchar *) &opt_ibx_help, (uchar *) &opt_ibx_help, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"apply-log", OPT_APPLY_LOG, "Prepare a backup in BACKUP-DIR by "
+ "applying the transaction log file named \"xtrabackup_logfile\" "
+ "located in the same directory. Also, create new transaction logs. "
+ "The InnoDB configuration is read from the file \"backup-my.cnf\".",
+ (uchar*) &opt_ibx_apply_log, (uchar*) &opt_ibx_apply_log,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"redo-only", OPT_REDO_ONLY, "This option should be used when "
+ "preparing the base full backup and when merging all incrementals "
+ "except the last one. This forces xtrabackup to skip the \"rollback\" "
+ "phase and do a \"redo\" only. This is necessary if the backup will "
+ "have incremental changes applied to it later. See the xtrabackup "
+ "documentation for details.",
+ (uchar *) &opt_ibx_redo_only, (uchar *) &opt_ibx_redo_only, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"copy-back", OPT_COPY_BACK, "Copy all the files in a previously made "
+ "backup from the backup directory to their original locations.",
+ (uchar *) &opt_ibx_copy_back, (uchar *) &opt_ibx_copy_back, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"move-back", OPT_MOVE_BACK, "Move all the files in a previously made "
+ "backup from the backup directory to the actual datadir location. "
+ "Use with caution, as it removes backup files.",
+ (uchar *) &opt_ibx_move_back, (uchar *) &opt_ibx_move_back, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"galera-info", OPT_GALERA_INFO, "This options creates the "
+ "xtrabackup_galera_info file which contains the local node state at "
+ "the time of the backup. Option should be used when performing the "
+ "backup of Percona-XtraDB-Cluster. Has no effect when backup locks "
+ "are used to create the backup.",
+ (uchar *) &opt_ibx_galera_info, (uchar *) &opt_ibx_galera_info, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"slave-info", OPT_SLAVE_INFO, "This option is useful when backing "
+ "up a replication slave server. It prints the binary log position "
+ "and name of the master server. It also writes this information to "
+ "the \"xtrabackup_slave_info\" file as a \"CHANGE MASTER\" command. "
+ "A new slave for this master can be set up by starting a slave server "
+ "on this backup and issuing a \"CHANGE MASTER\" command with the "
+ "binary log position saved in the \"xtrabackup_slave_info\" file.",
+ (uchar *) &opt_ibx_slave_info, (uchar *) &opt_ibx_slave_info, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental", OPT_INCREMENTAL, "This option tells xtrabackup to "
+ "create an incremental backup, rather than a full one. It is passed "
+ "to the xtrabackup child process. When this option is specified, "
+ "either --incremental-lsn or --incremental-basedir can also be given. "
+ "If neither option is given, option --incremental-basedir is passed "
+ "to xtrabackup by default, set to the first timestamped backup "
+ "directory in the backup base directory.",
+ (uchar *) &opt_ibx_incremental, (uchar *) &opt_ibx_incremental, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-lock", OPT_NO_LOCK, "Use this option to disable table lock "
+ "with \"FLUSH TABLES WITH READ LOCK\". Use it only if ALL your "
+ "tables are InnoDB and you DO NOT CARE about the binary log "
+ "position of the backup. This option shouldn't be used if there "
+ "are any DDL statements being executed or if any updates are "
+ "happening on non-InnoDB tables (this includes the system MyISAM "
+ "tables in the mysql database), otherwise it could lead to an "
+ "inconsistent backup. If you are considering to use --no-lock "
+ "because your backups are failing to acquire the lock, this could "
+ "be because of incoming replication events preventing the lock "
+ "from succeeding. Please try using --safe-slave-backup to "
+ "momentarily stop the replication slave thread, this may help "
+ "the backup to succeed and you then don't need to resort to "
+ "using this option.",
+ (uchar *) &opt_ibx_no_lock, (uchar *) &opt_ibx_no_lock, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"safe-slave-backup", OPT_SAFE_SLAVE_BACKUP, "Stop slave SQL thread "
+ "and wait to start backup until Slave_open_temp_tables in "
+ "\"SHOW STATUS\" is zero. If there are no open temporary tables, "
+ "the backup will take place, otherwise the SQL thread will be "
+ "started and stopped until there are no open temporary tables. "
+ "The backup will fail if Slave_open_temp_tables does not become "
+ "zero after --safe-slave-backup-timeout seconds. The slave SQL "
+ "thread will be restarted when the backup finishes.",
+ (uchar *) &opt_ibx_safe_slave_backup,
+ (uchar *) &opt_ibx_safe_slave_backup,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"rsync", OPT_RSYNC, "Uses the rsync utility to optimize local file "
+ "transfers. When this option is specified, innobackupex uses rsync "
+ "to copy all non-InnoDB files instead of spawning a separate cp for "
+ "each file, which can be much faster for servers with a large number "
+ "of databases or tables. This option cannot be used together with "
+ "--stream.",
+ (uchar *) &opt_ibx_rsync, (uchar *) &opt_ibx_rsync,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"force-non-empty-directories", OPT_FORCE_NON_EMPTY_DIRS, "This "
+ "option, when specified, makes --copy-back or --move-back transfer "
+ "files to non-empty directories. Note that no existing files will be "
+ "overwritten. If --copy-back or --nove-back has to copy a file from "
+ "the backup directory which already exists in the destination "
+ "directory, it will still fail with an error.",
+ (uchar *) &opt_ibx_force_non_empty_dirs,
+ (uchar *) &opt_ibx_force_non_empty_dirs,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-timestamp", OPT_NO_TIMESTAMP, "This option prevents creation of a "
+ "time-stamped subdirectory of the BACKUP-ROOT-DIR given on the "
+ "command line. When it is specified, the backup is done in "
+ "BACKUP-ROOT-DIR instead.",
+ (uchar *) &opt_ibx_notimestamp,
+ (uchar *) &opt_ibx_notimestamp,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-version-check", OPT_NO_VERSION_CHECK, "This option disables the "
+ "version check which is enabled by the --version-check option.",
+ (uchar *) &opt_ibx_noversioncheck,
+ (uchar *) &opt_ibx_noversioncheck,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-backup-locks", OPT_NO_BACKUP_LOCKS, "This option controls if "
+ "backup locks should be used instead of FLUSH TABLES WITH READ LOCK "
+ "on the backup stage. The option has no effect when backup locks are "
+ "not supported by the server. This option is enabled by default, "
+ "disable with --no-backup-locks.",
+ (uchar *) &opt_ibx_no_backup_locks,
+ (uchar *) &opt_ibx_no_backup_locks,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"decompress", OPT_DECOMPRESS, "Decompresses all files with the .qp "
+ "extension in a backup previously made with the --compress option.",
+ (uchar *) &opt_ibx_decompress,
+ (uchar *) &opt_ibx_decompress,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"user", 'u', "This option specifies the MySQL username used "
+ "when connecting to the server, if that's not the current user. "
+ "The option accepts a string argument. See mysql --help for details.",
+ (uchar*) &opt_ibx_user, (uchar*) &opt_ibx_user, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"host", 'H', "This option specifies the host to use when "
+ "connecting to the database server with TCP/IP. The option accepts "
+ "a string argument. See mysql --help for details.",
+ (uchar*) &opt_ibx_host, (uchar*) &opt_ibx_host, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"port", 'P', "This option specifies the port to use when "
+ "connecting to the database server with TCP/IP. The option accepts "
+ "a string argument. See mysql --help for details.",
+ &opt_ibx_port, &opt_ibx_port, 0, GET_UINT, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"password", 'p', "This option specifies the password to use "
+ "when connecting to the database. It accepts a string argument. "
+ "See mysql --help for details.",
+ 0, 0, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"socket", 'S', "This option specifies the socket to use when "
+ "connecting to the local database server with a UNIX domain socket. "
+ "The option accepts a string argument. See mysql --help for details.",
+ (uchar*) &opt_ibx_socket, (uchar*) &opt_ibx_socket, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-history-name", OPT_INCREMENTAL_HISTORY_NAME,
+ "This option specifies the name of the backup series stored in the "
+ "PERCONA_SCHEMA.xtrabackup_history history record to base an "
+ "incremental backup on. Xtrabackup will search the history table "
+ "looking for the most recent (highest innodb_to_lsn), successful "
+ "backup in the series and take the to_lsn value to use as the "
+ "starting lsn for the incremental backup. This will be mutually "
+ "exclusive with --incremental-history-uuid, --incremental-basedir "
+ "and --incremental-lsn. If no valid lsn can be found (no series by "
+ "that name, no successful backups by that name) xtrabackup will "
+ "return with an error. It is used with the --incremental option.",
+ (uchar*) &opt_ibx_incremental_history_name,
+ (uchar*) &opt_ibx_incremental_history_name, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-history-uuid", OPT_INCREMENTAL_HISTORY_UUID,
+ "This option specifies the UUID of the specific history record "
+ "stored in the PERCONA_SCHEMA.xtrabackup_history to base an "
+ "incremental backup on. --incremental-history-name, "
+ "--incremental-basedir and --incremental-lsn. If no valid lsn can be "
+ "found (no success record with that uuid) xtrabackup will return "
+ "with an error. It is used with the --incremental option.",
+ (uchar*) &opt_ibx_incremental_history_uuid,
+ (uchar*) &opt_ibx_incremental_history_uuid, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"decrypt", OPT_DECRYPT, "Decrypts all files with the .xbcrypt "
+ "extension in a backup previously made with --encrypt option.",
+ &opt_ibx_decrypt_algo, &opt_ibx_decrypt_algo,
+ &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE,
+ "This option specifies which types of queries are allowed to complete "
+ "before innobackupex will issue the global lock. Default is all.",
+ (uchar*) &opt_ibx_lock_wait_query_type,
+ (uchar*) &opt_ibx_lock_wait_query_type, &query_type_typelib,
+ GET_ENUM, REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0},
+
+ {"kill-long-query-type", OPT_KILL_LONG_QUERY_TYPE,
+ "This option specifies which types of queries should be killed to "
+ "unblock the global lock. Default is \"all\".",
+ (uchar*) &opt_ibx_kill_long_query_type,
+ (uchar*) &opt_ibx_kill_long_query_type, &query_type_typelib,
+ GET_ENUM, REQUIRED_ARG, QUERY_TYPE_SELECT, 0, 0, 0, 0, 0},
+
+ {"history", OPT_HISTORY,
+ "This option enables the tracking of backup history in the "
+ "PERCONA_SCHEMA.xtrabackup_history table. An optional history "
+ "series name may be specified that will be placed with the history "
+ "record for the current backup being taken.",
+ NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"include", OPT_INCLUDE,
+ "This option is a regular expression to be matched against table "
+ "names in databasename.tablename format. It is passed directly to "
+ "xtrabackup's --tables option. See the xtrabackup documentation for "
+ "details.",
+ (uchar*) &opt_ibx_include,
+ (uchar*) &opt_ibx_include, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"databases", OPT_DATABASES,
+ "This option specifies the list of databases that innobackupex should "
+ "back up. The option accepts a string argument or path to file that "
+ "contains the list of databases to back up. The list is of the form "
+ "\"databasename1[.table_name1] databasename2[.table_name2] . . .\". "
+ "If this option is not specified, all databases containing MyISAM and "
+ "InnoDB tables will be backed up. Please make sure that --databases "
+ "contains all of the InnoDB databases and tables, so that all of the "
+ "innodb.frm files are also backed up. In case the list is very long, "
+ "this can be specified in a file, and the full path of the file can "
+ "be specified instead of the list. (See option --tables-file.)",
+ (uchar*) &opt_ibx_databases,
+ (uchar*) &opt_ibx_databases, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"kill-long-queries-timeout", OPT_KILL_LONG_QUERIES_TIMEOUT,
+ "This option specifies the number of seconds innobackupex waits "
+ "between starting FLUSH TABLES WITH READ LOCK and killing those "
+ "queries that block it. Default is 0 seconds, which means "
+ "innobackupex will not attempt to kill any queries.",
+ (uchar*) &opt_ibx_kill_long_queries_timeout,
+ (uchar*) &opt_ibx_kill_long_queries_timeout, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"ftwrl-wait-timeout", OPT_LOCK_WAIT_TIMEOUT,
+ "This option specifies time in seconds that innobackupex should wait "
+ "for queries that would block FTWRL before running it. If there are "
+ "still such queries when the timeout expires, innobackupex terminates "
+ "with an error. Default is 0, in which case innobackupex does not "
+ "wait for queries to complete and starts FTWRL immediately.",
+ (uchar*) &opt_ibx_lock_wait_timeout,
+ (uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"ftwrl-wait-threshold", OPT_LOCK_WAIT_THRESHOLD,
+ "This option specifies the query run time threshold which is used by "
+ "innobackupex to detect long-running queries with a non-zero value "
+ "of --ftwrl-wait-timeout. FTWRL is not started until such "
+ "long-running queries exist. This option has no effect if "
+ "--ftwrl-wait-timeout is 0. Default value is 60 seconds.",
+ (uchar*) &opt_ibx_lock_wait_threshold,
+ (uchar*) &opt_ibx_lock_wait_threshold, 0, GET_UINT,
+ REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
+
+ {"debug-sleep-before-unlock", OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
+ "This is a debug-only option used by the XtraBackup test suite.",
+ (uchar*) &opt_ibx_debug_sleep_before_unlock,
+ (uchar*) &opt_ibx_debug_sleep_before_unlock, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
+ "How many seconds --safe-slave-backup should wait for "
+ "Slave_open_temp_tables to become zero. (default 300)",
+ (uchar*) &opt_ibx_safe_slave_backup_timeout,
+ (uchar*) &opt_ibx_safe_slave_backup_timeout, 0, GET_UINT,
+ REQUIRED_ARG, 300, 0, 0, 0, 0, 0},
+
+
+ /* The following command-line options are actually handled by xtrabackup.
+ We list them here only so that they show up in the
+ innobackupex --help output */
+
+ {"close_files", OPT_CLOSE_FILES, "Do not keep files opened. This "
+ "option is passed directly to xtrabackup. Use at your own risk.",
+ (uchar*) &ibx_xb_close_files, (uchar*) &ibx_xb_close_files, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"compact", OPT_COMPACT, "Create a compact backup with all secondary "
+ "index pages omitted. This option is passed directly to xtrabackup. "
+ "See xtrabackup documentation for details.",
+ (uchar*) &ibx_xtrabackup_compact, (uchar*) &ibx_xtrabackup_compact,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"compress", OPT_COMPRESS, "This option instructs xtrabackup to "
+ "compress backup copies of InnoDB data files. It is passed directly "
+ "to the xtrabackup child process. Try 'xtrabackup --help' for more "
+ "details.", (uchar*) &ibx_xtrabackup_compress_alg,
+ (uchar*) &ibx_xtrabackup_compress_alg, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"compress-threads", OPT_COMPRESS_THREADS,
+ "This option specifies the number of worker threads that will be used "
+ "for parallel compression. It is passed directly to the xtrabackup "
+ "child process. Try 'xtrabackup --help' for more details.",
+ (uchar*) &ibx_xtrabackup_compress_threads,
+ (uchar*) &ibx_xtrabackup_compress_threads,
+ 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
+
+ {"compress-chunk-size", OPT_COMPRESS_CHUNK_SIZE, "Size of working "
+ "buffer(s) for compression threads in bytes. The default value "
+ "is 64K.", (uchar*) &ibx_xtrabackup_compress_chunk_size,
+ (uchar*) &ibx_xtrabackup_compress_chunk_size,
+ 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
+
+ {"encrypt", OPT_ENCRYPT, "This option instructs xtrabackup to encrypt "
+ "backup copies of InnoDB data files using the algorithm specified in "
+ "the ENCRYPTION-ALGORITHM. It is passed directly to the xtrabackup "
+ "child process. Try 'xtrabackup --help' for more details.",
+ &ibx_xtrabackup_encrypt_algo, &ibx_xtrabackup_encrypt_algo,
+ &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-key", OPT_ENCRYPT_KEY, "This option instructs xtrabackup to "
+ "use the given ENCRYPTION-KEY when using the --encrypt or --decrypt "
+ "options. During backup it is passed directly to the xtrabackup child "
+ "process. Try 'xtrabackup --help' for more details.",
+ (uchar*) &ibx_xtrabackup_encrypt_key,
+ (uchar*) &ibx_xtrabackup_encrypt_key, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-key-file", OPT_ENCRYPT_KEY_FILE, "This option instructs "
+ "xtrabackup to use the encryption key stored in the given "
+ "ENCRYPTION-KEY-FILE when using the --encrypt or --decrypt options.",
+ (uchar*) &ibx_xtrabackup_encrypt_key_file,
+ (uchar*) &ibx_xtrabackup_encrypt_key_file, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-threads", OPT_ENCRYPT_THREADS,
+ "This option specifies the number of worker threads that will be used "
+ "for parallel encryption. It is passed directly to the xtrabackup "
+ "child process. Try 'xtrabackup --help' for more details.",
+ (uchar*) &ibx_xtrabackup_encrypt_threads,
+ (uchar*) &ibx_xtrabackup_encrypt_threads,
+ 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
+
+ {"encrypt-chunk-size", OPT_ENCRYPT_CHUNK_SIZE,
+ "This option specifies the size of the internal working buffer for "
+ "each encryption thread, measured in bytes. It is passed directly to "
+ "the xtrabackup child process. Try 'xtrabackup --help' for more "
+ "details.",
+ (uchar*) &ibx_xtrabackup_encrypt_chunk_size,
+ (uchar*) &ibx_xtrabackup_encrypt_chunk_size,
+ 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
+
+ {"export", OPT_EXPORT, "This option is passed directly to xtrabackup's "
+ "--export option. It enables exporting individual tables for import "
+ "into another server. See the xtrabackup documentation for details.",
+ (uchar*) &ibx_xtrabackup_export, (uchar*) &ibx_xtrabackup_export,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"extra-lsndir", OPT_EXTRA_LSNDIR, "This option specifies the "
+ "directory in which to save an extra copy of the "
+ "\"xtrabackup_checkpoints\" file. The option accepts a string "
+ "argument. It is passed directly to xtrabackup's --extra-lsndir "
+ "option. See the xtrabackup documentation for details.",
+ (uchar*) &ibx_xtrabackup_extra_lsndir,
+ (uchar*) &ibx_xtrabackup_extra_lsndir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-basedir", OPT_INCREMENTAL_BASEDIR, "This option "
+ "specifies the directory containing the full backup that is the base "
+ "dataset for the incremental backup. The option accepts a string "
+ "argument. It is used with the --incremental option.",
+ (uchar*) &ibx_xtrabackup_incremental_basedir,
+ (uchar*) &ibx_xtrabackup_incremental_basedir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-dir", OPT_INCREMENTAL_DIR, "This option specifies the "
+ "directory where the incremental backup will be combined with the "
+ "full backup to make a new full backup. The option accepts a string "
+ "argument. It is used with the --incremental option.",
+ (uchar*) &ibx_xtrabackup_incremental_dir,
+ (uchar*) &ibx_xtrabackup_incremental_dir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-force-scan", OPT_INCREMENTAL_FORCE_SCAN,
+ "This option tells xtrabackup to perform a full scan of data files "
+ "when taking an incremental backup, even if changed page bitmap "
+ "data is available that would allow the backup without a full scan.",
+ (uchar*)&ibx_xtrabackup_incremental_force_scan,
+ (uchar*)&ibx_xtrabackup_incremental_force_scan, 0, GET_BOOL, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"log-copy-interval", OPT_LOG_COPY_INTERVAL, "This option specifies "
+ "the time interval, in milliseconds, between checks done by the log "
+ "copying thread.", (uchar*) &ibx_xtrabackup_log_copy_interval,
+ (uchar*) &ibx_xtrabackup_log_copy_interval,
+ 0, GET_LONG, REQUIRED_ARG, 1000, 0, LONG_MAX, 0, 1, 0},
+
+ {"incremental-lsn", OPT_INCREMENTAL, "This option specifies the log "
+ "sequence number (LSN) to use for the incremental backup. The option "
+ "accepts a string argument. It is used with the --incremental option. "
+ "It is used instead of specifying --incremental-basedir. For "
+ "databases created by MySQL and Percona Server 5.0-series versions, "
+ "specify the LSN as two 32-bit integers in high:low format. For "
+ "databases created in 5.1 and later, specify the LSN as a single "
+ "64-bit integer.",
+ (uchar*) &ibx_xtrabackup_incremental,
+ (uchar*) &ibx_xtrabackup_incremental,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"parallel", OPT_PARALLEL, "On backup, this option specifies the "
+ "number of threads the xtrabackup child process should use to back "
+ "up files concurrently. The option accepts an integer argument. It "
+ "is passed directly to xtrabackup's --parallel option. See the "
+ "xtrabackup documentation for details.",
+ (uchar*) &ibx_xtrabackup_parallel, (uchar*) &ibx_xtrabackup_parallel,
+ 0, GET_INT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0},
+
+ {"rebuild-indexes", OPT_REBUILD_INDEXES,
+ "This option only has effect when used together with the --apply-log "
+ "option and is passed directly to xtrabackup. When used, it makes "
+ "xtrabackup rebuild all secondary indexes after applying the log. "
+ "This option is normally used to prepare compact backups. See the "
+ "XtraBackup manual for more information.",
+ (uchar*) &ibx_xtrabackup_rebuild_indexes,
+ (uchar*) &ibx_xtrabackup_rebuild_indexes,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"rebuild-threads", OPT_REBUILD_THREADS,
+ "Use this number of threads to rebuild indexes in a compact backup. "
+ "Only has effect with --prepare and --rebuild-indexes.",
+ (uchar*) &ibx_xtrabackup_rebuild_threads,
+ (uchar*) &ibx_xtrabackup_rebuild_threads,
+ 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
+
+ {"stream", OPT_STREAM, "This option specifies the format in which to "
+ "do the streamed backup. The option accepts a string argument. The "
+ "backup will be done to STDOUT in the specified format. Currently, "
+ "the only supported formats are tar and xbstream. This option is "
+ "passed directly to xtrabackup's --stream option.",
+ (uchar*) &ibx_xtrabackup_stream_str,
+ (uchar*) &ibx_xtrabackup_stream_str, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"tables-file", OPT_TABLES_FILE, "This option specifies a file "
+ "containing a list of table names of the form database.table, one "
+ "per line. The option accepts a string argument. It is passed "
+ "directly to xtrabackup's --tables-file option.",
+ (uchar*) &ibx_xtrabackup_tables_file,
+ (uchar*) &ibx_xtrabackup_tables_file,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"throttle", OPT_THROTTLE, "This option specifies a number of I/O "
+ "operations (pairs of read+write) per second. It accepts an integer "
+ "argument. It is passed directly to xtrabackup's --throttle option.",
+ (uchar*) &ibx_xtrabackup_throttle, (uchar*) &ibx_xtrabackup_throttle,
+ 0, GET_LONG, REQUIRED_ARG, 0, 0, LONG_MAX, 0, 1, 0},
+
+ {"tmpdir", 't', "This option specifies the location where temporary "
+ "files will be stored. If the option is not specified, the default is "
+ "to use the value of tmpdir read from the server configuration.",
+ (uchar*) &ibx_opt_mysql_tmpdir,
+ (uchar*) &ibx_opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"use-memory", OPT_USE_MEMORY, "This option accepts a string argument "
+ "that specifies the amount of memory in bytes for xtrabackup to use "
+ "for crash recovery while preparing a backup. Multiples are supported "
+ "when a unit is provided (e.g. 1MB, 1GB). It is used only with the option "
+ "--apply-log. It is passed directly to xtrabackup's --use-memory "
+ "option. See the xtrabackup documentation for details.",
+ (uchar*) &ibx_xtrabackup_use_memory,
+ (uchar*) &ibx_xtrabackup_use_memory,
+ 0, GET_LL, REQUIRED_ARG, 100*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
+ 1024*1024L, 0},
+
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+
+static void usage(void)
+{
+ puts("Open source backup tool for InnoDB and XtraDB\n\
+\n\
+Copyright (C) 2009-2015 Percona LLC and/or its affiliates.\n\
+Portions Copyright (C) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.\n\
+\n\
+This program is free software; you can redistribute it and/or\n\
+modify it under the terms of the GNU General Public License\n\
+as published by the Free Software Foundation version 2\n\
+of the License.\n\
+\n\
+This program is distributed in the hope that it will be useful,\n\
+but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
+GNU General Public License for more details.\n\
+\n\
+You can download the full text of the license at http://www.gnu.org/licenses/gpl-2.0.txt\n\n");
+
+ puts("innobackupex - Non-blocking backup tool for InnoDB, XtraDB and HailDB databases\n\
+\n\
+SYNOPSIS\n\
+\n\
+innobackupex [--compress] [--compress-threads=NUMBER-OF-THREADS] [--compress-chunk-size=CHUNK-SIZE]\n\
+ [--encrypt=ENCRYPTION-ALGORITHM] [--encrypt-threads=NUMBER-OF-THREADS] [--encrypt-chunk-size=CHUNK-SIZE]\n\
+ [--encrypt-key=LITERAL-ENCRYPTION-KEY] | [--encrypt-key-file=MY.KEY]\n\
+ [--include=REGEXP] [--user=NAME]\n\
+ [--password=WORD] [--port=PORT] [--socket=SOCKET]\n\
+ [--no-timestamp] [--ibbackup=IBBACKUP-BINARY]\n\
+ [--slave-info] [--galera-info] [--stream=tar|xbstream]\n\
+ [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME]\n\
+ [--databases=LIST] [--no-lock] \n\
+ [--tmpdir=DIRECTORY] [--tables-file=FILE]\n\
+ [--history=NAME]\n\
+ [--incremental] [--incremental-basedir]\n\
+ [--incremental-dir] [--incremental-force-scan] [--incremental-lsn]\n\
+ [--incremental-history-name=NAME] [--incremental-history-uuid=UUID]\n\
+ [--close-files] [--compact] \n\
+ BACKUP-ROOT-DIR\n\
+\n\
+innobackupex --apply-log [--use-memory=B]\n\
+ [--defaults-file=MY.CNF]\n\
+ [--export] [--redo-only] [--ibbackup=IBBACKUP-BINARY]\n\
+ BACKUP-DIR\n\
+\n\
+innobackupex --copy-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\
+\n\
+innobackupex --move-back [--defaults-file=MY.CNF] [--defaults-group=GROUP-NAME] BACKUP-DIR\n\
+\n\
+innobackupex [--decompress] [--decrypt=ENCRYPTION-ALGORITHM]\n\
+ [--encrypt-key=LITERAL-ENCRYPTION-KEY] | [--encrypt-key-file=MY.KEY]\n\
+ [--parallel=NUMBER-OF-FORKS] BACKUP-DIR\n\
+\n\
+DESCRIPTION\n\
+\n\
+The first command line above makes a hot backup of a MySQL database.\n\
+By default it creates a backup directory (named by the current date\n\
+ and time) in the given backup root directory. With the --no-timestamp\n\
+option it does not create a time-stamped backup directory, but it puts\n\
+the backup in the given directory (which must not exist). This\n\
+command makes a complete backup of all MyISAM and InnoDB tables and\n\
+indexes in all databases or in all of the databases specified with the\n\
+--databases option. The created backup contains .frm, .MRG, .MYD,\n\
+.MYI, .MAD, .MAI, .TRG, .TRN, .ARM, .ARZ, .CSM, .CSV, .opt, .par, and\n\
+InnoDB data and log files. The MY.CNF options file defines the\n\
+location of the database. This command connects to the MySQL server\n\
+using the mysql client program, and runs xtrabackup as a child\n\
+process.\n\
+\n\
+The --apply-log command prepares a backup for starting a MySQL\n\
+server on the backup. This command recovers InnoDB data files as specified\n\
+in BACKUP-DIR/backup-my.cnf using BACKUP-DIR/xtrabackup_logfile,\n\
+and creates new InnoDB log files as specified in BACKUP-DIR/backup-my.cnf.\n\
+The BACKUP-DIR should be the path to a backup directory created by\n\
+xtrabackup. This command runs xtrabackup as a child process, but it does not \n\
+connect to the database server.\n\
+\n\
+The --copy-back command copies data, index, and log files\n\
+from the backup directory back to their original locations.\n\
+The MY.CNF options file defines the original location of the database.\n\
+The BACKUP-DIR is the path to a backup directory created by xtrabackup.\n\
+\n\
+The --move-back command is similar to --copy-back with the only difference that\n\
+it moves files to their original locations rather than copying them. As this\n\
+option removes backup files, it must be used with caution. It may be useful in\n\
+cases when there is not enough free disk space to copy files.\n\
+\n\
+The --decompress --decrypt command will decrypt and/or decompress a backup made\n\
+with the --compress and/or --encrypt options. When decrypting, the encryption\n\
+algorithm and key used when the backup was taken MUST be provided via the\n\
+specified options. --decrypt and --decompress may be used together to\n\
+completely normalize a previously compressed and encrypted backup. The\n\
+--parallel option will allow multiple files to be decrypted and/or decompressed\n\
+simultaneously. In order to decompress, the qpress utility MUST be installed\n\
+and accessible within the path. This process will remove the original\n\
+compressed/encrypted files and leave the results in the same location.\n\
+\n\
+On success the exit code of innobackupex is 0. A non-zero exit code\n\
+indicates an error.\n");
+ printf("Usage: [%s [--defaults-file=#] --backup | %s [--defaults-file=#] --prepare] [OPTIONS]\n", my_progname, my_progname);
+ my_print_help(ibx_long_options);
+}
+
+
+static
+my_bool
+ibx_get_one_option(int optid,
+ const struct my_option *opt __attribute__((unused)),
+ char *argument)
+{
+ switch(optid) {
+ case '?':
+ usage();
+ exit(0);
+ break;
+ case 'v':
+ msg("innobackupex version %s %s (%s) (revision id: %s)\n",
+ XTRABACKUP_VERSION,
+ SYSTEM_TYPE, MACHINE_TYPE, XTRABACKUP_REVISION);
+ exit(0);
+ break;
+ case OPT_HISTORY:
+ if (argument) {
+ opt_ibx_history = argument;
+ } else {
+ opt_ibx_history = "";
+ }
+ break;
+ case OPT_DECRYPT:
+ if (argument == NULL) {
+ ibx_msg("Missing --decrypt argument, must specify a "
+ "valid encryption algorithm.\n");
+ return(1);
+ }
+ opt_ibx_decrypt = true;
+ break;
+ case OPT_STREAM:
+ if (!strcasecmp(argument, "tar"))
+ xtrabackup_stream_fmt = XB_STREAM_FMT_TAR;
+ else if (!strcasecmp(argument, "xbstream"))
+ xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM;
+ else {
+ ibx_msg("Invalid --stream argument: %s\n", argument);
+ return 1;
+ }
+ xtrabackup_stream = TRUE;
+ break;
+ case OPT_COMPRESS:
+ if (argument == NULL)
+ xtrabackup_compress_alg = "quicklz";
+ else if (strcasecmp(argument, "quicklz"))
+ {
+ ibx_msg("Invalid --compress argument: %s\n", argument);
+ return 1;
+ }
+ xtrabackup_compress = TRUE;
+ break;
+ case OPT_ENCRYPT:
+ if (argument == NULL)
+ {
+ msg("Missing --encrypt argument, must specify a "
+ "valid encryption algorithm.\n");
+ return 1;
+ }
+ xtrabackup_encrypt = TRUE;
+ break;
+ case 'p':
+ if (argument)
+ {
+ char *start = argument;
+ my_free(opt_ibx_password);
+ opt_ibx_password= my_strdup(argument, MYF(MY_FAE));
+ /* Destroy argument */
+ while (*argument)
+ *argument++= 'x';
+ if (*start)
+ start[1]=0 ;
+ }
+ break;
+ }
+ return(0);
+}
+
+bool
+make_backup_dir()
+{
+ time_t t = time(NULL);
+ char buf[100];
+
+ if (!opt_ibx_notimestamp && !ibx_xtrabackup_stream_str) {
+ strftime(buf, sizeof(buf), "%Y-%m-%d_%H-%M-%S", localtime(&t));
+ ut_a(asprintf(&ibx_backup_directory, "%s/%s",
+ ibx_position_arg, buf) != -1);
+ } else {
+ ibx_backup_directory = strdup(ibx_position_arg);
+ }
+
+ if (!directory_exists(ibx_backup_directory, true)) {
+ return(false);
+ }
+
+ return(true);
+}
+
+bool
+ibx_handle_options(int *argc, char ***argv)
+{
+ int i, n_arguments;
+
+ if (handle_options(argc, argv, ibx_long_options, ibx_get_one_option)) {
+ return(false);
+ }
+
+ if (opt_ibx_apply_log) {
+ ibx_mode = IBX_MODE_APPLY_LOG;
+ } else if (opt_ibx_copy_back) {
+ ibx_mode = IBX_MODE_COPY_BACK;
+ } else if (opt_ibx_move_back) {
+ ibx_mode = IBX_MODE_MOVE_BACK;
+ } else if (opt_ibx_decrypt || opt_ibx_decompress) {
+ ibx_mode = IBX_MODE_DECRYPT_DECOMPRESS;
+ } else {
+ ibx_mode = IBX_MODE_BACKUP;
+ }
+
+ /* find and save position argument */
+ i = 0;
+ n_arguments = 0;
+ while (i < *argc) {
+ char *opt = (*argv)[i];
+
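+ /* Anything that is neither a "--" long option nor a two-character
+ "-x" short option is treated as the positional BACKUP-DIR argument. */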
+ if (strncmp(opt, "--", 2) != 0
+ && !(strlen(opt) == 2 && opt[0] == '-')) {
+ if (ibx_position_arg != NULL
+ && ibx_position_arg != opt) {
+ ibx_msg("Error: extra argument found %s\n",
+ opt);
+ }
+ ibx_position_arg = opt;
+ ++n_arguments;
+ }
+ ++i;
+ }
+
+ *argc -= n_arguments;
+ if (n_arguments > 1) {
+ return(false);
+ }
+
+ if (ibx_position_arg == NULL) {
+ ibx_msg("Missing argument\n");
+ return(false);
+ }
+
+ /* set argv[0] to be the program name */
+ --(*argv);
+ ++(*argc);
+
+ return(true);
+}
+
+/*********************************************************************//**
+Parse command-line options, connect to MySQL server,
+detect server capabilities, etc.
+@return true on success. */
+bool
+ibx_init()
+{
+ const char *run;
+
+ /*=====================*/
+ xtrabackup_copy_back = opt_ibx_copy_back;
+ xtrabackup_move_back = opt_ibx_move_back;
+ opt_galera_info = opt_ibx_galera_info;
+ opt_slave_info = opt_ibx_slave_info;
+ opt_no_lock = opt_ibx_no_lock;
+ opt_safe_slave_backup = opt_ibx_safe_slave_backup;
+ opt_rsync = opt_ibx_rsync;
+ opt_force_non_empty_dirs = opt_ibx_force_non_empty_dirs;
+ opt_noversioncheck = opt_ibx_noversioncheck;
+ opt_no_backup_locks = opt_ibx_no_backup_locks;
+ opt_decompress = opt_ibx_decompress;
+
+ opt_incremental_history_name = opt_ibx_incremental_history_name;
+ opt_incremental_history_uuid = opt_ibx_incremental_history_uuid;
+
+ opt_user = opt_ibx_user;
+ opt_password = opt_ibx_password;
+ opt_host = opt_ibx_host;
+ opt_defaults_group = opt_ibx_defaults_group;
+ opt_socket = opt_ibx_socket;
+ opt_port = opt_ibx_port;
+ opt_login_path = opt_ibx_login_path;
+
+ opt_lock_wait_query_type = opt_ibx_lock_wait_query_type;
+ opt_kill_long_query_type = opt_ibx_kill_long_query_type;
+
+ opt_decrypt_algo = opt_ibx_decrypt_algo;
+
+ opt_kill_long_queries_timeout = opt_ibx_kill_long_queries_timeout;
+ opt_lock_wait_timeout = opt_ibx_lock_wait_timeout;
+ opt_lock_wait_threshold = opt_ibx_lock_wait_threshold;
+ opt_debug_sleep_before_unlock = opt_ibx_debug_sleep_before_unlock;
+ opt_safe_slave_backup_timeout = opt_ibx_safe_slave_backup_timeout;
+
+ opt_history = opt_ibx_history;
+ opt_decrypt = opt_ibx_decrypt;
+
+ /* setup xtrabackup options */
+ xb_close_files = ibx_xb_close_files;
+ xtrabackup_compact = ibx_xtrabackup_compact;
+ xtrabackup_compress_alg = ibx_xtrabackup_compress_alg;
+ xtrabackup_compress_threads = ibx_xtrabackup_compress_threads;
+ xtrabackup_compress_chunk_size = ibx_xtrabackup_compress_chunk_size;
+ xtrabackup_encrypt_algo = ibx_xtrabackup_encrypt_algo;
+ xtrabackup_encrypt_key = ibx_xtrabackup_encrypt_key;
+ xtrabackup_encrypt_key_file = ibx_xtrabackup_encrypt_key_file;
+ xtrabackup_encrypt_threads = ibx_xtrabackup_encrypt_threads;
+ xtrabackup_encrypt_chunk_size = ibx_xtrabackup_encrypt_chunk_size;
+ xtrabackup_export = ibx_xtrabackup_export;
+ xtrabackup_extra_lsndir = ibx_xtrabackup_extra_lsndir;
+ xtrabackup_incremental_basedir = ibx_xtrabackup_incremental_basedir;
+ xtrabackup_incremental_dir = ibx_xtrabackup_incremental_dir;
+ xtrabackup_incremental_force_scan =
+ ibx_xtrabackup_incremental_force_scan;
+ xtrabackup_log_copy_interval = ibx_xtrabackup_log_copy_interval;
+ xtrabackup_incremental = ibx_xtrabackup_incremental;
+ xtrabackup_parallel = ibx_xtrabackup_parallel;
+ xtrabackup_rebuild_indexes = ibx_xtrabackup_rebuild_indexes;
+ xtrabackup_rebuild_threads = ibx_xtrabackup_rebuild_threads;
+ xtrabackup_stream_str = ibx_xtrabackup_stream_str;
+ xtrabackup_tables_file = ibx_xtrabackup_tables_file;
+ xtrabackup_throttle = ibx_xtrabackup_throttle;
+ opt_mysql_tmpdir = ibx_opt_mysql_tmpdir;
+ xtrabackup_use_memory = ibx_xtrabackup_use_memory;
+
+ if (!opt_ibx_incremental
+ && (xtrabackup_incremental
+ || xtrabackup_incremental_basedir
+ || opt_ibx_incremental_history_name
+ || opt_ibx_incremental_history_uuid)) {
+ ibx_msg("Error: --incremental-lsn, --incremental-basedir, "
+ "--incremental-history-name and "
+ "--incremental-history-uuid require the "
+ "--incremental option.\n");
+ return(false);
+ }
+
+ if (opt_ibx_databases != NULL) {
+ if (is_path_separator(*opt_ibx_databases)) {
+ xtrabackup_databases_file = opt_ibx_databases;
+ } else {
+ xtrabackup_databases = opt_ibx_databases;
+ }
+ }
+
+ /* --tables and --tables-file options are xtrabackup only */
+ ibx_partial_backup = (opt_ibx_include || opt_ibx_databases);
+
+ if (ibx_mode == IBX_MODE_BACKUP) {
+
+ if (!make_backup_dir()) {
+ return(false);
+ }
+ }
+
+ /* --binlog-info is xtrabackup only, so force
+ --binlog-info=ON, i.e. the behavior from before the feature was
+ implemented */
+ opt_binlog_info = BINLOG_INFO_ON;
+
+ switch (ibx_mode) {
+ case IBX_MODE_APPLY_LOG:
+ xtrabackup_prepare = TRUE;
+ if (opt_ibx_redo_only) {
+ xtrabackup_apply_log_only = TRUE;
+ }
+ xtrabackup_target_dir = ibx_position_arg;
+ run = "apply-log";
+ break;
+ case IBX_MODE_BACKUP:
+ xtrabackup_backup = TRUE;
+ xtrabackup_target_dir = ibx_backup_directory;
+ if (opt_ibx_include != NULL) {
+ xtrabackup_tables = opt_ibx_include;
+ }
+ run = "backup";
+ break;
+ case IBX_MODE_COPY_BACK:
+ xtrabackup_copy_back = TRUE;
+ xtrabackup_target_dir = ibx_position_arg;
+ run = "copy-back";
+ break;
+ case IBX_MODE_MOVE_BACK:
+ xtrabackup_move_back = TRUE;
+ xtrabackup_target_dir = ibx_position_arg;
+ run = "move-back";
+ break;
+ case IBX_MODE_DECRYPT_DECOMPRESS:
+ xtrabackup_decrypt_decompress = TRUE;
+ xtrabackup_target_dir = ibx_position_arg;
+ run = "decrypt and decompress";
+ break;
+ default:
+ ut_error;
+ }
+
+ ibx_msg("Starting the %s operation\n\n"
+ "IMPORTANT: Please check that the %s run completes "
+ "successfully.\n"
+ " At the end of a successful %s run innobackupex\n"
+ " prints \"completed OK!\".\n\n", run, run, run);
+
+
+ return(true);
+}
+
+void
+ibx_cleanup()
+{
+ free(ibx_backup_directory);
+}
diff --git a/extra/mariabackup/innobackupex.h b/extra/mariabackup/innobackupex.h
new file mode 100644
index 00000000000..e2ad9bd2511
--- /dev/null
+++ b/extra/mariabackup/innobackupex.h
@@ -0,0 +1,45 @@
+/******************************************************
+Copyright (c) 2011-2014 Percona LLC and/or its affiliates.
+
+Declarations for innobackupex.cc
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef INNOBACKUPEX_H
+#define INNOBACKUPEX_H
+
+#define INNOBACKUPEX_BIN_NAME "innobackupex"
+
+enum ibx_mode_t {
+ IBX_MODE_BACKUP,
+ IBX_MODE_APPLY_LOG,
+ IBX_MODE_COPY_BACK,
+ IBX_MODE_MOVE_BACK,
+ IBX_MODE_DECRYPT_DECOMPRESS
+};
+
+extern ibx_mode_t ibx_mode;
+
+bool
+ibx_handle_options(int *argc, char ***argv);
+
+bool
+ibx_init();
+
+void
+ibx_cleanup();
+
+#endif
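
The three functions declared above are the whole innobackupex compatibility
surface: parse the innobackupex-style options, translate them into xtrabackup
settings, then clean up. As a rough sketch (not part of this patch, and
omitting how the operation selected in ibx_mode is actually executed), a
wrapper binary might drive the interface like this:

    #include <stdlib.h>
    #include "innobackupex.h"

    int main(int argc, char **argv)
    {
            /* Rewrites argc/argv in place and records the positional
               BACKUP-DIR argument. */
            if (!ibx_handle_options(&argc, &argv))
                    return EXIT_FAILURE;

            /* Copies the opt_ibx_* values into the xtrabackup option
               variables and, for a backup run, creates the (optionally
               timestamped) target directory. */
            if (!ibx_init())
                    return EXIT_FAILURE;

            /* ... run the operation selected in ibx_mode ... */

            ibx_cleanup();
            return EXIT_SUCCESS;
    }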
diff --git a/extra/mariabackup/quicklz/quicklz.c b/extra/mariabackup/quicklz/quicklz.c
new file mode 100644
index 00000000000..3742129023a
--- /dev/null
+++ b/extra/mariabackup/quicklz/quicklz.c
@@ -0,0 +1,848 @@
+// Fast data compression library
+// Copyright (C) 2006-2011 Lasse Mikkel Reinhold
+// lar@quicklz.com
+//
+// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything
+// released into public must be open source) or under a commercial license if such
+// has been acquired (see http://www.quicklz.com/order.html). The commercial license
+// does not cover derived or ported versions created by third parties under GPL.
+
+// 1.5.0 final
+
+#include "quicklz.h"
+
+#if QLZ_VERSION_MAJOR != 1 || QLZ_VERSION_MINOR != 5 || QLZ_VERSION_REVISION != 0
+ #error quicklz.c and quicklz.h have different versions
+#endif
+
+#if (defined(__X86__) || defined(__i386__) || defined(i386) || defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || defined(_M_X64))
+ #define X86X64
+#endif
+
+#define MINOFFSET 2
+#define UNCONDITIONAL_MATCHLEN 6
+#define UNCOMPRESSED_END 4
+#define CWORD_LEN 4
+
+#if QLZ_COMPRESSION_LEVEL == 1 && defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0
+ #define OFFSET_BASE source
+ #define CAST (ui32)(size_t)
+#else
+ #define OFFSET_BASE 0
+ #define CAST
+#endif
+
+int qlz_get_setting(int setting)
+{
+ switch (setting)
+ {
+ case 0: return QLZ_COMPRESSION_LEVEL;
+ case 1: return sizeof(qlz_state_compress);
+ case 2: return sizeof(qlz_state_decompress);
+ case 3: return QLZ_STREAMING_BUFFER;
+#ifdef QLZ_MEMORY_SAFE
+ case 6: return 1;
+#else
+ case 6: return 0;
+#endif
+ case 7: return QLZ_VERSION_MAJOR;
+ case 8: return QLZ_VERSION_MINOR;
+ case 9: return QLZ_VERSION_REVISION;
+ }
+ return -1;
+}
+
+#if QLZ_COMPRESSION_LEVEL == 1
+static int same(const unsigned char *src, size_t n)
+{
+ while(n > 0 && *(src + n) == *src)
+ n--;
+ return n == 0 ? 1 : 0;
+}
+#endif
+
+static void reset_table_compress(qlz_state_compress *state)
+{
+ int i;
+ for(i = 0; i < QLZ_HASH_VALUES; i++)
+ {
+#if QLZ_COMPRESSION_LEVEL == 1
+ state->hash[i].offset = 0;
+#else
+ state->hash_counter[i] = 0;
+#endif
+ }
+}
+
+static void reset_table_decompress(qlz_state_decompress *state)
+{
+ int i;
+ (void)state;
+ (void)i;
+#if QLZ_COMPRESSION_LEVEL == 2
+ for(i = 0; i < QLZ_HASH_VALUES; i++)
+ {
+ state->hash_counter[i] = 0;
+ }
+#endif
+}
+
+static __inline ui32 hash_func(ui32 i)
+{
+#if QLZ_COMPRESSION_LEVEL == 2
+ return ((i >> 9) ^ (i >> 13) ^ i) & (QLZ_HASH_VALUES - 1);
+#else
+ return ((i >> 12) ^ i) & (QLZ_HASH_VALUES - 1);
+#endif
+}
+
+static __inline ui32 fast_read(void const *src, ui32 bytes)
+{
+#ifndef X86X64
+ unsigned char *p = (unsigned char*)src;
+ switch (bytes)
+ {
+ case 4:
+ return(*p | *(p + 1) << 8 | *(p + 2) << 16 | *(p + 3) << 24);
+ case 3:
+ return(*p | *(p + 1) << 8 | *(p + 2) << 16);
+ case 2:
+ return(*p | *(p + 1) << 8);
+ case 1:
+ return(*p);
+ }
+ return 0;
+#else
+ if (bytes >= 1 && bytes <= 4)
+ return *((ui32*)src);
+ else
+ return 0;
+#endif
+}
+
+static __inline ui32 hashat(const unsigned char *src)
+{
+ ui32 fetch, hash;
+ fetch = fast_read(src, 3);
+ hash = hash_func(fetch);
+ return hash;
+}
+
+static __inline void fast_write(ui32 f, void *dst, size_t bytes)
+{
+#ifndef X86X64
+ unsigned char *p = (unsigned char*)dst;
+
+ switch (bytes)
+ {
+ case 4:
+ *p = (unsigned char)f;
+ *(p + 1) = (unsigned char)(f >> 8);
+ *(p + 2) = (unsigned char)(f >> 16);
+ *(p + 3) = (unsigned char)(f >> 24);
+ return;
+ case 3:
+ *p = (unsigned char)f;
+ *(p + 1) = (unsigned char)(f >> 8);
+ *(p + 2) = (unsigned char)(f >> 16);
+ return;
+ case 2:
+ *p = (unsigned char)f;
+ *(p + 1) = (unsigned char)(f >> 8);
+ return;
+ case 1:
+ *p = (unsigned char)f;
+ return;
+ }
+#else
+ switch (bytes)
+ {
+ case 4:
+ *((ui32*)dst) = f;
+ return;
+ case 3:
+ *((ui32*)dst) = f;
+ return;
+ case 2:
+ *((ui16 *)dst) = (ui16)f;
+ return;
+ case 1:
+ *((unsigned char*)dst) = (unsigned char)f;
+ return;
+ }
+#endif
+}
+
+
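+// Stream header layout: 1 flag byte, then the compressed size and the
+// decompressed size, each stored in 1 byte (short header) or 4 bytes
+// (long header) depending on bit 1 of the flag byte.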
+size_t qlz_size_decompressed(const char *source)
+{
+ ui32 n, r;
+ n = (((*source) & 2) == 2) ? 4 : 1;
+ r = fast_read(source + 1 + n, n);
+ r = r & (0xffffffff >> ((4 - n)*8));
+ return r;
+}
+
+size_t qlz_size_compressed(const char *source)
+{
+ ui32 n, r;
+ n = (((*source) & 2) == 2) ? 4 : 1;
+ r = fast_read(source + 1, n);
+ r = r & (0xffffffff >> ((4 - n)*8));
+ return r;
+}
+
+size_t qlz_size_header(const char *source)
+{
+ size_t n = 2*((((*source) & 2) == 2) ? 4 : 1) + 1;
+ return n;
+}
+
+
+static __inline void memcpy_up(unsigned char *dst, const unsigned char *src, ui32 n)
+{
+ // Caution if modifying memcpy_up! Overlap of dst and src must be specially handled.
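+ // The X86X64 branch copies 4 bytes per step with a stride of MINOFFSET + 1
+ // bytes; it may write a few bytes past the requested length and relies on
+ // match offsets being greater than MINOFFSET, which the compressor and the
+ // QLZ_MEMORY_SAFE checks guarantee.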
+#ifndef X86X64
+ unsigned char *end = dst + n;
+ while(dst < end)
+ {
+ *dst = *src;
+ dst++;
+ src++;
+ }
+#else
+ ui32 f = 0;
+ do
+ {
+ *(ui32 *)(dst + f) = *(ui32 *)(src + f);
+ f += MINOFFSET + 1;
+ }
+ while (f < n);
+#endif
+}
+
+static __inline void update_hash(qlz_state_decompress *state, const unsigned char *s)
+{
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 hash;
+ hash = hashat(s);
+ state->hash[hash].offset = s;
+ state->hash_counter[hash] = 1;
+#elif QLZ_COMPRESSION_LEVEL == 2
+ ui32 hash;
+ unsigned char c;
+ hash = hashat(s);
+ c = state->hash_counter[hash];
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = s;
+ c++;
+ state->hash_counter[hash] = c;
+#endif
+ (void)state;
+ (void)s;
+}
+
+#if QLZ_COMPRESSION_LEVEL <= 2
+static void update_hash_upto(qlz_state_decompress *state, unsigned char **lh, const unsigned char *max)
+{
+ while(*lh < max)
+ {
+ (*lh)++;
+ update_hash(state, *lh);
+ }
+}
+#endif
+
+static size_t qlz_compress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_compress *state)
+{
+ const unsigned char *last_byte = source + size - 1;
+ const unsigned char *src = source;
+ unsigned char *cword_ptr = destination;
+ unsigned char *dst = destination + CWORD_LEN;
+ ui32 cword_val = 1U << 31;
+ const unsigned char *last_matchstart = last_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END;
+ ui32 fetch = 0;
+ unsigned int lits = 0;
+
+ (void) lits;
+
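+ // The 32-bit control word collects one flag per emitted token, pushed in
+ // at bit 31: 1 = match/back-reference, 0 = literal. The initial 1U << 31
+ // acts as a sentinel: once it has been shifted down to bit 0 the word is
+ // full and is flushed to the slot reserved at cword_ptr, and a new word
+ // is started.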
+ if(src <= last_matchstart)
+ fetch = fast_read(src, 3);
+
+ while(src <= last_matchstart)
+ {
+ if ((cword_val & 1) == 1)
+ {
+ // store uncompressed if compression ratio is too low
+ if (src > source + (size >> 1) && dst - destination > src - source - ((src - source) >> 5))
+ return 0;
+
+ fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
+
+ cword_ptr = dst;
+ dst += CWORD_LEN;
+ cword_val = 1U << 31;
+ fetch = fast_read(src, 3);
+ }
+#if QLZ_COMPRESSION_LEVEL == 1
+ {
+ const unsigned char *o;
+ ui32 hash, cached;
+
+ hash = hash_func(fetch);
+ cached = fetch ^ state->hash[hash].cache;
+ state->hash[hash].cache = fetch;
+
+ o = state->hash[hash].offset + OFFSET_BASE;
+ state->hash[hash].offset = CAST(src - OFFSET_BASE);
+
+#ifdef X86X64
+ if ((cached & 0xffffff) == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6))))
+ {
+ if(cached != 0)
+ {
+#else
+ if (cached == 0 && o != OFFSET_BASE && (src - o > MINOFFSET || (src == o + 1 && lits >= 3 && src > source + 3 && same(src - 3, 6))))
+ {
+ if (*(o + 3) != *(src + 3))
+ {
+#endif
+ hash <<= 4;
+ cword_val = (cword_val >> 1) | (1U << 31);
+ fast_write((3 - 2) | hash, dst, 2);
+ src += 3;
+ dst += 2;
+ }
+ else
+ {
+ const unsigned char *old_src = src;
+ size_t matchlen;
+ hash <<= 4;
+
+ cword_val = (cword_val >> 1) | (1U << 31);
+ src += 4;
+
+ if(*(o + (src - old_src)) == *src)
+ {
+ src++;
+ if(*(o + (src - old_src)) == *src)
+ {
+ size_t q = last_byte - UNCOMPRESSED_END - (src - 5) + 1;
+ size_t remaining = q > 255 ? 255 : q;
+ src++;
+ while(*(o + (src - old_src)) == *src && (size_t)(src - old_src) < remaining)
+ src++;
+ }
+ }
+
+ matchlen = src - old_src;
+ if (matchlen < 18)
+ {
+ fast_write((ui32)(matchlen - 2) | hash, dst, 2);
+ dst += 2;
+ }
+ else
+ {
+ fast_write((ui32)(matchlen << 16) | hash, dst, 3);
+ dst += 3;
+ }
+ }
+ fetch = fast_read(src, 3);
+ lits = 0;
+ }
+ else
+ {
+ lits++;
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+#ifdef X86X64
+ fetch = fast_read(src, 3);
+#else
+ fetch = (fetch >> 8 & 0xffff) | (*(src + 2) << 16);
+#endif
+ }
+ }
+#elif QLZ_COMPRESSION_LEVEL >= 2
+ {
+ const unsigned char *o, *offset2;
+ ui32 hash, matchlen, k, m, best_k = 0;
+ unsigned char c;
+ size_t remaining = (last_byte - UNCOMPRESSED_END - src + 1) > 255 ? 255 : (last_byte - UNCOMPRESSED_END - src + 1);
+ (void)best_k;
+
+
+ //hash = hashat(src);
+ fetch = fast_read(src, 3);
+ hash = hash_func(fetch);
+
+ c = state->hash_counter[hash];
+
+ offset2 = state->hash[hash].offset[0];
+ if(offset2 < src - MINOFFSET && c > 0 && ((fast_read(offset2, 3) ^ fetch) & 0xffffff) == 0)
+ {
+ matchlen = 3;
+ if(*(offset2 + matchlen) == *(src + matchlen))
+ {
+ matchlen = 4;
+ while(*(offset2 + matchlen) == *(src + matchlen) && matchlen < remaining)
+ matchlen++;
+ }
+ }
+ else
+ matchlen = 0;
+ for(k = 1; k < QLZ_POINTERS && c > k; k++)
+ {
+ o = state->hash[hash].offset[k];
+#if QLZ_COMPRESSION_LEVEL == 3
+ if(((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET)
+#elif QLZ_COMPRESSION_LEVEL == 2
+ if(*(src + matchlen) == *(o + matchlen) && ((fast_read(o, 3) ^ fetch) & 0xffffff) == 0 && o < src - MINOFFSET)
+#endif
+ {
+ m = 3;
+ while(*(o + m) == *(src + m) && m < remaining)
+ m++;
+#if QLZ_COMPRESSION_LEVEL == 3
+ if ((m > matchlen) || (m == matchlen && o > offset2))
+#elif QLZ_COMPRESSION_LEVEL == 2
+ if (m > matchlen)
+#endif
+ {
+ offset2 = o;
+ matchlen = m;
+ best_k = k;
+ }
+ }
+ }
+ o = offset2;
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src;
+ c++;
+ state->hash_counter[hash] = c;
+
+#if QLZ_COMPRESSION_LEVEL == 3
+ if(matchlen > 2 && src - o < 131071)
+ {
+ ui32 u;
+ size_t offset = src - o;
+
+ for(u = 1; u < matchlen; u++)
+ {
+ hash = hashat(src + u);
+ c = state->hash_counter[hash]++;
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src + u;
+ }
+
+ cword_val = (cword_val >> 1) | (1U << 31);
+ src += matchlen;
+
+ if(matchlen == 3 && offset <= 63)
+ {
+ *dst = (unsigned char)(offset << 2);
+ dst++;
+ }
+ else if (matchlen == 3 && offset <= 16383)
+ {
+ ui32 f = (ui32)((offset << 2) | 1);
+ fast_write(f, dst, 2);
+ dst += 2;
+ }
+ else if (matchlen <= 18 && offset <= 1023)
+ {
+ ui32 f = ((matchlen - 3) << 2) | ((ui32)offset << 6) | 2;
+ fast_write(f, dst, 2);
+ dst += 2;
+ }
+
+ else if(matchlen <= 33)
+ {
+ ui32 f = ((matchlen - 2) << 2) | ((ui32)offset << 7) | 3;
+ fast_write(f, dst, 3);
+ dst += 3;
+ }
+ else
+ {
+ ui32 f = ((matchlen - 3) << 7) | ((ui32)offset << 15) | 3;
+ fast_write(f, dst, 4);
+ dst += 4;
+ }
+ }
+ else
+ {
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+ }
+#elif QLZ_COMPRESSION_LEVEL == 2
+
+ if(matchlen > 2)
+ {
+ cword_val = (cword_val >> 1) | (1U << 31);
+ src += matchlen;
+
+ if (matchlen < 10)
+ {
+ ui32 f = best_k | ((matchlen - 2) << 2) | (hash << 5);
+ fast_write(f, dst, 2);
+ dst += 2;
+ }
+ else
+ {
+ ui32 f = best_k | (matchlen << 16) | (hash << 5);
+ fast_write(f, dst, 3);
+ dst += 3;
+ }
+ }
+ else
+ {
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+ }
+#endif
+ }
+#endif
+ }
+ while (src <= last_byte)
+ {
+ if ((cword_val & 1) == 1)
+ {
+ fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
+ cword_ptr = dst;
+ dst += CWORD_LEN;
+ cword_val = 1U << 31;
+ }
+#if QLZ_COMPRESSION_LEVEL < 3
+ if (src <= last_byte - 3)
+ {
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 hash, fetch;
+ fetch = fast_read(src, 3);
+ hash = hash_func(fetch);
+ state->hash[hash].offset = CAST(src - OFFSET_BASE);
+ state->hash[hash].cache = fetch;
+#elif QLZ_COMPRESSION_LEVEL == 2
+ ui32 hash;
+ unsigned char c;
+ hash = hashat(src);
+ c = state->hash_counter[hash];
+ state->hash[hash].offset[c & (QLZ_POINTERS - 1)] = src;
+ c++;
+ state->hash_counter[hash] = c;
+#endif
+ }
+#endif
+ *dst = *src;
+ src++;
+ dst++;
+ cword_val = (cword_val >> 1);
+ }
+
+ while((cword_val & 1) != 1)
+ cword_val = (cword_val >> 1);
+
+ fast_write((cword_val >> 1) | (1U << 31), cword_ptr, CWORD_LEN);
+
+ // min. size must be 9 bytes so that the qlz_size functions can take 9 bytes as argument
+ return dst - destination < 9 ? 9 : dst - destination;
+}
+
+static size_t qlz_decompress_core(const unsigned char *source, unsigned char *destination, size_t size, qlz_state_decompress *state, const unsigned char *history)
+{
+ const unsigned char *src = source + qlz_size_header((const char *)source);
+ unsigned char *dst = destination;
+ const unsigned char *last_destination_byte = destination + size - 1;
+ ui32 cword_val = 1;
+ const unsigned char *last_matchstart = last_destination_byte - UNCONDITIONAL_MATCHLEN - UNCOMPRESSED_END;
+ unsigned char *last_hashed = destination - 1;
+ const unsigned char *last_source_byte = source + qlz_size_compressed((const char *)source) - 1;
+ static const ui32 bitlut[16] = {4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
+
+ (void) last_source_byte;
+ (void) last_hashed;
+ (void) state;
+ (void) history;
+
+ for(;;)
+ {
+ ui32 fetch;
+
+ if (cword_val == 1)
+ {
+#ifdef QLZ_MEMORY_SAFE
+ if(src + CWORD_LEN - 1 > last_source_byte)
+ return 0;
+#endif
+ cword_val = fast_read(src, CWORD_LEN);
+ src += CWORD_LEN;
+ }
+
+#ifdef QLZ_MEMORY_SAFE
+ if(src + 4 - 1 > last_source_byte)
+ return 0;
+#endif
+
+ fetch = fast_read(src, 4);
+
+ if ((cword_val & 1) == 1)
+ {
+ ui32 matchlen;
+ const unsigned char *offset2;
+
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 hash;
+ cword_val = cword_val >> 1;
+ hash = (fetch >> 4) & 0xfff;
+ offset2 = (const unsigned char *)(size_t)state->hash[hash].offset;
+
+ if((fetch & 0xf) != 0)
+ {
+ matchlen = (fetch & 0xf) + 2;
+ src += 2;
+ }
+ else
+ {
+ matchlen = *(src + 2);
+ src += 3;
+ }
+
+#elif QLZ_COMPRESSION_LEVEL == 2
+ ui32 hash;
+ unsigned char c;
+ cword_val = cword_val >> 1;
+ hash = (fetch >> 5) & 0x7ff;
+ c = (unsigned char)(fetch & 0x3);
+ offset2 = state->hash[hash].offset[c];
+
+ if((fetch & (28)) != 0)
+ {
+ matchlen = ((fetch >> 2) & 0x7) + 2;
+ src += 2;
+ }
+ else
+ {
+ matchlen = *(src + 2);
+ src += 3;
+ }
+
+#elif QLZ_COMPRESSION_LEVEL == 3
+ ui32 offset;
+ cword_val = cword_val >> 1;
+ if ((fetch & 3) == 0)
+ {
+ offset = (fetch & 0xff) >> 2;
+ matchlen = 3;
+ src++;
+ }
+ else if ((fetch & 2) == 0)
+ {
+ offset = (fetch & 0xffff) >> 2;
+ matchlen = 3;
+ src += 2;
+ }
+ else if ((fetch & 1) == 0)
+ {
+ offset = (fetch & 0xffff) >> 6;
+ matchlen = ((fetch >> 2) & 15) + 3;
+ src += 2;
+ }
+ else if ((fetch & 127) != 3)
+ {
+ offset = (fetch >> 7) & 0x1ffff;
+ matchlen = ((fetch >> 2) & 0x1f) + 2;
+ src += 3;
+ }
+ else
+ {
+ offset = (fetch >> 15);
+ matchlen = ((fetch >> 7) & 255) + 3;
+ src += 4;
+ }
+
+ offset2 = dst - offset;
+#endif
+
+#ifdef QLZ_MEMORY_SAFE
+ if(offset2 < history || offset2 > dst - MINOFFSET - 1)
+ return 0;
+
+ if(matchlen > (ui32)(last_destination_byte - dst - UNCOMPRESSED_END + 1))
+ return 0;
+#endif
+
+ memcpy_up(dst, offset2, matchlen);
+ dst += matchlen;
+
+#if QLZ_COMPRESSION_LEVEL <= 2
+ update_hash_upto(state, &last_hashed, dst - matchlen);
+ last_hashed = dst - 1;
+#endif
+ }
+ else
+ {
+ if (dst < last_matchstart)
+ {
+ unsigned int n = bitlut[cword_val & 0xf];
+#ifdef X86X64
+ *(ui32 *)dst = *(ui32 *)src;
+#else
+ memcpy_up(dst, src, 4);
+#endif
+ cword_val = cword_val >> n;
+ dst += n;
+ src += n;
+#if QLZ_COMPRESSION_LEVEL <= 2
+ update_hash_upto(state, &last_hashed, dst - 3);
+#endif
+ }
+ else
+ {
+ while(dst <= last_destination_byte)
+ {
+ if (cword_val == 1)
+ {
+ src += CWORD_LEN;
+ cword_val = 1U << 31;
+ }
+#ifdef QLZ_MEMORY_SAFE
+ if(src >= last_source_byte + 1)
+ return 0;
+#endif
+ *dst = *src;
+ dst++;
+ src++;
+ cword_val = cword_val >> 1;
+ }
+
+#if QLZ_COMPRESSION_LEVEL <= 2
+ update_hash_upto(state, &last_hashed, last_destination_byte - 3); // todo, use constant
+#endif
+ return size;
+ }
+
+ }
+ }
+}
+
+size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state)
+{
+ size_t r;
+ ui32 compressed;
+ size_t base;
+
+ if(size == 0 || size > 0xffffffff - 400)
+ return 0;
+
+ if(size < 216)
+ base = 3;
+ else
+ base = 9;
+
+#if QLZ_STREAMING_BUFFER > 0
+ if (state->stream_counter + size - 1 >= QLZ_STREAMING_BUFFER)
+#endif
+ {
+ reset_table_compress(state);
+ r = base + qlz_compress_core((const unsigned char *)source, (unsigned char*)destination + base, size, state);
+#if QLZ_STREAMING_BUFFER > 0
+ reset_table_compress(state);
+#endif
+ if(r == base)
+ {
+ memcpy(destination + base, source, size);
+ r = size + base;
+ compressed = 0;
+ }
+ else
+ {
+ compressed = 1;
+ }
+ state->stream_counter = 0;
+ }
+#if QLZ_STREAMING_BUFFER > 0
+ else
+ {
+ unsigned char *src = state->stream_buffer + state->stream_counter;
+
+ memcpy(src, source, size);
+ r = base + qlz_compress_core(src, (unsigned char*)destination + base, size, state);
+
+ if(r == base)
+ {
+ memcpy(destination + base, src, size);
+ r = size + base;
+ compressed = 0;
+ reset_table_compress(state);
+ }
+ else
+ {
+ compressed = 1;
+ }
+ state->stream_counter += size;
+ }
+#endif
+ if(base == 3)
+ {
+ *destination = (unsigned char)(0 | compressed);
+ *(destination + 1) = (unsigned char)r;
+ *(destination + 2) = (unsigned char)size;
+ }
+ else
+ {
+ *destination = (unsigned char)(2 | compressed);
+ fast_write((ui32)r, destination + 1, 4);
+ fast_write((ui32)size, destination + 5, 4);
+ }
+
+ *destination |= (QLZ_COMPRESSION_LEVEL << 2);
+ *destination |= (1 << 6);
+ *destination |= ((QLZ_STREAMING_BUFFER == 0 ? 0 : (QLZ_STREAMING_BUFFER == 100000 ? 1 : (QLZ_STREAMING_BUFFER == 1000000 ? 2 : 3))) << 4);
+
+// 76543210
+// 01SSLLHC
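+//
+// C  (bit 0)    : 1 = data stored compressed, 0 = stored uncompressed
+// H  (bit 1)    : 1 = long (9-byte) header, 0 = short (3-byte) header
+// LL (bits 2-3) : QLZ_COMPRESSION_LEVEL the data was compressed with
+// SS (bits 4-5) : QLZ_STREAMING_BUFFER setting (0, 100000, 1000000, other)
+// 01 (bits 6-7) : constant marker bits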
+
+ return r;
+}
+
+size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state)
+{
+ size_t dsiz = qlz_size_decompressed(source);
+
+#if QLZ_STREAMING_BUFFER > 0
+ if (state->stream_counter + qlz_size_decompressed(source) - 1 >= QLZ_STREAMING_BUFFER)
+#endif
+ {
+ if((*source & 1) == 1)
+ {
+ reset_table_decompress(state);
+ dsiz = qlz_decompress_core((const unsigned char *)source, (unsigned char *)destination, dsiz, state, (const unsigned char *)destination);
+ }
+ else
+ {
+ memcpy(destination, source + qlz_size_header(source), dsiz);
+ }
+ state->stream_counter = 0;
+ reset_table_decompress(state);
+ }
+#if QLZ_STREAMING_BUFFER > 0
+ else
+ {
+ unsigned char *dst = state->stream_buffer + state->stream_counter;
+ if((*source & 1) == 1)
+ {
+ dsiz = qlz_decompress_core((const unsigned char *)source, dst, dsiz, state, (const unsigned char *)state->stream_buffer);
+ }
+ else
+ {
+ memcpy(dst, source + qlz_size_header(source), dsiz);
+ reset_table_decompress(state);
+ }
+ memcpy(destination, dst, dsiz);
+ state->stream_counter += dsiz;
+ }
+#endif
+ return dsiz;
+}
+
diff --git a/extra/mariabackup/quicklz/quicklz.h b/extra/mariabackup/quicklz/quicklz.h
new file mode 100644
index 00000000000..6ffe00f3a91
--- /dev/null
+++ b/extra/mariabackup/quicklz/quicklz.h
@@ -0,0 +1,144 @@
+#ifndef QLZ_HEADER
+#define QLZ_HEADER
+
+// Fast data compression library
+// Copyright (C) 2006-2011 Lasse Mikkel Reinhold
+// lar@quicklz.com
+//
+// QuickLZ can be used for free under the GPL 1, 2 or 3 license (where anything
+// released into public must be open source) or under a commercial license if such
+// has been acquired (see http://www.quicklz.com/order.html). The commercial license
+// does not cover derived or ported versions created by third parties under GPL.
+
+// You can edit the following user settings. Data must be decompressed with the
+// same settings of QLZ_COMPRESSION_LEVEL and QLZ_STREAMING_BUFFER that it was
+// compressed with (see manual). If QLZ_STREAMING_BUFFER > 0, scratch buffers must
+// initially be zeroed out (see manual). The first #ifndef makes it possible to
+// define the settings from the outside, e.g. on the compiler command line.
+
+// 1.5.0 final
+
+#ifndef QLZ_COMPRESSION_LEVEL
+ #define QLZ_COMPRESSION_LEVEL 1
+ //#define QLZ_COMPRESSION_LEVEL 2
+ //#define QLZ_COMPRESSION_LEVEL 3
+
+ #define QLZ_STREAMING_BUFFER 0
+ //#define QLZ_STREAMING_BUFFER 100000
+ //#define QLZ_STREAMING_BUFFER 1000000
+
+ //#define QLZ_MEMORY_SAFE
+#endif
+
+#define QLZ_VERSION_MAJOR 1
+#define QLZ_VERSION_MINOR 5
+#define QLZ_VERSION_REVISION 0
+
+// Using size_t, memset() and memcpy()
+#include <string.h>
+
+// Verify compression level
+#if QLZ_COMPRESSION_LEVEL != 1 && QLZ_COMPRESSION_LEVEL != 2 && QLZ_COMPRESSION_LEVEL != 3
+#error QLZ_COMPRESSION_LEVEL must be 1, 2 or 3
+#endif
+
+typedef unsigned int ui32;
+typedef unsigned short int ui16;
+
+// Decrease QLZ_POINTERS for level 3 to increase compression speed. Do not touch any other values!
+#if QLZ_COMPRESSION_LEVEL == 1
+#define QLZ_POINTERS 1
+#define QLZ_HASH_VALUES 4096
+#elif QLZ_COMPRESSION_LEVEL == 2
+#define QLZ_POINTERS 4
+#define QLZ_HASH_VALUES 2048
+#elif QLZ_COMPRESSION_LEVEL == 3
+#define QLZ_POINTERS 16
+#define QLZ_HASH_VALUES 4096
+#endif
+
+// Detect if pointer size is 64-bit. It's not fatal if some 64-bit target is not detected because this is only for adding an optional 64-bit optimization.
+#if defined _LP64 || defined __LP64__ || defined __64BIT__ || _ADDR64 || defined _WIN64 || defined __arch64__ || __WORDSIZE == 64 || (defined __sparc && defined __sparcv9) || defined __x86_64 || defined __amd64 || defined __x86_64__ || defined _M_X64 || defined _M_IA64 || defined __ia64 || defined __IA64__
+ #define QLZ_PTR_64
+#endif
+
+// hash entry
+typedef struct
+{
+#if QLZ_COMPRESSION_LEVEL == 1
+ ui32 cache;
+#if defined QLZ_PTR_64 && QLZ_STREAMING_BUFFER == 0
+ unsigned int offset;
+#else
+ const unsigned char *offset;
+#endif
+#else
+ const unsigned char *offset[QLZ_POINTERS];
+#endif
+
+} qlz_hash_compress;
+
+typedef struct
+{
+#if QLZ_COMPRESSION_LEVEL == 1
+ const unsigned char *offset;
+#else
+ const unsigned char *offset[QLZ_POINTERS];
+#endif
+} qlz_hash_decompress;
+
+
+// states
+typedef struct
+{
+ #if QLZ_STREAMING_BUFFER > 0
+ unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
+ #endif
+ size_t stream_counter;
+ qlz_hash_compress hash[QLZ_HASH_VALUES];
+ unsigned char hash_counter[QLZ_HASH_VALUES];
+} qlz_state_compress;
+
+
+#if QLZ_COMPRESSION_LEVEL == 1 || QLZ_COMPRESSION_LEVEL == 2
+ typedef struct
+ {
+#if QLZ_STREAMING_BUFFER > 0
+ unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
+#endif
+ qlz_hash_decompress hash[QLZ_HASH_VALUES];
+ unsigned char hash_counter[QLZ_HASH_VALUES];
+ size_t stream_counter;
+ } qlz_state_decompress;
+#elif QLZ_COMPRESSION_LEVEL == 3
+ typedef struct
+ {
+#if QLZ_STREAMING_BUFFER > 0
+ unsigned char stream_buffer[QLZ_STREAMING_BUFFER];
+#endif
+#if QLZ_COMPRESSION_LEVEL <= 2
+ qlz_hash_decompress hash[QLZ_HASH_VALUES];
+#endif
+ size_t stream_counter;
+ } qlz_state_decompress;
+#endif
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+// Public functions of QuickLZ
+size_t qlz_size_decompressed(const char *source);
+size_t qlz_size_compressed(const char *source);
+size_t qlz_compress(const void *source, char *destination, size_t size, qlz_state_compress *state);
+size_t qlz_decompress(const char *source, void *destination, qlz_state_decompress *state);
+int qlz_get_setting(int setting);
+size_t qlz_size_header(const char *source);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
+
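
The public functions above follow the usual QuickLZ calling convention:
allocate (and, if QLZ_STREAMING_BUFFER > 0, zero) the scratch states, compress
into a buffer that is at least 400 bytes larger than the input, and obtain the
output size of a compressed block from its header. A minimal round-trip sketch
(not part of this patch; the 400-byte slack follows the QuickLZ manual):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "quicklz.h"

    int main(void)
    {
            const char src[] = "hello hello hello hello hello hello hello";
            size_t src_len = sizeof(src);

            /* calloc also satisfies the "initially zeroed" requirement that
               applies when QLZ_STREAMING_BUFFER > 0. */
            qlz_state_compress *cstate = calloc(1, sizeof(qlz_state_compress));
            qlz_state_decompress *dstate = calloc(1, sizeof(qlz_state_decompress));

            char *compressed = malloc(src_len + 400);  /* worst case */
            size_t clen = qlz_compress(src, compressed, src_len, cstate);

            char *restored = malloc(qlz_size_decompressed(compressed));
            size_t dlen = qlz_decompress(compressed, restored, dstate);

            printf("%zu -> %zu -> %zu bytes, intact: %d\n", src_len, clen, dlen,
                   dlen == src_len && memcmp(src, restored, src_len) == 0);

            free(cstate); free(dstate); free(compressed); free(restored);
            return 0;
    }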
diff --git a/extra/mariabackup/read_filt.cc b/extra/mariabackup/read_filt.cc
new file mode 100644
index 00000000000..8ebc735e99e
--- /dev/null
+++ b/extra/mariabackup/read_filt.cc
@@ -0,0 +1,206 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2012 Percona Inc.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Data file read filter implementation */
+
+#include "read_filt.h"
+#include "common.h"
+#include "fil_cur.h"
+#include "xtrabackup.h"
+
+/****************************************************************//**
+Perform read filter context initialization that is common to all read
+filters. */
+static
+void
+common_init(
+/*========*/
+ xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
+ const xb_fil_cur_t* cursor) /*!<in: file cursor */
+{
+ ctxt->offset = 0;
+ ctxt->data_file_size = cursor->statinfo.st_size;
+ ctxt->buffer_capacity = cursor->buf_size;
+ ctxt->page_size = cursor->page_size;
+}
+
+/****************************************************************//**
+Initialize the pass-through read filter. */
+static
+void
+rf_pass_through_init(
+/*=================*/
+ xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
+ const xb_fil_cur_t* cursor, /*!<in: file cursor */
+ ulint space_id __attribute__((unused)))
+ /*!<in: space id we are reading */
+{
+ common_init(ctxt, cursor);
+}
+
+/****************************************************************//**
+Get the next batch of pages for the pass-through read filter. */
+static
+void
+rf_pass_through_get_next_batch(
+/*===========================*/
+ xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
+ context */
+ ib_int64_t* read_batch_start, /*!<out: starting read
+ offset in bytes for the
+ next batch of pages */
+ ib_int64_t* read_batch_len) /*!<out: length in
+ bytes of the next batch
+ of pages */
+{
+ *read_batch_start = ctxt->offset;
+ *read_batch_len = ctxt->data_file_size - ctxt->offset;
+
+ if (*read_batch_len > ctxt->buffer_capacity) {
+ *read_batch_len = ctxt->buffer_capacity;
+ }
+
+ ctxt->offset += *read_batch_len;
+}
+
+/****************************************************************//**
+Deinitialize the pass-through read filter. */
+static
+void
+rf_pass_through_deinit(
+/*===================*/
+ xb_read_filt_ctxt_t* ctxt __attribute__((unused)))
+ /*!<in: read filter context */
+{
+}
+
+/****************************************************************//**
+Initialize the changed page bitmap-based read filter. Assumes that
+the bitmap is already set up in changed_page_bitmap. */
+static
+void
+rf_bitmap_init(
+/*===========*/
+ xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
+ context */
+ const xb_fil_cur_t* cursor, /*!<in: read cursor */
+ ulint space_id) /*!<in: space id */
+{
+ common_init(ctxt, cursor);
+ ctxt->bitmap_range = xb_page_bitmap_range_init(changed_page_bitmap,
+ space_id);
+ ctxt->filter_batch_end = 0;
+}
+
+/****************************************************************//**
+Get the next batch of pages for the bitmap read filter. */
+static
+void
+rf_bitmap_get_next_batch(
+/*=====================*/
+ xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
+ context */
+ ib_int64_t* read_batch_start, /*!<out: starting read
+ offset in bytes for the
+ next batch of pages */
+ ib_int64_t* read_batch_len) /*!<out: length in
+ bytes of the next batch
+ of pages */
+{
+ ulint start_page_id;
+
+ start_page_id = ctxt->offset / ctxt->page_size;
+
+ xb_a (ctxt->offset % ctxt->page_size == 0);
+
+ if (start_page_id == ctxt->filter_batch_end) {
+
+ /* Used up all the previous bitmap range, get some more */
+ ulint next_page_id;
+
+ /* Find the next changed page using the bitmap */
+ next_page_id = xb_page_bitmap_range_get_next_bit
+ (ctxt->bitmap_range, TRUE);
+
+ if (next_page_id == ULINT_UNDEFINED) {
+ *read_batch_len = 0;
+ return;
+ }
+
+ ctxt->offset = next_page_id * ctxt->page_size;
+
+ /* Find the end of the current changed page block by searching
+ for the next cleared bitmap bit */
+ ctxt->filter_batch_end
+ = xb_page_bitmap_range_get_next_bit(ctxt->bitmap_range,
+ FALSE);
+ xb_a(next_page_id < ctxt->filter_batch_end);
+ }
+
+ *read_batch_start = ctxt->offset;
+ if (ctxt->filter_batch_end == ULINT_UNDEFINED) {
+ /* No more cleared bits in the bitmap, need to copy all the
+ remaining pages. */
+ *read_batch_len = ctxt->data_file_size - ctxt->offset;
+ } else {
+ *read_batch_len = ctxt->filter_batch_end * ctxt->page_size
+ - ctxt->offset;
+ }
+
+ /* If the page block is larger than the buffer capacity, limit it to
+ buffer capacity. The subsequent invocations will continue returning
+ the current block in buffer-sized pieces until ctxt->filter_batch_end
+ is reached, triggering the next bitmap query. */
+ if (*read_batch_len > ctxt->buffer_capacity) {
+ *read_batch_len = ctxt->buffer_capacity;
+ }
+
+ ctxt->offset += *read_batch_len;
+ xb_a (ctxt->offset % ctxt->page_size == 0);
+ xb_a (*read_batch_start % ctxt->page_size == 0);
+ xb_a (*read_batch_len % ctxt->page_size == 0);
+}
+
+/****************************************************************//**
+Deinitialize the changed page bitmap-based read filter. */
+static
+void
+rf_bitmap_deinit(
+/*=============*/
+ xb_read_filt_ctxt_t* ctxt) /*!<in/out: read filter context */
+{
+ xb_page_bitmap_range_deinit(ctxt->bitmap_range);
+}
+
+/* The pass-through read filter */
+xb_read_filt_t rf_pass_through = {
+ &rf_pass_through_init,
+ &rf_pass_through_get_next_batch,
+ &rf_pass_through_deinit
+};
+
+/* The changed page bitmap-based read filter */
+xb_read_filt_t rf_bitmap = {
+ &rf_bitmap_init,
+ &rf_bitmap_get_next_batch,
+ &rf_bitmap_deinit
+};
diff --git a/extra/mariabackup/read_filt.h b/extra/mariabackup/read_filt.h
new file mode 100644
index 00000000000..73fef06a288
--- /dev/null
+++ b/extra/mariabackup/read_filt.h
@@ -0,0 +1,62 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2012 Percona Inc.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Data file read filter interface */
+
+#ifndef XB_READ_FILT_H
+#define XB_READ_FILT_H
+
+#include "changed_page_bitmap.h"
+
+struct xb_fil_cur_t;
+
+/* The read filter context */
+struct xb_read_filt_ctxt_t {
+ ib_int64_t offset; /*!< current file offset */
+ ib_int64_t data_file_size; /*!< data file size */
+ ib_int64_t buffer_capacity;/*!< read buffer capacity */
+ ulint space_id; /*!< space id */
+ /* The following fields are used only in the bitmap filter */
+ /* Move these to a union if any other filters are added in the future */
+ xb_page_bitmap_range *bitmap_range; /*!< changed page bitmap range
+ iterator for space_id */
+ ulint page_size; /*!< page size */
+ ulint filter_batch_end;/*!< the ending page id of the
+ current changed page block in
+ the bitmap */
+};
+
+/* The read filter */
+struct xb_read_filt_t {
+ void (*init)(xb_read_filt_ctxt_t* ctxt,
+ const xb_fil_cur_t* cursor,
+ ulint space_id);
+ void (*get_next_batch)(xb_read_filt_ctxt_t* ctxt,
+ ib_int64_t* read_batch_start,
+ ib_int64_t* read_batch_len);
+ void (*deinit)(xb_read_filt_ctxt_t* ctxt);
+};
+
+extern xb_read_filt_t rf_pass_through;
+extern xb_read_filt_t rf_bitmap;
+
+#endif
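
The struct above is a small strategy interface: the backup copy loop
initialises a filter context for the file it is copying, repeatedly asks for
the next byte range to read, and stops when a zero-length batch is returned.
A sketch of such a loop (not part of this patch; copy_range() and the cursor
argument are placeholders for the caller's own code):

    /* Hypothetical helper that reads and writes 'len' bytes starting at
       file offset 'start'. */
    extern void copy_range(const xb_fil_cur_t *cursor,
                           ib_int64_t start, ib_int64_t len);

    static void
    copy_with_filter(xb_read_filt_t *filter, const xb_fil_cur_t *cursor,
                     ulint space_id)
    {
            xb_read_filt_ctxt_t ctxt;
            ib_int64_t start;
            ib_int64_t len;

            filter->init(&ctxt, cursor, space_id);

            for (;;) {
                    filter->get_next_batch(&ctxt, &start, &len);
                    if (len == 0) {
                            break;  /* nothing left to copy for this file */
                    }
                    copy_range(cursor, start, len);
            }

            filter->deinit(&ctxt);
    }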
diff --git a/extra/mariabackup/version_check.pl b/extra/mariabackup/version_check.pl
new file mode 100644
index 00000000000..865e2eacb4a
--- /dev/null
+++ b/extra/mariabackup/version_check.pl
@@ -0,0 +1,1373 @@
+use warnings FATAL => 'all';
+use strict;
+use English qw(-no_match_vars);
+use POSIX "strftime";
+
+my @required_perl_version = (5, 0, 5);
+my $required_perl_version_old_style = 5.005;
+
+# check existence of DBD::mysql module
+eval {
+ require DBD::mysql;
+};
+my $dbd_mysql_installed = $EVAL_ERROR ? 0 : 1;
+
+my $now;
+my %mysql;
+my $prefix = "version_check";
+
+
+# ###########################################################################
+# HTTPMicro package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at,
+# lib/HTTPMicro.pm
+# t/lib/HTTPMicro.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+
+package HTTPMicro;
+BEGIN {
+ $HTTPMicro::VERSION = '0.001';
+}
+use strict;
+use warnings;
+
+use Carp ();
+
+
+my @attributes;
+BEGIN {
+ @attributes = qw(agent timeout);
+ no strict 'refs';
+ for my $accessor ( @attributes ) {
+ *{$accessor} = sub {
+ @_ > 1 ? $_[0]->{$accessor} = $_[1] : $_[0]->{$accessor};
+ };
+ }
+}
+
+sub new {
+ my($class, %args) = @_;
+ (my $agent = $class) =~ s{::}{-}g;
+ my $self = {
+ agent => $agent . "/" . ($class->VERSION || 0),
+ timeout => 60,
+ };
+ for my $key ( @attributes ) {
+ $self->{$key} = $args{$key} if exists $args{$key}
+ }
+ return bless $self, $class;
+}
+
+my %DefaultPort = (
+ http => 80,
+ https => 443,
+);
+
+sub request {
+ my ($self, $method, $url, $args) = @_;
+ @_ == 3 || (@_ == 4 && ref $args eq 'HASH')
+ or Carp::croak(q/Usage: $http->request(METHOD, URL, [HASHREF])/);
+ $args ||= {}; # we keep some state in this during _request
+
+ my $response;
+ for ( 0 .. 1 ) {
+ $response = eval { $self->_request($method, $url, $args) };
+ last unless $@ && $method eq 'GET'
+ && $@ =~ m{^(?:Socket closed|Unexpected end)};
+ }
+
+ if (my $e = "$@") {
+ $response = {
+ success => q{},
+ status => 599,
+ reason => 'Internal Exception',
+ content => $e,
+ headers => {
+ 'content-type' => 'text/plain',
+ 'content-length' => length $e,
+ }
+ };
+ }
+ return $response;
+}
+
+sub _request {
+ my ($self, $method, $url, $args) = @_;
+
+ my ($scheme, $host, $port, $path_query) = $self->_split_url($url);
+
+ my $request = {
+ method => $method,
+ scheme => $scheme,
+ host_port => ($port == $DefaultPort{$scheme} ? $host : "$host:$port"),
+ uri => $path_query,
+ headers => {},
+ };
+
+ my $handle = HTTPMicro::Handle->new(timeout => $self->{timeout});
+
+ $handle->connect($scheme, $host, $port);
+
+ $self->_prepare_headers_and_cb($request, $args);
+ $handle->write_request_header(@{$request}{qw/method uri headers/});
+ $handle->write_content_body($request) if $request->{content};
+
+ my $response;
+ do { $response = $handle->read_response_header }
+ until (substr($response->{status},0,1) ne '1');
+
+ if (!($method eq 'HEAD' || $response->{status} =~ /^[23]04/)) {
+ $response->{content} = '';
+ $handle->read_content_body(sub { $_[1]->{content} .= $_[0] }, $response);
+ }
+
+ $handle->close;
+ $response->{success} = substr($response->{status},0,1) eq '2';
+ return $response;
+}
+
+sub _prepare_headers_and_cb {
+ my ($self, $request, $args) = @_;
+
+ for ($args->{headers}) {
+ next unless defined;
+ while (my ($k, $v) = each %$_) {
+ $request->{headers}{lc $k} = $v;
+ }
+ }
+ $request->{headers}{'host'} = $request->{host_port};
+ $request->{headers}{'connection'} = "close";
+ $request->{headers}{'user-agent'} ||= $self->{agent};
+
+ if (defined $args->{content}) {
+ $request->{headers}{'content-type'} ||= "application/octet-stream";
+ utf8::downgrade($args->{content}, 1)
+ or Carp::croak(q/Wide character in request message body/);
+ $request->{headers}{'content-length'} = length $args->{content};
+ $request->{content} = $args->{content};
+ }
+ return;
+}
+
+sub _split_url {
+ my $url = pop;
+
+ my ($scheme, $authority, $path_query) = $url =~ m<\A([^:/?#]+)://([^/?#]*)([^#]*)>
+ or Carp::croak(qq/Cannot parse URL: '$url'/);
+
+ $scheme = lc $scheme;
+ $path_query = "/$path_query" unless $path_query =~ m<\A/>;
+
+ my $host = (length($authority)) ? lc $authority : 'localhost';
+ $host =~ s/\A[^@]*@//; # userinfo
+ my $port = do {
+ $host =~ s/:([0-9]*)\z// && length $1
+ ? $1
+ : $DefaultPort{$scheme}
+ };
+
+ return ($scheme, $host, $port, $path_query);
+}
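+
+# For example, _split_url('https://v.percona.com') returns
+# ('https', 'v.percona.com', 443, '/'), falling back to the scheme's default
+# port when the URL does not specify one.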
+
+package
+ HTTPMicro::Handle; # hide from PAUSE/indexers
+use strict;
+use warnings;
+
+use Carp qw[croak];
+use Errno qw[EINTR EPIPE];
+use IO::Socket qw[SOCK_STREAM];
+
+sub BUFSIZE () { 32768 }
+
+my $Printable = sub {
+ local $_ = shift;
+ s/\r/\\r/g;
+ s/\n/\\n/g;
+ s/\t/\\t/g;
+ s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge;
+ $_;
+};
+
+sub new {
+ my ($class, %args) = @_;
+ return bless {
+ rbuf => '',
+ timeout => 60,
+ max_line_size => 16384,
+ %args
+ }, $class;
+}
+
+my $ssl_verify_args = {
+ check_cn => "when_only",
+ wildcards_in_alt => "anywhere",
+ wildcards_in_cn => "anywhere"
+};
+
+sub connect {
+ @_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/);
+ my ($self, $scheme, $host, $port) = @_;
+
+ if ( $scheme eq 'https' ) {
+ eval "require IO::Socket::SSL"
+ unless exists $INC{'IO/Socket/SSL.pm'};
+ croak(qq/IO::Socket::SSL must be installed for https support\n/)
+ unless $INC{'IO/Socket/SSL.pm'};
+ }
+ elsif ( $scheme ne 'http' ) {
+ croak(qq/Unsupported URL scheme '$scheme'\n/);
+ }
+
+ $self->{fh} = 'IO::Socket::INET'->new(
+ PeerHost => $host,
+ PeerPort => $port,
+ Proto => 'tcp',
+ Type => SOCK_STREAM,
+ Timeout => $self->{timeout}
+ ) or croak(qq/Could not connect to '$host:$port': $@/);
+
+ binmode($self->{fh})
+ or croak(qq/Could not binmode() socket: '$!'/);
+
+ if ( $scheme eq 'https') {
+ IO::Socket::SSL->start_SSL($self->{fh});
+ ref($self->{fh}) eq 'IO::Socket::SSL'
+ or die(qq/SSL connection failed for $host\n/);
+ if ( $self->{fh}->can("verify_hostname") ) {
+ $self->{fh}->verify_hostname( $host, $ssl_verify_args )
+ or die(qq/SSL certificate not valid for $host\n/);
+ }
+ else {
+ my $fh = $self->{fh};
+ _verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args)
+ or die(qq/SSL certificate not valid for $host\n/);
+ }
+ }
+
+ $self->{host} = $host;
+ $self->{port} = $port;
+
+ return $self;
+}
+
+sub close {
+ @_ == 1 || croak(q/Usage: $handle->close()/);
+ my ($self) = @_;
+ CORE::close($self->{fh})
+ or croak(qq/Could not close socket: '$!'/);
+}
+
+sub write {
+ @_ == 2 || croak(q/Usage: $handle->write(buf)/);
+ my ($self, $buf) = @_;
+
+ my $len = length $buf;
+ my $off = 0;
+
+ local $SIG{PIPE} = 'IGNORE';
+
+ while () {
+ $self->can_write
+ or croak(q/Timed out while waiting for socket to become ready for writing/);
+ my $r = syswrite($self->{fh}, $buf, $len, $off);
+ if (defined $r) {
+ $len -= $r;
+ $off += $r;
+ last unless $len > 0;
+ }
+ elsif ($! == EPIPE) {
+ croak(qq/Socket closed by remote server: $!/);
+ }
+ elsif ($! != EINTR) {
+ croak(qq/Could not write to socket: '$!'/);
+ }
+ }
+ return $off;
+}
+
+sub read {
+ @_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/);
+ my ($self, $len) = @_;
+
+ my $buf = '';
+ my $got = length $self->{rbuf};
+
+ if ($got) {
+ my $take = ($got < $len) ? $got : $len;
+ $buf = substr($self->{rbuf}, 0, $take, '');
+ $len -= $take;
+ }
+
+ while ($len > 0) {
+ $self->can_read
+ or croak(q/Timed out while waiting for socket to become ready for reading/);
+ my $r = sysread($self->{fh}, $buf, $len, length $buf);
+ if (defined $r) {
+ last unless $r;
+ $len -= $r;
+ }
+ elsif ($! != EINTR) {
+ croak(qq/Could not read from socket: '$!'/);
+ }
+ }
+ if ($len) {
+ croak(q/Unexpected end of stream/);
+ }
+ return $buf;
+}
+
+sub readline {
+ @_ == 1 || croak(q/Usage: $handle->readline()/);
+ my ($self) = @_;
+
+ while () {
+ if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) {
+ return $1;
+ }
+ $self->can_read
+ or croak(q/Timed out while waiting for socket to become ready for reading/);
+ my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf});
+ if (defined $r) {
+ last unless $r;
+ }
+ elsif ($! != EINTR) {
+ croak(qq/Could not read from socket: '$!'/);
+ }
+ }
+ croak(q/Unexpected end of stream while looking for line/);
+}
+
+sub read_header_lines {
+ @_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/);
+ my ($self, $headers) = @_;
+ $headers ||= {};
+ my $lines = 0;
+ my $val;
+
+ while () {
+ my $line = $self->readline;
+
+ if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) {
+ my ($field_name) = lc $1;
+ $val = \($headers->{$field_name} = $2);
+ }
+ elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) {
+ $val
+ or croak(q/Unexpected header continuation line/);
+ next unless length $1;
+ $$val .= ' ' if length $$val;
+ $$val .= $1;
+ }
+ elsif ($line =~ /\A \x0D?\x0A \z/x) {
+ last;
+ }
+ else {
+ croak(q/Malformed header line: / . $Printable->($line));
+ }
+ }
+ return $headers;
+}
+
+sub write_header_lines {
+ (@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/);
+ my($self, $headers) = @_;
+
+ my $buf = '';
+ while (my ($k, $v) = each %$headers) {
+ my $field_name = lc $k;
+ $field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x
+ or croak(q/Invalid HTTP header field name: / . $Printable->($field_name));
+ $field_name =~ s/\b(\w)/\u$1/g;
+ $buf .= "$field_name: $v\x0D\x0A";
+ }
+ $buf .= "\x0D\x0A";
+ return $self->write($buf);
+}
+
+sub read_content_body {
+ @_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/);
+ my ($self, $cb, $response, $len) = @_;
+ $len ||= $response->{headers}{'content-length'};
+
+ croak("No content-length in the returned response, and this "
+ . "UA doesn't implement chunking") unless defined $len;
+
+ while ($len > 0) {
+ my $read = ($len > BUFSIZE) ? BUFSIZE : $len;
+ $cb->($self->read($read), $response);
+ $len -= $read;
+ }
+
+ return;
+}
+
+sub write_content_body {
+ @_ == 2 || croak(q/Usage: $handle->write_content_body(request)/);
+ my ($self, $request) = @_;
+ my ($len, $content_length) = (0, $request->{headers}{'content-length'});
+
+ $len += $self->write($request->{content});
+
+ $len == $content_length
+        or croak(qq/Content-Length mismatch (got: $len expected: $content_length)/);
+
+ return $len;
+}
+
+sub read_response_header {
+ @_ == 1 || croak(q/Usage: $handle->read_response_header()/);
+ my ($self) = @_;
+
+ my $line = $self->readline;
+
+ $line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x
+ or croak(q/Malformed Status-Line: / . $Printable->($line));
+
+ my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4);
+
+ return {
+ status => $status,
+ reason => $reason,
+ headers => $self->read_header_lines,
+ protocol => $protocol,
+ };
+}
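+
+# For a status line such as "HTTP/1.1 200 OK" this returns status 200,
+# reason "OK" and protocol "HTTP/1.1", with the headers that follow parsed
+# by read_header_lines().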
+
+sub write_request_header {
+ @_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/);
+ my ($self, $method, $request_uri, $headers) = @_;
+
+ return $self->write("$method $request_uri HTTP/1.1\x0D\x0A")
+ + $self->write_header_lines($headers);
+}
+
+sub _do_timeout {
+ my ($self, $type, $timeout) = @_;
+ $timeout = $self->{timeout}
+ unless defined $timeout && $timeout >= 0;
+
+ my $fd = fileno $self->{fh};
+ defined $fd && $fd >= 0
+ or croak(q/select(2): 'Bad file descriptor'/);
+
+ my $initial = time;
+ my $pending = $timeout;
+ my $nfound;
+
+ vec(my $fdset = '', $fd, 1) = 1;
+
+ while () {
+ $nfound = ($type eq 'read')
+ ? select($fdset, undef, undef, $pending)
+ : select(undef, $fdset, undef, $pending) ;
+ if ($nfound == -1) {
+ $! == EINTR
+ or croak(qq/select(2): '$!'/);
+ redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0;
+ $nfound = 0;
+ }
+ last;
+ }
+ $! = 0;
+ return $nfound;
+}
+
+sub can_read {
+ @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/);
+ my $self = shift;
+ return $self->_do_timeout('read', @_)
+}
+
+sub can_write {
+ @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/);
+ my $self = shift;
+ return $self->_do_timeout('write', @_)
+}
+
+my $prog = <<'EOP';
+BEGIN {
+ if ( defined &IO::Socket::SSL::CAN_IPV6 ) {
+ *CAN_IPV6 = \*IO::Socket::SSL::CAN_IPV6;
+ }
+ else {
+ constant->import( CAN_IPV6 => '' );
+ }
+ my %const = (
+ NID_CommonName => 13,
+ GEN_DNS => 2,
+ GEN_IPADD => 7,
+ );
+ while ( my ($name,$value) = each %const ) {
+ no strict 'refs';
+ *{$name} = UNIVERSAL::can( 'Net::SSLeay', $name ) || sub { $value };
+ }
+}
+{
+ my %dispatcher = (
+ issuer => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_issuer_name( shift )) },
+ subject => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_subject_name( shift )) },
+ );
+ if ( $Net::SSLeay::VERSION >= 1.30 ) {
+ $dispatcher{commonName} = sub {
+ my $cn = Net::SSLeay::X509_NAME_get_text_by_NID(
+ Net::SSLeay::X509_get_subject_name( shift ), NID_CommonName);
+ $cn =~s{\0$}{}; # work around Bug in Net::SSLeay <1.33
+ $cn;
+ }
+ } else {
+ $dispatcher{commonName} = sub {
+ croak "you need at least Net::SSLeay version 1.30 for getting commonName"
+ }
+ }
+
+ if ( $Net::SSLeay::VERSION >= 1.33 ) {
+ $dispatcher{subjectAltNames} = sub { Net::SSLeay::X509_get_subjectAltNames( shift ) };
+ } else {
+ $dispatcher{subjectAltNames} = sub {
+ return;
+ };
+ }
+
+ $dispatcher{authority} = $dispatcher{issuer};
+ $dispatcher{owner} = $dispatcher{subject};
+ $dispatcher{cn} = $dispatcher{commonName};
+
+ sub _peer_certificate {
+ my ($self, $field) = @_;
+ my $ssl = $self->_get_ssl_object or return;
+
+ my $cert = ${*$self}{_SSL_certificate}
+ ||= Net::SSLeay::get_peer_certificate($ssl)
+ or return $self->error("Could not retrieve peer certificate");
+
+ if ($field) {
+ my $sub = $dispatcher{$field} or croak
+ "invalid argument for peer_certificate, valid are: ".join( " ",keys %dispatcher ).
+ "\nMaybe you need to upgrade your Net::SSLeay";
+ return $sub->($cert);
+ } else {
+ return $cert
+ }
+ }
+
+
+ my %scheme = (
+ ldap => {
+ wildcards_in_cn => 0,
+ wildcards_in_alt => 'leftmost',
+ check_cn => 'always',
+ },
+ http => {
+ wildcards_in_cn => 'anywhere',
+ wildcards_in_alt => 'anywhere',
+ check_cn => 'when_only',
+ },
+ smtp => {
+ wildcards_in_cn => 0,
+ wildcards_in_alt => 0,
+ check_cn => 'always'
+ },
+ none => {}, # do not check
+ );
+
+ $scheme{www} = $scheme{http}; # alias
+ $scheme{xmpp} = $scheme{http}; # rfc 3920
+ $scheme{pop3} = $scheme{ldap}; # rfc 2595
+ $scheme{imap} = $scheme{ldap}; # rfc 2595
+ $scheme{acap} = $scheme{ldap}; # rfc 2595
+ $scheme{nntp} = $scheme{ldap}; # rfc 4642
+ $scheme{ftp} = $scheme{http}; # rfc 4217
+
+
+ sub _verify_hostname_of_cert {
+ my $identity = shift;
+ my $cert = shift;
+ my $scheme = shift || 'none';
+ if ( ! ref($scheme) ) {
+ $scheme = $scheme{$scheme} or croak "scheme $scheme not defined";
+ }
+
+ return 1 if ! %$scheme; # 'none'
+
+ my $commonName = $dispatcher{cn}->($cert);
+ my @altNames = $dispatcher{subjectAltNames}->($cert);
+
+ if ( my $sub = $scheme->{callback} ) {
+ return $sub->($identity,$commonName,@altNames);
+ }
+
+
+ my $ipn;
+ if ( CAN_IPV6 and $identity =~m{:} ) {
+ $ipn = IO::Socket::SSL::inet_pton(IO::Socket::SSL::AF_INET6,$identity)
+ or croak "'$identity' is not IPv6, but neither IPv4 nor hostname";
+ } elsif ( $identity =~m{^\d+\.\d+\.\d+\.\d+$} ) {
+ $ipn = IO::Socket::SSL::inet_aton( $identity ) or croak "'$identity' is not IPv4, but neither IPv6 nor hostname";
+ } else {
+ if ( $identity =~m{[^a-zA-Z0-9_.\-]} ) {
+ $identity =~m{\0} and croak("name '$identity' has \\0 byte");
+ $identity = IO::Socket::SSL::idn_to_ascii($identity) or
+ croak "Warning: Given name '$identity' could not be converted to IDNA!";
+ }
+ }
+
+ my $check_name = sub {
+ my ($name,$identity,$wtyp) = @_;
+ $wtyp ||= '';
+ my $pattern;
+ if ( $wtyp eq 'anywhere' and $name =~m{^([a-zA-Z0-9_\-]*)\*(.+)} ) {
+ $pattern = qr{^\Q$1\E[a-zA-Z0-9_\-]*\Q$2\E$}i;
+ } elsif ( $wtyp eq 'leftmost' and $name =~m{^\*(\..+)$} ) {
+ $pattern = qr{^[a-zA-Z0-9_\-]*\Q$1\E$}i;
+ } else {
+ $pattern = qr{^\Q$name\E$}i;
+ }
+ return $identity =~ $pattern;
+ };
+
+ my $alt_dnsNames = 0;
+ while (@altNames) {
+ my ($type, $name) = splice (@altNames, 0, 2);
+ if ( $ipn and $type == GEN_IPADD ) {
+ return 1 if $ipn eq $name;
+
+ } elsif ( ! $ipn and $type == GEN_DNS ) {
+ $name =~s/\s+$//; $name =~s/^\s+//;
+ $alt_dnsNames++;
+ $check_name->($name,$identity,$scheme->{wildcards_in_alt})
+ and return 1;
+ }
+ }
+
+ if ( ! $ipn and (
+ $scheme->{check_cn} eq 'always' or
+ $scheme->{check_cn} eq 'when_only' and !$alt_dnsNames)) {
+ $check_name->($commonName,$identity,$scheme->{wildcards_in_cn})
+ and return 1;
+ }
+
+ return 0; # no match
+ }
+}
+EOP
+
+eval { require IO::Socket::SSL };
+if ( $INC{"IO/Socket/SSL.pm"} ) {
+ eval $prog;
+ die $@ if $@;
+}
+
+1;
+}
+# ###########################################################################
+# End HTTPMicro package
+# ###########################################################################
+
+# ###########################################################################
+# VersionCheck package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at:
+# lib/VersionCheck.pm
+# t/lib/VersionCheck.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+package VersionCheck;
+
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+use Data::Dumper;
+local $Data::Dumper::Indent = 1;
+local $Data::Dumper::Sortkeys = 1;
+local $Data::Dumper::Quotekeys = 0;
+
+use Digest::MD5 qw(md5_hex);
+use Sys::Hostname qw(hostname);
+use File::Basename qw();
+use File::Spec;
+use FindBin qw();
+
+eval {
+ require Percona::Toolkit;
+ require HTTPMicro;
+};
+
+{
+ my $file = 'percona-version-check';
+ my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.';
+ my @vc_dirs = (
+ '/etc/percona',
+ '/etc/percona-toolkit',
+ '/tmp',
+ "$home",
+ );
+
+ if ($ENV{PTDEBUG_VERSION_CHECK_HOME}) {
+ @vc_dirs = ( $ENV{PTDEBUG_VERSION_CHECK_HOME} );
+ }
+
+ sub version_check_file {
+ foreach my $dir ( @vc_dirs ) {
+ if ( -d $dir && -w $dir ) {
+ PTDEBUG && _d('Version check file', $file, 'in', $dir);
+ return $dir . '/' . $file;
+ }
+ }
+ PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD});
+ return $file; # in the CWD
+ }
+}
+
+sub version_check_time_limit {
+ return 60 * 60 * 24; # one day
+}
+
+
+sub version_check {
+ my (%args) = @_;
+
+ my $instances = $args{instances} || [];
+ my $instances_to_check;
+
+ PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin);
+ if ( !$args{force} ) {
+ if ( $FindBin::Bin
+ && (-d "$FindBin::Bin/../.bzr" || -d "$FindBin::Bin/../../.bzr") ) {
+ PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check");
+ return;
+ }
+ }
+
+ eval {
+ foreach my $instance ( @$instances ) {
+ my ($name, $id) = get_instance_id($instance);
+ $instance->{name} = $name;
+ $instance->{id} = $id;
+ }
+
+ push @$instances, { name => 'system', id => 0 };
+
+ $instances_to_check = get_instances_to_check(
+ instances => $instances,
+ vc_file => $args{vc_file}, # testing
+ now => $args{now}, # testing
+ );
+ PTDEBUG && _d(scalar @$instances_to_check, 'instances to check');
+ return unless @$instances_to_check;
+
+ my $protocol = 'https';
+ eval { require IO::Socket::SSL; };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d($EVAL_ERROR);
+ PTDEBUG && _d("SSL not available, won't run version_check");
+ return;
+ }
+ PTDEBUG && _d('Using', $protocol);
+
+ my $advice = pingback(
+ instances => $instances_to_check,
+ protocol => $protocol,
+ url => $args{url} # testing
+ || $ENV{PERCONA_VERSION_CHECK_URL} # testing
+ || "$protocol://v.percona.com",
+ );
+ if ( $advice ) {
+ PTDEBUG && _d('Advice:', Dumper($advice));
+ if ( scalar @$advice > 1) {
+ print "\n# " . scalar @$advice . " software updates are "
+ . "available:\n";
+ }
+ else {
+ print "\n# A software update is available:\n";
+ }
+ print join("\n", map { "# * $_" } @$advice), "\n\n";
+ }
+ };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
+ }
+
+ if ( @$instances_to_check ) {
+ eval {
+ update_check_times(
+ instances => $instances_to_check,
+ vc_file => $args{vc_file}, # testing
+ now => $args{now}, # testing
+ );
+ };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d('Error updating version check file:', $EVAL_ERROR);
+ }
+ }
+
+ if ( $ENV{PTDEBUG_VERSION_CHECK} ) {
+ warn "Exiting because the PTDEBUG_VERSION_CHECK "
+ . "environment variable is defined.\n";
+ exit 255;
+ }
+
+ return;
+}
+
+sub get_instances_to_check {
+ my (%args) = @_;
+
+ my $instances = $args{instances};
+ my $now = $args{now} || int(time);
+ my $vc_file = $args{vc_file} || version_check_file();
+
+ if ( !-f $vc_file ) {
+ PTDEBUG && _d('Version check file', $vc_file, 'does not exist;',
+ 'version checking all instances');
+ return $instances;
+ }
+
+ open my $fh, '<', $vc_file or die "Cannot open $vc_file: $OS_ERROR";
+ chomp(my $file_contents = do { local $/ = undef; <$fh> });
+ PTDEBUG && _d('Version check file', $vc_file, 'contents:', $file_contents);
+ close $fh;
+ my %last_check_time_for = $file_contents =~ /^([^,]+),(.+)$/mg;
+
+ my $check_time_limit = version_check_time_limit();
+ my @instances_to_check;
+ foreach my $instance ( @$instances ) {
+ my $last_check_time = $last_check_time_for{ $instance->{id} };
+      PTDEBUG && _d('Instance', $instance->{id}, 'last checked',
+ $last_check_time, 'now', $now, 'diff', $now - ($last_check_time || 0),
+ 'hours until next check',
+ sprintf '%.2f',
+ ($check_time_limit - ($now - ($last_check_time || 0))) / 3600);
+ if ( !defined $last_check_time
+ || ($now - $last_check_time) >= $check_time_limit ) {
+ PTDEBUG && _d('Time to check', Dumper($instance));
+ push @instances_to_check, $instance;
+ }
+ }
+
+ return \@instances_to_check;
+}
+
+sub update_check_times {
+ my (%args) = @_;
+
+ my $instances = $args{instances};
+ my $now = $args{now} || int(time);
+ my $vc_file = $args{vc_file} || version_check_file();
+ PTDEBUG && _d('Updating last check time:', $now);
+
+ my %all_instances = map {
+ $_->{id} => { name => $_->{name}, ts => $now }
+ } @$instances;
+
+ if ( -f $vc_file ) {
+ open my $fh, '<', $vc_file or die "Cannot read $vc_file: $OS_ERROR";
+ my $contents = do { local $/ = undef; <$fh> };
+ close $fh;
+
+ foreach my $line ( split("\n", ($contents || '')) ) {
+ my ($id, $ts) = split(',', $line);
+ if ( !exists $all_instances{$id} ) {
+ $all_instances{$id} = { ts => $ts }; # original ts, not updated
+ }
+ }
+ }
+
+ open my $fh, '>', $vc_file or die "Cannot write to $vc_file: $OS_ERROR";
+ foreach my $id ( sort keys %all_instances ) {
+ PTDEBUG && _d('Updated:', $id, Dumper($all_instances{$id}));
+ print { $fh } $id . ',' . $all_instances{$id}->{ts} . "\n";
+ }
+ close $fh;
+
+ return;
+}
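+
+# The version-check file written above is plain text with one instance per
+# line in "<instance-id>,<last-check-unix-timestamp>" form, e.g.
+# (hypothetical values):
+#
+#   0,1492521600
+#   d41d8cd98f00b204e9800998ecf8427e,1492521600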
+
+sub get_instance_id {
+ my ($instance) = @_;
+
+ my $dbh = $instance->{dbh};
+ my $dsn = $instance->{dsn};
+
+ my $sql = q{SELECT CONCAT(@@hostname, @@port)};
+ PTDEBUG && _d($sql);
+ my ($name) = eval { $dbh->selectrow_array($sql) };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d($EVAL_ERROR);
+ $sql = q{SELECT @@hostname};
+ PTDEBUG && _d($sql);
+ ($name) = eval { $dbh->selectrow_array($sql) };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d($EVAL_ERROR);
+ $name = ($dsn->{h} || 'localhost') . ($dsn->{P} || 3306);
+ }
+ else {
+ $sql = q{SHOW VARIABLES LIKE 'port'};
+ PTDEBUG && _d($sql);
+ my (undef, $port) = eval { $dbh->selectrow_array($sql) };
+ PTDEBUG && _d('port:', $port);
+ $name .= $port || '';
+ }
+ }
+ my $id = md5_hex($name);
+
+ PTDEBUG && _d('MySQL instance:', $id, $name, Dumper($dsn));
+
+ return $name, $id;
+}
+
+
+sub pingback {
+ my (%args) = @_;
+ my @required_args = qw(url instances);
+ foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+ }
+ my $url = $args{url};
+ my $instances = $args{instances};
+
+ my $ua = $args{ua} || HTTPMicro->new( timeout => 3 );
+
+ my $response = $ua->request('GET', $url);
+ PTDEBUG && _d('Server response:', Dumper($response));
+ die "No response from GET $url"
+ if !$response;
+ die("GET on $url returned HTTP status $response->{status}; expected 200\n",
+ ($response->{content} || '')) if $response->{status} != 200;
+ die("GET on $url did not return any programs to check")
+ if !$response->{content};
+
+ my $items = parse_server_response(
+ response => $response->{content}
+ );
+ die "Failed to parse server requested programs: $response->{content}"
+ if !scalar keys %$items;
+
+ my $versions = get_versions(
+ items => $items,
+ instances => $instances,
+ );
+ die "Failed to get any program versions; should have at least gotten Perl"
+ if !scalar keys %$versions;
+
+ my $client_content = encode_client_response(
+ items => $items,
+ versions => $versions,
+ general_id => md5_hex( hostname() ),
+ );
+
+ my $client_response = {
+ headers => { "X-Percona-Toolkit-Tool" => File::Basename::basename($0) },
+ content => $client_content,
+ };
+ PTDEBUG && _d('Client response:', Dumper($client_response));
+
+ $response = $ua->request('POST', $url, $client_response);
+ PTDEBUG && _d('Server suggestions:', Dumper($response));
+ die "No response from POST $url $client_response"
+ if !$response;
+ die "POST $url returned HTTP status $response->{status}; expected 200"
+ if $response->{status} != 200;
+
+ return unless $response->{content};
+
+ $items = parse_server_response(
+ response => $response->{content},
+ split_vars => 0,
+ );
+ die "Failed to parse server suggestions: $response->{content}"
+ if !scalar keys %$items;
+ my @suggestions = map { $_->{vars} }
+ sort { $a->{item} cmp $b->{item} }
+ values %$items;
+
+ return \@suggestions;
+}
+
+sub encode_client_response {
+ my (%args) = @_;
+ my @required_args = qw(items versions general_id);
+ foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($items, $versions, $general_id) = @args{@required_args};
+
+ my @lines;
+ foreach my $item ( sort keys %$items ) {
+ next unless exists $versions->{$item};
+ if ( ref($versions->{$item}) eq 'HASH' ) {
+ my $mysql_versions = $versions->{$item};
+ for my $id ( sort keys %$mysql_versions ) {
+ push @lines, join(';', $id, $item, $mysql_versions->{$id});
+ }
+ }
+ else {
+ push @lines, join(';', $general_id, $item, $versions->{$item});
+ }
+ }
+
+ my $client_response = join("\n", @lines) . "\n";
+ return $client_response;
+}
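+
+# encode_client_response() emits one "<instance-id>;<item>;<version>" line
+# per item that has a version, e.g. (hypothetical values):
+#
+#   9a3f0c...;Perl;5.18.2
+#   9a3f0c...;OS;CentOS release 6.8 (Final)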
+
+sub parse_server_response {
+ my (%args) = @_;
+ my @required_args = qw(response);
+ foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($response) = @args{@required_args};
+
+ my %items = map {
+ my ($item, $type, $vars) = split(";", $_);
+ if ( !defined $args{split_vars} || $args{split_vars} ) {
+ $vars = [ split(",", ($vars || '')) ];
+ }
+ $item => {
+ item => $item,
+ type => $type,
+ vars => $vars,
+ };
+ } split("\n", $response);
+
+ PTDEBUG && _d('Items:', Dumper(\%items));
+
+ return \%items;
+}
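+
+# The server response parsed here is expected to be newline-separated
+# "<item>;<type>;<var1,var2,...>" records, e.g. (hypothetical):
+#
+#   MySQL;mysql_variable;version_comment,version
+#   Perl;perl_version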
+
+my %sub_for_type = (
+ os_version => \&get_os_version,
+ perl_version => \&get_perl_version,
+ perl_module_version => \&get_perl_module_version,
+ mysql_variable => \&get_mysql_variable,
+);
+
+sub valid_item {
+ my ($item) = @_;
+ return unless $item;
+ if ( !exists $sub_for_type{ $item->{type} } ) {
+ PTDEBUG && _d('Invalid type:', $item->{type});
+ return 0;
+ }
+ return 1;
+}
+
+sub get_versions {
+ my (%args) = @_;
+ my @required_args = qw(items);
+ foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($items) = @args{@required_args};
+
+ my %versions;
+ foreach my $item ( values %$items ) {
+ next unless valid_item($item);
+ eval {
+ my $version = $sub_for_type{ $item->{type} }->(
+ item => $item,
+ instances => $args{instances},
+ );
+ if ( $version ) {
+ chomp $version unless ref($version);
+ $versions{$item->{item}} = $version;
+ }
+ };
+ if ( $EVAL_ERROR ) {
+ PTDEBUG && _d('Error getting version for', Dumper($item), $EVAL_ERROR);
+ }
+ }
+
+ return \%versions;
+}
+
+
+sub get_os_version {
+ if ( $OSNAME eq 'MSWin32' ) {
+ require Win32;
+ return Win32::GetOSDisplayName();
+ }
+
+ chomp(my $platform = `uname -s`);
+ PTDEBUG && _d('platform:', $platform);
+ return $OSNAME unless $platform;
+
+ chomp(my $lsb_release
+ = `which lsb_release 2>/dev/null | awk '{print \$1}'` || '');
+ PTDEBUG && _d('lsb_release:', $lsb_release);
+
+ my $release = "";
+
+ if ( $platform eq 'Linux' ) {
+ if ( -f "/etc/fedora-release" ) {
+ $release = `cat /etc/fedora-release`;
+ }
+ elsif ( -f "/etc/redhat-release" ) {
+ $release = `cat /etc/redhat-release`;
+ }
+ elsif ( -f "/etc/system-release" ) {
+ $release = `cat /etc/system-release`;
+ }
+ elsif ( $lsb_release ) {
+ $release = `$lsb_release -ds`;
+ }
+ elsif ( -f "/etc/lsb-release" ) {
+ $release = `grep DISTRIB_DESCRIPTION /etc/lsb-release`;
+ $release =~ s/^\w+="([^"]+)".+/$1/;
+ }
+ elsif ( -f "/etc/debian_version" ) {
+ chomp(my $rel = `cat /etc/debian_version`);
+ $release = "Debian $rel";
+ if ( -f "/etc/apt/sources.list" ) {
+ chomp(my $code_name = `awk '/^deb/ {print \$3}' /etc/apt/sources.list | awk -F/ '{print \$1}'| awk 'BEGIN {FS="|"} {print \$1}' | sort | uniq -c | sort -rn | head -n1 | awk '{print \$2}'`);
+ $release .= " ($code_name)" if $code_name;
+ }
+ }
+ elsif ( -f "/etc/os-release" ) { # openSUSE
+ chomp($release = `grep PRETTY_NAME /etc/os-release`);
+ $release =~ s/^PRETTY_NAME="(.+)"$/$1/;
+ }
+ elsif ( `ls /etc/*release 2>/dev/null` ) {
+ if ( `grep DISTRIB_DESCRIPTION /etc/*release 2>/dev/null` ) {
+ $release = `grep DISTRIB_DESCRIPTION /etc/*release | head -n1`;
+ }
+ else {
+ $release = `cat /etc/*release | head -n1`;
+ }
+ }
+ }
+ elsif ( $platform =~ m/(?:BSD|^Darwin)$/ ) {
+ my $rel = `uname -r`;
+ $release = "$platform $rel";
+ }
+ elsif ( $platform eq "SunOS" ) {
+ my $rel = `head -n1 /etc/release` || `uname -r`;
+ $release = "$platform $rel";
+ }
+
+ if ( !$release ) {
+ PTDEBUG && _d('Failed to get the release, using platform');
+ $release = $platform;
+ }
+ chomp($release);
+
+ $release =~ s/^"|"$//g;
+
+ PTDEBUG && _d('OS version =', $release);
+ return $release;
+}
+
+sub get_perl_version {
+ my (%args) = @_;
+ my $item = $args{item};
+ return unless $item;
+
+ my $version = sprintf '%vd', $PERL_VERSION;
+ PTDEBUG && _d('Perl version', $version);
+ return $version;
+}
+
+sub get_perl_module_version {
+ my (%args) = @_;
+ my $item = $args{item};
+ return unless $item;
+
+ my $var = '$' . $item->{item} . '::VERSION';
+ my $version = eval "use $item->{item}; $var;";
+ PTDEBUG && _d('Perl version for', $var, '=', $version);
+ return $version;
+}
+
+sub get_mysql_variable {
+ return get_from_mysql(
+ show => 'VARIABLES',
+ @_,
+ );
+}
+
+sub get_from_mysql {
+ my (%args) = @_;
+ my $show = $args{show};
+ my $item = $args{item};
+ my $instances = $args{instances};
+ return unless $show && $item;
+
+ if ( !$instances || !@$instances ) {
+ PTDEBUG && _d('Cannot check', $item,
+ 'because there are no MySQL instances');
+ return;
+ }
+
+ if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') {
+ $item->{vars} = ['version_comment', 'version'];
+ }
+
+ my @versions;
+ my %version_for;
+ foreach my $instance ( @$instances ) {
+ next unless $instance->{id}; # special system instance has id=0
+ my $dbh = $instance->{dbh};
+ local $dbh->{FetchHashKeyName} = 'NAME_lc';
+ my $sql = qq/SHOW $show/;
+ PTDEBUG && _d($sql);
+ my $rows = $dbh->selectall_hashref($sql, 'variable_name');
+
+ my @versions;
+ foreach my $var ( @{$item->{vars}} ) {
+ $var = lc($var);
+ my $version = $rows->{$var}->{value};
+ PTDEBUG && _d('MySQL version for', $item->{item}, '=', $version,
+ 'on', $instance->{name});
+ push @versions, $version;
+ }
+ $version_for{ $instance->{id} } = join(' ', @versions);
+ }
+
+ return \%version_for;
+}
+
+sub _d {
+ my ($package, undef, $line) = caller 0;
+ @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+ map { defined $_ ? $_ : 'undef' }
+ @_;
+ print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+1;
+}
+# ###########################################################################
+# End VersionCheck package
+# ###########################################################################
+
+#
+# parse_connection_options() subroutine parses connection-related command line
+# options
+#
+sub parse_connection_options {
+ my $con = shift;
+
+ $con->{dsn} = 'dbi:mysql:';
+
+ # this option has to be first
+ if ($ENV{option_defaults_file}) {
+ $con->{dsn} .= ";mysql_read_default_file=$ENV{option_defaults_file}";
+ }
+
+ if ($ENV{option_defaults_extra_file}) {
+ $con->{dsn} .= ";mysql_read_default_file=$ENV{option_defaults_extra_file}";
+ }
+
+ $con->{dsn} .= ";mysql_read_default_group=xtrabackup";
+
+ if ($ENV{option_mysql_password}) {
+ $con->{dsn_password} = "$ENV{option_mysql_password}";
+ }
+ if ($ENV{option_mysql_user}) {
+ $con->{dsn_user} = "$ENV{option_mysql_user}";
+ }
+ if ($ENV{option_mysql_host}) {
+ $con->{dsn} .= ";host=$ENV{option_mysql_host}";
+ }
+ if ($ENV{option_mysql_port}) {
+ $con->{dsn} .= ";port=$ENV{option_mysql_port}";
+ }
+ if ($ENV{option_mysql_socket}) {
+ $con->{dsn} .= ";mysql_socket=$ENV{option_mysql_socket}";
+ }
+}
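+
+# With, for example, option_mysql_host=localhost and option_mysql_port=3306
+# set in the environment, the resulting DSN would be:
+#
+#   dbi:mysql:;mysql_read_default_group=xtrabackup;host=localhost;port=3306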
+
+#
+# mysql_connect subroutine connects to MySQL server
+#
+sub mysql_connect {
+ my %con;
+ my %args = (
+ # Defaults
+ abort_on_error => 1,
+ @_
+ );
+
+ $con{abort_on_error} = $args{abort_on_error};
+
+ parse_connection_options(\%con);
+
+ $now = current_time();
+ print STDERR "$now $prefix Connecting to MySQL server with DSN '$con{dsn}'" .
+ (defined($con{dsn_user}) ? " as '$con{dsn_user}' " : "") .
+ " (using password: ";
+ if (defined($con{dsn_password})) {
+ print STDERR "YES).\n";
+ } else {
+ print STDERR "NO).\n";
+ }
+
+ eval {
+ $con{dbh}=DBI->connect($con{dsn}, $con{dsn_user},
+ $con{dsn_password}, { RaiseError => 1 });
+ };
+
+ if ($EVAL_ERROR) {
+ $con{connect_error}=$EVAL_ERROR;
+ } else {
+ $now = current_time();
+ print STDERR "$now $prefix Connected to MySQL server\n";
+ }
+
+ if ($args{abort_on_error}) {
+ if (!$dbd_mysql_installed) {
+            die "Failed to connect to MySQL server because the " .
+                "DBD::mysql module is not installed";
+ } else {
+ if (!$con{dbh}) {
+ die "Failed to connect to MySQL server: " .
+ $con{connect_error};
+ }
+ }
+ }
+
+ if ($con{dbh}) {
+ $con{dbh}->do("SET SESSION wait_timeout=2147483");
+ }
+
+ return %con;
+}
+
+#
+# return current local time as string in form "070816 12:23:15"
+#
+sub current_time {
+ return strftime("%y%m%d %H:%M:%S", localtime());
+}
+
+
+%mysql = mysql_connect(abort_on_error => 1);
+
+$now = current_time();
+print STDERR
+ "$now $prefix Executing a version check against the server...\n";
+
+# Make STDERR the default output filehandle, as VersionCheck prints alerts to STDOUT
+select STDERR;
+
+VersionCheck::version_check(
+ force => 1,
+ instances => [ {
+ dbh => $mysql{dbh},
+ dsn => $mysql{dsn}
+ }
+ ]
+ );
+# Restore STDOUT as the default filehandle
+select STDOUT;
+
+$now = current_time();
+print STDERR "$now $prefix Done.\n";
diff --git a/extra/mariabackup/write_filt.cc b/extra/mariabackup/write_filt.cc
new file mode 100644
index 00000000000..129302d7fa0
--- /dev/null
+++ b/extra/mariabackup/write_filt.cc
@@ -0,0 +1,219 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2013 Percona LLC and/or its affiliates.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Page write filters implementation */
+
+#include <my_base.h>
+#include "common.h"
+#include "write_filt.h"
+#include "fil_cur.h"
+#include "xtrabackup.h"
+
+/************************************************************************
+Write-through page write filter. */
+static my_bool wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+ xb_fil_cur_t *cursor);
+static my_bool wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile);
+
+xb_write_filt_t wf_write_through = {
+ &wf_wt_init,
+ &wf_wt_process,
+ NULL,
+ NULL
+};
+
+/************************************************************************
+Incremental page write filter. */
+static my_bool wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+ xb_fil_cur_t *cursor);
+static my_bool wf_incremental_process(xb_write_filt_ctxt_t *ctxt,
+ ds_file_t *dstfile);
+static my_bool wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt,
+ ds_file_t *dstfile);
+static void wf_incremental_deinit(xb_write_filt_ctxt_t *ctxt);
+
+xb_write_filt_t wf_incremental = {
+ &wf_incremental_init,
+ &wf_incremental_process,
+ &wf_incremental_finalize,
+ &wf_incremental_deinit
+};
+
+/************************************************************************
+Initialize incremental page write filter.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+ xb_fil_cur_t *cursor)
+{
+ char meta_name[FN_REFLEN];
+ xb_delta_info_t info;
+ ulint buf_size;
+ xb_wf_incremental_ctxt_t *cp =
+ &(ctxt->u.wf_incremental_ctxt);
+
+ ctxt->cursor = cursor;
+
+ /* allocate buffer for incremental backup (4096 pages) */
+ buf_size = (UNIV_PAGE_SIZE_MAX / 4 + 1) * UNIV_PAGE_SIZE_MAX;
+ cp->delta_buf_base = static_cast<byte *>(ut_malloc(buf_size));
+ memset(cp->delta_buf_base, 0, buf_size);
+ cp->delta_buf = static_cast<byte *>
+ (ut_align(cp->delta_buf_base, UNIV_PAGE_SIZE_MAX));
+
+ /* write delta meta info */
+ snprintf(meta_name, sizeof(meta_name), "%s%s", dst_name,
+ XB_DELTA_INFO_SUFFIX);
+ info.page_size = cursor->page_size;
+ info.zip_size = cursor->zip_size;
+ info.space_id = cursor->space_id;
+ if (!xb_write_delta_metadata(meta_name, &info)) {
+ msg("[%02u] xtrabackup: Error: "
+ "failed to write meta info for %s\n",
+ cursor->thread_n, cursor->rel_path);
+ return(FALSE);
+ }
+
+ /* change the target file name, since we are only going to write
+ delta pages */
+ strcat(dst_name, ".delta");
+
+ mach_write_to_4(cp->delta_buf, 0x78747261UL); /*"xtra"*/
+ cp->npages = 1;
+
+ return(TRUE);
+}
+
+/************************************************************************
+Run the next batch of pages through incremental page write filter.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
+{
+ ulint i;
+ xb_fil_cur_t *cursor = ctxt->cursor;
+ ulint page_size = cursor->page_size;
+ byte *page;
+ xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt);
+
+ for (i = 0, page = cursor->buf; i < cursor->buf_npages;
+ i++, page += page_size) {
+
+ if (incremental_lsn >= mach_read_from_8(page + FIL_PAGE_LSN)) {
+
+ continue;
+ }
+
+ /* updated page */
+ if (cp->npages == page_size / 4) {
+ /* flush buffer */
+ if (ds_write(dstfile, cp->delta_buf,
+ cp->npages * page_size)) {
+ return(FALSE);
+ }
+
+ /* clear buffer */
+ memset(cp->delta_buf, 0, page_size / 4 * page_size);
+ /*"xtra"*/
+ mach_write_to_4(cp->delta_buf, 0x78747261UL);
+ cp->npages = 1;
+ }
+
+ mach_write_to_4(cp->delta_buf + cp->npages * 4,
+ cursor->buf_page_no + i);
+ memcpy(cp->delta_buf + cp->npages * page_size, page,
+ page_size);
+
+ cp->npages++;
+ }
+
+ return(TRUE);
+}
+
+/************************************************************************
+Flush the incremental page write filter's buffer.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
+{
+ xb_fil_cur_t *cursor = ctxt->cursor;
+ ulint page_size = cursor->page_size;
+ xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt);
+
+ if (cp->npages != page_size / 4) {
+ mach_write_to_4(cp->delta_buf + cp->npages * 4, 0xFFFFFFFFUL);
+ }
+
+ /* Mark the final block */
+ mach_write_to_4(cp->delta_buf, 0x58545241UL); /*"XTRA"*/
+
+ /* flush buffer */
+ if (ds_write(dstfile, cp->delta_buf, cp->npages * page_size)) {
+ return(FALSE);
+ }
+
+ return(TRUE);
+}
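+
+/* A sketch of the .delta block layout implied by the code above: each block
+spans page_size / 4 pages; page 0 begins with the "xtra" magic ("XTRA" in
+the final block) followed by a table of 4-byte page numbers, terminated by
+0xFFFFFFFF when the block is not full, and each following slot holds a full
+copy of the corresponding changed page. */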
+
+/************************************************************************
+Free the incremental page write filter's buffer. */
+static void
+wf_incremental_deinit(xb_write_filt_ctxt_t *ctxt)
+{
+ xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt);
+
+ if (cp->delta_buf_base != NULL) {
+ ut_free(cp->delta_buf_base);
+ }
+}
+
+/************************************************************************
+Initialize the write-through page write filter.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)),
+ xb_fil_cur_t *cursor)
+{
+ ctxt->cursor = cursor;
+
+ return(TRUE);
+}
+
+/************************************************************************
+Write the next batch of pages to the destination datasink.
+
+@return TRUE on success, FALSE on error. */
+static my_bool
+wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
+{
+ xb_fil_cur_t *cursor = ctxt->cursor;
+
+ if (ds_write(dstfile, cursor->buf, cursor->buf_read)) {
+ return(FALSE);
+ }
+
+ return(TRUE);
+}
diff --git a/extra/mariabackup/write_filt.h b/extra/mariabackup/write_filt.h
new file mode 100644
index 00000000000..20213b6f523
--- /dev/null
+++ b/extra/mariabackup/write_filt.h
@@ -0,0 +1,61 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2013 Percona LLC and/or its affiliates.
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* Page write filter interface */
+
+#ifndef XB_WRITE_FILT_H
+#define XB_WRITE_FILT_H
+
+#include "fil_cur.h"
+#include "datasink.h"
+#include "compact.h"
+
+/* Incremental page filter context */
+typedef struct {
+ byte *delta_buf_base;
+ byte *delta_buf;
+ ulint npages;
+} xb_wf_incremental_ctxt_t;
+
+/* Page filter context used as an opaque structure by callers */
+typedef struct {
+ xb_fil_cur_t *cursor;
+ union {
+ xb_wf_incremental_ctxt_t wf_incremental_ctxt;
+ xb_wf_compact_ctxt_t wf_compact_ctxt;
+ } u;
+} xb_write_filt_ctxt_t;
+
+
+typedef struct {
+ my_bool (*init)(xb_write_filt_ctxt_t *ctxt, char *dst_name,
+ xb_fil_cur_t *cursor);
+ my_bool (*process)(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile);
+ my_bool (*finalize)(xb_write_filt_ctxt_t *, ds_file_t *dstfile);
+ void (*deinit)(xb_write_filt_ctxt_t *);
+} xb_write_filt_t;
+
+extern xb_write_filt_t wf_write_through;
+extern xb_write_filt_t wf_incremental;
+extern xb_write_filt_t wf_compact;
+
+#endif /* XB_WRITE_FILT_H */
diff --git a/extra/mariabackup/wsrep.cc b/extra/mariabackup/wsrep.cc
new file mode 100644
index 00000000000..420ada75f36
--- /dev/null
+++ b/extra/mariabackup/wsrep.cc
@@ -0,0 +1,219 @@
+/******************************************************
+Percona XtraBackup: hot backup tool for InnoDB
+(c) 2009-2014 Percona LLC and/or its affiliates
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************
+
+This file incorporates work covered by the following copyright and
+permission notice:
+
+ Copyright 2010 Codership Oy <http://www.codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <handler.h>
+#include <trx0sys.h>
+
+#include "common.h"
+
+#define WSREP_XID_PREFIX "WSREPXid"
+#define WSREP_XID_PREFIX_LEN MYSQL_XID_PREFIX_LEN
+#define WSREP_XID_UUID_OFFSET 8
+#define WSREP_XID_SEQNO_OFFSET (WSREP_XID_UUID_OFFSET + sizeof(wsrep_uuid_t))
+#define WSREP_XID_GTRID_LEN (WSREP_XID_SEQNO_OFFSET + sizeof(wsrep_seqno_t))
+
+/*! undefined seqno */
+#define WSREP_SEQNO_UNDEFINED (-1)
+
+/*! Name of file where Galera info is stored on recovery */
+#define XB_GALERA_INFO_FILENAME "xtrabackup_galera_info"
+
+/* Galera UUID type - for all unique IDs */
+typedef struct wsrep_uuid {
+ uint8_t data[16];
+} wsrep_uuid_t;
+
+/* sequence number of a writeset, etc. */
+typedef int64_t wsrep_seqno_t;
+
+/* Undefined UUID */
+static const wsrep_uuid_t WSREP_UUID_UNDEFINED = {{0,}};
+
+/***********************************************************************//**
+Check if a given WSREP XID is valid.
+
+@return true if valid.
+*/
+static
+bool
+wsrep_is_wsrep_xid(
+/*===============*/
+ const void* xid_ptr)
+{
+ const XID* xid = reinterpret_cast<const XID*>(xid_ptr);
+
+ return((xid->formatID == 1 &&
+ xid->gtrid_length == WSREP_XID_GTRID_LEN &&
+ xid->bqual_length == 0 &&
+ !memcmp(xid->data, WSREP_XID_PREFIX, WSREP_XID_PREFIX_LEN)));
+}
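+
+/* Per the offsets defined above, the gtrid part of a WSREP XID is laid out
+as: bytes 0-7 hold the "WSREPXid" prefix, bytes 8-23 the binary cluster
+UUID, and bytes 24-31 the 64-bit sequence number. */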
+
+/***********************************************************************//**
+Retrieve binary WSREP UUID from XID.
+
+@return binary WSREP UUID representation, if UUID is valid, or
+ WSREP_UUID_UNDEFINED otherwise.
+*/
+static
+const wsrep_uuid_t*
+wsrep_xid_uuid(
+/*===========*/
+ const XID* xid)
+{
+ if (wsrep_is_wsrep_xid(xid)) {
+ return(reinterpret_cast<const wsrep_uuid_t*>
+ (xid->data + WSREP_XID_UUID_OFFSET));
+ } else {
+ return(&WSREP_UUID_UNDEFINED);
+ }
+}
+
+/***********************************************************************//**
+Retrieve WSREP seqno from XID.
+
+@return WSREP seqno, if it is valid, or WSREP_SEQNO_UNDEFINED otherwise.
+*/
+wsrep_seqno_t wsrep_xid_seqno(
+/*==========================*/
+ const XID* xid)
+{
+ if (wsrep_is_wsrep_xid(xid)) {
+ wsrep_seqno_t seqno;
+ memcpy(&seqno, xid->data + WSREP_XID_SEQNO_OFFSET,
+ sizeof(wsrep_seqno_t));
+
+ return(seqno);
+ } else {
+ return(WSREP_SEQNO_UNDEFINED);
+ }
+}
+
+/***********************************************************************//**
+Write UUID to string.
+
+@return length of UUID string representation or -EMSGSIZE if string is too
+short.
+*/
+static
+int
+wsrep_uuid_print(
+/*=============*/
+ const wsrep_uuid_t* uuid,
+ char* str,
+ size_t str_len)
+{
+ if (str_len > 36) {
+ const unsigned char* u = uuid->data;
+ return snprintf(str, str_len,
+ "%02x%02x%02x%02x-%02x%02x-%02x%02x-"
+ "%02x%02x-%02x%02x%02x%02x%02x%02x",
+ u[ 0], u[ 1], u[ 2], u[ 3], u[ 4], u[ 5], u[ 6],
+ u[ 7], u[ 8], u[ 9], u[10], u[11], u[12], u[13],
+ u[14], u[15]);
+ }
+ else {
+ return -EMSGSIZE;
+ }
+}
+
+/***********************************************************************
+Store Galera checkpoint info in the 'xtrabackup_galera_info' file, if that
+information is present in the trx system header. Otherwise, do nothing. */
+void
+xb_write_galera_info(bool incremental_prepare)
+/*==================*/
+{
+ FILE* fp;
+ XID xid;
+ char uuid_str[40];
+ wsrep_seqno_t seqno;
+ MY_STAT statinfo;
+
+	/* Do not overwrite an existing file, to remain compatible with
+	older server versions */
+ if (!incremental_prepare &&
+ my_stat(XB_GALERA_INFO_FILENAME, &statinfo, MYF(0)) != NULL) {
+
+ return;
+ }
+
+ memset(&xid, 0, sizeof(xid));
+ xid.formatID = -1;
+
+ if (!trx_sys_read_wsrep_checkpoint(&xid)) {
+
+ return;
+ }
+
+ if (wsrep_uuid_print(wsrep_xid_uuid(&xid), uuid_str,
+ sizeof(uuid_str)) < 0) {
+ return;
+ }
+
+ fp = fopen(XB_GALERA_INFO_FILENAME, "w");
+ if (fp == NULL) {
+
+ msg("xtrabackup: error: "
+ "could not create " XB_GALERA_INFO_FILENAME
+ ", errno = %d\n",
+ errno);
+ exit(EXIT_FAILURE);
+ }
+
+ seqno = wsrep_xid_seqno(&xid);
+
+ msg("xtrabackup: Recovered WSREP position: %s:%lld\n",
+ uuid_str, (long long) seqno);
+
+ if (fprintf(fp, "%s:%lld", uuid_str, (long long) seqno) < 0) {
+
+ msg("xtrabackup: error: "
+ "could not write to " XB_GALERA_INFO_FILENAME
+ ", errno = %d\n",
+ errno);
+ exit(EXIT_FAILURE);
+ }
+
+ fclose(fp);
+}
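+
+/* The resulting xtrabackup_galera_info file contains a single
+"<cluster UUID>:<seqno>" line, e.g. (hypothetical values):
+ed1f6c52-9549-11e7-a4a3-9bb14156457e:1523 */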
diff --git a/extra/mariabackup/wsrep.h b/extra/mariabackup/wsrep.h
new file mode 100644
index 00000000000..7638d1f2b54
--- /dev/null
+++ b/extra/mariabackup/wsrep.h
@@ -0,0 +1,32 @@
+/******************************************************
+Percona XtraBackup: hot backup tool for InnoDB
+(c) 2009-2014 Percona LLC and/or its affiliates
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+*******************************************************/
+
+#ifndef WSREP_H
+#define WSREP_H
+
+/***********************************************************************
+Store Galera checkpoint info in the 'xtrabackup_galera_info' file, if that
+information is present in the trx system header. Otherwise, do nothing. */
+void
+xb_write_galera_info(bool incremental_prepare);
+/*==================*/
+
+#endif
diff --git a/extra/mariabackup/xb_regex.h b/extra/mariabackup/xb_regex.h
new file mode 100644
index 00000000000..94c5e2a5fa7
--- /dev/null
+++ b/extra/mariabackup/xb_regex.h
@@ -0,0 +1,71 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+/* This file is required to abstract away regex(3) calls so that
+my_regex is used on Windows and native calls are used on POSIX platforms. */
+
+#ifndef XB_REGEX_H
+#define XB_REGEX_H
+
+#ifdef _WIN32
+
+#include <my_regex.h>
+
+typedef my_regex_t xb_regex_t;
+typedef my_regmatch_t xb_regmatch_t;
+
+#define xb_regex_init() my_regex_init(&my_charset_latin1)
+
+#define xb_regexec(preg,string,nmatch,pmatch,eflags) \
+ my_regexec(preg, string, nmatch, pmatch, eflags)
+
+#define xb_regerror(errcode,preg,errbuf,errbuf_size) \
+ my_regerror(errcode, preg, errbuf, errbuf_size)
+
+#define xb_regcomp(preg,regex,cflags) \
+ my_regcomp(preg, regex, cflags, &my_charset_latin1)
+
+#define xb_regfree(preg) my_regfree(preg)
+
+#define xb_regex_end() my_regex_end()
+
+#else /* ! _WIN32 */
+
+#include <regex.h>
+
+typedef regex_t xb_regex_t;
+typedef regmatch_t xb_regmatch_t;
+
+#define xb_regex_init() do { } while(0)
+
+#define xb_regexec(preg,string,nmatch,pmatch,eflags) \
+ regexec(preg, string, nmatch, pmatch, eflags)
+
+#define xb_regerror(errcode,preg,errbuf,errbuf_size) \
+ regerror(errcode, preg, errbuf, errbuf_size)
+
+#define xb_regcomp(preg,regex,cflags) \
+ regcomp(preg, regex, cflags)
+
+#define xb_regfree(preg) regfree(preg)
+
+#define xb_regex_end() do { } while (0)
+
+#endif /* _WIN32 */
+
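+/* A minimal usage sketch of the wrappers above (assuming the platform's
+REG_EXTENDED and REG_NOSUB flags):
+
+	xb_regex_t	re;
+	int		matched;
+
+	xb_regex_init();
+	if (xb_regcomp(&re, "^ibdata[0-9]+$", REG_EXTENDED | REG_NOSUB) == 0) {
+		matched = (xb_regexec(&re, "ibdata1", 0, NULL, 0) == 0);
+		xb_regfree(&re);
+	}
+	xb_regex_end();
+*/
+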
+#endif /* XB_REGEX_H */
diff --git a/extra/mariabackup/xbcloud.cc b/extra/mariabackup/xbcloud.cc
new file mode 100644
index 00000000000..56661b03dd0
--- /dev/null
+++ b/extra/mariabackup/xbcloud.cc
@@ -0,0 +1,2721 @@
+/******************************************************
+Copyright (c) 2014 Percona LLC and/or its affiliates.
+
+The xbcloud utility: upload and download backup streams to/from cloud object storage.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <my_global.h>
+#include <my_default.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <curl/curl.h>
+#include <ev.h>
+#include <unistd.h>
+#include <errno.h>
+#include <gcrypt.h>
+#include <assert.h>
+#include <my_sys.h>
+#include <my_dir.h>
+#include <my_getopt.h>
+#include <algorithm>
+#include <map>
+#include <string>
+#include <jsmn.h>
+#include "xbstream.h"
+
+using std::min;
+using std::max;
+using std::map;
+using std::string;
+
+#define XBCLOUD_VERSION "1.0"
+
+#define SWIFT_MAX_URL_SIZE 8192
+#define SWIFT_MAX_HDR_SIZE 8192
+
+#define SWIFT_CHUNK_SIZE 11 * 1024 * 1024
+
+#if ((LIBCURL_VERSION_MAJOR >= 7) && (LIBCURL_VERSION_MINOR >= 16))
+#define OLD_CURL_MULTI 0
+#else
+#define OLD_CURL_MULTI 1
+#endif
+
+/*****************************************************************************/
+
+typedef struct swift_auth_info_struct swift_auth_info;
+typedef struct connection_info_struct connection_info;
+typedef struct socket_info_struct socket_info;
+typedef struct global_io_info_struct global_io_info;
+typedef struct slo_chunk_struct slo_chunk;
+typedef struct container_list_struct container_list;
+typedef struct object_info_struct object_info;
+
+struct swift_auth_info_struct {
+ char url[SWIFT_MAX_URL_SIZE];
+ char token[SWIFT_MAX_HDR_SIZE];
+};
+
+struct global_io_info_struct {
+ struct ev_loop *loop;
+ struct ev_io input_event;
+ struct ev_timer timer_event;
+ CURLM *multi;
+ int still_running;
+ int eof;
+ curl_socket_t input_fd;
+ connection_info **connections;
+ long chunk_no;
+ connection_info *current_connection;
+ const char *url;
+ const char *container;
+ const char *token;
+ const char *backup_name;
+};
+
+struct socket_info_struct {
+ curl_socket_t sockfd;
+ CURL *easy;
+ int action;
+ long timeout;
+ struct ev_io ev;
+ int evset;
+ global_io_info *global;
+};
+
+struct connection_info_struct {
+ CURL *easy;
+ global_io_info *global;
+ char *buffer;
+ size_t buffer_size;
+ size_t filled_size;
+ size_t upload_size;
+ bool chunk_uploaded;
+ bool chunk_acked;
+ char error[CURL_ERROR_SIZE];
+ struct curl_slist *slist;
+ char *name;
+ size_t name_len;
+ char hash[33];
+ size_t chunk_no;
+ bool magic_verified;
+ size_t chunk_path_len;
+ xb_chunk_type_t chunk_type;
+ size_t payload_size;
+ size_t chunk_size;
+ int retry_count;
+ bool upload_started;
+ ulong global_idx;
+};
+
+struct slo_chunk_struct {
+ char name[SWIFT_MAX_URL_SIZE];
+ char md5[33];
+ int idx;
+ size_t size;
+};
+
+struct object_info_struct {
+ char hash[33];
+ char name[SWIFT_MAX_URL_SIZE];
+ size_t bytes;
+};
+
+struct container_list_struct {
+ size_t content_length;
+ size_t content_bufsize;
+ char *content_json;
+ size_t object_count;
+ size_t idx;
+ object_info *objects;
+ bool final;
+};
+
+enum {SWIFT, S3};
+const char *storage_names[] =
+{ "SWIFT", "S3", NullS};
+
+static my_bool opt_verbose = 0;
+static ulong opt_storage = SWIFT;
+static const char *opt_swift_user = NULL;
+static const char *opt_swift_user_id = NULL;
+static const char *opt_swift_password = NULL;
+static const char *opt_swift_tenant = NULL;
+static const char *opt_swift_tenant_id = NULL;
+static const char *opt_swift_project = NULL;
+static const char *opt_swift_project_id = NULL;
+static const char *opt_swift_domain = NULL;
+static const char *opt_swift_domain_id = NULL;
+static const char *opt_swift_region = NULL;
+static const char *opt_swift_container = NULL;
+static const char *opt_swift_storage_url = NULL;
+static const char *opt_swift_auth_url = NULL;
+static const char *opt_swift_key = NULL;
+static const char *opt_swift_auth_version = NULL;
+static const char *opt_name = NULL;
+static const char *opt_cacert = NULL;
+static ulong opt_parallel = 1;
+static my_bool opt_insecure = 0;
+static enum {MODE_GET, MODE_PUT, MODE_DELETE} opt_mode;
+
+static char **file_list = NULL;
+static int file_list_size = 0;
+
+TYPELIB storage_typelib =
+{array_elements(storage_names)-1, "", storage_names, NULL};
+
+enum {
+ OPT_STORAGE = 256,
+ OPT_SWIFT_CONTAINER,
+ OPT_SWIFT_AUTH_URL,
+ OPT_SWIFT_KEY,
+ OPT_SWIFT_USER,
+ OPT_SWIFT_USER_ID,
+ OPT_SWIFT_PASSWORD,
+ OPT_SWIFT_TENANT,
+ OPT_SWIFT_TENANT_ID,
+ OPT_SWIFT_PROJECT,
+ OPT_SWIFT_PROJECT_ID,
+ OPT_SWIFT_DOMAIN,
+ OPT_SWIFT_DOMAIN_ID,
+ OPT_SWIFT_REGION,
+ OPT_SWIFT_STORAGE_URL,
+ OPT_SWIFT_AUTH_VERSION,
+ OPT_PARALLEL,
+ OPT_CACERT,
+ OPT_INSECURE,
+ OPT_VERBOSE
+};
+
+
+static struct my_option my_long_options[] =
+{
+ {"help", '?', "Display this help and exit.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"storage", OPT_STORAGE, "Specify storage type S3/SWIFT.",
+ &opt_storage, &opt_storage, &storage_typelib,
+ GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"swift-auth-version", OPT_SWIFT_AUTH_VERSION,
+   "Swift authentication version to use.",
+ &opt_swift_auth_version, &opt_swift_auth_version, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-container", OPT_SWIFT_CONTAINER,
+ "Swift container to store backups into.",
+ &opt_swift_container, &opt_swift_container, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-user", OPT_SWIFT_USER,
+ "Swift user name.",
+ &opt_swift_user, &opt_swift_user, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-user-id", OPT_SWIFT_USER_ID,
+ "Swift user ID.",
+ &opt_swift_user_id, &opt_swift_user_id, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-auth-url", OPT_SWIFT_AUTH_URL,
+ "Base URL of SWIFT authentication service.",
+ &opt_swift_auth_url, &opt_swift_auth_url, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-storage-url", OPT_SWIFT_STORAGE_URL,
+ "URL of object-store endpoint. Usually received from authentication "
+ "service. Specify to override this value.",
+ &opt_swift_storage_url, &opt_swift_storage_url, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-key", OPT_SWIFT_KEY,
+ "Swift key.",
+ &opt_swift_key, &opt_swift_key, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-tenant", OPT_SWIFT_TENANT,
+ "The tenant name. Both the --swift-tenant and --swift-tenant-id "
+ "options are optional, but should not be specified together.",
+ &opt_swift_tenant, &opt_swift_tenant, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-tenant-id", OPT_SWIFT_TENANT_ID,
+ "The tenant ID. Both the --swift-tenant and --swift-tenant-id "
+ "options are optional, but should not be specified together.",
+ &opt_swift_tenant_id, &opt_swift_tenant_id, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-project", OPT_SWIFT_PROJECT,
+ "The project name.",
+ &opt_swift_project, &opt_swift_project, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-project-id", OPT_SWIFT_PROJECT_ID,
+ "The project ID.",
+ &opt_swift_project_id, &opt_swift_project_id, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-domain", OPT_SWIFT_DOMAIN,
+ "The domain name.",
+ &opt_swift_domain, &opt_swift_domain, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-domain-id", OPT_SWIFT_DOMAIN_ID,
+ "The domain ID.",
+ &opt_swift_domain_id, &opt_swift_domain_id, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-password", OPT_SWIFT_PASSWORD,
+ "The password of the user.",
+ &opt_swift_password, &opt_swift_password, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"swift-region", OPT_SWIFT_REGION,
+   "The region of the object-store endpoint.",
+ &opt_swift_region, &opt_swift_region, 0,
+ GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"parallel", OPT_PARALLEL,
+ "Number of parallel chunk uploads.",
+ &opt_parallel, &opt_parallel, 0, GET_ULONG, REQUIRED_ARG,
+ 1, 0, 0, 0, 0, 0},
+
+ {"cacert", OPT_CACERT,
+ "CA certificate file.",
+ &opt_cacert, &opt_cacert, 0, GET_STR_ALLOC, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"insecure", OPT_INSECURE,
+ "Do not verify server SSL certificate.",
+ &opt_insecure, &opt_insecure, 0, GET_BOOL, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"verbose", OPT_VERBOSE,
+ "Turn ON cURL tracing.",
+ &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+/* The values of these arguments should be masked
+ on the command line */
+static const char * const masked_args[] = {
+ "--swift-password",
+ "--swift-key",
+ "--swift-auth-url",
+ "--swift-storage-url",
+ "--swift-container",
+ "--swift-user",
+ "--swift-tenant",
+ "--swift-user-id",
+ "--swift-tenant-id",
+ 0
+};
+
+static map<string, ulonglong> file_chunk_count;
+
+static
+void
+print_version()
+{
+ printf("%s Ver %s for %s (%s)\n", my_progname, XBCLOUD_VERSION,
+ SYSTEM_TYPE, MACHINE_TYPE);
+}
+
+static
+void
+usage()
+{
+ print_version();
+ puts("Copyright (C) 2015 Percona LLC and/or its affiliates.");
+ puts("This software comes with ABSOLUTELY NO WARRANTY. "
+ "This is free software,\nand you are welcome to modify and "
+ "redistribute it under the GPL license.\n");
+
+ puts("Manage backups on Cloud services.\n");
+
+ puts("Usage: ");
+	printf("  %s put [OPTIONS...] <NAME>  upload a backup read from "
+	       "STDIN into the cloud service under the given name.\n",
+	       my_progname);
+	printf("  %s get [OPTIONS...] <NAME> [FILES...]  stream the specified "
+	       "backup, or individual files from it, into STDOUT.\n",
+	       my_progname);
+	printf("  %s delete [OPTIONS...] <NAME>  delete the specified backup "
+	       "from the cloud service.\n", my_progname);
+
+ puts("\nOptions:");
+ my_print_help(my_long_options);
+}
+
+static
+my_bool
+get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
+ char *argument __attribute__((unused)))
+{
+ switch (optid) {
+ case '?':
+ usage();
+ exit(0);
+ }
+
+ return(FALSE);
+}
+
+static const char *load_default_groups[]=
+ { "xbcloud", 0 };
+
+/*********************************************************************//**
+mask sensitive values on the command line */
+static
+void
+mask_args(int argc, char **argv)
+{
+ int i;
+ for (i = 0; i < argc-1; i++) {
+ int j = 0;
+ if (argv[i]) while (masked_args[j]) {
+ char *p;
+ if ((p = strstr(argv[i], masked_args[j]))) {
+ p += strlen(masked_args[j]);
+ while (*p && *p != '=') {
+ p++;
+ }
+ if (*p == '=') {
+ p++;
+ while (*p) {
+ *p++ = 'x';
+ }
+ }
+ }
+ j++;
+ }
+ }
+}
+
+static
+int parse_args(int argc, char **argv)
+{
+ const char *command;
+
+ if (argc < 2) {
+ fprintf(stderr, "Command isn't specified. "
+			"Supported commands are put, get and delete\n");
+ usage();
+ exit(EXIT_FAILURE);
+ }
+
+ command = argv[1];
+ argc--; argv++;
+
+ if (strcasecmp(command, "put") == 0) {
+ opt_mode = MODE_PUT;
+ } else if (strcasecmp(command, "get") == 0) {
+ opt_mode = MODE_GET;
+ } else if (strcasecmp(command, "delete") == 0) {
+ opt_mode = MODE_DELETE;
+ } else {
+ fprintf(stderr, "Unknown command %s. "
+			"Supported commands are put, get and delete\n", command);
+ usage();
+ exit(EXIT_FAILURE);
+ }
+
+ if (load_defaults("my", load_default_groups, &argc, &argv)) {
+ exit(EXIT_FAILURE);
+ }
+
+ if (handle_options(&argc, &argv, my_long_options, get_one_option)) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* make sure name is specified */
+ if (argc < 1) {
+		fprintf(stderr, "Backup name is a required argument\n");
+ exit(EXIT_FAILURE);
+ }
+ opt_name = argv[0];
+ argc--; argv++;
+
+ /* validate arguments */
+ if (opt_storage == SWIFT) {
+ if (opt_swift_user == NULL) {
+ fprintf(stderr, "Swift user is not specified\n");
+ exit(EXIT_FAILURE);
+ }
+ if (opt_swift_container == NULL) {
+ fprintf(stderr,
+ "Swift container is not specified\n");
+ exit(EXIT_FAILURE);
+ }
+ if (opt_swift_auth_url == NULL) {
+ fprintf(stderr, "Swift auth URL is not specified\n");
+ exit(EXIT_FAILURE);
+ }
+ } else {
+		fprintf(stderr, "Swift is the only supported storage API\n");
+ }
+
+ if (argc > 0) {
+ file_list = argv;
+ file_list_size = argc;
+ }
+
+ return(0);
+}
+
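+/* Render a 16-byte MD5 digest as a 32-character lowercase hex string */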
+static char *hex_md5(const unsigned char *hash, char *out)
+{
+ enum { hash_len = 16 };
+ char *p;
+ int i;
+
+ for (i = 0, p = out; i < hash_len; i++, p+=2) {
+ sprintf(p, "%02x", hash[i]);
+ }
+
+ return out;
+}
+
+/* If the header starts with the given prefix, its value is copied into the
+   output buffer */
+static
+int get_http_header(const char *prefix, const char *buffer,
+ char *out, size_t out_size)
+{
+ const char *beg, *end;
+ size_t len, prefix_len;
+
+ prefix_len = strlen(prefix);
+
+ if (strncasecmp(buffer, prefix, prefix_len) == 0) {
+ beg = buffer + prefix_len;
+ end = strchr(beg, '\r');
+
+ len = min<size_t>(end - beg, out_size - 1);
+
+ strncpy(out, beg, len);
+
+ out[len] = 0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static
+size_t swift_auth_header_read_cb(char *ptr, size_t size, size_t nmemb,
+ void *data)
+{
+ swift_auth_info *info = (swift_auth_info*)(data);
+
+ get_http_header("X-Storage-Url: ", ptr,
+ info->url, array_elements(info->url));
+ get_http_header("X-Auth-Token: ", ptr,
+ info->token, array_elements(info->token));
+
+ return nmemb * size;
+}
+
+/*********************************************************************//**
+Authenticate against Swift TempAuth. Fills swift_auth_info struct.
+Uses credentials provided as global variables.
+@returns true if access is granted and token received. */
+static
+bool
+swift_temp_auth(const char *auth_url, swift_auth_info *info)
+{
+ CURL *curl;
+ CURLcode res;
+ long http_code;
+ char *hdr_buf = NULL;
+ struct curl_slist *slist = NULL;
+
+ if (opt_swift_user == NULL) {
+ fprintf(stderr, "Swift user must be specified for TempAuth.\n");
+ return(false);
+ }
+
+ if (opt_swift_key == NULL) {
+ fprintf(stderr, "Swift key must be specified for TempAuth.\n");
+ return(false);
+ }
+
+ curl = curl_easy_init();
+
+ if (curl != NULL) {
+
+ hdr_buf = (char *)(calloc(14 + max(strlen(opt_swift_user),
+ strlen(opt_swift_key)), 1));
+
+ if (!hdr_buf) {
+ res = CURLE_FAILED_INIT;
+ goto cleanup;
+ }
+
+ sprintf(hdr_buf, "X-Auth-User: %s", opt_swift_user);
+ slist = curl_slist_append(slist, hdr_buf);
+
+ sprintf(hdr_buf, "X-Auth-Key: %s", opt_swift_key);
+ slist = curl_slist_append(slist, hdr_buf);
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(curl, CURLOPT_URL, auth_url);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
+ swift_auth_header_read_cb);
+ curl_easy_setopt(curl, CURLOPT_HEADERDATA, info);
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
+ if (opt_cacert != NULL)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ fprintf(stderr, "error: authentication failed: "
+ "curl_easy_perform(): %s\n",
+ curl_easy_strerror(res));
+ goto cleanup;
+ }
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+ if (http_code != 200 &&
+ http_code != 204) {
+ fprintf(stderr, "error: authentication failed "
+ "with response code: %ld\n", http_code);
+ res = CURLE_LOGIN_DENIED;
+ goto cleanup;
+ }
+ } else {
+ res = CURLE_FAILED_INIT;
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ goto cleanup;
+ }
+
+cleanup:
+ if (hdr_buf) {
+ free(hdr_buf);
+ }
+ if (slist) {
+ curl_slist_free_all(slist);
+ }
+ if (curl) {
+ curl_easy_cleanup(curl);
+ }
+
+ if (res == CURLE_OK) {
+ /* check that we received token and storage URL */
+ if (*info->url == 0) {
+ fprintf(stderr, "error: malformed response: "
+ "X-Storage-Url is missing\n");
+ return(false);
+ }
+ if (*info->token == 0) {
+ fprintf(stderr, "error: malformed response: "
+ "X-Auth-Token is missing\n");
+ return(false);
+ }
+ return(true);
+ }
+
+ return(false);
+}
+
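+/* WRITEFUNCTION callback for control requests: the response body is not
+   needed, it is just forwarded to stderr */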
+static
+size_t
+write_null_cb(char *buffer, size_t size, size_t nmemb, void *stream)
+{
+ return fwrite(buffer, size, nmemb, stderr);
+}
+
+
+static
+size_t
+read_null_cb(char *ptr, size_t size, size_t nmemb, void *data)
+{
+ return 0;
+}
+
+
+static
+int
+swift_create_container(swift_auth_info *info, const char *name)
+{
+ char url[SWIFT_MAX_URL_SIZE];
+ char auth_token[SWIFT_MAX_HDR_SIZE];
+ CURLcode res;
+ long http_code;
+ CURL *curl;
+ struct curl_slist *slist = NULL;
+
+ snprintf(url, array_elements(url), "%s/%s", info->url, name);
+ snprintf(auth_token, array_elements(auth_token), "X-Auth-Token: %s",
+ info->token);
+
+ curl = curl_easy_init();
+
+ if (curl != NULL) {
+ slist = curl_slist_append(slist, auth_token);
+ slist = curl_slist_append(slist, "Content-Length: 0");
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(curl, CURLOPT_URL, url);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
+ curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_null_cb);
+ curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_null_cb);
+ curl_easy_setopt(curl, CURLOPT_INFILESIZE, 0L);
+ curl_easy_setopt(curl, CURLOPT_PUT, 1L);
+ if (opt_cacert != NULL)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ fprintf(stderr,
+ "error: curl_easy_perform() failed: %s\n",
+ curl_easy_strerror(res));
+ goto cleanup;
+ }
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+ if (http_code != 201 && /* created */
+ http_code != 202 /* accepted (already exists) */) {
+ fprintf(stderr, "error: request failed "
+ "with response code: %ld\n", http_code);
+ res = CURLE_LOGIN_DENIED;
+ goto cleanup;
+ }
+ } else {
+ res = CURLE_FAILED_INIT;
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ goto cleanup;
+ }
+
+cleanup:
+ if (slist) {
+ curl_slist_free_all(slist);
+ }
+ if (curl) {
+ curl_easy_cleanup(curl);
+ }
+
+ return res;
+}
+
+
+/*********************************************************************//**
+Delete object with given url.
+@returns true if object deleted successfully. */
+static
+bool
+swift_delete_object(swift_auth_info *info, const char *url)
+{
+ char auth_token[SWIFT_MAX_HDR_SIZE];
+ CURLcode res;
+ long http_code;
+ CURL *curl;
+ struct curl_slist *slist = NULL;
+ bool ret = false;
+
+ snprintf(auth_token, array_elements(auth_token), "X-Auth-Token: %s",
+ info->token);
+
+ curl = curl_easy_init();
+
+ if (curl != NULL) {
+ slist = curl_slist_append(slist, auth_token);
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(curl, CURLOPT_URL, url);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
+ curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
+ if (opt_cacert != NULL)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ fprintf(stderr,
+ "error: curl_easy_perform() failed: %s\n",
+ curl_easy_strerror(res));
+ goto cleanup;
+ }
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+ if (http_code != 200 && /* OK */
+ http_code != 204 /* no content */) {
+ fprintf(stderr, "error: request failed "
+ "with response code: %ld\n", http_code);
+ goto cleanup;
+ }
+ ret = true;
+ } else {
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ goto cleanup;
+ }
+
+cleanup:
+ if (slist) {
+ curl_slist_free_all(slist);
+ }
+ if (curl) {
+ curl_easy_cleanup(curl);
+ }
+
+ return ret;
+}
+
+static int conn_upload_init(connection_info *conn);
+static void conn_buffer_updated(connection_info *conn);
+static connection_info *conn_new(global_io_info *global, ulong global_idx);
+static void conn_cleanup(connection_info *conn);
+static void conn_upload_retry(connection_info *conn);
+
+/* Check for completed transfers, and remove their easy handles */
+static void check_multi_info(global_io_info *g)
+{
+ char *eff_url;
+ CURLMsg *msg;
+ int msgs_left;
+ connection_info *conn;
+ CURL *easy;
+
+ while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
+ if (msg->msg == CURLMSG_DONE) {
+ easy = msg->easy_handle;
+ curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
+ curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL,
+ &eff_url);
+ curl_multi_remove_handle(g->multi, easy);
+ curl_easy_cleanup(easy);
+ conn->easy = NULL;
+ if (conn->chunk_acked) {
+ conn->chunk_uploaded = true;
+ fprintf(stderr, "%s is done\n", conn->hash);
+ } else {
+ fprintf(stderr, "error: chunk %zu '%s' %s "
+ "is not uploaded, but socket closed "
+ "(%zu bytes of %zu left to upload)\n",
+ conn->chunk_no,
+ conn->name,
+ conn->hash,
+ conn->chunk_size - conn->upload_size,
+ conn->chunk_size);
+ conn_upload_retry(conn);
+ }
+ }
+ }
+}
+
+/* Die if we get a bad CURLMcode somewhere */
+static void mcode_or_die(const char *where, CURLMcode code)
+{
+ if (code != CURLM_OK)
+ {
+ const char *s;
+ switch (code)
+ {
+ case CURLM_BAD_HANDLE:
+ s = "CURLM_BAD_HANDLE";
+ break;
+ case CURLM_BAD_EASY_HANDLE:
+ s = "CURLM_BAD_EASY_HANDLE";
+ break;
+ case CURLM_OUT_OF_MEMORY:
+ s = "CURLM_OUT_OF_MEMORY";
+ break;
+ case CURLM_INTERNAL_ERROR:
+ s = "CURLM_INTERNAL_ERROR";
+ break;
+ case CURLM_UNKNOWN_OPTION:
+ s = "CURLM_UNKNOWN_OPTION";
+ break;
+ case CURLM_LAST:
+ s = "CURLM_LAST";
+ break;
+ default:
+ s = "CURLM_unknown";
+ break;
+ case CURLM_BAD_SOCKET:
+ s = "CURLM_BAD_SOCKET";
+ fprintf(stderr, "error: %s returns (%d) %s\n",
+ where, code, s);
+ /* ignore this error */
+ return;
+ }
+ fprintf(stderr, "error: %s returns (%d) %s\n",
+ where, code, s);
+ assert(0);
+ }
+}
+
+/* Called by libev when we get action on a multi socket */
+static void event_cb(EV_P_ struct ev_io *w, int revents)
+{
+ global_io_info *global = (global_io_info*)(w->data);
+ CURLMcode rc;
+
+#if !(OLD_CURL_MULTI)
+ int action = (revents & EV_READ ? CURL_POLL_IN : 0) |
+ (revents & EV_WRITE ? CURL_POLL_OUT : 0);
+
+ do {
+ rc = curl_multi_socket_action(global->multi, w->fd, action,
+ &global->still_running);
+ } while (rc == CURLM_CALL_MULTI_PERFORM);
+#else
+ do {
+ rc = curl_multi_socket(global->multi, w->fd,
+ &global->still_running);
+ } while (rc == CURLM_CALL_MULTI_PERFORM);
+#endif
+ mcode_or_die("error: event_cb: curl_multi_socket_action", rc);
+ check_multi_info(global);
+ if (global->still_running <= 0) {
+ ev_timer_stop(global->loop, &global->timer_event);
+ }
+}
+
+static void remsock(curl_socket_t s, socket_info *fdp, global_io_info *global)
+{
+ if (fdp) {
+ if (fdp->evset) {
+ ev_io_stop(global->loop, &fdp->ev);
+ }
+ free(fdp);
+ }
+}
+
+static void setsock(socket_info *fdp, curl_socket_t s, CURL *easy, int action,
+ global_io_info *global)
+{
+ int kind = (action & CURL_POLL_IN ? (int)(EV_READ) : 0) |
+ (action & CURL_POLL_OUT ? (int)(EV_WRITE) : 0);
+
+ fdp->sockfd = s;
+ fdp->action = action;
+ fdp->easy = easy;
+ if (fdp->evset)
+ ev_io_stop(global->loop, &fdp->ev);
+ ev_io_init(&fdp->ev, event_cb, fdp->sockfd, kind);
+ fdp->ev.data = global;
+ fdp->evset = 1;
+ ev_io_start(global->loop, &fdp->ev);
+}
+
+static void addsock(curl_socket_t s, CURL *easy, int action,
+ global_io_info *global)
+{
+ socket_info *fdp = (socket_info *)(calloc(sizeof(socket_info), 1));
+
+ fdp->global = global;
+ setsock(fdp, s, easy, action, global);
+ curl_multi_assign(global->multi, s, fdp);
+}
+
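+/* CURLMOPT_SOCKETFUNCTION callback: add, update or remove the libev watcher
+   for a socket as requested by cURL */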
+static int sock_cb(CURL *easy, curl_socket_t s, int what, void *cbp,
+ void *sockp)
+{
+ global_io_info *global = (global_io_info*)(cbp);
+ socket_info *fdp = (socket_info*)(sockp);
+
+ if (what == CURL_POLL_REMOVE) {
+ remsock(s, fdp, global);
+ } else {
+ if (!fdp) {
+ addsock(s, easy, what, global);
+ } else {
+ setsock(fdp, s, easy, what, global);
+ }
+ }
+ return 0;
+}
+
+/* Called by libev when our timeout expires */
+static void timer_cb(EV_P_ struct ev_timer *w, int revents)
+{
+ global_io_info *io_global = (global_io_info*)(w->data);
+ CURLMcode rc;
+
+#if !(OLD_CURL_MULTI)
+ do {
+ rc = curl_multi_socket_action(io_global->multi,
+ CURL_SOCKET_TIMEOUT, 0,
+ &io_global->still_running);
+ } while (rc == CURLM_CALL_MULTI_PERFORM);
+#else
+ do {
+ rc = curl_multi_socket_all(io_global->multi,
+ &io_global->still_running);
+ } while (rc == CURLM_CALL_MULTI_PERFORM);
+#endif
+ mcode_or_die("timer_cb: curl_multi_socket_action", rc);
+ check_multi_info(io_global);
+}
+
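+/* Pick the connection to fill from the input: keep filling the current
+   chunk, otherwise reset and reuse the first idle or already-uploaded slot */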
+static connection_info *get_current_connection(global_io_info *global)
+{
+ connection_info *conn = global->current_connection;
+ ulong i;
+
+ if (conn && conn->filled_size < conn->chunk_size)
+ return conn;
+
+ for (i = 0; i < opt_parallel; i++) {
+ conn = global->connections[i];
+ if (conn->chunk_uploaded || conn->filled_size == 0) {
+ global->current_connection = conn;
+ conn_upload_init(conn);
+ return conn;
+ }
+ }
+
+ return NULL;
+}
+
+/* This gets called whenever data is received from the input */
+static void input_cb(EV_P_ struct ev_io *w, int revents)
+{
+ global_io_info *io_global = (global_io_info *)(w->data);
+ connection_info *conn = get_current_connection(io_global);
+
+ if (conn == NULL)
+ return;
+
+ if (conn->filled_size < conn->chunk_size) {
+ if (revents & EV_READ) {
+ ssize_t nbytes = read(io_global->input_fd,
+ conn->buffer + conn->filled_size,
+ conn->chunk_size -
+ conn->filled_size);
+ if (nbytes > 0) {
+ conn->filled_size += nbytes;
+ conn_buffer_updated(conn);
+ } else if (nbytes < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ char error[200];
+ my_strerror(error, sizeof(error),
+ errno);
+ fprintf(stderr, "error: failed to read "
+ "input stream (%s)\n", error);
+ /* failed to read input */
+ exit(1);
+ }
+ } else {
+ io_global->eof = 1;
+ ev_io_stop(io_global->loop, w);
+ }
+ }
+ }
+
+ assert(conn->filled_size <= conn->chunk_size);
+}
+
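+/* cURL read callback: hand buffered chunk data to the upload, refilling the
+   buffer from the input stream if it has been drained */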
+static int swift_upload_read_cb(char *ptr, size_t size, size_t nmemb,
+ void *data)
+{
+ size_t realsize;
+
+ connection_info *conn = (connection_info*)(data);
+
+ if (conn->filled_size == conn->upload_size &&
+ conn->upload_size < conn->chunk_size && !conn->global->eof) {
+ ssize_t nbytes;
+ assert(conn->global->current_connection == conn);
+ do {
+ nbytes = read(conn->global->input_fd,
+ conn->buffer + conn->filled_size,
+ conn->chunk_size - conn->filled_size);
+ } while (nbytes == -1 && errno == EAGAIN);
+ if (nbytes > 0) {
+ conn->filled_size += nbytes;
+ conn_buffer_updated(conn);
+ } else {
+ conn->global->eof = 1;
+ }
+ }
+
+ realsize = min(size * nmemb, conn->filled_size - conn->upload_size);
+
+ memcpy(ptr, conn->buffer + conn->upload_size, realsize);
+ conn->upload_size += realsize;
+
+ assert(conn->filled_size <= conn->chunk_size);
+ assert(conn->upload_size <= conn->filled_size);
+
+ return realsize;
+}
+
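+/* Header callback for chunk uploads: compare the Etag returned by the server
+   with the locally computed MD5 and mark the chunk as acknowledged */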
+static
+size_t upload_header_read_cb(char *ptr, size_t size, size_t nmemb,
+ void *data)
+{
+ connection_info *conn = (connection_info *)(data);
+ char etag[33];
+
+ if (get_http_header("Etag: ", ptr, etag, array_elements(etag))) {
+ if (strcmp(conn->hash, etag) != 0) {
+ fprintf(stderr, "error: ETag mismatch\n");
+ exit(EXIT_FAILURE);
+ }
+ fprintf(stderr, "acked chunk %s\n", etag);
+ conn->chunk_acked = true;
+ }
+
+ return nmemb * size;
+}
+
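+/* Reset per-chunk state before the next chunk header is read from the input
+   stream */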
+static int conn_upload_init(connection_info *conn)
+{
+ conn->filled_size = 0;
+ conn->upload_size = 0;
+ conn->chunk_uploaded = false;
+ conn->chunk_acked = false;
+ conn->chunk_size = CHUNK_HEADER_CONSTANT_LEN;
+ conn->magic_verified = false;
+ conn->chunk_path_len = 0;
+ conn->chunk_type = XB_CHUNK_TYPE_UNKNOWN;
+ conn->payload_size = 0;
+ conn->upload_started = false;
+ conn->retry_count = 0;
+ if (conn->name != NULL) {
+ conn->name[0] = 0;
+ }
+
+ if (conn->easy != NULL) {
+ conn->easy = 0;
+ }
+
+ if (conn->slist != NULL) {
+ curl_slist_free_all(conn->slist);
+ conn->slist = NULL;
+ }
+
+ return 0;
+}
+
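+/* Compute the MD5 of the buffered chunk; it is sent as the ETag so that the
+   server can verify the uploaded data */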
+static void conn_upload_prepare(connection_info *conn)
+{
+ gcry_md_hd_t md5;
+
+ gcry_md_open(&md5, GCRY_MD_MD5, 0);
+ gcry_md_write(md5, conn->buffer, conn->chunk_size);
+ hex_md5(gcry_md_read(md5, GCRY_MD_MD5), conn->hash);
+ gcry_md_close(md5);
+}
+
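+/* Build the PUT request for the buffered chunk and add its easy handle to
+   the cURL multi handle */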
+static int conn_upload_start(connection_info *conn)
+{
+ char token_header[SWIFT_MAX_HDR_SIZE];
+ char object_url[SWIFT_MAX_URL_SIZE];
+ char content_len[200], etag[200];
+ global_io_info *global;
+ CURLMcode rc;
+
+ global = conn->global;
+
+ fprintf(stderr, "uploading chunk %s/%s/%s.%020zu "
+ "(md5: %s, size: %zu)\n",
+ global->container, global->backup_name, conn->name,
+ conn->chunk_no, conn->hash, conn->chunk_size);
+
+ snprintf(object_url, array_elements(object_url), "%s/%s/%s/%s.%020zu",
+ global->url, global->container, global->backup_name,
+ conn->name, conn->chunk_no);
+
+ snprintf(content_len, sizeof(content_len), "Content-Length: %lu",
+ (ulong)(conn->chunk_size));
+
+ snprintf(etag, sizeof(etag), "ETag: %s", conn->hash);
+
+ snprintf(token_header, array_elements(token_header),
+ "X-Auth-Token: %s", global->token);
+
+ conn->slist = curl_slist_append(conn->slist, token_header);
+ conn->slist = curl_slist_append(conn->slist,
+ "Connection: keep-alive");
+ conn->slist = curl_slist_append(conn->slist,
+ "Content-Type: "
+ "application/octet-stream");
+ conn->slist = curl_slist_append(conn->slist, content_len);
+ conn->slist = curl_slist_append(conn->slist, etag);
+
+ conn->easy = curl_easy_init();
+ if (!conn->easy) {
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ return 1;
+ }
+ curl_easy_setopt(conn->easy, CURLOPT_URL, object_url);
+ curl_easy_setopt(conn->easy, CURLOPT_READFUNCTION,
+ swift_upload_read_cb);
+ curl_easy_setopt(conn->easy, CURLOPT_READDATA, conn);
+ curl_easy_setopt(conn->easy, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(conn->easy, CURLOPT_ERRORBUFFER, conn->error);
+ curl_easy_setopt(conn->easy, CURLOPT_PRIVATE, conn);
+ curl_easy_setopt(conn->easy, CURLOPT_NOPROGRESS, 1L);
+ curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_TIME, 5L);
+ curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_LIMIT, 1024L);
+ curl_easy_setopt(conn->easy, CURLOPT_PUT, 1L);
+ curl_easy_setopt(conn->easy, CURLOPT_HTTPHEADER, conn->slist);
+ curl_easy_setopt(conn->easy, CURLOPT_HEADERFUNCTION,
+ upload_header_read_cb);
+ curl_easy_setopt(conn->easy, CURLOPT_HEADERDATA, conn);
+ curl_easy_setopt(conn->easy, CURLOPT_INFILESIZE,
+ (long) conn->chunk_size);
+ if (opt_cacert != NULL)
+ curl_easy_setopt(conn->easy, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(conn->easy, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ rc = curl_multi_add_handle(conn->global->multi, conn->easy);
+ mcode_or_die("conn_upload_init: curl_multi_add_handle", rc);
+
+#if (OLD_CURL_MULTI)
+ do {
+ rc = curl_multi_socket_all(global->multi,
+ &global->still_running);
+ } while(rc == CURLM_CALL_MULTI_PERFORM);
+#endif
+
+ conn->upload_started = true;
+
+ return 0;
+}
+
+static void conn_cleanup(connection_info *conn)
+{
+ if (conn) {
+ free(conn->name);
+ free(conn->buffer);
+ if (conn->slist) {
+ curl_slist_free_all(conn->slist);
+ conn->slist = NULL;
+ }
+ if (conn->easy) {
+ curl_easy_cleanup(conn->easy);
+ conn->easy = NULL;
+ }
+ }
+ free(conn);
+}
+
+static void conn_upload_retry(connection_info *conn)
+{
+ /* already closed by cURL */
+ conn->easy = NULL;
+
+ if (conn->slist != NULL) {
+ curl_slist_free_all(conn->slist);
+ conn->slist = NULL;
+ }
+
+ if (conn->retry_count++ > 3) {
+ fprintf(stderr, "error: retry count limit reached\n");
+ exit(EXIT_FAILURE);
+ }
+
+	fprintf(stderr, "warning: retrying upload of chunk %zu of '%s'\n",
+ conn->chunk_no, conn->name);
+
+ conn->upload_size = 0;
+
+ conn_upload_start(conn);
+}
+
+static connection_info *conn_new(global_io_info *global, ulong global_idx)
+{
+ connection_info *conn;
+
+ conn = (connection_info *)(calloc(1, sizeof(connection_info)));
+ if (conn == NULL) {
+ goto error;
+ }
+
+ conn->global = global;
+ conn->global_idx = global_idx;
+ conn->buffer_size = SWIFT_CHUNK_SIZE;
+ if ((conn->buffer = (char *)(calloc(conn->buffer_size, 1))) ==
+ NULL) {
+ goto error;
+ }
+
+ return conn;
+
+error:
+ if (conn != NULL) {
+ conn_cleanup(conn);
+ }
+
+ fprintf(stderr, "error: out of memory\n");
+ exit(EXIT_FAILURE);
+
+ return NULL;
+}
+
+/*********************************************************************//**
+Handle input buffer updates. Parse chunk header and set appropriate
+buffer size. */
+static
+void
+conn_buffer_updated(connection_info *conn)
+{
+ bool ready_for_upload = false;
+
+ /* chunk header */
+ if (!conn->magic_verified &&
+ conn->filled_size >= CHUNK_HEADER_CONSTANT_LEN) {
+ if (strncmp(XB_STREAM_CHUNK_MAGIC, conn->buffer,
+ sizeof(XB_STREAM_CHUNK_MAGIC) - 1) != 0) {
+
+ fprintf(stderr, "Error: magic expected\n");
+ exit(EXIT_FAILURE);
+ }
+ conn->magic_verified = true;
+ conn->chunk_path_len = uint4korr(conn->buffer
+ + PATH_LENGTH_OFFSET);
+ conn->chunk_type = (xb_chunk_type_t)
+ (conn->buffer[CHUNK_TYPE_OFFSET]);
+ conn->chunk_size = CHUNK_HEADER_CONSTANT_LEN +
+ conn->chunk_path_len;
+ if (conn->chunk_type != XB_CHUNK_TYPE_EOF) {
+ conn->chunk_size += 16;
+ }
+ }
+
+ /* ordinary chunk */
+ if (conn->magic_verified &&
+ conn->payload_size == 0 &&
+ conn->chunk_type != XB_CHUNK_TYPE_EOF &&
+ conn->filled_size >= CHUNK_HEADER_CONSTANT_LEN
+ + conn->chunk_path_len + 16) {
+
+ conn->payload_size = uint8korr(conn->buffer +
+ CHUNK_HEADER_CONSTANT_LEN +
+ conn->chunk_path_len);
+
+ conn->chunk_size = conn->payload_size + 4 + 16 +
+ conn->chunk_path_len +
+ CHUNK_HEADER_CONSTANT_LEN;
+
+ if (conn->name == NULL) {
+ conn->name = (char*)(malloc(conn->chunk_path_len + 1));
+ } else if (conn->name_len < conn->chunk_path_len + 1) {
+ conn->name = (char*)(realloc(conn->name,
+ conn->chunk_path_len + 1));
+ }
+ conn->name_len = conn->chunk_path_len + 1;
+
+ memcpy(conn->name, conn->buffer + CHUNK_HEADER_CONSTANT_LEN,
+ conn->chunk_path_len);
+ conn->name[conn->chunk_path_len] = 0;
+
+ if (conn->buffer_size < conn->chunk_size) {
+ conn->buffer =
+ (char *)(realloc(conn->buffer, conn->chunk_size));
+ conn->buffer_size = conn->chunk_size;
+ }
+ }
+
+ /* EOF chunk has no payload */
+ if (conn->magic_verified &&
+ conn->chunk_type == XB_CHUNK_TYPE_EOF &&
+ conn->filled_size >= CHUNK_HEADER_CONSTANT_LEN
+ + conn->chunk_path_len) {
+
+ if (conn->name == NULL) {
+ conn->name = (char*)(malloc(conn->chunk_path_len + 1));
+ } else if (conn->name_len < conn->chunk_path_len + 1) {
+ conn->name = (char*)(realloc(conn->name,
+ conn->chunk_path_len + 1));
+ }
+ conn->name_len = conn->chunk_path_len + 1;
+
+ memcpy(conn->name, conn->buffer + CHUNK_HEADER_CONSTANT_LEN,
+ conn->chunk_path_len);
+ conn->name[conn->chunk_path_len] = 0;
+ }
+
+ if (conn->filled_size > 0 && conn->filled_size == conn->chunk_size) {
+ ready_for_upload = true;
+ }
+
+	/* start the upload once the full size of the chunk is known */
+ if (!conn->upload_started && ready_for_upload) {
+ conn->chunk_no = file_chunk_count[conn->name]++;
+ conn_upload_prepare(conn);
+ conn_upload_start(conn);
+ }
+}
+
+static int init_input(global_io_info *io_global)
+{
+ ev_io_init(&io_global->input_event, input_cb, STDIN_FILENO, EV_READ);
+ io_global->input_event.data = io_global;
+ ev_io_start(io_global->loop, &io_global->input_event);
+
+ return 0;
+}
+
+/* Update the event timer after curl_multi library calls */
+static int multi_timer_cb(CURLM *multi, long timeout_ms, global_io_info *global)
+{
+ ev_timer_stop(global->loop, &global->timer_event);
+ if (timeout_ms > 0) {
+ double t = timeout_ms / 1000.0;
+ ev_timer_init(&global->timer_event, timer_cb, t, 0.);
+ ev_timer_start(global->loop, &global->timer_event);
+ } else {
+ timer_cb(global->loop, &global->timer_event, 0);
+ }
+ return 0;
+}
+
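+/*********************************************************************//**
+Read an xbstream from the standard input and upload it to
+<container>/<name> as a series of chunk objects, using up to opt_parallel
+connections at a time.
+@return 0 on success, EXIT_FAILURE if any buffer was left partially uploaded */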
+static
+int swift_upload_parts(swift_auth_info *auth, const char *container,
+ const char *name)
+{
+ global_io_info io_global;
+ ulong i;
+#if (OLD_CURL_MULTI)
+ long timeout;
+#endif
+ CURLMcode rc;
+ int n_dirty_buffers;
+
+ memset(&io_global, 0, sizeof(io_global));
+
+ io_global.loop = ev_default_loop(0);
+ init_input(&io_global);
+ io_global.multi = curl_multi_init();
+ ev_timer_init(&io_global.timer_event, timer_cb, 0., 0.);
+ io_global.timer_event.data = &io_global;
+ io_global.connections = (connection_info **)
+		(calloc(opt_parallel, sizeof(connection_info *)));
+ io_global.url = auth->url;
+ io_global.container = container;
+ io_global.backup_name = name;
+ io_global.token = auth->token;
+ for (i = 0; i < opt_parallel; i++) {
+ io_global.connections[i] = conn_new(&io_global, i);
+ }
+
+ /* setup the generic multi interface options we want */
+ curl_multi_setopt(io_global.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
+ curl_multi_setopt(io_global.multi, CURLMOPT_SOCKETDATA, &io_global);
+#if !(OLD_CURL_MULTI)
+ curl_multi_setopt(io_global.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
+ curl_multi_setopt(io_global.multi, CURLMOPT_TIMERDATA, &io_global);
+ do {
+ rc = curl_multi_socket_action(io_global.multi,
+ CURL_SOCKET_TIMEOUT, 0,
+ &io_global.still_running);
+ } while (rc == CURLM_CALL_MULTI_PERFORM);
+#else
+ curl_multi_timeout(io_global.multi, &timeout);
+ if (timeout >= 0) {
+ multi_timer_cb(io_global.multi, timeout, &io_global);
+ }
+ do {
+ rc = curl_multi_socket_all(io_global.multi, &io_global.still_running);
+ } while(rc == CURLM_CALL_MULTI_PERFORM);
+#endif
+
+ ev_loop(io_global.loop, 0);
+ check_multi_info(&io_global);
+ curl_multi_cleanup(io_global.multi);
+
+ n_dirty_buffers = 0;
+ for (i = 0; i < opt_parallel; i++) {
+ connection_info *conn = io_global.connections[i];
+ if (conn && conn->upload_size != conn->filled_size) {
+ fprintf(stderr, "error: upload failed: %lu bytes left "
+ "in the buffer %s (uploaded = %d)\n",
+ (ulong)(conn->filled_size - conn->upload_size),
+ conn->name, conn->chunk_uploaded);
+ ++n_dirty_buffers;
+ }
+ }
+
+ for (i = 0; i < opt_parallel; i++) {
+ if (io_global.connections[i] != NULL) {
+ conn_cleanup(io_global.connections[i]);
+ }
+ }
+ free(io_global.connections);
+
+ if (n_dirty_buffers > 0) {
+ return(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
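+/* Growable buffer used to collect an HTTP response body; the header callback
+   pre-sizes it from the Content-Length header */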
+struct download_buffer_info {
+ off_t offset;
+ size_t size;
+ size_t result_len;
+ char *buf;
+ curl_read_callback custom_header_callback;
+ void *custom_header_callback_data;
+};
+
+/*********************************************************************//**
+Callback to parse the headers of a GET request on a Swift container. */
+static
+size_t fetch_buffer_header_cb(char *ptr, size_t size, size_t nmemb,
+ void *data)
+{
+ download_buffer_info *buffer_info = (download_buffer_info*)(data);
+ size_t buf_size;
+ char content_length_str[100];
+ char *endptr;
+
+ if (get_http_header("Content-Length: ", ptr,
+ content_length_str, sizeof(content_length_str))) {
+
+ buf_size = strtoull(content_length_str, &endptr, 10);
+
+ if (buffer_info->buf == NULL) {
+ buffer_info->buf = (char*)(malloc(buf_size));
+ buffer_info->size = buf_size;
+ }
+
+ if (buf_size > buffer_info->size) {
+ buffer_info->buf = (char*)
+ (realloc(buffer_info->buf, buf_size));
+ buffer_info->size = buf_size;
+ }
+
+ buffer_info->result_len = buf_size;
+ }
+
+ if (buffer_info->custom_header_callback) {
+ buffer_info->custom_header_callback(ptr, size, nmemb,
+ buffer_info->custom_header_callback_data);
+ }
+
+ return nmemb * size;
+}
+
+/*********************************************************************//**
+Write contents into string buffer */
+static
+size_t
+fetch_buffer_cb(char *buffer, size_t size, size_t nmemb, void *out_buffer)
+{
+ download_buffer_info *buffer_info = (download_buffer_info*)(out_buffer);
+
+ assert(buffer_info->size >= buffer_info->offset + size * nmemb);
+
+ memcpy(buffer_info->buf + buffer_info->offset, buffer, size * nmemb);
+ buffer_info->offset += size * nmemb;
+
+ return size * nmemb;
+}
+
+
+/*********************************************************************//**
+Downloads contents of URL into buffer. Caller is responsible for
+deallocating the buffer.
+@return pointer to a buffer or NULL */
+static
+char *
+swift_fetch_into_buffer(swift_auth_info *auth, const char *url,
+ char **buf, size_t *buf_size, size_t *result_len,
+ curl_read_callback header_callback,
+ void *header_callback_data)
+{
+ char auth_token[SWIFT_MAX_HDR_SIZE];
+ download_buffer_info buffer_info;
+ struct curl_slist *slist = NULL;
+ long http_code;
+ CURL *curl;
+ CURLcode res;
+
+ memset(&buffer_info, 0, sizeof(buffer_info));
+ buffer_info.buf = *buf;
+ buffer_info.size = *buf_size;
+ buffer_info.custom_header_callback = header_callback;
+ buffer_info.custom_header_callback_data = header_callback_data;
+
+ snprintf(auth_token, array_elements(auth_token), "X-Auth-Token: %s",
+ auth->token);
+
+ curl = curl_easy_init();
+
+ if (curl != NULL) {
+ slist = curl_slist_append(slist, auth_token);
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(curl, CURLOPT_URL, url);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
+ curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fetch_buffer_cb);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buffer_info);
+ curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
+ fetch_buffer_header_cb);
+ curl_easy_setopt(curl, CURLOPT_HEADERDATA,
+ &buffer_info);
+ if (opt_cacert != NULL)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ fprintf(stderr,
+ "error: curl_easy_perform() failed: %s\n",
+ curl_easy_strerror(res));
+ goto cleanup;
+ }
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+ if (http_code < 200 || http_code >= 300) {
+ fprintf(stderr, "error: request failed "
+ "with response code: %ld\n", http_code);
+ res = CURLE_LOGIN_DENIED;
+ goto cleanup;
+ }
+ } else {
+ res = CURLE_FAILED_INIT;
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ goto cleanup;
+ }
+
+cleanup:
+ if (slist) {
+ curl_slist_free_all(slist);
+ }
+ if (curl) {
+ curl_easy_cleanup(curl);
+ }
+
+ if (res == CURLE_OK) {
+ *buf = buffer_info.buf;
+ *buf_size = buffer_info.size;
+ *result_len = buffer_info.result_len;
+ return(buffer_info.buf);
+ }
+
+ free(buffer_info.buf);
+ *buf = NULL;
+ *buf_size = 0;
+ *result_len = 0;
+
+ return(NULL);
+}
+
+static
+container_list *
+container_list_new()
+{
+ container_list *list =
+ (container_list *)(calloc(1, sizeof(container_list)));
+
+ list->object_count = 1000;
+ list->objects = (object_info*)
+ (calloc(list->object_count, sizeof(object_info)));
+
+ if (list->objects == NULL) {
+ fprintf(stderr, "error: out of memory\n");
+ free(list);
+ return(NULL);
+ }
+
+ return(list);
+}
+
+static
+void
+container_list_free(container_list *list)
+{
+ free(list->content_json);
+ free(list->objects);
+ free(list);
+}
+
+static
+void
+container_list_add_object(container_list *list, const char *name,
+ const char *hash, size_t bytes)
+{
+ const size_t object_count_step = 1000;
+
+ if (list->idx >= list->object_count) {
+ list->objects = (object_info*)
+ realloc(list->objects,
+ (list->object_count + object_count_step) *
+ sizeof(object_info));
+ memset(list->objects + list->object_count, 0,
+ object_count_step * sizeof(object_info));
+ list->object_count += object_count_step;
+ }
+ assert(list->idx <= list->object_count);
+ strcpy(list->objects[list->idx].name, name);
+ strcpy(list->objects[list->idx].hash, hash);
+ list->objects[list->idx].bytes = bytes;
+ ++list->idx;
+}
+
+
+/*********************************************************************//**
+Tokenize a JSON string. Return an array of tokens. The caller is responsible
+for deallocating the array. */
+jsmntok_t *
+json_tokenise(char *json, size_t len, int initial_tokens)
+{
+ jsmn_parser parser;
+ jsmn_init(&parser);
+
+ unsigned int n = initial_tokens;
+ jsmntok_t *tokens = (jsmntok_t *)(malloc(sizeof(jsmntok_t) * n));
+
+ int ret = jsmn_parse(&parser, json, len, tokens, n);
+
+ while (ret == JSMN_ERROR_NOMEM)
+ {
+ n = n * 2 + 1;
+ tokens = (jsmntok_t*)(realloc(tokens, sizeof(jsmntok_t) * n));
+ ret = jsmn_parse(&parser, json, len, tokens, n);
+ }
+
+ if (ret == JSMN_ERROR_INVAL) {
+ fprintf(stderr, "error: invalid JSON string\n");
+
+ }
+ if (ret == JSMN_ERROR_PART) {
+ fprintf(stderr, "error: truncated JSON string\n");
+ }
+
+ return tokens;
+}
+
+/*********************************************************************//**
+Return true if the token's string representation equals the given string. */
+static
+bool
+json_token_eq(const char *buf, jsmntok_t *t, const char *s)
+{
+ size_t len = strlen(s);
+
+ assert(t->end > t->start);
+
+ return((size_t)(t->end - t->start) == len &&
+ (strncmp(buf + t->start, s, len) == 0));
+}
+
+/*********************************************************************//**
+Copy given token as string. */
+static
+bool
+json_token_str(const char *buf, jsmntok_t *t, char *out, int out_size)
+{
+ size_t len = min(t->end - t->start, out_size - 1);
+
+ memcpy(out, buf + t->start, len);
+ out[len] = 0;
+
+ return(true);
+}
+
+/*********************************************************************//**
+Parse SWIFT container list response and fill output array with values
+sorted by object name. */
+static
+bool
+swift_parse_container_list(container_list *list)
+{
+ enum {MAX_DEPTH=20};
+ enum label_t {NONE, OBJECT};
+
+ char name[SWIFT_MAX_URL_SIZE];
+ char hash[33];
+ char bytes[30];
+ char *response = list->content_json;
+
+ struct stack_t {
+ jsmntok_t *t;
+ int n_items;
+ label_t label;
+ };
+
+ stack_t stack[MAX_DEPTH];
+ jsmntok_t *tokens;
+ int level;
+ size_t count = 0;
+
+ tokens = json_tokenise(list->content_json, list->content_length, 200);
+
+ stack[0].t = &tokens[0];
+ stack[0].label = NONE;
+ stack[0].n_items = 1;
+ level = 0;
+
+ for (size_t i = 0, j = 1; j > 0; i++, j--) {
+ jsmntok_t *t = &tokens[i];
+
+ assert(t->start != -1 && t->end != -1);
+ assert(level >= 0);
+
+ --stack[level].n_items;
+
+ switch (t->type) {
+ case JSMN_ARRAY:
+ case JSMN_OBJECT:
+ if (level < MAX_DEPTH - 1) {
+ level++;
+ }
+ stack[level].t = t;
+ stack[level].label = NONE;
+ if (t->type == JSMN_ARRAY) {
+ stack[level].n_items = t->size;
+ j += t->size;
+ } else {
+ stack[level].n_items = t->size * 2;
+ j += t->size * 2;
+ }
+ break;
+ case JSMN_PRIMITIVE:
+ case JSMN_STRING:
+ if (stack[level].t->type == JSMN_OBJECT &&
+ stack[level].n_items % 2 == 1) {
+ /* key */
+ if (json_token_eq(response, t, "name")) {
+ json_token_str(response, &tokens[i + 1],
+ name, sizeof(name));
+ }
+ if (json_token_eq(response, t, "hash")) {
+ json_token_str(response, &tokens[i + 1],
+ hash, sizeof(hash));
+ }
+ if (json_token_eq(response, t, "bytes")) {
+ json_token_str(response, &tokens[i + 1],
+ bytes, sizeof(bytes));
+ }
+ }
+ break;
+ }
+
+ while (stack[level].n_items == 0 && level > 0) {
+ if (stack[level].t->type == JSMN_OBJECT
+ && level == 2) {
+ char *endptr;
+ container_list_add_object(list, name, hash,
+ strtoull(bytes, &endptr, 10));
+ ++count;
+ }
+ --level;
+ }
+ }
+
+ if (count == 0) {
+ list->final = true;
+ }
+
+ free(tokens);
+
+ return(true);
+}
+
+/*********************************************************************//**
+List swift container with given name. Return list of objects sorted by
+object name. */
+static
+container_list *
+swift_list(swift_auth_info *auth, const char *container, const char *path)
+{
+ container_list *list;
+ char url[SWIFT_MAX_URL_SIZE];
+
+ list = container_list_new();
+
+ while (!list->final) {
+
+ /* download the list in json format */
+ snprintf(url, array_elements(url),
+ "%s/%s?format=json&limit=1000%s%s%s%s",
+ auth->url, container, path ? "&prefix=" : "",
+ path ? path : "", list->idx > 0 ? "&marker=" : "",
+ list->idx > 0 ?
+ list->objects[list->idx - 1].name : "");
+
+ list->content_json = swift_fetch_into_buffer(auth, url,
+ &list->content_json, &list->content_bufsize,
+ &list->content_length, NULL, NULL);
+
+ if (list->content_json == NULL) {
+ container_list_free(list);
+ return(NULL);
+ }
+
+ /* parse downloaded list */
+ if (!swift_parse_container_list(list)) {
+ fprintf(stderr, "error: unable to parse "
+ "container list\n");
+ container_list_free(list);
+ return(NULL);
+ }
+ }
+
+ return(list);
+}
+
+
+/*********************************************************************//**
+Return true if chunk is a part of backup with given name. */
+static
+bool
+chunk_belongs_to(const char *chunk_name, const char *backup_name)
+{
+ size_t backup_name_len = strlen(backup_name);
+
+ return((strlen(chunk_name) > backup_name_len)
+ && (chunk_name[backup_name_len] == '/')
+ && strncmp(chunk_name, backup_name, backup_name_len) == 0);
+}
+
+/*********************************************************************//**
+Return true if chunk is in given list. */
+static
+bool
+chunk_in_list(const char *chunk_name, char **list, int list_size)
+{
+ size_t chunk_name_len;
+
+ if (list_size == 0) {
+ return(true);
+ }
+
+ chunk_name_len = strlen(chunk_name);
+ if (chunk_name_len < 20) {
+ return(false);
+ }
+
+ for (int i = 0; i < list_size; i++) {
+ size_t item_len = strlen(list[i]);
+
+ if ((strncmp(chunk_name - item_len + chunk_name_len - 21,
+ list[i], item_len) == 0)
+ && (chunk_name[chunk_name_len - 21] == '.')
+ && (chunk_name[chunk_name_len - item_len - 22] == '/')) {
+ return(true);
+ }
+ }
+
+ return(false);
+}
+
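+/*********************************************************************//**
+Stream the chunks of the named backup from the container to the standard
+output, optionally restricted to the files listed on the command line.
+@return CURLE_OK on success */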
+static
+int swift_download(swift_auth_info *auth, const char *container,
+ const char *name)
+{
+ container_list *list;
+ char *buf = NULL;
+ size_t buf_size = 0;
+ size_t result_len = 0;
+
+ if ((list = swift_list(auth, container, name)) == NULL) {
+ return(CURLE_FAILED_INIT);
+ }
+
+ for (size_t i = 0; i < list->idx; i++) {
+ const char *chunk_name = list->objects[i].name;
+
+ if (chunk_belongs_to(chunk_name, name)
+ && chunk_in_list(chunk_name, file_list, file_list_size)) {
+ char url[SWIFT_MAX_URL_SIZE];
+
+ snprintf(url, sizeof(url), "%s/%s/%s",
+ auth->url, container, chunk_name);
+
+ if ((buf = swift_fetch_into_buffer(
+ auth, url, &buf, &buf_size, &result_len,
+ NULL, NULL)) == NULL) {
+ fprintf(stderr, "error: failed to download "
+ "chunk %s\n", chunk_name);
+ container_list_free(list);
+ return(CURLE_FAILED_INIT);
+ }
+
+ fwrite(buf, 1, result_len, stdout);
+ }
+ }
+
+ free(buf);
+
+ container_list_free(list);
+
+ return(CURLE_OK);
+}
+
+
+/*********************************************************************//**
+Delete backup with given name from given container.
+@return true if backup deleted successfully */
+static
+bool swift_delete(swift_auth_info *auth, const char *container,
+ const char *name)
+{
+ container_list *list;
+
+ if ((list = swift_list(auth, container, name)) == NULL) {
+ return(CURLE_FAILED_INIT);
+ }
+
+ for (size_t i = 0; i < list->object_count; i++) {
+ const char *chunk_name = list->objects[i].name;
+
+ if (chunk_belongs_to(chunk_name, name)) {
+ char url[SWIFT_MAX_URL_SIZE];
+
+ snprintf(url, sizeof(url), "%s/%s/%s",
+ auth->url, container, chunk_name);
+
+ fprintf(stderr, "delete %s\n", chunk_name);
+ if (!swift_delete_object(auth, url)) {
+ fprintf(stderr, "error: failed to delete "
+ "chunk %s\n", chunk_name);
+ container_list_free(list);
+ return(CURLE_FAILED_INIT);
+ }
+ }
+ }
+
+ container_list_free(list);
+
+ return(CURLE_OK);
+}
+
+/*********************************************************************//**
+Check if backup with given name exists.
+@return true if backup exists */
+static
+bool swift_backup_exists(swift_auth_info *auth, const char *container,
+ const char *backup_name)
+{
+ container_list *list;
+
+ if ((list = swift_list(auth, container, backup_name)) == NULL) {
+ fprintf(stderr, "error: unable to list container %s\n",
+ container);
+ exit(EXIT_FAILURE);
+ }
+
+ for (size_t i = 0; i < list->object_count; i++) {
+ if (chunk_belongs_to(list->objects[i].name, backup_name)) {
+ container_list_free(list);
+ return(true);
+ }
+ }
+
+ container_list_free(list);
+
+ return(false);
+}
+
+/*********************************************************************//**
+Fills auth_info from the Keystone (identity API v2) authentication response.
+@return true if the response was parsed successfully */
+static
+bool
+swift_parse_keystone_response_v2(char *response, size_t response_length,
+ swift_auth_info *auth_info)
+{
+ enum {MAX_DEPTH=20};
+ enum label_t {NONE, ACCESS, CATALOG, ENDPOINTS, TOKEN};
+
+ char filtered_url[SWIFT_MAX_URL_SIZE];
+ char public_url[SWIFT_MAX_URL_SIZE];
+ char region[SWIFT_MAX_URL_SIZE];
+ char id[SWIFT_MAX_URL_SIZE];
+ char token_id[SWIFT_MAX_URL_SIZE];
+ char type[SWIFT_MAX_URL_SIZE];
+
+ struct stack_t {
+ jsmntok_t *t;
+ int n_items;
+ label_t label;
+ };
+
+ stack_t stack[MAX_DEPTH];
+ jsmntok_t *tokens;
+ int level;
+
+ tokens = json_tokenise(response, response_length, 200);
+
+ stack[0].t = &tokens[0];
+ stack[0].label = NONE;
+ stack[0].n_items = 1;
+ level = 0;
+
+ for (size_t i = 0, j = 1; j > 0; i++, j--) {
+ jsmntok_t *t = &tokens[i];
+
+ assert(t->start != -1 && t->end != -1);
+ assert(level >= 0);
+
+ --stack[level].n_items;
+
+ switch (t->type) {
+ case JSMN_ARRAY:
+ case JSMN_OBJECT:
+ if (level < MAX_DEPTH - 1) {
+ level++;
+ }
+ stack[level].t = t;
+ stack[level].label = NONE;
+ if (t->type == JSMN_ARRAY) {
+ stack[level].n_items = t->size;
+ j += t->size;
+ } else {
+ stack[level].n_items = t->size * 2;
+ j += t->size * 2;
+ }
+ break;
+ case JSMN_PRIMITIVE:
+ case JSMN_STRING:
+ if (stack[level].t->type == JSMN_OBJECT &&
+ stack[level].n_items % 2 == 1) {
+ /* key */
+ if (json_token_eq(response, t, "access")) {
+ stack[level].label = ACCESS;
+ }
+ if (json_token_eq(response, t,
+ "serviceCatalog")) {
+ stack[level].label = CATALOG;
+ }
+ if (json_token_eq(response, t, "endpoints")) {
+ stack[level].label = ENDPOINTS;
+ }
+ if (json_token_eq(response, t, "token")) {
+ stack[level].label = TOKEN;
+ }
+ if (json_token_eq(response, t, "id")) {
+ json_token_str(response, &tokens[i + 1],
+ id, sizeof(id));
+ }
+ if (json_token_eq(response, t, "id")
+ && stack[level - 1].label == TOKEN) {
+ json_token_str(response, &tokens[i + 1],
+ token_id, sizeof(token_id));
+ }
+ if (json_token_eq(response, t, "region")) {
+ json_token_str(response, &tokens[i + 1],
+ region, sizeof(region));
+ }
+ if (json_token_eq(response, t, "publicURL")) {
+ json_token_str(response, &tokens[i + 1],
+ public_url, sizeof(public_url));
+ }
+ if (json_token_eq(response, t, "type")) {
+ json_token_str(response, &tokens[i + 1],
+ type, sizeof(type));
+ }
+ }
+ break;
+ }
+
+ while (stack[level].n_items == 0 && level > 0) {
+ if (stack[level].t->type == JSMN_OBJECT
+ && level == 6
+ && stack[level - 1].t->type == JSMN_ARRAY
+ && stack[level - 2].label == ENDPOINTS) {
+ if (opt_swift_region == NULL
+ || strcmp(opt_swift_region, region) == 0) {
+ strncpy(filtered_url, public_url,
+ sizeof(filtered_url));
+ }
+ }
+ if (stack[level].t->type == JSMN_OBJECT &&
+ level == 4 &&
+ stack[level - 1].t->type == JSMN_ARRAY &&
+ stack[level - 2].label == CATALOG) {
+ if (strcmp(type, "object-store") == 0) {
+ strncpy(auth_info->url, filtered_url,
+ sizeof(auth_info->url));
+ }
+ }
+ --level;
+ }
+ }
+
+ free(tokens);
+
+ strncpy(auth_info->token, token_id, sizeof(auth_info->token));
+
+ assert(level == 0);
+
+ if (*auth_info->token == 0) {
+		fprintf(stderr, "error: cannot receive token from response\n");
+ return(false);
+ }
+
+ if (*auth_info->url == 0) {
+		fprintf(stderr, "error: cannot get URL from response\n");
+ return(false);
+ }
+
+ return(true);
+}
+
+/*********************************************************************//**
+Authenticate against Keystone (identity API v2). Fills swift_auth_info
+struct. Uses credentials provided as global variables.
+@returns true if access is granted and token received. */
+static
+bool
+swift_keystone_auth_v2(const char *auth_url, swift_auth_info *info)
+{
+ char tenant_arg[SWIFT_MAX_URL_SIZE];
+ char payload[SWIFT_MAX_URL_SIZE];
+ struct curl_slist *slist = NULL;
+ download_buffer_info buf_info;
+ long http_code;
+ CURLcode res;
+ CURL *curl;
+ bool auth_res = false;
+
+ memset(&buf_info, 0, sizeof(buf_info));
+
+ if (opt_swift_user == NULL) {
+		fprintf(stderr, "error: --swift-user is required "
+			"for keystone authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_password == NULL) {
+		fprintf(stderr, "error: --swift-password is required "
+			"for keystone authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_tenant != NULL && opt_swift_tenant_id != NULL) {
+ fprintf(stderr, "error: both --swift-tenant and "
+ "--swift-tenant-id specified for keystone "
+ "authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_tenant != NULL) {
+ snprintf(tenant_arg, sizeof(tenant_arg), ",\"%s\":\"%s\"",
+ "tenantName", opt_swift_tenant);
+ } else if (opt_swift_tenant_id != NULL) {
+ snprintf(tenant_arg, sizeof(tenant_arg), ",\"%s\":\"%s\"",
+ "tenantId", opt_swift_tenant_id);
+ } else {
+ *tenant_arg = 0;
+ }
+
+ snprintf(payload, sizeof(payload), "{\"auth\": "
+ "{\"passwordCredentials\": {\"username\":\"%s\","
+ "\"password\":\"%s\"}%s}}",
+ opt_swift_user, opt_swift_password, tenant_arg);
+
+ curl = curl_easy_init();
+
+ if (curl != NULL) {
+
+ slist = curl_slist_append(slist,
+ "Content-Type: application/json");
+ slist = curl_slist_append(slist,
+ "Accept: application/json");
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(curl, CURLOPT_POST, 1L);
+ curl_easy_setopt(curl, CURLOPT_URL, auth_url);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload);
+ curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fetch_buffer_cb);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf_info);
+ curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
+ fetch_buffer_header_cb);
+ curl_easy_setopt(curl, CURLOPT_HEADERDATA,
+ &buf_info);
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
+
+ if (opt_cacert != NULL)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ fprintf(stderr,
+ "error: curl_easy_perform() failed: %s\n",
+ curl_easy_strerror(res));
+ goto cleanup;
+ }
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+ if (http_code < 200 || http_code >= 300) {
+ fprintf(stderr, "error: request failed "
+ "with response code: %ld\n", http_code);
+ res = CURLE_LOGIN_DENIED;
+ goto cleanup;
+ }
+ } else {
+ res = CURLE_FAILED_INIT;
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ goto cleanup;
+ }
+
+ if (!swift_parse_keystone_response_v2(buf_info.buf,
+ buf_info.size, info)) {
+ goto cleanup;
+ }
+
+ auth_res = true;
+
+cleanup:
+ if (slist) {
+ curl_slist_free_all(slist);
+ }
+ if (curl) {
+ curl_easy_cleanup(curl);
+ }
+
+ free(buf_info.buf);
+
+ return(auth_res);
+}
+
+
+/*********************************************************************//**
+Fills auth_info from the Keystone (identity API v3) authentication response.
+@return true if the response was parsed successfully */
+static
+bool
+swift_parse_keystone_response_v3(char *response, size_t response_length,
+ swift_auth_info *auth_info)
+{
+ enum {MAX_DEPTH=20};
+ enum label_t {NONE, TOKEN, CATALOG, ENDPOINTS};
+
+ char url[SWIFT_MAX_URL_SIZE];
+ char filtered_url[SWIFT_MAX_URL_SIZE];
+ char region[SWIFT_MAX_URL_SIZE];
+ char interface[SWIFT_MAX_URL_SIZE];
+ char type[SWIFT_MAX_URL_SIZE];
+
+ struct stack_t {
+ jsmntok_t *t;
+ int n_items;
+ label_t label;
+ };
+
+ stack_t stack[MAX_DEPTH];
+ jsmntok_t *tokens;
+ int level;
+
+ tokens = json_tokenise(response, response_length, 200);
+
+ stack[0].t = &tokens[0];
+ stack[0].label = NONE;
+ stack[0].n_items = 1;
+ level = 0;
+
+ for (size_t i = 0, j = 1; j > 0; i++, j--) {
+ jsmntok_t *t = &tokens[i];
+
+ assert(t->start != -1 && t->end != -1);
+ assert(level >= 0);
+
+ --stack[level].n_items;
+
+ switch (t->type) {
+ case JSMN_ARRAY:
+ case JSMN_OBJECT:
+ if (level < MAX_DEPTH - 1) {
+ level++;
+ }
+ stack[level].t = t;
+ stack[level].label = NONE;
+ if (t->type == JSMN_ARRAY) {
+ stack[level].n_items = t->size;
+ j += t->size;
+ } else {
+ stack[level].n_items = t->size * 2;
+ j += t->size * 2;
+ }
+ break;
+ case JSMN_PRIMITIVE:
+ case JSMN_STRING:
+ if (stack[level].t->type == JSMN_OBJECT &&
+ stack[level].n_items % 2 == 1) {
+ /* key */
+ if (json_token_eq(response, t, "token")) {
+ stack[level].label = TOKEN;
+ fprintf(stderr, "token\n");
+ }
+ if (json_token_eq(response, t,
+ "catalog")) {
+ stack[level].label = CATALOG;
+ fprintf(stderr, "catalog\n");
+ }
+ if (json_token_eq(response, t, "endpoints")) {
+ stack[level].label = ENDPOINTS;
+ }
+ if (json_token_eq(response, t, "region")) {
+ json_token_str(response, &tokens[i + 1],
+ region, sizeof(region));
+ }
+ if (json_token_eq(response, t, "url")) {
+ json_token_str(response, &tokens[i + 1],
+ url, sizeof(url));
+ }
+ if (json_token_eq(response, t, "interface")) {
+ json_token_str(response, &tokens[i + 1],
+ interface, sizeof(interface));
+ }
+ if (json_token_eq(response, t, "type")) {
+ json_token_str(response, &tokens[i + 1],
+ type, sizeof(type));
+ }
+ }
+ break;
+ }
+
+ while (stack[level].n_items == 0 && level > 0) {
+ if (stack[level].t->type == JSMN_OBJECT
+ && level == 6
+ && stack[level - 1].t->type == JSMN_ARRAY
+ && stack[level - 2].label == ENDPOINTS) {
+ if ((opt_swift_region == NULL
+ || strcmp(opt_swift_region, region) == 0)
+ && strcmp(interface, "public") == 0) {
+ strncpy(filtered_url, url,
+ sizeof(filtered_url));
+ }
+ }
+ if (stack[level].t->type == JSMN_OBJECT &&
+ level == 4 &&
+ stack[level - 1].t->type == JSMN_ARRAY &&
+ stack[level - 2].label == CATALOG) {
+ if (strcmp(type, "object-store") == 0) {
+ strncpy(auth_info->url, filtered_url,
+ sizeof(auth_info->url));
+ }
+ }
+ --level;
+ }
+ }
+
+ free(tokens);
+
+ assert(level == 0);
+
+ if (*auth_info->url == 0) {
+		fprintf(stderr, "error: cannot get URL from response\n");
+ return(false);
+ }
+
+ return(true);
+}
+
+/*********************************************************************//**
+Captures X-Subject-Token header. */
+static
+size_t keystone_v3_header_cb(char *ptr, size_t size, size_t nmemb, void *data)
+{
+ swift_auth_info *info = (swift_auth_info*)(data);
+
+ get_http_header("X-Subject-Token: ", ptr,
+ info->token, array_elements(info->token));
+
+ return nmemb * size;
+}
+
+/*********************************************************************//**
+Authenticate against Swift Keystone (v3). Fills swift_auth_info struct.
+Uses credentials provided as global variables.
+@return true if access is granted and a token was received. */
+static
+bool
+swift_keystone_auth_v3(const char *auth_url, swift_auth_info *info)
+{
+ char scope[SWIFT_MAX_URL_SIZE];
+ char domain[SWIFT_MAX_URL_SIZE];
+ char payload[SWIFT_MAX_URL_SIZE];
+ struct curl_slist *slist = NULL;
+ download_buffer_info buf_info;
+ long http_code;
+ CURLcode res;
+ CURL *curl;
+ bool auth_res = false;
+
+ memset(&buf_info, 0, sizeof(buf_info));
+ buf_info.custom_header_callback = keystone_v3_header_cb;
+ buf_info.custom_header_callback_data = info;
+
+ if (opt_swift_user == NULL) {
+		fprintf(stderr, "error: --swift-user is required "
+ "for keystone authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_password == NULL) {
+		fprintf(stderr, "error: --swift-password is required "
+ "for keystone authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_project_id != NULL && opt_swift_project != NULL) {
+ fprintf(stderr, "error: both --swift-project and "
+ "--swift-project-id specified for keystone "
+ "authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_domain_id != NULL && opt_swift_domain != NULL) {
+ fprintf(stderr, "error: both --swift-domain and "
+ "--swift-domain-id specified for keystone "
+ "authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_project_id != NULL && opt_swift_domain != NULL) {
+ fprintf(stderr, "error: both --swift-project-id and "
+ "--swift-domain specified for keystone "
+ "authentication.\n");
+ return(false);
+ }
+
+ if (opt_swift_project_id != NULL && opt_swift_domain_id != NULL) {
+ fprintf(stderr, "error: both --swift-project-id and "
+ "--swift-domain-id specified for keystone "
+ "authentication.\n");
+ return(false);
+ }
+
+ scope[0] = 0; domain[0] = 0;
+
+ if (opt_swift_domain != NULL) {
+ snprintf(domain, sizeof(domain),
+ ",{\"domain\":{\"name\":\"%s\"}}",
+ opt_swift_domain);
+ } else if (opt_swift_domain_id != NULL) {
+ snprintf(domain, sizeof(domain),
+ ",{\"domain\":{\"id\":\"%s\"}}",
+ opt_swift_domain_id);
+ }
+
+ if (opt_swift_project_id != NULL) {
+ snprintf(scope, sizeof(scope),
+ ",\"scope\":{\"project\":{\"id\":\"%s\"}}",
+ opt_swift_project_id);
+ } else if (opt_swift_project != NULL) {
+ snprintf(scope, sizeof(scope),
+ ",\"scope\":{\"project\":{\"name\":\"%s\"%s}}",
+			 opt_swift_project, domain);
+ }
+
+ snprintf(payload, sizeof(payload), "{\"auth\":{\"identity\":"
+ "{\"methods\":[\"password\"],\"password\":{\"user\":"
+ "{\"name\":\"%s\",\"password\":\"%s\"%s}}}%s}}",
+ opt_swift_user, opt_swift_password,
+ *scope ? "" : ",\"domain\":{\"id\":\"default\"}",
+ scope);
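+	/* A sketch of the resulting request body (values are purely
+	illustrative), e.g. with --swift-user=bob, --swift-password=secret
+	and no project scope:
+
+	{"auth":{"identity":{"methods":["password"],"password":
+	 {"user":{"name":"bob","password":"secret",
+	  "domain":{"id":"default"}}}}}} */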
+
+ curl = curl_easy_init();
+
+ if (curl != NULL) {
+
+ slist = curl_slist_append(slist,
+ "Content-Type: application/json");
+ slist = curl_slist_append(slist,
+ "Accept: application/json");
+
+ curl_easy_setopt(curl, CURLOPT_VERBOSE, opt_verbose);
+ curl_easy_setopt(curl, CURLOPT_POST, 1L);
+ curl_easy_setopt(curl, CURLOPT_URL, auth_url);
+ curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload);
+ curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fetch_buffer_cb);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buf_info);
+ curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION,
+ fetch_buffer_header_cb);
+ curl_easy_setopt(curl, CURLOPT_HEADERDATA,
+ &buf_info);
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
+
+ if (opt_cacert != NULL)
+ curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cacert);
+ if (opt_insecure)
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, FALSE);
+
+ res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ fprintf(stderr,
+ "error: curl_easy_perform() failed: %s\n",
+ curl_easy_strerror(res));
+ goto cleanup;
+ }
+ curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+ if (http_code < 200 || http_code >= 300) {
+ fprintf(stderr, "error: request failed "
+ "with response code: %ld\n", http_code);
+ res = CURLE_LOGIN_DENIED;
+ goto cleanup;
+ }
+ } else {
+ res = CURLE_FAILED_INIT;
+ fprintf(stderr, "error: curl_easy_init() failed\n");
+ goto cleanup;
+ }
+
+ if (!swift_parse_keystone_response_v3(buf_info.buf,
+ buf_info.size, info)) {
+ goto cleanup;
+ }
+
+ auth_res = true;
+
+cleanup:
+ if (slist) {
+ curl_slist_free_all(slist);
+ }
+ if (curl) {
+ curl_easy_cleanup(curl);
+ }
+
+ free(buf_info.buf);
+
+ return(auth_res);
+}
+
+int main(int argc, char **argv)
+{
+ swift_auth_info info;
+ char auth_url[SWIFT_MAX_URL_SIZE];
+
+ MY_INIT(argv[0]);
+
+ /* handle_options in parse_args is destructive so
+ * we make a copy of our argument pointers so we can
+ * mask the sensitive values afterwards */
+ char **mask_argv = (char **)malloc(sizeof(char *) * (argc - 1));
+ memcpy(mask_argv, argv + 1, sizeof(char *) * (argc - 1));
+
+ if (parse_args(argc, argv)) {
+ return(EXIT_FAILURE);
+ }
+
+ mask_args(argc, mask_argv); /* mask args on cmdline */
+
+ curl_global_init(CURL_GLOBAL_ALL);
+
+ if (opt_swift_auth_version == NULL || *opt_swift_auth_version == '1') {
+ /* TempAuth */
+ snprintf(auth_url, SWIFT_MAX_URL_SIZE, "%sauth/v%s/",
+ opt_swift_auth_url, opt_swift_auth_version ?
+ opt_swift_auth_version : "1.0");
+
+ if (!swift_temp_auth(auth_url, &info)) {
+ fprintf(stderr, "error: failed to authenticate\n");
+ return(EXIT_FAILURE);
+ }
+
+ } else if (*opt_swift_auth_version == '2') {
+ /* Keystone v2 */
+ snprintf(auth_url, SWIFT_MAX_URL_SIZE, "%sv%s/tokens",
+ opt_swift_auth_url, opt_swift_auth_version);
+
+ if (!swift_keystone_auth_v2(auth_url, &info)) {
+ fprintf(stderr, "error: failed to authenticate\n");
+ return(EXIT_FAILURE);
+ }
+
+ } else if (*opt_swift_auth_version == '3') {
+ /* Keystone v3 */
+ snprintf(auth_url, SWIFT_MAX_URL_SIZE, "%sv%s/auth/tokens",
+ opt_swift_auth_url, opt_swift_auth_version);
+
+ if (!swift_keystone_auth_v3(auth_url, &info)) {
+ fprintf(stderr, "error: failed to authenticate\n");
+ exit(EXIT_FAILURE);
+ }
+
+ }
+
+ if (opt_swift_storage_url != NULL) {
+ snprintf(info.url, sizeof(info.url), "%s",
+ opt_swift_storage_url);
+ }
+
+ fprintf(stderr, "Object store URL: %s\n", info.url);
+
+ if (opt_mode == MODE_PUT) {
+
+ if (swift_create_container(&info, opt_swift_container) != 0) {
+ fprintf(stderr, "error: failed to create "
+ "container %s\n",
+ opt_swift_container);
+ return(EXIT_FAILURE);
+ }
+
+ if (swift_backup_exists(&info, opt_swift_container, opt_name)) {
+ fprintf(stderr, "error: backup named '%s' "
+ "already exists!\n",
+ opt_name);
+ return(EXIT_FAILURE);
+ }
+
+ if (swift_upload_parts(&info, opt_swift_container,
+ opt_name) != 0) {
+ fprintf(stderr, "error: upload failed\n");
+ return(EXIT_FAILURE);
+ }
+
+ } else if (opt_mode == MODE_GET) {
+
+ if (swift_download(&info, opt_swift_container, opt_name)
+ != CURLE_OK) {
+ fprintf(stderr, "error: download failed\n");
+ return(EXIT_FAILURE);
+ }
+
+ } else if (opt_mode == MODE_DELETE) {
+
+ if (swift_delete(&info, opt_swift_container, opt_name)
+ != CURLE_OK) {
+ fprintf(stderr, "error: delete failed\n");
+ return(EXIT_FAILURE);
+ }
+
+ } else {
+ fprintf(stderr, "Unknown command supplied.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ curl_global_cleanup();
+
+ return(EXIT_SUCCESS);
+}
diff --git a/extra/mariabackup/xbcrypt.c b/extra/mariabackup/xbcrypt.c
new file mode 100644
index 00000000000..255da875de4
--- /dev/null
+++ b/extra/mariabackup/xbcrypt.c
@@ -0,0 +1,694 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+The xbcrypt utility: encrypt or decrypt files in the XBCRYPT format.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <my_base.h>
+#include <my_getopt.h>
+#include "common.h"
+#include "xbcrypt.h"
+#include <gcrypt.h>
+
+#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
+GCRY_THREAD_OPTION_PTHREAD_IMPL;
+#endif
+
+#define XBCRYPT_VERSION "1.1"
+
+typedef enum {
+ RUN_MODE_NONE,
+ RUN_MODE_ENCRYPT,
+ RUN_MODE_DECRYPT
+} run_mode_t;
+
+const char *xbcrypt_encrypt_algo_names[] =
+{ "NONE", "AES128", "AES192", "AES256", NullS};
+TYPELIB xbcrypt_encrypt_algo_typelib=
+{array_elements(xbcrypt_encrypt_algo_names)-1,"",
+ xbcrypt_encrypt_algo_names, NULL};
+
+static run_mode_t opt_run_mode = RUN_MODE_ENCRYPT;
+static char *opt_input_file = NULL;
+static char *opt_output_file = NULL;
+static ulong opt_encrypt_algo;
+static char *opt_encrypt_key_file = NULL;
+static void *opt_encrypt_key = NULL;
+static ulonglong opt_encrypt_chunk_size = 0;
+static my_bool opt_verbose = FALSE;
+
+static uint encrypt_algos[] = { GCRY_CIPHER_NONE,
+ GCRY_CIPHER_AES128,
+ GCRY_CIPHER_AES192,
+ GCRY_CIPHER_AES256 };
+static int encrypt_algo = 0;
+static int encrypt_mode = GCRY_CIPHER_MODE_CTR;
+static uint encrypt_key_len = 0;
+static size_t encrypt_iv_len = 0;
+
+static struct my_option my_long_options[] =
+{
+ {"help", '?', "Display this help and exit.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"decrypt", 'd', "Decrypt data input to output.",
+ 0, 0, 0,
+ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"input", 'i', "Optional input file. If not specified, input"
+ " will be read from standard input.",
+ &opt_input_file, &opt_input_file, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"output", 'o', "Optional output file. If not specified, output"
+ " will be written to standard output.",
+ &opt_output_file, &opt_output_file, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-algo", 'a', "Encryption algorithm.",
+ &opt_encrypt_algo, &opt_encrypt_algo, &xbcrypt_encrypt_algo_typelib,
+ GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-key", 'k', "Encryption key.",
+ &opt_encrypt_key, &opt_encrypt_key, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-key-file", 'f', "File which contains encryption key.",
+ &opt_encrypt_key_file, &opt_encrypt_key_file, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-chunk-size", 's', "Size of working buffer for encryption in"
+ " bytes. The default value is 64K.",
+ &opt_encrypt_chunk_size, &opt_encrypt_chunk_size, 0,
+ GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
+
+ {"verbose", 'v', "Display verbose status output.",
+ &opt_verbose, &opt_verbose,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+static
+int
+get_options(int *argc, char ***argv);
+
+static
+my_bool
+get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
+ char *argument __attribute__((unused)));
+
+static
+void
+print_version(void);
+
+static
+void
+usage(void);
+
+static
+int
+mode_decrypt(File filein, File fileout);
+
+static
+int
+mode_encrypt(File filein, File fileout);
+
+int
+main(int argc, char **argv)
+{
+#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
+ gcry_error_t gcry_error;
+#endif
+ File filein = 0;
+ File fileout = 0;
+
+ MY_INIT(argv[0]);
+
+ if (get_options(&argc, &argv)) {
+ goto err;
+ }
+
+	/* According to the gcrypt docs (and my testing), setting up the
+	   threading callbacks must be done first, so let's give it a shot */
+#if !defined(GCRYPT_VERSION_NUMBER) || (GCRYPT_VERSION_NUMBER < 0x010600)
+ gcry_error = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
+ if (gcry_error) {
+ msg("%s: unable to set libgcrypt thread cbs - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ return 1;
+ }
+#endif
+
+ /* Version check should be the very first call because it
+	   makes sure that important subsystems are initialized. */
+ if (!gcry_control(GCRYCTL_ANY_INITIALIZATION_P)) {
+ const char *gcrypt_version;
+ gcrypt_version = gcry_check_version(NULL);
+ /* No other library has already initialized libgcrypt. */
+ if (!gcrypt_version) {
+ msg("%s: failed to initialize libgcrypt\n",
+ my_progname);
+ return 1;
+ } else if (opt_verbose) {
+ msg("%s: using gcrypt %s\n", my_progname,
+ gcrypt_version);
+ }
+ }
+ gcry_control(GCRYCTL_DISABLE_SECMEM, 0);
+ gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
+
+ /* Determine the algorithm */
+ encrypt_algo = encrypt_algos[opt_encrypt_algo];
+
+ /* Set up the iv length */
+ encrypt_iv_len = gcry_cipher_get_algo_blklen(encrypt_algo);
+
+ /* Now set up the key */
+ if (opt_encrypt_key == NULL && opt_encrypt_key_file == NULL) {
+ msg("%s: no encryption key or key file specified.\n",
+ my_progname);
+ return 1;
+ } else if (opt_encrypt_key && opt_encrypt_key_file) {
+ msg("%s: both encryption key and key file specified.\n",
+ my_progname);
+ return 1;
+ } else if (opt_encrypt_key_file) {
+ if (!xb_crypt_read_key_file(opt_encrypt_key_file,
+ &opt_encrypt_key,
+ &encrypt_key_len)) {
+ msg("%s: unable to read encryption key file \"%s\".\n",
+			    my_progname, opt_encrypt_key_file);
+ return 1;
+ }
+ } else {
+ encrypt_key_len = strlen(opt_encrypt_key);
+ }
+
+ if (opt_input_file) {
+ MY_STAT mystat;
+
+ if (opt_verbose)
+ msg("%s: input file \"%s\".\n", my_progname,
+ opt_input_file);
+
+ if (my_stat(opt_input_file, &mystat, MYF(MY_WME)) == NULL) {
+ goto err;
+ }
+ if (!MY_S_ISREG(mystat.st_mode)) {
+ msg("%s: \"%s\" is not a regular file, exiting.\n",
+ my_progname, opt_input_file);
+ goto err;
+ }
+ if ((filein = my_open(opt_input_file, O_RDONLY, MYF(MY_WME)))
+ < 0) {
+ msg("%s: failed to open \"%s\".\n", my_progname,
+ opt_input_file);
+ goto err;
+ }
+ } else {
+ if (opt_verbose)
+ msg("%s: input from standard input.\n", my_progname);
+ filein = fileno(stdin);
+ }
+
+ if (opt_output_file) {
+ if (opt_verbose)
+ msg("%s: output file \"%s\".\n", my_progname,
+ opt_output_file);
+
+ if ((fileout = my_create(opt_output_file, 0,
+ O_WRONLY|O_BINARY|O_EXCL|O_NOFOLLOW,
+ MYF(MY_WME))) < 0) {
+ msg("%s: failed to create output file \"%s\".\n",
+ my_progname, opt_output_file);
+ goto err;
+ }
+ } else {
+ if (opt_verbose)
+ msg("%s: output to standard output.\n", my_progname);
+ fileout = fileno(stdout);
+ }
+
+ if (opt_run_mode == RUN_MODE_DECRYPT
+ && mode_decrypt(filein, fileout)) {
+ goto err;
+ } else if (opt_run_mode == RUN_MODE_ENCRYPT
+ && mode_encrypt(filein, fileout)) {
+ goto err;
+ }
+
+ if (opt_input_file && filein) {
+ my_close(filein, MYF(MY_WME));
+ }
+ if (opt_output_file && fileout) {
+ my_close(fileout, MYF(MY_WME));
+ }
+
+ my_cleanup_options(my_long_options);
+
+ my_end(0);
+
+ return EXIT_SUCCESS;
+err:
+ if (opt_input_file && filein) {
+ my_close(filein, MYF(MY_WME));
+ }
+ if (opt_output_file && fileout) {
+ my_close(fileout, MYF(MY_WME));
+ }
+
+ my_cleanup_options(my_long_options);
+
+ my_end(0);
+
+ exit(EXIT_FAILURE);
+
+}
+
+
+static
+size_t
+my_xb_crypt_read_callback(void *userdata, void *buf, size_t len)
+{
+ File* file = (File *) userdata;
+ return xb_read_full(*file, buf, len);
+}
+
+static
+int
+mode_decrypt(File filein, File fileout)
+{
+ xb_rcrypt_t *xbcrypt_file = NULL;
+ void *chunkbuf = NULL;
+ size_t chunksize;
+ size_t originalsize;
+ void *ivbuf = NULL;
+ size_t ivsize;
+ void *decryptbuf = NULL;
+ size_t decryptbufsize = 0;
+ ulonglong ttlchunksread = 0;
+ ulonglong ttlbytesread = 0;
+ xb_rcrypt_result_t result;
+ gcry_cipher_hd_t cipher_handle;
+ gcry_error_t gcry_error;
+ my_bool hash_appended;
+
+ if (encrypt_algo != GCRY_CIPHER_NONE) {
+ gcry_error = gcry_cipher_open(&cipher_handle,
+ encrypt_algo,
+ encrypt_mode, 0);
+ if (gcry_error) {
+ msg("%s:decrypt: unable to open libgcrypt"
+ " cipher - %s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ return 1;
+ }
+
+ gcry_error = gcry_cipher_setkey(cipher_handle,
+ opt_encrypt_key,
+ encrypt_key_len);
+ if (gcry_error) {
+			msg("%s:decrypt: unable to set libgcrypt cipher "
+ "key - %s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ goto err;
+ }
+ }
+
+ /* Initialize the xb_crypt format reader */
+ xbcrypt_file = xb_crypt_read_open(&filein, my_xb_crypt_read_callback);
+ if (xbcrypt_file == NULL) {
+ msg("%s:decrypt: xb_crypt_read_open() failed.\n", my_progname);
+ goto err;
+ }
+
+ /* Walk the encrypted chunks, decrypting them and writing out */
+ while ((result = xb_crypt_read_chunk(xbcrypt_file, &chunkbuf,
+ &originalsize, &chunksize,
+ &ivbuf, &ivsize, &hash_appended))
+ == XB_CRYPT_READ_CHUNK) {
+
+ if (encrypt_algo != GCRY_CIPHER_NONE) {
+ gcry_error = gcry_cipher_reset(cipher_handle);
+ if (gcry_error) {
+ msg("%s:decrypt: unable to reset libgcrypt"
+ " cipher - %s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ goto err;
+ }
+
+ if (ivsize) {
+ gcry_error = gcry_cipher_setctr(cipher_handle,
+ ivbuf,
+ ivsize);
+ }
+ if (gcry_error) {
+ msg("%s:decrypt: unable to set cipher iv - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ continue;
+ }
+
+ if (decryptbufsize < originalsize) {
+ decryptbuf = my_realloc(decryptbuf,
+ originalsize,
+ MYF(MY_WME | MY_ALLOW_ZERO_PTR));
+ decryptbufsize = originalsize;
+ }
+
+ /* Try to decrypt it */
+ gcry_error = gcry_cipher_decrypt(cipher_handle,
+ decryptbuf,
+ originalsize,
+ chunkbuf,
+ chunksize);
+ if (gcry_error) {
+ msg("%s:decrypt: unable to decrypt chunk - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ gcry_cipher_close(cipher_handle);
+ goto err;
+ }
+
+ } else {
+ decryptbuf = chunkbuf;
+ }
+
+ if (hash_appended) {
+ uchar hash[XB_CRYPT_HASH_LEN];
+
+ originalsize -= XB_CRYPT_HASH_LEN;
+
+ /* ensure that XB_CRYPT_HASH_LEN is the correct length
+ of XB_CRYPT_HASH hashing algorithm output */
+ assert(gcry_md_get_algo_dlen(XB_CRYPT_HASH) ==
+ XB_CRYPT_HASH_LEN);
+ gcry_md_hash_buffer(XB_CRYPT_HASH, hash, decryptbuf,
+ originalsize);
+ if (memcmp(hash, (char *) decryptbuf + originalsize,
+ XB_CRYPT_HASH_LEN) != 0) {
+ msg("%s:%s invalid plaintext hash. "
+				    "Wrong encryption key specified?\n",
+ my_progname, __FUNCTION__);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ }
+
+ /* Write it out */
+ if (my_write(fileout, (const uchar *) decryptbuf, originalsize,
+ MYF(MY_WME | MY_NABP))) {
+ msg("%s:decrypt: unable to write output chunk.\n",
+ my_progname);
+ goto err;
+ }
+ ttlchunksread++;
+ ttlbytesread += chunksize;
+ if (opt_verbose)
+			msg("%s:decrypt: %llu chunks read, %llu bytes read.\n",
+ my_progname, ttlchunksread, ttlbytesread);
+ }
+
+ xb_crypt_read_close(xbcrypt_file);
+
+ if (encrypt_algo != GCRY_CIPHER_NONE)
+ gcry_cipher_close(cipher_handle);
+
+ if (decryptbuf && decryptbufsize)
+ my_free(decryptbuf);
+
+ if (opt_verbose)
+ msg("\n%s:decrypt: done\n", my_progname);
+
+ return 0;
+err:
+ if (xbcrypt_file)
+ xb_crypt_read_close(xbcrypt_file);
+
+ if (encrypt_algo != GCRY_CIPHER_NONE)
+ gcry_cipher_close(cipher_handle);
+
+ if (decryptbuf && decryptbufsize)
+ my_free(decryptbuf);
+
+ return 1;
+}
+
+static
+ssize_t
+my_xb_crypt_write_callback(void *userdata, const void *buf, size_t len)
+{
+ File* file = (File *) userdata;
+
+ ssize_t ret = my_write(*file, buf, len, MYF(MY_WME));
+ posix_fadvise(*file, 0, 0, POSIX_FADV_DONTNEED);
+ return ret;
+}
+
+static
+int
+mode_encrypt(File filein, File fileout)
+{
+ size_t bytesread;
+ size_t chunkbuflen;
+ uchar *chunkbuf = NULL;
+ void *ivbuf = NULL;
+ size_t encryptbuflen = 0;
+ size_t encryptedlen = 0;
+ void *encryptbuf = NULL;
+ ulonglong ttlchunkswritten = 0;
+ ulonglong ttlbyteswritten = 0;
+ xb_wcrypt_t *xbcrypt_file = NULL;
+ gcry_cipher_hd_t cipher_handle;
+ gcry_error_t gcry_error;
+
+ if (encrypt_algo != GCRY_CIPHER_NONE) {
+ gcry_error = gcry_cipher_open(&cipher_handle,
+ encrypt_algo,
+ encrypt_mode, 0);
+ if (gcry_error) {
+ msg("%s:encrypt: unable to open libgcrypt cipher - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ return 1;
+ }
+
+ gcry_error = gcry_cipher_setkey(cipher_handle,
+ opt_encrypt_key,
+ encrypt_key_len);
+ if (gcry_error) {
+ msg("%s:encrypt: unable to set libgcrypt cipher key - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ goto err;
+ }
+ }
+
+ posix_fadvise(filein, 0, 0, POSIX_FADV_SEQUENTIAL);
+
+ xbcrypt_file = xb_crypt_write_open(&fileout,
+ my_xb_crypt_write_callback);
+ if (xbcrypt_file == NULL) {
+ msg("%s:encrypt: xb_crypt_write_open() failed.\n",
+ my_progname);
+ goto err;
+ }
+
+ ivbuf = my_malloc(encrypt_iv_len, MYF(MY_FAE));
+
+ /* now read in data in chunk size, encrypt and write out */
+ chunkbuflen = opt_encrypt_chunk_size + XB_CRYPT_HASH_LEN;
+ chunkbuf = (uchar *) my_malloc(chunkbuflen, MYF(MY_FAE));
+ while ((bytesread = my_read(filein, chunkbuf, opt_encrypt_chunk_size,
+ MYF(MY_WME))) > 0) {
+
+ size_t origbuflen = bytesread + XB_CRYPT_HASH_LEN;
+
+ /* ensure that XB_CRYPT_HASH_LEN is the correct length
+ of XB_CRYPT_HASH hashing algorithm output */
+ assert(XB_CRYPT_HASH_LEN ==
+ gcry_md_get_algo_dlen(XB_CRYPT_HASH));
+ gcry_md_hash_buffer(XB_CRYPT_HASH, chunkbuf + bytesread,
+ chunkbuf, bytesread);
+
+ if (encrypt_algo != GCRY_CIPHER_NONE) {
+ gcry_error = gcry_cipher_reset(cipher_handle);
+
+ if (gcry_error) {
+ msg("%s:encrypt: unable to reset cipher - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ goto err;
+ }
+
+ xb_crypt_create_iv(ivbuf, encrypt_iv_len);
+ gcry_error = gcry_cipher_setctr(cipher_handle,
+ ivbuf,
+ encrypt_iv_len);
+
+ if (gcry_error) {
+ msg("%s:encrypt: unable to set cipher iv - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ continue;
+ }
+
+ if (encryptbuflen < origbuflen) {
+ encryptbuf = my_realloc(encryptbuf, origbuflen,
+ MYF(MY_WME | MY_ALLOW_ZERO_PTR));
+ encryptbuflen = origbuflen;
+ }
+
+ gcry_error = gcry_cipher_encrypt(cipher_handle,
+ encryptbuf,
+ encryptbuflen,
+ chunkbuf,
+ origbuflen);
+
+ encryptedlen = origbuflen;
+
+ if (gcry_error) {
+ msg("%s:encrypt: unable to encrypt chunk - "
+ "%s : %s\n", my_progname,
+ gcry_strsource(gcry_error),
+ gcry_strerror(gcry_error));
+ gcry_cipher_close(cipher_handle);
+ goto err;
+ }
+ } else {
+ encryptedlen = origbuflen;
+ encryptbuf = chunkbuf;
+ }
+
+ if (xb_crypt_write_chunk(xbcrypt_file, encryptbuf,
+ bytesread + XB_CRYPT_HASH_LEN,
+ encryptedlen, ivbuf, encrypt_iv_len)) {
+			msg("%s:encrypt: xb_crypt_write_chunk() failed.\n",
+ my_progname);
+ goto err;
+ }
+
+ ttlchunkswritten++;
+ ttlbyteswritten += encryptedlen;
+
+ if (opt_verbose)
+ msg("%s:encrypt: %llu chunks written, %llu bytes "
+			    "written.\n", my_progname, ttlchunkswritten,
+ ttlbyteswritten);
+ }
+
+ my_free(ivbuf);
+ my_free(chunkbuf);
+
+ if (encryptbuf && encryptbuflen)
+ my_free(encryptbuf);
+
+ xb_crypt_write_close(xbcrypt_file);
+
+ if (encrypt_algo != GCRY_CIPHER_NONE)
+ gcry_cipher_close(cipher_handle);
+
+ if (opt_verbose)
+ msg("\n%s:encrypt: done\n", my_progname);
+
+ return 0;
+err:
+ if (chunkbuf)
+ my_free(chunkbuf);
+
+ if (encryptbuf && encryptbuflen)
+ my_free(encryptbuf);
+
+ if (xbcrypt_file)
+ xb_crypt_write_close(xbcrypt_file);
+
+ if (encrypt_algo != GCRY_CIPHER_NONE)
+ gcry_cipher_close(cipher_handle);
+
+ return 1;
+}
+
+static
+int
+get_options(int *argc, char ***argv)
+{
+ int ho_error;
+
+ if ((ho_error= handle_options(argc, argv, my_long_options,
+ get_one_option))) {
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
+static
+my_bool
+get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
+ char *argument __attribute__((unused)))
+{
+ switch (optid) {
+ case 'd':
+ opt_run_mode = RUN_MODE_DECRYPT;
+ break;
+ case '?':
+ usage();
+ exit(0);
+ }
+
+ return FALSE;
+}
+
+static
+void
+print_version(void)
+{
+ printf("%s Ver %s for %s (%s)\n", my_progname, XBCRYPT_VERSION,
+ SYSTEM_TYPE, MACHINE_TYPE);
+}
+
+static
+void
+usage(void)
+{
+ print_version();
+ puts("Copyright (C) 2011 Percona Inc.");
+ puts("This software comes with ABSOLUTELY NO WARRANTY. "
+ "This is free software,\nand you are welcome to modify and "
+ "redistribute it under the GPL license.\n");
+
+ puts("Encrypt or decrypt files in the XBCRYPT format.\n");
+
+ puts("Usage: ");
+ printf(" %s [OPTIONS...]"
+	       " # read data from the specified input, encrypting or decrypting "
+	       "and writing the result to the specified output.\n",
+ my_progname);
+ puts("\nOptions:");
+ my_print_help(my_long_options);
+}
diff --git a/extra/mariabackup/xbcrypt.h b/extra/mariabackup/xbcrypt.h
new file mode 100644
index 00000000000..cdabf56a21a
--- /dev/null
+++ b/extra/mariabackup/xbcrypt.h
@@ -0,0 +1,84 @@
+/******************************************************
+Copyright (c) 2011 Percona LLC and/or its affiliates.
+
+Encryption interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XBCRYPT_H
+#define XBCRYPT_H
+
+#include <my_base.h>
+#include "common.h"
+
+#define XB_CRYPT_CHUNK_MAGIC1 "XBCRYP01"
+#define XB_CRYPT_CHUNK_MAGIC2 "XBCRYP02"
+#define XB_CRYPT_CHUNK_MAGIC3 "XBCRYP03" /* must be same size as ^^ */
+#define XB_CRYPT_CHUNK_MAGIC_CURRENT XB_CRYPT_CHUNK_MAGIC3
+#define XB_CRYPT_CHUNK_MAGIC_SIZE (sizeof(XB_CRYPT_CHUNK_MAGIC1)-1)
+
+#define XB_CRYPT_HASH GCRY_MD_SHA256
+#define XB_CRYPT_HASH_LEN 32
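+
+/* On-disk layout of one xbcrypt chunk, as written by xb_crypt_write_chunk()
+and read back by xb_crypt_read_chunk() (all integers are little-endian):
+
+     magic           8 bytes  ("XBCRYP03" for the current format)
+     reserved        8 bytes
+     original size   8 bytes  (plaintext length, including trailing hash)
+     encrypted size  8 bytes
+     checksum        4 bytes  (crc32 of the encrypted payload)
+     iv size         8 bytes  (absent in the v1 format)
+     iv              <iv size> bytes
+     payload         <encrypted size> bytes
+
+Since XBCRYP03 the plaintext ends with an XB_CRYPT_HASH_LEN-byte
+XB_CRYPT_HASH digest of the preceding data, verified after decryption. */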
+
+/******************************************************************************
+Write interface */
+typedef struct xb_wcrypt_struct xb_wcrypt_t;
+
+/* Callback on write for i/o, must return # of bytes written or -1 on error */
+typedef ssize_t xb_crypt_write_callback(void *userdata,
+ const void *buf, size_t len);
+
+xb_wcrypt_t *xb_crypt_write_open(void *userdata,
+ xb_crypt_write_callback *onwrite);
+
+/* Takes buffer, original length, encrypted length, iv and iv length, formats
+ output buffer and calls write callback.
+ Returns 0 on success, 1 on error */
+int xb_crypt_write_chunk(xb_wcrypt_t *crypt, const void *buf, size_t olen,
+ size_t elen, const void *iv, size_t ivlen);
+
+/* Returns 0 on success, 1 on error */
+int xb_crypt_write_close(xb_wcrypt_t *crypt);
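+
+/* Typical write-side usage (a minimal sketch, modelled on mode_encrypt() in
+xbcrypt.c; my_write_cb stands for any xb_crypt_write_callback):
+
+	xb_wcrypt_t *f = xb_crypt_write_open(&fd, my_write_cb);
+	xb_crypt_write_chunk(f, encrypted_buf, olen, elen, iv, ivlen);
+	xb_crypt_write_close(f);
+*/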
+
+/******************************************************************************
+Read interface */
+typedef struct xb_rcrypt_struct xb_rcrypt_t;
+
+/* Callback on read for i/o, must return # of bytes read or -1 on error */
+typedef size_t xb_crypt_read_callback(void *userdata, void *buf, size_t len);
+
+xb_rcrypt_t *xb_crypt_read_open(void *userdata,
+ xb_crypt_read_callback *onread);
+
+typedef enum {
+ XB_CRYPT_READ_CHUNK,
+ XB_CRYPT_READ_EOF,
+ XB_CRYPT_READ_ERROR
+} xb_rcrypt_result_t;
+
+xb_rcrypt_result_t xb_crypt_read_chunk(xb_rcrypt_t *crypt, void **buf,
+ size_t *olen, size_t *elen, void **iv,
+ size_t *ivlen, my_bool *hash_appended);
+
+int xb_crypt_read_close(xb_rcrypt_t *crypt);
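+
+/* Typical read-side usage (a minimal sketch, modelled on mode_decrypt() in
+xbcrypt.c; my_read_cb stands for any xb_crypt_read_callback):
+
+	xb_rcrypt_t *f = xb_crypt_read_open(&fd, my_read_cb);
+	while (xb_crypt_read_chunk(f, &buf, &olen, &elen, &iv, &ivlen,
+				   &hash_appended) == XB_CRYPT_READ_CHUNK) {
+		... decrypt the chunk and write it out ...
+	}
+	xb_crypt_read_close(f);
+*/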
+
+/******************************************************************************
+Utility interface */
+my_bool xb_crypt_read_key_file(const char *filename,
+ void** key, uint *keylength);
+
+void xb_crypt_create_iv(void* ivbuf, size_t ivlen);
+#endif
diff --git a/extra/mariabackup/xbcrypt_common.c b/extra/mariabackup/xbcrypt_common.c
new file mode 100644
index 00000000000..fe34fcb3bb0
--- /dev/null
+++ b/extra/mariabackup/xbcrypt_common.c
@@ -0,0 +1,60 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Encryption configuration file interface for XtraBackup.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <my_base.h>
+#include "common.h"
+#include "xbcrypt.h"
+
+#if GCC_VERSION >= 4002
+/* Workaround to avoid "gcry_ac_* is deprecated" warnings in gcrypt.h */
+# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include <gcrypt.h>
+
+#if GCC_VERSION >= 4002
+# pragma GCC diagnostic warning "-Wdeprecated-declarations"
+#endif
+
+my_bool
+xb_crypt_read_key_file(const char *filename, void** key, uint *keylength)
+{
+ FILE *fp;
+
+ if (!(fp = my_fopen(filename, O_RDONLY, MYF(0)))) {
+		msg("%s:%s: unable to open key file \"%s\", errno(%d)\n",
+ my_progname, __FUNCTION__, filename, my_errno);
+ return FALSE;
+ }
+
+ fseek(fp, 0 , SEEK_END);
+ *keylength = ftell(fp);
+ rewind(fp);
+ *key = my_malloc(*keylength, MYF(MY_FAE));
+ *keylength = fread(*key, 1, *keylength, fp);
+ my_fclose(fp, MYF(0));
+ return TRUE;
+}
+
+void
+xb_crypt_create_iv(void* ivbuf, size_t ivlen)
+{
+ gcry_create_nonce(ivbuf, ivlen);
+}
diff --git a/extra/mariabackup/xbcrypt_read.c b/extra/mariabackup/xbcrypt_read.c
new file mode 100644
index 00000000000..6522333f023
--- /dev/null
+++ b/extra/mariabackup/xbcrypt_read.c
@@ -0,0 +1,251 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+The xbcrypt format reader implementation.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include "xbcrypt.h"
+
+struct xb_rcrypt_struct {
+ void *userdata;
+ xb_crypt_read_callback *read;
+ void *buffer;
+ size_t bufsize;
+ void *ivbuffer;
+ size_t ivbufsize;
+ ulonglong offset;
+};
+
+xb_rcrypt_t *
+xb_crypt_read_open(void *userdata, xb_crypt_read_callback *onread)
+{
+ xb_rcrypt_t *crypt;
+
+ xb_ad(onread);
+
+ crypt = (xb_rcrypt_t *) my_malloc(sizeof(xb_rcrypt_t), MYF(MY_FAE));
+
+ crypt->userdata = userdata;
+ crypt->read = onread;
+ crypt->buffer = NULL;
+ crypt->bufsize = 0;
+ crypt->offset = 0;
+ crypt->ivbuffer = NULL;
+ crypt->ivbufsize = 0;
+ return crypt;
+}
+
+xb_rcrypt_result_t
+xb_crypt_read_chunk(xb_rcrypt_t *crypt, void **buf, size_t *olen, size_t *elen,
+ void **iv, size_t *ivlen, my_bool *hash_appended)
+
+{
+ uchar tmpbuf[XB_CRYPT_CHUNK_MAGIC_SIZE + 8 + 8 + 8 + 4];
+ uchar *ptr;
+ ulonglong tmp;
+ ulong checksum, checksum_exp, version;
+ size_t bytesread;
+ xb_rcrypt_result_t result = XB_CRYPT_READ_CHUNK;
+
+ if ((bytesread = crypt->read(crypt->userdata, tmpbuf, sizeof(tmpbuf)))
+ != sizeof(tmpbuf)) {
+ if (bytesread == 0) {
+ result = XB_CRYPT_READ_EOF;
+ goto err;
+ } else {
+ msg("%s:%s: unable to read chunk header data at "
+ "offset 0x%llx.\n",
+ my_progname, __FUNCTION__, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ }
+
+ ptr = tmpbuf;
+
+ if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC3,
+ XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) {
+ version = 3;
+ } else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC2,
+ XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) {
+ version = 2;
+ } else if (memcmp(ptr, XB_CRYPT_CHUNK_MAGIC1,
+ XB_CRYPT_CHUNK_MAGIC_SIZE) == 0) {
+ version = 1;
+ } else {
+ msg("%s:%s: wrong chunk magic at offset 0x%llx.\n",
+ my_progname, __FUNCTION__, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+
+ ptr += XB_CRYPT_CHUNK_MAGIC_SIZE;
+ crypt->offset += XB_CRYPT_CHUNK_MAGIC_SIZE;
+
+ tmp = uint8korr(ptr); /* reserved */
+ ptr += 8;
+ crypt->offset += 8;
+
+ tmp = uint8korr(ptr); /* original size */
+ ptr += 8;
+ if (tmp > INT_MAX) {
+ msg("%s:%s: invalid original size at offset 0x%llx.\n",
+ my_progname, __FUNCTION__, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ crypt->offset += 8;
+ *olen = (size_t)tmp;
+
+ tmp = uint8korr(ptr); /* encrypted size */
+ ptr += 8;
+ if (tmp > INT_MAX) {
+ msg("%s:%s: invalid encrypted size at offset 0x%llx.\n",
+ my_progname, __FUNCTION__, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ crypt->offset += 8;
+ *elen = (size_t)tmp;
+
+ checksum_exp = uint4korr(ptr); /* checksum */
+ ptr += 4;
+ crypt->offset += 4;
+
+ /* iv size */
+ if (version == 1) {
+ *ivlen = 0;
+ *iv = 0;
+ } else {
+ if ((bytesread = crypt->read(crypt->userdata, tmpbuf, 8))
+ != 8) {
+ if (bytesread == 0) {
+ result = XB_CRYPT_READ_EOF;
+ goto err;
+ } else {
+ msg("%s:%s: unable to read chunk iv size at "
+ "offset 0x%llx.\n",
+ my_progname, __FUNCTION__, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ }
+
+ tmp = uint8korr(tmpbuf);
+ if (tmp > INT_MAX) {
+ msg("%s:%s: invalid iv size at offset 0x%llx.\n",
+ my_progname, __FUNCTION__, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ crypt->offset += 8;
+ *ivlen = (size_t)tmp;
+ }
+
+ if (*ivlen > crypt->ivbufsize) {
+ crypt->ivbuffer = my_realloc(crypt->ivbuffer, *ivlen,
+ MYF(MY_WME | MY_ALLOW_ZERO_PTR));
+ if (crypt->ivbuffer == NULL) {
+ msg("%s:%s: failed to increase iv buffer to "
+ "%llu bytes.\n", my_progname, __FUNCTION__,
+ (ulonglong)*ivlen);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ crypt->ivbufsize = *ivlen;
+ }
+
+ if (*ivlen > 0) {
+ if (crypt->read(crypt->userdata, crypt->ivbuffer, *ivlen)
+ != *ivlen) {
+			msg("%s:%s: failed to read %llu bytes for chunk iv "
+ "at offset 0x%llx.\n", my_progname, __FUNCTION__,
+ (ulonglong)*ivlen, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ *iv = crypt->ivbuffer;
+ }
+
+	/* for version 2 we need to read in the iv data but do not init
+ CTR with it */
+ if (version == 2) {
+ *ivlen = 0;
+ *iv = 0;
+ }
+
+ if (*olen > crypt->bufsize) {
+ crypt->buffer = my_realloc(crypt->buffer, *olen,
+ MYF(MY_WME | MY_ALLOW_ZERO_PTR));
+ if (crypt->buffer == NULL) {
+ msg("%s:%s: failed to increase buffer to "
+ "%llu bytes.\n", my_progname, __FUNCTION__,
+ (ulonglong)*olen);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ crypt->bufsize = *olen;
+ }
+
+ if (*elen > 0) {
+ if (crypt->read(crypt->userdata, crypt->buffer, *elen)
+ != *elen) {
+			msg("%s:%s: failed to read %llu bytes for chunk payload "
+ "at offset 0x%llx.\n", my_progname, __FUNCTION__,
+ (ulonglong)*elen, crypt->offset);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+ }
+
+ checksum = crc32(0, crypt->buffer, *elen);
+ if (checksum != checksum_exp) {
+ msg("%s:%s invalid checksum at offset 0x%llx, "
+ "expected 0x%lx, actual 0x%lx.\n", my_progname, __FUNCTION__,
+ crypt->offset, checksum_exp, checksum);
+ result = XB_CRYPT_READ_ERROR;
+ goto err;
+ }
+
+ crypt->offset += *elen;
+ *buf = crypt->buffer;
+
+ *hash_appended = version > 2;
+
+ goto exit;
+
+err:
+ *buf = NULL;
+ *olen = 0;
+ *elen = 0;
+ *ivlen = 0;
+ *iv = 0;
+exit:
+ return result;
+}
+
+int xb_crypt_read_close(xb_rcrypt_t *crypt)
+{
+ if (crypt->buffer)
+ my_free(crypt->buffer);
+ if (crypt->ivbuffer)
+ my_free(crypt->ivbuffer);
+ my_free(crypt);
+
+ return 0;
+}
+
diff --git a/extra/mariabackup/xbcrypt_write.c b/extra/mariabackup/xbcrypt_write.c
new file mode 100644
index 00000000000..5cbeb67f227
--- /dev/null
+++ b/extra/mariabackup/xbcrypt_write.c
@@ -0,0 +1,104 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+The xbcrypt format writer implementation.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include "xbcrypt.h"
+
+struct xb_wcrypt_struct {
+ void *userdata;
+ xb_crypt_write_callback *write;
+};
+
+xb_wcrypt_t *
+xb_crypt_write_open(void *userdata, xb_crypt_write_callback *onwrite)
+{
+ xb_wcrypt_t *crypt;
+
+ xb_ad(onwrite);
+
+ crypt = (xb_wcrypt_t *) my_malloc(sizeof(xb_wcrypt_t), MYF(MY_FAE));
+
+ crypt->userdata = userdata;
+ crypt->write = onwrite;
+
+ return crypt;
+}
+
+int xb_crypt_write_chunk(xb_wcrypt_t *crypt, const void *buf, size_t olen,
+ size_t elen, const void *iv, size_t ivlen)
+{
+ uchar tmpbuf[XB_CRYPT_CHUNK_MAGIC_SIZE + 8 + 8 + 8 + 4 + 8];
+ uchar *ptr;
+ ulong checksum;
+
+ xb_ad(olen <= INT_MAX);
+ if (olen > INT_MAX)
+ return 0;
+
+ xb_ad(elen <= INT_MAX);
+ if (elen > INT_MAX)
+ return 0;
+
+ xb_ad(ivlen <= INT_MAX);
+ if (ivlen > INT_MAX)
+ return 0;
+
+ ptr = tmpbuf;
+
+ memcpy(ptr, XB_CRYPT_CHUNK_MAGIC_CURRENT, XB_CRYPT_CHUNK_MAGIC_SIZE);
+ ptr += XB_CRYPT_CHUNK_MAGIC_SIZE;
+
+ int8store(ptr, (ulonglong)0); /* reserved */
+ ptr += 8;
+
+ int8store(ptr, (ulonglong)olen); /* original size */
+ ptr += 8;
+
+ int8store(ptr, (ulonglong)elen); /* encrypted (actual) size */
+ ptr += 8;
+
+ checksum = crc32(0, buf, elen);
+ int4store(ptr, checksum); /* checksum */
+ ptr += 4;
+
+ int8store(ptr, (ulonglong)ivlen); /* iv size */
+ ptr += 8;
+
+ xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));
+
+ if (crypt->write(crypt->userdata, tmpbuf, ptr-tmpbuf) == -1)
+ return 1;
+
+ if (crypt->write(crypt->userdata, iv, ivlen) == -1)
+ return 1;
+
+ if (crypt->write(crypt->userdata, buf, elen) == -1)
+ return 1;
+
+ return 0;
+}
+
+int xb_crypt_write_close(xb_wcrypt_t *crypt)
+{
+ my_free(crypt);
+
+ return 0;
+}
+
+
diff --git a/extra/mariabackup/xbstream.c b/extra/mariabackup/xbstream.c
new file mode 100644
index 00000000000..ba3412a359b
--- /dev/null
+++ b/extra/mariabackup/xbstream.c
@@ -0,0 +1,456 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+The xbstream utility: serialize/deserialize files in the XBSTREAM format.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <my_getopt.h>
+#include <hash.h>
+#include "common.h"
+#include "xbstream.h"
+#include "ds_local.h"
+#include "ds_stdout.h"
+
+#define XBSTREAM_VERSION "1.0"
+#define XBSTREAM_BUFFER_SIZE (10 * 1024 * 1024UL)
+
+#define START_FILE_HASH_SIZE 16
+
+typedef enum {
+ RUN_MODE_NONE,
+ RUN_MODE_CREATE,
+ RUN_MODE_EXTRACT
+} run_mode_t;
+
+/* Need the following definitions to avoid linking with ds_*.o and their link
+dependencies */
+datasink_t datasink_archive;
+datasink_t datasink_xbstream;
+datasink_t datasink_compress;
+datasink_t datasink_tmpfile;
+datasink_t datasink_encrypt;
+datasink_t datasink_buffer;
+
+static run_mode_t opt_mode;
+static char * opt_directory = NULL;
+static my_bool opt_verbose = 0;
+
+static struct my_option my_long_options[] =
+{
+ {"help", '?', "Display this help and exit.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"create", 'c', "Stream the specified files to the standard output.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"extract", 'x', "Extract to disk files from the stream on the "
+ "standard input.",
+ 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"directory", 'C', "Change the current directory to the specified one "
+ "before streaming or extracting.", &opt_directory, &opt_directory, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"verbose", 'v', "Print verbose output.", &opt_verbose, &opt_verbose,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+typedef struct {
+ char *path;
+ uint pathlen;
+ my_off_t offset;
+ ds_ctxt_t *ds_ctxt;
+ ds_file_t *file;
+} file_entry_t;
+
+static int get_options(int *argc, char ***argv);
+static int mode_create(int argc, char **argv);
+static int mode_extract(int argc, char **argv);
+static my_bool get_one_option(int optid, const struct my_option *opt,
+ char *argument);
+
+int
+main(int argc, char **argv)
+{
+ MY_INIT(argv[0]);
+
+ if (get_options(&argc, &argv)) {
+ goto err;
+ }
+
+ if (opt_mode == RUN_MODE_NONE) {
+ msg("%s: either -c or -x must be specified.\n", my_progname);
+ goto err;
+ }
+
+ /* Change the current directory if -C is specified */
+ if (opt_directory && my_setwd(opt_directory, MYF(MY_WME))) {
+ goto err;
+ }
+
+ if (opt_mode == RUN_MODE_CREATE && mode_create(argc, argv)) {
+ goto err;
+ } else if (opt_mode == RUN_MODE_EXTRACT && mode_extract(argc, argv)) {
+ goto err;
+ }
+
+ my_cleanup_options(my_long_options);
+
+ my_end(0);
+
+ return EXIT_SUCCESS;
+err:
+ my_cleanup_options(my_long_options);
+
+ my_end(0);
+
+ exit(EXIT_FAILURE);
+}
+
+static
+int
+get_options(int *argc, char ***argv)
+{
+ int ho_error;
+
+ if ((ho_error= handle_options(argc, argv, my_long_options,
+ get_one_option))) {
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+}
+
+static
+void
+print_version(void)
+{
+ printf("%s Ver %s for %s (%s)\n", my_progname, XBSTREAM_VERSION,
+ SYSTEM_TYPE, MACHINE_TYPE);
+}
+
+static
+void
+usage(void)
+{
+ print_version();
+ puts("Copyright (C) 2011-2013 Percona LLC and/or its affiliates.");
+ puts("This software comes with ABSOLUTELY NO WARRANTY. "
+ "This is free software,\nand you are welcome to modify and "
+ "redistribute it under the GPL license.\n");
+
+ puts("Serialize/deserialize files in the XBSTREAM format.\n");
+
+ puts("Usage: ");
+ printf(" %s -c [OPTIONS...] FILES... # stream specified files to "
+ "standard output.\n", my_progname);
+ printf(" %s -x [OPTIONS...] # extract files from the stream"
+	       " on the standard input.\n", my_progname);
+
+ puts("\nOptions:");
+ my_print_help(my_long_options);
+}
+
+static
+int
+set_run_mode(run_mode_t mode)
+{
+ if (opt_mode != RUN_MODE_NONE) {
+		msg("%s: can't specify both -c and -x.\n", my_progname);
+ return 1;
+ }
+
+ opt_mode = mode;
+
+ return 0;
+}
+
+static
+my_bool
+get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
+ char *argument __attribute__((unused)))
+{
+ switch (optid) {
+ case 'c':
+ if (set_run_mode(RUN_MODE_CREATE)) {
+ return TRUE;
+ }
+ break;
+ case 'x':
+ if (set_run_mode(RUN_MODE_EXTRACT)) {
+ return TRUE;
+ }
+ break;
+ case '?':
+ usage();
+ exit(0);
+ }
+
+ return FALSE;
+}
+
+static
+int
+stream_one_file(File file, xb_wstream_file_t *xbfile)
+{
+ uchar *buf;
+ size_t bytes;
+ size_t offset;
+
+ posix_fadvise(file, 0, 0, POSIX_FADV_SEQUENTIAL);
+ offset = my_tell(file, MYF(MY_WME));
+
+ buf = (uchar*)(my_malloc(XBSTREAM_BUFFER_SIZE, MYF(MY_FAE)));
+
+ while ((bytes = my_read(file, buf, XBSTREAM_BUFFER_SIZE,
+ MYF(MY_WME))) > 0) {
+ if (xb_stream_write_data(xbfile, buf, bytes)) {
+ msg("%s: xb_stream_write_data() failed.\n",
+ my_progname);
+ my_free(buf);
+ return 1;
+ }
+ posix_fadvise(file, offset, XBSTREAM_BUFFER_SIZE,
+ POSIX_FADV_DONTNEED);
+ offset += XBSTREAM_BUFFER_SIZE;
+
+ }
+
+ my_free(buf);
+
+ if (bytes == (size_t) -1) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static
+int
+mode_create(int argc, char **argv)
+{
+ int i;
+ MY_STAT mystat;
+ xb_wstream_t *stream;
+
+ if (argc < 1) {
+ msg("%s: no files are specified.\n", my_progname);
+ return 1;
+ }
+
+ stream = xb_stream_write_new();
+ if (stream == NULL) {
+ msg("%s: xb_stream_write_new() failed.\n", my_progname);
+ return 1;
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *filepath = argv[i];
+ File src_file;
+ xb_wstream_file_t *file;
+
+ if (my_stat(filepath, &mystat, MYF(MY_WME)) == NULL) {
+ goto err;
+ }
+ if (!MY_S_ISREG(mystat.st_mode)) {
+ msg("%s: %s is not a regular file, exiting.\n",
+ my_progname, filepath);
+ goto err;
+ }
+
+ if ((src_file = my_open(filepath, O_RDONLY, MYF(MY_WME))) < 0) {
+ msg("%s: failed to open %s.\n", my_progname, filepath);
+ goto err;
+ }
+
+ file = xb_stream_write_open(stream, filepath, &mystat, NULL, NULL);
+ if (file == NULL) {
+ goto err;
+ }
+
+ if (opt_verbose) {
+ msg("%s\n", filepath);
+ }
+
+ if (stream_one_file(src_file, file) ||
+ xb_stream_write_close(file) ||
+ my_close(src_file, MYF(MY_WME))) {
+ goto err;
+ }
+ }
+
+ xb_stream_write_done(stream);
+
+ return 0;
+err:
+ xb_stream_write_done(stream);
+
+ return 1;
+}
+
+static
+file_entry_t *
+file_entry_new(ds_ctxt_t *ds_ctxt, const char *path, uint pathlen)
+{
+ file_entry_t *entry;
+ ds_file_t *file;
+
+ entry = (file_entry_t *) my_malloc(sizeof(file_entry_t),
+ MYF(MY_WME | MY_ZEROFILL));
+ if (entry == NULL) {
+ return NULL;
+ }
+
+ entry->path = my_strndup(path, pathlen, MYF(MY_WME));
+ if (entry->path == NULL) {
+ goto err;
+ }
+ entry->pathlen = pathlen;
+
+ file = ds_open(ds_ctxt, path, NULL);
+ if (file == NULL) {
+ msg("%s: failed to create file.\n", my_progname);
+ goto err;
+ }
+
+ if (opt_verbose) {
+ msg("%s\n", entry->path);
+ }
+
+ entry->file = file;
+ entry->ds_ctxt = ds_ctxt;
+
+ return entry;
+
+err:
+ if (entry->path != NULL) {
+ my_free(entry->path);
+ }
+ my_free(entry);
+
+ return NULL;
+}
+
+static
+uchar *
+get_file_entry_key(file_entry_t *entry, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length = entry->pathlen;
+ return (uchar *) entry->path;
+}
+
+static
+void
+file_entry_free(file_entry_t *entry)
+{
+ ds_close(entry->file);
+ my_free(entry->path);
+ my_free(entry);
+}
+
+static
+int
+mode_extract(int argc __attribute__((unused)),
+ char **argv __attribute__((unused)))
+{
+ xb_rstream_t *stream;
+ xb_rstream_result_t res;
+ xb_rstream_chunk_t chunk;
+ HASH filehash;
+ file_entry_t *entry;
+ ds_ctxt_t *ds_ctxt;
+
+ stream = xb_stream_read_new();
+ if (stream == NULL) {
+ msg("%s: xb_stream_read_new() failed.\n", my_progname);
+ return 1;
+ }
+
+ /* If --directory is specified, it is already set as CWD by now. */
+ ds_ctxt = ds_create(".", DS_TYPE_LOCAL);
+
+ if (my_hash_init(&filehash, &my_charset_bin, START_FILE_HASH_SIZE,
+ 0, 0, (my_hash_get_key) get_file_entry_key,
+ (my_hash_free_key) file_entry_free, MYF(0))) {
+ msg("%s: failed to initialize file hash.\n", my_progname);
+ goto err;
+ }
+
+ while ((res = xb_stream_read_chunk(stream, &chunk)) ==
+ XB_STREAM_READ_CHUNK) {
+ /* If unknown type and ignorable flag is set, skip this chunk */
+		if (chunk.type == XB_CHUNK_TYPE_UNKNOWN &&
+		    (chunk.flags & XB_STREAM_FLAG_IGNORABLE)) {
+ continue;
+ }
+
+ /* See if we already have this file open */
+ entry = (file_entry_t *) my_hash_search(&filehash,
+ (uchar *) chunk.path,
+ chunk.pathlen);
+
+ if (entry == NULL) {
+ entry = file_entry_new(ds_ctxt, chunk.path,
+ chunk.pathlen);
+ if (entry == NULL) {
+ goto err;
+ }
+ if (my_hash_insert(&filehash, (uchar *) entry)) {
+ msg("%s: my_hash_insert() failed.\n",
+ my_progname);
+ goto err;
+ }
+ }
+
+ if (chunk.type == XB_CHUNK_TYPE_EOF) {
+ my_hash_delete(&filehash, (uchar *) entry);
+
+ continue;
+ }
+
+ if (entry->offset != chunk.offset) {
+ msg("%s: out-of-order chunk: real offset = 0x%llx, "
+ "expected offset = 0x%llx\n", my_progname,
+ chunk.offset, entry->offset);
+ goto err;
+ }
+
+ if (ds_write(entry->file, chunk.data, chunk.length)) {
+			msg("%s: ds_write() failed.\n", my_progname);
+ goto err;
+ }
+
+ entry->offset += chunk.length;
+ };
+
+ if (res == XB_STREAM_READ_ERROR) {
+ goto err;
+ }
+
+ my_hash_free(&filehash);
+ ds_destroy(ds_ctxt);
+ xb_stream_read_done(stream);
+
+ return 0;
+err:
+ my_hash_free(&filehash);
+ ds_destroy(ds_ctxt);
+ xb_stream_read_done(stream);
+
+ return 1;
+}
diff --git a/extra/mariabackup/xbstream.h b/extra/mariabackup/xbstream.h
new file mode 100644
index 00000000000..e9f1468e58d
--- /dev/null
+++ b/extra/mariabackup/xbstream.h
@@ -0,0 +1,103 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+The xbstream format interface.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XBSTREAM_H
+#define XBSTREAM_H
+
+#include <my_base.h>
+
+/* Magic value in a chunk header */
+#define XB_STREAM_CHUNK_MAGIC "XBSTCK01"
+
+/* Chunk flags */
+/* Chunk can be ignored if unknown version/format */
+#define XB_STREAM_FLAG_IGNORABLE 0x01
+
+/* Magic + flags + type + path len */
+#define CHUNK_HEADER_CONSTANT_LEN ((sizeof(XB_STREAM_CHUNK_MAGIC) - 1) + \
+ 1 + 1 + 4)
+#define CHUNK_TYPE_OFFSET (sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1)
+#define PATH_LENGTH_OFFSET (sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1)
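+
+/* Layout of one chunk in the stream, as read by xb_stream_read_chunk()
+(all integers are little-endian):
+
+     magic           8 bytes  ("XBSTCK01")
+     flags           1 byte
+     type            1 byte   ('P' = payload, 'E' = EOF)
+     path length     4 bytes
+     path            <path length> bytes
+
+   and, for payload chunks only:
+
+     payload length  8 bytes
+     payload offset  8 bytes
+     checksum        4 bytes  (crc32 of the payload)
+     payload         <payload length> bytes
+*/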
+
+typedef struct xb_wstream_struct xb_wstream_t;
+
+typedef struct xb_wstream_file_struct xb_wstream_file_t;
+
+typedef enum {
+ XB_STREAM_FMT_NONE,
+ XB_STREAM_FMT_TAR,
+ XB_STREAM_FMT_XBSTREAM
+} xb_stream_fmt_t;
+
+/************************************************************************
+Write interface. */
+
+typedef ssize_t xb_stream_write_callback(xb_wstream_file_t *file,
+ void *userdata,
+ const void *buf, size_t len);
+
+xb_wstream_t *xb_stream_write_new(void);
+
+xb_wstream_file_t *xb_stream_write_open(xb_wstream_t *stream, const char *path,
+ MY_STAT *mystat, void *userdata,
+ xb_stream_write_callback *onwrite);
+
+int xb_stream_write_data(xb_wstream_file_t *file, const void *buf, size_t len);
+
+int xb_stream_write_close(xb_wstream_file_t *file);
+
+int xb_stream_write_done(xb_wstream_t *stream);
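+
+/* Typical write-side usage (a minimal sketch, modelled on mode_create() in
+xbstream.c):
+
+	xb_wstream_t *s = xb_stream_write_new();
+	xb_wstream_file_t *f = xb_stream_write_open(s, path, &mystat,
+						    NULL, NULL);
+	xb_stream_write_data(f, buf, len);
+	xb_stream_write_close(f);
+	xb_stream_write_done(s);
+*/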
+
+/************************************************************************
+Read interface. */
+
+typedef enum {
+ XB_STREAM_READ_CHUNK,
+ XB_STREAM_READ_EOF,
+ XB_STREAM_READ_ERROR
+} xb_rstream_result_t;
+
+typedef enum {
+ XB_CHUNK_TYPE_UNKNOWN = '\0',
+ XB_CHUNK_TYPE_PAYLOAD = 'P',
+ XB_CHUNK_TYPE_EOF = 'E'
+} xb_chunk_type_t;
+
+typedef struct xb_rstream_struct xb_rstream_t;
+
+typedef struct {
+ uchar flags;
+ xb_chunk_type_t type;
+ uint pathlen;
+ char path[FN_REFLEN];
+ size_t length;
+ my_off_t offset;
+ void *data;
+ ulong checksum;
+} xb_rstream_chunk_t;
+
+xb_rstream_t *xb_stream_read_new(void);
+
+xb_rstream_result_t xb_stream_read_chunk(xb_rstream_t *stream,
+ xb_rstream_chunk_t *chunk);
+
+int xb_stream_read_done(xb_rstream_t *stream);
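+
+/* Typical read-side usage (a minimal sketch, modelled on mode_extract() in
+xbstream.c):
+
+	xb_rstream_t *s = xb_stream_read_new();
+	while (xb_stream_read_chunk(s, &chunk) == XB_STREAM_READ_CHUNK) {
+		... dispatch chunk.data to the file named chunk.path ...
+	}
+	xb_stream_read_done(s);
+*/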
+
+#endif
diff --git a/extra/mariabackup/xbstream_read.c b/extra/mariabackup/xbstream_read.c
new file mode 100644
index 00000000000..0ffcabd9270
--- /dev/null
+++ b/extra/mariabackup/xbstream_read.c
@@ -0,0 +1,227 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+The xbstream format reader implementation.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <zlib.h>
+#include "common.h"
+#include "xbstream.h"
+
+/* Allocate 1 MB for the payload buffer initially */
+#define INIT_BUFFER_LEN (1024 * 1024)
+
+#ifndef MY_OFF_T_MAX
+#define MY_OFF_T_MAX (~(my_off_t)0UL)
+#endif
+
+struct xb_rstream_struct {
+ my_off_t offset;
+ File fd;
+ void *buffer;
+ size_t buflen;
+};
+
+xb_rstream_t *
+xb_stream_read_new(void)
+{
+ xb_rstream_t *stream;
+
+ stream = (xb_rstream_t *) my_malloc(sizeof(xb_rstream_t), MYF(MY_FAE));
+
+ stream->buffer = my_malloc(INIT_BUFFER_LEN, MYF(MY_FAE));
+ stream->buflen = INIT_BUFFER_LEN;
+
+ stream->fd = fileno(stdin);
+ stream->offset = 0;
+
+#ifdef __WIN__
+ setmode(stream->fd, _O_BINARY);
+#endif
+
+ return stream;
+}
+
+static inline
+xb_chunk_type_t
+validate_chunk_type(uchar code)
+{
+ switch ((xb_chunk_type_t) code) {
+ case XB_CHUNK_TYPE_PAYLOAD:
+ case XB_CHUNK_TYPE_EOF:
+ return (xb_chunk_type_t) code;
+ default:
+ return XB_CHUNK_TYPE_UNKNOWN;
+ }
+}
+
+#define F_READ(buf,len) \
+ do { \
+ if (xb_read_full(fd, buf, len) < len) { \
+		msg("xb_stream_read_chunk(): xb_read_full() failed.\n"); \
+ goto err; \
+ } \
+ } while (0)
+
+xb_rstream_result_t
+xb_stream_read_chunk(xb_rstream_t *stream, xb_rstream_chunk_t *chunk)
+{
+ uchar tmpbuf[16];
+ uchar *ptr = tmpbuf;
+ uint pathlen;
+ size_t tbytes;
+ ulonglong ullval;
+ ulong checksum_exp;
+ ulong checksum;
+ File fd = stream->fd;
+
+ xb_ad(sizeof(tmpbuf) >= CHUNK_HEADER_CONSTANT_LEN);
+
+ /* This is the only place where we expect EOF, so read with
+ xb_read_full() rather than F_READ() */
+ tbytes = xb_read_full(fd, ptr, CHUNK_HEADER_CONSTANT_LEN);
+ if (tbytes == 0) {
+ return XB_STREAM_READ_EOF;
+ } else if (tbytes < CHUNK_HEADER_CONSTANT_LEN) {
+ msg("xb_stream_read_chunk(): unexpected end of stream at "
+ "offset 0x%llx.\n", (ulonglong) stream->offset);
+ goto err;
+ }
+
+ ptr = tmpbuf;
+
+ /* Chunk magic value */
+ if (memcmp(tmpbuf, XB_STREAM_CHUNK_MAGIC, 8)) {
+ msg("xb_stream_read_chunk(): wrong chunk magic at offset "
+ "0x%llx.\n", (ulonglong) stream->offset);
+ goto err;
+ }
+ ptr += 8;
+ stream->offset += 8;
+
+ /* Chunk flags */
+ chunk->flags = *ptr++;
+ stream->offset++;
+
+ /* Chunk type, ignore unknown ones if ignorable flag is set */
+ chunk->type = validate_chunk_type(*ptr);
+ if (chunk->type == XB_CHUNK_TYPE_UNKNOWN &&
+ !(chunk->flags & XB_STREAM_FLAG_IGNORABLE)) {
+ msg("xb_stream_read_chunk(): unknown chunk type 0x%lx at "
+ "offset 0x%llx.\n", (ulong) *ptr,
+ (ulonglong) stream->offset);
+ goto err;
+ }
+ ptr++;
+ stream->offset++;
+
+ /* Path length */
+ pathlen = uint4korr(ptr);
+ if (pathlen >= FN_REFLEN) {
+ msg("xb_stream_read_chunk(): path length (%lu) is too large at "
+ "offset 0x%llx.\n", (ulong) pathlen, (ulonglong) stream->offset);
+ goto err;
+ }
+ chunk->pathlen = pathlen;
+ stream->offset +=4;
+
+ xb_ad((ptr + 4 - tmpbuf) == CHUNK_HEADER_CONSTANT_LEN);
+
+ /* Path */
+ if (chunk->pathlen > 0) {
+ F_READ((uchar *) chunk->path, pathlen);
+ stream->offset += pathlen;
+ }
+ chunk->path[pathlen] = '\0';
+
+ if (chunk->type == XB_CHUNK_TYPE_EOF) {
+ return XB_STREAM_READ_CHUNK;
+ }
+
+ /* Payload length */
+ F_READ(tmpbuf, 16);
+ ullval = uint8korr(tmpbuf);
+ if (ullval > (ulonglong) SIZE_T_MAX) {
+ msg("xb_stream_read_chunk(): chunk length is too large at "
+ "offset 0x%llx: 0x%llx.\n", (ulonglong) stream->offset,
+ ullval);
+ goto err;
+ }
+ chunk->length = (size_t) ullval;
+ stream->offset += 8;
+
+ /* Payload offset */
+ ullval = uint8korr(tmpbuf + 8);
+ if (ullval > (ulonglong) MY_OFF_T_MAX) {
+ msg("xb_stream_read_chunk(): chunk offset is too large at "
+ "offset 0x%llx: 0x%llx.\n", (ulonglong) stream->offset,
+ ullval);
+ goto err;
+ }
+ chunk->offset = (my_off_t) ullval;
+ stream->offset += 8;
+
+ /* Reallocate the buffer if needed */
+ if (chunk->length > stream->buflen) {
+ stream->buffer = my_realloc(stream->buffer, chunk->length,
+ MYF(MY_WME));
+ if (stream->buffer == NULL) {
+ msg("xb_stream_read_chunk(): failed to increase buffer "
+ "to %lu bytes.\n", (ulong) chunk->length);
+ goto err;
+ }
+ stream->buflen = chunk->length;
+ }
+
+ /* Checksum */
+ F_READ(tmpbuf, 4);
+ checksum_exp = uint4korr(tmpbuf);
+
+ /* Payload */
+ if (chunk->length > 0) {
+ F_READ(stream->buffer, chunk->length);
+ stream->offset += chunk->length;
+ }
+
+ checksum = crc32(0, stream->buffer, chunk->length);
+ if (checksum != checksum_exp) {
+ msg("xb_stream_read_chunk(): invalid checksum at offset "
+ "0x%llx: expected 0x%lx, read 0x%lx.\n",
+ (ulonglong) stream->offset, checksum_exp, checksum);
+ goto err;
+ }
+ stream->offset += 4;
+
+ chunk->data = stream->buffer;
+ chunk->checksum = checksum;
+
+ return XB_STREAM_READ_CHUNK;
+
+err:
+ return XB_STREAM_READ_ERROR;
+}
+
+int
+xb_stream_read_done(xb_rstream_t *stream)
+{
+ my_free(stream->buffer);
+ my_free(stream);
+
+ return 0;
+}
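
For reference, the per-chunk wire format the reader above expects is: an 8-byte chunk magic, a 1-byte flags field, a 1-byte chunk type ('P' for payload, 'E' for end-of-file), a 4-byte little-endian path length and the path itself; payload chunks then carry an 8-byte payload length, an 8-byte file offset, a 4-byte CRC32 of the payload, and the payload bytes. A standalone sketch of decoding the 14 constant header bytes (8 + 1 + 1 + 4) in plain C; the magic is passed in rather than hard-coded, since the actual XB_STREAM_CHUNK_MAGIC value is defined in xbstream.h (illustrative only):

/* Illustrative sketch only; mirrors the constant-header parsing above. */
#include <stdint.h>
#include <string.h>

struct xb_chunk_hdr {
    uint8_t  flags;
    char     type;      /* 'P' = payload, 'E' = EOF */
    uint32_t path_len;  /* stored little-endian on the wire */
};

static uint32_t get_le32(const unsigned char *p)
{
    return (uint32_t) p[0] | ((uint32_t) p[1] << 8)
         | ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

/* buf must hold the 14 constant header bytes; magic is 8 bytes long */
static int parse_chunk_header(const unsigned char *buf, const char *magic,
                              struct xb_chunk_hdr *hdr)
{
    if (memcmp(buf, magic, 8) != 0)
        return -1;                      /* wrong chunk magic */
    hdr->flags    = buf[8];
    hdr->type     = (char) buf[9];
    hdr->path_len = get_le32(buf + 10); /* same as uint4korr() above */
    return 0;
}
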
diff --git a/extra/mariabackup/xbstream_write.c b/extra/mariabackup/xbstream_write.c
new file mode 100644
index 00000000000..7b042eea49c
--- /dev/null
+++ b/extra/mariabackup/xbstream_write.c
@@ -0,0 +1,280 @@
+/******************************************************
+Copyright (c) 2011-2013 Percona LLC and/or its affiliates.
+
+The xbstream format writer implementation.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <zlib.h>
+#include "common.h"
+#include "xbstream.h"
+
+/* Group writes smaller than this into a single chunk */
+#define XB_STREAM_MIN_CHUNK_SIZE (10 * 1024 * 1024)
+
+struct xb_wstream_struct {
+ pthread_mutex_t mutex;
+};
+
+struct xb_wstream_file_struct {
+ xb_wstream_t *stream;
+ char *path;
+ ulong path_len;
+ char chunk[XB_STREAM_MIN_CHUNK_SIZE];
+ char *chunk_ptr;
+ size_t chunk_free;
+ my_off_t offset;
+ void *userdata;
+ xb_stream_write_callback *write;
+};
+
+static int xb_stream_flush(xb_wstream_file_t *file);
+static int xb_stream_write_chunk(xb_wstream_file_t *file,
+ const void *buf, size_t len);
+static int xb_stream_write_eof(xb_wstream_file_t *file);
+
+static
+ssize_t
+xb_stream_default_write_callback(xb_wstream_file_t *file __attribute__((unused)),
+ void *userdata __attribute__((unused)),
+ const void *buf, size_t len)
+{
+ if (my_write(fileno(stdout), buf, len, MYF(MY_WME | MY_NABP)))
+ return -1;
+ return len;
+}
+
+xb_wstream_t *
+xb_stream_write_new(void)
+{
+ xb_wstream_t *stream;
+
+ stream = (xb_wstream_t *) my_malloc(sizeof(xb_wstream_t), MYF(MY_FAE));
+ pthread_mutex_init(&stream->mutex, NULL);
+
+ return stream;
+}
+
+xb_wstream_file_t *
+xb_stream_write_open(xb_wstream_t *stream, const char *path,
+ MY_STAT *mystat __attribute__((unused)),
+ void *userdata,
+ xb_stream_write_callback *onwrite)
+{
+ xb_wstream_file_t *file;
+ ulong path_len;
+
+ path_len = strlen(path);
+
+ if (path_len > FN_REFLEN) {
+ msg("xb_stream_write_open(): file path is too long.\n");
+ return NULL;
+ }
+
+ file = (xb_wstream_file_t *) my_malloc(sizeof(xb_wstream_file_t) +
+ path_len + 1, MYF(MY_FAE));
+
+ file->path = (char *) (file + 1);
+ memcpy(file->path, path, path_len + 1);
+ file->path_len = path_len;
+
+ file->stream = stream;
+ file->offset = 0;
+ file->chunk_ptr = file->chunk;
+ file->chunk_free = XB_STREAM_MIN_CHUNK_SIZE;
+ if (onwrite) {
+#ifdef __WIN__
+ setmode(fileno(stdout), _O_BINARY);
+#endif
+ file->userdata = userdata;
+ file->write = onwrite;
+ } else {
+ file->userdata = NULL;
+ file->write = xb_stream_default_write_callback;
+ }
+
+ return file;
+}
+
+int
+xb_stream_write_data(xb_wstream_file_t *file, const void *buf, size_t len)
+{
+ if (len < file->chunk_free) {
+ memcpy(file->chunk_ptr, buf, len);
+ file->chunk_ptr += len;
+ file->chunk_free -= len;
+
+ return 0;
+ }
+
+ if (xb_stream_flush(file))
+ return 1;
+
+ return xb_stream_write_chunk(file, buf, len);
+}
+
+int
+xb_stream_write_close(xb_wstream_file_t *file)
+{
+ if (xb_stream_flush(file) ||
+ xb_stream_write_eof(file)) {
+ my_free(file);
+ return 1;
+ }
+
+ my_free(file);
+
+ return 0;
+}
+
+int
+xb_stream_write_done(xb_wstream_t *stream)
+{
+ pthread_mutex_destroy(&stream->mutex);
+
+ my_free(stream);
+
+ return 0;
+}
+
+static
+int
+xb_stream_flush(xb_wstream_file_t *file)
+{
+ if (file->chunk_ptr == file->chunk) {
+ return 0;
+ }
+
+ if (xb_stream_write_chunk(file, file->chunk,
+ file->chunk_ptr - file->chunk)) {
+ return 1;
+ }
+
+ file->chunk_ptr = file->chunk;
+ file->chunk_free = XB_STREAM_MIN_CHUNK_SIZE;
+
+ return 0;
+}
+
+static
+int
+xb_stream_write_chunk(xb_wstream_file_t *file, const void *buf, size_t len)
+{
+ /* Chunk magic + flags + chunk type + path_len + path + len + offset +
+ checksum */
+ uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 +
+ FN_REFLEN + 8 + 8 + 4];
+ uchar *ptr;
+ xb_wstream_t *stream = file->stream;
+ ulong checksum;
+
+ /* Write xbstream header */
+ ptr = tmpbuf;
+
+ /* Chunk magic */
+ memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
+ ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;
+
+ *ptr++ = 0; /* Chunk flags */
+
+ *ptr++ = (uchar) XB_CHUNK_TYPE_PAYLOAD; /* Chunk type */
+
+ int4store(ptr, file->path_len); /* Path length */
+ ptr += 4;
+
+ memcpy(ptr, file->path, file->path_len); /* Path */
+ ptr += file->path_len;
+
+ int8store(ptr, len); /* Payload length */
+ ptr += 8;
+
+ pthread_mutex_lock(&stream->mutex);
+
+ int8store(ptr, file->offset); /* Payload offset */
+ ptr += 8;
+
+ checksum = crc32(0, buf, len); /* checksum */
+ int4store(ptr, checksum);
+ ptr += 4;
+
+ xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));
+
+ if (file->write(file, file->userdata, tmpbuf, ptr-tmpbuf) == -1)
+ goto err;
+
+ if (file->write(file, file->userdata, buf, len) == -1) /* Payload */
+ goto err;
+
+ file->offset+= len;
+
+ pthread_mutex_unlock(&stream->mutex);
+
+ return 0;
+
+err:
+
+ pthread_mutex_unlock(&stream->mutex);
+
+ return 1;
+}
+
+static
+int
+xb_stream_write_eof(xb_wstream_file_t *file)
+{
+ /* Chunk magic + flags + chunk type + path_len + path */
+ uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 +
+ FN_REFLEN];
+ uchar *ptr;
+ xb_wstream_t *stream = file->stream;
+
+ pthread_mutex_lock(&stream->mutex);
+
+ /* Write xbstream header */
+ ptr = tmpbuf;
+
+ /* Chunk magic */
+ memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1);
+ ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1;
+
+ *ptr++ = 0; /* Chunk flags */
+
+ *ptr++ = (uchar) XB_CHUNK_TYPE_EOF; /* Chunk type */
+
+ int4store(ptr, file->path_len); /* Path length */
+ ptr += 4;
+
+ memcpy(ptr, file->path, file->path_len); /* Path */
+ ptr += file->path_len;
+
+ xb_ad(ptr <= tmpbuf + sizeof(tmpbuf));
+
+ if (file->write(file, file->userdata, tmpbuf,
+ (ulonglong) (ptr - tmpbuf)) == -1)
+ goto err;
+
+ pthread_mutex_unlock(&stream->mutex);
+
+ return 0;
+err:
+
+ pthread_mutex_unlock(&stream->mutex);
+
+ return 1;
+}
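
The writer above is driven through the matching interface in xbstream.h. A minimal producer sketch that streams a single in-memory buffer as one file via the default stdout callback (illustrative only, not part of the patch; error handling trimmed):

/* Illustrative sketch: emit one file through the xbstream writer above. */
#include "xbstream.h"

static int stream_one_file(const char *name, const void *buf, size_t len)
{
    xb_wstream_t       *stream = xb_stream_write_new();
    xb_wstream_file_t  *file;
    int                 ret = 0;

    /* NULL callback selects xb_stream_default_write_callback (stdout) */
    file = xb_stream_write_open(stream, name, NULL, NULL, NULL);
    if (file == NULL) {
        xb_stream_write_done(stream);
        return 1;
    }

    if (xb_stream_write_data(file, buf, len))
        ret = 1;

    /* flushes the buffered chunk and appends the per-file EOF chunk */
    if (xb_stream_write_close(file))
        ret = 1;

    xb_stream_write_done(stream);
    return ret;
}
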
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
new file mode 100644
index 00000000000..d24c915bb48
--- /dev/null
+++ b/extra/mariabackup/xtrabackup.cc
@@ -0,0 +1,7220 @@
+/******************************************************
+XtraBackup: hot backup tool for InnoDB
+(c) 2009-2015 Percona LLC and/or its affiliates
+Originally Created 3/3/2009 Yasufumi Kinoshita
+Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
+Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************
+
+This file incorporates work covered by the following copyright and
+permission notice:
+
+Copyright (c) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA
+
+*******************************************************/
+
+//#define XTRABACKUP_TARGET_IS_PLUGIN
+
+#include <mysql_version.h>
+#include <my_base.h>
+#include <my_getopt.h>
+#include <mysql_com.h>
+#include <my_default.h>
+#include <mysqld.h>
+
+#include <fcntl.h>
+#include <string.h>
+
+#ifdef __linux__
+# include <sys/prctl.h>
+#endif
+
+#include <sys/resource.h>
+
+#include <btr0sea.h>
+#include <dict0priv.h>
+#include <dict0stats.h>
+#include <lock0lock.h>
+#include <log0recv.h>
+#include <row0mysql.h>
+#include <row0quiesce.h>
+#include <srv0start.h>
+#include <buf0dblwr.h>
+
+#include <sstream>
+#include <set>
+#include <mysql.h>
+
+#define G_PTR uchar*
+
+#include "common.h"
+#include "xtrabackup_version.h"
+#include "datasink.h"
+
+#include "xb_regex.h"
+#include "fil_cur.h"
+#include "write_filt.h"
+#include "xtrabackup.h"
+#include "ds_buffer.h"
+#include "ds_tmpfile.h"
+#include "xbstream.h"
+#include "changed_page_bitmap.h"
+#include "read_filt.h"
+#include "wsrep.h"
+#include "innobackupex.h"
+#include "backup_mysql.h"
+#include "backup_copy.h"
+#include "backup_mysql.h"
+
+/* TODO: replace with appropriate macros used in InnoDB 5.6 */
+#define PAGE_ZIP_MIN_SIZE_SHIFT 10
+#define DICT_TF_ZSSIZE_SHIFT 1
+#define DICT_TF_FORMAT_ZIP 1
+#define DICT_TF_FORMAT_SHIFT 5
+
+int sys_var_init();
+
+my_bool innodb_inited= 0;
+
+/* === xtrabackup specific options === */
+char xtrabackup_real_target_dir[FN_REFLEN] = "./xtrabackup_backupfiles/";
+char *xtrabackup_target_dir= xtrabackup_real_target_dir;
+my_bool xtrabackup_version = FALSE;
+my_bool xtrabackup_backup = FALSE;
+my_bool xtrabackup_stats = FALSE;
+my_bool xtrabackup_prepare = FALSE;
+my_bool xtrabackup_copy_back = FALSE;
+my_bool xtrabackup_move_back = FALSE;
+my_bool xtrabackup_decrypt_decompress = FALSE;
+my_bool xtrabackup_print_param = FALSE;
+
+my_bool xtrabackup_export = FALSE;
+my_bool xtrabackup_apply_log_only = FALSE;
+
+longlong xtrabackup_use_memory = 100*1024*1024L;
+my_bool xtrabackup_create_ib_logfile = FALSE;
+
+long xtrabackup_throttle = 0; /* 0:unlimited */
+lint io_ticket;
+os_event_t wait_throttle = NULL;
+os_event_t log_copying_stop = NULL;
+
+char *xtrabackup_incremental = NULL;
+lsn_t incremental_lsn;
+lsn_t incremental_to_lsn;
+lsn_t incremental_last_lsn;
+xb_page_bitmap *changed_page_bitmap = NULL;
+
+char *xtrabackup_incremental_basedir = NULL; /* for --backup */
+char *xtrabackup_extra_lsndir = NULL; /* for --backup with --extra-lsndir */
+char *xtrabackup_incremental_dir = NULL; /* for --prepare */
+
+char xtrabackup_real_incremental_basedir[FN_REFLEN];
+char xtrabackup_real_extra_lsndir[FN_REFLEN];
+char xtrabackup_real_incremental_dir[FN_REFLEN];
+
+lsn_t xtrabackup_archived_to_lsn = 0; /* for --archived-to-lsn */
+
+char *xtrabackup_tables = NULL;
+
+/* List of regular expressions for filtering */
+typedef struct xb_regex_list_node_struct xb_regex_list_node_t;
+struct xb_regex_list_node_struct {
+ UT_LIST_NODE_T(xb_regex_list_node_t) regex_list;
+ xb_regex_t regex;
+};
+static UT_LIST_BASE_NODE_T(xb_regex_list_node_t) regex_list;
+
+static xb_regmatch_t tables_regmatch[1];
+
+char *xtrabackup_tables_file = NULL;
+static hash_table_t* tables_hash = NULL;
+
+char *xtrabackup_databases = NULL;
+char *xtrabackup_databases_file = NULL;
+static hash_table_t* databases_hash = NULL;
+
+static hash_table_t* inc_dir_tables_hash;
+
+struct xb_filter_entry_struct{
+ char* name;
+ ibool has_tables;
+ hash_node_t name_hash;
+};
+typedef struct xb_filter_entry_struct xb_filter_entry_t;
+
+static ulint thread_nr[SRV_MAX_N_IO_THREADS + 6];
+static os_thread_id_t thread_ids[SRV_MAX_N_IO_THREADS + 6];
+
+lsn_t checkpoint_lsn_start;
+lsn_t checkpoint_no_start;
+lsn_t log_copy_scanned_lsn;
+ibool log_copying = TRUE;
+ibool log_copying_running = FALSE;
+ibool io_watching_thread_running = FALSE;
+
+ibool xtrabackup_logfile_is_renamed = FALSE;
+
+int xtrabackup_parallel;
+
+char *xtrabackup_stream_str = NULL;
+xb_stream_fmt_t xtrabackup_stream_fmt = XB_STREAM_FMT_NONE;
+ibool xtrabackup_stream = FALSE;
+
+const char *xtrabackup_compress_alg = NULL;
+ibool xtrabackup_compress = FALSE;
+uint xtrabackup_compress_threads;
+ulonglong xtrabackup_compress_chunk_size = 0;
+
+const char *xtrabackup_encrypt_algo_names[] =
+{ "NONE", "AES128", "AES192", "AES256", NullS};
+TYPELIB xtrabackup_encrypt_algo_typelib=
+{array_elements(xtrabackup_encrypt_algo_names)-1,"",
+ xtrabackup_encrypt_algo_names, NULL};
+
+ibool xtrabackup_encrypt = FALSE;
+ulong xtrabackup_encrypt_algo;
+char *xtrabackup_encrypt_key = NULL;
+char *xtrabackup_encrypt_key_file = NULL;
+uint xtrabackup_encrypt_threads;
+ulonglong xtrabackup_encrypt_chunk_size = 0;
+
+ulint xtrabackup_rebuild_threads = 1;
+
+/* sleep interval between log copy iterations in log copying thread
+in milliseconds (default is 1 second) */
+ulint xtrabackup_log_copy_interval = 1000;
+
+/* Ignored option (--log) for MySQL option compatibility */
+char* log_ignored_opt = NULL;
+
+/* === metadata of backup === */
+#define XTRABACKUP_METADATA_FILENAME "xtrabackup_checkpoints"
+char metadata_type[30] = ""; /*[full-backuped|log-applied|
+ full-prepared|incremental]*/
+lsn_t metadata_from_lsn = 0;
+lsn_t metadata_to_lsn = 0;
+lsn_t metadata_last_lsn = 0;
+
+#define XB_LOG_FILENAME "xtrabackup_logfile"
+
+ds_file_t *dst_log_file = NULL;
+
+static char mysql_data_home_buff[2];
+
+const char *defaults_group = "mysqld";
+
+/* === static parameters in ha_innodb.cc */
+
+#define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */
+#define HA_INNOBASE_RANGE_COUNT 100
+
+ulong innobase_large_page_size = 0;
+
+/* The default values for the following, type long or longlong, start-up
+parameters are declared in mysqld.cc: */
+
+long innobase_additional_mem_pool_size = 1*1024*1024L;
+long innobase_buffer_pool_awe_mem_mb = 0;
+long innobase_file_io_threads = 4;
+long innobase_read_io_threads = 4;
+long innobase_write_io_threads = 4;
+long innobase_force_recovery = 0;
+long innobase_log_buffer_size = 1024*1024L;
+long innobase_log_files_in_group = 2;
+long innobase_open_files = 300L;
+
+longlong innobase_page_size = (1LL << 14); /* 16KB */
+static ulong innobase_log_block_size = 512;
+my_bool innobase_fast_checksum = FALSE;
+char* innobase_doublewrite_file = NULL;
+char* innobase_buffer_pool_filename = NULL;
+
+longlong innobase_buffer_pool_size = 8*1024*1024L;
+longlong innobase_log_file_size = 48*1024*1024L;
+
+/* The default values for the following char* start-up parameters
+are determined in innobase_init below: */
+
+char* innobase_ignored_opt = NULL;
+char* innobase_data_home_dir = NULL;
+char* innobase_data_file_path = NULL;
+char* innobase_log_arch_dir = NULL;/* unused */
+/* The following has a misleading name: starting from 4.0.5, this also
+affects Windows: */
+char* innobase_unix_file_flush_method = NULL;
+
+/* Below we have boolean-valued start-up parameters, and their default
+values */
+
+ulong innobase_fast_shutdown = 1;
+my_bool innobase_log_archive = FALSE;/* unused */
+my_bool innobase_use_doublewrite = TRUE;
+my_bool innobase_use_checksums = TRUE;
+my_bool innobase_use_large_pages = FALSE;
+my_bool innobase_file_per_table = FALSE;
+my_bool innobase_locks_unsafe_for_binlog = FALSE;
+my_bool innobase_rollback_on_timeout = FALSE;
+my_bool innobase_create_status_file = FALSE;
+my_bool innobase_adaptive_hash_index = TRUE;
+
+static char *internal_innobase_data_file_path = NULL;
+
+/* The following counter is used to convey information to InnoDB
+about server activity: in selects it is not sensible to call
+srv_active_wake_master_thread after each fetch or search, we only do
+it every INNOBASE_WAKE_INTERVAL'th step. */
+
+#define INNOBASE_WAKE_INTERVAL 32
+ulong innobase_active_counter = 0;
+
+ibool srv_compact_backup = FALSE;
+ibool srv_rebuild_indexes = FALSE;
+
+static char *xtrabackup_debug_sync = NULL;
+
+my_bool xtrabackup_compact = FALSE;
+my_bool xtrabackup_rebuild_indexes = FALSE;
+
+my_bool xtrabackup_incremental_force_scan = FALSE;
+
+/* The flushed lsn which is read from data files */
+lsn_t min_flushed_lsn= 0;
+lsn_t max_flushed_lsn= 0;
+
+/* The size of archived log file */
+size_t xtrabackup_arch_file_size = 0ULL;
+/* The minimal LSN of found archived log files */
+lsn_t xtrabackup_arch_first_file_lsn = 0ULL;
+/* The maximum LSN of found archived log files */
+lsn_t xtrabackup_arch_last_file_lsn = 0ULL;
+
+ulong xb_open_files_limit= 0;
+my_bool xb_close_files= FALSE;
+
+/* Datasinks */
+ds_ctxt_t *ds_data = NULL;
+ds_ctxt_t *ds_meta = NULL;
+ds_ctxt_t *ds_redo = NULL;
+
+static bool innobackupex_mode = false;
+
+static long innobase_log_files_in_group_save;
+static char *srv_log_group_home_dir_save;
+static longlong innobase_log_file_size_save;
+
+/* String buffer used by --print-param to accumulate server options as they are
+parsed from the defaults file */
+static std::ostringstream print_param_str;
+
+/* Set of specified parameters */
+std::set<std::string> param_set;
+
+static ulonglong global_max_value;
+
+extern "C" sig_handler handle_fatal_signal(int sig);
+
+my_bool opt_galera_info = FALSE;
+my_bool opt_slave_info = FALSE;
+my_bool opt_no_lock = FALSE;
+my_bool opt_safe_slave_backup = FALSE;
+my_bool opt_rsync = FALSE;
+my_bool opt_force_non_empty_dirs = FALSE;
+my_bool opt_noversioncheck = FALSE;
+my_bool opt_no_backup_locks = FALSE;
+my_bool opt_decompress = FALSE;
+my_bool opt_remove_original = FALSE;
+
+static const char *binlog_info_values[] = {"off", "lockless", "on", "auto",
+ NullS};
+static TYPELIB binlog_info_typelib = {array_elements(binlog_info_values)-1, "",
+ binlog_info_values, NULL};
+ulong opt_binlog_info;
+
+char *opt_incremental_history_name = NULL;
+char *opt_incremental_history_uuid = NULL;
+
+char *opt_user = NULL;
+char *opt_password = NULL;
+char *opt_host = NULL;
+char *opt_defaults_group = NULL;
+char *opt_socket = NULL;
+uint opt_port = 0;
+char *opt_login_path = NULL;
+char *opt_log_bin = NULL;
+
+const char *query_type_names[] = { "ALL", "UPDATE", "SELECT", NullS};
+
+TYPELIB query_type_typelib= {array_elements(query_type_names) - 1, "",
+ query_type_names, NULL};
+
+ulong opt_lock_wait_query_type;
+ulong opt_kill_long_query_type;
+
+ulong opt_decrypt_algo = 0;
+
+uint opt_kill_long_queries_timeout = 0;
+uint opt_lock_wait_timeout = 0;
+uint opt_lock_wait_threshold = 0;
+uint opt_debug_sleep_before_unlock = 0;
+uint opt_safe_slave_backup_timeout = 0;
+
+const char *opt_history = NULL;
+my_bool opt_decrypt = FALSE;
+
+#if defined(HAVE_OPENSSL)
+my_bool opt_ssl_verify_server_cert = FALSE;
+#if !defined(HAVE_YASSL)
+char *opt_server_public_key = NULL;
+#endif
+#endif
+
+/* Whether xtrabackup_binlog_info should be created on recovery */
+static bool recover_binlog_info;
+
+/* Simple datasink creation tracking...add datasinks in the reverse order you
+want them destroyed. */
+#define XTRABACKUP_MAX_DATASINKS 10
+static ds_ctxt_t *datasinks[XTRABACKUP_MAX_DATASINKS];
+static uint actual_datasinks = 0;
+static inline
+void
+xtrabackup_add_datasink(ds_ctxt_t *ds)
+{
+ xb_ad(actual_datasinks < XTRABACKUP_MAX_DATASINKS);
+ datasinks[actual_datasinks] = ds; actual_datasinks++;
+}
+
+/* ======== Datafiles iterator ======== */
+datafiles_iter_t *
+datafiles_iter_new(fil_system_t *f_system)
+{
+ datafiles_iter_t *it;
+
+ it = static_cast<datafiles_iter_t *>
+ (ut_malloc(sizeof(datafiles_iter_t)));
+ it->mutex = os_mutex_create();
+
+ it->system = f_system;
+ it->space = NULL;
+ it->node = NULL;
+ it->started = FALSE;
+
+ return it;
+}
+
+fil_node_t *
+datafiles_iter_next(datafiles_iter_t *it)
+{
+ fil_node_t *new_node;
+
+ os_mutex_enter(it->mutex);
+
+ if (it->node == NULL) {
+ if (it->started)
+ goto end;
+ it->started = TRUE;
+ } else {
+ it->node = UT_LIST_GET_NEXT(chain, it->node);
+ if (it->node != NULL)
+ goto end;
+ }
+
+ it->space = (it->space == NULL) ?
+ UT_LIST_GET_FIRST(it->system->space_list) :
+ UT_LIST_GET_NEXT(space_list, it->space);
+
+ while (it->space != NULL &&
+ (it->space->purpose != FIL_TABLESPACE ||
+ UT_LIST_GET_LEN(it->space->chain) == 0))
+ it->space = UT_LIST_GET_NEXT(space_list, it->space);
+ if (it->space == NULL)
+ goto end;
+
+ it->node = UT_LIST_GET_FIRST(it->space->chain);
+
+end:
+ new_node = it->node;
+ os_mutex_exit(it->mutex);
+
+ return new_node;
+}
+
+void
+datafiles_iter_free(datafiles_iter_t *it)
+{
+ os_mutex_free(it->mutex);
+ ut_free(it);
+}
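
The iterator above hands out every node of every FIL_TABLESPACE exactly once, serialized by its own mutex so several copy threads can share it. A sketch of the driving loop, assuming a populated fil_system_t pointer f_system (the actual page copying is elided; illustrative only):

/* Illustrative sketch of driving the datafiles iterator. */
datafiles_iter_t  *it = datafiles_iter_new(f_system);
fil_node_t        *node;

while ((node = datafiles_iter_next(it)) != NULL) {
    /* copy this datafile node to the datasink here */
}

datafiles_iter_free(it);
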
+
+/* ======== Data copying thread context ======== */
+
+typedef struct {
+ datafiles_iter_t *it;
+ uint num;
+ uint *count;
+ os_ib_mutex_t count_mutex;
+ os_thread_id_t id;
+} data_thread_ctxt_t;
+
+/* ======== for option and variables ======== */
+
+enum options_xtrabackup
+{
+ OPT_XTRA_TARGET_DIR = 1000, /* make sure it is larger
+ than OPT_MAX_CLIENT_OPTION */
+ OPT_XTRA_BACKUP,
+ OPT_XTRA_STATS,
+ OPT_XTRA_PREPARE,
+ OPT_XTRA_EXPORT,
+ OPT_XTRA_APPLY_LOG_ONLY,
+ OPT_XTRA_PRINT_PARAM,
+ OPT_XTRA_USE_MEMORY,
+ OPT_XTRA_THROTTLE,
+ OPT_XTRA_LOG_COPY_INTERVAL,
+ OPT_XTRA_INCREMENTAL,
+ OPT_XTRA_INCREMENTAL_BASEDIR,
+ OPT_XTRA_EXTRA_LSNDIR,
+ OPT_XTRA_INCREMENTAL_DIR,
+ OPT_XTRA_ARCHIVED_TO_LSN,
+ OPT_XTRA_TABLES,
+ OPT_XTRA_TABLES_FILE,
+ OPT_XTRA_DATABASES,
+ OPT_XTRA_DATABASES_FILE,
+ OPT_XTRA_CREATE_IB_LOGFILE,
+ OPT_XTRA_PARALLEL,
+ OPT_XTRA_STREAM,
+ OPT_XTRA_COMPRESS,
+ OPT_XTRA_COMPRESS_THREADS,
+ OPT_XTRA_COMPRESS_CHUNK_SIZE,
+ OPT_XTRA_ENCRYPT,
+ OPT_XTRA_ENCRYPT_KEY,
+ OPT_XTRA_ENCRYPT_KEY_FILE,
+ OPT_XTRA_ENCRYPT_THREADS,
+ OPT_XTRA_ENCRYPT_CHUNK_SIZE,
+ OPT_LOG,
+ OPT_INNODB,
+ OPT_INNODB_CHECKSUMS,
+ OPT_INNODB_DATA_FILE_PATH,
+ OPT_INNODB_DATA_HOME_DIR,
+ OPT_INNODB_ADAPTIVE_HASH_INDEX,
+ OPT_INNODB_DOUBLEWRITE,
+ OPT_INNODB_FAST_SHUTDOWN,
+ OPT_INNODB_FILE_PER_TABLE,
+ OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
+ OPT_INNODB_FLUSH_METHOD,
+ OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG,
+ OPT_INNODB_LOG_ARCH_DIR,
+ OPT_INNODB_LOG_ARCHIVE,
+ OPT_INNODB_LOG_GROUP_HOME_DIR,
+ OPT_INNODB_MAX_DIRTY_PAGES_PCT,
+ OPT_INNODB_MAX_PURGE_LAG,
+ OPT_INNODB_ROLLBACK_ON_TIMEOUT,
+ OPT_INNODB_STATUS_FILE,
+ OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
+ OPT_INNODB_AUTOEXTEND_INCREMENT,
+ OPT_INNODB_BUFFER_POOL_SIZE,
+ OPT_INNODB_COMMIT_CONCURRENCY,
+ OPT_INNODB_CONCURRENCY_TICKETS,
+ OPT_INNODB_FILE_IO_THREADS,
+ OPT_INNODB_IO_CAPACITY,
+ OPT_INNODB_READ_IO_THREADS,
+ OPT_INNODB_WRITE_IO_THREADS,
+ OPT_INNODB_USE_NATIVE_AIO,
+ OPT_INNODB_PAGE_SIZE,
+ OPT_INNODB_LOG_BLOCK_SIZE,
+ OPT_INNODB_FAST_CHECKSUM,
+ OPT_INNODB_EXTRA_UNDOSLOTS,
+ OPT_INNODB_DOUBLEWRITE_FILE,
+ OPT_INNODB_BUFFER_POOL_FILENAME,
+ OPT_INNODB_FORCE_RECOVERY,
+ OPT_INNODB_LOCK_WAIT_TIMEOUT,
+ OPT_INNODB_LOG_BUFFER_SIZE,
+ OPT_INNODB_LOG_FILE_SIZE,
+ OPT_INNODB_LOG_FILES_IN_GROUP,
+ OPT_INNODB_MIRRORED_LOG_GROUPS,
+ OPT_INNODB_OPEN_FILES,
+ OPT_INNODB_SYNC_SPIN_LOOPS,
+ OPT_INNODB_THREAD_CONCURRENCY,
+ OPT_INNODB_THREAD_SLEEP_DELAY,
+ OPT_XTRA_DEBUG_SYNC,
+ OPT_XTRA_COMPACT,
+ OPT_XTRA_REBUILD_INDEXES,
+ OPT_XTRA_REBUILD_THREADS,
+ OPT_INNODB_CHECKSUM_ALGORITHM,
+ OPT_INNODB_UNDO_DIRECTORY,
+ OPT_INNODB_UNDO_TABLESPACES,
+ OPT_INNODB_LOG_CHECKSUM_ALGORITHM,
+ OPT_XTRA_INCREMENTAL_FORCE_SCAN,
+ OPT_DEFAULTS_GROUP,
+ OPT_OPEN_FILES_LIMIT,
+ OPT_CLOSE_FILES,
+ OPT_CORE_FILE,
+
+ OPT_COPY_BACK,
+ OPT_MOVE_BACK,
+ OPT_GALERA_INFO,
+ OPT_SLAVE_INFO,
+ OPT_NO_LOCK,
+ OPT_SAFE_SLAVE_BACKUP,
+ OPT_RSYNC,
+ OPT_FORCE_NON_EMPTY_DIRS,
+ OPT_NO_VERSION_CHECK,
+ OPT_NO_BACKUP_LOCKS,
+ OPT_DECOMPRESS,
+ OPT_INCREMENTAL_HISTORY_NAME,
+ OPT_INCREMENTAL_HISTORY_UUID,
+ OPT_DECRYPT,
+ OPT_REMOVE_ORIGINAL,
+ OPT_LOCK_WAIT_QUERY_TYPE,
+ OPT_KILL_LONG_QUERY_TYPE,
+ OPT_HISTORY,
+ OPT_KILL_LONG_QUERIES_TIMEOUT,
+ OPT_LOCK_WAIT_TIMEOUT,
+ OPT_LOCK_WAIT_THRESHOLD,
+ OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
+ OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
+ OPT_BINLOG_INFO,
+ OPT_XB_SECURE_AUTH,
+
+ OPT_SSL_SSL,
+ OPT_SSL_VERIFY_SERVER_CERT,
+ OPT_SERVER_PUBLIC_KEY,
+
+};
+
+struct my_option xb_client_options[] =
+{
+ {"version", 'v', "print xtrabackup version information",
+ (G_PTR *) &xtrabackup_version, (G_PTR *) &xtrabackup_version, 0, GET_BOOL,
+ NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"target-dir", OPT_XTRA_TARGET_DIR, "destination directory", (G_PTR*) &xtrabackup_target_dir,
+ (G_PTR*) &xtrabackup_target_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"backup", OPT_XTRA_BACKUP, "take backup to target-dir",
+ (G_PTR*) &xtrabackup_backup, (G_PTR*) &xtrabackup_backup,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"stats", OPT_XTRA_STATS, "calculate statistics of the datadir (an offline mysqld is recommended)",
+ (G_PTR*) &xtrabackup_stats, (G_PTR*) &xtrabackup_stats,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"prepare", OPT_XTRA_PREPARE, "prepare a backup for starting mysql server on the backup.",
+ (G_PTR*) &xtrabackup_prepare, (G_PTR*) &xtrabackup_prepare,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"export", OPT_XTRA_EXPORT, "create files needed to import tables into another database during --prepare.",
+ (G_PTR*) &xtrabackup_export, (G_PTR*) &xtrabackup_export,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"apply-log-only", OPT_XTRA_APPLY_LOG_ONLY,
+ "stop the recovery process so that the LSN does not advance past the applied log during --prepare.",
+ (G_PTR*) &xtrabackup_apply_log_only, (G_PTR*) &xtrabackup_apply_log_only,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"print-param", OPT_XTRA_PRINT_PARAM, "print the mysqld parameters needed for copy-back.",
+ (G_PTR*) &xtrabackup_print_param, (G_PTR*) &xtrabackup_print_param,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"use-memory", OPT_XTRA_USE_MEMORY, "The value is used instead of buffer_pool_size",
+ (G_PTR*) &xtrabackup_use_memory, (G_PTR*) &xtrabackup_use_memory,
+ 0, GET_LL, REQUIRED_ARG, 100*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
+ 1024*1024L, 0},
+ {"throttle", OPT_XTRA_THROTTLE, "limit the number of I/O operations (read&write pairs) per second to this value (for '--backup')",
+ (G_PTR*) &xtrabackup_throttle, (G_PTR*) &xtrabackup_throttle,
+ 0, GET_LONG, REQUIRED_ARG, 0, 0, LONG_MAX, 0, 1, 0},
+ {"log", OPT_LOG, "Ignored option for MySQL option compatibility",
+ (G_PTR*) &log_ignored_opt, (G_PTR*) &log_ignored_opt, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+ {"log-copy-interval", OPT_XTRA_LOG_COPY_INTERVAL, "time interval between checks done by log copying thread in milliseconds (default is 1 second).",
+ (G_PTR*) &xtrabackup_log_copy_interval, (G_PTR*) &xtrabackup_log_copy_interval,
+ 0, GET_LONG, REQUIRED_ARG, 1000, 0, LONG_MAX, 0, 1, 0},
+ {"extra-lsndir", OPT_XTRA_EXTRA_LSNDIR, "(for --backup): save an extra copy of the xtrabackup_checkpoints file in this directory.",
+ (G_PTR*) &xtrabackup_extra_lsndir, (G_PTR*) &xtrabackup_extra_lsndir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"incremental-lsn", OPT_XTRA_INCREMENTAL, "(for --backup): copy only .ibd pages newer than specified LSN 'high:low'. ##ATTENTION##: If a wrong LSN value is specified, it is impossible to diagnose this, causing the backup to be unusable. Be careful!",
+ (G_PTR*) &xtrabackup_incremental, (G_PTR*) &xtrabackup_incremental,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"incremental-basedir", OPT_XTRA_INCREMENTAL_BASEDIR, "(for --backup): copy only .ibd pages newer than backup at specified directory.",
+ (G_PTR*) &xtrabackup_incremental_basedir, (G_PTR*) &xtrabackup_incremental_basedir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"incremental-dir", OPT_XTRA_INCREMENTAL_DIR, "(for --prepare): apply .delta files and logfile in the specified directory.",
+ (G_PTR*) &xtrabackup_incremental_dir, (G_PTR*) &xtrabackup_incremental_dir,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"to-archived-lsn", OPT_XTRA_ARCHIVED_TO_LSN,
+ "Don't apply archived logs with bigger log sequence number.",
+ (G_PTR*) &xtrabackup_archived_to_lsn, (G_PTR*) &xtrabackup_archived_to_lsn, 0,
+ GET_LL, REQUIRED_ARG, 0, 0, LONGLONG_MAX, 0, 0, 0},
+ {"tables", OPT_XTRA_TABLES, "filtering by regexp for table names.",
+ (G_PTR*) &xtrabackup_tables, (G_PTR*) &xtrabackup_tables,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"tables_file", OPT_XTRA_TABLES_FILE, "filtering by list of the exact database.table name in the file.",
+ (G_PTR*) &xtrabackup_tables_file, (G_PTR*) &xtrabackup_tables_file,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"databases", OPT_XTRA_DATABASES, "filtering by list of databases.",
+ (G_PTR*) &xtrabackup_databases, (G_PTR*) &xtrabackup_databases,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"databases_file", OPT_XTRA_DATABASES_FILE,
+ "filtering by list of databases in the file.",
+ (G_PTR*) &xtrabackup_databases_file, (G_PTR*) &xtrabackup_databases_file,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"create-ib-logfile", OPT_XTRA_CREATE_IB_LOGFILE, "** does not work for now ** creates ib_logfile* also after '--prepare'. ### If you want to create ib_logfile*, just re-execute this command with the same options. ###",
+ (G_PTR*) &xtrabackup_create_ib_logfile, (G_PTR*) &xtrabackup_create_ib_logfile,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"stream", OPT_XTRA_STREAM, "Stream all backup files to the standard output "
+ "in the specified format. Supported formats are 'xbstream' and 'tar'.",
+ (G_PTR*) &xtrabackup_stream_str, (G_PTR*) &xtrabackup_stream_str, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"compress", OPT_XTRA_COMPRESS, "Compress individual backup files using the "
+ "specified compression algorithm. Currently the only supported algorithm "
+ "is 'quicklz'. It is also the default algorithm, i.e. the one used when "
+ "--compress is used without an argument.",
+ (G_PTR*) &xtrabackup_compress_alg, (G_PTR*) &xtrabackup_compress_alg, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"compress-threads", OPT_XTRA_COMPRESS_THREADS,
+ "Number of threads for parallel data compression. The default value is 1.",
+ (G_PTR*) &xtrabackup_compress_threads, (G_PTR*) &xtrabackup_compress_threads,
+ 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
+
+ {"compress-chunk-size", OPT_XTRA_COMPRESS_CHUNK_SIZE,
+ "Size of working buffer(s) for compression threads in bytes. The default value is 64K.",
+ (G_PTR*) &xtrabackup_compress_chunk_size, (G_PTR*) &xtrabackup_compress_chunk_size,
+ 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
+
+ {"encrypt", OPT_XTRA_ENCRYPT, "Encrypt individual backup files using the "
+ "specified encryption algorithm.",
+ &xtrabackup_encrypt_algo, &xtrabackup_encrypt_algo,
+ &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-key", OPT_XTRA_ENCRYPT_KEY, "Encryption key to use.",
+ (G_PTR*) &xtrabackup_encrypt_key, (G_PTR*) &xtrabackup_encrypt_key, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-key-file", OPT_XTRA_ENCRYPT_KEY_FILE, "File which contains encryption key to use.",
+ (G_PTR*) &xtrabackup_encrypt_key_file, (G_PTR*) &xtrabackup_encrypt_key_file, 0,
+ GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"encrypt-threads", OPT_XTRA_ENCRYPT_THREADS,
+ "Number of threads for parallel data encryption. The default value is 1.",
+ (G_PTR*) &xtrabackup_encrypt_threads, (G_PTR*) &xtrabackup_encrypt_threads,
+ 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
+
+ {"encrypt-chunk-size", OPT_XTRA_ENCRYPT_CHUNK_SIZE,
+ "Size of working buffer(s) for encryption threads in bytes. The default value is 64K.",
+ (G_PTR*) &xtrabackup_encrypt_chunk_size, (G_PTR*) &xtrabackup_encrypt_chunk_size,
+ 0, GET_ULL, REQUIRED_ARG, (1 << 16), 1024, ULONGLONG_MAX, 0, 0, 0},
+
+ {"compact", OPT_XTRA_COMPACT,
+ "Create a compact backup by skipping secondary index pages.",
+ (G_PTR*) &xtrabackup_compact, (G_PTR*) &xtrabackup_compact,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"rebuild_indexes", OPT_XTRA_REBUILD_INDEXES,
+ "Rebuild secondary indexes in InnoDB tables after applying the log. "
+ "Only has effect with --prepare.",
+ (G_PTR*) &xtrabackup_rebuild_indexes, (G_PTR*) &xtrabackup_rebuild_indexes,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"rebuild_threads", OPT_XTRA_REBUILD_THREADS,
+ "Use this number of threads to rebuild indexes in a compact backup. "
+ "Only has effect with --prepare and --rebuild-indexes.",
+ (G_PTR*) &xtrabackup_rebuild_threads, (G_PTR*) &xtrabackup_rebuild_threads,
+ 0, GET_UINT, REQUIRED_ARG, 1, 1, UINT_MAX, 0, 0, 0},
+
+ {"incremental-force-scan", OPT_XTRA_INCREMENTAL_FORCE_SCAN,
+ "Perform a full-scan incremental backup even in the presence of changed "
+ "page bitmap data",
+ (G_PTR*)&xtrabackup_incremental_force_scan,
+ (G_PTR*)&xtrabackup_incremental_force_scan, 0, GET_BOOL, NO_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+
+ {"close_files", OPT_CLOSE_FILES, "do not keep files open. Use at your own "
+ "risk.", (G_PTR*) &xb_close_files, (G_PTR*) &xb_close_files, 0, GET_BOOL,
+ NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"core-file", OPT_CORE_FILE, "Write core on fatal signals", 0, 0, 0,
+ GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+
+ {"copy-back", OPT_COPY_BACK, "Copy all the files in a previously made "
+ "backup from the backup directory to their original locations.",
+ (uchar *) &xtrabackup_copy_back, (uchar *) &xtrabackup_copy_back, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"move-back", OPT_MOVE_BACK, "Move all the files in a previously made "
+ "backup from the backup directory to the actual datadir location. "
+ "Use with caution, as it removes backup files.",
+ (uchar *) &xtrabackup_move_back, (uchar *) &xtrabackup_move_back, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"galera-info", OPT_GALERA_INFO, "This option creates the "
+ "xtrabackup_galera_info file which contains the local node state at "
+ "the time of the backup. Option should be used when performing the "
+ "backup of Percona-XtraDB-Cluster. Has no effect when backup locks "
+ "are used to create the backup.",
+ (uchar *) &opt_galera_info, (uchar *) &opt_galera_info, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"slave-info", OPT_SLAVE_INFO, "This option is useful when backing "
+ "up a replication slave server. It prints the binary log position "
+ "and name of the master server. It also writes this information to "
+ "the \"xtrabackup_slave_info\" file as a \"CHANGE MASTER\" command. "
+ "A new slave for this master can be set up by starting a slave server "
+ "on this backup and issuing a \"CHANGE MASTER\" command with the "
+ "binary log position saved in the \"xtrabackup_slave_info\" file.",
+ (uchar *) &opt_slave_info, (uchar *) &opt_slave_info, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-lock", OPT_NO_LOCK, "Use this option to disable table lock "
+ "with \"FLUSH TABLES WITH READ LOCK\". Use it only if ALL your "
+ "tables are InnoDB and you DO NOT CARE about the binary log "
+ "position of the backup. This option shouldn't be used if there "
+ "are any DDL statements being executed or if any updates are "
+ "happening on non-InnoDB tables (this includes the system MyISAM "
+ "tables in the mysql database), otherwise it could lead to an "
+ "inconsistent backup. If you are considering to use --no-lock "
+ "because your backups are failing to acquire the lock, this could "
+ "be because of incoming replication events preventing the lock "
+ "from succeeding. Please try using --safe-slave-backup to "
+ "momentarily stop the replication slave thread, this may help "
+ "the backup to succeed and you then don't need to resort to "
+ "using this option.",
+ (uchar *) &opt_no_lock, (uchar *) &opt_no_lock, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"safe-slave-backup", OPT_SAFE_SLAVE_BACKUP, "Stop slave SQL thread "
+ "and wait to start backup until Slave_open_temp_tables in "
+ "\"SHOW STATUS\" is zero. If there are no open temporary tables, "
+ "the backup will take place, otherwise the SQL thread will be "
+ "started and stopped until there are no open temporary tables. "
+ "The backup will fail if Slave_open_temp_tables does not become "
+ "zero after --safe-slave-backup-timeout seconds. The slave SQL "
+ "thread will be restarted when the backup finishes.",
+ (uchar *) &opt_safe_slave_backup,
+ (uchar *) &opt_safe_slave_backup,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"rsync", OPT_RSYNC, "Uses the rsync utility to optimize local file "
+ "transfers. When this option is specified, innobackupex uses rsync "
+ "to copy all non-InnoDB files instead of spawning a separate cp for "
+ "each file, which can be much faster for servers with a large number "
+ "of databases or tables. This option cannot be used together with "
+ "--stream.",
+ (uchar *) &opt_rsync, (uchar *) &opt_rsync,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"force-non-empty-directories", OPT_FORCE_NON_EMPTY_DIRS, "This "
+ "option, when specified, makes --copy-back or --move-back transfer "
+ "files to non-empty directories. Note that no existing files will be "
+ "overwritten. If --copy-back or --move-back has to copy a file from "
+ "the backup directory which already exists in the destination "
+ "directory, it will still fail with an error.",
+ (uchar *) &opt_force_non_empty_dirs,
+ (uchar *) &opt_force_non_empty_dirs,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-version-check", OPT_NO_VERSION_CHECK, "This option disables the "
+ "version check which is enabled by the --version-check option.",
+ (uchar *) &opt_noversioncheck,
+ (uchar *) &opt_noversioncheck,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"no-backup-locks", OPT_NO_BACKUP_LOCKS, "This option controls if "
+ "backup locks should be used instead of FLUSH TABLES WITH READ LOCK "
+ "on the backup stage. The option has no effect when backup locks are "
+ "not supported by the server. Backup locks are used by default; "
+ "specify --no-backup-locks to disable them.",
+ (uchar *) &opt_no_backup_locks,
+ (uchar *) &opt_no_backup_locks,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"decompress", OPT_DECOMPRESS, "Decompresses all files with the .qp "
+ "extension in a backup previously made with the --compress option.",
+ (uchar *) &opt_decompress,
+ (uchar *) &opt_decompress,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"user", 'u', "This option specifies the MySQL username used "
+ "when connecting to the server, if that's not the current user. "
+ "The option accepts a string argument. See mysql --help for details.",
+ (uchar*) &opt_user, (uchar*) &opt_user, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"host", 'H', "This option specifies the host to use when "
+ "connecting to the database server with TCP/IP. The option accepts "
+ "a string argument. See mysql --help for details.",
+ (uchar*) &opt_host, (uchar*) &opt_host, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"port", 'P', "This option specifies the port to use when "
+ "connecting to the database server with TCP/IP. The option accepts "
+ "a string argument. See mysql --help for details.",
+ &opt_port, &opt_port, 0, GET_UINT, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"password", 'p', "This option specifies the password to use "
+ "when connecting to the database. It accepts a string argument. "
+ "See mysql --help for details.",
+ 0, 0, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"socket", 'S', "This option specifies the socket to use when "
+ "connecting to the local database server with a UNIX domain socket. "
+ "The option accepts a string argument. See mysql --help for details.",
+ (uchar*) &opt_socket, (uchar*) &opt_socket, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-history-name", OPT_INCREMENTAL_HISTORY_NAME,
+ "This option specifies the name of the backup series stored in the "
+ "PERCONA_SCHEMA.xtrabackup_history history record to base an "
+ "incremental backup on. Xtrabackup will search the history table "
+ "looking for the most recent (highest innodb_to_lsn), successful "
+ "backup in the series and take the to_lsn value to use as the "
+ "starting lsn for the incremental backup. This will be mutually "
+ "exclusive with --incremental-history-uuid, --incremental-basedir "
+ "and --incremental-lsn. If no valid lsn can be found (no series by "
+ "that name, no successful backups by that name) xtrabackup will "
+ "return with an error. It is used with the --incremental option.",
+ (uchar*) &opt_incremental_history_name,
+ (uchar*) &opt_incremental_history_name, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"incremental-history-uuid", OPT_INCREMENTAL_HISTORY_UUID,
+ "This option specifies the UUID of the specific history record "
+ "stored in the PERCONA_SCHEMA.xtrabackup_history to base an "
+ "incremental backup on. This option is mutually exclusive with "
+ "--incremental-history-name, --incremental-basedir and "
+ "--incremental-lsn. If no valid lsn can be "
+ "found (no success record with that uuid) xtrabackup will return "
+ "with an error. It is used with the --incremental option.",
+ (uchar*) &opt_incremental_history_uuid,
+ (uchar*) &opt_incremental_history_uuid, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"decrypt", OPT_DECRYPT, "Decrypts all files with the .xbcrypt "
+ "extension in a backup previously made with --encrypt option.",
+ &opt_decrypt_algo, &opt_decrypt_algo,
+ &xtrabackup_encrypt_algo_typelib, GET_ENUM, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
+
+ {"remove-original", OPT_REMOVE_ORIGINAL, "Remove .qp and .xbcrypt files "
+ "after decryption and decompression.",
+ (uchar *) &opt_remove_original,
+ (uchar *) &opt_remove_original,
+ 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE,
+ "This option specifies which types of queries are allowed to complete "
+ "before innobackupex will issue the global lock. Default is all.",
+ (uchar*) &opt_lock_wait_query_type,
+ (uchar*) &opt_lock_wait_query_type, &query_type_typelib,
+ GET_ENUM, REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0},
+
+ {"kill-long-query-type", OPT_KILL_LONG_QUERY_TYPE,
+ "This option specifies which types of queries should be killed to "
+ "unblock the global lock. Default is \"all\".",
+ (uchar*) &opt_kill_long_query_type,
+ (uchar*) &opt_kill_long_query_type, &query_type_typelib,
+ GET_ENUM, REQUIRED_ARG, QUERY_TYPE_SELECT, 0, 0, 0, 0, 0},
+
+ {"history", OPT_HISTORY,
+ "This option enables the tracking of backup history in the "
+ "PERCONA_SCHEMA.xtrabackup_history table. An optional history "
+ "series name may be specified that will be placed with the history "
+ "record for the current backup being taken.",
+ NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"kill-long-queries-timeout", OPT_KILL_LONG_QUERIES_TIMEOUT,
+ "This option specifies the number of seconds innobackupex waits "
+ "between starting FLUSH TABLES WITH READ LOCK and killing those "
+ "queries that block it. Default is 0 seconds, which means "
+ "innobackupex will not attempt to kill any queries.",
+ (uchar*) &opt_kill_long_queries_timeout,
+ (uchar*) &opt_kill_long_queries_timeout, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"ftwrl-wait-timeout", OPT_LOCK_WAIT_TIMEOUT,
+ "This option specifies time in seconds that innobackupex should wait "
+ "for queries that would block FTWRL before running it. If there are "
+ "still such queries when the timeout expires, innobackupex terminates "
+ "with an error. Default is 0, in which case innobackupex does not "
+ "wait for queries to complete and starts FTWRL immediately.",
+ (uchar*) &opt_lock_wait_timeout,
+ (uchar*) &opt_lock_wait_timeout, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"ftwrl-wait-threshold", OPT_LOCK_WAIT_THRESHOLD,
+ "This option specifies the query run time threshold which is used by "
+ "innobackupex to detect long-running queries with a non-zero value "
+ "of --ftwrl-wait-timeout. FTWRL is not started until such "
+ "long-running queries exist. This option has no effect if "
+ "--ftwrl-wait-timeout is 0. Default value is 60 seconds.",
+ (uchar*) &opt_lock_wait_threshold,
+ (uchar*) &opt_lock_wait_threshold, 0, GET_UINT,
+ REQUIRED_ARG, 60, 0, 0, 0, 0, 0},
+
+ {"debug-sleep-before-unlock", OPT_DEBUG_SLEEP_BEFORE_UNLOCK,
+ "This is a debug-only option used by the XtraBackup test suite.",
+ (uchar*) &opt_debug_sleep_before_unlock,
+ (uchar*) &opt_debug_sleep_before_unlock, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT,
+ "How many seconds --safe-slave-backup should wait for "
+ "Slave_open_temp_tables to become zero. (default 300)",
+ (uchar*) &opt_safe_slave_backup_timeout,
+ (uchar*) &opt_safe_slave_backup_timeout, 0, GET_UINT,
+ REQUIRED_ARG, 300, 0, 0, 0, 0, 0},
+
+ {"binlog-info", OPT_BINLOG_INFO,
+ "This option controls how XtraBackup should retrieve server's binary log "
+ "coordinates corresponding to the backup. Possible values are OFF, ON, "
+ "LOCKLESS and AUTO. See the XtraBackup manual for more information",
+ &opt_binlog_info, &opt_binlog_info,
+ &binlog_info_typelib, GET_ENUM, OPT_ARG, BINLOG_INFO_AUTO, 0, 0, 0, 0, 0},
+
+ {"secure-auth", OPT_XB_SECURE_AUTH, "Refuse client connecting to server if it"
+ " uses old (pre-4.1.1) protocol.", &opt_secure_auth,
+ &opt_secure_auth, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+
+#include "sslopt-longopts.h"
+
+#if !defined(HAVE_YASSL)
+ {"server-public-key-path", OPT_SERVER_PUBLIC_KEY,
+ "File path to the server public RSA key in PEM format.",
+ &opt_server_public_key, &opt_server_public_key, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#endif
+
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+uint xb_client_options_count = array_elements(xb_client_options);
+
+struct my_option xb_server_options[] =
+{
+ {"datadir", 'h', "Path to the database root.", (G_PTR*) &mysql_data_home,
+ (G_PTR*) &mysql_data_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"tmpdir", 't',
+ "Path for temporary files. Several paths may be specified, separated by a "
+#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__)
+ "semicolon (;)"
+#else
+ "colon (:)"
+#endif
+ ", in this case they are used in a round-robin fashion.",
+ (G_PTR*) &opt_mysql_tmpdir,
+ (G_PTR*) &opt_mysql_tmpdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"parallel", OPT_XTRA_PARALLEL,
+ "Number of threads to use for parallel datafiles transfer. Does not have "
+ "any effect in the stream mode. The default value is 1.",
+ (G_PTR*) &xtrabackup_parallel, (G_PTR*) &xtrabackup_parallel, 0, GET_INT,
+ REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0},
+
+ {"log", OPT_LOG, "Ignored option for MySQL option compatibility",
+ (G_PTR*) &log_ignored_opt, (G_PTR*) &log_ignored_opt, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"log_bin", OPT_LOG, "Base name for the log sequence",
+ &opt_log_bin, &opt_log_bin, 0, GET_STR_ALLOC, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"innodb", OPT_INNODB, "Ignored option for MySQL option compatibility",
+ (G_PTR*) &innobase_ignored_opt, (G_PTR*) &innobase_ignored_opt, 0,
+ GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"innodb_adaptive_hash_index", OPT_INNODB_ADAPTIVE_HASH_INDEX,
+ "Enable InnoDB adaptive hash index (enabled by default). "
+ "Disable with --skip-innodb-adaptive-hash-index.",
+ (G_PTR*) &innobase_adaptive_hash_index,
+ (G_PTR*) &innobase_adaptive_hash_index,
+ 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+ {"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
+ "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.",
+ (G_PTR*) &innobase_additional_mem_pool_size,
+ (G_PTR*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG,
+ 1*1024*1024L, 512*1024L, LONG_MAX, 0, 1024, 0},
+ {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT,
+ "Data file autoextend increment in megabytes",
+ (G_PTR*) &srv_auto_extend_increment,
+ (G_PTR*) &srv_auto_extend_increment,
+ 0, GET_ULONG, REQUIRED_ARG, 8L, 1L, 1000L, 0, 1L, 0},
+ {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE,
+ "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.",
+ (G_PTR*) &innobase_buffer_pool_size, (G_PTR*) &innobase_buffer_pool_size, 0,
+ GET_LL, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, LONGLONG_MAX, 0,
+ 1024*1024L, 0},
+ {"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \
+Disable with --skip-innodb-checksums.", (G_PTR*) &innobase_use_checksums,
+ (G_PTR*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+ {"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH,
+ "Path to individual files and their sizes.", &innobase_data_file_path,
+ &innobase_data_file_path, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR,
+ "The common part for InnoDB table spaces.", &innobase_data_home_dir,
+ &innobase_data_home_dir, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_doublewrite", OPT_INNODB_DOUBLEWRITE, "Enable InnoDB doublewrite buffer (enabled by default). \
+Disable with --skip-innodb-doublewrite.", (G_PTR*) &innobase_use_doublewrite,
+ (G_PTR*) &innobase_use_doublewrite, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+ {"innodb_io_capacity", OPT_INNODB_IO_CAPACITY,
+ "Number of IOPs the server can do. Tunes the background IO rate",
+ (G_PTR*) &srv_io_capacity, (G_PTR*) &srv_io_capacity,
+ 0, GET_ULONG, OPT_ARG, 200, 100, ~0UL, 0, 0, 0},
+ {"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS,
+ "Number of file I/O threads in InnoDB.", (G_PTR*) &innobase_file_io_threads,
+ (G_PTR*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0,
+ 1, 0},
+ {"innodb_read_io_threads", OPT_INNODB_READ_IO_THREADS,
+ "Number of background read I/O threads in InnoDB.", (G_PTR*) &innobase_read_io_threads,
+ (G_PTR*) &innobase_read_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 1, 64, 0,
+ 1, 0},
+ {"innodb_write_io_threads", OPT_INNODB_WRITE_IO_THREADS,
+ "Number of background write I/O threads in InnoDB.", (G_PTR*) &innobase_write_io_threads,
+ (G_PTR*) &innobase_write_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 1, 64, 0,
+ 1, 0},
+ {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE,
+ "Stores each InnoDB table to an .ibd file in the database dir.",
+ (G_PTR*) &innobase_file_per_table,
+ (G_PTR*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG,
+ FALSE, 0, 0, 0, 0, 0},
+ {"innodb_flush_log_at_trx_commit", OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
+ "Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second).",
+ (G_PTR*) &srv_flush_log_at_trx_commit,
+ (G_PTR*) &srv_flush_log_at_trx_commit,
+ 0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0},
+ {"innodb_flush_method", OPT_INNODB_FLUSH_METHOD,
+ "With which method to flush data.", (G_PTR*) &innobase_unix_file_flush_method,
+ (G_PTR*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
+ 0, 0, 0},
+
+/* ####### Should we use this option? ####### */
+ {"innodb_force_recovery", OPT_INNODB_FORCE_RECOVERY,
+ "Helps to save your data in case the disk image of the database becomes corrupt.",
+ (G_PTR*) &innobase_force_recovery, (G_PTR*) &innobase_force_recovery, 0,
+ GET_LONG, REQUIRED_ARG, 0, 0, 6, 0, 1, 0},
+
+ {"innodb_log_arch_dir", OPT_INNODB_LOG_ARCH_DIR,
+ "Where full logs should be archived.", (G_PTR*) &innobase_log_arch_dir,
+ (G_PTR*) &innobase_log_arch_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE,
+ "The size of the buffer which InnoDB uses to write log to the log files on disk.",
+ (G_PTR*) &innobase_log_buffer_size, (G_PTR*) &innobase_log_buffer_size, 0,
+ GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, LONG_MAX, 0, 1024, 0},
+ {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE,
+ "Size of each log file in a log group.",
+ (G_PTR*) &innobase_log_file_size, (G_PTR*) &innobase_log_file_size, 0,
+ GET_LL, REQUIRED_ARG, 48*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 0,
+ 1024*1024L, 0},
+ {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP,
+ "Number of log files in the log group. InnoDB writes to the files in a "
+ "circular fashion. Value 3 is recommended here.",
+ &innobase_log_files_in_group, &innobase_log_files_in_group,
+ 0, GET_LONG, REQUIRED_ARG, 2, 2, 100, 0, 1, 0},
+ {"innodb_log_group_home_dir", OPT_INNODB_LOG_GROUP_HOME_DIR,
+ "Path to InnoDB log files.", &srv_log_group_home_dir,
+ &srv_log_group_home_dir, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_max_dirty_pages_pct", OPT_INNODB_MAX_DIRTY_PAGES_PCT,
+ "Percentage of dirty pages allowed in bufferpool.", (G_PTR*) &srv_max_buf_pool_modified_pct,
+ (G_PTR*) &srv_max_buf_pool_modified_pct, 0, GET_ULONG, REQUIRED_ARG, 90, 0, 100, 0, 0, 0},
+ {"innodb_open_files", OPT_INNODB_OPEN_FILES,
+ "How many files at the maximum InnoDB keeps open at the same time.",
+ (G_PTR*) &innobase_open_files, (G_PTR*) &innobase_open_files, 0,
+ GET_LONG, REQUIRED_ARG, 300L, 10L, LONG_MAX, 0, 1L, 0},
+ {"innodb_use_native_aio", OPT_INNODB_USE_NATIVE_AIO,
+ "Use native AIO if supported on this platform.",
+ (G_PTR*) &srv_use_native_aio,
+ (G_PTR*) &srv_use_native_aio, 0, GET_BOOL, NO_ARG,
+ FALSE, 0, 0, 0, 0, 0},
+ {"innodb_page_size", OPT_INNODB_PAGE_SIZE,
+ "The universal page size of the database.",
+ (G_PTR*) &innobase_page_size, (G_PTR*) &innobase_page_size, 0,
+ /* Use GET_LL to support numeric suffixes in 5.6 */
+ GET_LL, REQUIRED_ARG,
+ (1LL << 14), (1LL << 12), (1LL << UNIV_PAGE_SIZE_SHIFT_MAX), 0, 1L, 0},
+ {"innodb_log_block_size", OPT_INNODB_LOG_BLOCK_SIZE,
+ "The log block size of the transaction log file. "
+ "Changing it for an existing log file is not supported. Use at your own risk!",
+ (G_PTR*) &innobase_log_block_size, (G_PTR*) &innobase_log_block_size, 0,
+ GET_ULONG, REQUIRED_ARG, 512, 512, 1 << UNIV_PAGE_SIZE_SHIFT_MAX, 0, 1L, 0},
+ {"innodb_fast_checksum", OPT_INNODB_FAST_CHECKSUM,
+ "Change the checksum algorithm for the whole data page to a 4-byte word based one.",
+ (G_PTR*) &innobase_fast_checksum,
+ (G_PTR*) &innobase_fast_checksum, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_doublewrite_file", OPT_INNODB_DOUBLEWRITE_FILE,
+ "Path to special datafile for doublewrite buffer. (default is \"\": not used)",
+ (G_PTR*) &innobase_doublewrite_file, (G_PTR*) &innobase_doublewrite_file,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"innodb_buffer_pool_filename", OPT_INNODB_BUFFER_POOL_FILENAME,
+ "Filename to/from which to dump/load the InnoDB buffer pool",
+ (G_PTR*) &innobase_buffer_pool_filename,
+ (G_PTR*) &innobase_buffer_pool_filename,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+#ifndef __WIN__
+ {"debug-sync", OPT_XTRA_DEBUG_SYNC,
+ "Debug sync point. This is only used by the xtrabackup test suite.",
+ (G_PTR*) &xtrabackup_debug_sync,
+ (G_PTR*) &xtrabackup_debug_sync,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#endif
+
+ {"innodb_checksum_algorithm", OPT_INNODB_CHECKSUM_ALGORITHM,
+ "The algorithm InnoDB uses for page checksumming. [CRC32, STRICT_CRC32, "
+ "INNODB, STRICT_INNODB, NONE, STRICT_NONE]", &srv_checksum_algorithm,
+ &srv_checksum_algorithm, &innodb_checksum_algorithm_typelib, GET_ENUM,
+ REQUIRED_ARG, SRV_CHECKSUM_ALGORITHM_INNODB, 0, 0, 0, 0, 0},
+ {"innodb_log_checksum_algorithm", OPT_INNODB_LOG_CHECKSUM_ALGORITHM,
+ "The algorithm InnoDB uses for log checksumming. [CRC32, STRICT_CRC32, "
+ "INNODB, STRICT_INNODB, NONE, STRICT_NONE]", &srv_log_checksum_algorithm,
+ &srv_log_checksum_algorithm, &innodb_checksum_algorithm_typelib, GET_ENUM,
+ REQUIRED_ARG, SRV_CHECKSUM_ALGORITHM_INNODB, 0, 0, 0, 0, 0},
+ {"innodb_undo_directory", OPT_INNODB_UNDO_DIRECTORY,
+ "Directory where undo tablespace files live, this path can be absolute.",
+ &srv_undo_dir, &srv_undo_dir, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0,
+ 0},
+
+ {"innodb_undo_tablespaces", OPT_INNODB_UNDO_TABLESPACES,
+ "Number of undo tablespaces to use.",
+ (G_PTR*)&srv_undo_tablespaces, (G_PTR*)&srv_undo_tablespaces,
+ 0, GET_ULONG, REQUIRED_ARG, 0, 0, 126, 0, 1, 0},
+
+ {"defaults_group", OPT_DEFAULTS_GROUP, "defaults group in config file (default \"mysqld\").",
+ (G_PTR*) &defaults_group, (G_PTR*) &defaults_group,
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+
+ {"open_files_limit", OPT_OPEN_FILES_LIMIT, "the maximum number of file "
+ "descriptors to reserve with setrlimit().",
+ (G_PTR*) &xb_open_files_limit, (G_PTR*) &xb_open_files_limit, 0, GET_ULONG,
+ REQUIRED_ARG, 0, 0, UINT_MAX, 0, 1, 0},
+
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+uint xb_server_options_count = array_elements(xb_server_options);
+
+#ifndef __WIN__
+static int debug_sync_resumed;
+
+static void sigcont_handler(int sig);
+
+static void sigcont_handler(int sig __attribute__((unused)))
+{
+ debug_sync_resumed= 1;
+}
+#endif
+
+static inline
+void
+debug_sync_point(const char *name)
+{
+#ifndef __WIN__
+ FILE *fp;
+ pid_t pid;
+ char pid_path[FN_REFLEN];
+
+ if (xtrabackup_debug_sync == NULL) {
+ return;
+ }
+
+ if (strcmp(xtrabackup_debug_sync, name)) {
+ return;
+ }
+
+ pid = getpid();
+
+ snprintf(pid_path, sizeof(pid_path), "%s/xtrabackup_debug_sync",
+ xtrabackup_target_dir);
+ fp = fopen(pid_path, "w");
+ if (fp == NULL) {
+ msg("xtrabackup: Error: cannot open %s\n", pid_path);
+ exit(EXIT_FAILURE);
+ }
+ fprintf(fp, "%u\n", (uint) pid);
+ fclose(fp);
+
+ msg("xtrabackup: DEBUG: Suspending at debug sync point '%s'. "
+ "Resume with 'kill -SIGCONT %u'.\n", name, (uint) pid);
+
+ debug_sync_resumed= 0;
+ kill(pid, SIGSTOP);
+ while (!debug_sync_resumed) {
+ sleep(1);
+ }
+
+ /* On resume */
+ msg("xtrabackup: DEBUG: removing the pid file.\n");
+ my_delete(pid_path, MYF(MY_WME));
+#endif
+}
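+
+/* Illustrative usage: with --debug-sync=<name>, hitting the matching sync
+   point makes xtrabackup write its pid to <target-dir>/xtrabackup_debug_sync
+   and stop itself with SIGSTOP; a test script is then expected to read that
+   pid and resume the process with `kill -SIGCONT <pid>`, which fires
+   sigcont_handler() above and lets the wait loop finish. */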
+
+static const char *xb_client_default_groups[]=
+ { "xtrabackup", "client", 0, 0, 0 };
+
+static const char *xb_server_default_groups[]=
+ { "xtrabackup", "mysqld", 0, 0, 0 };
+
+static void print_version(void)
+{
+ msg("%s version %s based on MySQL server %s %s (%s) (revision id: %s)\n",
+ my_progname, XTRABACKUP_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE,
+ MACHINE_TYPE, XTRABACKUP_REVISION);
+}
+
+static void usage(void)
+{
+ puts("Open source backup tool for InnoDB and XtraDB\n\
+\n\
+Copyright (C) 2009-2015 Percona LLC and/or its affiliates.\n\
+Portions Copyright (C) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.\n\
+\n\
+This program is free software; you can redistribute it and/or\n\
+modify it under the terms of the GNU General Public License\n\
+as published by the Free Software Foundation version 2\n\
+of the License.\n\
+\n\
+This program is distributed in the hope that it will be useful,\n\
+but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
+GNU General Public License for more details.\n\
+\n\
+You can download full text of the license on http://www.gnu.org/licenses/gpl-2.0.txt\n");
+
+ printf("Usage: [%s [--defaults-file=#] --backup | %s [--defaults-file=#] --prepare] [OPTIONS]\n",my_progname,my_progname);
+ print_defaults("my", xb_server_default_groups);
+ my_print_help(xb_client_options);
+ my_print_help(xb_server_options);
+ my_print_variables(xb_server_options);
+ my_print_variables(xb_client_options);
+}
+
+#define ADD_PRINT_PARAM_OPT(value) \
+ { \
+ print_param_str << opt->name << "=" << value << "\n"; \
+ param_set.insert(opt->name); \
+ }
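+
+/* The macro above collects every explicitly set server option as a
+   "name=value" line in print_param_str and remembers its name in param_set;
+   check_if_param_set() below queries that set, and print_param_str is
+   presumably echoed by the --print-param handling elsewhere in this file. */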
+
+/************************************************************************
+Check if parameter is set in defaults file or via command line argument
+@return true if parameter is set. */
+bool
+check_if_param_set(const char *param)
+{
+ return param_set.find(param) != param_set.end();
+}
+
+my_bool
+xb_get_one_option(int optid,
+ const struct my_option *opt __attribute__((unused)),
+ char *argument)
+{
+ switch(optid) {
+ case 'h':
+ strmake(mysql_real_data_home,argument, FN_REFLEN - 1);
+ mysql_data_home= mysql_real_data_home;
+
+ ADD_PRINT_PARAM_OPT(mysql_real_data_home);
+ break;
+
+ case 't':
+
+ ADD_PRINT_PARAM_OPT(opt_mysql_tmpdir);
+ break;
+
+ case OPT_INNODB_DATA_HOME_DIR:
+
+ ADD_PRINT_PARAM_OPT(innobase_data_home_dir);
+ break;
+
+ case OPT_INNODB_DATA_FILE_PATH:
+
+ ADD_PRINT_PARAM_OPT(innobase_data_file_path);
+ break;
+
+ case OPT_INNODB_LOG_GROUP_HOME_DIR:
+
+ ADD_PRINT_PARAM_OPT(srv_log_group_home_dir);
+ break;
+
+ case OPT_INNODB_LOG_FILES_IN_GROUP:
+
+ ADD_PRINT_PARAM_OPT(innobase_log_files_in_group);
+ break;
+
+ case OPT_INNODB_LOG_FILE_SIZE:
+
+ ADD_PRINT_PARAM_OPT(innobase_log_file_size);
+ break;
+
+ case OPT_INNODB_FLUSH_METHOD:
+
+ ADD_PRINT_PARAM_OPT(innobase_unix_file_flush_method);
+ break;
+
+ case OPT_INNODB_PAGE_SIZE:
+
+ ADD_PRINT_PARAM_OPT(innobase_page_size);
+ break;
+
+ case OPT_INNODB_FAST_CHECKSUM:
+
+ ADD_PRINT_PARAM_OPT(!!innobase_fast_checksum);
+ break;
+
+ case OPT_INNODB_LOG_BLOCK_SIZE:
+
+ ADD_PRINT_PARAM_OPT(innobase_log_block_size);
+ break;
+
+ case OPT_INNODB_DOUBLEWRITE_FILE:
+
+ ADD_PRINT_PARAM_OPT(innobase_doublewrite_file);
+ break;
+
+ case OPT_INNODB_UNDO_DIRECTORY:
+
+ ADD_PRINT_PARAM_OPT(srv_undo_dir);
+ break;
+
+ case OPT_INNODB_UNDO_TABLESPACES:
+
+ ADD_PRINT_PARAM_OPT(srv_undo_tablespaces);
+ break;
+
+ case OPT_INNODB_CHECKSUM_ALGORITHM:
+
+ ut_a(srv_checksum_algorithm <= SRV_CHECKSUM_ALGORITHM_STRICT_NONE);
+
+ ADD_PRINT_PARAM_OPT(innodb_checksum_algorithm_names[srv_checksum_algorithm]);
+ break;
+
+ case OPT_INNODB_LOG_CHECKSUM_ALGORITHM:
+
+ ut_a(srv_log_checksum_algorithm <= SRV_CHECKSUM_ALGORITHM_STRICT_NONE);
+
+ ADD_PRINT_PARAM_OPT(innodb_checksum_algorithm_names[srv_log_checksum_algorithm]);
+ break;
+
+ case OPT_INNODB_BUFFER_POOL_FILENAME:
+
+ ADD_PRINT_PARAM_OPT(innobase_buffer_pool_filename);
+ break;
+
+ case OPT_XTRA_TARGET_DIR:
+ strmake(xtrabackup_real_target_dir,argument, sizeof(xtrabackup_real_target_dir)-1);
+ xtrabackup_target_dir= xtrabackup_real_target_dir;
+ break;
+ case OPT_XTRA_STREAM:
+ if (!strcasecmp(argument, "tar"))
+ xtrabackup_stream_fmt = XB_STREAM_FMT_TAR;
+ else if (!strcasecmp(argument, "xbstream"))
+ xtrabackup_stream_fmt = XB_STREAM_FMT_XBSTREAM;
+ else
+ {
+ msg("Invalid --stream argument: %s\n", argument);
+ return 1;
+ }
+ xtrabackup_stream = TRUE;
+ break;
+ case OPT_XTRA_COMPRESS:
+ if (argument == NULL)
+ xtrabackup_compress_alg = "quicklz";
+ else if (strcasecmp(argument, "quicklz"))
+ {
+ msg("Invalid --compress argument: %s\n", argument);
+ return 1;
+ }
+ xtrabackup_compress = TRUE;
+ break;
+ case OPT_XTRA_ENCRYPT:
+ if (argument == NULL)
+ {
+ msg("Missing --encrypt argument, must specify a valid encryption "
+ "algorithm.\n");
+ return 1;
+ }
+ xtrabackup_encrypt = TRUE;
+ break;
+ case OPT_DECRYPT:
+ if (argument == NULL) {
+ msg("Missing --decrypt argument, must specify a "
+ "valid encryption algorithm.\n");
+ return(1);
+ }
+ opt_decrypt = TRUE;
+ xtrabackup_decrypt_decompress = true;
+ break;
+ case OPT_DECOMPRESS:
+ opt_decompress = TRUE;
+ xtrabackup_decrypt_decompress = true;
+ break;
+ case (int) OPT_CORE_FILE:
+ test_flags |= TEST_CORE_ON_SIGNAL;
+ break;
+ case OPT_HISTORY:
+ if (argument) {
+ opt_history = argument;
+ } else {
+ opt_history = "";
+ }
+ break;
+ case 'p':
+ if (argument)
+ {
+ char *start= argument;
+ my_free(opt_password);
+ opt_password= my_strdup(argument, MYF(MY_FAE));
+ while (*argument) *argument++= 'x'; // Destroy argument
+ if (*start)
+ start[1]= 0;
+ }
+ break;
+
+
+#include "sslopt-case.h"
+
+ case '?':
+ usage();
+ exit(EXIT_SUCCESS);
+ break;
+ case 'v':
+ print_version();
+ exit(EXIT_SUCCESS);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/***********************************************************************
+Initializes log_block_size */
+static
+ibool
+xb_init_log_block_size(void)
+{
+ srv_log_block_size = 0;
+ if (innobase_log_block_size != 512) {
+ uint n_shift = get_bit_shift(innobase_log_block_size);
+
+ if (n_shift > 0) {
+ srv_log_block_size = (1 << n_shift);
+ msg("InnoDB: The log block size is set to %lu.\n",
+ srv_log_block_size);
+ }
+ } else {
+ srv_log_block_size = 512;
+ }
+ if (!srv_log_block_size) {
+ msg("InnoDB: Error: %lu is not a valid value for "
+ "innodb_log_block_size.\n", innobase_log_block_size);
+ return FALSE;
+ }
+
+ return TRUE;
+}
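+
+/* Worked example (assuming get_bit_shift() returns log2(x) for powers of two
+   and 0 otherwise): innodb_log_block_size=4096 yields n_shift=12, so
+   srv_log_block_size becomes 4096; a non-power-of-two value such as 1000
+   leaves srv_log_block_size at 0 and the function fails with the error
+   message above. */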
+
+static my_bool
+innodb_init_param(void)
+{
+ /* innobase_init */
+ static char current_dir[3]; /* Set if using current lib */
+ my_bool ret;
+ char *default_path;
+
+ /* === some variables from mysqld === */
+ memset((G_PTR) &mysql_tmpdir_list, 0, sizeof(mysql_tmpdir_list));
+
+ if (init_tmpdir(&mysql_tmpdir_list, opt_mysql_tmpdir))
+ exit(EXIT_FAILURE);
+
+ /* dummy for initialize all_charsets[] */
+ get_charset_name(0);
+
+ srv_page_size = 0;
+ srv_page_size_shift = 0;
+
+ if (innobase_page_size != (1LL << 14)) {
+ int n_shift = get_bit_shift((ulint) innobase_page_size);
+
+ if (n_shift >= 12 && n_shift <= UNIV_PAGE_SIZE_SHIFT_MAX) {
+ srv_page_size_shift = n_shift;
+ srv_page_size = 1 << n_shift;
+ msg("InnoDB: The universal page size of the "
+ "database is set to %lu.\n", srv_page_size);
+ } else {
+ msg("InnoDB: Error: invalid value of "
+ "innobase_page_size: %lld", innobase_page_size);
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ srv_page_size_shift = 14;
+ srv_page_size = (1 << srv_page_size_shift);
+ }
+
+ if (!xb_init_log_block_size()) {
+ goto error;
+ }
+
+ srv_fast_checksum = (ibool) innobase_fast_checksum;
+
+ /* Check that values don't overflow on 32-bit systems. */
+ if (sizeof(ulint) == 4) {
+ if (xtrabackup_use_memory > UINT_MAX32) {
+ msg("xtrabackup: use-memory can't be over 4GB"
+ " on 32-bit systems\n");
+ }
+
+ if (innobase_buffer_pool_size > UINT_MAX32) {
+ msg("xtrabackup: innobase_buffer_pool_size can't be "
+ "over 4GB on 32-bit systems\n");
+
+ goto error;
+ }
+
+ if (innobase_log_file_size > UINT_MAX32) {
+ msg("xtrabackup: innobase_log_file_size can't be "
+ "over 4GB on 32-bit systems\n");
+
+ goto error;
+ }
+ }
+
+ os_innodb_umask = (ulint)0664;
+
+ /* First calculate the default path for innodb_data_home_dir etc.,
+ in case the user has not given any value.
+
+ Note that when using the embedded server, the data directory is not
+ necessarily the current directory of this program. */
+
+ /* It's better to use current lib, to keep paths short */
+ current_dir[0] = FN_CURLIB;
+ current_dir[1] = FN_LIBCHAR;
+ current_dir[2] = 0;
+ default_path = current_dir;
+
+ ut_a(default_path);
+
+ /* Set InnoDB initialization parameters according to the values
+ read from MySQL .cnf file */
+
+ if (xtrabackup_backup || xtrabackup_stats) {
+ msg("xtrabackup: using the following InnoDB configuration:\n");
+ } else {
+ msg("xtrabackup: using the following InnoDB configuration "
+ "for recovery:\n");
+ }
+
+ /*--------------- Data files -------------------------*/
+
+ /* The default dir for data files is the datadir of MySQL */
+
+ srv_data_home = ((xtrabackup_backup || xtrabackup_stats) && innobase_data_home_dir
+ ? innobase_data_home_dir : default_path);
+ msg("xtrabackup: innodb_data_home_dir = %s\n", srv_data_home);
+
+ /* Set default InnoDB data file size to 10 MB and let it be
+ auto-extending. Thus users can use InnoDB in >= 4.0 without having
+ to specify any startup options. */
+
+ if (!innobase_data_file_path) {
+ innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
+ }
+ msg("xtrabackup: innodb_data_file_path = %s\n",
+ innobase_data_file_path);
+
+ /* Since InnoDB edits the argument in the next call, we make another
+ copy of it: */
+
+ internal_innobase_data_file_path = strdup(innobase_data_file_path);
+
+ ret = (my_bool) srv_parse_data_file_paths_and_sizes(
+ internal_innobase_data_file_path);
+ if (ret == FALSE) {
+ msg("xtrabackup: syntax error in innodb_data_file_path\n");
+mem_free_and_error:
+ free(internal_innobase_data_file_path);
+ internal_innobase_data_file_path = NULL;
+ goto error;
+ }
+
+ if (xtrabackup_prepare) {
+ /* "--prepare" needs filenames only */
+ ulint i;
+
+ for (i=0; i < srv_n_data_files; i++) {
+ char *p;
+
+ p = srv_data_file_names[i];
+ while ((p = strchr(p, SRV_PATH_SEPARATOR)) != NULL)
+ {
+ p++;
+ srv_data_file_names[i] = p;
+ }
+ }
+ }
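+
+/* Illustrative example: since --prepare only needs the file names, the loop
+   above strips any leading directories, e.g. a data file configured as
+   "/some/dir/ibdata1" is reduced to "ibdata1", which is presumably then
+   resolved against the backup directory being prepared. */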
+
+ /* -------------- Log files ---------------------------*/
+
+ /* The default dir for log files is the datadir of MySQL */
+
+ if (!((xtrabackup_backup || xtrabackup_stats) &&
+ srv_log_group_home_dir)) {
+ srv_log_group_home_dir = default_path;
+ }
+ if (xtrabackup_prepare && xtrabackup_incremental_dir) {
+ srv_log_group_home_dir = xtrabackup_incremental_dir;
+ }
+ msg("xtrabackup: innodb_log_group_home_dir = %s\n",
+ srv_log_group_home_dir);
+
+ srv_normalize_path_for_win(srv_log_group_home_dir);
+
+ if (strchr(srv_log_group_home_dir, ';')) {
+
+ msg("syntax error in innodb_log_group_home_dir\n");
+
+ goto mem_free_and_error;
+ }
+
+ srv_adaptive_flushing = FALSE;
+ srv_use_sys_malloc = TRUE;
+ srv_file_format = 1; /* Barracuda */
+ srv_max_file_format_at_startup = UNIV_FORMAT_MIN; /* on */
+ /* --------------------------------------------------*/
+
+ srv_file_flush_method_str = innobase_unix_file_flush_method;
+
+ srv_n_log_files = (ulint) innobase_log_files_in_group;
+ srv_log_file_size = (ulint) innobase_log_file_size;
+ msg("xtrabackup: innodb_log_files_in_group = %ld\n",
+ srv_n_log_files);
+ msg("xtrabackup: innodb_log_file_size = %lld\n",
+ (long long int) srv_log_file_size);
+
+ srv_log_archive_on = (ulint) innobase_log_archive;
+ srv_log_buffer_size = (ulint) innobase_log_buffer_size;
+
+ /* We set srv_pool_size here in units of 1 kB. InnoDB internally
+ changes the value so that it becomes the number of database pages. */
+
+ //srv_buf_pool_size = (ulint) innobase_buffer_pool_size;
+ srv_buf_pool_size = (ulint) xtrabackup_use_memory;
+
+ srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size;
+
+ srv_n_file_io_threads = (ulint) innobase_file_io_threads;
+ srv_n_read_io_threads = (ulint) innobase_read_io_threads;
+ srv_n_write_io_threads = (ulint) innobase_write_io_threads;
+
+ srv_force_recovery = (ulint) innobase_force_recovery;
+
+ srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
+
+ if (!innobase_use_checksums) {
+
+ srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_NONE;
+ }
+
+ btr_search_enabled = (char) innobase_adaptive_hash_index;
+
+ os_use_large_pages = (ibool) innobase_use_large_pages;
+ os_large_page_size = (ulint) innobase_large_page_size;
+
+ row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout;
+
+ srv_file_per_table = (my_bool) innobase_file_per_table;
+
+ srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog;
+
+ srv_max_n_open_files = (ulint) innobase_open_files;
+ srv_innodb_status = (ibool) innobase_create_status_file;
+
+ srv_print_verbose_log = 1;
+
+ /* Store the default charset-collation number of this MySQL
+ installation */
+
+ /* We cannot handle the character set here for now!! */
+ data_mysql_default_charset_coll = (ulint)default_charset_info->number;
+
+ ut_a(DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL ==
+ my_charset_latin1.number);
+ ut_a(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number);
+
+ /* Store the latin1_swedish_ci character ordering table to InnoDB. For
+ non-latin1_swedish_ci charsets we use the MySQL comparison functions,
+ and consequently we do not need to know the ordering internally in
+ InnoDB. */
+
+ ut_a(0 == strcmp(my_charset_latin1.name, "latin1_swedish_ci"));
+ srv_latin1_ordering = my_charset_latin1.sort_order;
+
+ //innobase_commit_concurrency_init_default();
+
+ /* Since we in this module access directly the fields of a trx
+ struct, and due to different headers and flags it might happen that
+ mutex_t has a different size in this module and in InnoDB
+ modules, we check at run time that the size is the same in
+ these compilation modules. */
+
+ /* On 5.5+ srv_use_native_aio is TRUE by default. It is later reset
+ if it is not supported by the platform in
+ innobase_start_or_create_for_mysql(). As we don't call it in xtrabackup,
+ we have to duplicate checks from that function here. */
+
+#ifdef __WIN__
+ switch (os_get_os_version()) {
+ case OS_WIN95:
+ case OS_WIN31:
+ case OS_WINNT:
+ /* On Win 95, 98, ME, Win32 subsystem for Windows 3.1,
+ and NT use simulated aio. In NT Windows provides async i/o,
+ but when run in conjunction with InnoDB Hot Backup, it seemed
+ to corrupt the data files. */
+
+ srv_use_native_aio = FALSE;
+ break;
+
+ case OS_WIN2000:
+ case OS_WINXP:
+ /* On 2000 and XP, async IO is available. */
+ srv_use_native_aio = TRUE;
+ break;
+
+ default:
+ /* Vista and later have both async IO and condition variables */
+ srv_use_native_aio = TRUE;
+ srv_use_native_conditions = TRUE;
+ break;
+ }
+
+#elif defined(LINUX_NATIVE_AIO)
+
+ if (srv_use_native_aio) {
+ ut_print_timestamp(stderr);
+ msg(" InnoDB: Using Linux native AIO\n");
+ }
+#else
+ /* Currently native AIO is supported only on windows and linux
+ and that also when the support is compiled in. In all other
+ cases, we ignore the setting of innodb_use_native_aio. */
+ srv_use_native_aio = FALSE;
+
+#endif
+
+ /* Assign the default value to srv_undo_dir if it's not specified, as
+ my_getopt does not support default values for string options. We also
+ ignore the option and override innodb_undo_directory on --prepare,
+ because separate undo tablespaces are copied to the root backup
+ directory. */
+
+ if (!srv_undo_dir || !xtrabackup_backup) {
+ my_free(srv_undo_dir);
+ srv_undo_dir = my_strdup(".", MYF(MY_FAE));
+ }
+
+ innodb_log_checksum_func_update(srv_log_checksum_algorithm);
+
+ return(FALSE);
+
+error:
+ msg("xtrabackup: innodb_init_param(): Error occurred.\n");
+ return(TRUE);
+}
+
+static my_bool
+innodb_init(void)
+{
+ int err;
+
+ err = innobase_start_or_create_for_mysql();
+
+ if (err != DB_SUCCESS) {
+ free(internal_innobase_data_file_path);
+ internal_innobase_data_file_path = NULL;
+ goto error;
+ }
+
+ /* They may not be needed for now */
+// (void) hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0,
+// (hash_get_key) innobase_get_key, 0, 0);
+// pthread_mutex_init(&innobase_share_mutex, MY_MUTEX_INIT_FAST);
+// pthread_mutex_init(&prepare_commit_mutex, MY_MUTEX_INIT_FAST);
+// pthread_mutex_init(&commit_threads_m, MY_MUTEX_INIT_FAST);
+// pthread_mutex_init(&commit_cond_m, MY_MUTEX_INIT_FAST);
+// pthread_cond_init(&commit_cond, NULL);
+
+ innodb_inited= 1;
+
+ return(FALSE);
+
+error:
+ msg("xtrabackup: innodb_init(): Error occurred.\n");
+ return(TRUE);
+}
+
+static my_bool
+innodb_end(void)
+{
+ srv_fast_shutdown = (ulint) innobase_fast_shutdown;
+ innodb_inited = 0;
+
+ msg("xtrabackup: starting shutdown with innodb_fast_shutdown = %lu\n",
+ srv_fast_shutdown);
+
+ if (innobase_shutdown_for_mysql() != DB_SUCCESS) {
+ goto error;
+ }
+ free(internal_innobase_data_file_path);
+ internal_innobase_data_file_path = NULL;
+
+ /* They may not be needed for now */
+// hash_free(&innobase_open_tables);
+// pthread_mutex_destroy(&innobase_share_mutex);
+// pthread_mutex_destroy(&prepare_commit_mutex);
+// pthread_mutex_destroy(&commit_threads_m);
+// pthread_mutex_destroy(&commit_cond_m);
+// pthread_cond_destroy(&commit_cond);
+
+ return(FALSE);
+
+error:
+ msg("xtrabackup: innodb_end(): Error occurred.\n");
+ return(TRUE);
+}
+
+/* ================= common ================= */
+
+/***********************************************************************
+Read backup meta info.
+@return TRUE on success, FALSE on failure. */
+static
+my_bool
+xtrabackup_read_metadata(char *filename)
+{
+ FILE *fp;
+ my_bool r = TRUE;
+ int t;
+
+ fp = fopen(filename,"r");
+ if(!fp) {
+ msg("xtrabackup: Error: cannot open %s\n", filename);
+ return(FALSE);
+ }
+
+ if (fscanf(fp, "backup_type = %29s\n", metadata_type)
+ != 1) {
+ r = FALSE;
+ goto end;
+ }
+ /* Use UINT64PF instead of LSN_PF here, as we have to maintain the file
+ format. */
+ if (fscanf(fp, "from_lsn = " UINT64PF "\n", &metadata_from_lsn)
+ != 1) {
+ r = FALSE;
+ goto end;
+ }
+ if (fscanf(fp, "to_lsn = " UINT64PF "\n", &metadata_to_lsn)
+ != 1) {
+ r = FALSE;
+ goto end;
+ }
+ if (fscanf(fp, "last_lsn = " UINT64PF "\n", &metadata_last_lsn)
+ != 1) {
+ metadata_last_lsn = 0;
+ }
+ /* Optional fields */
+
+ if (fscanf(fp, "compact = %d\n", &t) == 1) {
+ xtrabackup_compact = (t == 1);
+ } else {
+ xtrabackup_compact = 0;
+ }
+
+ if (fscanf(fp, "recover_binlog_info = %d\n", &t) == 1) {
+ recover_binlog_info = (t == 1);
+ }
+end:
+ fclose(fp);
+
+ return(r);
+}
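+
+/* For reference, the metadata file parsed above (typically
+   XTRABACKUP_METADATA_FILENAME) follows the layout produced by
+   xtrabackup_print_metadata() below; an illustrative instance (values made
+   up) is:
+
+       backup_type = full-backuped
+       from_lsn = 0
+       to_lsn = 1626007
+       last_lsn = 1626007
+       compact = 0
+       recover_binlog_info = 0
+*/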
+
+/***********************************************************************
+Print backup meta info to a specified buffer. */
+static
+void
+xtrabackup_print_metadata(char *buf, size_t buf_len)
+{
+ /* Use UINT64PF instead of LSN_PF here, as we have to maintain the file
+ format. */
+ snprintf(buf, buf_len,
+ "backup_type = %s\n"
+ "from_lsn = " UINT64PF "\n"
+ "to_lsn = " UINT64PF "\n"
+ "last_lsn = " UINT64PF "\n"
+ "compact = %d\n"
+ "recover_binlog_info = %d\n",
+ metadata_type,
+ metadata_from_lsn,
+ metadata_to_lsn,
+ metadata_last_lsn,
+ MY_TEST(xtrabackup_compact == TRUE),
+ MY_TEST(opt_binlog_info == BINLOG_INFO_LOCKLESS));
+}
+
+/***********************************************************************
+Stream backup meta info to a specified datasink.
+@return TRUE on success, FALSE on failure. */
+static
+my_bool
+xtrabackup_stream_metadata(ds_ctxt_t *ds_ctxt)
+{
+ char buf[1024];
+ size_t len;
+ ds_file_t *stream;
+ MY_STAT mystat;
+ my_bool rc = TRUE;
+
+ xtrabackup_print_metadata(buf, sizeof(buf));
+
+ len = strlen(buf);
+
+ mystat.st_size = len;
+ mystat.st_mtime = my_time(0);
+
+ stream = ds_open(ds_ctxt, XTRABACKUP_METADATA_FILENAME, &mystat);
+ if (stream == NULL) {
+ msg("xtrabackup: Error: cannot open output stream "
+ "for %s\n", XTRABACKUP_METADATA_FILENAME);
+ return(FALSE);
+ }
+
+ if (ds_write(stream, buf, len)) {
+ rc = FALSE;
+ }
+
+ if (ds_close(stream)) {
+ rc = FALSE;
+ }
+
+ return(rc);
+}
+
+/***********************************************************************
+Write backup meta info to a specified file.
+@return TRUE on success, FALSE on failure. */
+static
+my_bool
+xtrabackup_write_metadata(const char *filepath)
+{
+ char buf[1024];
+ size_t len;
+ FILE *fp;
+
+ xtrabackup_print_metadata(buf, sizeof(buf));
+
+ len = strlen(buf);
+
+ fp = fopen(filepath, "w");
+ if(!fp) {
+ msg("xtrabackup: Error: cannot open %s\n", filepath);
+ return(FALSE);
+ }
+ if (fwrite(buf, len, 1, fp) < 1) {
+ fclose(fp);
+ return(FALSE);
+ }
+
+ fclose(fp);
+
+ return(TRUE);
+}
+
+/***********************************************************************
+Read meta info for an incremental delta.
+@return TRUE on success, FALSE on failure. */
+static my_bool
+xb_read_delta_metadata(const char *filepath, xb_delta_info_t *info)
+{
+ FILE* fp;
+ char key[51];
+ char value[51];
+ my_bool r = TRUE;
+
+ /* set defaults */
+ info->page_size = ULINT_UNDEFINED;
+ info->zip_size = ULINT_UNDEFINED;
+ info->space_id = ULINT_UNDEFINED;
+
+ fp = fopen(filepath, "r");
+ if (!fp) {
+ /* Meta files for incremental deltas are optional */
+ return(TRUE);
+ }
+
+ while (!feof(fp)) {
+ if (fscanf(fp, "%50s = %50s\n", key, value) == 2) {
+ if (strcmp(key, "page_size") == 0) {
+ info->page_size = strtoul(value, NULL, 10);
+ } else if (strcmp(key, "zip_size") == 0) {
+ info->zip_size = strtoul(value, NULL, 10);
+ } else if (strcmp(key, "space_id") == 0) {
+ info->space_id = strtoul(value, NULL, 10);
+ }
+ }
+ }
+
+ fclose(fp);
+
+ if (info->page_size == ULINT_UNDEFINED) {
+ msg("xtrabackup: page_size is required in %s\n", filepath);
+ r = FALSE;
+ }
+ if (info->space_id == ULINT_UNDEFINED) {
+ msg("xtrabackup: Warning: This backup was taken with XtraBackup 2.0.1 "
+ "or earlier; some DDL operations between full and incremental "
+ "backups may be handled incorrectly\n");
+ }
+
+ return(r);
+}
+
+/***********************************************************************
+Write meta info for an incremental delta.
+@return TRUE on success, FALSE on failure. */
+my_bool
+xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info)
+{
+ ds_file_t *f;
+ char buf[64];
+ my_bool ret;
+ size_t len;
+ MY_STAT mystat;
+
+ snprintf(buf, sizeof(buf),
+ "page_size = %lu\n"
+ "zip_size = %lu\n"
+ "space_id = %lu\n",
+ info->page_size, info->zip_size, info->space_id);
+ len = strlen(buf);
+
+ mystat.st_size = len;
+ mystat.st_mtime = my_time(0);
+
+ f = ds_open(ds_meta, filename, &mystat);
+ if (f == NULL) {
+ msg("xtrabackup: Error: cannot open output stream for %s\n",
+ filename);
+ return(FALSE);
+ }
+
+ ret = (ds_write(f, buf, len) == 0);
+
+ if (ds_close(f)) {
+ ret = FALSE;
+ }
+
+ return(ret);
+}
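+
+/* Illustrative content of a delta .meta file written by the function above
+   (values made up):
+
+       page_size = 16384
+       zip_size = 0
+       space_id = 42
+
+   xb_read_delta_metadata() above parses exactly these three keys. */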
+
+/* ================= backup ================= */
+void
+xtrabackup_io_throttling(void)
+{
+ if (xtrabackup_throttle && (io_ticket--) < 0) {
+ os_event_reset(wait_throttle);
+ os_event_wait(wait_throttle);
+ }
+}
+
+/************************************************************************
+Checks if a given table name matches any of specifications in the --tables or
+--tables-file options.
+
+@return TRUE on match. */
+static my_bool
+check_if_table_matches_filters(const char *name)
+{
+ int regres;
+ xb_filter_entry_t* table;
+ xb_regex_list_node_t* node;
+
+ if (UT_LIST_GET_LEN(regex_list)) {
+ /* Check against regular expressions list */
+ for (node = UT_LIST_GET_FIRST(regex_list); node;
+ node = UT_LIST_GET_NEXT(regex_list, node)) {
+ regres = xb_regexec(&node->regex, name, 1,
+ tables_regmatch, 0);
+ if (regres != REG_NOMATCH) {
+
+ return(TRUE);
+ }
+ }
+ }
+
+ if (tables_hash) {
+ HASH_SEARCH(name_hash, tables_hash, ut_fold_string(name),
+ xb_filter_entry_t*,
+ table, (void) 0,
+ !strcmp(table->name, name));
+ if (table) {
+
+ return(TRUE);
+ }
+ }
+
+ return(FALSE);
+}
+
+/************************************************************************
+Checks if a table specified as a name in the form "database/name" (InnoDB 5.6)
+or "./database/name.ibd" (InnoDB 5.5-) should be skipped from backup based on
+the --tables or --tables-file options.
+
+@return TRUE if the table should be skipped. */
+my_bool
+check_if_skip_table(
+/******************/
+ const char* name) /*!< in: path to the table */
+{
+ char buf[FN_REFLEN];
+ const char *dbname, *tbname;
+ const char *ptr;
+ char *eptr;
+
+ if (UT_LIST_GET_LEN(regex_list) == 0 &&
+ tables_hash == NULL &&
+ databases_hash == NULL) {
+ return(FALSE);
+ }
+
+ dbname = NULL;
+ tbname = name;
+ while ((ptr = strchr(tbname, SRV_PATH_SEPARATOR)) != NULL) {
+ dbname = tbname;
+ tbname = ptr + 1;
+ }
+
+ if (dbname == NULL) {
+ return(FALSE);
+ }
+
+ strncpy(buf, dbname, FN_REFLEN);
+ buf[tbname - 1 - dbname] = 0;
+
+ if (databases_hash) {
+ /* There are some filters for databases, check them */
+ xb_filter_entry_t* database;
+
+ HASH_SEARCH(name_hash, databases_hash, ut_fold_string(buf),
+ xb_filter_entry_t*,
+ database, (void) 0,
+ !strcmp(database->name, buf));
+ /* Table's database isn't found, skip the table */
+ if (!database) {
+ return(TRUE);
+ }
+ /* There aren't tables specified for the database,
+ it should be backed up entirely */
+ if (!database->has_tables) {
+ return(FALSE);
+ }
+ }
+
+ buf[FN_REFLEN - 1] = '\0';
+ buf[tbname - 1 - dbname] = '.';
+
+ /* Check if there's a suffix in the table name. If so, truncate it. We
+ rely on the fact that a dot cannot be a part of a table name (it is
+ encoded by the server with the @NNNN syntax). */
+ if ((eptr = strchr(&buf[tbname - dbname], '.')) != NULL) {
+
+ *eptr = '\0';
+ }
+
+ /* For partitioned tables first try to match against the regexp
+ without truncating the #P#... suffix so we can backup individual
+ partitions with regexps like '^test[.]t#P#p5' */
+ if (check_if_table_matches_filters(buf)) {
+
+ return(FALSE);
+ }
+ if ((eptr = strstr(buf, "#P#")) != NULL) {
+
+ *eptr = 0;
+
+ if (check_if_table_matches_filters(buf)) {
+
+ return(FALSE);
+ }
+ }
+
+ return(TRUE);
+}
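+
+/* Worked example: for name "test/t1#P#p5.ibd" the code above builds
+   buf = "test.t1#P#p5" (the ".ibd" suffix is truncated), first matches it
+   against the filters as-is so that a regexp like '^test[.]t1#P#p5' can
+   select a single partition, and only then retries with the "#P#p5" part
+   stripped, i.e. as "test.t1". */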
+
+/***********************************************************************
+Reads the space flags from a given data file and returns the compressed
+page size, or 0 if the space is not compressed. */
+ulint
+xb_get_zip_size(os_file_t file)
+{
+ byte *buf;
+ byte *page;
+ ulint zip_size = ULINT_UNDEFINED;
+ ibool success;
+ ulint space;
+
+ buf = static_cast<byte *>(ut_malloc(2 * UNIV_PAGE_SIZE_MAX));
+ page = static_cast<byte *>(ut_align(buf, UNIV_PAGE_SIZE_MAX));
+
+ success = os_file_read(file, page, 0, UNIV_PAGE_SIZE_MAX);
+ if (!success) {
+ goto end;
+ }
+
+ space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+ zip_size = (space == 0 ) ? 0 :
+ dict_tf_get_zip_size(fsp_header_get_flags(page));
+end:
+ ut_free(buf);
+
+ return(zip_size);
+}
+
+const char*
+xb_get_copy_action(const char *dflt)
+{
+ const char *action;
+
+ if (xtrabackup_stream) {
+ if (xtrabackup_compress) {
+ if (xtrabackup_encrypt) {
+ action = "Compressing, encrypting and streaming";
+ } else {
+ action = "Compressing and streaming";
+ }
+ } else if (xtrabackup_encrypt) {
+ action = "Encrypting and streaming";
+ } else {
+ action = "Streaming";
+ }
+ } else {
+ if (xtrabackup_compress) {
+ if (xtrabackup_encrypt) {
+ action = "Compressing and encrypting";
+ } else {
+ action = "Compressing";
+ }
+ } else if (xtrabackup_encrypt) {
+ action = "Encrypting";
+ } else {
+ action = dflt;
+ }
+ }
+
+ return(action);
+}
+
+/* TODO: We may tune the behavior (e.g. by fil_aio) */
+
+static
+my_bool
+xtrabackup_copy_datafile(fil_node_t* node, uint thread_n)
+{
+ char dst_name[FN_REFLEN];
+ ds_file_t *dstfile = NULL;
+ xb_fil_cur_t cursor;
+ xb_fil_cur_result_t res;
+ xb_write_filt_t *write_filter = NULL;
+ xb_write_filt_ctxt_t write_filt_ctxt;
+ const char *action;
+ xb_read_filt_t *read_filter;
+ ibool is_system;
+ my_bool rc = FALSE;
+
+ /* Get the name and the path for the tablespace. node->name always
+ contains the path (which may be absolute for remote tablespaces in
+ 5.6+). space->name contains the tablespace name in the form
+ "./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a
+ multi-node shared tablespace, space->name contains the name of the first
+ node, but that's irrelevant, since we only need node_name to match them
+ against filters, and the shared tablespace is always copied regardless
+ of the filters value. */
+
+ const char* const node_name = node->space->name;
+ const char* const node_path = node->name;
+
+ is_system = !fil_is_user_tablespace_id(node->space->id);
+
+ if (!is_system && check_if_skip_table(node_name)) {
+ msg("[%02u] Skipping %s.\n", thread_n, node_name);
+ return(FALSE);
+ }
+
+ if (!changed_page_bitmap) {
+ read_filter = &rf_pass_through;
+ }
+ else {
+ read_filter = &rf_bitmap;
+ }
+ res = xb_fil_cur_open(&cursor, read_filter, node, thread_n);
+ if (res == XB_FIL_CUR_SKIP) {
+ goto skip;
+ } else if (res == XB_FIL_CUR_ERROR) {
+ goto error;
+ }
+
+ strncpy(dst_name, cursor.rel_path, sizeof(dst_name));
+
+ /* Setup the page write filter */
+ if (xtrabackup_incremental) {
+ write_filter = &wf_incremental;
+ } else if (xtrabackup_compact) {
+ write_filter = &wf_compact;
+ } else {
+ write_filter = &wf_write_through;
+ }
+
+ memset(&write_filt_ctxt, 0, sizeof(xb_write_filt_ctxt_t));
+ ut_a(write_filter->process != NULL);
+
+ if (write_filter->init != NULL &&
+ !write_filter->init(&write_filt_ctxt, dst_name, &cursor)) {
+ msg("[%02u] xtrabackup: error: "
+ "failed to initialize page write filter.\n", thread_n);
+ goto error;
+ }
+
+ dstfile = ds_open(ds_data, dst_name, &cursor.statinfo);
+ if (dstfile == NULL) {
+ msg("[%02u] xtrabackup: error: "
+ "cannot open the destination stream for %s\n",
+ thread_n, dst_name);
+ goto error;
+ }
+
+ action = xb_get_copy_action();
+
+ if (xtrabackup_stream) {
+ msg_ts("[%02u] %s %s\n", thread_n, action, node_path);
+ } else {
+ msg_ts("[%02u] %s %s to %s\n", thread_n, action,
+ node_path, dstfile->path);
+ }
+
+ /* The main copy loop */
+ while ((res = xb_fil_cur_read(&cursor)) == XB_FIL_CUR_SUCCESS) {
+ if (!write_filter->process(&write_filt_ctxt, dstfile)) {
+ goto error;
+ }
+ }
+
+ if (res == XB_FIL_CUR_ERROR) {
+ goto error;
+ }
+
+ if (write_filter->finalize
+ && !write_filter->finalize(&write_filt_ctxt, dstfile)) {
+ goto error;
+ }
+
+ /* close */
+ msg_ts("[%02u] ...done\n", thread_n);
+ xb_fil_cur_close(&cursor);
+ if (ds_close(dstfile)) {
+ rc = TRUE;
+ }
+ if (write_filter && write_filter->deinit) {
+ write_filter->deinit(&write_filt_ctxt);
+ }
+ return(rc);
+
+error:
+ xb_fil_cur_close(&cursor);
+ if (dstfile != NULL) {
+ ds_close(dstfile);
+ }
+ if (write_filter && write_filter->deinit) {
+ write_filter->deinit(&write_filt_ctxt);
+ }
+ msg("[%02u] xtrabackup: Error: "
+ "xtrabackup_copy_datafile() failed.\n", thread_n);
+ return(TRUE); /*ERROR*/
+
+skip:
+
+ if (dstfile != NULL) {
+ ds_close(dstfile);
+ }
+ if (write_filter && write_filter->deinit) {
+ write_filter->deinit(&write_filt_ctxt);
+ }
+ msg("[%02u] xtrabackup: Warning: We assume the "
+ "table was dropped during xtrabackup execution "
+ "and ignore the file.\n", thread_n);
+ msg("[%02u] xtrabackup: Warning: skipping tablespace %s.\n",
+ thread_n, node_name);
+ return(FALSE);
+}
+
+static
+void
+xtrabackup_choose_lsn_offset(lsn_t start_lsn)
+{
+ ulint no, alt_no, expected_no;
+ ulint blocks_in_group;
+ lsn_t tmp_offset, end_lsn;
+ int lsn_chosen = 0;
+ log_group_t *group;
+
+ start_lsn = ut_uint64_align_down(start_lsn, OS_FILE_LOG_BLOCK_SIZE);
+ end_lsn = start_lsn + RECV_SCAN_SIZE;
+
+ group = UT_LIST_GET_FIRST(log_sys->log_groups);
+
+ if (mysql_server_version < 50500 || mysql_server_version > 50600) {
+ /* only make sense for Percona Server 5.5 */
+ return;
+ }
+
+ if (server_flavor == FLAVOR_PERCONA_SERVER) {
+ /* it is Percona Server 5.5 */
+ group->alt_offset_chosen = true;
+ group->lsn_offset = group->lsn_offset_alt;
+ return;
+ }
+
+ if (group->lsn_offset_alt == group->lsn_offset ||
+ group->lsn_offset_alt == (lsn_t) -1) {
+ /* we have only one option */
+ return;
+ }
+
+ no = alt_no = (ulint) -1;
+ lsn_chosen = 0;
+
+ blocks_in_group = log_block_convert_lsn_to_no(
+ log_group_get_capacity(group)) - 1;
+
+ /* read log block number from usual offset */
+ if (group->lsn_offset < group->file_size * group->n_files &&
+ (log_group_calc_lsn_offset(start_lsn, group) %
+ UNIV_PAGE_SIZE) % OS_MIN_LOG_BLOCK_SIZE == 0) {
+ log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
+ group, start_lsn, end_lsn);
+ no = log_block_get_hdr_no(log_sys->buf);
+ }
+
+ /* read log block number from Percona Server 5.5 offset */
+ tmp_offset = group->lsn_offset;
+ group->lsn_offset = group->lsn_offset_alt;
+
+ if (group->lsn_offset < group->file_size * group->n_files &&
+ (log_group_calc_lsn_offset(start_lsn, group) %
+ UNIV_PAGE_SIZE) % OS_MIN_LOG_BLOCK_SIZE == 0) {
+ log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
+ group, start_lsn, end_lsn);
+ alt_no = log_block_get_hdr_no(log_sys->buf);
+ }
+
+ expected_no = log_block_convert_lsn_to_no(start_lsn);
+
+ ut_a(!(no == expected_no && alt_no == expected_no));
+
+ group->lsn_offset = tmp_offset;
+
+ if ((no <= expected_no &&
+ ((expected_no - no) % blocks_in_group) == 0) ||
+ ((expected_no | 0x40000000UL) - no) % blocks_in_group == 0) {
+ /* default offset looks ok */
+ ++lsn_chosen;
+ }
+
+ if ((alt_no <= expected_no &&
+ ((expected_no - alt_no) % blocks_in_group) == 0) ||
+ ((expected_no | 0x40000000UL) - alt_no) % blocks_in_group == 0) {
+ /* PS 5.5 style offset looks ok */
+ ++lsn_chosen;
+ group->alt_offset_chosen = true;
+ group->lsn_offset = group->lsn_offset_alt;
+ }
+
+ /* We are in trouble, because we cannot make a
+ decision to choose one over the other. Die just
+ like Buridan's ass. */
+ ut_a(lsn_chosen == 1);
+}
+
+/*******************************************************//**
+Scans log from a buffer and writes new log data to the output datasink.
+@return true on success */
+static
+bool
+xtrabackup_scan_log_recs(
+/*===============*/
+ log_group_t* group, /*!< in: log group */
+ bool is_last, /*!< in: whether it is last segment
+ to copy */
+ lsn_t start_lsn, /*!< in: buffer start lsn */
+ lsn_t* contiguous_lsn, /*!< in/out: it is known that all log
+ groups contain contiguous log data up
+ to this lsn */
+ lsn_t* group_scanned_lsn,/*!< out: scanning succeeded up to
+ this lsn */
+ bool* finished) /*!< out: false if is not able to scan
+ any more in this log group */
+{
+ lsn_t scanned_lsn;
+ ulint data_len;
+ ulint write_size;
+ const byte* log_block;
+
+ ulint scanned_checkpoint_no = 0;
+
+ *finished = false;
+ scanned_lsn = start_lsn;
+ log_block = log_sys->buf;
+
+ while (log_block < log_sys->buf + RECV_SCAN_SIZE && !*finished) {
+ ulint no = log_block_get_hdr_no(log_block);
+ ulint scanned_no = log_block_convert_lsn_to_no(scanned_lsn);
+ ibool checksum_is_ok =
+ log_block_checksum_is_ok_or_old_format(log_block);
+
+ if (no != scanned_no && checksum_is_ok) {
+ ulint blocks_in_group;
+
+ blocks_in_group = log_block_convert_lsn_to_no(
+ log_group_get_capacity(group)) - 1;
+
+ if ((no < scanned_no &&
+ ((scanned_no - no) % blocks_in_group) == 0) ||
+ no == 0 ||
+ /* Log block numbers wrap around at 0x3FFFFFFF */
+ ((scanned_no | 0x40000000UL) - no) %
+ blocks_in_group == 0) {
+
+ /* old log block, do nothing */
+ *finished = true;
+ break;
+ }
+
+ msg("xtrabackup: error:"
+ " log block numbers mismatch:\n"
+ "xtrabackup: error: expected log block no. %lu,"
+ " but got no. %lu from the log file.\n",
+ (ulong) scanned_no, (ulong) no);
+
+ if ((no - scanned_no) % blocks_in_group == 0) {
+ msg("xtrabackup: error:"
+ " it looks like InnoDB log has wrapped"
+ " around before xtrabackup could"
+ " process all records due to either"
+ " log copying being too slow, or "
+ "log files being too small.\n");
+ }
+
+ return(false);
+ } else if (!checksum_is_ok) {
+ /* Garbage or an incompletely written log block */
+
+ msg("xtrabackup: warning: Log block checksum mismatch"
+ " (block no %lu at lsn " LSN_PF "): \n"
+ "expected %lu, calculated checksum %lu\n",
+ (ulong) no,
+ scanned_lsn,
+ (ulong) log_block_get_checksum(log_block),
+ (ulong) log_block_calc_checksum(log_block));
+ msg("xtrabackup: warning: this is possible when the "
+ "log block has not been fully written by the "
+ "server, will retry later.\n");
+ *finished = true;
+ break;
+ }
+
+ if (log_block_get_flush_bit(log_block)) {
+ /* This block was a start of a log flush operation:
+ we know that the previous flush operation must have
+ been completed for all log groups before this block
+ can have been flushed to any of the groups. Therefore,
+ we know that log data is contiguous up to scanned_lsn
+ in all non-corrupt log groups. */
+
+ if (scanned_lsn > *contiguous_lsn) {
+
+ *contiguous_lsn = scanned_lsn;
+ }
+ }
+
+ data_len = log_block_get_data_len(log_block);
+
+ if (
+ (scanned_checkpoint_no > 0)
+ && (log_block_get_checkpoint_no(log_block)
+ < scanned_checkpoint_no)
+ && (scanned_checkpoint_no
+ - log_block_get_checkpoint_no(log_block)
+ > 0x80000000UL)) {
+
+ /* Garbage from a log buffer flush which was made
+ before the most recent database recovery */
+
+ *finished = true;
+ break;
+ }
+
+ scanned_lsn = scanned_lsn + data_len;
+ scanned_checkpoint_no = log_block_get_checkpoint_no(log_block);
+
+ if (data_len < OS_FILE_LOG_BLOCK_SIZE) {
+ /* Log data for this group ends here */
+
+ *finished = true;
+ } else {
+ log_block += OS_FILE_LOG_BLOCK_SIZE;
+ }
+ }
+
+ *group_scanned_lsn = scanned_lsn;
+
+ /* ===== write log to 'xtrabackup_logfile' ====== */
+ if (!*finished) {
+ write_size = RECV_SCAN_SIZE;
+ } else {
+ write_size = ut_uint64_align_up(scanned_lsn,
+ OS_FILE_LOG_BLOCK_SIZE) - start_lsn;
+ if (!is_last && scanned_lsn % OS_FILE_LOG_BLOCK_SIZE) {
+ write_size -= OS_FILE_LOG_BLOCK_SIZE;
+ }
+ }
+
+ if (ds_write(dst_log_file, log_sys->buf, write_size)) {
+ msg("xtrabackup: Error: "
+ "write to logfile failed\n");
+ return(false);
+ }
+
+ return(true);
+}
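+
+/* Note on write_size above: while the scan is unfinished a full
+   RECV_SCAN_SIZE chunk is written; once finished, the copied range is rounded
+   up to a whole log block and, unless this is the last copy pass, a trailing
+   partially filled block is held back so it can be re-read and rewritten
+   complete on the next pass. */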
+
+static my_bool
+xtrabackup_copy_logfile(lsn_t from_lsn, my_bool is_last)
+{
+ /* definition from recv_recovery_from_checkpoint_start() */
+ log_group_t* group;
+ lsn_t group_scanned_lsn;
+ lsn_t contiguous_lsn;
+
+ ut_a(dst_log_file != NULL);
+
+ /* read from checkpoint_lsn_start to current */
+ contiguous_lsn = ut_uint64_align_down(from_lsn, OS_FILE_LOG_BLOCK_SIZE);
+
+ /* TODO: We must check the contiguous_lsn still exists in log file.. */
+
+ group = UT_LIST_GET_FIRST(log_sys->log_groups);
+
+ while (group) {
+ bool finished;
+ lsn_t start_lsn;
+ lsn_t end_lsn;
+
+ /* reference recv_group_scan_log_recs() */
+ finished = false;
+
+ start_lsn = contiguous_lsn;
+
+ while (!finished) {
+
+ end_lsn = start_lsn + RECV_SCAN_SIZE;
+
+ xtrabackup_io_throttling();
+
+ mutex_enter(&log_sys->mutex);
+
+ log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
+ group, start_lsn, end_lsn);
+
+ if (!xtrabackup_scan_log_recs(group, is_last,
+ start_lsn, &contiguous_lsn, &group_scanned_lsn,
+ &finished)) {
+ goto error;
+ }
+
+ mutex_exit(&log_sys->mutex);
+
+ start_lsn = end_lsn;
+
+ }
+
+ group->scanned_lsn = group_scanned_lsn;
+
+ msg_ts(">> log scanned up to (" LSN_PF ")\n",
+ group->scanned_lsn);
+
+ group = UT_LIST_GET_NEXT(log_groups, group);
+
+ /* update global variable*/
+ log_copy_scanned_lsn = group_scanned_lsn;
+
+ /* innodb_mirrored_log_groups must be 1, no other groups */
+ ut_a(group == NULL);
+
+ debug_sync_point("xtrabackup_copy_logfile_pause");
+
+ }
+
+
+ return(FALSE);
+
+error:
+ mutex_exit(&log_sys->mutex);
+ ds_close(dst_log_file);
+ msg("xtrabackup: Error: xtrabackup_copy_logfile() failed.\n");
+ return(TRUE);
+}
+
+static
+#ifndef __WIN__
+void*
+#else
+ulint
+#endif
+log_copying_thread(
+ void* arg __attribute__((unused)))
+{
+ /*
+ Initialize mysys thread-specific memory so we can
+ use mysys functions in this thread.
+ */
+ my_thread_init();
+
+ ut_a(dst_log_file != NULL);
+
+ log_copying_running = TRUE;
+
+ while(log_copying) {
+ os_event_reset(log_copying_stop);
+ os_event_wait_time_low(log_copying_stop,
+ xtrabackup_log_copy_interval * 1000ULL,
+ 0);
+ if (log_copying) {
+ if(xtrabackup_copy_logfile(log_copy_scanned_lsn,
+ FALSE)) {
+
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+
+ /* last copying */
+ if(xtrabackup_copy_logfile(log_copy_scanned_lsn, TRUE)) {
+
+ exit(EXIT_FAILURE);
+ }
+
+ log_copying_running = FALSE;
+ my_thread_end();
+ os_thread_exit(NULL);
+
+ return(0);
+}
+
+/* io throttle watching (rough) */
+static
+#ifndef __WIN__
+void*
+#else
+ulint
+#endif
+io_watching_thread(
+ void* arg)
+{
+ (void)arg;
+ /* currently, for --backup only */
+ ut_a(xtrabackup_backup);
+
+ io_watching_thread_running = TRUE;
+
+ while (log_copying) {
+ os_thread_sleep(1000000); /*1 sec*/
+ io_ticket = xtrabackup_throttle;
+ os_event_set(wait_throttle);
+ }
+
+ /* stop io throttle */
+ xtrabackup_throttle = 0;
+ os_event_set(wait_throttle);
+
+ io_watching_thread_running = FALSE;
+
+ os_thread_exit(NULL);
+
+ return(0);
+}
+
+/************************************************************************
+I/O handler thread function. */
+static
+
+#ifndef __WIN__
+void*
+#else
+ulint
+#endif
+io_handler_thread(
+/*==============*/
+ void* arg)
+{
+ ulint segment;
+
+
+ segment = *((ulint*)arg);
+
+ while (srv_shutdown_state != SRV_SHUTDOWN_EXIT_THREADS) {
+ fil_aio_wait(segment);
+ }
+
+ /* We count the number of threads in os_thread_exit(). A created
+ thread should always use that to exit and not use return() to exit.
+ The thread actually never comes here because it is exited in an
+ os_event_wait(). */
+
+ os_thread_exit(NULL);
+
+#ifndef __WIN__
+ return(NULL); /* Not reached */
+#else
+ return(0);
+#endif
+}
+
+/**************************************************************************
+Data file copying thread. */
+static
+os_thread_ret_t
+data_copy_thread_func(
+/*==================*/
+ void *arg) /* thread context */
+{
+ data_thread_ctxt_t *ctxt = (data_thread_ctxt_t *) arg;
+ uint num = ctxt->num;
+ fil_node_t* node;
+
+ /*
+ Initialize mysys thread-specific memory so we can
+ use mysys functions in this thread.
+ */
+ my_thread_init();
+
+ debug_sync_point("data_copy_thread_func");
+
+ while ((node = datafiles_iter_next(ctxt->it)) != NULL) {
+
+ /* copy the datafile */
+ if(xtrabackup_copy_datafile(node, num)) {
+ msg("[%02u] xtrabackup: Error: "
+ "failed to copy datafile.\n", num);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ os_mutex_enter(ctxt->count_mutex);
+ (*ctxt->count)--;
+ os_mutex_exit(ctxt->count_mutex);
+
+ my_thread_end();
+ os_thread_exit(NULL);
+ OS_THREAD_DUMMY_RETURN;
+}
+
+/************************************************************************
+Initialize the appropriate datasink(s). Both local backups and streaming in the
+'xbstream' format allow parallel writes so we can write directly.
+
+Otherwise (i.e. when streaming in the 'tar' format) we need 2 separate datasinks
+for the data stream (and don't allow parallel data copying) and for metainfo
+files (including xtrabackup_logfile). The second datasink writes to temporary
+files first, and then streams them in a serialized way when closed. */
+static void
+xtrabackup_init_datasinks(void)
+{
+ if (xtrabackup_parallel > 1 && xtrabackup_stream &&
+ xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
+ msg("xtrabackup: warning: the --parallel option does not have "
+ "any effect when streaming in the 'tar' format. "
+ "You can use the 'xbstream' format instead.\n");
+ xtrabackup_parallel = 1;
+ }
+
+ /* Start building out the pipelines from the terminus back */
+ if (xtrabackup_stream) {
+ /* All streaming goes to stdout */
+ ds_data = ds_meta = ds_redo = ds_create(xtrabackup_target_dir,
+ DS_TYPE_STDOUT);
+ } else {
+ /* Local filesystem */
+ ds_data = ds_meta = ds_redo = ds_create(xtrabackup_target_dir,
+ DS_TYPE_LOCAL);
+ }
+
+ /* Track it for destruction */
+ xtrabackup_add_datasink(ds_data);
+
+ /* Stream formatting */
+ if (xtrabackup_stream) {
+ ds_ctxt_t *ds;
+ if (xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_ARCHIVE);
+ } else if (xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM) {
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM);
+ } else {
+ /* bad juju... */
+ ds = NULL;
+ }
+
+ xtrabackup_add_datasink(ds);
+
+ ds_set_pipe(ds, ds_data);
+ ds_data = ds;
+
+ if (xtrabackup_stream_fmt != XB_STREAM_FMT_XBSTREAM) {
+
+ /* 'tar' does not allow parallel streams */
+ ds_redo = ds_meta = ds_create(xtrabackup_target_dir,
+ DS_TYPE_TMPFILE);
+ xtrabackup_add_datasink(ds_meta);
+ ds_set_pipe(ds_meta, ds);
+ } else {
+ ds_redo = ds_meta = ds_data;
+ }
+ }
+
+ /* Encryption */
+ if (xtrabackup_encrypt) {
+ ds_ctxt_t *ds;
+
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_ENCRYPT);
+ xtrabackup_add_datasink(ds);
+
+ ds_set_pipe(ds, ds_data);
+ if (ds_data != ds_meta) {
+ ds_data = ds;
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_ENCRYPT);
+ xtrabackup_add_datasink(ds);
+
+ ds_set_pipe(ds, ds_meta);
+ ds_redo = ds_meta = ds;
+ } else {
+ ds_redo = ds_data = ds_meta = ds;
+ }
+ }
+
+ /* Compression for ds_data and ds_redo */
+ if (xtrabackup_compress) {
+ ds_ctxt_t *ds;
+
+ /* Use a 1 MB buffer for compressed output stream */
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER);
+ ds_buffer_set_size(ds, 1024 * 1024);
+ xtrabackup_add_datasink(ds);
+ ds_set_pipe(ds, ds_data);
+ if (ds_data != ds_redo) {
+ ds_data = ds;
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER);
+ ds_buffer_set_size(ds, 1024 * 1024);
+ xtrabackup_add_datasink(ds);
+ ds_set_pipe(ds, ds_redo);
+ ds_redo = ds;
+ } else {
+ ds_redo = ds_data = ds;
+ }
+
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_COMPRESS);
+ xtrabackup_add_datasink(ds);
+ ds_set_pipe(ds, ds_data);
+ if (ds_data != ds_redo) {
+ ds_data = ds;
+ ds = ds_create(xtrabackup_target_dir, DS_TYPE_COMPRESS);
+ xtrabackup_add_datasink(ds);
+ ds_set_pipe(ds, ds_redo);
+ ds_redo = ds;
+ } else {
+ ds_redo = ds_data = ds;
+ }
+ }
+}
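+
+/* Illustrative pipelines built above (arrows show where writes are piped):
+
+     plain --backup:                    data files -> LOCAL
+     --backup --stream=xbstream --compress --encrypt:
+         data files -> COMPRESS -> BUFFER(1MB) -> ENCRYPT -> XBSTREAM -> STDOUT
+
+   With --stream=tar, metadata and the redo log additionally go through a
+   separate TMPFILE datasink, since the 'tar' format cannot interleave
+   parallel streams. */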
+
+/************************************************************************
+Destroy datasinks.
+
+Destruction is done in reverse creation order so as not to violate the
+pipeline order, i.e. so that each datasink is able to flush its data down the
+pipeline. */
+static void xtrabackup_destroy_datasinks(void)
+{
+ for (uint i = actual_datasinks; i > 0; i--) {
+ ds_destroy(datasinks[i-1]);
+ datasinks[i-1] = NULL;
+ }
+ ds_data = NULL;
+ ds_meta = NULL;
+ ds_redo = NULL;
+}
+
+#define SRV_N_PENDING_IOS_PER_THREAD OS_AIO_N_PENDING_IOS_PER_THREAD
+#define SRV_MAX_N_PENDING_SYNC_IOS 100
+
+/************************************************************************
+@return TRUE if table should be opened. */
+static
+ibool
+xb_check_if_open_tablespace(
+ const char* db,
+ const char* table)
+{
+ char buf[FN_REFLEN];
+
+ snprintf(buf, sizeof(buf), "%s/%s", db, table);
+
+ return !check_if_skip_table(buf);
+}
+
+/************************************************************************
+Initializes the I/O and tablespace cache subsystems. */
+static
+void
+xb_fil_io_init(void)
+/*================*/
+{
+ srv_n_file_io_threads = srv_n_read_io_threads;
+
+ os_aio_init(8 * SRV_N_PENDING_IOS_PER_THREAD,
+ srv_n_read_io_threads,
+ srv_n_write_io_threads,
+ SRV_MAX_N_PENDING_SYNC_IOS);
+
+ fil_init(srv_file_per_table ? 50000 : 5000, LONG_MAX);
+
+ fsp_init();
+}
+
+/****************************************************************************
+Populates the tablespace memory cache by scanning for and opening data files.
+@returns DB_SUCCESS or error code.*/
+static
+ulint
+xb_load_tablespaces(void)
+/*=====================*/
+{
+ ulint i;
+ ibool create_new_db;
+ ulint err;
+ ulint sum_of_new_sizes;
+
+ for (i = 0; i < srv_n_file_io_threads; i++) {
+ thread_nr[i] = i;
+
+ os_thread_create(io_handler_thread, thread_nr + i,
+ thread_ids + i);
+ }
+
+ os_thread_sleep(200000); /*0.2 sec*/
+
+ err = open_or_create_data_files(&create_new_db,
+ &min_flushed_lsn, &max_flushed_lsn,
+ &sum_of_new_sizes);
+ if (err != DB_SUCCESS) {
+ msg("xtrabackup: Could not open or create data files.\n"
+ "xtrabackup: If you tried to add new data files, and it "
+ "failed here,\n"
+ "xtrabackup: you should now edit innodb_data_file_path in "
+ "my.cnf back\n"
+ "xtrabackup: to what it was, and remove the new ibdata "
+ "files InnoDB created\n"
+ "xtrabackup: in this failed attempt. InnoDB only wrote "
+ "those files full of\n"
+ "xtrabackup: zeros, but did not yet use them in any way. "
+ "But be careful: do not\n"
+ "xtrabackup: remove old data files which contain your "
+ "precious data!\n");
+ return(err);
+ }
+
+ /* create_new_db must not be TRUE.. */
+ if (create_new_db) {
+ msg("xtrabackup: could not find data files at the "
+ "specified datadir\n");
+ return(DB_ERROR);
+ }
+
+ /* Add separate undo tablespaces to fil_system */
+
+ err = srv_undo_tablespaces_init(FALSE,
+ TRUE,
+ srv_undo_tablespaces,
+ &srv_undo_tablespaces_open);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ /* It is important to call fil_load_single_table_tablespaces() after
+ srv_undo_tablespaces_init(), because fil_is_user_tablespace_id()
+ relies on srv_undo_tablespaces_open to be properly initialized */
+
+ msg("xtrabackup: Generating a list of tablespaces\n");
+
+ err = fil_load_single_table_tablespaces(xb_check_if_open_tablespace);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ debug_sync_point("xtrabackup_load_tablespaces_pause");
+
+ return(DB_SUCCESS);
+}
+
+/************************************************************************
+Initialize the tablespace memory cache and populate it by scanning for and
+opening data files.
+@returns DB_SUCCESS or error code.*/
+ulint
+xb_data_files_init(void)
+/*====================*/
+{
+ xb_fil_io_init();
+
+ return(xb_load_tablespaces());
+}
+
+/************************************************************************
+Destroy the tablespace memory cache. */
+void
+xb_data_files_close(void)
+/*====================*/
+{
+ ulint i;
+
+ /* Shutdown the aio threads. This has been copied from
+ innobase_shutdown_for_mysql(). */
+
+ srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS;
+
+ for (i = 0; i < 1000; i++) {
+ os_aio_wake_all_threads_at_shutdown();
+
+ os_mutex_enter(os_sync_mutex);
+
+ if (os_thread_count == 0) {
+
+ os_mutex_exit(os_sync_mutex);
+
+ os_thread_sleep(10000);
+
+ break;
+ }
+
+ os_mutex_exit(os_sync_mutex);
+
+ os_thread_sleep(10000);
+ }
+
+ if (i == 1000) {
+ msg("xtrabackup: Warning: %lu threads created by InnoDB"
+ " had not exited at shutdown!\n",
+ (ulong) os_thread_count);
+ }
+
+ os_aio_free();
+
+ fil_close_all_files();
+
+ /* Free the double write data structures. */
+ if (buf_dblwr) {
+ buf_dblwr_free();
+ }
+
+ /* Reset srv_n_file_io_threads to its default value to avoid a confusing
+ warning on --prepare in innobase_start_or_create_for_mysql() */
+ srv_n_file_io_threads = 4;
+
+ srv_shutdown_state = SRV_SHUTDOWN_NONE;
+}
+
+/***********************************************************************
+Allocate and initialize the entry for databases and tables filtering
+hash tables. If memory allocation is not successful, terminate program.
+@return pointer to the created entry. */
+static
+xb_filter_entry_t *
+xb_new_filter_entry(
+/*================*/
+ const char* name) /*!< in: name of table/database */
+{
+ xb_filter_entry_t *entry;
+ ulint namelen = strlen(name);
+
+ ut_a(namelen <= NAME_LEN * 2 + 1);
+
+ entry = static_cast<xb_filter_entry_t *>
+ (ut_malloc(sizeof(xb_filter_entry_t) + namelen + 1));
+ memset(entry, '\0', sizeof(xb_filter_entry_t) + namelen + 1);
+ entry->name = ((char*)entry) + sizeof(xb_filter_entry_t);
+ strcpy(entry->name, name);
+ entry->has_tables = FALSE;
+
+ return entry;
+}
+
+/***********************************************************************
+Add entry to hash table. If hash table is NULL, allocate and initialize
+new hash table */
+static
+xb_filter_entry_t*
+xb_add_filter(
+/*========================*/
+ const char* name, /*!< in: name of table/database */
+ hash_table_t** hash) /*!< in/out: hash to insert into */
+{
+ xb_filter_entry_t* entry;
+
+ entry = xb_new_filter_entry(name);
+
+ if (UNIV_UNLIKELY(*hash == NULL)) {
+ *hash = hash_create(1000);
+ }
+ HASH_INSERT(xb_filter_entry_t,
+ name_hash, *hash,
+ ut_fold_string(entry->name),
+ entry);
+
+ return entry;
+}
+
+/***********************************************************************
+Validate the name of a table or database. If the name is invalid, the
+program will be terminated with an error code. */
+static
+void
+xb_validate_name(
+/*=============*/
+ const char* name, /*!< in: name */
+ size_t len) /*!< in: length of name */
+{
+ const char* p;
+
+ /* perform only basic validation. validate length and
+ path symbols */
+ if (len > NAME_LEN) {
+ msg("xtrabackup: name `%s` is too long.\n", name);
+ exit(EXIT_FAILURE);
+ }
+ p = strpbrk(name, "/\\~");
+ if (p && p - name < NAME_LEN) {
+ msg("xtrabackup: name `%s` is not valid.\n", name);
+ exit(EXIT_FAILURE);
+ }
+}
+
+/***********************************************************************
+Register a new filter entry, which can be either a database
+or a table name. */
+static
+void
+xb_register_filter_entry(
+/*=====================*/
+ const char* name) /*!< in: name */
+{
+ const char* p;
+ size_t namelen;
+ xb_filter_entry_t* db_entry = NULL;
+
+ namelen = strlen(name);
+ if ((p = strchr(name, '.')) != NULL) {
+ char dbname[NAME_LEN + 1];
+
+ xb_validate_name(name, p - name);
+ xb_validate_name(p + 1, namelen - (p - name));
+
+ strncpy(dbname, name, p - name);
+ dbname[p - name] = 0;
+
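+		/* A qualified name such as "db1.t1" registers the database
+		part in databases_hash (marked as having explicit tables) and
+		the full "db1.t1" string in tables_hash; an unqualified name
+		only adds a database-level filter. */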
+ if (databases_hash) {
+ HASH_SEARCH(name_hash, databases_hash,
+ ut_fold_string(dbname),
+ xb_filter_entry_t*,
+ db_entry, (void) 0,
+ !strcmp(db_entry->name, dbname));
+ }
+ if (!db_entry) {
+ db_entry = xb_add_filter(dbname, &databases_hash);
+ }
+ db_entry->has_tables = TRUE;
+ xb_add_filter(name, &tables_hash);
+ } else {
+ xb_validate_name(name, namelen);
+
+ xb_add_filter(name, &databases_hash);
+ }
+}
+
+/***********************************************************************
+Register new table for the filter. */
+static
+void
+xb_register_table(
+/*==============*/
+ const char* name) /*!< in: name of table */
+{
+ if (strchr(name, '.') == NULL) {
+ msg("xtrabackup: `%s` is not fully qualified name.\n", name);
+ exit(EXIT_FAILURE);
+ }
+
+ xb_register_filter_entry(name);
+}
+
+/***********************************************************************
+Register new regex for the filter. */
+static
+void
+xb_register_regex(
+/*==============*/
+ const char* regex) /*!< in: regex */
+{
+ xb_regex_list_node_t* node;
+ char errbuf[100];
+ int ret;
+
+ node = static_cast<xb_regex_list_node_t *>
+ (ut_malloc(sizeof(xb_regex_list_node_t)));
+
+ ret = xb_regcomp(&node->regex, regex, REG_EXTENDED);
+ if (ret != 0) {
+ xb_regerror(ret, &node->regex, errbuf, sizeof(errbuf));
+ msg("xtrabackup: error: tables regcomp(%s): %s\n",
+ regex, errbuf);
+ exit(EXIT_FAILURE);
+ }
+
+ UT_LIST_ADD_LAST(regex_list, regex_list, node);
+}
+
+typedef void (*insert_entry_func_t)(const char*);
+
+/***********************************************************************
+Scan string and load filter entries from it. */
+static
+void
+xb_load_list_string(
+/*================*/
+ char* list, /*!< in: string representing a list */
+ const char* delimiters, /*!< in: delimiters of entries */
+ insert_entry_func_t ins) /*!< in: callback to add entry */
+{
+ char* p;
+ char* saveptr;
+
+ p = strtok_r(list, delimiters, &saveptr);
+ while (p) {
+
+ ins(p);
+
+ p = strtok_r(NULL, delimiters, &saveptr);
+ }
+}
+
+/***********************************************************************
+Scan file and load filter entries from it. */
+static
+void
+xb_load_list_file(
+/*==============*/
+ const char* filename, /*!< in: name of file */
+ insert_entry_func_t ins) /*!< in: callback to add entry */
+{
+ char name_buf[NAME_LEN*2+2];
+ FILE* fp;
+
+ /* read and store the filenames */
+ fp = fopen(filename, "r");
+ if (!fp) {
+ msg("xtrabackup: cannot open %s\n",
+ filename);
+ exit(EXIT_FAILURE);
+ }
+ while (fgets(name_buf, sizeof(name_buf), fp) != NULL) {
+ char* p = strchr(name_buf, '\n');
+ if (p) {
+ *p = '\0';
+ } else {
+			msg("xtrabackup: `%s...` name is too long.\n", name_buf);
+ exit(EXIT_FAILURE);
+ }
+
+ ins(name_buf);
+ }
+
+ fclose(fp);
+}
+
+
+static
+void
+xb_filters_init()
+{
+ UT_LIST_INIT(regex_list);
+
+ if (xtrabackup_databases) {
+ xb_load_list_string(xtrabackup_databases, " \t",
+ xb_register_filter_entry);
+ }
+
+ if (xtrabackup_databases_file) {
+ xb_load_list_file(xtrabackup_databases_file,
+ xb_register_filter_entry);
+ }
+
+ if (xtrabackup_tables) {
+ xb_load_list_string(xtrabackup_tables, ",",
+ xb_register_regex);
+ }
+
+ if (xtrabackup_tables_file) {
+ xb_load_list_file(xtrabackup_tables_file, xb_register_table);
+ }
+}
+
+static
+void
+xb_filter_hash_free(hash_table_t* hash)
+{
+ ulint i;
+
+ /* free the hash elements */
+ for (i = 0; i < hash_get_n_cells(hash); i++) {
+ xb_filter_entry_t* table;
+
+ table = static_cast<xb_filter_entry_t *>
+ (HASH_GET_FIRST(hash, i));
+
+ while (table) {
+ xb_filter_entry_t* prev_table = table;
+
+ table = static_cast<xb_filter_entry_t *>
+ (HASH_GET_NEXT(name_hash, prev_table));
+
+ HASH_DELETE(xb_filter_entry_t, name_hash, hash,
+ ut_fold_string(prev_table->name), prev_table);
+ ut_free(prev_table);
+ }
+ }
+
+ /* free hash */
+ hash_table_free(hash);
+}
+
+/************************************************************************
+Destroy table filters for partial backup. */
+static
+void
+xb_filters_free()
+{
+ while (UT_LIST_GET_LEN(regex_list) > 0) {
+ xb_regex_list_node_t* node = UT_LIST_GET_FIRST(regex_list);
+ UT_LIST_REMOVE(regex_list, regex_list, node);
+ xb_regfree(&node->regex);
+ ut_free(node);
+ }
+
+ if (tables_hash) {
+ xb_filter_hash_free(tables_hash);
+ }
+
+ if (databases_hash) {
+ xb_filter_hash_free(databases_hash);
+ }
+}
+
+/*********************************************************************//**
+Creates or opens the log files and closes them.
+@return DB_SUCCESS or error code */
+static
+ulint
+open_or_create_log_file(
+/*====================*/
+ ibool create_new_db, /*!< in: TRUE if we should create a
+ new database */
+ ibool* log_file_created, /*!< out: TRUE if new log file
+ created */
+ ibool log_file_has_been_opened,/*!< in: TRUE if a log file has been
+ opened before: then it is an error
+ to try to create another log file */
+ ulint k, /*!< in: log group number */
+ ulint i) /*!< in: log file number in group */
+{
+ ibool ret;
+ os_offset_t size;
+ char name[10000];
+ ulint dirnamelen;
+
+ UT_NOT_USED(create_new_db);
+ UT_NOT_USED(log_file_has_been_opened);
+ UT_NOT_USED(k);
+ ut_ad(k == 0);
+
+ *log_file_created = FALSE;
+
+ srv_normalize_path_for_win(srv_log_group_home_dir);
+
+ dirnamelen = strlen(srv_log_group_home_dir);
+ ut_a(dirnamelen < (sizeof name) - 10 - sizeof "ib_logfile");
+ memcpy(name, srv_log_group_home_dir, dirnamelen);
+
+ /* Add a path separator if needed. */
+ if (dirnamelen && name[dirnamelen - 1] != SRV_PATH_SEPARATOR) {
+ name[dirnamelen++] = SRV_PATH_SEPARATOR;
+ }
+
+ sprintf(name + dirnamelen, "%s%lu", "ib_logfile", (ulong) i);
+
+ files[i] = os_file_create(innodb_file_log_key, name,
+ OS_FILE_OPEN, OS_FILE_NORMAL,
+ OS_LOG_FILE, &ret);
+ if (ret == FALSE) {
+ fprintf(stderr, "InnoDB: Error in opening %s\n", name);
+
+ return(DB_ERROR);
+ }
+
+ size = os_file_get_size(files[i]);
+
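+	/* srv_log_file_size has already been converted to pages by
+	xb_normalize_init_values(), so the expected size in bytes is
+	srv_log_file_size * UNIV_PAGE_SIZE. */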
+ if (size != srv_log_file_size * UNIV_PAGE_SIZE) {
+
+ fprintf(stderr,
+ "InnoDB: Error: log file %s is"
+ " of different size " UINT64PF " bytes\n"
+ "InnoDB: than specified in the .cnf"
+ " file " UINT64PF " bytes!\n",
+ name, size, srv_log_file_size * UNIV_PAGE_SIZE);
+
+ return(DB_ERROR);
+ }
+
+ ret = os_file_close(files[i]);
+ ut_a(ret);
+
+ if (i == 0) {
+ /* Create in memory the file space object
+ which is for this log group */
+
+ fil_space_create(name,
+ 2 * k + SRV_LOG_SPACE_FIRST_ID, 0, FIL_LOG);
+ }
+
+ ut_a(fil_validate());
+
+ ut_a(fil_node_create(name, srv_log_file_size,
+ 2 * k + SRV_LOG_SPACE_FIRST_ID, FALSE));
+ if (i == 0) {
+ log_group_init(k, srv_n_log_files,
+ srv_log_file_size * UNIV_PAGE_SIZE,
+ 2 * k + SRV_LOG_SPACE_FIRST_ID,
+ SRV_LOG_SPACE_FIRST_ID + 1); /* dummy arch
+ space id */
+ }
+
+ return(DB_SUCCESS);
+}
+
+/*********************************************************************//**
+Normalizes init parameter values to the units used inside InnoDB. */
+static
+void
+xb_normalize_init_values(void)
+/*==========================*/
+{
+ ulint i;
+
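+	/* Data file sizes are configured in megabytes, while the log file
+	and log buffer sizes are in bytes; convert all of them to pages. */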
+ for (i = 0; i < srv_n_data_files; i++) {
+ srv_data_file_sizes[i] = srv_data_file_sizes[i]
+ * ((1024 * 1024) / UNIV_PAGE_SIZE);
+ }
+
+ srv_last_file_size_max = srv_last_file_size_max
+ * ((1024 * 1024) / UNIV_PAGE_SIZE);
+
+ srv_log_file_size = srv_log_file_size / UNIV_PAGE_SIZE;
+
+ srv_log_buffer_size = srv_log_buffer_size / UNIV_PAGE_SIZE;
+
+ srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE);
+}
+
+/***********************************************************************
+Set the open files limit. Based on set_max_open_files().
+
+@return the resulting open files limit. May be less or more than the requested
+value. */
+static uint
+xb_set_max_open_files(
+/*==================*/
+ uint max_file_limit) /*!<in: open files limit */
+{
+#if defined(RLIMIT_NOFILE)
+ struct rlimit rlimit;
+ uint old_cur;
+
+ if (getrlimit(RLIMIT_NOFILE, &rlimit)) {
+
+ goto end;
+ }
+
+ old_cur = (uint) rlimit.rlim_cur;
+
+ if (rlimit.rlim_cur == RLIM_INFINITY) {
+
+ rlimit.rlim_cur = max_file_limit;
+ }
+
+ if (rlimit.rlim_cur >= max_file_limit) {
+
+ max_file_limit = rlimit.rlim_cur;
+ goto end;
+ }
+
+ rlimit.rlim_cur = rlimit.rlim_max = max_file_limit;
+
+ if (setrlimit(RLIMIT_NOFILE, &rlimit)) {
+
+ max_file_limit = old_cur; /* Use original value */
+ } else {
+
+ rlimit.rlim_cur = 0; /* Safety if next call fails */
+
+ (void) getrlimit(RLIMIT_NOFILE, &rlimit);
+
+ if (rlimit.rlim_cur) {
+
+ /* If call didn't fail */
+ max_file_limit = (uint) rlimit.rlim_cur;
+ }
+ }
+
+end:
+ return(max_file_limit);
+#else
+ return(0);
+#endif
+}
+
+void
+xtrabackup_backup_func(void)
+{
+ MY_STAT stat_info;
+ lsn_t latest_cp;
+ uint i;
+ uint count;
+ os_ib_mutex_t count_mutex;
+ data_thread_ctxt_t *data_threads;
+
+#ifdef USE_POSIX_FADVISE
+ msg("xtrabackup: uses posix_fadvise().\n");
+#endif
+
+ /* cd to datadir */
+
+ if (my_setwd(mysql_real_data_home,MYF(MY_WME)))
+ {
+ msg("xtrabackup: cannot my_setwd %s\n", mysql_real_data_home);
+ exit(EXIT_FAILURE);
+ }
+ msg("xtrabackup: cd to %s\n", mysql_real_data_home);
+
+ msg("xtrabackup: open files limit requested %u, set to %u\n",
+ (uint) xb_open_files_limit,
+ xb_set_max_open_files(xb_open_files_limit));
+
+ mysql_data_home= mysql_data_home_buff;
+ mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
+ mysql_data_home[1]=0;
+
+ srv_read_only_mode = TRUE;
+
+ srv_backup_mode = TRUE;
+ srv_close_files = xb_close_files;
+
+ if (srv_close_files)
+ msg("xtrabackup: warning: close-files specified. Use it "
+		    "at your own risk. If there are DDL operations such as "
+		    "DROP TABLE or RENAME TABLE during the backup, an "
+		    "inconsistent backup will be produced.\n");
+
+ /* initialize components */
+ if(innodb_init_param())
+ exit(EXIT_FAILURE);
+
+ xb_normalize_init_values();
+
+#ifndef __WIN__
+ if (srv_file_flush_method_str == NULL) {
+ /* These are the default options */
+ srv_unix_file_flush_method = SRV_UNIX_FSYNC;
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "fsync")) {
+ srv_unix_file_flush_method = SRV_UNIX_FSYNC;
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DSYNC")) {
+ srv_unix_file_flush_method = SRV_UNIX_O_DSYNC;
+
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DIRECT")) {
+ srv_unix_file_flush_method = SRV_UNIX_O_DIRECT;
+ msg("xtrabackup: using O_DIRECT\n");
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "littlesync")) {
+ srv_unix_file_flush_method = SRV_UNIX_LITTLESYNC;
+
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "nosync")) {
+ srv_unix_file_flush_method = SRV_UNIX_NOSYNC;
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "ALL_O_DIRECT")) {
+ srv_unix_file_flush_method = SRV_UNIX_ALL_O_DIRECT;
+ msg("xtrabackup: using ALL_O_DIRECT\n");
+ } else if (0 == ut_strcmp(srv_file_flush_method_str,
+ "O_DIRECT_NO_FSYNC")) {
+ srv_unix_file_flush_method = SRV_UNIX_O_DIRECT_NO_FSYNC;
+ msg("xtrabackup: using O_DIRECT_NO_FSYNC\n");
+ } else {
+ msg("xtrabackup: Unrecognized value %s for "
+ "innodb_flush_method\n", srv_file_flush_method_str);
+ exit(EXIT_FAILURE);
+ }
+#else /* __WIN__ */
+ /* We can only use synchronous unbuffered IO on Windows for now */
+ if (srv_file_flush_method_str != NULL) {
+		msg("xtrabackup: Warning: "
+		    "ignoring innodb_flush_method = %s on Windows.\n",
+		    srv_file_flush_method_str);
+ }
+
+ srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
+ srv_use_native_aio = FALSE;
+#endif
+
+ if (srv_buf_pool_size >= 1000 * 1024 * 1024) {
+		/* Here we still have srv_pool_size counted
+		in kilobytes (in 4.0 this was in bytes);
+		srv_boot() converts the value to pages.
+		If the buffer pool is less than 1000 MB,
+		assume fewer threads. */
+ srv_max_n_threads = 50000;
+
+ } else if (srv_buf_pool_size >= 8 * 1024 * 1024) {
+
+ srv_max_n_threads = 10000;
+ } else {
+ srv_max_n_threads = 1000; /* saves several MB of memory,
+ especially in 64-bit
+ computers */
+ }
+
+ os_sync_mutex = NULL;
+ srv_general_init();
+ ut_crc32_init();
+
+ xb_filters_init();
+
+ {
+ ibool log_file_created;
+ ibool log_created = FALSE;
+ ibool log_opened = FALSE;
+ ulint err;
+ ulint i;
+
+ xb_fil_io_init();
+
+ log_init();
+
+ lock_sys_create(srv_lock_table_size);
+
+ for (i = 0; i < srv_n_log_files; i++) {
+ err = open_or_create_log_file(FALSE, &log_file_created,
+ log_opened, 0, i);
+ if (err != DB_SUCCESS) {
+
+ //return((int) err);
+ exit(EXIT_FAILURE);
+ }
+
+ if (log_file_created) {
+ log_created = TRUE;
+ } else {
+ log_opened = TRUE;
+ }
+ if ((log_opened && log_created)) {
+ msg(
+ "xtrabackup: Error: all log files must be created at the same time.\n"
+ "xtrabackup: All log files must be created also in database creation.\n"
+ "xtrabackup: If you want bigger or smaller log files, shut down the\n"
+ "xtrabackup: database and make sure there were no errors in shutdown.\n"
+ "xtrabackup: Then delete the existing log files. Edit the .cnf file\n"
+ "xtrabackup: and start the database again.\n");
+
+ //return(DB_ERROR);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+		/* log_file_created must not be TRUE if the server is online */
+ if (log_file_created) {
+ msg("xtrabackup: Something wrong with source files...\n");
+ exit(EXIT_FAILURE);
+ }
+
+ }
+
+ /* create extra LSN dir if it does not exist. */
+ if (xtrabackup_extra_lsndir
+ &&!my_stat(xtrabackup_extra_lsndir,&stat_info,MYF(0))
+ && (my_mkdir(xtrabackup_extra_lsndir,0777,MYF(0)) < 0)) {
+ msg("xtrabackup: Error: cannot mkdir %d: %s\n",
+ my_errno, xtrabackup_extra_lsndir);
+ exit(EXIT_FAILURE);
+ }
+
+ /* create target dir if not exist */
+ if (!my_stat(xtrabackup_target_dir,&stat_info,MYF(0))
+ && (my_mkdir(xtrabackup_target_dir,0777,MYF(0)) < 0)){
+ msg("xtrabackup: Error: cannot mkdir %d: %s\n",
+ my_errno, xtrabackup_target_dir);
+ exit(EXIT_FAILURE);
+ }
+
+ {
+ fil_system_t* f_system = fil_system;
+
+ /* definition from recv_recovery_from_checkpoint_start() */
+ log_group_t* max_cp_group;
+ ulint max_cp_field;
+ byte* buf;
+ byte* log_hdr_buf_;
+ byte* log_hdr_buf;
+ ulint err;
+
+		/* start a background thread to copy newer log */
+ os_thread_id_t log_copying_thread_id;
+ datafiles_iter_t *it;
+
+ log_hdr_buf_ = static_cast<byte *>
+ (ut_malloc(LOG_FILE_HDR_SIZE + UNIV_PAGE_SIZE_MAX));
+ log_hdr_buf = static_cast<byte *>
+ (ut_align(log_hdr_buf_, UNIV_PAGE_SIZE_MAX));
+
+ /* get current checkpoint_lsn */
+ /* Look for the latest checkpoint from any of the log groups */
+
+ mutex_enter(&log_sys->mutex);
+
+ err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
+
+ if (err != DB_SUCCESS) {
+
+ ut_free(log_hdr_buf_);
+ exit(EXIT_FAILURE);
+ }
+
+ log_group_read_checkpoint_info(max_cp_group, max_cp_field);
+ buf = log_sys->checkpoint_buf;
+
+ checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
+ checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
+
+ mutex_exit(&log_sys->mutex);
+
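+	/* Re-read the log file header and checkpoint info until the
+	checkpoint number no longer changes between the two reads; this
+	guards against the server writing a new checkpoint while the
+	header is being copied. */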
+reread_log_header:
+ fil_io(OS_FILE_READ | OS_FILE_LOG, TRUE, max_cp_group->space_id,
+ 0,
+ 0, 0, LOG_FILE_HDR_SIZE,
+ log_hdr_buf, max_cp_group);
+
+ /* check consistency of log file header to copy */
+ mutex_enter(&log_sys->mutex);
+
+ err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
+
+ if (err != DB_SUCCESS) {
+
+ ut_free(log_hdr_buf_);
+ exit(EXIT_FAILURE);
+ }
+
+ log_group_read_checkpoint_info(max_cp_group, max_cp_field);
+ buf = log_sys->checkpoint_buf;
+
+ if(checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
+
+ checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
+ checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
+ mutex_exit(&log_sys->mutex);
+ goto reread_log_header;
+ }
+
+ mutex_exit(&log_sys->mutex);
+
+ xtrabackup_init_datasinks();
+
+ if (!select_history()) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* open the log file */
+ memset(&stat_info, 0, sizeof(MY_STAT));
+ dst_log_file = ds_open(ds_redo, XB_LOG_FILENAME, &stat_info);
+ if (dst_log_file == NULL) {
+ msg("xtrabackup: error: failed to open the target stream for "
+ "'%s'.\n", XB_LOG_FILENAME);
+ ut_free(log_hdr_buf_);
+ exit(EXIT_FAILURE);
+ }
+
+	/* Label the copied log header: stamp "xtrabkup <timestamp>" into the
+	LOG_FILE_WAS_CREATED_BY_HOT_BACKUP field so that --prepare can later
+	recognize the file as xtrabackup_logfile. */
+ strcpy((char*) log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
+ "xtrabkup ");
+ ut_sprintf_timestamp(
+ (char*) log_hdr_buf + (LOG_FILE_WAS_CREATED_BY_HOT_BACKUP
+ + (sizeof "xtrabkup ") - 1));
+
+ if (ds_write(dst_log_file, log_hdr_buf, LOG_FILE_HDR_SIZE)) {
+ msg("xtrabackup: error: write to logfile failed\n");
+ ut_free(log_hdr_buf_);
+ exit(EXIT_FAILURE);
+ }
+
+ ut_free(log_hdr_buf_);
+
+ /* start flag */
+ log_copying = TRUE;
+
+ /* start io throttle */
+ if(xtrabackup_throttle) {
+ os_thread_id_t io_watching_thread_id;
+
+ io_ticket = xtrabackup_throttle;
+ wait_throttle = os_event_create();
+
+ os_thread_create(io_watching_thread, NULL,
+ &io_watching_thread_id);
+ }
+
+ mutex_enter(&log_sys->mutex);
+ xtrabackup_choose_lsn_offset(checkpoint_lsn_start);
+ mutex_exit(&log_sys->mutex);
+
+ /* copy log file by current position */
+ if(xtrabackup_copy_logfile(checkpoint_lsn_start, FALSE))
+ exit(EXIT_FAILURE);
+
+
+ log_copying_stop = os_event_create();
+ os_thread_create(log_copying_thread, NULL, &log_copying_thread_id);
+
+ /* Populate fil_system with tablespaces to copy */
+ err = xb_load_tablespaces();
+ if (err != DB_SUCCESS) {
+		msg("xtrabackup: error: xb_load_tablespaces() failed with "
+ "error code %lu\n", err);
+ exit(EXIT_FAILURE);
+ }
+
+ /* FLUSH CHANGED_PAGE_BITMAPS call */
+ if (!flush_changed_page_bitmaps()) {
+ exit(EXIT_FAILURE);
+ }
+ debug_sync_point("xtrabackup_suspend_at_start");
+
+ if (xtrabackup_incremental) {
+ if (!xtrabackup_incremental_force_scan) {
+ changed_page_bitmap = xb_page_bitmap_init();
+ }
+ if (!changed_page_bitmap) {
+ msg("xtrabackup: using the full scan for incremental "
+ "backup\n");
+ } else if (incremental_lsn != checkpoint_lsn_start) {
+ /* Do not print that bitmaps are used when dummy bitmap
+			is built for an empty LSN range. */
+ msg("xtrabackup: using the changed page bitmap\n");
+ }
+ }
+
+ ut_a(xtrabackup_parallel > 0);
+
+ if (xtrabackup_parallel > 1) {
+ msg("xtrabackup: Starting %u threads for parallel data "
+ "files transfer\n", xtrabackup_parallel);
+ }
+
+ it = datafiles_iter_new(f_system);
+ if (it == NULL) {
+ msg("xtrabackup: Error: datafiles_iter_new() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Create data copying threads */
+ data_threads = (data_thread_ctxt_t *)
+ ut_malloc(sizeof(data_thread_ctxt_t) * xtrabackup_parallel);
+ count = xtrabackup_parallel;
+ count_mutex = os_mutex_create();
+
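+	/* Each worker thread (data_copy_thread_func) pulls data files from
+	the shared iterator; when it finishes it decrements *count under
+	count_mutex, and the loop below waits for the counter to reach
+	zero. */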
+ for (i = 0; i < (uint) xtrabackup_parallel; i++) {
+ data_threads[i].it = it;
+ data_threads[i].num = i+1;
+ data_threads[i].count = &count;
+ data_threads[i].count_mutex = count_mutex;
+ os_thread_create(data_copy_thread_func, data_threads + i,
+ &data_threads[i].id);
+ }
+
+ /* Wait for threads to exit */
+ while (1) {
+ os_thread_sleep(1000000);
+ os_mutex_enter(count_mutex);
+ if (count == 0) {
+ os_mutex_exit(count_mutex);
+ break;
+ }
+ os_mutex_exit(count_mutex);
+ }
+
+ os_mutex_free(count_mutex);
+ ut_free(data_threads);
+ datafiles_iter_free(it);
+
+ if (changed_page_bitmap) {
+ xb_page_bitmap_deinit(changed_page_bitmap);
+ }
+ }
+
+ if (!backup_start()) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* read the latest checkpoint lsn */
+ latest_cp = 0;
+ {
+ log_group_t* max_cp_group;
+ ulint max_cp_field;
+ ulint err;
+
+ mutex_enter(&log_sys->mutex);
+
+ err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
+
+ if (err != DB_SUCCESS) {
+ msg("xtrabackup: Error: recv_find_max_checkpoint() failed.\n");
+ mutex_exit(&log_sys->mutex);
+ goto skip_last_cp;
+ }
+
+ log_group_read_checkpoint_info(max_cp_group, max_cp_field);
+
+ xtrabackup_choose_lsn_offset(checkpoint_lsn_start);
+
+ latest_cp = mach_read_from_8(log_sys->checkpoint_buf +
+ LOG_CHECKPOINT_LSN);
+
+ mutex_exit(&log_sys->mutex);
+
+ msg("xtrabackup: The latest check point (for incremental): "
+ "'" LSN_PF "'\n", latest_cp);
+ }
+skip_last_cp:
+ /* stop log_copying_thread */
+ log_copying = FALSE;
+ os_event_set(log_copying_stop);
+ msg("xtrabackup: Stopping log copying thread.\n");
+ while (log_copying_running) {
+ msg(".");
+ os_thread_sleep(200000); /*0.2 sec*/
+ }
+ msg("\n");
+
+ os_event_free(log_copying_stop);
+ if (ds_close(dst_log_file)) {
+ exit(EXIT_FAILURE);
+ }
+
+ if(!xtrabackup_incremental) {
+ strcpy(metadata_type, "full-backuped");
+ metadata_from_lsn = 0;
+ } else {
+ strcpy(metadata_type, "incremental");
+ metadata_from_lsn = incremental_lsn;
+ }
+ metadata_to_lsn = latest_cp;
+ metadata_last_lsn = log_copy_scanned_lsn;
+
+ if (!xtrabackup_stream_metadata(ds_meta)) {
+ msg("xtrabackup: Error: failed to stream metadata.\n");
+ exit(EXIT_FAILURE);
+ }
+ if (xtrabackup_extra_lsndir) {
+ char filename[FN_REFLEN];
+
+ sprintf(filename, "%s/%s", xtrabackup_extra_lsndir,
+ XTRABACKUP_METADATA_FILENAME);
+ if (!xtrabackup_write_metadata(filename)) {
+ msg("xtrabackup: Error: failed to write metadata "
+ "to '%s'.\n", filename);
+ exit(EXIT_FAILURE);
+ }
+
+ }
+
+ if (!backup_finish()) {
+ exit(EXIT_FAILURE);
+ }
+
+ xtrabackup_destroy_datasinks();
+
+ if (wait_throttle) {
+ /* wait for io_watching_thread completion */
+ while (io_watching_thread_running) {
+ os_thread_sleep(1000000);
+ }
+ os_event_free(wait_throttle);
+ wait_throttle = NULL;
+ }
+
+ msg("xtrabackup: Transaction log of lsn (" LSN_PF ") to (" LSN_PF
+ ") was copied.\n", checkpoint_lsn_start, log_copy_scanned_lsn);
+ xb_filters_free();
+
+ xb_data_files_close();
+
+ /* Make sure that the latest checkpoint made it to xtrabackup_logfile */
+ if (latest_cp > log_copy_scanned_lsn) {
+ msg("xtrabackup: error: last checkpoint LSN (" LSN_PF
+ ") is larger than last copied LSN (" LSN_PF ").\n",
+ latest_cp, log_copy_scanned_lsn);
+ exit(EXIT_FAILURE);
+ }
+}
+
+/* ================= stats ================= */
+static my_bool
+xtrabackup_stats_level(
+ dict_index_t* index,
+ ulint level)
+{
+ ulint space;
+ page_t* page;
+
+ rec_t* node_ptr;
+
+ ulint right_page_no;
+
+ page_cur_t cursor;
+
+ mtr_t mtr;
+ mem_heap_t* heap = mem_heap_create(256);
+
+ ulint* offsets = NULL;
+
+ ulonglong n_pages, n_pages_extern;
+ ulonglong sum_data, sum_data_extern;
+ ulonglong n_recs;
+ ulint page_size;
+ buf_block_t* block;
+ ulint zip_size;
+
+ n_pages = sum_data = n_recs = 0;
+ n_pages_extern = sum_data_extern = 0;
+
+
+ if (level == 0)
+ fprintf(stdout, " leaf pages: ");
+ else
+ fprintf(stdout, " level %lu pages: ", level);
+
+ mtr_start(&mtr);
+
+ mtr_x_lock(&(index->lock), &mtr);
+ block = btr_root_block_get(index, RW_X_LATCH, &mtr);
+ page = buf_block_get_frame(block);
+
+ space = page_get_space_id(page);
+ zip_size = fil_space_get_zip_size(space);
+
+ while (level != btr_page_get_level(page, &mtr)) {
+
+ ut_a(space == buf_block_get_space(block));
+ ut_a(space == page_get_space_id(page));
+ ut_a(!page_is_leaf(page));
+
+ page_cur_set_before_first(block, &cursor);
+ page_cur_move_to_next(&cursor);
+
+ node_ptr = page_cur_get_rec(&cursor);
+ offsets = rec_get_offsets(node_ptr, index, offsets,
+ ULINT_UNDEFINED, &heap);
+ block = btr_node_ptr_get_child(node_ptr, index, offsets, &mtr);
+ page = buf_block_get_frame(block);
+ }
+
+loop:
+ mem_heap_empty(heap);
+ offsets = NULL;
+ mtr_x_lock(&(index->lock), &mtr);
+
+ right_page_no = btr_page_get_next(page, &mtr);
+
+
+ /*=================================*/
+ //fprintf(stdout, "%lu ", (ulint) buf_frame_get_page_no(page));
+
+ n_pages++;
+ sum_data += page_get_data_size(page);
+ n_recs += page_get_n_recs(page);
+
+
+ if (level == 0) {
+ page_cur_t cur;
+ ulint n_fields;
+ ulint i;
+ mem_heap_t* local_heap = NULL;
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ ulint* local_offsets = offsets_;
+
+ *offsets_ = (sizeof offsets_) / sizeof *offsets_;
+
+ page_cur_set_before_first(block, &cur);
+ page_cur_move_to_next(&cur);
+
+ for (;;) {
+ if (page_cur_is_after_last(&cur)) {
+ break;
+ }
+
+ local_offsets = rec_get_offsets(cur.rec, index, local_offsets,
+ ULINT_UNDEFINED, &local_heap);
+ n_fields = rec_offs_n_fields(local_offsets);
+
+ for (i = 0; i < n_fields; i++) {
+ if (rec_offs_nth_extern(local_offsets, i)) {
+ page_t* local_page;
+ ulint space_id;
+ ulint page_no;
+ ulint offset;
+ byte* blob_header;
+ ulint part_len;
+ mtr_t local_mtr;
+ ulint local_len;
+ byte* data;
+ buf_block_t* local_block;
+
+ data = rec_get_nth_field(cur.rec, local_offsets, i, &local_len);
+
+ ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE);
+ local_len -= BTR_EXTERN_FIELD_REF_SIZE;
+
+ space_id = mach_read_from_4(data + local_len + BTR_EXTERN_SPACE_ID);
+ page_no = mach_read_from_4(data + local_len + BTR_EXTERN_PAGE_NO);
+ offset = mach_read_from_4(data + local_len + BTR_EXTERN_OFFSET);
+
+ if (offset != FIL_PAGE_DATA)
+					msg("\nWarning: several records may share the same external page.\n");
+
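+				/* Walk the chain of BLOB pages: each page
+				header stores the length of the data held on
+				that page and the page number of the next
+				BLOB page; FIL_NULL terminates the chain. */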
+ for (;;) {
+ mtr_start(&local_mtr);
+
+ local_block = btr_block_get(space_id, zip_size, page_no, RW_S_LATCH, index, &local_mtr);
+ local_page = buf_block_get_frame(local_block);
+ blob_header = local_page + offset;
+#define BTR_BLOB_HDR_PART_LEN 0
+#define BTR_BLOB_HDR_NEXT_PAGE_NO 4
+ //part_len = btr_blob_get_part_len(blob_header);
+ part_len = mach_read_from_4(blob_header + BTR_BLOB_HDR_PART_LEN);
+
+ //page_no = btr_blob_get_next_page_no(blob_header);
+ page_no = mach_read_from_4(blob_header + BTR_BLOB_HDR_NEXT_PAGE_NO);
+
+ offset = FIL_PAGE_DATA;
+
+
+
+
+ /*=================================*/
+ //fprintf(stdout, "[%lu] ", (ulint) buf_frame_get_page_no(page));
+
+ n_pages_extern++;
+ sum_data_extern += part_len;
+
+
+ mtr_commit(&local_mtr);
+
+ if (page_no == FIL_NULL)
+ break;
+ }
+ }
+ }
+
+ page_cur_move_to_next(&cur);
+ }
+ }
+
+
+
+
+ mtr_commit(&mtr);
+ if (right_page_no != FIL_NULL) {
+ mtr_start(&mtr);
+ block = btr_block_get(space, zip_size, right_page_no,
+ RW_X_LATCH, index, &mtr);
+ page = buf_block_get_frame(block);
+		msg("xtrabackup: `%s` is not a fully qualified name.\n", name);
+ }
+ mem_heap_free(heap);
+
+ if (zip_size) {
+ page_size = zip_size;
+ } else {
+ page_size = UNIV_PAGE_SIZE;
+ }
+
+ if (level == 0)
+ fprintf(stdout, "recs=%llu, ", n_recs);
+
+ fprintf(stdout, "pages=%llu, data=%llu bytes, data/pages=%lld%%",
+ n_pages, sum_data,
+ ((sum_data * 100)/ page_size)/n_pages);
+
+
+ if (level == 0 && n_pages_extern) {
+ putc('\n', stdout);
+ /* also scan blob pages*/
+ fprintf(stdout, " external pages: ");
+
+ fprintf(stdout, "pages=%llu, data=%llu bytes, data/pages=%lld%%",
+ n_pages_extern, sum_data_extern,
+ ((sum_data_extern * 100)/ page_size)/n_pages_extern);
+ }
+
+ putc('\n', stdout);
+
+ if (level > 0) {
+ xtrabackup_stats_level(index, level - 1);
+ }
+
+ return(TRUE);
+}
+
+static void
+xtrabackup_stats_func(void)
+{
+ ulint n;
+
+ /* cd to datadir */
+
+ if (my_setwd(mysql_real_data_home,MYF(MY_WME)))
+ {
+ msg("xtrabackup: cannot my_setwd %s\n", mysql_real_data_home);
+ exit(EXIT_FAILURE);
+ }
+ msg("xtrabackup: cd to %s\n", mysql_real_data_home);
+
+ mysql_data_home= mysql_data_home_buff;
+ mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
+ mysql_data_home[1]=0;
+
+ /* set read only */
+ srv_read_only_mode = TRUE;
+
+ /* initialize components */
+ if(innodb_init_param())
+ exit(EXIT_FAILURE);
+
+ /* Check if the log files have been created, otherwise innodb_init()
+ will crash when called with srv_read_only == TRUE */
+ for (n = 0; n < srv_n_log_files; n++) {
+ char logname[FN_REFLEN];
+ ibool exists;
+ os_file_type_t type;
+
+ snprintf(logname, sizeof(logname), "%s%c%s%lu",
+ srv_log_group_home_dir, SRV_PATH_SEPARATOR,
+ "ib_logfile", (ulong) n);
+ srv_normalize_path_for_win(logname);
+
+ if (!os_file_status(logname, &exists, &type) || !exists ||
+ type != OS_FILE_TYPE_FILE) {
+ msg("xtrabackup: Error: "
+ "Cannot find log file %s.\n", logname);
+ msg("xtrabackup: Error: "
+ "to use the statistics feature, you need a "
+ "clean copy of the database including "
+ "correctly sized log files, so you need to "
+ "execute with --prepare twice to use this "
+ "functionality on a backup.\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ msg("xtrabackup: Starting 'read-only' InnoDB instance to gather "
+ "index statistics.\n"
+ "xtrabackup: Using %lld bytes for buffer pool (set by "
+ "--use-memory parameter)\n", xtrabackup_use_memory);
+
+ if(innodb_init())
+ exit(EXIT_FAILURE);
+
+ xb_filters_init();
+
+ fprintf(stdout, "\n\n<INDEX STATISTICS>\n");
+
+ /* gather stats */
+
+ {
+ dict_table_t* sys_tables;
+ dict_index_t* sys_index;
+ dict_table_t* table;
+ btr_pcur_t pcur;
+ rec_t* rec;
+ byte* field;
+ ulint len;
+ mtr_t mtr;
+
+ /* Enlarge the fatal semaphore wait timeout during the InnoDB table
+ monitor printout */
+
+ os_increment_counter_by_amount(server_mutex,
+ srv_fatal_semaphore_wait_threshold,
+ 72000);
+
+ mutex_enter(&(dict_sys->mutex));
+
+ mtr_start(&mtr);
+
+ sys_tables = dict_table_get_low("SYS_TABLES");
+ sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
+
+ btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
+ TRUE, 0, &mtr);
+loop:
+ btr_pcur_move_to_next_user_rec(&pcur, &mtr);
+
+ rec = btr_pcur_get_rec(&pcur);
+
+ if (!btr_pcur_is_on_user_rec(&pcur))
+ {
+ /* end of index */
+
+ btr_pcur_close(&pcur);
+ mtr_commit(&mtr);
+
+ mutex_exit(&(dict_sys->mutex));
+
+ /* Restore the fatal semaphore wait timeout */
+ os_increment_counter_by_amount(server_mutex,
+ srv_fatal_semaphore_wait_threshold,
+ -72000);
+
+ goto end;
+ }
+
+ field = rec_get_nth_field_old(rec, 0, &len);
+
+ if (!rec_get_deleted_flag(rec, 0)) {
+
+ /* We found one */
+
+ char* table_name = mem_strdupl((char*) field, len);
+
+ btr_pcur_store_position(&pcur, &mtr);
+
+ mtr_commit(&mtr);
+
+ table = dict_table_get_low(table_name);
+ mem_free(table_name);
+
+ if (table && check_if_skip_table(table->name))
+ goto skip;
+
+
+ if (table == NULL) {
+ fputs("InnoDB: Failed to load table ", stderr);
+ ut_print_namel(stderr, NULL, TRUE, (char*) field, len);
+ putc('\n', stderr);
+ } else {
+ dict_index_t* index;
+
+ /* The table definition was corrupt if there
+ is no index */
+
+ if (dict_table_get_first_index(table)) {
+ dict_stats_update_transient(table);
+ }
+
+ //dict_table_print_low(table);
+
+ index = UT_LIST_GET_FIRST(table->indexes);
+ while (index != NULL) {
+{
+ ib_int64_t n_vals;
+
+ if (index->n_user_defined_cols > 0) {
+ n_vals = index->stat_n_diff_key_vals[
+ index->n_user_defined_cols];
+ } else {
+ n_vals = index->stat_n_diff_key_vals[1];
+ }
+
+ fprintf(stdout,
+ " table: %s, index: %s, space id: %lu, root page: %lu"
+ ", zip size: %lu"
+ "\n estimated statistics in dictionary:\n"
+ " key vals: %lu, leaf pages: %lu, size pages: %lu\n"
+ " real statistics:\n",
+ table->name, index->name,
+ (ulong) index->space,
+ (ulong) index->page,
+ (ulong) fil_space_get_zip_size(index->space),
+ (ulong) n_vals,
+ (ulong) index->stat_n_leaf_pages,
+ (ulong) index->stat_index_size);
+
+ {
+ mtr_t local_mtr;
+ page_t* root;
+ ulint page_level;
+
+ mtr_start(&local_mtr);
+
+ mtr_x_lock(&(index->lock), &local_mtr);
+ root = btr_root_get(index, &local_mtr);
+ page_level = btr_page_get_level(root, &local_mtr);
+
+ xtrabackup_stats_level(index, page_level);
+
+ mtr_commit(&local_mtr);
+ }
+
+ putc('\n', stdout);
+}
+ index = UT_LIST_GET_NEXT(indexes, index);
+ }
+ }
+
+skip:
+ mtr_start(&mtr);
+
+ btr_pcur_restore_position(BTR_SEARCH_LEAF, &pcur, &mtr);
+ }
+
+ goto loop;
+ }
+
+end:
+ putc('\n', stdout);
+
+ fflush(stdout);
+
+ xb_filters_free();
+
+ /* shutdown InnoDB */
+ if(innodb_end())
+ exit(EXIT_FAILURE);
+}
+
+/* ================= prepare ================= */
+
+static my_bool
+xtrabackup_init_temp_log(void)
+{
+ os_file_t src_file = XB_FILE_UNDEFINED;
+ char src_path[FN_REFLEN];
+ char dst_path[FN_REFLEN];
+ ibool success;
+
+ ulint field;
+ byte log_buf[UNIV_PAGE_SIZE_MAX * 128]; /* 2 MB */
+
+ ib_int64_t file_size;
+
+ lsn_t max_no;
+ lsn_t max_lsn;
+ lsn_t checkpoint_no;
+
+ ulint fold;
+
+ bool checkpoint_found;
+
+ max_no = 0;
+
+ if (!xb_init_log_block_size()) {
+ goto error;
+ }
+
+ if(!xtrabackup_incremental_dir) {
+ sprintf(dst_path, "%s/ib_logfile0", xtrabackup_target_dir);
+ sprintf(src_path, "%s/%s", xtrabackup_target_dir,
+ XB_LOG_FILENAME);
+ } else {
+ sprintf(dst_path, "%s/ib_logfile0", xtrabackup_incremental_dir);
+ sprintf(src_path, "%s/%s", xtrabackup_incremental_dir,
+ XB_LOG_FILENAME);
+ }
+
+ srv_normalize_path_for_win(dst_path);
+ srv_normalize_path_for_win(src_path);
+retry:
+ src_file = os_file_create_simple_no_error_handling(0, src_path,
+ OS_FILE_OPEN,
+ OS_FILE_READ_WRITE,
+ &success);
+ if (!success) {
+ /* The following call prints an error message */
+ os_file_get_last_error(TRUE);
+
+		msg("xtrabackup: Warning: cannot open %s. Will try to find it.\n",
+ src_path);
+
+ /* check if ib_logfile0 may be xtrabackup_logfile */
+ src_file = os_file_create_simple_no_error_handling(0, dst_path,
+ OS_FILE_OPEN,
+ OS_FILE_READ_WRITE,
+ &success);
+ if (!success) {
+ os_file_get_last_error(TRUE);
+ msg(" xtrabackup: Fatal error: cannot find %s.\n",
+ src_path);
+
+ goto error;
+ }
+
+ success = os_file_read(src_file, log_buf, 0,
+ LOG_FILE_HDR_SIZE);
+ if (!success) {
+ goto error;
+ }
+
+ if ( ut_memcmp(log_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
+ (byte*)"xtrabkup", (sizeof "xtrabkup") - 1) == 0) {
+ msg(" xtrabackup: 'ib_logfile0' seems to be "
+			    "'xtrabackup_logfile'. Will retry.\n");
+
+ os_file_close(src_file);
+ src_file = XB_FILE_UNDEFINED;
+
+ /* rename and try again */
+ success = os_file_rename(0, dst_path, src_path);
+ if (!success) {
+ goto error;
+ }
+
+ goto retry;
+ }
+
+ msg(" xtrabackup: Fatal error: cannot find %s.\n",
+ src_path);
+
+ os_file_close(src_file);
+ src_file = XB_FILE_UNDEFINED;
+
+ goto error;
+ }
+
+ file_size = os_file_get_size(src_file);
+
+
+	/* TODO: We should skip the following modifications if this is not the first time. */
+
+ /* read log file header */
+ success = os_file_read(src_file, log_buf, 0, LOG_FILE_HDR_SIZE);
+ if (!success) {
+ goto error;
+ }
+
+ if ( ut_memcmp(log_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
+ (byte*)"xtrabkup", (sizeof "xtrabkup") - 1) != 0 ) {
+ msg("xtrabackup: notice: xtrabackup_logfile was already used "
+		    "for '--prepare'.\n");
+ goto skip_modify;
+ } else {
+ /* clear it later */
+ //memset(log_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
+ // ' ', 4);
+ }
+
+ checkpoint_found = false;
+
+ /* read last checkpoint lsn */
+ for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2;
+ field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) {
+ if (!recv_check_cp_is_consistent(const_cast<const byte *>
+ (log_buf + field)))
+ goto not_consistent;
+
+ checkpoint_no = mach_read_from_8(log_buf + field +
+ LOG_CHECKPOINT_NO);
+
+ if (checkpoint_no >= max_no) {
+
+ max_no = checkpoint_no;
+ max_lsn = mach_read_from_8(log_buf + field +
+ LOG_CHECKPOINT_LSN);
+ checkpoint_found = true;
+ }
+not_consistent:
+ ;
+ }
+
+ if (!checkpoint_found) {
+ msg("xtrabackup: No valid checkpoint found.\n");
+ goto error;
+ }
+
+
+	/* Both checkpoint areas need to be overwritten. */
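+	/* The checkpoint offset written below is the LSN's byte position
+	within the single renamed log file: LOG_FILE_HDR_SIZE plus the LSN's
+	distance from the previous OS_FILE_LOG_BLOCK_SIZE boundary. */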
+ mach_write_to_8(log_buf + LOG_CHECKPOINT_1 + LOG_CHECKPOINT_LSN,
+ max_lsn);
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_1
+ + LOG_CHECKPOINT_OFFSET_LOW32,
+ LOG_FILE_HDR_SIZE +
+ (max_lsn -
+ ut_uint64_align_down(max_lsn,
+ OS_FILE_LOG_BLOCK_SIZE)));
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_1
+ + LOG_CHECKPOINT_OFFSET_HIGH32, 0);
+ fold = ut_fold_binary(log_buf + LOG_CHECKPOINT_1, LOG_CHECKPOINT_CHECKSUM_1);
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_1 + LOG_CHECKPOINT_CHECKSUM_1, fold);
+
+ fold = ut_fold_binary(log_buf + LOG_CHECKPOINT_1 + LOG_CHECKPOINT_LSN,
+ LOG_CHECKPOINT_CHECKSUM_2 - LOG_CHECKPOINT_LSN);
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_1 + LOG_CHECKPOINT_CHECKSUM_2, fold);
+
+ mach_write_to_8(log_buf + LOG_CHECKPOINT_2 + LOG_CHECKPOINT_LSN,
+ max_lsn);
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_2
+ + LOG_CHECKPOINT_OFFSET_LOW32,
+ LOG_FILE_HDR_SIZE +
+ (max_lsn -
+ ut_uint64_align_down(max_lsn,
+ OS_FILE_LOG_BLOCK_SIZE)));
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_2
+ + LOG_CHECKPOINT_OFFSET_HIGH32, 0);
+ fold = ut_fold_binary(log_buf + LOG_CHECKPOINT_2, LOG_CHECKPOINT_CHECKSUM_1);
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_2 + LOG_CHECKPOINT_CHECKSUM_1, fold);
+
+ fold = ut_fold_binary(log_buf + LOG_CHECKPOINT_2 + LOG_CHECKPOINT_LSN,
+ LOG_CHECKPOINT_CHECKSUM_2 - LOG_CHECKPOINT_LSN);
+ mach_write_to_4(log_buf + LOG_CHECKPOINT_2 + LOG_CHECKPOINT_CHECKSUM_2, fold);
+
+
+ success = os_file_write(src_path, src_file, log_buf, 0,
+ LOG_FILE_HDR_SIZE);
+ if (!success) {
+ goto error;
+ }
+
+ /* expand file size (9/8) and align to UNIV_PAGE_SIZE_MAX */
+
+ if (file_size % UNIV_PAGE_SIZE_MAX) {
+ memset(log_buf, 0, UNIV_PAGE_SIZE_MAX);
+ success = os_file_write(src_path, src_file, log_buf,
+ file_size,
+ UNIV_PAGE_SIZE_MAX
+ - (ulint) (file_size
+ % UNIV_PAGE_SIZE_MAX));
+ if (!success) {
+ goto error;
+ }
+
+ file_size = os_file_get_size(src_file);
+ }
+
+ /* TODO: We should judge whether the file is already expanded or not... */
+ {
+ ulint expand;
+
+ memset(log_buf, 0, UNIV_PAGE_SIZE_MAX * 128);
+ expand = (ulint) (file_size / UNIV_PAGE_SIZE_MAX / 8);
+
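+		/* Grow the file by 1/8 of its current size, appending zeroes
+		in chunks of at most 128 * UNIV_PAGE_SIZE_MAX bytes (the size
+		of log_buf). */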
+ for (; expand > 128; expand -= 128) {
+ success = os_file_write(src_path, src_file, log_buf,
+ file_size,
+ UNIV_PAGE_SIZE_MAX * 128);
+ if (!success) {
+ goto error;
+ }
+ file_size += UNIV_PAGE_SIZE_MAX * 128;
+ }
+
+ if (expand) {
+ success = os_file_write(src_path, src_file, log_buf,
+ file_size,
+ expand * UNIV_PAGE_SIZE_MAX);
+ if (!success) {
+ goto error;
+ }
+ file_size += UNIV_PAGE_SIZE_MAX * expand;
+ }
+ }
+
+ /* make larger than 2MB */
+ if (file_size < 2*1024*1024L) {
+ memset(log_buf, 0, UNIV_PAGE_SIZE_MAX);
+ while (file_size < 2*1024*1024L) {
+ success = os_file_write(src_path, src_file, log_buf,
+ file_size,
+ UNIV_PAGE_SIZE_MAX);
+ if (!success) {
+ goto error;
+ }
+ file_size += UNIV_PAGE_SIZE_MAX;
+ }
+ file_size = os_file_get_size(src_file);
+ }
+
+ msg("xtrabackup: xtrabackup_logfile detected: size=" INT64PF ", "
+ "start_lsn=(" LSN_PF ")\n", file_size, max_lsn);
+
+ os_file_close(src_file);
+ src_file = XB_FILE_UNDEFINED;
+
+ /* fake InnoDB */
+ innobase_log_files_in_group_save = innobase_log_files_in_group;
+ srv_log_group_home_dir_save = srv_log_group_home_dir;
+ innobase_log_file_size_save = innobase_log_file_size;
+
+ srv_log_group_home_dir = NULL;
+ innobase_log_file_size = file_size;
+ innobase_log_files_in_group = 1;
+
+ srv_thread_concurrency = 0;
+
+ /* rename 'xtrabackup_logfile' to 'ib_logfile0' */
+ success = os_file_rename(0, src_path, dst_path);
+ if (!success) {
+ goto error;
+ }
+ xtrabackup_logfile_is_renamed = TRUE;
+
+ return(FALSE);
+
+skip_modify:
+ os_file_close(src_file);
+ src_file = XB_FILE_UNDEFINED;
+ return(FALSE);
+
+error:
+ if (src_file != XB_FILE_UNDEFINED)
+ os_file_close(src_file);
+ msg("xtrabackup: Error: xtrabackup_init_temp_log() failed.\n");
+ return(TRUE); /*ERROR*/
+}
+
+/***********************************************************************
+Generates the path to the meta file from a given path to an incremental .delta
+file by replacing the trailing ".delta" with ".meta", or returns an error if
+'delta_path' does not end with the ".delta" suffix.
+@return TRUE on success, FALSE on error. */
+static
+ibool
+get_meta_path(
+ const char *delta_path, /* in: path to a .delta file */
+ char *meta_path) /* out: path to the corresponding .meta
+ file */
+{
+ size_t len = strlen(delta_path);
+
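+	/* For example, "./db/t1.ibd.delta" becomes "./db/t1.ibd.meta"
+	(XB_DELTA_INFO_SUFFIX). */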
+ if (len <= 6 || strcmp(delta_path + len - 6, ".delta")) {
+ return FALSE;
+ }
+ memcpy(meta_path, delta_path, len - 6);
+ strcpy(meta_path + len - 6, XB_DELTA_INFO_SUFFIX);
+
+ return TRUE;
+}
+
+/****************************************************************//**
+Create a new tablespace on disk and return the handle to its opened
+file. Code adapted from fil_create_new_single_table_tablespace with
+the main difference that only the disk file is created without updating
+the InnoDB in-memory dictionary data structures.
+
+@return TRUE on success, FALSE on error. */
+static
+ibool
+xb_space_create_file(
+/*==================*/
+ const char* path, /*!<in: path to tablespace */
+ ulint space_id, /*!<in: space id */
+ ulint flags __attribute__((unused)),/*!<in: tablespace
+ flags */
+ os_file_t* file) /*!<out: file handle */
+{
+ ibool ret;
+ byte* buf;
+ byte* page;
+
+ *file = os_file_create_simple_no_error_handling(0, path, OS_FILE_CREATE,
+ OS_FILE_READ_WRITE,
+ &ret);
+ if (!ret) {
+ msg("xtrabackup: cannot create file %s\n", path);
+ return ret;
+ }
+
+ ret = os_file_set_size(path, *file,
+ FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE);
+ if (!ret) {
+ msg("xtrabackup: cannot set size for file %s\n", path);
+ os_file_close(*file);
+ os_file_delete(0, path);
+ return ret;
+ }
+
+ buf = static_cast<byte *>(ut_malloc(3 * UNIV_PAGE_SIZE));
+ /* Align the memory for file i/o if we might have O_DIRECT set */
+ page = static_cast<byte *>(ut_align(buf, UNIV_PAGE_SIZE));
+
+ memset(page, '\0', UNIV_PAGE_SIZE);
+
+ fsp_header_init_fields(page, space_id, flags);
+ mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id);
+
+ if (!fsp_flags_is_compressed(flags)) {
+ buf_flush_init_for_writing(page, NULL, 0);
+
+ ret = os_file_write(path, *file, page, 0, UNIV_PAGE_SIZE);
+ }
+ else {
+ page_zip_des_t page_zip;
+ ulint zip_size;
+
+ zip_size = fsp_flags_get_zip_size(flags);
+ page_zip_set_size(&page_zip, zip_size);
+ page_zip.data = page + UNIV_PAGE_SIZE;
+ fprintf(stderr, "zip_size = %lu\n", zip_size);
+
+#ifdef UNIV_DEBUG
+ page_zip.m_start =
+#endif /* UNIV_DEBUG */
+ page_zip.m_end = page_zip.m_nonempty =
+ page_zip.n_blobs = 0;
+
+ buf_flush_init_for_writing(page, &page_zip, 0);
+
+ ret = os_file_write(path, *file, page_zip.data, 0,
+ zip_size);
+ }
+
+ ut_free(buf);
+
+ if (!ret) {
+ msg("xtrabackup: could not write the first page to %s\n",
+ path);
+ os_file_close(*file);
+ os_file_delete(0, path);
+ return ret;
+ }
+
+ return TRUE;
+}
+
+/***********************************************************************
+Searches the given directory for a tablespace file matching the given .delta
+file and space_id. When a matching tablespace is found, renames it to match
+the name of the .delta file. If a tablespace with a matching name but a
+mismatching ID exists, renames it to xtrabackup_tmp_#ID.ibd. If there is no
+matching file, creates a new tablespace.
+@return file handle of the matched or created file */
+static
+os_file_t
+xb_delta_open_matching_space(
+ const char* dbname, /* in: path to destination database dir */
+ const char* name, /* in: name of delta file (without .delta) */
+ ulint space_id, /* in: space id of delta file */
+ ulint zip_size, /* in: zip_size of tablespace */
+ char* real_name, /* out: full path of destination file */
+ size_t real_name_len, /* out: buffer size for real_name */
+ ibool* success) /* out: indicates error. TRUE = success */
+{
+ char dest_dir[FN_REFLEN];
+ char dest_space_name[FN_REFLEN];
+ ibool ok;
+ fil_space_t* fil_space;
+ os_file_t file = 0;
+ ulint tablespace_flags;
+ xb_filter_entry_t* table;
+
+ ut_a(dbname != NULL ||
+ !fil_is_user_tablespace_id(space_id) ||
+ space_id == ULINT_UNDEFINED);
+
+ *success = FALSE;
+
+ if (dbname) {
+ snprintf(dest_dir, FN_REFLEN, "%s/%s",
+ xtrabackup_target_dir, dbname);
+ srv_normalize_path_for_win(dest_dir);
+
+ snprintf(dest_space_name, FN_REFLEN, "%s/%s", dbname, name);
+ } else {
+ snprintf(dest_dir, FN_REFLEN, "%s", xtrabackup_target_dir);
+ srv_normalize_path_for_win(dest_dir);
+
+ snprintf(dest_space_name, FN_REFLEN, "%s", name);
+ }
+
+ snprintf(real_name, real_name_len,
+ "%s/%s",
+ xtrabackup_target_dir, dest_space_name);
+ srv_normalize_path_for_win(real_name);
+ /* Truncate ".ibd" */
+ dest_space_name[strlen(dest_space_name) - 4] = '\0';
+
+ /* Create the database directory if it doesn't exist yet */
+ if (!os_file_create_directory(dest_dir, FALSE)) {
+ msg("xtrabackup: error: cannot create dir %s\n", dest_dir);
+ return file;
+ }
+
+ if (!fil_is_user_tablespace_id(space_id)) {
+ goto found;
+ }
+
+ /* remember space name for further reference */
+ table = static_cast<xb_filter_entry_t *>
+ (ut_malloc(sizeof(xb_filter_entry_t) +
+ strlen(dest_space_name) + 1));
+
+ table->name = ((char*)table) + sizeof(xb_filter_entry_t);
+ strcpy(table->name, dest_space_name);
+ HASH_INSERT(xb_filter_entry_t, name_hash, inc_dir_tables_hash,
+ ut_fold_string(table->name), table);
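+	/* The entries collected here are later consulted by
+	rm_if_not_found(), which deletes data files that have no matching
+	entry in inc_dir_tables_hash. */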
+
+ mutex_enter(&fil_system->mutex);
+ fil_space = fil_space_get_by_name(dest_space_name);
+ mutex_exit(&fil_system->mutex);
+
+ if (fil_space != NULL) {
+ if (fil_space->id == space_id || space_id == ULINT_UNDEFINED) {
+ /* we found matching space */
+ goto found;
+ } else {
+
+ char tmpname[FN_REFLEN];
+
+ snprintf(tmpname, FN_REFLEN, "%s/xtrabackup_tmp_#%lu",
+ dbname, fil_space->id);
+
+ msg("xtrabackup: Renaming %s to %s.ibd\n",
+ fil_space->name, tmpname);
+
+ if (!fil_rename_tablespace(NULL, fil_space->id,
+ tmpname, NULL))
+ {
+ msg("xtrabackup: Cannot rename %s to %s\n",
+ fil_space->name, tmpname);
+ goto exit;
+ }
+ }
+ }
+
+ if (space_id == ULINT_UNDEFINED)
+ {
+ msg("xtrabackup: Error: Cannot handle DDL operation on tablespace "
+ "%s\n", dest_space_name);
+ exit(EXIT_FAILURE);
+ }
+ mutex_enter(&fil_system->mutex);
+ fil_space = fil_space_get_by_id(space_id);
+ mutex_exit(&fil_system->mutex);
+ if (fil_space != NULL) {
+ char tmpname[FN_REFLEN];
+
+ strncpy(tmpname, dest_space_name, FN_REFLEN);
+
+ msg("xtrabackup: Renaming %s to %s\n",
+ fil_space->name, dest_space_name);
+
+ if (!fil_rename_tablespace(NULL, fil_space->id, tmpname,
+ NULL))
+ {
+ msg("xtrabackup: Cannot rename %s to %s\n",
+ fil_space->name, dest_space_name);
+ goto exit;
+ }
+
+ goto found;
+ }
+
+	/* No matching space found. Create a new one. */
+
+ if (!fil_space_create(dest_space_name, space_id, 0,
+ FIL_TABLESPACE)) {
+ msg("xtrabackup: Cannot create tablespace %s\n",
+ dest_space_name);
+ goto exit;
+ }
+
+ /* Calculate correct tablespace flags for compressed tablespaces. */
+ if (!zip_size || zip_size == ULINT_UNDEFINED) {
+ tablespace_flags = 0;
+ }
+ else {
+ tablespace_flags
+ = (get_bit_shift(zip_size >> PAGE_ZIP_MIN_SIZE_SHIFT
+ << 1)
+ << DICT_TF_ZSSIZE_SHIFT)
+ | DICT_TF_COMPACT
+ | (DICT_TF_FORMAT_ZIP << DICT_TF_FORMAT_SHIFT);
+ ut_a(dict_tf_get_zip_size(tablespace_flags)
+ == zip_size);
+ }
+ *success = xb_space_create_file(real_name, space_id, tablespace_flags,
+ &file);
+ goto exit;
+
+found:
+	/* open the file and return its handle */
+
+ file = os_file_create_simple_no_error_handling(0, real_name,
+ OS_FILE_OPEN,
+ OS_FILE_READ_WRITE,
+ &ok);
+
+ if (ok) {
+ *success = TRUE;
+ } else {
+ msg("xtrabackup: Cannot open file %s\n", real_name);
+ }
+
+exit:
+
+ return file;
+}
+
+/************************************************************************
+Applies a given .delta file to the corresponding data file.
+@return TRUE on success */
+static
+ibool
+xtrabackup_apply_delta(
+ const char* dirname, /* in: dir name of incremental */
+ const char* dbname, /* in: database name (ibdata: NULL) */
+ const char* filename, /* in: file name (not a path),
+ including the .delta extension */
+ void* /*data*/)
+{
+ os_file_t src_file = XB_FILE_UNDEFINED;
+ os_file_t dst_file = XB_FILE_UNDEFINED;
+ char src_path[FN_REFLEN];
+ char dst_path[FN_REFLEN];
+ char meta_path[FN_REFLEN];
+ char space_name[FN_REFLEN];
+ ibool success;
+
+ ibool last_buffer = FALSE;
+ ulint page_in_buffer;
+ ulint incremental_buffers = 0;
+
+ xb_delta_info_t info;
+ ulint page_size;
+ ulint page_size_shift;
+ byte* incremental_buffer_base = NULL;
+ byte* incremental_buffer;
+
+ size_t offset;
+
+ ut_a(xtrabackup_incremental);
+
+ if (dbname) {
+ snprintf(src_path, sizeof(src_path), "%s/%s/%s",
+ dirname, dbname, filename);
+ snprintf(dst_path, sizeof(dst_path), "%s/%s/%s",
+ xtrabackup_real_target_dir, dbname, filename);
+ } else {
+ snprintf(src_path, sizeof(src_path), "%s/%s",
+ dirname, filename);
+ snprintf(dst_path, sizeof(dst_path), "%s/%s",
+ xtrabackup_real_target_dir, filename);
+ }
+ dst_path[strlen(dst_path) - 6] = '\0';
+
+ strncpy(space_name, filename, FN_REFLEN);
+ space_name[strlen(space_name) - 6] = 0;
+
+ if (!get_meta_path(src_path, meta_path)) {
+ goto error;
+ }
+
+ srv_normalize_path_for_win(dst_path);
+ srv_normalize_path_for_win(src_path);
+ srv_normalize_path_for_win(meta_path);
+
+ if (!xb_read_delta_metadata(meta_path, &info)) {
+ goto error;
+ }
+
+ page_size = info.page_size;
+ page_size_shift = get_bit_shift(page_size);
+ msg("xtrabackup: page size for %s is %lu bytes\n",
+ src_path, page_size);
+ if (page_size_shift < 10 ||
+ page_size_shift > UNIV_PAGE_SIZE_SHIFT_MAX) {
+ msg("xtrabackup: error: invalid value of page_size "
+ "(%lu bytes) read from %s\n", page_size, meta_path);
+ goto error;
+ }
+
+ src_file = os_file_create_simple_no_error_handling(0, src_path,
+ OS_FILE_OPEN,
+ OS_FILE_READ_WRITE,
+ &success);
+ if (!success) {
+ os_file_get_last_error(TRUE);
+ msg("xtrabackup: error: cannot open %s\n", src_path);
+ goto error;
+ }
+
+ posix_fadvise(src_file, 0, 0, POSIX_FADV_SEQUENTIAL);
+
+ os_file_set_nocache(src_file, src_path, "OPEN");
+
+ dst_file = xb_delta_open_matching_space(
+ dbname, space_name, info.space_id, info.zip_size,
+ dst_path, sizeof(dst_path), &success);
+ if (!success) {
+ msg("xtrabackup: error: cannot open %s\n", dst_path);
+ goto error;
+ }
+
+ posix_fadvise(dst_file, 0, 0, POSIX_FADV_DONTNEED);
+
+ os_file_set_nocache(dst_file, dst_path, "OPEN");
+
+	/* allocate a buffer large enough for one incremental backup
+	delta cluster (page_size / 4 pages) */
+ incremental_buffer_base = static_cast<byte *>
+ (ut_malloc((UNIV_PAGE_SIZE_MAX / 4 + 1) *
+ UNIV_PAGE_SIZE_MAX));
+ incremental_buffer = static_cast<byte *>
+ (ut_align(incremental_buffer_base,
+ UNIV_PAGE_SIZE_MAX));
+
+ msg("Applying %s to %s...\n", src_path, dst_path);
+
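+	/* A .delta file is a sequence of "clusters". Each cluster starts
+	with a map page: the first 4 bytes are the magic "xtra" (or "XTRA"
+	for the last cluster), followed by 4-byte target page numbers
+	terminated by 0xFFFFFFFF. The remaining pages of the cluster hold
+	the page images to be written at those page numbers in the data
+	file. */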
+ while (!last_buffer) {
+ ulint cluster_header;
+
+ /* read to buffer */
+ /* first block of block cluster */
+ offset = ((incremental_buffers * (page_size / 4))
+ << page_size_shift);
+ success = os_file_read(src_file, incremental_buffer,
+ offset, page_size);
+ if (!success) {
+ goto error;
+ }
+
+ cluster_header = mach_read_from_4(incremental_buffer);
+ switch(cluster_header) {
+ case 0x78747261UL: /*"xtra"*/
+ break;
+ case 0x58545241UL: /*"XTRA"*/
+ last_buffer = TRUE;
+ break;
+ default:
+			msg("xtrabackup: error: %s does not seem to be "
+			    "a .delta file.\n", src_path);
+ goto error;
+ }
+
+ for (page_in_buffer = 1; page_in_buffer < page_size / 4;
+ page_in_buffer++) {
+ if (mach_read_from_4(incremental_buffer + page_in_buffer * 4)
+ == 0xFFFFFFFFUL)
+ break;
+ }
+
+ ut_a(last_buffer || page_in_buffer == page_size / 4);
+
+ /* read whole of the cluster */
+ success = os_file_read(src_file, incremental_buffer,
+ offset, page_in_buffer * page_size);
+ if (!success) {
+ goto error;
+ }
+
+ posix_fadvise(src_file, offset, page_in_buffer * page_size,
+ POSIX_FADV_DONTNEED);
+
+ for (page_in_buffer = 1; page_in_buffer < page_size / 4;
+ page_in_buffer++) {
+ ulint offset_on_page;
+
+ offset_on_page = mach_read_from_4(incremental_buffer + page_in_buffer * 4);
+
+ if (offset_on_page == 0xFFFFFFFFUL)
+ break;
+
+ success = os_file_write(dst_path, dst_file,
+ incremental_buffer +
+ page_in_buffer * page_size,
+ (offset_on_page <<
+ page_size_shift),
+ page_size);
+ if (!success) {
+ goto error;
+ }
+ }
+
+ incremental_buffers++;
+ }
+
+ if (incremental_buffer_base)
+ ut_free(incremental_buffer_base);
+ if (src_file != XB_FILE_UNDEFINED)
+ os_file_close(src_file);
+ if (dst_file != XB_FILE_UNDEFINED)
+ os_file_close(dst_file);
+ return TRUE;
+
+error:
+ if (incremental_buffer_base)
+ ut_free(incremental_buffer_base);
+ if (src_file != XB_FILE_UNDEFINED)
+ os_file_close(src_file);
+ if (dst_file != XB_FILE_UNDEFINED)
+ os_file_close(dst_file);
+ msg("xtrabackup: Error: xtrabackup_apply_delta(): "
+ "failed to apply %s to %s.\n", src_path, dst_path);
+ return FALSE;
+}
+
+/************************************************************************
+Callback to handle a datadir entry. A function of this type is called by
+xb_process_datadir() for each entry that matches the mask.
+@return should return TRUE on success */
+typedef ibool (*handle_datadir_entry_func_t)(
+/*=========================================*/
+ const char* data_home_dir, /*!<in: path to datadir */
+ const char* db_name, /*!<in: database name */
+ const char* file_name, /*!<in: file name with suffix */
+ void* arg); /*!<in: caller-provided data */
+
+/************************************************************************
+Callback to handle a datadir entry. Deletes the entry if it has no matching
+fil_space in the fil_system directory.
+@return FALSE if delete attempt was unsuccessful */
+static
+ibool
+rm_if_not_found(
+ const char* data_home_dir, /*!<in: path to datadir */
+ const char* db_name, /*!<in: database name */
+ const char* file_name, /*!<in: file name with suffix */
+ void* arg __attribute__((unused)))
+{
+ char name[FN_REFLEN];
+ xb_filter_entry_t* table;
+
+ snprintf(name, FN_REFLEN, "%s/%s", db_name, file_name);
+ /* Truncate ".ibd" */
+ name[strlen(name) - 4] = '\0';
+
+ HASH_SEARCH(name_hash, inc_dir_tables_hash, ut_fold_string(name),
+ xb_filter_entry_t*,
+ table, (void) 0,
+ !strcmp(table->name, name));
+
+ if (!table) {
+ snprintf(name, FN_REFLEN, "%s/%s/%s", data_home_dir,
+ db_name, file_name);
+ return os_file_delete(0, name);
+ }
+
+ return(TRUE);
+}
+
+/************************************************************************
+Enumerates files in the datadir (given by 'path') that match the provided
+suffix. The callback is called for each matching entry.
+@return FALSE if callback for some entry returned FALSE */
+static
+ibool
+xb_process_datadir(
+ const char* path, /*!<in: datadir path */
+ const char* suffix, /*!<in: suffix to match
+ against */
+ handle_datadir_entry_func_t func, /*!<in: callback */
+ void* data) /*!<in: additional argument for
+ callback */
+{
+ ulint ret;
+ char dbpath[FN_REFLEN];
+ os_file_dir_t dir;
+ os_file_dir_t dbdir;
+ os_file_stat_t dbinfo;
+ os_file_stat_t fileinfo;
+ ulint suffix_len;
+ dberr_t err = DB_SUCCESS;
+ static char current_dir[2];
+
+ current_dir[0] = FN_CURLIB;
+ current_dir[1] = 0;
+ srv_data_home = current_dir;
+
+ suffix_len = strlen(suffix);
+
+ /* datafile */
+ dbdir = os_file_opendir(path, FALSE);
+
+ if (dbdir != NULL) {
+ ret = fil_file_readdir_next_file(&err, path, dbdir,
+ &fileinfo);
+ while (ret == 0) {
+ if (fileinfo.type == OS_FILE_TYPE_DIR) {
+ goto next_file_item_1;
+ }
+
+ if (strlen(fileinfo.name) > suffix_len
+ && 0 == strcmp(fileinfo.name +
+ strlen(fileinfo.name) - suffix_len,
+ suffix)) {
+ if (!func(
+ path, NULL,
+ fileinfo.name, data))
+ {
+ return(FALSE);
+ }
+ }
+next_file_item_1:
+ ret = fil_file_readdir_next_file(&err,
+ path, dbdir,
+ &fileinfo);
+ }
+
+ os_file_closedir(dbdir);
+ } else {
+ msg("xtrabackup: Cannot open dir %s\n",
+ path);
+ }
+
+ /* single table tablespaces */
+ dir = os_file_opendir(path, FALSE);
+
+ if (dir == NULL) {
+ msg("xtrabackup: Cannot open dir %s\n",
+ path);
+ }
+
+ ret = fil_file_readdir_next_file(&err, path, dir,
+ &dbinfo);
+ while (ret == 0) {
+ if (dbinfo.type == OS_FILE_TYPE_FILE
+ || dbinfo.type == OS_FILE_TYPE_UNKNOWN) {
+
+ goto next_datadir_item;
+ }
+
+ sprintf(dbpath, "%s/%s", path,
+ dbinfo.name);
+ srv_normalize_path_for_win(dbpath);
+
+ dbdir = os_file_opendir(dbpath, FALSE);
+
+ if (dbdir != NULL) {
+
+ ret = fil_file_readdir_next_file(&err, dbpath, dbdir,
+ &fileinfo);
+ while (ret == 0) {
+
+ if (fileinfo.type == OS_FILE_TYPE_DIR) {
+
+ goto next_file_item_2;
+ }
+
+ if (strlen(fileinfo.name) > suffix_len
+ && 0 == strcmp(fileinfo.name +
+ strlen(fileinfo.name) -
+ suffix_len,
+ suffix)) {
+ /* The name ends in suffix; process
+ the file */
+ if (!func(
+ path,
+ dbinfo.name,
+ fileinfo.name, data))
+ {
+ return(FALSE);
+ }
+ }
+next_file_item_2:
+ ret = fil_file_readdir_next_file(&err,
+ dbpath, dbdir,
+ &fileinfo);
+ }
+
+ os_file_closedir(dbdir);
+ }
+next_datadir_item:
+ ret = fil_file_readdir_next_file(&err,
+ path,
+ dir, &dbinfo);
+ }
+
+ os_file_closedir(dir);
+
+ return(TRUE);
+}
+
+/************************************************************************
+Applies all .delta files from incremental_dir to the full backup.
+@return TRUE on success. */
+static
+ibool
+xtrabackup_apply_deltas()
+{
+ return xb_process_datadir(xtrabackup_incremental_dir, ".delta",
+ xtrabackup_apply_delta, NULL);
+}
+
+static my_bool
+xtrabackup_close_temp_log(my_bool clear_flag)
+{
+ os_file_t src_file = XB_FILE_UNDEFINED;
+ char src_path[FN_REFLEN];
+ char dst_path[FN_REFLEN];
+ ibool success;
+ byte log_buf[UNIV_PAGE_SIZE_MAX];
+
+ if (!xtrabackup_logfile_is_renamed)
+ return(FALSE);
+
+ /* rename 'ib_logfile0' to 'xtrabackup_logfile' */
+ if(!xtrabackup_incremental_dir) {
+ sprintf(dst_path, "%s/ib_logfile0", xtrabackup_target_dir);
+ sprintf(src_path, "%s/%s", xtrabackup_target_dir,
+ XB_LOG_FILENAME);
+ } else {
+ sprintf(dst_path, "%s/ib_logfile0", xtrabackup_incremental_dir);
+ sprintf(src_path, "%s/%s", xtrabackup_incremental_dir,
+ XB_LOG_FILENAME);
+ }
+
+ srv_normalize_path_for_win(dst_path);
+ srv_normalize_path_for_win(src_path);
+
+ success = os_file_rename(0, dst_path, src_path);
+ if (!success) {
+ goto error;
+ }
+ xtrabackup_logfile_is_renamed = FALSE;
+
+ if (!clear_flag)
+ return(FALSE);
+
+ /* clear LOG_FILE_WAS_CREATED_BY_HOT_BACKUP field */
+ src_file = os_file_create_simple_no_error_handling(0, src_path,
+ OS_FILE_OPEN,
+ OS_FILE_READ_WRITE,
+ &success);
+ if (!success) {
+ goto error;
+ }
+
+ success = os_file_read(src_file, log_buf, 0, LOG_FILE_HDR_SIZE);
+ if (!success) {
+ goto error;
+ }
+
+ memset(log_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, ' ', 4);
+
+ success = os_file_write(src_path, src_file, log_buf, 0,
+ LOG_FILE_HDR_SIZE);
+ if (!success) {
+ goto error;
+ }
+
+ os_file_close(src_file);
+ src_file = XB_FILE_UNDEFINED;
+
+ innobase_log_files_in_group = innobase_log_files_in_group_save;
+ srv_log_group_home_dir = srv_log_group_home_dir_save;
+ innobase_log_file_size = innobase_log_file_size_save;
+
+ return(FALSE);
+error:
+ if (src_file != XB_FILE_UNDEFINED)
+ os_file_close(src_file);
+ msg("xtrabackup: Error: xtrabackup_close_temp_log() failed.\n");
+ return(TRUE); /*ERROR*/
+}
+
+
+/*********************************************************************//**
+Write the meta data (index user fields) config file.
+@return true in case of success otherwise false. */
+static
+bool
+xb_export_cfg_write_index_fields(
+/*===========================*/
+ const dict_index_t* index, /*!< in: write the meta data for
+ this index */
+ FILE* file) /*!< in: file to write to */
+{
+ byte row[sizeof(ib_uint32_t) * 2];
+
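+ /* Each field is serialised as two 32-bit values (prefix_len and
+ fixed_len), followed by the length of the field name (including
+ the terminating NUL byte) and the name itself. */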
+ for (ulint i = 0; i < index->n_fields; ++i) {
+ byte* ptr = row;
+ const dict_field_t* field = &index->fields[i];
+
+ mach_write_to_4(ptr, field->prefix_len);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, field->fixed_len);
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+
+ msg("xtrabackup: Error: writing index fields.");
+
+ return(false);
+ }
+
+ /* Include the NUL byte in the length. */
+ ib_uint32_t len = strlen(field->name) + 1;
+ ut_a(len > 1);
+
+ mach_write_to_4(row, len);
+
+ if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
+ || fwrite(field->name, 1, len, file) != len) {
+
+ msg("xtrabackup: Error: writing index column.");
+
+ return(false);
+ }
+ }
+
+ return(true);
+}
+
+/*********************************************************************//**
+Write the meta data config file index information.
+@return true in case of success otherwise false. */
+static __attribute__((nonnull, warn_unused_result))
+bool
+xb_export_cfg_write_indexes(
+/*======================*/
+ const dict_table_t* table, /*!< in: write the meta data for
+ this table */
+ FILE* file) /*!< in: file to write to */
+{
+ {
+ byte row[sizeof(ib_uint32_t)];
+
+ /* Write the number of indexes in the table. */
+ mach_write_to_4(row, UT_LIST_GET_LEN(table->indexes));
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+ msg("xtrabackup: Error: writing index count.");
+
+ return(false);
+ }
+ }
+
+ bool ret = true;
+
+ /* Write the index meta data. */
+ for (const dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
+ index != 0 && ret;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+
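+ /* Each index record consists of the 8-byte index id followed by
+ eight 32-bit values (space, page, type, trx_id_offset,
+ n_user_defined_cols, n_uniq, n_nullable, n_fields), and then the
+ length-prefixed, NUL-terminated index name. */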
+ byte* ptr;
+ byte row[sizeof(ib_uint64_t)
+ + sizeof(ib_uint32_t) * 8];
+
+ ptr = row;
+
+ ut_ad(sizeof(ib_uint64_t) == 8);
+ mach_write_to_8(ptr, index->id);
+ ptr += sizeof(ib_uint64_t);
+
+ mach_write_to_4(ptr, index->space);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->page);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->type);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->trx_id_offset);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_user_defined_cols);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_uniq);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_nullable);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_fields);
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+
+ msg("xtrabackup: Error: writing index meta-data.");
+
+ return(false);
+ }
+
+ /* Write the length of the index name.
+ NUL byte is included in the length. */
+ ib_uint32_t len = strlen(index->name) + 1;
+ ut_a(len > 1);
+
+ mach_write_to_4(row, len);
+
+ if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
+ || fwrite(index->name, 1, len, file) != len) {
+
+ msg("xtrabackup: Error: writing index name.");
+
+ return(false);
+ }
+
+ ret = xb_export_cfg_write_index_fields(index, file);
+ }
+
+ return(ret);
+}
+
+/*********************************************************************//**
+Write the meta data (table columns) config file. Serialise the contents of
+dict_col_t structure, along with the column name. All fields are serialized
+as ib_uint32_t.
+@return true in case of success otherwise false. */
+static __attribute__((nonnull, warn_unused_result))
+bool
+xb_export_cfg_write_table(
+/*====================*/
+ const dict_table_t* table, /*!< in: write the meta data for
+ this table */
+ FILE* file) /*!< in: file to write to */
+{
+ dict_col_t* col;
+ byte row[sizeof(ib_uint32_t) * 7];
+
+ col = table->cols;
+
+ for (ulint i = 0; i < table->n_cols; ++i, ++col) {
+ byte* ptr = row;
+
+ mach_write_to_4(ptr, col->prtype);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->mtype);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->len);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->mbminmaxlen);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->ind);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->ord_part);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->max_prefix);
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+ msg("xtrabackup: Error: writing table column data.");
+
+ return(false);
+ }
+
+ /* Write out the column name as [len, byte array]. The len
+ includes the NUL byte. */
+ ib_uint32_t len;
+ const char* col_name;
+
+ col_name = dict_table_get_col_name(table, dict_col_get_no(col));
+
+ /* Include the NUL byte in the length. */
+ len = strlen(col_name) + 1;
+ ut_a(len > 1);
+
+ mach_write_to_4(row, len);
+
+ if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
+ || fwrite(col_name, 1, len, file) != len) {
+
+ msg("xtrabackup: Error: writing column name.");
+
+ return(false);
+ }
+ }
+
+ return(true);
+}
+
+/*********************************************************************//**
+Write the meta data config file header.
+@return true in case of success otherwise false. */
+static __attribute__((nonnull, warn_unused_result))
+bool
+xb_export_cfg_write_header(
+/*=====================*/
+ const dict_table_t* table, /*!< in: write the meta data for
+ this table */
+ FILE* file) /*!< in: file to write to */
+{
+ byte value[sizeof(ib_uint32_t)];
+
+ /* Write the meta-data version number. */
+ mach_write_to_4(value, IB_EXPORT_CFG_VERSION_V1);
+
+ if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)) {
+ msg("xtrabackup: Error: writing meta-data version number.");
+
+ return(false);
+ }
+
+ /* Write the server hostname. */
+ ib_uint32_t len;
+ const char* hostname = "Hostname unknown";
+
+ /* The server hostname includes the NUL byte. */
+ len = strlen(hostname) + 1;
+ mach_write_to_4(value, len);
+
+ if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)
+ || fwrite(hostname, 1, len, file) != len) {
+
+ msg("xtrabackup: Error: writing hostname.");
+
+ return(false);
+ }
+
+ /* The table name includes the NUL byte. */
+ ut_a(table->name != 0);
+ len = strlen(table->name) + 1;
+
+ /* Write the table name. */
+ mach_write_to_4(value, len);
+
+ if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)
+ || fwrite(table->name, 1, len, file) != len) {
+
+ msg("xtrabackup: Error: writing table name.");
+
+ return(false);
+ }
+
+ byte row[sizeof(ib_uint32_t) * 3];
+
+ /* Write the next autoinc value. */
+ mach_write_to_8(row, table->autoinc);
+
+ if (fwrite(row, 1, sizeof(ib_uint64_t), file) != sizeof(ib_uint64_t)) {
+ msg("xtrabackup: Error: writing table autoinc value.");
+
+ return(false);
+ }
+
+ byte* ptr = row;
+
+ /* Write the system page size. */
+ mach_write_to_4(ptr, UNIV_PAGE_SIZE);
+ ptr += sizeof(ib_uint32_t);
+
+ /* Write the table->flags. */
+ mach_write_to_4(ptr, table->flags);
+ ptr += sizeof(ib_uint32_t);
+
+ /* Write the number of columns in the table. */
+ mach_write_to_4(ptr, table->n_cols);
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+ msg("xtrabackup: Error: writing table meta-data.");
+
+ return(false);
+ }
+
+ return(true);
+}
+
+/*********************************************************************//**
+Write MySQL 5.6-style meta data config file.
+@return true in case of success otherwise false. */
+static
+bool
+xb_export_cfg_write(
+ const fil_node_t* node,
+ const dict_table_t* table) /*!< in: write the meta data for
+ this table */
+{
+ char file_path[FN_REFLEN];
+ FILE* file;
+ bool success;
+
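+ /* Derive the .cfg path from the tablespace file name by replacing
+ its ".ibd" suffix. */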
+ strcpy(file_path, node->name);
+ strcpy(file_path + strlen(file_path) - 4, ".cfg");
+
+ file = fopen(file_path, "w+b");
+
+ if (file == NULL) {
+ msg("xtrabackup: Error: cannot close %s\n", node->name);
+
+ success = false;
+ } else {
+
+ success = xb_export_cfg_write_header(table, file);
+
+ if (success) {
+ success = xb_export_cfg_write_table(table, file);
+ }
+
+ if (success) {
+ success = xb_export_cfg_write_indexes(table, file);
+ }
+
+ if (fclose(file) != 0) {
+ msg("xtrabackup: Error: cannot close %s\n", node->name);
+ success = false;
+ }
+
+ }
+
+ return(success);
+
+}
+
+/********************************************************************//**
+Searches for archived log files in the archived log directory. The min and
+max LSNs of the files found, as well as the archived log file size, are
+stored in xtrabackup_arch_first_file_lsn, xtrabackup_arch_last_file_lsn and
+xtrabackup_arch_file_size respectively.
+@return true on success
+*/
+static
+bool
+xtrabackup_arch_search_files(
+/*=========================*/
+ ib_uint64_t start_lsn) /*!< in: filter out log files
+ that contain only data
+ with lsn < start_lsn */
+{
+ os_file_dir_t dir;
+ os_file_stat_t fileinfo;
+ ut_ad(innobase_log_arch_dir);
+
+ dir = os_file_opendir(innobase_log_arch_dir, FALSE);
+ if (!dir) {
+ msg("xtrabackup: error: cannot open archived log directory %s\n",
+ innobase_log_arch_dir);
+ return false;
+ }
+
+ while(!os_file_readdir_next_file(innobase_log_arch_dir,
+ dir,
+ &fileinfo) ) {
+ lsn_t log_file_lsn;
+ char* log_str_end_lsn_ptr;
+
+ if (strncmp(fileinfo.name,
+ IB_ARCHIVED_LOGS_PREFIX,
+ sizeof(IB_ARCHIVED_LOGS_PREFIX) - 1)) {
+ continue;
+ }
+
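+ /* The part of the file name after the prefix is the LSN at
+ which the archived log file starts. */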
+ log_file_lsn = strtoll(fileinfo.name +
+ sizeof(IB_ARCHIVED_LOGS_PREFIX) - 1,
+ &log_str_end_lsn_ptr, 10);
+
+ if (*log_str_end_lsn_ptr) {
+ continue;
+ }
+
+ if (log_file_lsn + (fileinfo.size - LOG_FILE_HDR_SIZE) < start_lsn) {
+ continue;
+ }
+
+ if (!xtrabackup_arch_first_file_lsn ||
+ log_file_lsn < xtrabackup_arch_first_file_lsn) {
+ xtrabackup_arch_first_file_lsn = log_file_lsn;
+ }
+ if (log_file_lsn > xtrabackup_arch_last_file_lsn) {
+ xtrabackup_arch_last_file_lsn = log_file_lsn;
+ }
+
+ //TODO: find a more suitable way to extract the archived log
+ //file size
+ if (fileinfo.size > (ib_int64_t)xtrabackup_arch_file_size) {
+ xtrabackup_arch_file_size = fileinfo.size;
+ }
+ }
+
+ return xtrabackup_arch_first_file_lsn != 0;
+}
+
+static
+void
+innodb_free_param()
+{
+ srv_free_paths_and_sizes();
+ free(internal_innobase_data_file_path);
+ internal_innobase_data_file_path = NULL;
+ free_tmpdir(&mysql_tmpdir_list);
+}
+
+
+/**************************************************************************
+Store the current binary log coordinates in a specified file.
+@return 'false' on error. */
+static bool
+store_binlog_info(
+/*==============*/
+ const char *filename) /*!< in: output file name */
+{
+ FILE *fp;
+
+ if (trx_sys_mysql_bin_log_name[0] == '\0') {
+ return(true);
+ }
+
+ fp = fopen(filename, "w");
+
+ if (!fp) {
+ msg("xtrabackup: failed to open '%s'\n", filename);
+ return(false);
+ }
+
+ fprintf(fp, "%s\t" UINT64PF "\n",
+ trx_sys_mysql_bin_log_name, trx_sys_mysql_bin_log_pos);
+ fclose(fp);
+
+ return(true);
+}
+
+static void
+xtrabackup_prepare_func(void)
+{
+ ulint err;
+ datafiles_iter_t *it;
+ fil_node_t *node;
+ fil_space_t *space;
+ char metadata_path[FN_REFLEN];
+
+ /* cd to target-dir */
+
+ if (my_setwd(xtrabackup_real_target_dir,MYF(MY_WME)))
+ {
+ msg("xtrabackup: cannot my_setwd %s\n",
+ xtrabackup_real_target_dir);
+ exit(EXIT_FAILURE);
+ }
+ msg("xtrabackup: cd to %s\n", xtrabackup_real_target_dir);
+
+ xtrabackup_target_dir= mysql_data_home_buff;
+ xtrabackup_target_dir[0]=FN_CURLIB; // all paths are relative from here
+ xtrabackup_target_dir[1]=0;
+
+ /*
+ read metadata of the target; metadata reading is not needed when
+ applying archived logs
+ */
+ sprintf(metadata_path, "%s/%s", xtrabackup_target_dir,
+ XTRABACKUP_METADATA_FILENAME);
+
+ if (!xtrabackup_read_metadata(metadata_path)) {
+ msg("xtrabackup: Error: failed to read metadata from '%s'\n",
+ metadata_path);
+ exit(EXIT_FAILURE);
+ }
+
+ if (!innobase_log_arch_dir)
+ {
+ if (!strcmp(metadata_type, "full-backuped")) {
+ msg("xtrabackup: This target seems to be not prepared "
+ "yet.\n");
+ } else if (!strcmp(metadata_type, "log-applied")) {
+ msg("xtrabackup: This target seems to be already "
+ "prepared with --apply-log-only.\n");
+ goto skip_check;
+ } else if (!strcmp(metadata_type, "full-prepared")) {
+ msg("xtrabackup: This target seems to be already "
+ "prepared.\n");
+ } else {
+ msg("xtrabackup: This target seems not to have correct "
+ "metadata...\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (xtrabackup_incremental) {
+ msg("xtrabackup: error: applying incremental backup "
+ "needs target prepared with --apply-log-only.\n");
+ exit(EXIT_FAILURE);
+ }
+skip_check:
+ if (xtrabackup_incremental
+ && metadata_to_lsn != incremental_lsn) {
+ msg("xtrabackup: error: This incremental backup seems "
+ "not to be proper for the target.\n"
+ "xtrabackup: Check 'to_lsn' of the target and "
+ "'from_lsn' of the incremental.\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /* Create logfiles for recovery from 'xtrabackup_logfile', before starting InnoDB */
+ srv_max_n_threads = 1000;
+ os_sync_mutex = NULL;
+ ut_mem_init();
+ /* temporary dummy value to avoid a crash */
+ srv_page_size_shift = 14;
+ srv_page_size = (1 << srv_page_size_shift);
+ os_sync_init();
+ sync_init();
+ os_io_init_simple();
+ mem_init(srv_mem_pool_size);
+ ut_crc32_init();
+
+ xb_filters_init();
+
+ if(!innobase_log_arch_dir && xtrabackup_init_temp_log())
+ goto error_cleanup;
+
+ if(innodb_init_param()) {
+ goto error_cleanup;
+ }
+
+ /* Expand compacted datafiles */
+
+ if (xtrabackup_compact) {
+ srv_compact_backup = TRUE;
+
+ if (!xb_expand_datafiles()) {
+ goto error_cleanup;
+ }
+
+ /* Reset the 'compact' flag in xtrabackup_checkpoints so we
+ don't expand on subsequent invocations. */
+ xtrabackup_compact = FALSE;
+ if (!xtrabackup_write_metadata(metadata_path)) {
+ msg("xtrabackup: error: xtrabackup_write_metadata() "
+ "failed\n");
+ goto error_cleanup;
+ }
+ }
+
+ xb_normalize_init_values();
+
+ if (xtrabackup_incremental || innobase_log_arch_dir) {
+ err = xb_data_files_init();
+ if (err != DB_SUCCESS) {
+ msg("xtrabackup: error: xb_data_files_init() failed "
+ "with error code %lu\n", err);
+ goto error_cleanup;
+ }
+ }
+ if (xtrabackup_incremental) {
+ inc_dir_tables_hash = hash_create(1000);
+
+ if(!xtrabackup_apply_deltas()) {
+ xb_data_files_close();
+ xb_filter_hash_free(inc_dir_tables_hash);
+ goto error_cleanup;
+ }
+ }
+ if (xtrabackup_incremental || innobase_log_arch_dir) {
+ xb_data_files_close();
+ }
+ if (xtrabackup_incremental) {
+ /* Remove from the datadir any tablespaces that were deleted
+ between the full and the incremental backup */
+
+ xb_process_datadir("./", ".ibd", rm_if_not_found, NULL);
+
+ xb_filter_hash_free(inc_dir_tables_hash);
+ }
+ sync_close();
+ sync_initialized = FALSE;
+ if (fil_system) {
+ fil_close();
+ }
+ os_sync_free();
+ mem_close();
+ os_sync_mutex = NULL;
+ ut_free_all_mem();
+
+ innodb_free_param();
+
+ /* Reset the configuration as it might have been changed by
+ xb_data_files_init(). */
+ if(innodb_init_param()) {
+ goto error_cleanup;
+ }
+
+ srv_apply_log_only = (ibool) xtrabackup_apply_log_only;
+ srv_rebuild_indexes = (ibool) xtrabackup_rebuild_indexes;
+
+ /* increase IO threads */
+ if(srv_n_file_io_threads < 10) {
+ srv_n_read_io_threads = 4;
+ srv_n_write_io_threads = 4;
+ }
+
+ if (innobase_log_arch_dir) {
+ srv_arch_dir = innobase_log_arch_dir;
+ srv_archive_recovery = TRUE;
+ if (xtrabackup_archived_to_lsn) {
+ if (xtrabackup_archived_to_lsn < metadata_last_lsn) {
+ msg("xtrabackup: warning: logs applying lsn "
+ "limit " UINT64PF " is "
+ "less than metadata last-lsn " UINT64PF
+ " and will be set to metadata last-lsn value\n",
+ xtrabackup_archived_to_lsn,
+ metadata_last_lsn);
+ xtrabackup_archived_to_lsn = metadata_last_lsn;
+ }
+ if (xtrabackup_archived_to_lsn < min_flushed_lsn) {
+ msg("xtrabackup: error: logs applying "
+ "lsn limit " UINT64PF " is less than "
+ "min_flushed_lsn " UINT64PF
+ ", there is nothing to do\n",
+ xtrabackup_archived_to_lsn,
+ min_flushed_lsn);
+ goto error_cleanup;
+ }
+ }
+ srv_archive_recovery_limit_lsn= xtrabackup_archived_to_lsn;
+ /*
+ Unfinished transactions are not rolled back during log applying,
+ as they can be finished when further log files are applied.
+ */
+ srv_apply_log_only = xtrabackup_apply_log_only = TRUE;
+
+ if (!xtrabackup_arch_search_files(min_flushed_lsn)) {
+ goto error_cleanup;
+ }
+
+ /*
+ Check whether the last LSN of the last log file is large enough
+ to overlap the last scanned LSN read from the metadata.
+ */
+ if (xtrabackup_arch_last_file_lsn +
+ xtrabackup_arch_file_size -
+ LOG_FILE_HDR_SIZE < metadata_last_lsn) {
+ msg("xtrabackup: error: there are no enough archived logs "
+ "to apply\n");
+ goto error_cleanup;
+ }
+ }
+
+ msg("xtrabackup: Starting InnoDB instance for recovery.\n"
+ "xtrabackup: Using %lld bytes for buffer pool "
+ "(set by --use-memory parameter)\n", xtrabackup_use_memory);
+
+ if(innodb_init())
+ goto error_cleanup;
+
+ it = datafiles_iter_new(fil_system);
+ if (it == NULL) {
+ msg("xtrabackup: Error: datafiles_iter_new() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ while ((node = datafiles_iter_next(it)) != NULL) {
+ byte *header;
+ ulint size;
+ ulint actual_size;
+ mtr_t mtr;
+ buf_block_t *block;
+ ulint flags;
+
+ space = node->space;
+
+ /* Align space sizes with the FSP header. We want to process
+ each space once, so skip all nodes except the first one in a
+ multi-node space. */
+ if (UT_LIST_GET_PREV(chain, node) != NULL) {
+ continue;
+ }
+
+ mtr_start(&mtr);
+
+ mtr_s_lock(fil_space_get_latch(space->id, &flags), &mtr);
+
+ block = buf_page_get(space->id,
+ dict_tf_get_zip_size(flags),
+ 0, RW_S_LATCH, &mtr);
+ header = FSP_HEADER_OFFSET + buf_block_get_frame(block);
+
+ size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES,
+ &mtr);
+
+ mtr_commit(&mtr);
+
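+ /* Extend the tablespace to the size recorded in its FSP
+ header, in case the copied data file is shorter. */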
+ fil_extend_space_to_desired_size(&actual_size, space->id, size);
+ }
+
+ datafiles_iter_free(it);
+
+ if (xtrabackup_export) {
+ msg("xtrabackup: export option is specified.\n");
+ os_file_t info_file = XB_FILE_UNDEFINED;
+ char info_file_path[FN_REFLEN];
+ ibool success;
+ char table_name[FN_REFLEN];
+
+ byte* page;
+ byte* buf = NULL;
+
+ buf = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE * 2));
+ page = static_cast<byte *>(ut_align(buf, UNIV_PAGE_SIZE));
+
+ /* flush the insert buffer at shutdown */
+ innobase_fast_shutdown = 0;
+
+ it = datafiles_iter_new(fil_system);
+ if (it == NULL) {
+ msg("xtrabackup: Error: datafiles_iter_new() "
+ "failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ while ((node = datafiles_iter_next(it)) != NULL) {
+ int len;
+ char *next, *prev, *p;
+ dict_table_t* table;
+ dict_index_t* index;
+ ulint n_index;
+
+ space = node->space;
+
+ /* handle only file-per-table tablespaces */
+ if (!fil_is_user_tablespace_id(space->id)) {
+ continue;
+ }
+
+ /* node exists == file exists, here */
+ strcpy(info_file_path, node->name);
+ strcpy(info_file_path +
+ strlen(info_file_path) -
+ 4, ".exp");
+
+ len = strlen(info_file_path);
+
+ p = info_file_path;
+ prev = NULL;
+ while ((next = strchr(p, SRV_PATH_SEPARATOR)) != NULL)
+ {
+ prev = p;
+ p = next + 1;
+ }
+ info_file_path[len - 4] = 0;
+ strncpy(table_name, prev, FN_REFLEN);
+
+ info_file_path[len - 4] = '.';
+
+ mutex_enter(&(dict_sys->mutex));
+
+ table = dict_table_get_low(table_name);
+ if (!table) {
+ msg("xtrabackup: error: "
+ "cannot find dictionary "
+ "record of table %s\n",
+ table_name);
+ goto next_node;
+ }
+ index = dict_table_get_first_index(table);
+ n_index = UT_LIST_GET_LEN(table->indexes);
+ if (n_index > 31) {
+ msg("xtrabackup: error: "
+ "sorry, cannot export over "
+ "31 indexes for now.\n");
+ goto next_node;
+ }
+
+ /* Write MySQL 5.6 .cfg file */
+ if (!xb_export_cfg_write(node, table)) {
+ goto next_node;
+ }
+
+ /* init exp file */
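+ /* The .exp page starts with the magic bytes "xportinf", the
+ number of indexes and the table name; each index then occupies
+ a 512-byte slot holding its id, root page number and name. */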
+ memset(page, 0, UNIV_PAGE_SIZE);
+ mach_write_to_4(page , 0x78706f72UL);
+ mach_write_to_4(page + 4, 0x74696e66UL);/*"xportinf"*/
+ mach_write_to_4(page + 8, n_index);
+ strncpy((char *) page + 12,
+ table_name, 500);
+
+ msg("xtrabackup: export metadata of "
+ "table '%s' to file `%s` "
+ "(%lu indexes)\n",
+ table_name, info_file_path,
+ n_index);
+
+ n_index = 1;
+ while (index) {
+ mach_write_to_8(page + n_index * 512, index->id);
+ mach_write_to_4(page + n_index * 512 + 8,
+ index->page);
+ strncpy((char *) page + n_index * 512 +
+ 12, index->name, 500);
+
+ msg("xtrabackup: name=%s, "
+ "id.low=%lu, page=%lu\n",
+ index->name,
+ (ulint)(index->id &
+ 0xFFFFFFFFUL),
+ (ulint) index->page);
+ index = dict_table_get_next_index(index);
+ n_index++;
+ }
+
+ srv_normalize_path_for_win(info_file_path);
+ info_file = os_file_create(
+ 0,
+ info_file_path,
+ OS_FILE_OVERWRITE,
+ OS_FILE_NORMAL, OS_DATA_FILE,
+ &success);
+ if (!success) {
+ os_file_get_last_error(TRUE);
+ goto next_node;
+ }
+ success = os_file_write(info_file_path,
+ info_file, page,
+ 0, UNIV_PAGE_SIZE);
+ if (!success) {
+ os_file_get_last_error(TRUE);
+ goto next_node;
+ }
+ success = os_file_flush(info_file);
+ if (!success) {
+ os_file_get_last_error(TRUE);
+ goto next_node;
+ }
+next_node:
+ if (info_file != XB_FILE_UNDEFINED) {
+ os_file_close(info_file);
+ info_file = XB_FILE_UNDEFINED;
+ }
+ mutex_exit(&(dict_sys->mutex));
+ }
+
+ ut_free(buf);
+ }
+
+ /* print the binary log position */
+ trx_sys_print_mysql_binlog_offset();
+ msg("\n");
+
+ /* output to xtrabackup_binlog_pos_innodb and (if
+ backup_safe_binlog_info was available on the server) to
+ xtrabackup_binlog_info. In the latter case xtrabackup_binlog_pos_innodb
+ becomes redundant and is created only for compatibility. */
+ if (!store_binlog_info("xtrabackup_binlog_pos_innodb") ||
+ (recover_binlog_info &&
+ !store_binlog_info(XTRABACKUP_BINLOG_INFO))) {
+
+ exit(EXIT_FAILURE);
+ }
+
+ if (innobase_log_arch_dir)
+ srv_start_lsn = log_sys->lsn = recv_sys->recovered_lsn;
+
+ /* Check whether the log has been applied far enough. */
+ if ((xtrabackup_incremental
+ && srv_start_lsn < incremental_to_lsn)
+ ||(!xtrabackup_incremental
+ && srv_start_lsn < metadata_to_lsn)) {
+ msg("xtrabackup: error: "
+ "The transaction log file is corrupted.\n"
+ "xtrabackup: error: "
+ "The log was not applied to the intended LSN!\n");
+ msg("xtrabackup: Log applied to lsn " LSN_PF "\n",
+ srv_start_lsn);
+ if (xtrabackup_incremental) {
+ msg("xtrabackup: The intended lsn is " LSN_PF "\n",
+ incremental_to_lsn);
+ } else {
+ msg("xtrabackup: The intended lsn is " LSN_PF "\n",
+ metadata_to_lsn);
+ }
+ exit(EXIT_FAILURE);
+ }
+
+ xb_write_galera_info(xtrabackup_incremental);
+
+ if(innodb_end())
+ goto error_cleanup;
+
+ innodb_free_param();
+
+ sync_initialized = FALSE;
+ os_sync_mutex = NULL;
+
+ /* re-init necessary components */
+ ut_mem_init();
+ os_sync_init();
+ sync_init();
+ os_io_init_simple();
+
+ if(xtrabackup_close_temp_log(TRUE))
+ exit(EXIT_FAILURE);
+
+ /* output to metadata file */
+ {
+ char filename[FN_REFLEN];
+
+ strcpy(metadata_type, srv_apply_log_only ?
+ "log-applied" : "full-prepared");
+
+ if(xtrabackup_incremental
+ && metadata_to_lsn < incremental_to_lsn)
+ {
+ metadata_to_lsn = incremental_to_lsn;
+ metadata_last_lsn = incremental_last_lsn;
+ }
+
+ sprintf(filename, "%s/%s", xtrabackup_target_dir, XTRABACKUP_METADATA_FILENAME);
+ if (!xtrabackup_write_metadata(filename)) {
+
+ msg("xtrabackup: Error: failed to write metadata "
+ "to '%s'\n", filename);
+ exit(EXIT_FAILURE);
+ }
+
+ if(xtrabackup_extra_lsndir) {
+ sprintf(filename, "%s/%s", xtrabackup_extra_lsndir, XTRABACKUP_METADATA_FILENAME);
+ if (!xtrabackup_write_metadata(filename)) {
+ msg("xtrabackup: Error: failed to write "
+ "metadata to '%s'\n", filename);
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+
+ if (!apply_log_finish()) {
+ exit(EXIT_FAILURE);
+ }
+
+ sync_close();
+ sync_initialized = FALSE;
+ if (fil_system) {
+ fil_close();
+ }
+ os_sync_free();
+ // mem_close();
+ os_sync_mutex = NULL;
+ ut_free_all_mem();
+
+ /* start InnoDB once again to create log files */
+
+ if (!xtrabackup_apply_log_only) {
+
+ if(innodb_init_param()) {
+ goto error;
+ }
+
+ srv_apply_log_only = FALSE;
+ srv_rebuild_indexes = FALSE;
+
+ /* increase IO threads */
+ if(srv_n_file_io_threads < 10) {
+ srv_n_read_io_threads = 4;
+ srv_n_write_io_threads = 4;
+ }
+
+ srv_shutdown_state = SRV_SHUTDOWN_NONE;
+
+ if(innodb_init())
+ goto error;
+
+ if(innodb_end())
+ goto error;
+
+ innodb_free_param();
+
+ }
+
+ xb_filters_free();
+
+ return;
+
+error_cleanup:
+ xtrabackup_close_temp_log(FALSE);
+ xb_filters_free();
+
+error:
+ exit(EXIT_FAILURE);
+}
+
+/**************************************************************************
+Signals-related setup. */
+static
+void
+setup_signals()
+/*===========*/
+{
+ struct sigaction sa;
+
+ /* Print a stacktrace on some signals */
+ sa.sa_flags = SA_RESETHAND | SA_NODEFER;
+ sigemptyset(&sa.sa_mask);
+ sigprocmask(SIG_SETMASK,&sa.sa_mask,NULL);
+#ifdef HAVE_STACKTRACE
+ my_init_stacktrace();
+#endif
+ sa.sa_handler = handle_fatal_signal;
+ sigaction(SIGSEGV, &sa, NULL);
+ sigaction(SIGABRT, &sa, NULL);
+ sigaction(SIGBUS, &sa, NULL);
+ sigaction(SIGILL, &sa, NULL);
+ sigaction(SIGFPE, &sa, NULL);
+
+#ifdef __linux__
+ /* Ensure the xtrabackup process is killed when the parent
+ (innobackupex) is terminated by an unhandled signal */
+
+ if (prctl(PR_SET_PDEATHSIG, SIGKILL)) {
+ msg("prctl() failed with errno = %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+#endif
+}
+
+/**************************************************************************
+Append a group name to the given default groups list. */
+static
+void
+append_defaults_group(const char *group, const char *default_groups[],
+ size_t default_groups_size)
+{
+ uint i;
+ bool appended = false;
+ for (i = 0; i < default_groups_size - 1; i++) {
+ if (default_groups[i] == NULL) {
+ default_groups[i] = group;
+ appended = true;
+ break;
+ }
+ }
+ ut_a(appended);
+}
+
+bool
+xb_init()
+{
+ const char *mixed_options[4] = {NULL, NULL, NULL, NULL};
+ int n_mixed_options;
+
+ /* sanity checks */
+
+ if (opt_slave_info
+ && opt_no_lock
+ && !opt_safe_slave_backup) {
+ msg("Error: --slave-info is used with --no-lock but "
+ "without --safe-slave-backup. The binlog position "
+ "cannot be consistent with the backup data.\n");
+ return(false);
+ }
+
+ if (opt_rsync && xtrabackup_stream_fmt) {
+ msg("Error: --rsync doesn't work with --stream\n");
+ return(false);
+ }
+
+ n_mixed_options = 0;
+
+ if (opt_decompress) {
+ mixed_options[n_mixed_options++] = "--decompress";
+ } else if (opt_decrypt) {
+ mixed_options[n_mixed_options++] = "--decrypt";
+ }
+
+ if (xtrabackup_copy_back) {
+ mixed_options[n_mixed_options++] = "--copy-back";
+ }
+
+ if (xtrabackup_move_back) {
+ mixed_options[n_mixed_options++] = "--move-back";
+ }
+
+ if (xtrabackup_prepare) {
+ mixed_options[n_mixed_options++] = "--apply-log";
+ }
+
+ if (n_mixed_options > 1) {
+ msg("Error: %s and %s are mutually exclusive\n",
+ mixed_options[0], mixed_options[1]);
+ return(false);
+ }
+
+ if (xtrabackup_backup) {
+
+ if (!opt_noversioncheck) {
+ version_check();
+ }
+
+ if ((mysql_connection = xb_mysql_connect()) == NULL) {
+ return(false);
+ }
+
+ if (!get_mysql_vars(mysql_connection)) {
+ return(false);
+ }
+
+ history_start_time = time(NULL);
+
+ }
+
+ return(true);
+}
+
+void
+handle_options(int argc, char **argv, char ***argv_client, char ***argv_server)
+{
+ int i;
+ int ho_error;
+
+ char* target_dir = NULL;
+ bool prepare = false;
+
+ char conf_file[FN_REFLEN];
+ int argc_client = argc;
+ int argc_server = argc;
+
+ *argv_client = argv;
+ *argv_server = argv;
+
+ /* scan options for group and config file to load defaults from */
+ for (i = 1; i < argc; i++) {
+
+ char *optend = strcend(argv[i], '=');
+
+ if (strncmp(argv[i], "--defaults-group",
+ optend - argv[i]) == 0) {
+ defaults_group = optend + 1;
+ append_defaults_group(defaults_group,
+ xb_server_default_groups,
+ array_elements(xb_server_default_groups));
+ }
+
+ if (strncmp(argv[i], "--login-path",
+ optend - argv[i]) == 0) {
+ append_defaults_group(optend + 1,
+ xb_client_default_groups,
+ array_elements(xb_client_default_groups));
+ }
+
+ if (!strncmp(argv[i], "--prepare",
+ optend - argv[i])) {
+ prepare = true;
+ }
+
+ if (!strncmp(argv[i], "--apply-log",
+ optend - argv[i])) {
+ prepare = true;
+ }
+
+ if (!strncmp(argv[i], "--target-dir",
+ optend - argv[i]) && *optend) {
+ target_dir = optend + 1;
+ }
+
+ if (!*optend && argv[i][0] != '-') {
+ target_dir = argv[i];
+ }
+ }
+
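+ /* In --prepare/--apply-log mode the server options saved in
+ backup-my.cnf inside the target directory are loaded instead of
+ the default my.cnf. */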
+ snprintf(conf_file, sizeof(conf_file), "my");
+
+ if (prepare && target_dir) {
+ snprintf(conf_file, sizeof(conf_file),
+ "%s/backup-my.cnf", target_dir);
+ }
+ if (load_defaults(conf_file, xb_server_default_groups,
+ &argc_server, argv_server)) {
+ exit(EXIT_FAILURE);
+ }
+
+ print_param_str <<
+ "# This MySQL options file was generated by XtraBackup.\n"
+ "[" << defaults_group << "]\n";
+
+ /* We want xtrabackup to ignore unknown options, because it only
+ recognizes a small subset of server variables */
+ my_getopt_skip_unknown = TRUE;
+
+ /* Reset u_max_value for all options, as we don't want the
+ --maximum-... modifier to set the actual option values */
+ for (my_option *optp= xb_server_options; optp->name; optp++) {
+ optp->u_max_value = (G_PTR *) &global_max_value;
+ }
+
+ /* Throw a descriptive error if --defaults-file or --defaults-extra-file
+ is not the first command line argument */
+ for (int i = 2 ; i < argc ; i++) {
+ char *optend = strcend((argv)[i], '=');
+
+ if (optend - argv[i] == 15 &&
+ !strncmp(argv[i], "--defaults-file", optend - argv[i])) {
+
+ msg("xtrabackup: Error: --defaults-file "
+ "must be specified first on the command "
+ "line\n");
+ exit(EXIT_FAILURE);
+ }
+ if (optend - argv[i] == 21 &&
+ !strncmp(argv[i], "--defaults-extra-file",
+ optend - argv[i])) {
+
+ msg("xtrabackup: Error: --defaults-extra-file "
+ "must be specified first on the command "
+ "line\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (argc_server > 0
+ && (ho_error=handle_options(&argc_server, argv_server,
+ xb_server_options, xb_get_one_option)))
+ exit(ho_error);
+
+ if (load_defaults(conf_file, xb_client_default_groups,
+ &argc_client, argv_client)) {
+ exit(EXIT_FAILURE);
+ }
+
+ if (strcmp(base_name(my_progname), INNOBACKUPEX_BIN_NAME) == 0 &&
+ argc_client > 0) {
+ /* emulate innobackupex script */
+ innobackupex_mode = true;
+ if (!ibx_handle_options(&argc_client, argv_client)) {
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (argc_client > 0
+ && (ho_error=handle_options(&argc_client, argv_client,
+ xb_client_options, xb_get_one_option)))
+ exit(ho_error);
+
+ /* Reject command line arguments that don't look like options, i.e. are
+ not of the form '-X' (single-character options) or '--option' (long
+ options) */
+ for (int i = 0 ; i < argc_client ; i++) {
+ const char * const opt = (*argv_client)[i];
+
+ if (strncmp(opt, "--", 2) &&
+ !(strlen(opt) == 2 && opt[0] == '-')) {
+ bool server_option = true;
+
+ for (int j = 0; j < argc_server; j++) {
+ if (opt == (*argv_server)[j]) {
+ server_option = false;
+ break;
+ }
+ }
+
+ if (!server_option) {
+ msg("xtrabackup: Error:"
+ " unknown argument: '%s'\n", opt);
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+}
+
+/* ================= main =================== */
+
+int main(int argc, char **argv)
+{
+ char **client_defaults, **server_defaults;
+ char cwd[FN_REFLEN];
+ my_bool is_symdir;
+
+ setup_signals();
+
+ MY_INIT(argv[0]);
+
+ pthread_key_create(&THR_THD, NULL);
+ my_pthread_setspecific_ptr(THR_THD, NULL);
+
+ xb_regex_init();
+
+ capture_tool_command(argc, argv);
+
+ if (mysql_server_init(-1, NULL, NULL))
+ {
+ exit(EXIT_FAILURE);
+ }
+
+ system_charset_info= &my_charset_utf8_general_ci;
+ key_map_full.set_all();
+
+ handle_options(argc, argv, &client_defaults, &server_defaults);
+
+ if (innobackupex_mode) {
+ if (!ibx_init()) {
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if ((!xtrabackup_print_param) && (!xtrabackup_prepare) && (strcmp(mysql_data_home, "./") == 0)) {
+ if (!xtrabackup_print_param)
+ usage();
+ msg("\nxtrabackup: Error: Please set parameter 'datadir'\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Expand target-dir, incremental-basedir, etc. */
+
+ my_getwd(cwd, sizeof(cwd), MYF(0));
+
+ my_load_path(xtrabackup_real_target_dir,
+ xtrabackup_target_dir, cwd);
+ unpack_dirname(xtrabackup_real_target_dir,
+ xtrabackup_real_target_dir, &is_symdir);
+ xtrabackup_target_dir= xtrabackup_real_target_dir;
+
+ if (xtrabackup_incremental_basedir) {
+ my_load_path(xtrabackup_real_incremental_basedir,
+ xtrabackup_incremental_basedir, cwd);
+ unpack_dirname(xtrabackup_real_incremental_basedir,
+ xtrabackup_real_incremental_basedir, &is_symdir);
+ xtrabackup_incremental_basedir =
+ xtrabackup_real_incremental_basedir;
+ }
+
+ if (xtrabackup_incremental_dir) {
+ my_load_path(xtrabackup_real_incremental_dir,
+ xtrabackup_incremental_dir, cwd);
+ unpack_dirname(xtrabackup_real_incremental_dir,
+ xtrabackup_real_incremental_dir, &is_symdir);
+ xtrabackup_incremental_dir = xtrabackup_real_incremental_dir;
+ }
+
+ if (xtrabackup_extra_lsndir) {
+ my_load_path(xtrabackup_real_extra_lsndir,
+ xtrabackup_extra_lsndir, cwd);
+ unpack_dirname(xtrabackup_real_extra_lsndir,
+ xtrabackup_real_extra_lsndir, &is_symdir);
+ xtrabackup_extra_lsndir = xtrabackup_real_extra_lsndir;
+ }
+
+ /* get default temporary directory */
+ if (!opt_mysql_tmpdir || !opt_mysql_tmpdir[0]) {
+ opt_mysql_tmpdir = getenv("TMPDIR");
+#if defined(__WIN__)
+ if (!opt_mysql_tmpdir) {
+ opt_mysql_tmpdir = getenv("TEMP");
+ }
+ if (!opt_mysql_tmpdir) {
+ opt_mysql_tmpdir = getenv("TMP");
+ }
+#endif
+ if (!opt_mysql_tmpdir || !opt_mysql_tmpdir[0]) {
+ opt_mysql_tmpdir = const_cast<char*>(DEFAULT_TMPDIR);
+ }
+ }
+
+ /* temporarily set the page size to the maximum supported value */
+ srv_page_size_shift = UNIV_PAGE_SIZE_SHIFT_MAX;
+ srv_page_size = UNIV_PAGE_SIZE_MAX;
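+ /* Determine incremental_lsn: parse the --incremental value
+ directly (--backup), read it from the base backup metadata
+ (--incremental-basedir), read it from the incremental directory
+ metadata (--prepare), or fall back to the incremental history
+ name/uuid. */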
+ if (xtrabackup_backup && xtrabackup_incremental) {
+ /* a directly specified LSN is only for --backup,
+ and it takes precedence over the other options */
+
+ char* endchar;
+ int error = 0;
+ incremental_lsn = strtoll(xtrabackup_incremental, &endchar, 10);
+ if (*endchar != '\0')
+ error = 1;
+
+ if (error) {
+ msg("xtrabackup: value '%s' may be wrong format for "
+ "incremental option.\n", xtrabackup_incremental);
+ exit(EXIT_FAILURE);
+ }
+ } else if (xtrabackup_backup && xtrabackup_incremental_basedir) {
+ char filename[FN_REFLEN];
+
+ sprintf(filename, "%s/%s", xtrabackup_incremental_basedir, XTRABACKUP_METADATA_FILENAME);
+
+ if (!xtrabackup_read_metadata(filename)) {
+ msg("xtrabackup: error: failed to read metadata from "
+ "%s\n", filename);
+ exit(EXIT_FAILURE);
+ }
+
+ incremental_lsn = metadata_to_lsn;
+ xtrabackup_incremental = xtrabackup_incremental_basedir; //dummy
+ } else if (xtrabackup_prepare && xtrabackup_incremental_dir) {
+ char filename[FN_REFLEN];
+
+ sprintf(filename, "%s/%s", xtrabackup_incremental_dir, XTRABACKUP_METADATA_FILENAME);
+
+ if (!xtrabackup_read_metadata(filename)) {
+ msg("xtrabackup: error: failed to read metadata from "
+ "%s\n", filename);
+ exit(EXIT_FAILURE);
+ }
+
+ incremental_lsn = metadata_from_lsn;
+ incremental_to_lsn = metadata_to_lsn;
+ incremental_last_lsn = metadata_last_lsn;
+ xtrabackup_incremental = xtrabackup_incremental_dir; //dummy
+
+ } else if (opt_incremental_history_name) {
+ xtrabackup_incremental = opt_incremental_history_name;
+ } else if (opt_incremental_history_uuid) {
+ xtrabackup_incremental = opt_incremental_history_uuid;
+ } else {
+ xtrabackup_incremental = NULL;
+ }
+
+ if (!xb_init()) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* --print-param */
+ if (xtrabackup_print_param) {
+
+ printf("%s", print_param_str.str().c_str());
+
+ exit(EXIT_SUCCESS);
+ }
+
+ print_version();
+ if (xtrabackup_incremental) {
+ msg("incremental backup from " LSN_PF " is enabled.\n",
+ incremental_lsn);
+ }
+
+ if (xtrabackup_export && innobase_file_per_table == FALSE) {
+ msg("xtrabackup: auto-enabling --innodb-file-per-table due to "
+ "the --export option\n");
+ innobase_file_per_table = TRUE;
+ }
+
+ if (xtrabackup_incremental && xtrabackup_stream &&
+ xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
+ msg("xtrabackup: error: "
+ "streaming incremental backups are incompatible with the \n"
+ "'tar' streaming format. Use --stream=xbstream instead.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if ((xtrabackup_compress || xtrabackup_encrypt) && xtrabackup_stream &&
+ xtrabackup_stream_fmt == XB_STREAM_FMT_TAR) {
+ msg("xtrabackup: error: "
+ "compressed and encrypted backups are incompatible with the \n"
+ "'tar' streaming format. Use --stream=xbstream instead.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (!xtrabackup_prepare &&
+ (innobase_log_arch_dir || xtrabackup_archived_to_lsn)) {
+
+ /* The default my.cnf can contain the innobase_log_arch_dir option
+ set for the server; reset it to allow the backup. */
+ innobase_log_arch_dir= NULL;
+ xtrabackup_archived_to_lsn= 0;
+ msg("xtrabackup: warning: "
+ "as --innodb-log-arch-dir and --to-archived-lsn can be used "
+ "only with --prepare they will be reset\n");
+ }
+
+ /* exactly one of these operations must be specified (for now) */
+ {
+ int num = 0;
+
+ if (xtrabackup_backup) num++;
+ if (xtrabackup_stats) num++;
+ if (xtrabackup_prepare) num++;
+ if (xtrabackup_copy_back) num++;
+ if (xtrabackup_move_back) num++;
+ if (xtrabackup_decrypt_decompress) num++;
+ if (num != 1) { /* !XOR (for now) */
+ usage();
+ exit(EXIT_FAILURE);
+ }
+ }
+
+#ifndef __WIN__
+ if (xtrabackup_debug_sync) {
+ signal(SIGCONT, sigcont_handler);
+ }
+#endif
+
+ /* --backup */
+ if (xtrabackup_backup)
+ xtrabackup_backup_func();
+
+ /* --stats */
+ if (xtrabackup_stats)
+ xtrabackup_stats_func();
+
+ /* --prepare */
+ if (xtrabackup_prepare)
+ xtrabackup_prepare_func();
+
+ if (xtrabackup_copy_back || xtrabackup_move_back) {
+ if (!check_if_param_set("datadir")) {
+ msg("Error: datadir must be specified.\n");
+ exit(EXIT_FAILURE);
+ }
+ if (!copy_back())
+ exit(EXIT_FAILURE);
+ }
+
+ if (xtrabackup_decrypt_decompress && !decrypt_decompress()) {
+ exit(EXIT_FAILURE);
+ }
+
+ backup_cleanup();
+
+ if (innobackupex_mode) {
+ ibx_cleanup();
+ }
+
+ xb_regex_end();
+
+ free_defaults(client_defaults);
+ free_defaults(server_defaults);
+
+ if (THR_THD)
+ (void) pthread_key_delete(THR_THD);
+
+ msg_ts("completed OK!\n");
+
+ exit(EXIT_SUCCESS);
+}
diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h
new file mode 100644
index 00000000000..b31028175c9
--- /dev/null
+++ b/extra/mariabackup/xtrabackup.h
@@ -0,0 +1,232 @@
+/******************************************************
+Copyright (c) 2011-2015 Percona LLC and/or its affiliates.
+
+Declarations for xtrabackup.cc
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XB_XTRABACKUP_H
+#define XB_XTRABACKUP_H
+
+#include <my_getopt.h>
+#include "datasink.h"
+#include "xbstream.h"
+#include "changed_page_bitmap.h"
+
+#ifdef __WIN__
+#define XB_FILE_UNDEFINED NULL
+#else
+#define XB_FILE_UNDEFINED (-1)
+#endif
+
+typedef struct {
+ ulint page_size;
+ ulint zip_size;
+ ulint space_id;
+} xb_delta_info_t;
+
+/* ======== Datafiles iterator ======== */
+typedef struct {
+ fil_system_t *system;
+ fil_space_t *space;
+ fil_node_t *node;
+ ibool started;
+ os_ib_mutex_t mutex;
+} datafiles_iter_t;
+
+/* value of the --incremental option */
+extern lsn_t incremental_lsn;
+
+extern char *xtrabackup_target_dir;
+extern char *xtrabackup_incremental_dir;
+extern char *xtrabackup_incremental_basedir;
+extern char *innobase_data_home_dir;
+extern char *innobase_buffer_pool_filename;
+extern ds_ctxt_t *ds_meta;
+extern ds_ctxt_t *ds_data;
+
+/* The last checkpoint LSN at the backup startup time */
+extern lsn_t checkpoint_lsn_start;
+
+extern xb_page_bitmap *changed_page_bitmap;
+
+extern ulint xtrabackup_rebuild_threads;
+
+extern char *xtrabackup_incremental;
+extern my_bool xtrabackup_incremental_force_scan;
+
+extern lsn_t metadata_from_lsn;
+extern lsn_t metadata_to_lsn;
+extern lsn_t metadata_last_lsn;
+
+extern xb_stream_fmt_t xtrabackup_stream_fmt;
+extern ibool xtrabackup_stream;
+
+extern char *xtrabackup_tables;
+extern char *xtrabackup_tables_file;
+extern char *xtrabackup_databases;
+extern char *xtrabackup_databases_file;
+
+extern my_bool xtrabackup_compact;
+extern ibool xtrabackup_compress;
+extern ibool xtrabackup_encrypt;
+
+extern my_bool xtrabackup_backup;
+extern my_bool xtrabackup_prepare;
+extern my_bool xtrabackup_apply_log_only;
+extern my_bool xtrabackup_copy_back;
+extern my_bool xtrabackup_move_back;
+extern my_bool xtrabackup_decrypt_decompress;
+
+extern char *innobase_data_file_path;
+extern char *innobase_doublewrite_file;
+extern char *xtrabackup_encrypt_key;
+extern char *xtrabackup_encrypt_key_file;
+extern longlong innobase_log_file_size;
+extern long innobase_log_files_in_group;
+extern longlong innobase_page_size;
+
+extern const char *xtrabackup_encrypt_algo_names[];
+extern TYPELIB xtrabackup_encrypt_algo_typelib;
+
+extern int xtrabackup_parallel;
+
+extern my_bool xb_close_files;
+extern const char *xtrabackup_compress_alg;
+extern uint xtrabackup_compress_threads;
+extern ulonglong xtrabackup_compress_chunk_size;
+extern ulong xtrabackup_encrypt_algo;
+extern uint xtrabackup_encrypt_threads;
+extern ulonglong xtrabackup_encrypt_chunk_size;
+extern my_bool xtrabackup_export;
+extern char *xtrabackup_incremental_basedir;
+extern char *xtrabackup_extra_lsndir;
+extern char *xtrabackup_incremental_dir;
+extern ulint xtrabackup_log_copy_interval;
+extern my_bool xtrabackup_rebuild_indexes;
+extern char *xtrabackup_stream_str;
+extern long xtrabackup_throttle;
+extern longlong xtrabackup_use_memory;
+
+extern my_bool opt_galera_info;
+extern my_bool opt_slave_info;
+extern my_bool opt_no_lock;
+extern my_bool opt_safe_slave_backup;
+extern my_bool opt_rsync;
+extern my_bool opt_force_non_empty_dirs;
+extern my_bool opt_noversioncheck;
+extern my_bool opt_no_backup_locks;
+extern my_bool opt_decompress;
+extern my_bool opt_remove_original;
+
+extern char *opt_incremental_history_name;
+extern char *opt_incremental_history_uuid;
+
+extern char *opt_user;
+extern char *opt_password;
+extern char *opt_host;
+extern char *opt_defaults_group;
+extern char *opt_socket;
+extern uint opt_port;
+extern char *opt_login_path;
+extern char *opt_log_bin;
+
+extern const char *query_type_names[];
+
+enum query_type_t {QUERY_TYPE_ALL, QUERY_TYPE_UPDATE,
+ QUERY_TYPE_SELECT};
+
+extern TYPELIB query_type_typelib;
+
+extern ulong opt_lock_wait_query_type;
+extern ulong opt_kill_long_query_type;
+
+extern ulong opt_decrypt_algo;
+
+extern uint opt_kill_long_queries_timeout;
+extern uint opt_lock_wait_timeout;
+extern uint opt_lock_wait_threshold;
+extern uint opt_debug_sleep_before_unlock;
+extern uint opt_safe_slave_backup_timeout;
+
+extern const char *opt_history;
+extern my_bool opt_decrypt;
+
+#if defined(HAVE_OPENSSL)
+extern my_bool opt_use_ssl;
+extern my_bool opt_ssl_verify_server_cert;
+#if !defined(HAVE_YASSL)
+extern char *opt_server_public_key;
+#endif
+#endif
+
+enum binlog_info_enum { BINLOG_INFO_OFF, BINLOG_INFO_LOCKLESS, BINLOG_INFO_ON,
+ BINLOG_INFO_AUTO};
+
+extern ulong opt_binlog_info;
+
+void xtrabackup_io_throttling(void);
+my_bool xb_write_delta_metadata(const char *filename,
+ const xb_delta_info_t *info);
+
+datafiles_iter_t *datafiles_iter_new(fil_system_t *f_system);
+fil_node_t *datafiles_iter_next(datafiles_iter_t *it);
+void datafiles_iter_free(datafiles_iter_t *it);
+
+/************************************************************************
+Initialize the tablespace memory cache and populate it by scanning for and
+opening data files */
+ulint xb_data_files_init(void);
+
+/************************************************************************
+Destroy the tablespace memory cache. */
+void xb_data_files_close(void);
+
+/***********************************************************************
+Reads the space flags from a given data file and returns the compressed
+page size, or 0 if the space is not compressed. */
+ulint xb_get_zip_size(os_file_t file);
+
+/************************************************************************
+Checks if a table specified as a name in the form "database/name" (InnoDB 5.6)
+or "./database/name.ibd" (InnoDB 5.5-) should be skipped from backup based on
+the --tables or --tables-file options.
+
+@return TRUE if the table should be skipped. */
+my_bool
+check_if_skip_table(
+/******************/
+ const char* name); /*!< in: path to the table */
+
+/************************************************************************
+Check if parameter is set in defaults file or via command line argument
+@return true if parameter is set. */
+bool
+check_if_param_set(const char *param);
+
+
+void
+xtrabackup_backup_func(void);
+
+my_bool
+xb_get_one_option(int optid,
+ const struct my_option *opt __attribute__((unused)),
+ char *argument);
+
+const char*
+xb_get_copy_action(const char *dflt = "Copying");
+
+#endif /* XB_XTRABACKUP_H */
diff --git a/extra/mariabackup/xtrabackup_version.h.in b/extra/mariabackup/xtrabackup_version.h.in
new file mode 100644
index 00000000000..dc4c7992f8f
--- /dev/null
+++ b/extra/mariabackup/xtrabackup_version.h.in
@@ -0,0 +1,27 @@
+/******************************************************
+Copyright (c) 2013 Percona LLC and/or its affiliates.
+
+Version numbers definitions.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+*******************************************************/
+
+#ifndef XB_VERSION_H
+#define XB_VERSION_H
+
+#define XTRABACKUP_VERSION "@XB_VERSION@"
+#define XTRABACKUP_REVISION "@XB_REVISION@"
+
+#endif /* XB_VERSION_H */