334 files changed, 5234 insertions(+), 3075 deletions(-)
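Most of the hunks below make pointer-difference results and length arguments explicit about their width (casts to int/uint/size_t, APIs moved from unsigned int to size_t) so the tree builds cleanly as a 64-bit Windows target, with cmake/os/Windows.cmake promoting several pointer-truncation and format-string warnings to errors. The snippet below is an illustrative sketch only, not part of the patch, and the file path in it is made up; it shows the recurring pattern behind casts such as "(int)(last_fn_libchar - self_name)":

/* Sketch: subtracting two pointers yields a ptrdiff_t, which is 64-bit on
   Win64, so assigning the result to an int (or passing it as a %.*s
   precision) narrows it.  The patch spells that narrowing out explicitly. */
#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *self_name= "C:/mariadb/bin/mysql_upgrade.exe";  /* made-up path */
  const char *last_fn_libchar= strrchr(self_name, '/');

  /* int len= last_fn_libchar - self_name;        implicit 64->32 narrowing */
  int len= (int)(last_fn_libchar - self_name);  /* explicit, warning-free   */

  printf("tool directory: %.*s\n", len, self_name);
  return 0;
}

The same concern drives the unsigned int to size_t change in the thd_alloc plugin service headers further down, which is why VERSION_thd_alloc is bumped from 0x0100 to 0x0200.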
diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c
index 8163596653b..a6215d0a977 100644
--- a/client/mysql_upgrade.c
+++ b/client/mysql_upgrade.c
@@ -501,7 +501,7 @@ static void find_tool(char *tool_executable_name, const char *tool_name,
       last_fn_libchar -= 6;
   }
-  len= last_fn_libchar - self_name;
+  len= (int)(last_fn_libchar - self_name);
   my_snprintf(tool_executable_name, FN_REFLEN, "%.*s%c%s",
               len, self_name, FN_LIBCHAR, tool_name);
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index b0b0111c865..30d00706a9f 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -607,7 +607,7 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
 Exit_status Load_log_processor::process(Create_file_log_event *ce)
 {
   const char *bname= ce->fname + dirname_length(ce->fname);
-  uint blen= ce->fname_len - (bname-ce->fname);
+  size_t blen= ce->fname_len - (bname-ce->fname);
   return process_first_event(bname, blen, ce->block, ce->block_len,
                              ce->file_id, ce);
diff --git a/client/mysqldump.c b/client/mysqldump.c
index c8ebd2217a3..a260065c64c 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -247,8 +247,8 @@ static struct my_option my_long_options[] =
    &opt_slave_apply, &opt_slave_apply, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
   {"character-sets-dir", OPT_CHARSETS_DIR,
-   "Directory for character set files.", &charsets_dir,
-   &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+   "Directory for character set files.", (char **)&charsets_dir,
+   (char **)&charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   {"comments", 'i', "Write additional information.", &opt_comments,
    &opt_comments, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
@@ -285,8 +285,8 @@ static struct my_option my_long_options[] =
   {"debug", '#', "This is a non-debug version. Catch this and exit.",
    0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
 #else
-  {"debug", '#', "Output debug log.", &default_dbug_option,
-   &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+  {"debug", '#', "Output debug log.", (char *)&default_dbug_option,
+   (char *)&default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
 #endif
   {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.",
    &debug_check_flag, &debug_check_flag, 0,
@@ -5733,7 +5733,7 @@ static int replace(DYNAMIC_STRING *ds_str,
     return 1;
   init_dynamic_string_checked(&ds_tmp, "", ds_str->length + replace_len, 256);
-  dynstr_append_mem_checked(&ds_tmp, ds_str->str, start - ds_str->str);
+  dynstr_append_mem_checked(&ds_tmp, ds_str->str, (uint)(start - ds_str->str));
   dynstr_append_mem_checked(&ds_tmp, replace_str, replace_len);
   dynstr_append_checked(&ds_tmp, start + search_len);
   dynstr_set_checked(ds_str, ds_tmp.str);
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index a78bf35d51b..60e0939491c 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -587,8 +587,8 @@ static struct my_option my_long_options[] =
    &auto_generate_sql_number, &auto_generate_sql_number, 0, GET_ULL,
    REQUIRED_ARG, 100, 0, 0, 0, 0, 0},
   {"character-sets-dir", OPT_CHARSETS_DIR,
-   "Directory for character set files.", &charsets_dir,
-   &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+   "Directory for character set files.", (char **)&charsets_dir,
+   (char **)&charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   {"commit", OPT_SLAP_COMMIT, "Commit records every X number of statements.",
    &commit_rate, &commit_rate, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -817,7 +817,7 @@ get_random_string(char *buf)
   DBUG_ENTER("get_random_string");
   for (x= RAND_STRING_SIZE; x > 0; x--)
     *buf_ptr++= ALPHANUMERICS[random() % ALPHANUMERICS_SIZE];
-  DBUG_RETURN(buf_ptr - buf);
+  DBUG_RETURN((uint)(buf_ptr - buf));
 }
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 84d3164baa3..914c0ac8621 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -767,7 +767,7 @@ public:
     if (show_from != buf)
     {
       // The last new line was found in this buf, adjust offset
-      show_offset+= (show_from - buf) + 1;
+      show_offset+= (int)(show_from - buf) + 1;
       DBUG_PRINT("info", ("adjusted offset to %d", show_offset));
     }
     DBUG_PRINT("info", ("show_offset: %d", show_offset));
@@ -2700,7 +2700,7 @@ void var_query_set(VAR *var, const char *query, const char** query_end)
   DBUG_ASSERT(query_end);
   memset(&command, 0, sizeof(command));
   command.query= (char*)query;
-  command.first_word_len= (*query_end - query);
+  command.first_word_len= (int)(*query_end - query);
   command.first_argument= command.query + command.first_word_len;
   command.end= (char*)*query_end;
   command.abort_on_error= 1; /* avoid uninitialized variables */
@@ -6530,7 +6530,7 @@ void do_delimiter(struct st_command* command)
   if (!(*p))
     die("Can't set empty delimiter");
-  delimiter_length= strmake_buf(delimiter, p) - delimiter;
+  delimiter_length= (uint)(strmake_buf(delimiter, p) - delimiter);
   DBUG_PRINT("exit", ("delimiter: %s", delimiter));
   command->last_argument= p + delimiter_length;
@@ -7016,7 +7016,7 @@ int read_command(struct st_command** command_ptr)
   command->first_argument= p;
   command->end= strend(command->query);
-  command->query_len= (command->end - command->query);
+  command->query_len= (int)(command->end - command->query);
   parser.read_lines++;
   DBUG_RETURN(0);
 }
@@ -7561,7 +7561,7 @@ void fix_win_paths(char *val, size_t len)
       DBUG_PRINT("info", ("Converted \\ to /,
p: %s", p)); } } - DBUG_PRINT("exit", (" val: %s, len: %d", val, len)); + DBUG_PRINT("exit", (" val: %s, len: %zu", val, len)); DBUG_VOID_RETURN; #endif } @@ -7573,7 +7573,7 @@ void fix_win_paths(char *val, size_t len) */ void append_field(DYNAMIC_STRING *ds, uint col_idx, MYSQL_FIELD* field, - char* val, ulonglong len, my_bool is_null) + char* val, size_t len, my_bool is_null) { char null[]= "NULL"; @@ -8569,7 +8569,7 @@ void run_query(struct st_connection *cn, struct st_command *command, int flags) if (flags & QUERY_PRINT_ORIGINAL_FLAG) { print_query= command->query; - print_len= command->end - command->query; + print_len= (int)(command->end - command->query); } replace_dynstr_append_mem(ds, print_query, print_len); dynstr_append_mem(ds, delimiter, delimiter_length); @@ -10266,7 +10266,7 @@ void free_replace_regex() */ #define SECURE_REG_BUF if (buf_len < need_buf_len) \ { \ - int off= res_p - buf; \ + ssize_t off= res_p - buf; \ buf= (char*)my_realloc(buf,need_buf_len,MYF(MY_WME+MY_FAE)); \ res_p= buf + off; \ buf_len= need_buf_len; \ @@ -10291,13 +10291,15 @@ int reg_replace(char** buf_p, int* buf_len_p, char *pattern, regmatch_t *subs; char *replace_end; char *buf= *buf_p; - int len; - int buf_len, need_buf_len; + size_t len; + size_t buf_len, need_buf_len; int cflags= REG_EXTENDED | REG_DOTALL; int err_code; char *res_p,*str_p,*str_end; - buf_len= *buf_len_p; + DBUG_ASSERT(*buf_len_p > 0); + + buf_len= (size_t)*buf_len_p; len= strlen(string); str_end= string + len; @@ -10440,7 +10442,7 @@ int reg_replace(char** buf_p, int* buf_len_p, char *pattern, } else /* no match this time, just copy the string as is */ { - int left_in_str= str_end-str_p; + size_t left_in_str= str_end-str_p; need_buf_len= (res_p-buf) + left_in_str; SECURE_REG_BUF memcpy(res_p,str_p,left_in_str); diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 37fd204ed72..021f23a14f2 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -139,8 +139,8 @@ IF(MSVC) ENDIF() #TODO: update the code and remove the disabled warnings - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996 /we4700") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /wd4291 /wd4577 /we4099 /we4700") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996 /we4700 /we4311 /we4477 /we4302 /we4090 /wd4267 ") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /wd4291 /wd4577 /we4099 /we4700 /we4311 /we4477 /we4302 /we4090 /wd4267") ENDIF() diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake index 14976cb44c2..1d8c568163a 100644 --- a/cmake/os/WindowsCache.cmake +++ b/cmake/os/WindowsCache.cmake @@ -89,7 +89,7 @@ SET(HAVE_LDIV 1 CACHE INTERNAL "") SET(HAVE_LIMITS_H 1 CACHE INTERNAL "") SET(HAVE_LOCALE_H 1 CACHE INTERNAL "") SET(HAVE_LOCALTIME_R 1 CACHE INTERNAL "") -SET(HAVE_LOG2 CACHE INTERNAL "") +#SET(HAVE_LOG2 CACHE INTERNAL "") SET(HAVE_LRAND48 CACHE INTERNAL "") SET(HAVE_LSTAT CACHE INTERNAL "") SET(HAVE_MADVISE CACHE INTERNAL "") @@ -140,7 +140,7 @@ SET(HAVE_READLINK CACHE INTERNAL "") SET(HAVE_READ_REAL_TIME CACHE INTERNAL "") SET(HAVE_REALPATH CACHE INTERNAL "") SET(HAVE_RENAME 1 CACHE INTERNAL "") -SET(HAVE_RINT CACHE INTERNAL "") +#SET(HAVE_RINT CACHE INTERNAL "") SET(HAVE_RWLOCK_INIT CACHE INTERNAL "") SET(HAVE_SCHED_H CACHE INTERNAL "") SET(HAVE_SCHED_YIELD CACHE INTERNAL "") diff --git a/dbug/dbug.c b/dbug/dbug.c index 2fe3fbcd761..e4125a3efea 100644 --- a/dbug/dbug.c +++ b/dbug/dbug.c @@ -1369,8 +1369,8 @@ void _db_dump_(uint _line_, const char *keyword, 
{ fprintf(cs->stack->out_file->file, "%s: ", cs->func); } - (void) fprintf(cs->stack->out_file->file, "%s: Memory: 0x%lx Bytes: (%ld)\n", - keyword, (ulong) memory, (long) length); + (void) fprintf(cs->stack->out_file->file, "%s: Memory: %p Bytes: (%ld)\n", + keyword, memory, (long) length); pos=0; while (length-- > 0) diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc index a0976970db7..b937cc6fa1a 100644 --- a/extra/innochecksum.cc +++ b/extra/innochecksum.cc @@ -93,7 +93,7 @@ static bool use_end_page; static bool do_one_page; static my_bool do_leaf; static my_bool per_page_details; -static ulong n_merge; +static ulint n_merge; extern ulong srv_checksum_algorithm; static ulong physical_page_size; /* Page size in bytes on disk. */ static ulong logical_page_size; /* Page size when uncompressed. */ @@ -524,7 +524,7 @@ is_page_corrupted( normal method. */ if (is_encrypted && key_version != 0) { is_corrupted = !fil_space_verify_crypt_checksum(buf, - page_size, space_id, cur_page_num); + page_size, space_id, (ulint)cur_page_num); } else { is_corrupted = true; } @@ -1538,7 +1538,7 @@ int main( byte* buf = NULL; byte* xdes = NULL; /* bytes read count */ - ulong bytes; + ulint bytes; /* current time */ time_t now; /* last time */ @@ -1675,7 +1675,7 @@ int main( } /* Read the minimum page size. */ - bytes = ulong(fread(buf, 1, UNIV_ZIP_SIZE_MIN, fil_in)); + bytes = fread(buf, 1, UNIV_ZIP_SIZE_MIN, fil_in); partial_page_read = true; if (bytes != UNIV_ZIP_SIZE_MIN) { diff --git a/extra/mariabackup/xbstream.c b/extra/mariabackup/xbstream.c index 65257664e54..03bd2fda646 100644 --- a/extra/mariabackup/xbstream.c +++ b/extra/mariabackup/xbstream.c @@ -531,8 +531,8 @@ mode_extract(int n_threads, int argc __attribute__((unused)), ctxt.ds_ctxt = ds_ctxt; ctxt.mutex = &mutex; - tids = malloc(sizeof(pthread_t) * n_threads); - retvals = malloc(sizeof(void*) * n_threads); + tids = calloc(n_threads, sizeof(pthread_t)); + retvals = calloc(n_threads, sizeof(void*)); for (i = 0; i < n_threads; i++) pthread_create(tids + i, NULL, extract_worker_thread_func, @@ -542,7 +542,7 @@ mode_extract(int n_threads, int argc __attribute__((unused)), pthread_join(tids[i], retvals + i); for (i = 0; i < n_threads; i++) { - if ((ulong)retvals[i] == XB_STREAM_READ_ERROR) { + if ((size_t)retvals[i] == XB_STREAM_READ_ERROR) { ret = 1; goto exit; } diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 5c1584d0a86..c6009fc7014 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -1857,9 +1857,9 @@ xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info) MY_STAT mystat; snprintf(buf, sizeof(buf), - "page_size = %lu\n" - "zip_size = %lu\n" - "space_id = %lu\n", + "page_size = " ULINTPF "\n" + "zip_size = " ULINTPF " \n" + "space_id = " ULINTPF "\n", info->page_size.logical(), info->page_size.is_compressed() ? 
info->page_size.physical() : 0, @@ -4172,7 +4172,7 @@ exit: char tmpname[FN_REFLEN]; - snprintf(tmpname, FN_REFLEN, "%s/xtrabackup_tmp_#%lu", + snprintf(tmpname, FN_REFLEN, "%s/xtrabackup_tmp_#" ULINTPF, dbname, fil_space->id); msg("xtrabackup: Renaming %s to %s.ibd\n", diff --git a/extra/yassl/taocrypt/src/algebra.cpp b/extra/yassl/taocrypt/src/algebra.cpp index 7dae7d6a917..892de25587e 100644 --- a/extra/yassl/taocrypt/src/algebra.cpp +++ b/extra/yassl/taocrypt/src/algebra.cpp @@ -218,7 +218,7 @@ struct WindowSlider exp >>= skipCount; windowBegin += skipCount; - expWindow = exp % (1 << windowSize); + expWindow = (unsigned int)(exp % (1LL << windowSize)); if (fastNegate && exp.GetBit(windowSize)) { @@ -248,7 +248,7 @@ void AbstractGroup::SimultaneousMultiply(Integer *results, const Integer &base, { exponents.push_back(WindowSlider(*expBegin++, InversionIsFast(), 0)); exponents[i].FindNextWindow(); - buckets[i].resize(1<<(exponents[i].windowSize-1), Identity()); + buckets[i].resize(size_t(1)<<(exponents[i].windowSize-1), Identity()); } unsigned int expBitPosition = 0; diff --git a/include/my_compiler.h b/include/my_compiler.h index fbb4b959e8d..7db070d57e1 100644 --- a/include/my_compiler.h +++ b/include/my_compiler.h @@ -159,7 +159,7 @@ marked as unlikely by the branch prediction mechanism. optimize a rarely invoked function for size instead for speed. */ # define ATTRIBUTE_COLD __attribute__((cold)) # endif -#elif defined _WIN32 +#elif defined _MSC_VER # define ATTRIBUTE_NORETURN __declspec(noreturn) #else # define ATTRIBUTE_NORETURN /* empty */ diff --git a/include/mysql/plugin_audit.h.pp b/include/mysql/plugin_audit.h.pp index 1d82197c321..02fe3b75227 100644 --- a/include/mysql/plugin_audit.h.pp +++ b/include/mysql/plugin_audit.h.pp @@ -277,23 +277,23 @@ struct st_mysql_const_lex_string }; typedef struct st_mysql_const_lex_string MYSQL_CONST_LEX_STRING; extern struct thd_alloc_service_st { - void *(*thd_alloc_func)(void*, unsigned int); - void *(*thd_calloc_func)(void*, unsigned int); + void *(*thd_alloc_func)(void*, size_t); + void *(*thd_calloc_func)(void*, size_t); char *(*thd_strdup_func)(void*, const char *); - char *(*thd_strmake_func)(void*, const char *, unsigned int); - void *(*thd_memdup_func)(void*, const void*, unsigned int); + char *(*thd_strmake_func)(void*, const char *, size_t); + void *(*thd_memdup_func)(void*, const void*, size_t); MYSQL_CONST_LEX_STRING *(*thd_make_lex_string_func)(void*, MYSQL_CONST_LEX_STRING *, - const char *, unsigned int, int); + const char *, size_t, int); } *thd_alloc_service; -void *thd_alloc(void* thd, unsigned int size); -void *thd_calloc(void* thd, unsigned int size); +void *thd_alloc(void* thd, size_t size); +void *thd_calloc(void* thd, size_t size); char *thd_strdup(void* thd, const char *str); -char *thd_strmake(void* thd, const char *str, unsigned int size); -void *thd_memdup(void* thd, const void* str, unsigned int size); +char *thd_strmake(void* thd, const char *str, size_t size); +void *thd_memdup(void* thd, const void* str, size_t size); MYSQL_CONST_LEX_STRING *thd_make_lex_string(void* thd, MYSQL_CONST_LEX_STRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string); extern struct thd_autoinc_service_st { void (*thd_get_autoinc_func)(const void* thd, diff --git a/include/mysql/plugin_auth.h.pp b/include/mysql/plugin_auth.h.pp index 0c196b1ad0b..6aba0ddb889 100644 --- a/include/mysql/plugin_auth.h.pp +++ b/include/mysql/plugin_auth.h.pp @@ -277,23 +277,23 @@ struct 
st_mysql_const_lex_string }; typedef struct st_mysql_const_lex_string MYSQL_CONST_LEX_STRING; extern struct thd_alloc_service_st { - void *(*thd_alloc_func)(void*, unsigned int); - void *(*thd_calloc_func)(void*, unsigned int); + void *(*thd_alloc_func)(void*, size_t); + void *(*thd_calloc_func)(void*, size_t); char *(*thd_strdup_func)(void*, const char *); - char *(*thd_strmake_func)(void*, const char *, unsigned int); - void *(*thd_memdup_func)(void*, const void*, unsigned int); + char *(*thd_strmake_func)(void*, const char *, size_t); + void *(*thd_memdup_func)(void*, const void*, size_t); MYSQL_CONST_LEX_STRING *(*thd_make_lex_string_func)(void*, MYSQL_CONST_LEX_STRING *, - const char *, unsigned int, int); + const char *, size_t, int); } *thd_alloc_service; -void *thd_alloc(void* thd, unsigned int size); -void *thd_calloc(void* thd, unsigned int size); +void *thd_alloc(void* thd, size_t size); +void *thd_calloc(void* thd, size_t size); char *thd_strdup(void* thd, const char *str); -char *thd_strmake(void* thd, const char *str, unsigned int size); -void *thd_memdup(void* thd, const void* str, unsigned int size); +char *thd_strmake(void* thd, const char *str, size_t size); +void *thd_memdup(void* thd, const void* str, size_t size); MYSQL_CONST_LEX_STRING *thd_make_lex_string(void* thd, MYSQL_CONST_LEX_STRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string); extern struct thd_autoinc_service_st { void (*thd_get_autoinc_func)(const void* thd, diff --git a/include/mysql/plugin_encryption.h.pp b/include/mysql/plugin_encryption.h.pp index 699296994c2..1f7b4e90908 100644 --- a/include/mysql/plugin_encryption.h.pp +++ b/include/mysql/plugin_encryption.h.pp @@ -277,23 +277,23 @@ struct st_mysql_const_lex_string }; typedef struct st_mysql_const_lex_string MYSQL_CONST_LEX_STRING; extern struct thd_alloc_service_st { - void *(*thd_alloc_func)(void*, unsigned int); - void *(*thd_calloc_func)(void*, unsigned int); + void *(*thd_alloc_func)(void*, size_t); + void *(*thd_calloc_func)(void*, size_t); char *(*thd_strdup_func)(void*, const char *); - char *(*thd_strmake_func)(void*, const char *, unsigned int); - void *(*thd_memdup_func)(void*, const void*, unsigned int); + char *(*thd_strmake_func)(void*, const char *, size_t); + void *(*thd_memdup_func)(void*, const void*, size_t); MYSQL_CONST_LEX_STRING *(*thd_make_lex_string_func)(void*, MYSQL_CONST_LEX_STRING *, - const char *, unsigned int, int); + const char *, size_t, int); } *thd_alloc_service; -void *thd_alloc(void* thd, unsigned int size); -void *thd_calloc(void* thd, unsigned int size); +void *thd_alloc(void* thd, size_t size); +void *thd_calloc(void* thd, size_t size); char *thd_strdup(void* thd, const char *str); -char *thd_strmake(void* thd, const char *str, unsigned int size); -void *thd_memdup(void* thd, const void* str, unsigned int size); +char *thd_strmake(void* thd, const char *str, size_t size); +void *thd_memdup(void* thd, const void* str, size_t size); MYSQL_CONST_LEX_STRING *thd_make_lex_string(void* thd, MYSQL_CONST_LEX_STRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string); extern struct thd_autoinc_service_st { void (*thd_get_autoinc_func)(const void* thd, diff --git a/include/mysql/plugin_ftparser.h.pp b/include/mysql/plugin_ftparser.h.pp index e67211e5108..afc4a597b0f 100644 --- a/include/mysql/plugin_ftparser.h.pp +++ b/include/mysql/plugin_ftparser.h.pp @@ -277,23 +277,23 @@ struct st_mysql_const_lex_string }; 
typedef struct st_mysql_const_lex_string MYSQL_CONST_LEX_STRING; extern struct thd_alloc_service_st { - void *(*thd_alloc_func)(void*, unsigned int); - void *(*thd_calloc_func)(void*, unsigned int); + void *(*thd_alloc_func)(void*, size_t); + void *(*thd_calloc_func)(void*, size_t); char *(*thd_strdup_func)(void*, const char *); - char *(*thd_strmake_func)(void*, const char *, unsigned int); - void *(*thd_memdup_func)(void*, const void*, unsigned int); + char *(*thd_strmake_func)(void*, const char *, size_t); + void *(*thd_memdup_func)(void*, const void*, size_t); MYSQL_CONST_LEX_STRING *(*thd_make_lex_string_func)(void*, MYSQL_CONST_LEX_STRING *, - const char *, unsigned int, int); + const char *, size_t, int); } *thd_alloc_service; -void *thd_alloc(void* thd, unsigned int size); -void *thd_calloc(void* thd, unsigned int size); +void *thd_alloc(void* thd, size_t size); +void *thd_calloc(void* thd, size_t size); char *thd_strdup(void* thd, const char *str); -char *thd_strmake(void* thd, const char *str, unsigned int size); -void *thd_memdup(void* thd, const void* str, unsigned int size); +char *thd_strmake(void* thd, const char *str, size_t size); +void *thd_memdup(void* thd, const void* str, size_t size); MYSQL_CONST_LEX_STRING *thd_make_lex_string(void* thd, MYSQL_CONST_LEX_STRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string); extern struct thd_autoinc_service_st { void (*thd_get_autoinc_func)(const void* thd, diff --git a/include/mysql/plugin_password_validation.h.pp b/include/mysql/plugin_password_validation.h.pp index d19a4fd74b2..0d2c0719b87 100644 --- a/include/mysql/plugin_password_validation.h.pp +++ b/include/mysql/plugin_password_validation.h.pp @@ -277,23 +277,23 @@ struct st_mysql_const_lex_string }; typedef struct st_mysql_const_lex_string MYSQL_CONST_LEX_STRING; extern struct thd_alloc_service_st { - void *(*thd_alloc_func)(void*, unsigned int); - void *(*thd_calloc_func)(void*, unsigned int); + void *(*thd_alloc_func)(void*, size_t); + void *(*thd_calloc_func)(void*, size_t); char *(*thd_strdup_func)(void*, const char *); - char *(*thd_strmake_func)(void*, const char *, unsigned int); - void *(*thd_memdup_func)(void*, const void*, unsigned int); + char *(*thd_strmake_func)(void*, const char *, size_t); + void *(*thd_memdup_func)(void*, const void*, size_t); MYSQL_CONST_LEX_STRING *(*thd_make_lex_string_func)(void*, MYSQL_CONST_LEX_STRING *, - const char *, unsigned int, int); + const char *, size_t, int); } *thd_alloc_service; -void *thd_alloc(void* thd, unsigned int size); -void *thd_calloc(void* thd, unsigned int size); +void *thd_alloc(void* thd, size_t size); +void *thd_calloc(void* thd, size_t size); char *thd_strdup(void* thd, const char *str); -char *thd_strmake(void* thd, const char *str, unsigned int size); -void *thd_memdup(void* thd, const void* str, unsigned int size); +char *thd_strmake(void* thd, const char *str, size_t size); +void *thd_memdup(void* thd, const void* str, size_t size); MYSQL_CONST_LEX_STRING *thd_make_lex_string(void* thd, MYSQL_CONST_LEX_STRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string); extern struct thd_autoinc_service_st { void (*thd_get_autoinc_func)(const void* thd, diff --git a/include/mysql/service_thd_alloc.h b/include/mysql/service_thd_alloc.h index 8d6ed8e4190..4897fcbea9c 100644 --- a/include/mysql/service_thd_alloc.h +++ b/include/mysql/service_thd_alloc.h @@ -50,14 +50,14 @@ struct st_mysql_const_lex_string 
typedef struct st_mysql_const_lex_string MYSQL_CONST_LEX_STRING; extern struct thd_alloc_service_st { - void *(*thd_alloc_func)(MYSQL_THD, unsigned int); - void *(*thd_calloc_func)(MYSQL_THD, unsigned int); + void *(*thd_alloc_func)(MYSQL_THD, size_t); + void *(*thd_calloc_func)(MYSQL_THD, size_t); char *(*thd_strdup_func)(MYSQL_THD, const char *); - char *(*thd_strmake_func)(MYSQL_THD, const char *, unsigned int); - void *(*thd_memdup_func)(MYSQL_THD, const void*, unsigned int); + char *(*thd_strmake_func)(MYSQL_THD, const char *, size_t); + void *(*thd_memdup_func)(MYSQL_THD, const void*, size_t); MYSQL_CONST_LEX_STRING *(*thd_make_lex_string_func)(MYSQL_THD, MYSQL_CONST_LEX_STRING *, - const char *, unsigned int, int); + const char *, size_t, int); } *thd_alloc_service; #ifdef MYSQL_DYNAMIC_PLUGIN @@ -92,11 +92,11 @@ extern struct thd_alloc_service_st { @see alloc_root() */ -void *thd_alloc(MYSQL_THD thd, unsigned int size); +void *thd_alloc(MYSQL_THD thd, size_t size); /** @see thd_alloc() */ -void *thd_calloc(MYSQL_THD thd, unsigned int size); +void *thd_calloc(MYSQL_THD thd, size_t size); /** @see thd_alloc() */ @@ -104,11 +104,11 @@ char *thd_strdup(MYSQL_THD thd, const char *str); /** @see thd_alloc() */ -char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size); +char *thd_strmake(MYSQL_THD thd, const char *str, size_t size); /** @see thd_alloc() */ -void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size); +void *thd_memdup(MYSQL_THD thd, const void* str, size_t size); /** Create a LEX_STRING in this connection's local memory pool @@ -125,7 +125,7 @@ void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size); */ MYSQL_CONST_LEX_STRING *thd_make_lex_string(MYSQL_THD thd, MYSQL_CONST_LEX_STRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string); #endif diff --git a/include/mysql_async.h b/include/mysql_async.h index 04b975211db..32c1116dc58 100644 --- a/include/mysql_async.h +++ b/include/mysql_async.h @@ -21,9 +21,9 @@ extern int my_connect_async(struct mysql_async_context *b, my_socket fd, const struct sockaddr *name, uint namelen, int vio_timeout); -extern ssize_t my_recv_async(struct mysql_async_context *b, int fd, +extern ssize_t my_recv_async(struct mysql_async_context *b, my_socket fd, unsigned char *buf, size_t size, int timeout); -extern ssize_t my_send_async(struct mysql_async_context *b, int fd, +extern ssize_t my_send_async(struct mysql_async_context *b, my_socket fd, const unsigned char *buf, size_t size, int timeout); extern my_bool my_io_wait_async(struct mysql_async_context *b, diff --git a/include/service_versions.h b/include/service_versions.h index da7a2101f76..582b3912355 100644 --- a/include/service_versions.h +++ b/include/service_versions.h @@ -34,7 +34,7 @@ #define VERSION_my_sha2 0x0100 #define VERSION_my_snprintf 0x0100 #define VERSION_progress_report 0x0100 -#define VERSION_thd_alloc 0x0100 +#define VERSION_thd_alloc 0x0200 #define VERSION_thd_autoinc 0x0100 #define VERSION_thd_error_context 0x0200 #define VERSION_thd_rnd 0x0100 diff --git a/libmariadb b/libmariadb -Subproject ba8310e82dac659482f04cd2e270e666da072f9 +Subproject 931450c3f8793f2653f6292847cbc005b30ed3b diff --git a/libmysqld/libmysql.c b/libmysqld/libmysql.c index 13dbc78aedd..4b0b13862eb 100644 --- a/libmysqld/libmysql.c +++ b/libmysqld/libmysql.c @@ -2053,9 +2053,9 @@ static my_bool store_param(MYSQL_STMT *stmt, MYSQL_BIND *param) { NET *net= &stmt->mysql->net; DBUG_ENTER("store_param"); - 
DBUG_PRINT("enter",("type: %d buffer: 0x%lx length: %lu is_null: %d", + DBUG_PRINT("enter",("type: %d buffer:%p length: %lu is_null: %d", param->buffer_type, - (long) (param->buffer ? param->buffer : NullS), + param->buffer, *param->length, *param->is_null)); if (*param->is_null) @@ -2979,8 +2979,8 @@ mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, MYSQL_BIND *param; DBUG_ENTER("mysql_stmt_send_long_data"); DBUG_ASSERT(stmt != 0); - DBUG_PRINT("enter",("param no: %d data: 0x%lx, length : %ld", - param_number, (long) data, length)); + DBUG_PRINT("enter",("param no: %d data: %p, length : %ld", + param_number, data, length)); /* We only need to check for stmt->param_count, if it's not null @@ -3249,7 +3249,7 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, ulong copy_length; if (start < end) { - copy_length= end - start; + copy_length= (ulong)(end - start); /* We've got some data beyond offset: copy up to buffer_length bytes */ if (param->buffer_length) memcpy(buffer, start, MY_MIN(copy_length, param->buffer_length)); diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c index 36728cf573c..d1771a09578 100644 --- a/libmysqld/libmysqld.c +++ b/libmysqld/libmysqld.c @@ -205,7 +205,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, } } - DBUG_PRINT("exit",("Mysql handler: 0x%lx", (long) mysql)); + DBUG_PRINT("exit",("Mysql handler: %p", mysql)); DBUG_RETURN(mysql); error: diff --git a/mysql-test/include/update_use_source.inc b/mysql-test/include/update_use_source.inc new file mode 100644 index 00000000000..864b58e5d7f --- /dev/null +++ b/mysql-test/include/update_use_source.inc @@ -0,0 +1,147 @@ +# Include to test update with same table as source and target + +--echo # +--echo # Update a with value from subquery on the same table, no search clause. 
ALL access +--echo # + +start transaction; +--enable_info ONCE +update t1 + set c1=(select a.c3 + from t1 a + where a.c3 = t1.c3); +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +rollback; + +--echo # +--echo # Update with search clause on the same table +--echo # + +start transaction; +--enable_info ONCE +update t1 + set c1=10 + where c1 <2 + and exists (select 'X' + from t1 a + where a.c1 = t1.c1); +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +rollback; + +--echo # +--echo # Update via RANGE or INDEX access if an index or a primary key exists +--echo # + +explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3; +start transaction; +--enable_info ONCE +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +rollback; + +--echo # +--echo # Update with order by +--echo # + +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 order by c2; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +rollback; + +--echo # +--echo Update using a view in subquery +--echo # + +start transaction; +--enable_info ONCE +update t1 + set c1=c1 +(select max(a.c2) + from v1 a + where a.c1 = t1.c1) ; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +rollback; + +--echo # +--echo # Update throw a view +--echo # + +start transaction; +--enable_info ONCE +update v1 + set c1=c1 + (select max(a.c2) + from t1 a + where a.c1 = v1.c1) +10 +where c3 > 3; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +rollback; + +--echo # +--echo # Update through a view and using the view in subquery +--echo # + +start transaction; +--enable_info ONCE +update v1 + set c1=c1 + 1 + where c1 <2 + and exists (select 'X' + from v1 a + where a.c1 = v1.c1); +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +rollback; + +--echo # +--echo # Update through a view and using the view in subquery +--echo # + +start transaction; +--enable_info ONCE +update v1 + set c1=(select max(a.c1)+10 + from v1 a + where a.c1 = v1.c1) + where c1 <10 + and exists (select 'X' + from v1 a + where a.c2 = v1.c2); +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +rollback; + +--echo # +--echo # Update of the index or primary key (c3) +--echo # + +start transaction; +explain update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +--enable_info ONCE +update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +select c3 from t1; +rollback; + +--echo # +--echo # update with a limit +--echo # + +start transaction; +--enable_info ONCE +update t1 + set c1=(select a.c3 + from t1 a + where a.c3 = t1.c3) + limit 2; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +rollback; + +--echo # +--echo # update with a limit and an order by +--echo # + +start transaction; +--enable_info ONCE +update t1 + set c1=(select a.c3 + from t1 a + where a.c3 = t1.c3) + order by c3 desc limit 2; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +rollback; diff --git 
a/mysql-test/r/func_int.result b/mysql-test/r/func_int.result new file mode 100644 index 00000000000..05573858b17 --- /dev/null +++ b/mysql-test/r/func_int.result @@ -0,0 +1,134 @@ +# +# Start of 10.3 tests +# +# +# MDEV-13965 Parameter data type control for Item_longlong_func +# +SELECT ROW(1,1) | 1; +ERROR HY000: Illegal parameter data type row for operation '|' +SELECT 1 | ROW(1,1); +ERROR HY000: Illegal parameter data type row for operation '|' +SELECT ROW(1,1) & 1; +ERROR HY000: Illegal parameter data type row for operation '&' +SELECT 1 & ROW(1,1); +ERROR HY000: Illegal parameter data type row for operation '&' +SELECT ROW(1,1) << 1; +ERROR HY000: Illegal parameter data type row for operation '<<' +SELECT 1 << ROW(1,1); +ERROR HY000: Illegal parameter data type row for operation '<<' +SELECT ROW(1,1) >> 1; +ERROR HY000: Illegal parameter data type row for operation '>>' +SELECT 1 >> ROW(1,1); +ERROR HY000: Illegal parameter data type row for operation '>>' +SELECT ~ROW(1,1); +ERROR HY000: Illegal parameter data type row for operation '~' +SELECT TO_SECONDS(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'to_seconds' +SELECT TIMESTAMPDIFF(SECOND,ROW(1,1), 1); +ERROR HY000: Illegal parameter data type row for operation 'timestampdiff' +SELECT TIMESTAMPDIFF(SECOND,1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'timestampdiff' +SELECT INET_ATON(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'inet_aton' +SELECT LAST_INSERT_ID(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'last_insert_id' +# +# MDEV-13967 Parameter data type control for Item_long_func +# +SELECT STRCMP(ROW(1,1),''); +ERROR HY000: Illegal parameter data type row for operation 'strcmp' +SELECT STRCMP('',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'strcmp' +SELECT CHAR_LENGTH(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'char_length' +SELECT OCTET_LENGTH(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'octet_length' +SELECT UNCOMPRESSED_LENGTH(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'uncompressed_length' +SELECT COERCIBILITY(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'coercibility' +SELECT ASCII(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'ascii' +SELECT CRC32(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'crc32' +SELECT ORD(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'ord' +SELECT SIGN(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'sign' +SELECT LOCATE(ROW(1,1),'a',1); +ERROR HY000: Illegal parameter data type row for operation 'locate' +SELECT LOCATE('a',ROW(1,1),1); +ERROR HY000: Illegal parameter data type row for operation 'locate' +SELECT LOCATE('a','a',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'locate' +SELECT BIT_COUNT(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'bit_count' +SELECT BENCHMARK(1, ROW(1,1)); +ERROR 21000: Operand should contain 1 column(s) +SELECT BENCHMARK(ROW(1,1),''); +ERROR HY000: Illegal parameter data type row for operation 'benchmark' +SELECT SLEEP(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'sleep' +SELECT GET_LOCK('x', ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'get_lock' +SELECT GET_LOCK(ROW(1,1),'x'); +ERROR HY000: Illegal parameter data type row for 
operation 'get_lock' +SELECT PERIOD_ADD(ROW(1,1),1); +ERROR HY000: Illegal parameter data type row for operation 'period_add' +SELECT PERIOD_ADD(1,ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'period_add' +SELECT PERIOD_DIFF(ROW(1,1),1); +ERROR HY000: Illegal parameter data type row for operation 'period_diff' +SELECT PERIOD_DIFF(1,ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'period_diff' +SELECT TO_DAYS(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'to_days' +SELECT DAYOFMONTH(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'dayofmonth' +SELECT DAYOFYEAR(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'dayofyear' +SELECT QUARTER(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'quarter' +SELECT YEAR(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'year' +SELECT YEARWEEK(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'yearweek' +SELECT WEEK(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'week' +SELECT WEEK(ROW(1,1),1); +ERROR HY000: Illegal parameter data type row for operation 'week' +SELECT WEEK(1,ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'week' +SELECT HOUR(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'hour' +SELECT MINUTE(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'minute' +SELECT SECOND(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'second' +SELECT MICROSECOND(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'microsecond' +SELECT JSON_DEPTH(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'json_depth' +SELECT JSON_LENGTH(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'json_length' +SELECT JSON_LENGTH('json', ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'json_length' +SELECT JSON_LENGTH(ROW(1,1), ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'json_length' +SELECT REGEXP_INSTR(ROW(1,1),''); +ERROR HY000: Illegal parameter data type row for operation 'regexp_instr' +SELECT REGEXP_INSTR('',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'regexp_instr' +SELECT FIND_IN_SET(ROW(1,1),''); +ERROR HY000: Illegal parameter data type row for operation 'find_in_set' +SELECT FIND_IN_SET('',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'find_in_set' +SELECT RELEASE_LOCK(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'release_lock' +SELECT IS_FREE_LOCK(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'is_free_lock' +SELECT IS_USED_LOCK(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'is_used_lock' +# +# End of 10.3 tests +# diff --git a/mysql-test/r/func_regexp.result b/mysql-test/r/func_regexp.result index 53c58d5dddb..187df785f69 100644 --- a/mysql-test/r/func_regexp.result +++ b/mysql-test/r/func_regexp.result @@ -157,3 +157,9 @@ SELECT ' ' REGEXP '[[:space:]]'; SELECT '\t' REGEXP '[[:space:]]'; '\t' REGEXP '[[:space:]]' 1 +# +# MDEV-13967 Parameter data type control for Item_long_func +# +SELECT REGEXP_INSTR('111222333',2); +REGEXP_INSTR('111222333',2) +4 diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index fb0179026c9..9da6caef8e6 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ 
-3294,3 +3294,34 @@ t1 CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; SET sql_mode=DEFAULT; +# +# MDEV-13966 Parameter data type control for Item_temporal_func +# +SELECT FROM_DAYS(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'from_days' +SELECT MAKEDATE(ROW(1,1),1); +ERROR HY000: Illegal parameter data type row for operation 'makedate' +SELECT MAKEDATE(1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'makedate' +SELECT LAST_DAY(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'last_day' +SELECT SEC_TO_TIME(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'sec_to_time' +SELECT TIMEDIFF(ROW(1,1),1); +ERROR HY000: Illegal parameter data type row for operation 'timediff' +SELECT TIMEDIFF(1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'timediff' +SELECT MAKETIME(ROW(1,1),1,1); +ERROR HY000: Illegal parameter data type row for operation 'maketime' +SELECT MAKETIME(1, ROW(1,1), 1); +ERROR HY000: Illegal parameter data type row for operation 'maketime' +SELECT MAKETIME(1, 1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'maketime' +SELECT FROM_UNIXTIME(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'from_unixtime' +SELECT CONVERT_TZ(ROW(1,1),1,1); +ERROR HY000: Illegal parameter data type row for operation 'convert_tz' +SELECT CONVERT_TZ(1, ROW(1,1), 1); +ERROR HY000: Illegal parameter data type row for operation 'convert_tz' +SELECT CONVERT_TZ(1, 1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'convert_tz' diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index c9a2a83edd1..fe67da8001f 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -4668,5 +4668,255 @@ ERROR HY000: Illegal parameter data type varchar for operation 'st_touches' SELECT MBRTOUCHES(POINT(1,1), 'test'); ERROR HY000: Illegal parameter data type varchar for operation 'st_touches' # +# MDEV-13964 Parameter data type control for Item_real_func +# +SELECT EXP(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'exp' +SELECT LN(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'ln' +SELECT LOG2(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'log2' +SELECT LOG10(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'log10' +SELECT SQRT(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'sqrt' +SELECT ACOS(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'acos' +SELECT ASIN(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'asin' +SELECT COS(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'cos' +SELECT SIN(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'sin' +SELECT TAN(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'tan' +SELECT COT(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'cot' +SELECT LOG(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'log' +SELECT LOG(POINT(1,1),POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'log' +SELECT LOG(1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'log' +SELECT ATAN(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 
'atan' +SELECT ATAN(POINT(1,1),POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'atan' +SELECT ATAN(1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'atan' +SELECT POW(POINT(1,1),POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'pow' +SELECT RAND(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'rand' +SELECT RADIANS(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'radians' +SELECT DEGREES(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'degrees' +SELECT EXP(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'exp' +SELECT LN(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'ln' +SELECT LOG2(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'log2' +SELECT LOG10(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'log10' +SELECT SQRT(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'sqrt' +SELECT ACOS(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'acos' +SELECT ASIN(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'asin' +SELECT COS(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'cos' +SELECT SIN(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'sin' +SELECT TAN(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'tan' +SELECT COT(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'cot' +SELECT LOG(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'log' +SELECT LOG(ROW(1,1),ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'log' +SELECT LOG(1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'log' +SELECT ATAN(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'atan' +SELECT ATAN(ROW(1,1),ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'atan' +SELECT ATAN(1, ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'atan' +SELECT POW(ROW(1,1),ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'pow' +SELECT RAND(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'rand' +SELECT RADIANS(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'radians' +SELECT DEGREES(ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'degrees' +# +# MDEV-13965 Parameter data type control for Item_longlong_func +# +SELECT POINT(1,1) | 1; +ERROR HY000: Illegal parameter data type geometry for operation '|' +SELECT 1 | POINT(1,1); +ERROR HY000: Illegal parameter data type geometry for operation '|' +SELECT POINT(1,1) & 1; +ERROR HY000: Illegal parameter data type geometry for operation '&' +SELECT 1 & POINT(1,1); +ERROR HY000: Illegal parameter data type geometry for operation '&' +SELECT POINT(1,1) << 1; +ERROR HY000: Illegal parameter data type geometry for operation '<<' +SELECT 1 << POINT(1,1); +ERROR HY000: Illegal parameter data type geometry for operation '<<' +SELECT POINT(1,1) >> 1; +ERROR HY000: Illegal parameter data type geometry for operation '>>' +SELECT 1 >> POINT(1,1); +ERROR HY000: Illegal parameter data type geometry for operation '>>' +SELECT ~POINT(1,1); +ERROR HY000: Illegal parameter data type geometry for operation '~' +SELECT TO_SECONDS(POINT(1,1)); +ERROR HY000: Illegal parameter data type 
geometry for operation 'to_seconds' +SELECT TIMESTAMPDIFF(SECOND,POINT(1,1), 1); +ERROR HY000: Illegal parameter data type geometry for operation 'timestampdiff' +SELECT TIMESTAMPDIFF(SECOND,1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'timestampdiff' +SELECT INET_ATON(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'inet_aton' +SELECT LAST_INSERT_ID(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'last_insert_id' +# +# MDEV-13966 Parameter data type control for Item_temporal_func +# +SELECT FROM_DAYS(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'from_days' +SELECT MAKEDATE(POINT(1,1),1); +ERROR HY000: Illegal parameter data type geometry for operation 'makedate' +SELECT MAKEDATE(1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'makedate' +SELECT LAST_DAY(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'last_day' +SELECT SEC_TO_TIME(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'sec_to_time' +SELECT TIMEDIFF(POINT(1,1),1); +ERROR HY000: Illegal parameter data type geometry for operation 'timediff' +SELECT TIMEDIFF(1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'timediff' +SELECT MAKETIME(POINT(1,1),1,1); +ERROR HY000: Illegal parameter data type geometry for operation 'maketime' +SELECT MAKETIME(1, POINT(1,1), 1); +ERROR HY000: Illegal parameter data type geometry for operation 'maketime' +SELECT MAKETIME(1, 1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'maketime' +SELECT FROM_UNIXTIME(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'from_unixtime' +SELECT CONVERT_TZ(POINT(1,1),1,1); +ERROR HY000: Illegal parameter data type geometry for operation 'convert_tz' +SELECT CONVERT_TZ(1, POINT(1,1), 1); +ERROR HY000: Illegal parameter data type geometry for operation 'convert_tz' +SELECT CONVERT_TZ(1, 1, POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'convert_tz' +# +# MDEV-13967 Parameter data type control for Item_long_func +# +SELECT STRCMP(POINT(1,1),POINT(1,1)); +STRCMP(POINT(1,1),POINT(1,1)) +0 +SELECT CHAR_LENGTH(POINT(1,1)); +CHAR_LENGTH(POINT(1,1)) +25 +SELECT OCTET_LENGTH(POINT(1,1)); +OCTET_LENGTH(POINT(1,1)) +25 +SELECT UNCOMPRESSED_LENGTH(POINT(1,1)); +UNCOMPRESSED_LENGTH(POINT(1,1)) +0 +SELECT COERCIBILITY(POINT(1,1)); +COERCIBILITY(POINT(1,1)) +4 +SELECT ASCII(POINT(1,1)); +ASCII(POINT(1,1)) +0 +SELECT CRC32(POINT(1,1)); +CRC32(POINT(1,1)) +1349318989 +SELECT ORD(POINT(1,1)); +ORD(POINT(1,1)) +0 +SELECT SIGN(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'sign' +SELECT LOCATE('a','a',POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'locate' +SELECT LOCATE(POINT(1,1),POINT(1,1)); +LOCATE(POINT(1,1),POINT(1,1)) +1 +SELECT BIT_COUNT(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'bit_count' +SELECT BENCHMARK(POINT(1,1),''); +ERROR HY000: Illegal parameter data type geometry for operation 'benchmark' +SELECT SLEEP(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'sleep' +SELECT GET_LOCK('x', POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'get_lock' +SELECT PERIOD_ADD(POINT(1,1),1); +ERROR HY000: Illegal parameter data type geometry for operation 'period_add' +SELECT 
PERIOD_ADD(1,POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'period_add' +SELECT PERIOD_DIFF(POINT(1,1),1); +ERROR HY000: Illegal parameter data type geometry for operation 'period_diff' +SELECT PERIOD_DIFF(1,POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'period_diff' +SELECT TO_DAYS(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'to_days' +SELECT DAYOFMONTH(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'dayofmonth' +SELECT DAYOFYEAR(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'dayofyear' +SELECT QUARTER(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'quarter' +SELECT YEAR(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'year' +SELECT YEARWEEK(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'yearweek' +SELECT WEEK(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'week' +SELECT WEEK(POINT(1,1),1); +ERROR HY000: Illegal parameter data type geometry for operation 'week' +SELECT WEEK(1,POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'week' +SELECT HOUR(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'hour' +SELECT MINUTE(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'minute' +SELECT SECOND(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'second' +SELECT MICROSECOND(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'microsecond' +SELECT JSON_DEPTH(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'json_depth' +SELECT JSON_LENGTH(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'json_length' +SELECT JSON_LENGTH('json', POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'json_length' +SELECT JSON_LENGTH(POINT(1,1), POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'json_length' +SELECT REGEXP_INSTR(POINT(1,1),''); +REGEXP_INSTR(POINT(1,1),'') +1 +SELECT REGEXP_INSTR('',POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'regexp_instr' +SELECT FIND_IN_SET(POINT(1,1),''); +FIND_IN_SET(POINT(1,1),'') +0 +SELECT FIND_IN_SET('',POINT(1,1)); +FIND_IN_SET('',POINT(1,1)) +0 +SELECT RELEASE_LOCK(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'release_lock' +SELECT IS_FREE_LOCK(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'is_free_lock' +SELECT IS_USED_LOCK(POINT(1,1)); +ERROR HY000: Illegal parameter data type geometry for operation 'is_used_lock' +# # End of 10.3 tests # diff --git a/mysql-test/r/join_cache.result b/mysql-test/r/join_cache.result index eb845c63a76..5a404fe46a6 100644 --- a/mysql-test/r/join_cache.result +++ b/mysql-test/r/join_cache.result @@ -5883,7 +5883,7 @@ where c1 = c2-0 and c2 <= (select max(c3) from t3 where c3 = 2 and @counter:=@co id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 2 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) -2 UNCACHEABLE SUBQUERY t3 system NULL NULL NULL NULL 1 +2 UNCACHEABLE SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE set @counter=0; select count(*) from t1 straight_join t2 where c1 = c2-0 and c2 <= (select max(c3) from t3 
where c3 = 2 and @counter:=@counter+1); diff --git a/mysql-test/r/lowercase_view.result b/mysql-test/r/lowercase_view.result index 6ccfe29b2cd..af53f67869d 100644 --- a/mysql-test/r/lowercase_view.result +++ b/mysql-test/r/lowercase_view.result @@ -15,74 +15,6 @@ create table t2aA (col1 int); create view v1Aa as select * from t1aA; create view v2aA as select * from v1aA; create view v3Aa as select v2Aa.col1 from v2aA,t2Aa where v2Aa.col1 = t2aA.col1; -update v2aA set col1 = (select max(col1) from v1Aa); -ERROR HY000: The definition of table 'v1Aa' prevents operation UPDATE on table 'v2aA' -update v2Aa set col1 = (select max(col1) from t1Aa); -ERROR HY000: The definition of table 'v2Aa' prevents operation UPDATE on table 'v2Aa' -update v2aA set col1 = (select max(col1) from v2Aa); -ERROR HY000: Table 'v2aA' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update v2aA,t2Aa set v2Aa.col1 = (select max(col1) from v1aA) where v2aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1aA' prevents operation UPDATE on table 'v2aA' -update t1aA,t2Aa set t1Aa.col1 = (select max(col1) from v1Aa) where t1aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1Aa' prevents operation UPDATE on table 't1aA' -update v1aA,t2Aa set v1Aa.col1 = (select max(col1) from v1aA) where v1Aa.col1 = t2aA.col1; -ERROR HY000: Table 'v1aA' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t2Aa,v2Aa set v2aA.col1 = (select max(col1) from v1aA) where v2Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1aA' prevents operation UPDATE on table 't2Aa' -update t2Aa,t1Aa set t1aA.col1 = (select max(col1) from v1Aa) where t1Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1Aa' prevents operation UPDATE on table 't2Aa' -update t2Aa,v1aA set v1Aa.col1 = (select max(col1) from v1aA) where v1Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1aA' prevents operation UPDATE on table 't2Aa' -update v2aA,t2Aa set v2Aa.col1 = (select max(col1) from t1aA) where v2aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2aA' prevents operation UPDATE on table 'v2aA' -update t1Aa,t2Aa set t1aA.col1 = (select max(col1) from t1Aa) where t1aA.col1 = t2aA.col1; -ERROR HY000: Table 't1Aa' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update v1aA,t2Aa set v1Aa.col1 = (select max(col1) from t1Aa) where v1aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1aA' prevents operation UPDATE on table 'v1aA' -update t2Aa,v2Aa set v2aA.col1 = (select max(col1) from t1aA) where v2Aa.col1 = t2aA.col1; -ERROR HY000: Table 't2Aa' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t2Aa,t1Aa set t1aA.col1 = (select max(col1) from t1Aa) where t1aA.col1 = t2aA.col1; -ERROR HY000: Table 't2Aa' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t2Aa,v1Aa set v1aA.col1 = (select max(col1) from t1Aa) where v1Aa.col1 = t2aA.col1; -ERROR HY000: Table 't2Aa' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update v2aA,t2Aa set v2Aa.col1 = (select max(col1) from v2aA) where v2Aa.col1 = t2aA.col1; -ERROR HY000: Table 'v2aA' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t1aA,t2Aa set t1Aa.col1 = (select max(col1) from v2aA) where t1aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2aA' prevents operation UPDATE on table 't1aA' 
-update v1aA,t2Aa set v1Aa.col1 = (select max(col1) from v2Aa) where v1aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2Aa' prevents operation UPDATE on table 'v1aA' -update t2Aa,v2aA set v2Aa.col1 = (select max(col1) from v2aA) where v2Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2aA' prevents operation UPDATE on table 't2Aa' -update t2Aa,t1Aa set t1aA.col1 = (select max(col1) from v2aA) where t1Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2aA' prevents operation UPDATE on table 't2Aa' -update t2Aa,v1Aa set v1aA.col1 = (select max(col1) from v2Aa) where v1Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2Aa' prevents operation UPDATE on table 't2Aa' -update v3aA set v3Aa.col1 = (select max(col1) from v1aA); -ERROR HY000: The definition of table 'v1aA' prevents operation UPDATE on table 'v3aA' -update v3aA set v3Aa.col1 = (select max(col1) from t1aA); -ERROR HY000: The definition of table 'v3aA' prevents operation UPDATE on table 'v3aA' -update v3aA set v3Aa.col1 = (select max(col1) from v2aA); -ERROR HY000: The definition of table 'v2aA' prevents operation UPDATE on table 'v3aA' -update v3aA set v3Aa.col1 = (select max(col1) from v3aA); -ERROR HY000: Table 'v3aA' is specified twice, both as a target for 'UPDATE' and as a separate source for data -delete v2Aa from v2aA,t2Aa where (select max(col1) from v1aA) > 0 and v2Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1aA' prevents operation DELETE on table 'v2aA' -delete t1aA from t1Aa,t2Aa where (select max(col1) from v1Aa) > 0 and t1aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1Aa' prevents operation DELETE on table 't1Aa' -delete v1aA from v1Aa,t2Aa where (select max(col1) from v1aA) > 0 and v1Aa.col1 = t2aA.col1; -ERROR HY000: Table 'v1Aa' is specified twice, both as a target for 'DELETE' and as a separate source for data -delete v2aA from v2Aa,t2Aa where (select max(col1) from t1Aa) > 0 and v2aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2Aa' prevents operation DELETE on table 'v2Aa' -delete t1aA from t1Aa,t2Aa where (select max(col1) from t1aA) > 0 and t1Aa.col1 = t2aA.col1; -ERROR HY000: Table 't1Aa' is specified twice, both as a target for 'DELETE' and as a separate source for data -delete v1aA from v1Aa,t2Aa where (select max(col1) from t1aA) > 0 and v1aA.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v1Aa' prevents operation DELETE on table 'v1Aa' -delete v2Aa from v2aA,t2Aa where (select max(col1) from v2Aa) > 0 and v2aA.col1 = t2aA.col1; -ERROR HY000: Table 'v2aA' is specified twice, both as a target for 'DELETE' and as a separate source for data -delete t1Aa from t1aA,t2Aa where (select max(col1) from v2Aa) > 0 and t1Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2Aa' prevents operation DELETE on table 't1aA' -delete v1Aa from v1aA,t2Aa where (select max(col1) from v2aA) > 0 and v1Aa.col1 = t2aA.col1; -ERROR HY000: The definition of table 'v2aA' prevents operation DELETE on table 'v1aA' insert into v2Aa values ((select max(col1) from v1aA)); ERROR HY000: The definition of table 'v1aA' prevents operation INSERT on table 'v2Aa' insert into t1aA values ((select max(col1) from v1Aa)); diff --git a/mysql-test/r/merge.result b/mysql-test/r/merge.result index f55251ac199..afea08593a6 100644 --- a/mysql-test/r/merge.result +++ b/mysql-test/r/merge.result @@ -3714,34 +3714,6 @@ insert into m1 (a) values ((select max(a) from v1)); ERROR HY000: The definition of table 'v1' prevents operation INSERT on table 'm1' 
insert into m1 (a) values ((select max(a) from tmp, v1)); ERROR HY000: The definition of table 'v1' prevents operation INSERT on table 'm1' -update m1 set a = ((select max(a) from m1)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from m2)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from t1)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from t2)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from t3, m1)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from t3, m2)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from t3, t1)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from t3, t2)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from tmp, m1)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from tmp, m2)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from tmp, t1)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from tmp, t2)); -ERROR HY000: Table 'm1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update m1 set a = ((select max(a) from v1)); -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 'm1' -update m1 set a = ((select max(a) from tmp, v1)); -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 'm1' drop view v1; drop temporary table tmp; drop table t1, t2, t3, m1, m2; diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result index 634b3897ba0..45239f6e090 100644 --- a/mysql-test/r/multi_update.result +++ b/mysql-test/r/multi_update.result @@ -1,4 +1,3 @@ -CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); create table t1(id1 int not null auto_increment primary key, t char(12)); create table t2(id2 int not null, t char(12)); create table t3(id3 int not null, t char(12), index(id3)); @@ -429,6 +428,7 @@ connection root; revoke all privileges on mysqltest.t1 from mysqltest_1@localhost; revoke all privileges on mysqltest.* from mysqltest_1@localhost; delete from mysql.user where user=_binary'mysqltest_1'; +flush privileges; drop database mysqltest; connection default; disconnect user1; @@ -442,7 +442,6 @@ drop table t1, t2, t3; create table t1 (col1 int); create table t2 (col1 int); update t1,t2 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1; -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data delete t1 from t1,t2 where t1.col1 < (select max(col1) from t1) and 
t1.col1 = t2.col1; ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data drop table t1,t2; @@ -565,66 +564,6 @@ id c1 c2 2 test t ppc 9 abc ppc drop table t1, t2; -CREATE TABLE `t1` ( -`a` int(11) NOT NULL auto_increment, -`b` int(11) default NULL, -PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; -CREATE TABLE `t2` ( -`a` int(11) NOT NULL auto_increment, -`b` int(11) default NULL, -PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; -set @sav_binlog_format= @@session.binlog_format; -set @@session.binlog_format= mixed; -insert into t1 values (1,1),(2,2); -insert into t2 values (1,1),(4,4); -reset master; -UPDATE t2,t1 SET t2.a=t1.a+2; -ERROR 23000: Duplicate entry '3' for key 'PRIMARY' -select * from t2 /* must be (3,1), (4,4) */; -a b -3 1 -4 4 -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # UPDATE t2,t1 SET t2.a=t1.a+2 -master-bin.000001 # Table_map # # table_id: # (test.t2) -master-bin.000001 # Update_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Query # # COMMIT -delete from t1; -delete from t2; -insert into t1 values (1,2),(3,4),(4,4); -insert into t2 values (1,2),(3,4),(4,4); -reset master; -UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a; -ERROR 23000: Duplicate entry '4' for key 'PRIMARY' -include/show_binlog_events.inc -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Gtid # # BEGIN GTID #-#-# -master-bin.000001 # Annotate_rows # # UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a -master-bin.000001 # Table_map # # table_id: # (test.t2) -master-bin.000001 # Update_rows_v1 # # table_id: # flags: STMT_END_F -master-bin.000001 # Query # # COMMIT -drop table t1, t2; -set @@session.binlog_format= @sav_binlog_format; -CREATE TABLE t1 (a int, PRIMARY KEY (a)); -CREATE TABLE t2 (a int, PRIMARY KEY (a)); -CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM; -create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1); -insert into t2 values (1),(2); -insert into t3 values (1),(2); -reset master; -delete t3.* from t2,t3 where t2.a=t3.a; -ERROR 23000: Duplicate entry '1' for key 'PRIMARY' -select count(*) from t1 /* must be 1 */; -count(*) -1 -select count(*) from t3 /* must be 1 */; -count(*) -1 -drop table t1, t2, t3; # # Bug#49534: multitable IGNORE update with sql_safe_updates error # causes debug assertion @@ -1002,3 +941,30 @@ deallocate prepare stmt1; drop view v3,v2,v1; drop table t1,t2,t3; end of 5.5 tests +create table t1 (c1 int, c3 int); +insert t1(c3) values (1), (2), (3), (4), (5), (6), (7), (8); +create table t2 select * from t1; +update t1, t2 set t1.c1=t2.c3 where t1.c3=t2.c3 order by t1.c3 limit 3; +select * from t1; +c1 c3 +1 1 +2 2 +3 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +update t1 set c1=NULL; +update t1, t2 set t1.c1=t2.c3 where t1.c3=t2.c3 order by t1.c3 desc limit 2; +select * from t1; +c1 c3 +NULL 1 +NULL 2 +NULL 3 +NULL 4 +NULL 5 +NULL 6 +7 7 +8 8 +drop table t1, t2; diff --git a/mysql-test/r/multi_update_binlog.result b/mysql-test/r/multi_update_binlog.result new file mode 100644 index 00000000000..e77a4530dbf --- /dev/null +++ b/mysql-test/r/multi_update_binlog.result @@ -0,0 +1,61 @@ +CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); +CREATE TABLE `t1` ( +`a` int(11) NOT NULL auto_increment, +`b` int(11) default NULL, +PRIMARY KEY 
(`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; +CREATE TABLE `t2` ( +`a` int(11) NOT NULL auto_increment, +`b` int(11) default NULL, +PRIMARY KEY (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; +set @sav_binlog_format= @@session.binlog_format; +set @@session.binlog_format= mixed; +insert into t1 values (1,1),(2,2); +insert into t2 values (1,1),(4,4); +reset master; +UPDATE t2,t1 SET t2.a=t1.a+2; +ERROR 23000: Duplicate entry '3' for key 'PRIMARY' +select * from t2 /* must be (3,1), (4,4) */; +a b +3 1 +4 4 +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # UPDATE t2,t1 SET t2.a=t1.a+2 +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Update_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +delete from t1; +delete from t2; +insert into t1 values (1,2),(3,4),(4,4); +insert into t2 values (1,2),(3,4),(4,4); +reset master; +UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a; +ERROR 23000: Duplicate entry '4' for key 'PRIMARY' +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Gtid # # BEGIN GTID #-#-# +master-bin.000001 # Annotate_rows # # UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a +master-bin.000001 # Table_map # # table_id: # (test.t2) +master-bin.000001 # Update_rows_v1 # # table_id: # flags: STMT_END_F +master-bin.000001 # Query # # COMMIT +drop table t1, t2; +set @@session.binlog_format= @sav_binlog_format; +CREATE TABLE t1 (a int, PRIMARY KEY (a)); +CREATE TABLE t2 (a int, PRIMARY KEY (a)); +CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM; +create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1); +insert into t2 values (1),(2); +insert into t3 values (1),(2); +reset master; +delete t3.* from t2,t3 where t2.a=t3.a; +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +select count(*) from t1 /* must be 1 */; +count(*) +1 +select count(*) from t3 /* must be 1 */; +count(*) +1 +drop table t1, t2, t3; diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 919693efffb..7ba8b545e6a 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -583,8 +583,6 @@ a b 0 10 1 11 2 12 -update t1 set b= (select b from t1); -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data update t1 set b= (select b from t2); ERROR 21000: Subquery returns more than 1 row update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/r/subselect_no_exists_to_in.result b/mysql-test/r/subselect_no_exists_to_in.result index 806475b3380..c09f3c94710 100644 --- a/mysql-test/r/subselect_no_exists_to_in.result +++ b/mysql-test/r/subselect_no_exists_to_in.result @@ -587,8 +587,6 @@ a b 0 10 1 11 2 12 -update t1 set b= (select b from t1); -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data update t1 set b= (select b from t2); ERROR 21000: Subquery returns more than 1 row update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/r/subselect_no_mat.result b/mysql-test/r/subselect_no_mat.result index 237a6dbf9bb..0aefeaf44d9 100644 --- a/mysql-test/r/subselect_no_mat.result +++ b/mysql-test/r/subselect_no_mat.result @@ -590,8 +590,6 @@ a b 0 10 1 11 2 12 -update t1 set b= (select b from t1); -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a 
separate source for data update t1 set b= (select b from t2); ERROR 21000: Subquery returns more than 1 row update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/r/subselect_no_opts.result b/mysql-test/r/subselect_no_opts.result index af1afe47f32..92defb3c36d 100644 --- a/mysql-test/r/subselect_no_opts.result +++ b/mysql-test/r/subselect_no_opts.result @@ -586,8 +586,6 @@ a b 0 10 1 11 2 12 -update t1 set b= (select b from t1); -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data update t1 set b= (select b from t2); ERROR 21000: Subquery returns more than 1 row update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/r/subselect_no_scache.result b/mysql-test/r/subselect_no_scache.result index 75be8642069..b47dab2e79e 100644 --- a/mysql-test/r/subselect_no_scache.result +++ b/mysql-test/r/subselect_no_scache.result @@ -589,8 +589,6 @@ a b 0 10 1 11 2 12 -update t1 set b= (select b from t1); -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data update t1 set b= (select b from t2); ERROR 21000: Subquery returns more than 1 row update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/r/subselect_no_semijoin.result b/mysql-test/r/subselect_no_semijoin.result index a6a6397375b..9d04ddd9829 100644 --- a/mysql-test/r/subselect_no_semijoin.result +++ b/mysql-test/r/subselect_no_semijoin.result @@ -586,8 +586,6 @@ a b 0 10 1 11 2 12 -update t1 set b= (select b from t1); -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data update t1 set b= (select b from t2); ERROR 21000: Subquery returns more than 1 row update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/r/update_use_source.result b/mysql-test/r/update_use_source.result new file mode 100644 index 00000000000..e5585fcee5d --- /dev/null +++ b/mysql-test/r/update_use_source.result @@ -0,0 +1,1201 @@ +create table t1 (old_c1 integer, old_c2 integer,c1 integer, c2 integer, c3 integer) engine=InnoDb; +create view v1 as select * from t1 where c2=2; +create trigger trg_t1 before update on t1 for each row +begin +set new.old_c1=old.c1; +set new.old_c2=old.c2; +end; +/ +insert into t1(c1,c2,c3) values (1,1,1); +insert into t1(c1,c2,c3) values (1,2,2); +insert into t1(c1,c2,c3) values (1,3,3); +insert into t1(c1,c2,c3) values (2,1,4); +insert into t1(c1,c2,c3) values (2,2,5); +insert into t1(c1,c2,c3) values (2,3,6); +insert into t1(c1,c2,c3) values (2,4,7); +insert into t1(c1,c2,c3) values (2,5,8); +commit; +select * from t1; +old_c1 old_c2 c1 c2 c3 +NULL NULL 1 1 1 +NULL NULL 1 2 2 +NULL NULL 1 3 3 +NULL NULL 2 1 4 +NULL NULL 2 2 5 +NULL NULL 2 3 6 +NULL NULL 2 4 7 +NULL NULL 2 5 8 +Test without any index +# +# Update a with value from subquery on the same table, no search clause. 
ALL access +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +1->3 3 * +2->4 4 * +2->5 5 * +2->6 6 * +2->7 7 * +2->8 8 * +rollback; +# +# Update with search clause on the same table +# +start transaction; +update t1 +set c1=10 +where c1 <2 +and exists (select 'X' + from t1 a +where a.c1 = t1.c1); +affected rows: 3 +info: Rows matched: 3 Changed: 3 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->10 1 * +1->10 2 * +1->10 3 * +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update via RANGE or INDEX access if an index or a primary key exists +# +explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where +2 DEPENDENT SUBQUERY a ALL NULL NULL NULL NULL 8 Using where +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +affected rows: 4 +info: Rows matched: 4 Changed: 4 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +# Update with order by +# +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 order by c2; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +Update using a view in subquery +# +start transaction; +update t1 +set c1=c1 +(select max(a.c2) +from v1 a +where a.c1 = t1.c1) ; +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +1->3 1 * +1->3 2 * +1->3 3 * +2->4 4 * +2->4 5 * +2->4 6 * +2->4 7 * +2->4 8 * +rollback; +# +# Update throw a view +# +start transaction; +update v1 +set c1=c1 + (select max(a.c2) +from t1 a +where a.c1 = v1.c1) +10 +where c3 > 3; +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +2->17 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=c1 + 1 +where c1 <2 +and exists (select 'X' + from v1 a +where a.c1 = v1.c1); +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=(select max(a.c1)+10 +from v1 a +where a.c1 = v1.c1) +where c1 <10 +and exists (select 'X' + from v1 a +where a.c2 = v1.c2); +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 
+select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->11 2 * +NULL 3 +NULL 4 +2->12 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update of the index or primary key (c3) +# +start transaction; +explain update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 8 +1 PRIMARY a ALL NULL NULL NULL NULL 8 Using where; FirstMatch(t1) +update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select c3 from t1; +c3 +11 +12 +13 +14 +15 +16 +17 +18 +rollback; +# +# update with a limit +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# update with a limit and an order by +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +order by c3 desc limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +NULL 5 +NULL 6 +2->7 7 * +2->8 8 * +rollback; +Test with an index on updated columns +create index t1_c2 on t1 (c2,c1); +# +# Update a with value from subquery on the same table, no search clause. ALL access +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +1->3 3 * +2->4 4 * +2->5 5 * +2->6 6 * +2->7 7 * +2->8 8 * +rollback; +# +# Update with search clause on the same table +# +start transaction; +update t1 +set c1=10 +where c1 <2 +and exists (select 'X' + from t1 a +where a.c1 = t1.c1); +affected rows: 3 +info: Rows matched: 3 Changed: 3 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->10 1 * +1->10 2 * +1->10 3 * +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update via RANGE or INDEX access if an index or a primary key exists +# +explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 range t1_c2 t1_c2 5 NULL 2 Using where +2 DEPENDENT SUBQUERY a ref t1_c2 t1_c2 5 test.t1.c2 4 Using index +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +affected rows: 4 +info: Rows matched: 4 Changed: 4 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +# Update with order by +# +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 order by c2; +select concat(old_c1,'->',c1),c3, case when c1 != 
old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +Update using a view in subquery +# +start transaction; +update t1 +set c1=c1 +(select max(a.c2) +from v1 a +where a.c1 = t1.c1) ; +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +1->3 1 * +1->3 2 * +1->3 3 * +2->4 4 * +2->4 5 * +2->4 6 * +2->4 7 * +2->4 8 * +rollback; +# +# Update throw a view +# +start transaction; +update v1 +set c1=c1 + (select max(a.c2) +from t1 a +where a.c1 = v1.c1) +10 +where c3 > 3; +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +2->17 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=c1 + 1 +where c1 <2 +and exists (select 'X' + from v1 a +where a.c1 = v1.c1); +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=(select max(a.c1)+10 +from v1 a +where a.c1 = v1.c1) +where c1 <10 +and exists (select 'X' + from v1 a +where a.c2 = v1.c2); +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->11 2 * +NULL 3 +NULL 4 +2->12 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update of the index or primary key (c3) +# +start transaction; +explain update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL t1_c2 NULL NULL NULL 8 Using where +1 PRIMARY a ref t1_c2 t1_c2 10 test.t1.c2,test.t1.c1 1 Using index; FirstMatch(t1) +update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select c3 from t1; +c3 +11 +12 +13 +14 +15 +16 +17 +18 +rollback; +# +# update with a limit +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# update with a limit and an order by +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +order by c3 desc limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +NULL 5 +NULL 6 +2->7 7 * +2->8 8 * +rollback; +Test with an index on updated columns +create index t1_c3 on t1 (c3); +# +# Update a with value from subquery on the same table, no search clause. 
ALL access +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +1->3 3 * +2->4 4 * +2->5 5 * +2->6 6 * +2->7 7 * +2->8 8 * +rollback; +# +# Update with search clause on the same table +# +start transaction; +update t1 +set c1=10 +where c1 <2 +and exists (select 'X' + from t1 a +where a.c1 = t1.c1); +affected rows: 3 +info: Rows matched: 3 Changed: 3 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->10 1 * +1->10 2 * +1->10 3 * +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update via RANGE or INDEX access if an index or a primary key exists +# +explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 range t1_c2 t1_c2 5 NULL 2 Using where +2 DEPENDENT SUBQUERY a ref t1_c2 t1_c2 5 test.t1.c2 1 Using index +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +affected rows: 4 +info: Rows matched: 4 Changed: 4 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +# Update with order by +# +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 order by c2; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +Update using a view in subquery +# +start transaction; +update t1 +set c1=c1 +(select max(a.c2) +from v1 a +where a.c1 = t1.c1) ; +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +1->3 1 * +1->3 2 * +1->3 3 * +2->4 4 * +2->4 5 * +2->4 6 * +2->4 7 * +2->4 8 * +rollback; +# +# Update throw a view +# +start transaction; +update v1 +set c1=c1 + (select max(a.c2) +from t1 a +where a.c1 = v1.c1) +10 +where c3 > 3; +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +2->17 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=c1 + 1 +where c1 <2 +and exists (select 'X' + from v1 a +where a.c1 = v1.c1); +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=(select max(a.c1)+10 +from v1 a +where a.c1 = v1.c1) +where c1 <10 +and exists (select 'X' + from v1 a +where a.c2 = v1.c2); +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 
+select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->11 2 * +NULL 3 +NULL 4 +2->12 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update of the index or primary key (c3) +# +start transaction; +explain update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL t1_c2 NULL NULL NULL 8 Using where +1 PRIMARY a ref t1_c2 t1_c2 10 test.t1.c2,test.t1.c1 1 Using index; FirstMatch(t1) +update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select c3 from t1; +c3 +11 +12 +13 +14 +15 +16 +17 +18 +rollback; +# +# update with a limit +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# update with a limit and an order by +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +order by c3 desc limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +NULL 5 +NULL 6 +2->7 7 * +2->8 8 * +rollback; +Test with a primary key on updated columns +drop index t1_c3 on t1; +alter table t1 add primary key (c3); +# +# Update a with value from subquery on the same table, no search clause. 
ALL access +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +1->3 3 * +2->4 4 * +2->5 5 * +2->6 6 * +2->7 7 * +2->8 8 * +rollback; +# +# Update with search clause on the same table +# +start transaction; +update t1 +set c1=10 +where c1 <2 +and exists (select 'X' + from t1 a +where a.c1 = t1.c1); +affected rows: 3 +info: Rows matched: 3 Changed: 3 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->10 1 * +1->10 2 * +1->10 3 * +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update via RANGE or INDEX access if an index or a primary key exists +# +explain update t1 set c1=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 > 3; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 range t1_c2 t1_c2 5 NULL 2 Using where +2 DEPENDENT SUBQUERY a ref t1_c2 t1_c2 5 test.t1.c2 1 Using index +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +affected rows: 4 +info: Rows matched: 4 Changed: 4 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +# Update with order by +# +start transaction; +update t1 set c1=c1+10 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 order by c2; +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +1->11 3 * +NULL 4 +NULL 5 +2->12 6 * +2->12 7 * +2->12 8 * +rollback; +# +Update using a view in subquery +# +start transaction; +update t1 +set c1=c1 +(select max(a.c2) +from v1 a +where a.c1 = t1.c1) ; +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +1->3 1 * +1->3 2 * +1->3 3 * +2->4 4 * +2->4 5 * +2->4 6 * +2->4 7 * +2->4 8 * +rollback; +# +# Update throw a view +# +start transaction; +update v1 +set c1=c1 + (select max(a.c2) +from t1 a +where a.c1 = v1.c1) +10 +where c3 > 3; +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +2->17 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=c1 + 1 +where c1 <2 +and exists (select 'X' + from v1 a +where a.c1 = v1.c1); +affected rows: 1 +info: Rows matched: 1 Changed: 1 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update through a view and using the view in subquery +# +start transaction; +update v1 +set c1=(select max(a.c1)+10 +from v1 a +where a.c1 = v1.c1) +where c1 <10 +and exists (select 'X' + from v1 a +where a.c2 = v1.c2); +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 
+select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +1->11 2 * +NULL 3 +NULL 4 +2->12 5 * +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# Update of the index or primary key (c3) +# +start transaction; +explain update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL t1_c2 NULL NULL NULL 8 Using where +1 PRIMARY a ref t1_c2 t1_c2 10 test.t1.c2,test.t1.c1 1 Using index; FirstMatch(t1) +update t1 set c3=c3+10 where c2 in (select distinct a.c2 from t1 a where t1.c1=a.c1); +affected rows: 8 +info: Rows matched: 8 Changed: 8 Warnings: 0 +select c3 from t1; +c3 +11 +14 +12 +15 +13 +16 +17 +18 +rollback; +# +# update with a limit +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +1->1 1 +1->2 2 * +NULL 3 +NULL 4 +NULL 5 +NULL 6 +NULL 7 +NULL 8 +rollback; +# +# update with a limit and an order by +# +start transaction; +update t1 +set c1=(select a.c3 +from t1 a +where a.c3 = t1.c3) +order by c3 desc limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +select concat(old_c1,'->',c1),c3, case when c1 != old_c1 then '*' else ' ' end "Changed" from t1 ; +concat(old_c1,'->',c1) c3 Changed +NULL 1 +NULL 2 +NULL 3 +NULL 4 +NULL 5 +NULL 6 +2->7 7 * +2->8 8 * +rollback; +# Update with error "Subquery returns more than 1 row" +update t1 set c2=(select c2 from t1); +ERROR 21000: Subquery returns more than 1 row +# Update with error "Subquery returns more than 1 row" and order by +update t1 set c2=(select c2 from t1) order by c3; +ERROR 21000: Subquery returns more than 1 row +Duplicate value on update a primary key +start transaction; +update t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +ERROR 23000: Duplicate entry '0' for key 'PRIMARY' +rollback; +Duplicate value on update a primary key with ignore +start transaction; +update ignore t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +affected rows: 4 +info: Rows matched: 4 Changed: 4 Warnings: 0 +rollback; +Duplicate value on update a primary key and limit +start transaction; +update t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 limit 2; +ERROR 23000: Duplicate entry '0' for key 'PRIMARY' +rollback; +Duplicate value on update a primary key with ignore and limit +start transaction; +update ignore t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 limit 2; +affected rows: 2 +info: Rows matched: 2 Changed: 2 Warnings: 0 +rollback; +# Update no rows found +update t1 +set c1=10 +where c1 <2 +and exists (select 'X' + from t1 a +where a.c1 = t1.c1 + 10); +affected rows: 0 +info: Rows matched: 0 Changed: 0 Warnings: 0 +# Update no rows changed +drop trigger trg_t1; +start transaction; +update t1 +set c1=c1 +where c1 <2 +and exists (select 'X' + from t1 a +where a.c1 = t1.c1); +affected rows: 0 +info: Rows matched: 3 Changed: 0 Warnings: 0 +rollback; +# +# Check call of after trigger +# +create or replace trigger trg_t2 after update on t1 for each row +begin +declare msg varchar(100); +if (new.c3 = 5) then +set msg=concat('in after update trigger on ',new.c3); +SIGNAL SQLSTATE '45000' SET 
MESSAGE_TEXT = msg; +end if; +end; +/ +update t1 set c1=2 where c3 in (select distinct a.c3 from t1 a where a.c1=t1.c1); +ERROR 45000: in after update trigger on 5 +# +# Check update with order by and after trigger +# +update t1 set c1=2 where c3 in (select distinct a.c3 from t1 a where a.c1=t1.c1) order by t1.c2; +ERROR 45000: in after update trigger on 5 +drop view v1; +# +# Check update on view with check option +# +create view v1 as select * from t1 where c2=2 with check option; +start transaction; +update v1 set c2=3 where c1=1; +ERROR 44000: CHECK OPTION failed `test`.`v1` +rollback; +start transaction; +update v1 set c2=(select max(c3) from v1) where c1=1; +ERROR 44000: CHECK OPTION failed `test`.`v1` +rollback; +start transaction; +update v1 set c2=(select min(va.c3) from v1 va), c1=0 where c1=1; +rollback; +drop view v1; +drop table t1; +# +# Test with a temporary table +# +create temporary table t1 (c1 integer, c2 integer, c3 integer) engine=InnoDb; +insert into t1(c1,c2,c3) values (1,1,1); +insert into t1(c1,c2,c3) values (1,2,2); +insert into t1(c1,c2,c3) values (1,3,3); +insert into t1(c1,c2,c3) values (2,1,4); +insert into t1(c1,c2,c3) values (2,2,5); +insert into t1(c1,c2,c3) values (2,3,6); +insert into t1(c1,c2,c3) values (2,4,7); +insert into t1(c1,c2,c3) values (2,5,8); +start transaction; +update t1 +set c1=(select a.c2 +from t1 a +where a.c3 = t1.c3) limit 3; +affected rows: 2 +info: Rows matched: 3 Changed: 2 Warnings: 0 +select * from t1 ; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +2 1 4 +2 2 5 +2 3 6 +2 4 7 +2 5 8 +rollback; +drop table t1; +# +# Test on dynamic columns (blob) +# +create table assets ( +item_name varchar(32) primary key, -- A common attribute for all items +dynamic_cols blob -- Dynamic columns will be stored here +); +INSERT INTO assets VALUES ('MariaDB T-shirt', COLUMN_CREATE('color', 'blue', 'size', 'XL')); +INSERT INTO assets VALUES ('Thinkpad Laptop', COLUMN_CREATE('color', 'black', 'price', 500)); +SELECT item_name, COLUMN_GET(dynamic_cols, 'color' as char) AS color FROM assets; +item_name color +MariaDB T-shirt blue +Thinkpad Laptop black +UPDATE assets SET dynamic_cols=COLUMN_ADD(dynamic_cols, 'warranty', '3 years') WHERE item_name='Thinkpad Laptop'; +SELECT item_name, COLUMN_GET(dynamic_cols, 'warranty' as char) AS color FROM assets; +item_name color +MariaDB T-shirt NULL +Thinkpad Laptop 3 years +UPDATE assets SET dynamic_cols=COLUMN_ADD(dynamic_cols, 'warranty', '4 years') +WHERE item_name in (select b.item_name +from assets b +where COLUMN_GET(b.dynamic_cols, 'color' as char) ='black'); +SELECT item_name, COLUMN_GET(dynamic_cols, 'warranty' as char) AS color FROM assets; +item_name color +MariaDB T-shirt NULL +Thinkpad Laptop 4 years +UPDATE assets SET dynamic_cols=COLUMN_ADD(dynamic_cols, 'warranty', (select COLUMN_GET(b.dynamic_cols, 'color' as char) +from assets b +where assets.item_name = item_name)); +SELECT item_name, COLUMN_GET(dynamic_cols, 'warranty' as char) AS color FROM assets; +item_name color +MariaDB T-shirt blue +Thinkpad Laptop black +drop table assets ; +# +# Test on fulltext columns +# +CREATE TABLE ft2(copy TEXT,FULLTEXT(copy)) ENGINE=MyISAM; +INSERT INTO ft2(copy) VALUES +('MySQL vs MariaDB database'), +('Oracle vs MariaDB database'), +('PostgreSQL vs MariaDB database'), +('MariaDB overview'), +('Foreign keys'), +('Primary keys'), +('Indexes'), +('Transactions'), +('Triggers'); +SELECT * FROM ft2 WHERE MATCH(copy) AGAINST('database'); +copy +MySQL vs MariaDB database +Oracle vs MariaDB database +PostgreSQL vs MariaDB database 
+update ft2 set copy = (select max(concat('mykeyword ',substr(b.copy,1,5))) from ft2 b WHERE MATCH(b.copy) AGAINST('database')) +where MATCH(copy) AGAINST('keys'); +SELECT * FROM ft2 WHERE MATCH(copy) AGAINST('mykeyword'); +copy +mykeyword Postg +mykeyword Postg +drop table ft2; +# +# Test with MyISAM +# +create table t1 (old_c1 integer, old_c2 integer,c1 integer, c2 integer, c3 integer) engine=MyISAM; +insert t1 (c1,c2,c3) select 0,seq,seq%10 from seq_1_to_500; +insert t1 (c1,c2,c3) select 1,seq,seq%10 from seq_1_to_400; +insert t1 (c1,c2,c3) select 2,seq,seq%10 from seq_1_to_300; +insert t1 (c1,c2,c3) select 3,seq,seq%10 from seq_1_to_200; +create index t1_idx1 on t1(c3); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Table is already up to date +update t1 set c1=2 where exists (select 'x' from t1); +select count(*) from t1 where c1=2; +count(*) +1400 +update t1 set c1=3 where c3 in (select c3 from t1 b where t1.c3=b.c1); +select count(*) from t1 where c1=3; +count(*) +140 +drop table t1; +# +# Test error on multi_update conversion on view with order by or limit +# +create table t1 (c1 integer) engine=InnoDb; +create table t2 (c1 integer) engine=InnoDb; +create view v1 as select t1.c1 as "t1c1" ,t2.c1 as "t2c1" from t1,t2 where t1.c1=t2.c1; +update v1 set t1c1=2 order by 1; +ERROR 42S22: Unknown column '1' in 'order clause' +update v1 set t1c1=2 limit 1; +drop table t1; +drop table t2; +drop view v1; diff --git a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result index 0dd8b80a568..bf3d4f6dada 100644 --- a/mysql-test/r/user_var.result +++ b/mysql-test/r/user_var.result @@ -1,4 +1,3 @@ -drop table if exists t1,t2; set @a := foo; ERROR 42S22: Unknown column 'foo' in 'field list' set @a := connection_id() + 3; @@ -126,14 +125,14 @@ select @a+0, @a:=@a+0+count(*), count(*), @a+0 from t1 group by i; set @a=0; select @a,@a:="hello",@a,@a:=3,@a,@a:="hello again" from t1 group by i; @a @a:="hello" @a @a:=3 @a @a:="hello again" -0 hello 0 3 3 hello again -0 hello 0 3 3 hello again -0 hello 0 3 3 hello again +0 hello 0 3 0 hello again +0 hello 0 3 0 hello again +0 hello 0 3 0 hello again select @a,@a:="hello",@a,@a:=3,@a,@a:="hello again" from t1 group by i; @a @a:="hello" @a @a:=3 @a @a:="hello again" -hello again hello hello 3 3 hello again -hello again hello hello 3 3 hello again -hello again hello hello 3 3 hello again +hello again hello hello again 3 hello again hello again +hello again hello hello again 3 hello again hello again +hello again hello hello again 3 hello again hello again drop table t1; set @a=_latin2'test'; select charset(@a),collation(@a),coercibility(@a); @@ -570,6 +569,9 @@ End of 5.5 tests # set @var= repeat('a',20000); 1 +explain select @a:=max(seq) from seq_1_to_1000000; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away # # Start of 10.3 tests # diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 676432690b4..dde58ed0ab2 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -1548,7 +1548,7 @@ one 1 explain SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0); id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0); 
one @@ -1559,7 +1559,7 @@ one set sql_buffer_result=1; explain SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0); id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary 2 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using where SELECT 1 as 'one' FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE f1 = 0); one diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 24c669308cd..6eead303c7a 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -943,74 +943,6 @@ create table t3 (col1 datetime not null); create view v1 as select * from t1; create view v2 as select * from v1; create view v3 as select v2.col1 from v2,t2 where v2.col1 = t2.col1; -update v2 set col1 = (select max(col1) from v1); -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 'v2' -update v2 set col1 = (select max(col1) from t1); -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 'v2' -update v2 set col1 = (select max(col1) from v2); -ERROR HY000: Table 'v2' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update v2,t2 set v2.col1 = (select max(col1) from v1) where v2.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 'v2' -update t1,t2 set t1.col1 = (select max(col1) from v1) where t1.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 't1' -update v1,t2 set v1.col1 = (select max(col1) from v1) where v1.col1 = t2.col1; -ERROR HY000: Table 'v1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t2,v2 set v2.col1 = (select max(col1) from v1) where v2.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 't2' -update t2,t1 set t1.col1 = (select max(col1) from v1) where t1.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 't2' -update t2,v1 set v1.col1 = (select max(col1) from v1) where v1.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 't2' -update v2,t2 set v2.col1 = (select max(col1) from t1) where v2.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 'v2' -update t1,t2 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1; -ERROR HY000: Table 't1' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update v1,t2 set v1.col1 = (select max(col1) from t1) where v1.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 'v1' -update t2,v2 set v2.col1 = (select max(col1) from t1) where v2.col1 = t2.col1; -ERROR HY000: Table 't2' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t2,t1 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1; -ERROR HY000: Table 't2' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update t2,v1 set v1.col1 = (select max(col1) from t1) where v1.col1 = t2.col1; -ERROR HY000: Table 't2' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update v2,t2 set v2.col1 = (select max(col1) from v2) where v2.col1 = t2.col1; -ERROR HY000: Table 'v2' is specified twice, both as a target for 'UPDATE' and as a separate source for data -update 
t1,t2 set t1.col1 = (select max(col1) from v2) where t1.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 't1' -update v1,t2 set v1.col1 = (select max(col1) from v2) where v1.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 'v1' -update t2,v2 set v2.col1 = (select max(col1) from v2) where v2.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 't2' -update t2,t1 set t1.col1 = (select max(col1) from v2) where t1.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 't2' -update t2,v1 set v1.col1 = (select max(col1) from v2) where v1.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 't2' -update v3 set v3.col1 = (select max(col1) from v1); -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 'v3' -update v3 set v3.col1 = (select max(col1) from t1); -ERROR HY000: The definition of table 'v3' prevents operation UPDATE on table 'v3' -update v3 set v3.col1 = (select max(col1) from v2); -ERROR HY000: The definition of table 'v2' prevents operation UPDATE on table 'v3' -update v3 set v3.col1 = (select max(col1) from v3); -ERROR HY000: Table 'v3' is specified twice, both as a target for 'UPDATE' and as a separate source for data -delete v2 from v2,t2 where (select max(col1) from v1) > 0 and v2.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation DELETE on table 'v2' -delete t1 from t1,t2 where (select max(col1) from v1) > 0 and t1.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation DELETE on table 't1' -delete v1 from v1,t2 where (select max(col1) from v1) > 0 and v1.col1 = t2.col1; -ERROR HY000: Table 'v1' is specified twice, both as a target for 'DELETE' and as a separate source for data -delete v2 from v2,t2 where (select max(col1) from t1) > 0 and v2.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation DELETE on table 'v2' -delete t1 from t1,t2 where (select max(col1) from t1) > 0 and t1.col1 = t2.col1; -ERROR HY000: Table 't1' is specified twice, both as a target for 'DELETE' and as a separate source for data -delete v1 from v1,t2 where (select max(col1) from t1) > 0 and v1.col1 = t2.col1; -ERROR HY000: The definition of table 'v1' prevents operation DELETE on table 'v1' -delete v2 from v2,t2 where (select max(col1) from v2) > 0 and v2.col1 = t2.col1; -ERROR HY000: Table 'v2' is specified twice, both as a target for 'DELETE' and as a separate source for data -delete t1 from t1,t2 where (select max(col1) from v2) > 0 and t1.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation DELETE on table 't1' -delete v1 from v1,t2 where (select max(col1) from v2) > 0 and v1.col1 = t2.col1; -ERROR HY000: The definition of table 'v2' prevents operation DELETE on table 'v1' insert into v2 values ((select max(col1) from v1)); ERROR HY000: The definition of table 'v1' prevents operation INSERT on table 'v2' insert into t1 values ((select max(col1) from v1)); @@ -2024,8 +1956,6 @@ create view v1 as select f59, f60 from t1 where f59 in (select f59 from t1); update v1 set f60=2345; ERROR HY000: The target table v1 of the UPDATE is not updatable -update t1 set f60=(select max(f60) from v1); -ERROR HY000: The definition of table 'v1' prevents operation UPDATE on table 't1' drop view v1; drop table t1; create table t1 (s1 int); diff --git 
a/mysql-test/suite/galera/r/galera_mdev_13787.result b/mysql-test/suite/galera/r/galera_mdev_13787.result new file mode 100644 index 00000000000..b1caec0283c --- /dev/null +++ b/mysql-test/suite/galera/r/galera_mdev_13787.result @@ -0,0 +1,4 @@ +connection node_1; +create table t(a int); +insert into t select 1; +DROP TABLE t; diff --git a/mysql-test/suite/galera/t/galera_mdev_13787.opt b/mysql-test/suite/galera/t/galera_mdev_13787.opt new file mode 100644 index 00000000000..27ec1e3f00e --- /dev/null +++ b/mysql-test/suite/galera/t/galera_mdev_13787.opt @@ -0,0 +1 @@ +--innodb-stats-persistent=1 diff --git a/mysql-test/suite/galera/t/galera_mdev_13787.test b/mysql-test/suite/galera/t/galera_mdev_13787.test new file mode 100644 index 00000000000..940cffb8b65 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_mdev_13787.test @@ -0,0 +1,6 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc +--connection node_1 +create table t(a int); +insert into t select 1; +DROP TABLE t; diff --git a/mysql-test/suite/innodb/r/innodb-truncate.result b/mysql-test/suite/innodb/r/innodb-truncate.result index f63e9272850..a606868ae52 100644 --- a/mysql-test/suite/innodb/r/innodb-truncate.result +++ b/mysql-test/suite/innodb/r/innodb-truncate.result @@ -47,12 +47,24 @@ SET @@SESSION.foreign_key_checks = @old_foreign_key_checks; # # Test that TRUNCATE resets auto-increment. # -CREATE TABLE t1 (a INT PRIMARY KEY NOT NULL AUTO_INCREMENT); -INSERT INTO t1 VALUES (NULL), (NULL); +CREATE TABLE t1 (a INT PRIMARY KEY NOT NULL AUTO_INCREMENT, +b INT, c INT, d INT, e INT, f INT, g INT, h INT, i INT, j INT, k INT, +l INT, m INT, n INT, o INT, p INT, q INT, r INT, s INT, t INT, u INT, +KEY(b),KEY(c),KEY(d),KEY(e),KEY(f),KEY(g),KEY(h),KEY(i),KEY(j),KEY(k), +KEY(l),KEY(m),KEY(n),KEY(o),KEY(p),KEY(q),KEY(r),KEY(s),KEY(t),KEY(u), +KEY(c,b),KEY(d,b),KEY(e,b),KEY(f,b),KEY(g,b),KEY(h,b),KEY(i,b),KEY(j,b), +KEY(k,b),KEY(l,b),KEY(m,b),KEY(n,b),KEY(o,b),KEY(p,b),KEY(q,b),KEY(r,b), +KEY(s,b),KEY(t,b),KEY(u,b), +KEY(d,c),KEY(e,c),KEY(f,c),KEY(g,c),KEY(h,c),KEY(i,c),KEY(j,c), +KEY(k,c),KEY(l,c),KEY(m,c),KEY(n,c),KEY(o,c),KEY(p,c),KEY(q,c),KEY(r,c), +KEY(s,c),KEY(t,c),KEY(u,c), +KEY(e,d),KEY(f,d),KEY(g,d),KEY(h,d),KEY(i,d),KEY(j,d) +) ENGINE=InnoDB; +INSERT INTO t1 () VALUES (), (); SELECT AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 't1'; AUTO_INCREMENT 3 -SELECT * FROM t1 ORDER BY a; +SELECT a FROM t1 ORDER BY a; a 1 2 @@ -60,8 +72,8 @@ TRUNCATE TABLE t1; SELECT AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 't1'; AUTO_INCREMENT 1 -INSERT INTO t1 VALUES (NULL), (NULL); -SELECT * FROM t1 ORDER BY a; +INSERT INTO t1 () VALUES (), (); +SELECT a FROM t1 ORDER BY a; a 1 2 diff --git a/mysql-test/suite/innodb/t/innodb-truncate.test b/mysql-test/suite/innodb/t/innodb-truncate.test index ae25aabd323..8f9b1f1f0e9 100644 --- a/mysql-test/suite/innodb/t/innodb-truncate.test +++ b/mysql-test/suite/innodb/t/innodb-truncate.test @@ -52,13 +52,24 @@ SET @@SESSION.foreign_key_checks = @old_foreign_key_checks; --echo # Test that TRUNCATE resets auto-increment. 
--echo # -CREATE TABLE t1 (a INT PRIMARY KEY NOT NULL AUTO_INCREMENT); -INSERT INTO t1 VALUES (NULL), (NULL); +CREATE TABLE t1 (a INT PRIMARY KEY NOT NULL AUTO_INCREMENT, + b INT, c INT, d INT, e INT, f INT, g INT, h INT, i INT, j INT, k INT, + l INT, m INT, n INT, o INT, p INT, q INT, r INT, s INT, t INT, u INT, + KEY(b),KEY(c),KEY(d),KEY(e),KEY(f),KEY(g),KEY(h),KEY(i),KEY(j),KEY(k), + KEY(l),KEY(m),KEY(n),KEY(o),KEY(p),KEY(q),KEY(r),KEY(s),KEY(t),KEY(u), + KEY(c,b),KEY(d,b),KEY(e,b),KEY(f,b),KEY(g,b),KEY(h,b),KEY(i,b),KEY(j,b), + KEY(k,b),KEY(l,b),KEY(m,b),KEY(n,b),KEY(o,b),KEY(p,b),KEY(q,b),KEY(r,b), + KEY(s,b),KEY(t,b),KEY(u,b), + KEY(d,c),KEY(e,c),KEY(f,c),KEY(g,c),KEY(h,c),KEY(i,c),KEY(j,c), + KEY(k,c),KEY(l,c),KEY(m,c),KEY(n,c),KEY(o,c),KEY(p,c),KEY(q,c),KEY(r,c), + KEY(s,c),KEY(t,c),KEY(u,c), + KEY(e,d),KEY(f,d),KEY(g,d),KEY(h,d),KEY(i,d),KEY(j,d) +) ENGINE=InnoDB; +INSERT INTO t1 () VALUES (), (); SELECT AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 't1'; -SELECT * FROM t1 ORDER BY a; +SELECT a FROM t1 ORDER BY a; TRUNCATE TABLE t1; SELECT AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 't1'; -INSERT INTO t1 VALUES (NULL), (NULL); -SELECT * FROM t1 ORDER BY a; +INSERT INTO t1 () VALUES (), (); +SELECT a FROM t1 ORDER BY a; DROP TABLE t1; - diff --git a/mysql-test/suite/rpl/r/rpl_gtid_basic.result b/mysql-test/suite/rpl/r/rpl_gtid_basic.result index 3722e438d32..32df09789cc 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_basic.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_basic.result @@ -568,3 +568,16 @@ ERROR HY000: Function or expression 'binlog_gtid_pos()' cannot be used in the DE # # End of 10.2 tests # +# +# Start of 10.3 tests +# +# +# MDEV-13967 Parameter data type control for Item_long_func +# +SELECT MASTER_GTID_WAIT(ROW(1,1),'str'); +ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait' +SELECT MASTER_GTID_WAIT('str',ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'master_gtid_wait' +# +# End of 10.3 tests +# diff --git a/mysql-test/suite/rpl/r/rpl_master_pos_wait.result b/mysql-test/suite/rpl/r/rpl_master_pos_wait.result index 18298986069..04f55fc1263 100644 --- a/mysql-test/suite/rpl/r/rpl_master_pos_wait.result +++ b/mysql-test/suite/rpl/r/rpl_master_pos_wait.result @@ -45,4 +45,17 @@ master_pos_wait('master-bin.000001',1000000,1,"my_slave") STOP SLAVE 'my_slave'; RESET SLAVE 'my_slave' ALL; change master to master_port=MASTER_MYPORT, master_host='127.0.0.1', master_user='root'; +# +# Start of 10.3 tests +# +# +# MDEV-13965 Parameter data type control for Item_longlong_func +# +SELECT MASTER_POS_WAIT('x',1,ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'master_pos_wait' +SELECT MASTER_POS_WAIT('x',1,1,ROW(1,1)); +ERROR HY000: Illegal parameter data type row for operation 'master_pos_wait' +# +# End of 10.3 tests +# include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_basic.test b/mysql-test/suite/rpl/t/rpl_gtid_basic.test index b04f82e1725..e14bf89425d 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_basic.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_basic.test @@ -14,3 +14,23 @@ CREATE TABLE t1 (a VARCHAR(100) DEFAULT BINLOG_GTID_POS("master-bin.000001", 600 --echo # --echo # End of 10.2 tests --echo # + + +--echo # +--echo # Start of 10.3 tests +--echo # + +--echo # +--echo # MDEV-13967 Parameter data type control for Item_long_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MASTER_GTID_WAIT(ROW(1,1),'str'); + +--error 
ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MASTER_GTID_WAIT('str',ROW(1,1)); + + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/suite/rpl/t/rpl_master_pos_wait.test b/mysql-test/suite/rpl/t/rpl_master_pos_wait.test index a3f3ff56464..d8c8162ed9f 100644 --- a/mysql-test/suite/rpl/t/rpl_master_pos_wait.test +++ b/mysql-test/suite/rpl/t/rpl_master_pos_wait.test @@ -56,5 +56,23 @@ eval change master to master_port=$MASTER_MYPORT, master_host='127.0.0.1', maste # End of 10.0 tests +--echo # +--echo # Start of 10.3 tests +--echo # + +--echo # +--echo # MDEV-13965 Parameter data type control for Item_longlong_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MASTER_POS_WAIT('x',1,ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MASTER_POS_WAIT('x',1,1,ROW(1,1)); + +--echo # +--echo # End of 10.3 tests +--echo # + + --let $rpl_only_running_threads= 1 --source include/rpl_end.inc diff --git a/mysql-test/t/func_int.test b/mysql-test/t/func_int.test new file mode 100644 index 00000000000..98794561933 --- /dev/null +++ b/mysql-test/t/func_int.test @@ -0,0 +1,188 @@ +--echo # +--echo # Start of 10.3 tests +--echo # + + +--echo # +--echo # MDEV-13965 Parameter data type control for Item_longlong_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ROW(1,1) | 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 | ROW(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ROW(1,1) & 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 & ROW(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ROW(1,1) << 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 << ROW(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ROW(1,1) >> 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 >> ROW(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ~ROW(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TO_SECONDS(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMESTAMPDIFF(SECOND,ROW(1,1), 1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMESTAMPDIFF(SECOND,1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT INET_ATON(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LAST_INSERT_ID(ROW(1,1)); + + +--echo # +--echo # MDEV-13967 Parameter data type control for Item_long_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT STRCMP(ROW(1,1),''); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT STRCMP('',ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CHAR_LENGTH(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT OCTET_LENGTH(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT UNCOMPRESSED_LENGTH(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT COERCIBILITY(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ASCII(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CRC32(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ORD(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SIGN(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOCATE(ROW(1,1),'a',1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 
LOCATE('a',ROW(1,1),1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOCATE('a','a',ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT BIT_COUNT(ROW(1,1)); + +--error ER_OPERAND_COLUMNS +SELECT BENCHMARK(1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT BENCHMARK(ROW(1,1),''); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SLEEP(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT GET_LOCK('x', ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT GET_LOCK(ROW(1,1),'x'); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_ADD(ROW(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_ADD(1,ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_DIFF(ROW(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_DIFF(1,ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TO_DAYS(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT DAYOFMONTH(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT DAYOFYEAR(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT QUARTER(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT YEAR(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT YEARWEEK(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT WEEK(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT WEEK(ROW(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT WEEK(1,ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT HOUR(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MINUTE(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SECOND(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MICROSECOND(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_DEPTH(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_LENGTH(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_LENGTH('json', ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_LENGTH(ROW(1,1), ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT REGEXP_INSTR(ROW(1,1),''); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT REGEXP_INSTR('',ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT FIND_IN_SET(ROW(1,1),''); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT FIND_IN_SET('',ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT RELEASE_LOCK(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT IS_FREE_LOCK(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT IS_USED_LOCK(ROW(1,1)); + +--echo # +--echo # End of 10.3 tests +--echo # diff --git a/mysql-test/t/func_regexp.test b/mysql-test/t/func_regexp.test index d0ab0cc9044..6d5186269a5 100644 --- a/mysql-test/t/func_regexp.test +++ b/mysql-test/t/func_regexp.test @@ -104,3 +104,9 @@ SELECT '\t' REGEXP '[[:blank:]]'; SELECT ' ' REGEXP '[[:space:]]'; SELECT '\t' REGEXP '[[:space:]]'; + +--echo # +--echo # MDEV-13967 Parameter data type control for Item_long_func +--echo # +SELECT REGEXP_INSTR('111222333',2); + diff --git a/mysql-test/t/func_time.test 
b/mysql-test/t/func_time.test index a3f542aaf57..3c154a8f67e 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -1880,3 +1880,44 @@ CREATE TABLE t1 AS SELECT TO_SECONDS('9999-12-31 23:59:59'); SHOW CREATE TABLE t1; DROP TABLE t1; SET sql_mode=DEFAULT; + + +--echo # +--echo # MDEV-13966 Parameter data type control for Item_temporal_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT FROM_DAYS(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKEDATE(ROW(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKEDATE(1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LAST_DAY(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SEC_TO_TIME(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMEDIFF(ROW(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMEDIFF(1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKETIME(ROW(1,1),1,1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKETIME(1, ROW(1,1), 1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKETIME(1, 1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT FROM_UNIXTIME(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CONVERT_TZ(ROW(1,1),1,1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CONVERT_TZ(1, ROW(1,1), 1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CONVERT_TZ(1, 1, ROW(1,1)); diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index 91e4b3d49f4..94e56774bf4 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -2689,6 +2689,303 @@ SELECT MBRTOUCHES('test', POINT(1,1)); --error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION SELECT MBRTOUCHES(POINT(1,1), 'test'); + +--echo # +--echo # MDEV-13964 Parameter data type control for Item_real_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT EXP(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LN(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG2(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG10(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SQRT(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ACOS(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ASIN(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT COS(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SIN(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TAN(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT COT(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG(POINT(1,1),POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG(1, POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ATAN(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ATAN(POINT(1,1),POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ATAN(1, POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT POW(POINT(1,1),POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT RAND(POINT(1,1)); + 
+--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT RADIANS(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT DEGREES(POINT(1,1)); + + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT EXP(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LN(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG2(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG10(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SQRT(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ACOS(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ASIN(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT COS(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SIN(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TAN(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT COT(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG(ROW(1,1),ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOG(1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ATAN(ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ATAN(ROW(1,1),ROW(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ATAN(1, ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT POW(ROW(1,1),ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT RAND(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT RADIANS(ROW(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT DEGREES(ROW(1,1)); + + +--echo # +--echo # MDEV-13965 Parameter data type control for Item_longlong_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT POINT(1,1) | 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 | POINT(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT POINT(1,1) & 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 & POINT(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT POINT(1,1) << 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 << POINT(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT POINT(1,1) >> 1; +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 1 >> POINT(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT ~POINT(1,1); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TO_SECONDS(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMESTAMPDIFF(SECOND,POINT(1,1), 1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMESTAMPDIFF(SECOND,1, POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT INET_ATON(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LAST_INSERT_ID(POINT(1,1)); + + +--echo # +--echo # MDEV-13966 Parameter data type control for Item_temporal_func +--echo # + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT FROM_DAYS(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKEDATE(POINT(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKEDATE(1, POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 
LAST_DAY(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SEC_TO_TIME(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMEDIFF(POINT(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TIMEDIFF(1, POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKETIME(POINT(1,1),1,1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKETIME(1, POINT(1,1), 1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MAKETIME(1, 1, POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT FROM_UNIXTIME(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CONVERT_TZ(POINT(1,1),1,1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CONVERT_TZ(1, POINT(1,1), 1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT CONVERT_TZ(1, 1, POINT(1,1)); + + +--echo # +--echo # MDEV-13967 Parameter data type control for Item_long_func +--echo # + +SELECT STRCMP(POINT(1,1),POINT(1,1)); +SELECT CHAR_LENGTH(POINT(1,1)); +SELECT OCTET_LENGTH(POINT(1,1)); +SELECT UNCOMPRESSED_LENGTH(POINT(1,1)); +SELECT COERCIBILITY(POINT(1,1)); +SELECT ASCII(POINT(1,1)); +SELECT CRC32(POINT(1,1)); +SELECT ORD(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SIGN(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT LOCATE('a','a',POINT(1,1)); + +SELECT LOCATE(POINT(1,1),POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT BIT_COUNT(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT BENCHMARK(POINT(1,1),''); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SLEEP(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT GET_LOCK('x', POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_ADD(POINT(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_ADD(1,POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_DIFF(POINT(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT PERIOD_DIFF(1,POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT TO_DAYS(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT DAYOFMONTH(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT DAYOFYEAR(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT QUARTER(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT YEAR(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT YEARWEEK(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT WEEK(POINT(1,1)); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT WEEK(POINT(1,1),1); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT WEEK(1,POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT HOUR(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MINUTE(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT SECOND(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT MICROSECOND(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_DEPTH(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_LENGTH(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT 
JSON_LENGTH('json', POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT JSON_LENGTH(POINT(1,1), POINT(1,1)); + +SELECT REGEXP_INSTR(POINT(1,1),''); +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT REGEXP_INSTR('',POINT(1,1)); + +SELECT FIND_IN_SET(POINT(1,1),''); +SELECT FIND_IN_SET('',POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT RELEASE_LOCK(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT IS_FREE_LOCK(POINT(1,1)); + +--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION +SELECT IS_USED_LOCK(POINT(1,1)); + + --echo # --echo # End of 10.3 tests --echo # diff --git a/mysql-test/t/lowercase_view.test b/mysql-test/t/lowercase_view.test index 4c91383db60..cdd0256d639 100644 --- a/mysql-test/t/lowercase_view.test +++ b/mysql-test/t/lowercase_view.test @@ -24,74 +24,6 @@ create view v1Aa as select * from t1aA; create view v2aA as select * from v1aA; create view v3Aa as select v2Aa.col1 from v2aA,t2Aa where v2Aa.col1 = t2aA.col1; -- error 1443 -update v2aA set col1 = (select max(col1) from v1Aa); --- error 1443 -update v2Aa set col1 = (select max(col1) from t1Aa); --- error 1093 -update v2aA set col1 = (select max(col1) from v2Aa); --- error 1443 -update v2aA,t2Aa set v2Aa.col1 = (select max(col1) from v1aA) where v2aA.col1 = t2aA.col1; --- error 1443 -update t1aA,t2Aa set t1Aa.col1 = (select max(col1) from v1Aa) where t1aA.col1 = t2aA.col1; --- error 1093 -update v1aA,t2Aa set v1Aa.col1 = (select max(col1) from v1aA) where v1Aa.col1 = t2aA.col1; --- error 1443 -update t2Aa,v2Aa set v2aA.col1 = (select max(col1) from v1aA) where v2Aa.col1 = t2aA.col1; --- error 1443 -update t2Aa,t1Aa set t1aA.col1 = (select max(col1) from v1Aa) where t1Aa.col1 = t2aA.col1; --- error 1443 -update t2Aa,v1aA set v1Aa.col1 = (select max(col1) from v1aA) where v1Aa.col1 = t2aA.col1; --- error 1443 -update v2aA,t2Aa set v2Aa.col1 = (select max(col1) from t1aA) where v2aA.col1 = t2aA.col1; --- error 1093 -update t1Aa,t2Aa set t1aA.col1 = (select max(col1) from t1Aa) where t1aA.col1 = t2aA.col1; --- error 1443 -update v1aA,t2Aa set v1Aa.col1 = (select max(col1) from t1Aa) where v1aA.col1 = t2aA.col1; --- error 1093 -update t2Aa,v2Aa set v2aA.col1 = (select max(col1) from t1aA) where v2Aa.col1 = t2aA.col1; --- error 1093 -update t2Aa,t1Aa set t1aA.col1 = (select max(col1) from t1Aa) where t1aA.col1 = t2aA.col1; --- error 1093 -update t2Aa,v1Aa set v1aA.col1 = (select max(col1) from t1Aa) where v1Aa.col1 = t2aA.col1; --- error 1093 -update v2aA,t2Aa set v2Aa.col1 = (select max(col1) from v2aA) where v2Aa.col1 = t2aA.col1; --- error 1443 -update t1aA,t2Aa set t1Aa.col1 = (select max(col1) from v2aA) where t1aA.col1 = t2aA.col1; --- error 1443 -update v1aA,t2Aa set v1Aa.col1 = (select max(col1) from v2Aa) where v1aA.col1 = t2aA.col1; --- error 1443 -update t2Aa,v2aA set v2Aa.col1 = (select max(col1) from v2aA) where v2Aa.col1 = t2aA.col1; --- error 1443 -update t2Aa,t1Aa set t1aA.col1 = (select max(col1) from v2aA) where t1Aa.col1 = t2aA.col1; --- error 1443 -update t2Aa,v1Aa set v1aA.col1 = (select max(col1) from v2Aa) where v1Aa.col1 = t2aA.col1; --- error 1443 -update v3aA set v3Aa.col1 = (select max(col1) from v1aA); --- error 1443 -update v3aA set v3Aa.col1 = (select max(col1) from t1aA); --- error 1443 -update v3aA set v3Aa.col1 = (select max(col1) from v2aA); --- error 1093 -update v3aA set v3Aa.col1 = (select max(col1) from v3aA); --- error 1443 -delete v2Aa from v2aA,t2Aa where (select max(col1) from v1aA) > 0 and 
v2Aa.col1 = t2aA.col1; --- error 1443 -delete t1aA from t1Aa,t2Aa where (select max(col1) from v1Aa) > 0 and t1aA.col1 = t2aA.col1; --- error 1093 -delete v1aA from v1Aa,t2Aa where (select max(col1) from v1aA) > 0 and v1Aa.col1 = t2aA.col1; --- error 1443 -delete v2aA from v2Aa,t2Aa where (select max(col1) from t1Aa) > 0 and v2aA.col1 = t2aA.col1; --- error 1093 -delete t1aA from t1Aa,t2Aa where (select max(col1) from t1aA) > 0 and t1Aa.col1 = t2aA.col1; --- error 1443 -delete v1aA from v1Aa,t2Aa where (select max(col1) from t1aA) > 0 and v1aA.col1 = t2aA.col1; --- error 1093 -delete v2Aa from v2aA,t2Aa where (select max(col1) from v2Aa) > 0 and v2aA.col1 = t2aA.col1; --- error 1443 -delete t1Aa from t1aA,t2Aa where (select max(col1) from v2Aa) > 0 and t1Aa.col1 = t2aA.col1; --- error 1443 -delete v1Aa from v1aA,t2Aa where (select max(col1) from v2aA) > 0 and v1Aa.col1 = t2aA.col1; --- error 1443 insert into v2Aa values ((select max(col1) from v1aA)); -- error 1443 insert into t1aA values ((select max(col1) from v1Aa)); @@ -176,4 +108,3 @@ DROP TABLE `ttt`; --echo End of 5.0 tests. - diff --git a/mysql-test/t/merge.test b/mysql-test/t/merge.test index 95c78caf034..99309e6a7f0 100644 --- a/mysql-test/t/merge.test +++ b/mysql-test/t/merge.test @@ -2732,38 +2732,6 @@ insert into m1 (a) values ((select max(a) from v1)); insert into m1 (a) values ((select max(a) from tmp, v1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from m1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from m2)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from t1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from t2)); - ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from t3, m1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from t3, m2)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from t3, t1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from t3, t2)); - ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from tmp, m1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from tmp, m2)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from tmp, t1)); ---error ER_UPDATE_TABLE_USED -update m1 set a = ((select max(a) from tmp, t2)); - ---error ER_VIEW_PREVENT_UPDATE -update m1 set a = ((select max(a) from v1)); ---error ER_VIEW_PREVENT_UPDATE -update m1 set a = ((select max(a) from tmp, v1)); - drop view v1; drop temporary table tmp; drop table t1, t2, t3, m1, m2; @@ -2948,4 +2916,3 @@ eval set global storage_engine=$default; # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. --source include/wait_until_count_sessions.inc - diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test index 64e61f7c0b5..5feebe87a5a 100644 --- a/mysql-test/t/multi_update.test +++ b/mysql-test/t/multi_update.test @@ -2,12 +2,6 @@ # Test of update statement that uses many tables. 
# -# Requires grants, so won't work with embedded server test -source include/not_embedded.inc; -source include/have_log_bin.inc; - -CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); - create table t1(id1 int not null auto_increment primary key, t char(12)); create table t2(id2 int not null, t char(12)); create table t3(id3 int not null, t char(12), index(id3)); @@ -376,6 +370,7 @@ connection root; revoke all privileges on mysqltest.t1 from mysqltest_1@localhost; revoke all privileges on mysqltest.* from mysqltest_1@localhost; delete from mysql.user where user=_binary'mysqltest_1'; +flush privileges; drop database mysqltest; connection default; disconnect user1; @@ -396,7 +391,6 @@ drop table t1, t2, t3; # create table t1 (col1 int); create table t2 (col1 int); --- error ER_UPDATE_TABLE_USED update t1,t2 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1; -- error ER_UPDATE_TABLE_USED delete t1 from t1,t2 where t1.col1 < (select max(col1) from t1) and t1.col1 = t2.col1; @@ -497,84 +491,6 @@ select * from t1 order by i1; select * from t2 order by id; drop table t1, t2; -# -# Bug#27716 multi-update did partially and has not binlogged -# - -CREATE TABLE `t1` ( - `a` int(11) NOT NULL auto_increment, - `b` int(11) default NULL, - PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; - -CREATE TABLE `t2` ( - `a` int(11) NOT NULL auto_increment, - `b` int(11) default NULL, - PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; - -# as the test is about to see erroed queries in binlog -set @sav_binlog_format= @@session.binlog_format; -set @@session.binlog_format= mixed; - - -# A. testing multi_update::send_error() effective update -insert into t1 values (1,1),(2,2); -insert into t2 values (1,1),(4,4); -reset master; ---error ER_DUP_ENTRY -UPDATE t2,t1 SET t2.a=t1.a+2; -# check -select * from t2 /* must be (3,1), (4,4) */; -source include/show_binlog_events.inc; - -# B. testing multi_update::send_error() ineffective update -# (as there is a policy described at mysql_update() still go to binlog) -delete from t1; -delete from t2; -insert into t1 values (1,2),(3,4),(4,4); -insert into t2 values (1,2),(3,4),(4,4); -reset master; ---error ER_DUP_ENTRY -UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a; -source include/show_binlog_events.inc; - -# cleanup -drop table t1, t2; -set @@session.binlog_format= @sav_binlog_format; - -# -# Bug#29136 erred multi-delete on trans table does not rollback -# - -# prepare -CREATE TABLE t1 (a int, PRIMARY KEY (a)); -CREATE TABLE t2 (a int, PRIMARY KEY (a)); -CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM; -create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1); - -insert into t2 values (1),(2); -insert into t3 values (1),(2); -reset master; - -# exec cases B, A - see innodb.test - -# B. 
send_eof() and send_error() afterward - ---error ER_DUP_ENTRY -delete t3.* from t2,t3 where t2.a=t3.a; - -# check -select count(*) from t1 /* must be 1 */; -select count(*) from t3 /* must be 1 */; - -# cleanup -drop table t1, t2, t3; - -# -# Add further tests from here -# - --echo # --echo # Bug#49534: multitable IGNORE update with sql_safe_updates error --echo # causes debug assertion @@ -984,3 +900,17 @@ deallocate prepare stmt1; drop view v3,v2,v1; drop table t1,t2,t3; --echo end of 5.5 tests + +# +# MDEV-13911 Support ORDER BY and LIMIT in multi-table update +# + +create table t1 (c1 int, c3 int); +insert t1(c3) values (1), (2), (3), (4), (5), (6), (7), (8); +create table t2 select * from t1; +update t1, t2 set t1.c1=t2.c3 where t1.c3=t2.c3 order by t1.c3 limit 3; +select * from t1; +update t1 set c1=NULL; +update t1, t2 set t1.c1=t2.c3 where t1.c3=t2.c3 order by t1.c3 desc limit 2; +select * from t1; +drop table t1, t2; diff --git a/mysql-test/t/multi_update_binlog.test b/mysql-test/t/multi_update_binlog.test new file mode 100644 index 00000000000..16155aa1af3 --- /dev/null +++ b/mysql-test/t/multi_update_binlog.test @@ -0,0 +1,82 @@ +# +# Test of update statement that uses many tables. +# + +source include/have_log_bin.inc; + +CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT."); + +# +# Bug#27716 multi-update did partially and has not binlogged +# + +CREATE TABLE `t1` ( + `a` int(11) NOT NULL auto_increment, + `b` int(11) default NULL, + PRIMARY KEY (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; + +CREATE TABLE `t2` ( + `a` int(11) NOT NULL auto_increment, + `b` int(11) default NULL, + PRIMARY KEY (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 ; + +# as the test is about to see erroed queries in binlog +set @sav_binlog_format= @@session.binlog_format; +set @@session.binlog_format= mixed; + + +# A. testing multi_update::send_error() effective update +insert into t1 values (1,1),(2,2); +insert into t2 values (1,1),(4,4); +reset master; +--error ER_DUP_ENTRY +UPDATE t2,t1 SET t2.a=t1.a+2; +# check +select * from t2 /* must be (3,1), (4,4) */; +source include/show_binlog_events.inc; + +# B. testing multi_update::send_error() ineffective update +# (as there is a policy described at mysql_update() still go to binlog) +delete from t1; +delete from t2; +insert into t1 values (1,2),(3,4),(4,4); +insert into t2 values (1,2),(3,4),(4,4); +reset master; +--error ER_DUP_ENTRY +UPDATE t2,t1 SET t2.a=t2.b where t2.a=t1.a; +source include/show_binlog_events.inc; + +# cleanup +drop table t1, t2; +set @@session.binlog_format= @sav_binlog_format; + +# +# Bug#29136 erred multi-delete on trans table does not rollback +# + +# prepare +CREATE TABLE t1 (a int, PRIMARY KEY (a)); +CREATE TABLE t2 (a int, PRIMARY KEY (a)); +CREATE TABLE t3 (a int, PRIMARY KEY (a)) ENGINE=MyISAM; +create trigger trg_del_t3 before delete on t3 for each row insert into t1 values (1); + +insert into t2 values (1),(2); +insert into t3 values (1),(2); +reset master; + +# exec cases B, A - see innodb.test + +# B. 
send_eof() and send_error() afterward + +--error ER_DUP_ENTRY +delete t3.* from t2,t3 where t2.a=t3.a; + +# check +select count(*) from t1 /* must be 1 */; +select count(*) from t3 /* must be 1 */; + +# cleanup +drop table t1, t2, t3; + diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 7412eae8ecf..5e1e1494fee 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -325,8 +325,6 @@ create table t2 (a int NOT NULL, b int, primary key (a)); insert into t1 values (0, 10),(1, 11),(2, 12); insert into t2 values (1, 21),(2, 22),(3, 23); select * from t1; --- error ER_UPDATE_TABLE_USED -update t1 set b= (select b from t1); -- error ER_SUBQUERY_NO_1_ROW update t1 set b= (select b from t2); update t1 set b= (select b from t2 where t1.a = t2.a); diff --git a/mysql-test/t/update_use_source.test b/mysql-test/t/update_use_source.test new file mode 100644 index 00000000000..7ed5f95d68d --- /dev/null +++ b/mysql-test/t/update_use_source.test @@ -0,0 +1,245 @@ +--source include/have_sequence.inc +--source include/have_innodb.inc + +create table t1 (old_c1 integer, old_c2 integer,c1 integer, c2 integer, c3 integer) engine=InnoDb; +create view v1 as select * from t1 where c2=2; +delimiter /; +create trigger trg_t1 before update on t1 for each row +begin + set new.old_c1=old.c1; + set new.old_c2=old.c2; +end; +/ +delimiter ;/ + +insert into t1(c1,c2,c3) values (1,1,1); +insert into t1(c1,c2,c3) values (1,2,2); +insert into t1(c1,c2,c3) values (1,3,3); +insert into t1(c1,c2,c3) values (2,1,4); +insert into t1(c1,c2,c3) values (2,2,5); +insert into t1(c1,c2,c3) values (2,3,6); +insert into t1(c1,c2,c3) values (2,4,7); +insert into t1(c1,c2,c3) values (2,5,8); + +commit; +select * from t1; + +--echo Test without any index +--source include/update_use_source.inc + +--echo Test with an index on updated columns +create index t1_c2 on t1 (c2,c1); +--source include/update_use_source.inc + +--echo Test with an index on updated columns +create index t1_c3 on t1 (c3); +--source include/update_use_source.inc + +--echo Test with a primary key on updated columns +drop index t1_c3 on t1; +alter table t1 add primary key (c3); +--source include/update_use_source.inc + +--echo # Update with error "Subquery returns more than 1 row" +--error ER_SUBQUERY_NO_1_ROW +update t1 set c2=(select c2 from t1); + +--echo # Update with error "Subquery returns more than 1 row" and order by +--error ER_SUBQUERY_NO_1_ROW +update t1 set c2=(select c2 from t1) order by c3; + +-- echo Duplicate value on update a primary key +start transaction; +--error ER_DUP_ENTRY +update t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +rollback; + +-- echo Duplicate value on update a primary key with ignore +start transaction; +--enable_info ONCE +update ignore t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3; +rollback; + +-- echo Duplicate value on update a primary key and limit +start transaction; +--error ER_DUP_ENTRY +update t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 limit 2; +rollback; + +-- echo Duplicate value on update a primary key with ignore and limit +start transaction; +--enable_info ONCE +update ignore t1 set c3=0 where exists (select 'X' from t1 a where a.c2 = t1.c2) and c2 >= 3 limit 2; +rollback; + +--echo # Update no rows found +--enable_info ONCE +update t1 + set c1=10 + where c1 <2 + and exists (select 'X' + from t1 a + where a.c1 = t1.c1 + 10); + +--echo # Update no rows changed +drop trigger 
trg_t1; +start transaction; +--enable_info ONCE +update t1 + set c1=c1 + where c1 <2 + and exists (select 'X' + from t1 a + where a.c1 = t1.c1); +rollback; + +--echo # +--echo # Check call of after trigger +--echo # + +delimiter /; +create or replace trigger trg_t2 after update on t1 for each row +begin + declare msg varchar(100); + if (new.c3 = 5) then + set msg=concat('in after update trigger on ',new.c3); + SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = msg; + end if; +end; +/ +delimiter ;/ +--error 1644 +update t1 set c1=2 where c3 in (select distinct a.c3 from t1 a where a.c1=t1.c1); + +--echo # +--echo # Check update with order by and after trigger +--echo # + +--error 1644 +update t1 set c1=2 where c3 in (select distinct a.c3 from t1 a where a.c1=t1.c1) order by t1.c2; + +drop view v1; +--echo # +--echo # Check update on view with check option +--echo # + +create view v1 as select * from t1 where c2=2 with check option; + +start transaction; +-- error 1369 +update v1 set c2=3 where c1=1; +rollback; + +start transaction; +-- error 1369 +update v1 set c2=(select max(c3) from v1) where c1=1; +rollback; + +start transaction; +update v1 set c2=(select min(va.c3) from v1 va), c1=0 where c1=1; +rollback; + +drop view v1; +drop table t1; + +--echo # +--echo # Test with a temporary table +--echo # + +create temporary table t1 (c1 integer, c2 integer, c3 integer) engine=InnoDb; +insert into t1(c1,c2,c3) values (1,1,1); +insert into t1(c1,c2,c3) values (1,2,2); +insert into t1(c1,c2,c3) values (1,3,3); +insert into t1(c1,c2,c3) values (2,1,4); +insert into t1(c1,c2,c3) values (2,2,5); +insert into t1(c1,c2,c3) values (2,3,6); +insert into t1(c1,c2,c3) values (2,4,7); +insert into t1(c1,c2,c3) values (2,5,8); + +start transaction; +--enable_info ONCE +update t1 + set c1=(select a.c2 + from t1 a + where a.c3 = t1.c3) limit 3; +select * from t1 ; +rollback; +drop table t1; + +--echo # +--echo # Test on dynamic columns (blob) +--echo # + +create table assets ( + item_name varchar(32) primary key, -- A common attribute for all items + dynamic_cols blob -- Dynamic columns will be stored here +); +INSERT INTO assets VALUES ('MariaDB T-shirt', COLUMN_CREATE('color', 'blue', 'size', 'XL')); +INSERT INTO assets VALUES ('Thinkpad Laptop', COLUMN_CREATE('color', 'black', 'price', 500)); +SELECT item_name, COLUMN_GET(dynamic_cols, 'color' as char) AS color FROM assets; +UPDATE assets SET dynamic_cols=COLUMN_ADD(dynamic_cols, 'warranty', '3 years') WHERE item_name='Thinkpad Laptop'; +SELECT item_name, COLUMN_GET(dynamic_cols, 'warranty' as char) AS color FROM assets; +UPDATE assets SET dynamic_cols=COLUMN_ADD(dynamic_cols, 'warranty', '4 years') + WHERE item_name in (select b.item_name + from assets b + where COLUMN_GET(b.dynamic_cols, 'color' as char) ='black'); +SELECT item_name, COLUMN_GET(dynamic_cols, 'warranty' as char) AS color FROM assets; + +UPDATE assets SET dynamic_cols=COLUMN_ADD(dynamic_cols, 'warranty', (select COLUMN_GET(b.dynamic_cols, 'color' as char) + from assets b + where assets.item_name = item_name)); +SELECT item_name, COLUMN_GET(dynamic_cols, 'warranty' as char) AS color FROM assets; +drop table assets ; + +--echo # +--echo # Test on fulltext columns +--echo # +CREATE TABLE ft2(copy TEXT,FULLTEXT(copy)) ENGINE=MyISAM; +INSERT INTO ft2(copy) VALUES + ('MySQL vs MariaDB database'), + ('Oracle vs MariaDB database'), + ('PostgreSQL vs MariaDB database'), + ('MariaDB overview'), + ('Foreign keys'), + ('Primary keys'), + ('Indexes'), + ('Transactions'), + ('Triggers'); +SELECT * FROM ft2 WHERE 
MATCH(copy) AGAINST('database'); +update ft2 set copy = (select max(concat('mykeyword ',substr(b.copy,1,5))) from ft2 b WHERE MATCH(b.copy) AGAINST('database')) + where MATCH(copy) AGAINST('keys'); +SELECT * FROM ft2 WHERE MATCH(copy) AGAINST('mykeyword'); +drop table ft2; + +--echo # +--echo # Test with MyISAM +--echo # + +create table t1 (old_c1 integer, old_c2 integer,c1 integer, c2 integer, c3 integer) engine=MyISAM; +insert t1 (c1,c2,c3) select 0,seq,seq%10 from seq_1_to_500; +insert t1 (c1,c2,c3) select 1,seq,seq%10 from seq_1_to_400; +insert t1 (c1,c2,c3) select 2,seq,seq%10 from seq_1_to_300; +insert t1 (c1,c2,c3) select 3,seq,seq%10 from seq_1_to_200; +create index t1_idx1 on t1(c3); +analyze table t1; + +update t1 set c1=2 where exists (select 'x' from t1); +select count(*) from t1 where c1=2; +update t1 set c1=3 where c3 in (select c3 from t1 b where t1.c3=b.c1); +select count(*) from t1 where c1=3; +drop table t1; + + +--echo # +--echo # Test error on multi_update conversion on view with order by or limit +--echo # + +create table t1 (c1 integer) engine=InnoDb; +create table t2 (c1 integer) engine=InnoDb; +create view v1 as select t1.c1 as "t1c1" ,t2.c1 as "t2c1" from t1,t2 where t1.c1=t2.c1; +--error ER_BAD_FIELD_ERROR +update v1 set t1c1=2 order by 1; +update v1 set t1c1=2 limit 1; +drop table t1; +drop table t2; +drop view v1; diff --git a/mysql-test/t/user_var.test b/mysql-test/t/user_var.test index aae12ae4cbd..54ceb8fd3a5 100644 --- a/mysql-test/t/user_var.test +++ b/mysql-test/t/user_var.test @@ -1,7 +1,5 @@ # Initialise ---disable_warnings -drop table if exists t1,t2; ---enable_warnings +source include/have_sequence.inc; --error 1054 set @a := foo; @@ -501,6 +499,13 @@ eval select $tmp < $tmp2; --enable_column_names --enable_query_log +# +# MDEV-13897 SELECT @a := MAX(col) FROM t requires full index scan +# +explain select @a:=max(seq) from seq_1_to_1000000; + +# End of 10.1 tests + --echo # --echo # Start of 10.3 tests --echo # diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 00be48c172c..4850b6c06cb 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -865,74 +865,6 @@ create view v1 as select * from t1; create view v2 as select * from v1; create view v3 as select v2.col1 from v2,t2 where v2.col1 = t2.col1; -- error ER_VIEW_PREVENT_UPDATE -update v2 set col1 = (select max(col1) from v1); --- error ER_VIEW_PREVENT_UPDATE -update v2 set col1 = (select max(col1) from t1); --- error ER_UPDATE_TABLE_USED -update v2 set col1 = (select max(col1) from v2); --- error ER_VIEW_PREVENT_UPDATE -update v2,t2 set v2.col1 = (select max(col1) from v1) where v2.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t1,t2 set t1.col1 = (select max(col1) from v1) where t1.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -update v1,t2 set v1.col1 = (select max(col1) from v1) where v1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t2,v2 set v2.col1 = (select max(col1) from v1) where v2.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t2,t1 set t1.col1 = (select max(col1) from v1) where t1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t2,v1 set v1.col1 = (select max(col1) from v1) where v1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update v2,t2 set v2.col1 = (select max(col1) from t1) where v2.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -update t1,t2 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update v1,t2 set v1.col1 = (select max(col1) from t1) where v1.col1 = 
t2.col1; --- error ER_UPDATE_TABLE_USED -update t2,v2 set v2.col1 = (select max(col1) from t1) where v2.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -update t2,t1 set t1.col1 = (select max(col1) from t1) where t1.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -update t2,v1 set v1.col1 = (select max(col1) from t1) where v1.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -update v2,t2 set v2.col1 = (select max(col1) from v2) where v2.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t1,t2 set t1.col1 = (select max(col1) from v2) where t1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update v1,t2 set v1.col1 = (select max(col1) from v2) where v1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t2,v2 set v2.col1 = (select max(col1) from v2) where v2.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t2,t1 set t1.col1 = (select max(col1) from v2) where t1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update t2,v1 set v1.col1 = (select max(col1) from v2) where v1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -update v3 set v3.col1 = (select max(col1) from v1); --- error ER_VIEW_PREVENT_UPDATE -update v3 set v3.col1 = (select max(col1) from t1); --- error ER_VIEW_PREVENT_UPDATE -update v3 set v3.col1 = (select max(col1) from v2); --- error ER_UPDATE_TABLE_USED -update v3 set v3.col1 = (select max(col1) from v3); --- error ER_VIEW_PREVENT_UPDATE -delete v2 from v2,t2 where (select max(col1) from v1) > 0 and v2.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -delete t1 from t1,t2 where (select max(col1) from v1) > 0 and t1.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -delete v1 from v1,t2 where (select max(col1) from v1) > 0 and v1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -delete v2 from v2,t2 where (select max(col1) from t1) > 0 and v2.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -delete t1 from t1,t2 where (select max(col1) from t1) > 0 and t1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -delete v1 from v1,t2 where (select max(col1) from t1) > 0 and v1.col1 = t2.col1; --- error ER_UPDATE_TABLE_USED -delete v2 from v2,t2 where (select max(col1) from v2) > 0 and v2.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -delete t1 from t1,t2 where (select max(col1) from v2) > 0 and t1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE -delete v1 from v1,t2 where (select max(col1) from v2) > 0 and v1.col1 = t2.col1; --- error ER_VIEW_PREVENT_UPDATE insert into v2 values ((select max(col1) from v1)); -- error ER_VIEW_PREVENT_UPDATE insert into t1 values ((select max(col1) from v1)); @@ -1803,8 +1735,6 @@ create view v1 as select f59, f60 from t1 where f59 in (select f59 from t1); -- error ER_NON_UPDATABLE_TABLE update v1 set f60=2345; --- error ER_VIEW_PREVENT_UPDATE -update t1 set f60=(select max(f60) from v1); drop view v1; drop table t1; diff --git a/mysys/hash.c b/mysys/hash.c index ad01afba29e..57242735d99 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -84,7 +84,7 @@ my_hash_init2(HASH *hash, uint growth_size, CHARSET_INFO *charset, { my_bool res; DBUG_ENTER("my_hash_init"); - DBUG_PRINT("enter",("hash: 0x%lx size: %u", (long) hash, (uint) size)); + DBUG_PRINT("enter",("hash:%p size: %u", hash, (uint) size)); hash->records=0; hash->key_offset=key_offset; @@ -144,8 +144,8 @@ static inline void my_hash_free_elements(HASH *hash) void my_hash_free(HASH *hash) { DBUG_ENTER("my_hash_free"); - DBUG_PRINT("enter",("hash: 0x%lx elements: %ld", - (long) hash, hash->records)); + DBUG_PRINT("enter",("hash:%p elements: %ld", + hash, hash->records)); 
my_hash_free_elements(hash); hash->free= 0; @@ -166,7 +166,7 @@ void my_hash_free(HASH *hash) void my_hash_reset(HASH *hash) { DBUG_ENTER("my_hash_reset"); - DBUG_PRINT("enter",("hash: 0x%lxd", (long) hash)); + DBUG_PRINT("enter",("hash:%p", hash)); my_hash_free_elements(hash); reset_dynamic(&hash->array); @@ -844,8 +844,8 @@ my_bool my_hash_check(HASH *hash) blength, records)) != i) { DBUG_PRINT("error", ("Record in wrong link at %d: Start %d " - "Record: 0x%lx Record-link %d", - idx, i, (long) hash_info->data, rec_link)); + "Record:%p Record-link %d", + idx, i, hash_info->data, rec_link)); error=1; } else diff --git a/mysys/lf_alloc-pin.c b/mysys/lf_alloc-pin.c index e2073df1e4d..bf2b8a12846 100644 --- a/mysys/lf_alloc-pin.c +++ b/mysys/lf_alloc-pin.c @@ -355,7 +355,7 @@ static void lf_pinbox_real_free(LF_PINS *pins) lf_dynarray_iterate(&pinbox->pinarray, (lf_dynarray_func)harvest_pins, &hv); - npins= hv.granary-addr; + npins= (int)(hv.granary-addr); /* and sort them */ if (npins) qsort(addr, npins, sizeof(void *), (qsort_cmp)ptr_cmp); diff --git a/mysys/list.c b/mysys/list.c index fb46120db04..2276ca72b48 100644 --- a/mysys/list.c +++ b/mysys/list.c @@ -27,7 +27,7 @@ LIST *list_add(LIST *root, LIST *element) { DBUG_ENTER("list_add"); - DBUG_PRINT("enter",("root: 0x%lx element: 0x%lx", (long) root, (long) element)); + DBUG_PRINT("enter",("root: %p element: %p", root, element)); if (root) { if (root->prev) /* If add in mid of list */ diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c index 0c2c1da354c..a52ea2c5c40 100644 --- a/mysys/mf_iocache.c +++ b/mysys/mf_iocache.c @@ -154,8 +154,8 @@ int init_io_cache(IO_CACHE *info, File file, size_t cachesize, my_off_t pos; my_off_t end_of_file= ~(my_off_t) 0; DBUG_ENTER("init_io_cache"); - DBUG_PRINT("enter",("cache: 0x%lx type: %d pos: %ld", - (ulong) info, (int) type, (ulong) seek_offset)); + DBUG_PRINT("enter",("cache:%p type: %d pos: %llu", + info, (int) type, (ulonglong) seek_offset)); info->file= file; info->type= TYPE_NOT_SET; /* Don't set it until mutex are created */ @@ -437,8 +437,8 @@ my_bool reinit_io_cache(IO_CACHE *info, enum cache_type type, my_bool clear_cache) { DBUG_ENTER("reinit_io_cache"); - DBUG_PRINT("enter",("cache: 0x%lx type: %d seek_offset: %lu clear_cache: %d", - (ulong) info, type, (ulong) seek_offset, + DBUG_PRINT("enter",("cache:%p type: %d seek_offset: %llu clear_cache: %d", + info, type, (ulonglong) seek_offset, (int) clear_cache)); DBUG_ASSERT(type == READ_CACHE || type == WRITE_CACHE); @@ -865,10 +865,10 @@ void init_io_cache_share(IO_CACHE *read_cache, IO_CACHE_SHARE *cshare, IO_CACHE *write_cache, uint num_threads) { DBUG_ENTER("init_io_cache_share"); - DBUG_PRINT("io_cache_share", ("read_cache: 0x%lx share: 0x%lx " - "write_cache: 0x%lx threads: %u", - (long) read_cache, (long) cshare, - (long) write_cache, num_threads)); + DBUG_PRINT("io_cache_share", ("read_cache: %p share: %p " + "write_cache: %p threads: %u", + read_cache, cshare, + write_cache, num_threads)); DBUG_ASSERT(num_threads > 1); DBUG_ASSERT(read_cache->type == READ_CACHE); @@ -930,9 +930,9 @@ void remove_io_thread(IO_CACHE *cache) flush_io_cache(cache); mysql_mutex_lock(&cshare->mutex); - DBUG_PRINT("io_cache_share", ("%s: 0x%lx", + DBUG_PRINT("io_cache_share", ("%s: %p", (cache == cshare->source_cache) ? - "writer" : "reader", (long) cache)); + "writer" : "reader", cache)); /* Remove from share. */ total= --cshare->total_threads; @@ -1006,9 +1006,9 @@ static int lock_io_cache(IO_CACHE *cache, my_off_t pos) /* Enter the lock. 
*/ mysql_mutex_lock(&cshare->mutex); cshare->running_threads--; - DBUG_PRINT("io_cache_share", ("%s: 0x%lx pos: %lu running: %u", + DBUG_PRINT("io_cache_share", ("%s: %p pos: %lu running: %u", (cache == cshare->source_cache) ? - "writer" : "reader", (long) cache, (ulong) pos, + "writer" : "reader", cache, (ulong) pos, cshare->running_threads)); if (cshare->source_cache) @@ -1145,10 +1145,10 @@ static void unlock_io_cache(IO_CACHE *cache) { IO_CACHE_SHARE *cshare= cache->share; DBUG_ENTER("unlock_io_cache"); - DBUG_PRINT("io_cache_share", ("%s: 0x%lx pos: %lu running: %u", + DBUG_PRINT("io_cache_share", ("%s: %p pos: %lu running: %u", (cache == cshare->source_cache) ? "writer" : "reader", - (long) cache, (ulong) cshare->pos_in_file, + cache, (ulong) cshare->pos_in_file, cshare->total_threads)); cshare->running_threads= cshare->total_threads; @@ -1899,7 +1899,7 @@ int my_b_flush_io_cache(IO_CACHE *info, int need_append_buffer_lock) size_t length; my_bool append_cache= (info->type == SEQ_READ_APPEND); DBUG_ENTER("my_b_flush_io_cache"); - DBUG_PRINT("enter", ("cache: 0x%lx", (long) info)); + DBUG_PRINT("enter", ("cache: %p", info)); if (!append_cache) need_append_buffer_lock= 0; @@ -1977,7 +1977,7 @@ int end_io_cache(IO_CACHE *info) { int error=0; DBUG_ENTER("end_io_cache"); - DBUG_PRINT("enter",("cache: 0x%lx", (ulong) info)); + DBUG_PRINT("enter",("cache: %p", info)); /* Every thread must call remove_io_thread(). The last one destroys diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index c7fc2fbb84f..19e1e165b12 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -610,11 +610,11 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, keycache->waiting_for_hash_link.last_thread= NULL; keycache->waiting_for_block.last_thread= NULL; DBUG_PRINT("exit", - ("disk_blocks: %d block_root: 0x%lx hash_entries: %d\ - hash_root: 0x%lx hash_links: %d hash_link_root: 0x%lx", - keycache->disk_blocks, (long) keycache->block_root, - keycache->hash_entries, (long) keycache->hash_root, - keycache->hash_links, (long) keycache->hash_link_root)); + ("disk_blocks: %d block_root: %p hash_entries: %d\ + hash_root: %p hash_links: %d hash_link_root: %p", + keycache->disk_blocks, keycache->block_root, + keycache->hash_entries, keycache->hash_root, + keycache->hash_links, keycache->hash_link_root)); } else { @@ -964,7 +964,7 @@ static void end_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, my_bool cleanup) { DBUG_ENTER("end_simple_key_cache"); - DBUG_PRINT("enter", ("key_cache: 0x%lx", (long) keycache)); + DBUG_PRINT("enter", ("key_cache: %p", keycache)); if (!keycache->key_cache_inited) DBUG_VOID_RETURN; @@ -4365,7 +4365,7 @@ int flush_simple_key_cache_blocks(SIMPLE_KEY_CACHE_CB *keycache, { int res= 0; DBUG_ENTER("flush_key_blocks"); - DBUG_PRINT("enter", ("keycache: 0x%lx", (long) keycache)); + DBUG_PRINT("enter", ("keycache: %p", keycache)); if (!keycache->key_cache_inited) DBUG_RETURN(0); @@ -4795,11 +4795,11 @@ void keycache_debug_log_close(void) static int fail_block(BLOCK_LINK *block __attribute__((unused))) { #ifndef DBUG_OFF - F_B_PRT("block->next_used: %lx\n", (ulong) block->next_used); - F_B_PRT("block->prev_used: %lx\n", (ulong) block->prev_used); - F_B_PRT("block->next_changed: %lx\n", (ulong) block->next_changed); - F_B_PRT("block->prev_changed: %lx\n", (ulong) block->prev_changed); - F_B_PRT("block->hash_link: %lx\n", (ulong) block->hash_link); + F_B_PRT("block->next_used: %p\n", block->next_used); + F_B_PRT("block->prev_used: %p\n", block->prev_used); + F_B_PRT("block->next_changed: 
%p\n", block->next_changed); + F_B_PRT("block->prev_changed: %p\n", block->prev_changed); + F_B_PRT("block->hash_link: %p\n", block->hash_link); F_B_PRT("block->status: %u\n", block->status); F_B_PRT("block->length: %u\n", block->length); F_B_PRT("block->offset: %u\n", block->offset); @@ -4813,9 +4813,9 @@ static int fail_block(BLOCK_LINK *block __attribute__((unused))) #ifndef DBUG_OFF static int fail_hlink(HASH_LINK *hlink __attribute__((unused))) { - F_B_PRT("hlink->next: %lx\n", (ulong) hlink->next); - F_B_PRT("hlink->prev: %lx\n", (ulong) hlink->prev); - F_B_PRT("hlink->block: %lx\n", (ulong) hlink->block); + F_B_PRT("hlink->next: %p\n", hlink->next); + F_B_PRT("hlink->prev: %p\n", hlink->prev); + F_B_PRT("hlink->block: %p\n", hlink->block); F_B_PRT("hlink->diskpos: %lu\n", (ulong) hlink->diskpos); F_B_PRT("hlink->file: %d\n", hlink->file); return 0; /* Let the assert fail. */ @@ -5193,7 +5193,7 @@ int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, } } - keycache->partitions= partitions= partition_ptr-keycache->partition_array; + keycache->partitions= partitions= (uint) (partition_ptr-keycache->partition_array); keycache->key_cache_mem_size= mem_per_cache * partitions; for (i= 0; i < (int) partitions; i++) keycache->partition_array[i]->hash_factor= partitions; @@ -5361,7 +5361,7 @@ void end_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, uint i; uint partitions= keycache->partitions; DBUG_ENTER("partitioned_end_key_cache"); - DBUG_PRINT("enter", ("key_cache: 0x%lx", (long) keycache)); + DBUG_PRINT("enter", ("key_cache: %p", keycache)); for (i= 0; i < partitions; i++) { @@ -5675,7 +5675,7 @@ int flush_partitioned_key_cache_blocks(PARTITIONED_KEY_CACHE_CB *keycache, int err= 0; ulonglong *dirty_part_map= (ulonglong *) file_extra; DBUG_ENTER("partitioned_flush_key_blocks"); - DBUG_PRINT("enter", ("keycache: 0x%lx", (long) keycache)); + DBUG_PRINT("enter", ("keycache: %p", keycache)); for (i= 0; i < partitions; i++) { diff --git a/mysys/my_default.c b/mysys/my_default.c index e7d661b33e5..37f6d2bfbbf 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -173,8 +173,8 @@ fn_expand(const char *filename, char *result_buf) char dir[FN_REFLEN]; const int flags= MY_UNPACK_FILENAME | MY_SAFE_PATH | MY_RELATIVE_PATH; DBUG_ENTER("fn_expand"); - DBUG_PRINT("enter", ("filename: %s, result_buf: 0x%lx", - filename, (unsigned long) result_buf)); + DBUG_PRINT("enter", ("filename: %s, result_buf: %p", + filename, result_buf)); if (my_getwd(dir, sizeof(dir), MYF(0))) DBUG_RETURN(3); DBUG_PRINT("debug", ("dir: %s", dir)); diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c index c720e2c9168..7dde1e1b4fc 100644 --- a/mysys/my_fopen.c +++ b/mysys/my_fopen.c @@ -74,7 +74,7 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags) my_file_total_opened++; my_file_info[filedesc].type= STREAM_BY_FOPEN; mysql_mutex_unlock(&THR_LOCK_open); - DBUG_PRINT("exit",("stream: 0x%lx", (long) fd)); + DBUG_PRINT("exit",("stream: %p", fd)); DBUG_RETURN(fd); } else @@ -222,7 +222,7 @@ int my_fclose(FILE *fd, myf MyFlags) { int err,file; DBUG_ENTER("my_fclose"); - DBUG_PRINT("my",("stream: 0x%lx MyFlags: %lu", (long) fd, MyFlags)); + DBUG_PRINT("my",("stream: %p MyFlags: %lu", fd, MyFlags)); mysql_mutex_lock(&THR_LOCK_open); file= my_fileno(fd); @@ -292,7 +292,7 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags) mysql_mutex_unlock(&THR_LOCK_open); } - DBUG_PRINT("exit",("stream: 0x%lx", (long) fd)); + DBUG_PRINT("exit",("stream: %p", fd)); DBUG_RETURN(fd); } /* 
my_fdopen */ diff --git a/mysys/my_fstream.c b/mysys/my_fstream.c index de752fa149f..bfcf24bfa2e 100644 --- a/mysys/my_fstream.c +++ b/mysys/my_fstream.c @@ -46,8 +46,8 @@ size_t my_fread(FILE *stream, uchar *Buffer, size_t Count, myf MyFlags) { size_t readbytes; DBUG_ENTER("my_fread"); - DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %lu", - (long) stream, (long) Buffer, (uint) Count, MyFlags)); + DBUG_PRINT("my",("stream: %p Buffer %p Count: %u MyFlags: %lu", + stream, Buffer, (uint) Count, MyFlags)); if ((readbytes= fread(Buffer, sizeof(char), Count, stream)) != Count) { @@ -94,8 +94,8 @@ size_t my_fwrite(FILE *stream, const uchar *Buffer, size_t Count, myf MyFlags) uint errors; #endif DBUG_ENTER("my_fwrite"); - DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %lu", - (long) stream, (long) Buffer, (uint) Count, MyFlags)); + DBUG_PRINT("my",("stream:%p Buffer:%p Count: %u MyFlags: %lu", + stream, Buffer, (uint) Count, MyFlags)); #if !defined(NO_BACKGROUND) && defined(USE_MY_STREAM) errors=0; @@ -163,8 +163,8 @@ my_off_t my_fseek(FILE *stream, my_off_t pos, int whence, myf MyFlags __attribute__((unused))) { DBUG_ENTER("my_fseek"); - DBUG_PRINT("my",("stream: 0x%lx pos: %lu whence: %d MyFlags: %lu", - (long) stream, (long) pos, whence, MyFlags)); + DBUG_PRINT("my",("stream:%p pos: %llu whence: %d MyFlags: %lu", + stream, (ulonglong) pos, whence, MyFlags)); DBUG_RETURN(fseek(stream, (off_t) pos, whence) ? MY_FILEPOS_ERROR : (my_off_t) ftell(stream)); } /* my_seek */ @@ -174,11 +174,11 @@ my_off_t my_fseek(FILE *stream, my_off_t pos, int whence, my_off_t my_ftell(FILE *stream, myf MyFlags __attribute__((unused))) { - off_t pos; + long long pos; DBUG_ENTER("my_ftell"); - DBUG_PRINT("my",("stream: 0x%lx MyFlags: %lu", (long) stream, MyFlags)); - pos=ftell(stream); - DBUG_PRINT("exit",("ftell: %lu",(ulong) pos)); + DBUG_PRINT("my",("stream:%p MyFlags: %lu", stream, MyFlags)); + pos=IF_WIN(_ftelli64(stream),ftell(stream)); + DBUG_PRINT("exit",("ftell: %lld",pos)); DBUG_RETURN((my_off_t) pos); } /* my_ftell */ diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 8eff81393d4..0edca1a72fd 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -1433,7 +1433,7 @@ static uint print_name(const struct my_option *optp) for (;*s;s++) putchar(*s == '_' ? '-' : *s); - return s - optp->name; + return (uint)(s - optp->name); } /** prints option comment with indentation and wrapping. 
@@ -1474,7 +1474,7 @@ static uint print_comment(const char *comment, putchar(' '); } printf("%s", comment); - return curpos + (end - comment); + return curpos + (int)(end - comment); } diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c index bfa28f1d372..46710e79f62 100644 --- a/mysys/my_getwd.c +++ b/mysys/my_getwd.c @@ -48,8 +48,8 @@ int my_getwd(char * buf, size_t size, myf MyFlags) { char * pos; DBUG_ENTER("my_getwd"); - DBUG_PRINT("my",("buf: 0x%lx size: %u MyFlags %lu", - (long) buf, (uint) size, MyFlags)); + DBUG_PRINT("my",("buf:%p size: %u MyFlags %lu", + buf, (uint) size, MyFlags)); if (size < 1) DBUG_RETURN(-1); diff --git a/mysys/my_init.c b/mysys/my_init.c index 19291a76b00..7db0a58d471 100644 --- a/mysys/my_init.c +++ b/mysys/my_init.c @@ -253,8 +253,6 @@ void my_parameter_handler(const wchar_t * expression, const wchar_t * function, const wchar_t * file, unsigned int line, uintptr_t pReserved) { - DBUG_PRINT("my",("Expression: %s function: %s file: %s, line: %d", - expression, function, file, line)); __debugbreak(); } diff --git a/mysys/my_lib.c b/mysys/my_lib.c index abc7b8a3161..d5f54f00b85 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -221,7 +221,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags) long handle; #endif DBUG_ENTER("my_dir"); - DBUG_PRINT("my",("path: '%s' stat: %d MyFlags: %d",path,MyFlags)); + DBUG_PRINT("my",("path: '%s' MyFlags: %d",path,MyFlags)); /* Put LIB-CHAR as last path-character if not there */ tmp_file=tmp_path; @@ -347,8 +347,8 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags) { int m_used; DBUG_ENTER("my_stat"); - DBUG_PRINT("my", ("path: '%s' stat_area: 0x%lx MyFlags: %lu", path, - (long) stat_area, my_flags)); + DBUG_PRINT("my", ("path: '%s' stat_area: %p MyFlags: %lu", path, + stat_area, my_flags)); if ((m_used= (stat_area == NULL))) if (!(stat_area= (MY_STAT *) my_malloc(sizeof(MY_STAT), my_flags))) diff --git a/mysys/my_safehash.c b/mysys/my_safehash.c index c34f3c456cd..e50c5105875 100644 --- a/mysys/my_safehash.c +++ b/mysys/my_safehash.c @@ -166,7 +166,7 @@ uchar *safe_hash_search(SAFE_HASH *hash, const uchar *key, uint length, result= def; else result= ((SAFE_HASH_ENTRY*) result)->data; - DBUG_PRINT("exit",("data: 0x%lx", (long) result)); + DBUG_PRINT("exit",("data: %p", result)); DBUG_RETURN(result); } @@ -197,7 +197,7 @@ my_bool safe_hash_set(SAFE_HASH *hash, const uchar *key, uint length, SAFE_HASH_ENTRY *entry; my_bool error= 0; DBUG_ENTER("safe_hash_set"); - DBUG_PRINT("enter",("key: %.*s data: 0x%lx", length, key, (long) data)); + DBUG_PRINT("enter",("key: %.*s data: %p", length, key, data)); mysql_rwlock_wrlock(&hash->mutex); entry= (SAFE_HASH_ENTRY*) my_hash_search(&hash->hash, key, length); diff --git a/mysys/my_winthread.c b/mysys/my_winthread.c index 31385fad360..f3335621c38 100644 --- a/mysys/my_winthread.c +++ b/mysys/my_winthread.c @@ -121,6 +121,15 @@ int pthread_join(pthread_t thread, void **value_ptr) goto error_return; } + if (!GetExitCodeThread(handle, &ret)) + { + errno= EINVAL; + goto error_return; + } + + if (value_ptr) + *value_ptr= (void *)(size_t)ret; + CloseHandle(handle); return 0; diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c index 56ac5298da7..3bc16d8c1db 100644 --- a/mysys/thr_lock.c +++ b/mysys/thr_lock.c @@ -761,9 +761,9 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout) PSI_TABLE_LOCK, lock_type); mysql_mutex_lock(&lock->mutex); - DBUG_PRINT("lock",("data: 0x%lx thread: 0x%lx lock: 0x%lx type: %d", - (long) data, (ulong) data->owner->thread_id, - 
(long) lock, (int) lock_type)); + DBUG_PRINT("lock",("data:%p thread:%lu lock:%p type: %d", + data, (ulong) data->owner->thread_id, + lock, (int) lock_type)); check_locks(lock,(uint) lock_type <= (uint) TL_READ_NO_INSERT ? "enter read_lock" : "enter write_lock", lock_type, 0); if ((int) lock_type <= (int) TL_READ_NO_INSERT) @@ -798,7 +798,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout) See Bug#42147 for more information. */ - DBUG_PRINT("lock",("write locked 1 by thread: 0x%lx", + DBUG_PRINT("lock",("write locked 1 by thread:%lu", (ulong) lock->write.data->owner->thread_id)); if (thr_lock_owner_equal(data->owner, lock->write.data->owner) || (lock->write.data->type <= TL_WRITE_DELAYED && @@ -943,8 +943,8 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout) ! lock->read_no_write_count) || has_old_lock(lock->write.data, data->owner)) { - DBUG_PRINT("info", ("write_wait.data: 0x%lx old_type: %d", - (ulong) lock->write_wait.data, + DBUG_PRINT("info", ("write_wait.data: %p old_type: %d", + lock->write_wait.data, lock->write.data->type)); (*lock->write.last)=data; /* Add to running fifo */ @@ -957,13 +957,13 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout) statistic_increment(locks_immediate,&THR_LOCK_lock); goto end; } - DBUG_PRINT("lock",("write locked 2 by thread: 0x%lx", + DBUG_PRINT("lock",("write locked 2 by thread: %lu", (ulong) lock->write.data->owner->thread_id)); } else { - DBUG_PRINT("info", ("write_wait.data: 0x%lx", - (ulong) lock->write_wait.data)); + DBUG_PRINT("info", ("write_wait.data:%p", + lock->write_wait.data)); if (!lock->write_wait.data) { /* no scheduled write locks */ my_bool concurrent_insert= 0; @@ -993,7 +993,7 @@ thr_lock(THR_LOCK_DATA *data, THR_LOCK_INFO *owner, ulong lock_wait_timeout) goto end; } } - DBUG_PRINT("lock",("write locked 3 by thread: 0x%lx type: %d", + DBUG_PRINT("lock",("write locked 3 by thread:%lu type: %d", (ulong) lock->read.data->owner->thread_id, data->type)); } @@ -1059,8 +1059,8 @@ static inline void free_all_read_locks(THR_LOCK *lock, lock->read_no_write_count++; } /* purecov: begin inspected */ - DBUG_PRINT("lock",("giving read lock to thread: 0x%lx", - (ulong) data->owner->thread_id)); + DBUG_PRINT("lock",("giving read lock to thread: %lu", + (ulong)data->owner->thread_id)); /* purecov: end */ data->cond=0; /* Mark thread free */ mysql_cond_signal(cond); @@ -1078,7 +1078,7 @@ void thr_unlock(THR_LOCK_DATA *data, uint unlock_flags) THR_LOCK *lock=data->lock; enum thr_lock_type lock_type=data->type; DBUG_ENTER("thr_unlock"); - DBUG_PRINT("lock",("data: %p thread: 0x%lx lock: %p", + DBUG_PRINT("lock",("data: %p thread: %lu lock: %p", data, (ulong) data->owner->thread_id, lock)); mysql_mutex_lock(&lock->mutex); @@ -1172,7 +1172,7 @@ static void wake_up_waiters(THR_LOCK *lock) (*lock->check_status)(data->status_param)) data->type=TL_WRITE; /* Upgrade lock */ /* purecov: begin inspected */ - DBUG_PRINT("lock",("giving write lock of type %d to thread: 0x%lx", + DBUG_PRINT("lock",("giving write lock of type %d to thread: %lu", data->type, (ulong) data->owner->thread_id)); /* purecov: end */ { @@ -1281,7 +1281,7 @@ thr_multi_lock(THR_LOCK_DATA **data, uint count, THR_LOCK_INFO *owner, { THR_LOCK_DATA **pos, **end, **first_lock; DBUG_ENTER("thr_multi_lock"); - DBUG_PRINT("lock",("data: 0x%lx count: %d", (long) data, count)); + DBUG_PRINT("lock",("data: %p count: %d", data, count)); if (count > 1) sort_locks(data,count); @@ -1302,8 +1302,8 @@ 
thr_multi_lock(THR_LOCK_DATA **data, uint count, THR_LOCK_INFO *owner, DBUG_RETURN(result); } #ifdef MAIN - printf("Thread: %s Got lock: 0x%lx type: %d\n",my_thread_name(), - (long) pos[0]->lock, pos[0]->type); fflush(stdout); + printf("Thread: %s Got lock:%p type: %d\n",my_thread_name(), + pos[0]->lock, pos[0]->type); fflush(stdout); #endif } DEBUG_SYNC_C("thr_multi_lock_after_thr_lock"); @@ -1402,21 +1402,21 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count, uint unlock_flags) { THR_LOCK_DATA **pos,**end; DBUG_ENTER("thr_multi_unlock"); - DBUG_PRINT("lock",("data: 0x%lx count: %d flags: %u", (long) data, count, + DBUG_PRINT("lock",("data: %p count: %d flags: %u", data, count, unlock_flags)); for (pos=data,end=data+count; pos < end ; pos++) { #ifdef MAIN - printf("Thread: %s Rel lock: 0x%lx type: %d\n", - my_thread_name(), (long) pos[0]->lock, pos[0]->type); + printf("Thread: %s Rel lock: %p type: %d\n", + my_thread_name(), pos[0]->lock, pos[0]->type); fflush(stdout); #endif if ((*pos)->type != TL_UNLOCK) thr_unlock(*pos, unlock_flags); else { - DBUG_PRINT("lock",("Free lock: data: %p thread: 0x%lx lock: %p", + DBUG_PRINT("lock",("Free lock: data: %p thread:%lu lock: %p", *pos, (ulong) (*pos)->owner->thread_id, (*pos)->lock)); } @@ -1693,7 +1693,7 @@ void thr_print_locks(void) if ((lock->write.data || lock->read.data || lock->write_wait.data || lock->read_wait.data)) { - printf("lock: 0x%lx:",(ulong) lock); + printf("lock: %p:", lock); if ((lock->write_wait.data || lock->read_wait.data) && (! lock->read.data && ! lock->write.data)) printf(" WARNING: "); diff --git a/mysys/tree.c b/mysys/tree.c index e106dd43c00..13f9bd808a2 100644 --- a/mysys/tree.c +++ b/mysys/tree.c @@ -90,7 +90,7 @@ void init_tree(TREE *tree, size_t default_alloc_size, size_t memory_limit, myf my_flags) { DBUG_ENTER("init_tree"); - DBUG_PRINT("enter",("tree: 0x%lx size: %d", (long) tree, size)); + DBUG_PRINT("enter",("tree: %p size: %d", tree, size)); if (default_alloc_size < DEFAULT_ALLOC_SIZE) default_alloc_size= DEFAULT_ALLOC_SIZE; @@ -140,7 +140,7 @@ static int free_tree(TREE *tree, my_bool abort, myf free_flags) { int error, first_error= 0; DBUG_ENTER("free_tree"); - DBUG_PRINT("enter",("tree: 0x%lx", (long) tree)); + DBUG_PRINT("enter",("tree: %p", tree)); if (tree->root) /* If initialized */ { diff --git a/mysys/typelib.c b/mysys/typelib.c index 96842b1a3ad..e45ede2c43a 100644 --- a/mysys/typelib.c +++ b/mysys/typelib.c @@ -88,7 +88,7 @@ static int find_type_eol(const char **x, const TYPELIB *typelib, uint flags, const char *j; CHARSET_INFO *cs= &my_charset_latin1; DBUG_ENTER("find_type_eol"); - DBUG_PRINT("enter",("x: '%s' lib: 0x%lx", *x, (long) typelib)); + DBUG_PRINT("enter",("x: '%s' lib: %p", *x, typelib)); DBUG_ASSERT(!(flags & ~(FIND_TYPE_NO_PREFIX | FIND_TYPE_COMMA_TERM))); @@ -200,7 +200,7 @@ my_ulonglong find_typeset(char *x, TYPELIB *lib, int *err) int find; char *i; DBUG_ENTER("find_set"); - DBUG_PRINT("enter",("x: '%s' lib: 0x%lx", x, (long) lib)); + DBUG_PRINT("enter",("x: '%s' lib: %p", x, lib)); if (!lib->count) { @@ -399,7 +399,7 @@ my_ulonglong find_set_from_flags(const TYPELIB *lib, uint default_name, continue; err: *err_pos= (char*)start; - *err_len= end - start; + *err_len= (uint)(end - start); break; } } diff --git a/mysys/wqueue.c b/mysys/wqueue.c index d4f699fd9bc..270f1895d75 100644 --- a/mysys/wqueue.c +++ b/mysys/wqueue.c @@ -225,17 +225,17 @@ void wqueue_add_and_wait(WQUEUE *wqueue, { DBUG_ENTER("wqueue_add_and_wait"); DBUG_PRINT("enter", - ("thread: 0x%lx cond: 0x%lx mutex: 
0x%lx", - (ulong) thread, (ulong) &thread->suspend, (ulong) lock)); + ("thread: %p cond: %p mutex: %p", + thread, &thread->suspend, lock)); wqueue_add_to_queue(wqueue, thread); do { - DBUG_PRINT("info", ("wait... cond: 0x%lx mutex: 0x%lx", - (ulong) &thread->suspend, (ulong) lock)); + DBUG_PRINT("info", ("wait... cond: %p mutex: %p", + &thread->suspend, lock)); mysql_cond_wait(&thread->suspend, lock); - DBUG_PRINT("info", ("wait done cond: 0x%lx mutex: 0x%lx next: 0x%lx", - (ulong) &thread->suspend, (ulong) lock, - (ulong) thread->next)); + DBUG_PRINT("info", ("wait done cond: %p mutex: %p next: %p", + &thread->suspend, lock, + thread->next)); } while (thread->next); DBUG_VOID_RETURN; diff --git a/mysys_ssl/my_md5.cc b/mysys_ssl/my_md5.cc index 582c83d0522..359bcd49ec6 100644 --- a/mysys_ssl/my_md5.cc +++ b/mysys_ssl/my_md5.cc @@ -90,7 +90,7 @@ void my_md5(uchar *digest, const char *buf, size_t len) char ctx_buf[EVP_MD_CTX_SIZE]; EVP_MD_CTX * const ctx= (EVP_MD_CTX*)ctx_buf; md5_init(ctx); - md5_input(ctx, (const uchar *)buf, len); + md5_input(ctx, (const uchar *)buf, (uint) len); md5_result(ctx, digest); } @@ -117,7 +117,7 @@ void my_md5_multi(uchar *digest, ...) md5_init(ctx); for (str= va_arg(args, const uchar*); str; str= va_arg(args, const uchar*)) - md5_input(ctx, str, va_arg(args, size_t)); + md5_input(ctx, str, (uint) va_arg(args, size_t)); md5_result(ctx, digest); va_end(args); @@ -135,7 +135,7 @@ void my_md5_init(void *context) void my_md5_input(void *context, const uchar *buf, size_t len) { - md5_input((EVP_MD_CTX *)context, buf, len); + md5_input((EVP_MD_CTX *)context, buf, (uint) len); } void my_md5_result(void *context, uchar *digest) diff --git a/mysys_ssl/my_sha.ic b/mysys_ssl/my_sha.ic index a7ec8bad593..e4433b49a0f 100644 --- a/mysys_ssl/my_sha.ic +++ b/mysys_ssl/my_sha.ic @@ -134,7 +134,7 @@ void my_sha(uchar *digest, const char *buf, size_t len) CONTEXT context; sha_init_fast(&context); - sha_input(&context, (const uchar *)buf, len); + sha_input(&context, (const uchar *)buf, (unsigned int)len); sha_result(&context, digest); } @@ -161,7 +161,7 @@ void my_sha_multi(uchar *digest, ...) 
sha_init_fast(&context); for (str= va_arg(args, const uchar*); str; str= va_arg(args, const uchar*)) - sha_input(&context, str, va_arg(args, size_t)); + sha_input(&context, str, (uint) va_arg(args, size_t)); sha_result(&context, digest); va_end(args); @@ -179,7 +179,7 @@ void my_sha_init(void *context) void my_sha_input(void *context, const uchar *buf, size_t len) { - sha_input((CONTEXT *)context, buf, len); + sha_input((CONTEXT *)context, buf, (uint) len); } void my_sha_result(void *context, uchar *digest) diff --git a/plugin/auth_ed25519/server_ed25519.c b/plugin/auth_ed25519/server_ed25519.c index 88760275b9b..e3f00409ae3 100644 --- a/plugin/auth_ed25519/server_ed25519.c +++ b/plugin/auth_ed25519/server_ed25519.c @@ -33,7 +33,6 @@ static int loaded= 0; static int auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) { - unsigned int i; int pkt_len; unsigned long nonce[CRYPTO_LONGS + NONCE_LONGS]; unsigned char *pkt, *reply= (unsigned char*)nonce; @@ -51,8 +50,8 @@ static int auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) info->password_used= PASSWORD_USED_YES; /* prepare random nonce */ - for (i=CRYPTO_LONGS; i < CRYPTO_LONGS + NONCE_LONGS; i++) - nonce[i]= thd_rnd(info->thd) * ~0UL; + if (my_random_bytes((unsigned char *)nonce, (int)sizeof(nonce))) + return CR_AUTH_USER_CREDENTIALS; /* send it */ if (vio->write_packet(vio, reply + CRYPTO_BYTES, NONCE_BYTES)) diff --git a/plugin/auth_pipe/auth_pipe.c b/plugin/auth_pipe/auth_pipe.c index 20c33c07e84..a803653b31a 100644 --- a/plugin/auth_pipe/auth_pipe.c +++ b/plugin/auth_pipe/auth_pipe.c @@ -36,7 +36,7 @@ static int pipe_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) unsigned char *pkt; MYSQL_PLUGIN_VIO_INFO vio_info; char username[UNLEN + 1]; - size_t username_length; + DWORD username_length; int ret; /* no user name yet ? 
read the client handshake packet with the user name */ @@ -54,7 +54,7 @@ static int pipe_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) if (!ImpersonateNamedPipeClient(vio_info.handle)) return CR_ERROR; - username_length= sizeof(username) - 1; + username_length=UNLEN; ret= CR_ERROR; if (GetUserName(username, &username_length)) { diff --git a/plugin/feedback/feedback.cc b/plugin/feedback/feedback.cc index 81a08c8bdf0..8ed6ef64b0c 100644 --- a/plugin/feedback/feedback.cc +++ b/plugin/feedback/feedback.cc @@ -112,7 +112,7 @@ static COND* make_cond(THD *thd, TABLE_LIST *tables, LEX_STRING *filter) Item_field *fld= new (thd->mem_root) Item_field(thd, &nrc, db, table, field); Item_string *pattern= new (thd->mem_root) Item_string(thd, filter->str, - filter->length, cs); + (uint) filter->length, cs); Item_string *escape= new (thd->mem_root) Item_string(thd, "\\", 1, cs); if (!fld || !pattern || !escape) diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc index 4742d5f4920..bcd10f6713f 100644 --- a/plugin/feedback/sender_thread.cc +++ b/plugin/feedback/sender_thread.cc @@ -238,7 +238,7 @@ static void send_report(const char *when) Url *url= todo[i]; if (thd) // for nicer SHOW PROCESSLIST - thd->set_query(const_cast<char*>(url->url()), url->url_length()); + thd->set_query(const_cast<char*>(url->url()), (uint) url->url_length()); if (url->send(str.ptr(), str.length())) i++; diff --git a/plugin/feedback/utils.cc b/plugin/feedback/utils.cc index a43dad8d630..09abbb2de23 100644 --- a/plugin/feedback/utils.cc +++ b/plugin/feedback/utils.cc @@ -151,7 +151,7 @@ namespace feedback { */ #define INSERT2(NAME,LEN,VALUE) \ do { \ - table->field[0]->store(NAME, LEN, system_charset_info); \ + table->field[0]->store(NAME, (uint) LEN, system_charset_info); \ table->field[1]->store VALUE; \ if (schema_table_store_record(thd, table)) \ return 1; \ @@ -159,7 +159,7 @@ namespace feedback { #define INSERT1(NAME,VALUE) \ do { \ - table->field[0]->store(NAME, sizeof(NAME)-1, system_charset_info); \ + table->field[0]->store(NAME, (uint) sizeof(NAME)-1, system_charset_info); \ table->field[1]->store VALUE; \ if (schema_table_store_record(thd, table)) \ return 1; \ @@ -186,7 +186,7 @@ static my_bool show_plugins(THD *thd, plugin_ref plugin, void *arg) (plugin_decl(plugin)->version) & 0xff); INSERT2(name, name_len, - (version, version_len, system_charset_info)); + (version, (uint)version_len, system_charset_info)); name_len= my_snprintf(name, sizeof(name), "%s used", plugin_name(plugin)->str); @@ -358,10 +358,10 @@ int fill_linux_info(THD *thd, TABLE_LIST *tables) #ifdef HAVE_SYS_UTSNAME_H if (have_ubuf) { - INSERT1("Uname_sysname", (ubuf.sysname, strlen(ubuf.sysname), cs)); - INSERT1("Uname_release", (ubuf.release, strlen(ubuf.release), cs)); - INSERT1("Uname_version", (ubuf.version, strlen(ubuf.version), cs)); - INSERT1("Uname_machine", (ubuf.machine, strlen(ubuf.machine), cs)); + INSERT1("Uname_sysname", (ubuf.sysname, (uint) strlen(ubuf.sysname), cs)); + INSERT1("Uname_release", (ubuf.release, (uint) strlen(ubuf.release), cs)); + INSERT1("Uname_version", (ubuf.version, (uint) strlen(ubuf.version), cs)); + INSERT1("Uname_machine", (ubuf.machine, (uint) strlen(ubuf.machine), cs)); } #endif diff --git a/plugin/file_key_management/parser.cc b/plugin/file_key_management/parser.cc index 03f78422c47..13a9dfa0cb6 100644 --- a/plugin/file_key_management/parser.cc +++ b/plugin/file_key_management/parser.cc @@ -231,9 +231,9 @@ bool Parser::parse_file(std::map<uint,keyentry> *keys, const char 
*secret) return 0; } -void Parser::report_error(const char *reason, uint position) +void Parser::report_error(const char *reason, size_t position) { - my_printf_error(EE_READ, "%s at %s line %u, column %u", + my_printf_error(EE_READ, "%s at %s line %u, column %zu", ME_ERROR_LOG, reason, filename, line_number, position + 1); } diff --git a/plugin/file_key_management/parser.h b/plugin/file_key_management/parser.h index 627b7fd84a6..044be0f5b95 100644 --- a/plugin/file_key_management/parser.h +++ b/plugin/file_key_management/parser.h @@ -23,6 +23,7 @@ Created 09/15/2014 #include <my_crypt.h> #include <ctype.h> #include <map> +#include <stdlib.h> /* size_t */ struct keyentry { unsigned int id; @@ -43,7 +44,7 @@ class Parser unsigned char *key, unsigned char *iv); bool read_filekey(const char *filekey, char *secret); bool parse_file(std::map<unsigned int ,keyentry> *keys, const char *secret); - void report_error(const char *reason, unsigned int position); + void report_error(const char *reason, size_t position); int parse_line(char **line_ptr, keyentry *key); char* read_and_decrypt_file(const char *secret); diff --git a/plugin/query_response_time/query_response_time.cc b/plugin/query_response_time/query_response_time.cc index 10b9391d9da..5d6119d20ef 100644 --- a/plugin/query_response_time/query_response_time.cc +++ b/plugin/query_response_time/query_response_time.cc @@ -221,7 +221,7 @@ public: print_time(total, sizeof(total), TOTAL_STRING_FORMAT, this->total(i)); } fields[0]->store(time,strlen(time),system_charset_info); - fields[1]->store(this->count(i)); + fields[1]->store((longlong)this->count(i),true); fields[2]->store(total,strlen(total),system_charset_info); if (schema_table_store_record(thd, table)) { diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index 2f6c8a8afda..1035065eec0 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -20,6 +20,7 @@ #define _my_thread_var loc_thread_var #include <my_config.h> +#include <assert.h> #ifndef _WIN32 #include <syslog.h> @@ -138,7 +139,7 @@ static size_t loc_write(File Filedes, const uchar *Buffer, size_t Count) { size_t writtenbytes; #ifdef _WIN32 - writtenbytes= my_win_write(Filedes, Buffer, Count); + writtenbytes= (size_t)_write(Filedes, Buffer, (unsigned int)Count); #else writtenbytes= write(Filedes, Buffer, Count); #endif @@ -152,10 +153,29 @@ static File loc_open(const char *FileName, int Flags) /* Special flags */ { File fd; -#if defined(_WIN32) - fd= my_win_open(FileName, Flags); +#ifdef _WIN32 + HANDLE h; + /* + We could just use _open() here, but prefer to open in a Unix-like way, + just like my_open() does it on Windows. + This gives atomic multiprocess-safe appends, and the possibility to rename + or even delete the file while it is open; the CRT lacks these features. 
+ */ + assert(Flags == (O_APPEND | O_CREAT | O_WRONLY)); + h= CreateFile(FileName, FILE_APPEND_DATA, + FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL, + OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + if (h == INVALID_HANDLE_VALUE) + { + fd= -1; + my_osmaperr(GetLastError()); + } + else + { + fd= _open_osfhandle((intptr)h, O_WRONLY|O_BINARY); + } #else - fd = open(FileName, Flags, my_umask); + fd= open(FileName, Flags, my_umask); #endif my_errno= errno; return fd; @@ -171,7 +191,7 @@ static int loc_close(File fd) err= close(fd); } while (err == -1 && errno == EINTR); #else - err= my_win_close(fd); + err= close(fd); #endif my_errno=errno; return err; @@ -201,32 +221,9 @@ static int loc_rename(const char *from, const char *to) } -static my_off_t loc_seek(File fd, my_off_t pos, int whence) -{ - os_off_t newpos= -1; -#ifdef _WIN32 - newpos= my_win_lseek(fd, pos, whence); -#else - newpos= lseek(fd, pos, whence); -#endif - if (newpos == (os_off_t) -1) - { - my_errno= errno; - return MY_FILEPOS_ERROR; - } - - return (my_off_t) newpos; -} - - static my_off_t loc_tell(File fd) { - os_off_t pos; -#if defined (HAVE_TELL) && !defined (_WIN32) - pos= tell(fd); -#else - pos= loc_seek(fd, 0L, MY_SEEK_CUR); -#endif + os_off_t pos= IF_WIN(_telli64(fd),lseek(fd, 0, SEEK_CUR)); if (pos == (os_off_t) -1) { my_errno= errno; @@ -990,7 +987,7 @@ static int start_logging() if (output_type == OUTPUT_FILE) { char alt_path_buffer[FN_REFLEN+1+DEFAULT_FILENAME_LEN]; - MY_STAT *f_stat; + struct stat *f_stat= (struct stat *)alt_path_buffer; const char *alt_fname= file_path; while (*alt_fname == ' ') @@ -1005,7 +1002,7 @@ static int start_logging() { /* See if the directory exists with the name of file_path. */ /* Log file name should be [file_path]/server_audit.log then. */ - if ((f_stat= my_stat(file_path, (MY_STAT *)alt_path_buffer, MYF(0))) && + if (stat(file_path, (struct stat *)alt_path_buffer) == 0 && S_ISDIR(f_stat->st_mode)) { size_t p_len= strlen(file_path); @@ -1421,7 +1418,7 @@ static size_t escape_string_hide_passwords(const char *str, unsigned int len, } next_s++; } - len-= next_s - str; + len-= (uint)(next_s - str); str= next_s; continue; } diff --git a/plugin/userstat/index_stats.cc b/plugin/userstat/index_stats.cc index 236130d327f..87e6da63e38 100644 --- a/plugin/userstat/index_stats.cc +++ b/plugin/userstat/index_stats.cc @@ -35,11 +35,11 @@ static int index_stats_fill(THD *thd, TABLE_LIST *tables, COND *cond) index_name_length= (index_stats->index_name_length - schema_name_length - table_name_length - 3); - table->field[0]->store(tmp_table.db, schema_name_length, + table->field[0]->store(tmp_table.db, (uint)schema_name_length, system_charset_info); - table->field[1]->store(tmp_table.table_name, table_name_length, + table->field[1]->store(tmp_table.table_name, (uint) table_name_length, system_charset_info); - table->field[2]->store(index_name, index_name_length, system_charset_info); + table->field[2]->store(index_name, (uint) index_name_length, system_charset_info); table->field[3]->store((longlong)index_stats->rows_read, TRUE); if (schema_table_store_record(thd, table)) diff --git a/scripts/comp_sql.c b/scripts/comp_sql.c index bcc653a3b7f..6f32d6fe9a8 100644 --- a/scripts/comp_sql.c +++ b/scripts/comp_sql.c @@ -68,7 +68,7 @@ static void die(const char *fmt, ...) char *fgets_fn(char *buffer, size_t size, fgets_input_t input, int *error) { - char *line= fgets(buffer, size, (FILE*) input); + char *line= fgets(buffer, (int)size, (FILE*) input); if (error) *error= (line == NULL) ? 
ferror((FILE*)input) : 0; return line; diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 7bfca799a77..0333a2db882 100755 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -256,12 +256,12 @@ parse_cnf() # look in group+suffix if [[ -n $WSREP_SST_OPT_CONF_SUFFIX ]]; then - reval=$($MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF "${group}${WSREP_SST_OPT_CONF_SUFFIX}" | awk -F= '{if ($1 ~ /_/) { gsub(/_/,"-",$1); print $1"="$2 } else { print $0 }}' | grep -- "--$var=" | cut -d= -f2- | tail -1) + reval=$($MY_PRINT_DEFAULTS "${group}${WSREP_SST_OPT_CONF_SUFFIX}" | awk -F= '{if ($1 ~ /_/) { gsub(/_/,"-",$1); print $1"="$2 } else { print $0 }}' | grep -- "--$var=" | cut -d= -f2- | tail -1) fi # look in group if [[ -z $reval ]]; then - reval=$($MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF $group | awk -F= '{if ($1 ~ /_/) { gsub(/_/,"-",$1); print $1"="$2 } else { print $0 }}' | grep -- "--$var=" | cut -d= -f2- | tail -1) + reval=$($MY_PRINT_DEFAULTS $group | awk -F= '{if ($1 ~ /_/) { gsub(/_/,"-",$1); print $1"="$2 } else { print $0 }}' | grep -- "--$var=" | cut -d= -f2- | tail -1) fi # use default if we haven't found a value diff --git a/scripts/wsrep_sst_xtrabackup-v2.sh b/scripts/wsrep_sst_xtrabackup-v2.sh index 78a7d76da09..a413bd81b42 100644 --- a/scripts/wsrep_sst_xtrabackup-v2.sh +++ b/scripts/wsrep_sst_xtrabackup-v2.sh @@ -494,7 +494,7 @@ read_cnf() ssystag+="-" if [[ $ssyslog -ne -1 ]];then - if $MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF mysqld_safe | tr '_' '-' | grep -q -- "--syslog";then + if $MY_PRINT_DEFAULTS mysqld_safe | tr '_' '-' | grep -q -- "--syslog";then ssyslog=1 fi fi @@ -671,7 +671,7 @@ check_extra() local use_socket=1 if [[ $uextra -eq 1 ]];then if $MY_PRINT_DEFAULTS --mysqld | tr '_' '-' | grep -- "--thread-handling=" | grep -q 'pool-of-threads';then - local eport=$($MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF mysqld | tr '_' '-' | grep -- "--extra-port=" | cut -d= -f2) + local eport=$($MY_PRINT_DEFAULTS mysqld | tr '_' '-' | grep -- "--extra-port=" | cut -d= -f2) if [[ -n $eport ]];then # Xtrabackup works only locally. # Hence, setting host to 127.0.0.1 unconditionally. 
@@ -867,14 +867,14 @@ if [[ $ssyslog -eq 1 ]];then } INNOAPPLY="${INNOBACKUPEX_BIN} $disver $iapts --apply-log \$rebuildcmd \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-apply " - INNOMOVE="${INNOBACKUPEX_BIN} --defaults-file=${WSREP_SST_OPT_CONF} $disver $impts --datadir=${DATA} --move-back --force-non-empty-directories \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-move " - INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_CONF} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2> >(logger -p daemon.err -t ${ssystag}innobackupex-backup)" + INNOMOVE="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} $disver $impts --datadir=${DATA} --move-back --force-non-empty-directories \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-move " + INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2> >(logger -p daemon.err -t ${ssystag}innobackupex-backup)" fi else INNOAPPLY="${INNOBACKUPEX_BIN} $disver $iapts --apply-log \$rebuildcmd \${DATA} &>\${DATA}/innobackup.prepare.log" - INNOMOVE="${INNOBACKUPEX_BIN} --defaults-file=${WSREP_SST_OPT_CONF} --defaults-group=mysqld${WSREP_SST_OPT_CONF_SUFFIX} $disver $impts --datadir=${DATA} --move-back --force-non-empty-directories \${DATA} &>\${DATA}/innobackup.move.log" - INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_CONF} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2>\${DATA}/innobackup.backup.log" + INNOMOVE="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} --defaults-group=mysqld${WSREP_SST_OPT_CONF_SUFFIX} $disver $impts --datadir=${DATA} --move-back --force-non-empty-directories \${DATA} &>\${DATA}/innobackup.move.log" + INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2>\${DATA}/innobackup.backup.log" fi get_stream diff --git a/sql-common/client.c b/sql-common/client.c index 2792c575aef..a19d9fca8d1 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -952,7 +952,7 @@ void STDCALL mysql_free_result(MYSQL_RES *result) { DBUG_ENTER("mysql_free_result"); - DBUG_PRINT("enter",("mysql_res: 0x%lx", (long) result)); + DBUG_PRINT("enter",("mysql_res: %p", result)); if (result) { MYSQL *mysql= result->handle; @@ -1653,7 +1653,7 @@ mysql_init(MYSQL *mysql) */ mysql->reconnect= 0; - DBUG_PRINT("mysql",("mysql: 0x%lx", (long) mysql)); + DBUG_PRINT("mysql",("mysql: %p", mysql)); return mysql; } @@ -2807,11 +2807,11 @@ void mpvio_info(Vio *vio, MYSQL_PLUGIN_VIO_INFO *info) switch (vio->type) { case VIO_TYPE_TCPIP: info->protocol= MYSQL_VIO_TCP; - info->socket= vio_fd(vio); + info->socket= (int)vio_fd(vio); return; case VIO_TYPE_SOCKET: info->protocol= MYSQL_VIO_SOCKET; - info->socket= vio_fd(vio); + info->socket= (int)vio_fd(vio); return; case VIO_TYPE_SSL: { @@ -2821,7 +2821,7 @@ void mpvio_info(Vio *vio, MYSQL_PLUGIN_VIO_INFO *info) return; info->protocol= addr.sa_family == AF_UNIX ? 
MYSQL_VIO_SOCKET : MYSQL_VIO_TCP; - info->socket= vio_fd(vio); + info->socket= (int)vio_fd(vio); return; } #ifdef _WIN32 @@ -3379,7 +3379,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, } DBUG_PRINT("info", ("End of connect attempts, sock: %d status: %d error: %d", - sock, status, saved_error)); + (int)sock, status, saved_error)); freeaddrinfo(res_lst); @@ -3558,11 +3558,11 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, scramble_data_len= pkt_scramble_len; scramble_plugin= scramble_data + scramble_data_len; if (scramble_data + scramble_data_len > pkt_end) - scramble_data_len= pkt_end - scramble_data; + scramble_data_len= (int)(pkt_end - scramble_data); } else { - scramble_data_len= pkt_end - scramble_data; + scramble_data_len= (int)(pkt_end - scramble_data); scramble_plugin= native_password_plugin_name; } } @@ -3633,7 +3633,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, } #endif - DBUG_PRINT("exit", ("Mysql handler: 0x%lx", (long) mysql)); + DBUG_PRINT("exit", ("Mysql handler: %p",mysql)); DBUG_RETURN(mysql); error: @@ -3955,7 +3955,7 @@ void STDCALL mysql_close_slow_part(MYSQL *mysql) void STDCALL mysql_close(MYSQL *mysql) { DBUG_ENTER("mysql_close"); - DBUG_PRINT("enter", ("mysql: 0x%lx", (long) mysql)); + DBUG_PRINT("enter", ("mysql: %p", mysql)); if (mysql) /* Some simple safety */ { @@ -4066,7 +4066,7 @@ int STDCALL mysql_real_query(MYSQL *mysql, const char *query, ulong length) { DBUG_ENTER("mysql_real_query"); - DBUG_PRINT("enter",("handle: 0x%lx", (long) mysql)); + DBUG_PRINT("enter",("handle: %p", mysql)); DBUG_PRINT("query",("Query = '%-.4096s'",query)); if (mysql_send_query(mysql,query,length)) diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 4f415d83573..c8d8453b62f 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -203,7 +203,7 @@ static uint skip_digits(const char **str, const char *end) while (s < end && my_isdigit(&my_charset_latin1, *s)) s++; *str= s; - return s - start; + return (uint)(s - start); } @@ -237,7 +237,7 @@ static void get_microseconds(ulong *val, MYSQL_TIME_STATUS *status, uint tmp= 0; /* For the case '10:10:10.' 
*/ if (get_digits(&tmp, number_of_fields, str, end, 6)) status->warnings|= MYSQL_TIME_WARN_TRUNCATED; - if ((status->precision= (*str - start)) < 6) + if ((status->precision= (uint)(*str - start)) < 6) *val= (ulong) (tmp * log_10_int[6 - (*str - start)]); else *val= tmp; @@ -359,7 +359,7 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time, const char *start= str; if (get_number(&l_time->year, &number_of_fields, &str, end)) status->warnings|= MYSQL_TIME_WARN_TRUNCATED; - year_length= str - start; + year_length= (uint)(str - start); if (!status->warnings && (get_punct(&str, end) diff --git a/sql-common/mysql_async.c b/sql-common/mysql_async.c index decf48e0e69..1bac16edd1e 100644 --- a/sql-common/mysql_async.c +++ b/sql-common/mysql_async.c @@ -128,7 +128,7 @@ my_connect_async(struct mysql_async_context *b, my_socket fd, #endif ssize_t -my_recv_async(struct mysql_async_context *b, int fd, +my_recv_async(struct mysql_async_context *b, my_socket fd, unsigned char *buf, size_t size, int timeout) { ssize_t res; @@ -156,7 +156,7 @@ my_recv_async(struct mysql_async_context *b, int fd, ssize_t -my_send_async(struct mysql_async_context *b, int fd, +my_send_async(struct mysql_async_context *b, my_socket fd, const unsigned char *buf, size_t size, int timeout) { ssize_t res; diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 329682fb8b4..8a03692598d 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -247,6 +247,7 @@ IF(MSVC AND NOT WITHOUT_DYNAMIC_PLUGINS) SET(CMAKE_CONFIGURABLE_FILE_CONTENT " IF ((mysqld_lib.def IS_NEWER_THAN mysqld_lib.lib) OR (mysqld_lib.def IS_NEWER_THAN mysqld_lib.exp)) + FILE(REMOVE mysqld_lib.lib mysqld_lib.exp) SET(ENV{VS_UNICODE_OUTPUT}) EXECUTE_PROCESS ( COMMAND \"${CMAKE_LINKER}\" /lib /NAME:mysqld.exe \"/DEF:${MYSQLD_DEF}\" /MACHINE:${_PLATFORM} diff --git a/sql/create_options.cc b/sql/create_options.cc index ca3ace5a5b9..d010b73c222 100644 --- a/sql/create_options.cc +++ b/sql/create_options.cc @@ -125,8 +125,8 @@ static bool set_one_value(ha_create_table_option *opt, MEM_ROOT *root) { DBUG_ENTER("set_one_value"); - DBUG_PRINT("enter", ("opt: 0x%lx type: %u name '%s' value: '%s'", - (ulong) opt, + DBUG_PRINT("enter", ("opt: %p type: %u name '%s' value: '%s'", + opt, opt->type, opt->name, (value->str ? value->str : "<DEFAULT>"))); switch (opt->type) diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index 99d39e31994..d44b313ec24 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -549,7 +549,7 @@ static void debug_sync_reset(THD *thd) static void debug_sync_remove_action(st_debug_sync_control *ds_control, st_debug_sync_action *action) { - uint dsp_idx= action - ds_control->ds_action; + uint dsp_idx= (uint)(action - ds_control->ds_action); DBUG_ENTER("debug_sync_remove_action"); DBUG_ASSERT(ds_control); DBUG_ASSERT(ds_control == current_thd->debug_sync_control); @@ -681,8 +681,8 @@ static st_debug_sync_action *debug_sync_get_action(THD *thd, } DBUG_ASSERT(action >= ds_control->ds_action); DBUG_ASSERT(action < ds_control->ds_action + ds_control->ds_active); - DBUG_PRINT("debug_sync", ("action: 0x%lx array: 0x%lx count: %u", - (long) action, (long) ds_control->ds_action, + DBUG_PRINT("debug_sync", ("action: %p array: %p count: %u", + action, ds_control->ds_action, ds_control->ds_active)); DBUG_RETURN(action); @@ -871,7 +871,7 @@ static char *debug_sync_token(char **token_p, uint *token_length_p, ptr, ptrend, MY_SEQ_NONSPACES); /* Get token length. 
*/ - *token_length_p= ptr - *token_p; + *token_length_p= (uint)(ptr - *token_p); /* If necessary, terminate token. */ if (*ptr) diff --git a/sql/discover.cc b/sql/discover.cc index 8785845d6e5..a683166fb7f 100644 --- a/sql/discover.cc +++ b/sql/discover.cc @@ -229,7 +229,7 @@ int extension_based_table_discovery(MY_DIR *dirp, const char *ext_meta, cur++; } advance(from, to, cur, skip); - dirp->number_of_files= to - dirp->dir_entry; + dirp->number_of_files= (uint)(to - dirp->dir_entry); return 0; } diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 02ac5633d65..69d17725bea 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -561,7 +561,7 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table) } if ((ptr= get_field(&mem_root, table->field[ET_FIELD_ORIGINATOR])) == NullS) DBUG_RETURN(TRUE); - originator = table->field[ET_FIELD_ORIGINATOR]->val_int(); + originator = (uint32) table->field[ET_FIELD_ORIGINATOR]->val_int(); /* ToDo : Andrey . Find a way not to allocate ptr on event_mem_root */ if ((ptr= get_field(&mem_root, @@ -911,9 +911,9 @@ Event_queue_element::compute_next_execution_time() { my_time_t time_now; DBUG_ENTER("Event_queue_element::compute_next_execution_time"); - DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx", + DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: %p", (long) starts, (long) ends, (long) last_executed, - (long) this)); + this)); if (status != Event_parse_data::ENABLED) { diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index a14d3c1eef4..9d899cb637e 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -1063,7 +1063,7 @@ Event_db_repository::load_named_event(THD *thd, const LEX_CSTRING *dbname, TABLE_LIST event_table; DBUG_ENTER("Event_db_repository::load_named_event"); - DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, + DBUG_PRINT("enter",("thd: %p name: %*s", thd, (int) name->length, name->str)); event_table.init_one_table("mysql", 5, "event", 5, "event", TL_READ); @@ -1186,7 +1186,7 @@ Event_db_repository::check_system_tables(THD *thd) const unsigned int event_priv_column_position= 29; DBUG_ENTER("Event_db_repository::check_system_tables"); - DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); + DBUG_PRINT("enter", ("thd: %p", thd)); /* Check mysql.db */ tables.init_one_table("mysql", 5, "db", 2, "db", TL_READ); diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index c598c8a15ee..b2ff80626db 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -494,9 +494,9 @@ Event_parse_data::check_parse_data(THD *thd) { bool ret; DBUG_ENTER("Event_parse_data::check_parse_data"); - DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx", - (long) item_execute_at, (long) item_expression, - (long) item_starts, (long) item_ends)); + DBUG_PRINT("info", ("execute_at: %p expr=%p starts=%p ends=%p", + item_execute_at, item_expression, + item_starts, item_ends)); init_name(thd, identifier); @@ -529,9 +529,9 @@ Event_parse_data::init_definer(THD *thd) size_t definer_user_len= thd->lex->definer->user.length; size_t definer_host_len= thd->lex->definer->host.length; char *tmp; - DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx " - "definer_user: 0x%lx", (long) thd->mem_root, - (long) definer_user)); + DBUG_PRINT("info",("init definer_user thd->mem_root: %p " + "definer_user: %p", thd->mem_root, + definer_user)); /* + 1 for @ */ DBUG_PRINT("info",("init definer as whole")); diff --git 
a/sql/event_queue.cc b/sql/event_queue.cc index a8ddff81110..e46326afe18 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -135,7 +135,7 @@ bool Event_queue::init_queue(THD *thd) { DBUG_ENTER("Event_queue::init_queue"); - DBUG_PRINT("enter", ("this: 0x%lx", (long) this)); + DBUG_PRINT("enter", ("this: %p", this)); LOCK_QUEUE_DATA(); @@ -201,7 +201,7 @@ Event_queue::create_event(THD *thd, Event_queue_element *new_element, bool *created) { DBUG_ENTER("Event_queue::create_event"); - DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, + DBUG_PRINT("enter", ("thd: %p et=%s.%s", thd, new_element->dbname.str, new_element->name.str)); /* Will do nothing if the event is disabled */ @@ -213,7 +213,7 @@ Event_queue::create_event(THD *thd, Event_queue_element *new_element, DBUG_RETURN(FALSE); } - DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); + DBUG_PRINT("info", ("new event in the queue: %p", new_element)); LOCK_QUEUE_DATA(); *created= (queue_insert_safe(&queue, (uchar *) new_element) == FALSE); @@ -266,7 +266,7 @@ Event_queue::update_event(THD *thd, const LEX_CSTRING *dbname, /* If not disabled event */ if (new_element) { - DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); + DBUG_PRINT("info", ("new event in the queue: %p", new_element)); queue_insert_safe(&queue, (uchar *) new_element); mysql_cond_broadcast(&COND_queue_state); } @@ -549,7 +549,7 @@ Event_queue::dbug_dump_queue(my_time_t when) i++) { et= ((Event_queue_element*)queue_element(&queue, i)); - DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et, + DBUG_PRINT("info", ("et: %p name: %s.%s", et, et->dbname.str, et->name.str)); DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u " "expr: %ld et.exec_at: %ld now: %ld " @@ -677,8 +677,8 @@ Event_queue::get_top_for_execution_if_time(THD *thd, end: UNLOCK_QUEUE_DATA(); - DBUG_PRINT("info", ("returning %d et_new: 0x%lx ", - ret, (long) *event_name)); + DBUG_PRINT("info", ("returning %d et_new: %p ", + ret, *event_name)); if (*event_name) { diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index e8b586801d6..d9db7afe1ab 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -295,7 +295,7 @@ Event_worker_thread::run(THD *thd, Event_queue_element_for_exec *event) res= post_init_event_thread(thd); DBUG_ENTER("Event_worker_thread::run"); - DBUG_PRINT("info", ("Time is %ld, THD: 0x%lx", (long) my_time(0), (long) thd)); + DBUG_PRINT("info", ("Time is %u, THD: %p", (uint)my_time(0), thd)); inc_thread_running(); if (res) @@ -420,7 +420,7 @@ Event_scheduler::start(int *err_no) scheduler_thd= new_thd; DBUG_PRINT("info", ("Setting state go RUNNING")); state= RUNNING; - DBUG_PRINT("info", ("Forking new thread for scheduler. THD: 0x%lx", (long) new_thd)); + DBUG_PRINT("info", ("Forking new thread for scheduler. 
THD: %p", new_thd)); if ((*err_no= mysql_thread_create(key_thread_event_scheduler, &th, &connection_attrib, event_scheduler_thread, @@ -485,7 +485,7 @@ Event_scheduler::run(THD *thd) } DBUG_PRINT("info", ("get_top_for_execution_if_time returned " - "event_name=0x%lx", (long) event_name)); + "event_name=%p", event_name)); if (event_name) { if ((res= execute_top(event_name))) @@ -566,7 +566,7 @@ Event_scheduler::execute_top(Event_queue_element_for_exec *event_name) started_events++; executed_events++; // For SHOW STATUS - DBUG_PRINT("info", ("Event is in THD: 0x%lx", (long) new_thd)); + DBUG_PRINT("info", ("Event is in THD: %p", new_thd)); DBUG_RETURN(FALSE); error: @@ -617,7 +617,7 @@ Event_scheduler::stop() { THD *thd= current_thd; DBUG_ENTER("Event_scheduler::stop"); - DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); + DBUG_PRINT("enter", ("thd: %p", thd)); LOCK_DATA(); DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str)); diff --git a/sql/events.cc b/sql/events.cc index 9ecc55fbdf0..069fa97aa36 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -1121,7 +1121,7 @@ Events::load_events_from_db(THD *thd) uint count= 0; ulong saved_master_access; DBUG_ENTER("Events::load_events_from_db"); - DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); + DBUG_PRINT("enter", ("thd: %p", thd)); /* NOTE: even if we run in read-only mode, we should be able to lock the diff --git a/sql/field.cc b/sql/field.cc index 363c3bbf73b..e10c82347d5 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5404,7 +5404,7 @@ int Field_temporal_with_date::store(double nr) ErrConvDouble str(nr); longlong tmp= double_to_datetime(nr, <ime, - sql_mode_for_dates(thd), &error); + (uint) sql_mode_for_dates(thd), &error); return store_TIME_with_warning(<ime, &str, error, tmp != -1); } @@ -7983,7 +7983,7 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) DBUG_ASSERT(length <= max_data_length()); new_length= length; - copy_length= table->in_use->variables.group_concat_max_len; + copy_length= (uint)MY_MIN(UINT_MAX,table->in_use->variables.group_concat_max_len); if (new_length > copy_length) { new_length= Well_formed_prefix(cs, @@ -8394,8 +8394,8 @@ const uchar *Field_blob::unpack(uchar *to, const uchar *from, const uchar *from_end, uint param_data) { DBUG_ENTER("Field_blob::unpack"); - DBUG_PRINT("enter", ("to: 0x%lx; from: 0x%lx; param_data: %u", - (ulong) to, (ulong) from, param_data)); + DBUG_PRINT("enter", ("to: %p; from: %p; param_data: %u", + to, from, param_data)); uint const master_packlength= param_data > 0 ? param_data & 0xFF : packlength; if (from + master_packlength > from_end) @@ -8584,7 +8584,7 @@ uint gis_field_options_read(const uchar *buf, uint buf_len, } end_of_record: - return cbuf - buf; + return (uint)(cbuf - buf); } @@ -9315,8 +9315,8 @@ Field_bit::do_last_null_byte() const bits. On systems with CHAR_BIT > 8 (not very common), the storage will lose the extra bits. 
*/ - DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx", - bit_ofs, bit_len, (long) bit_ptr)); + DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: %p", + bit_ofs, bit_len, bit_ptr)); uchar *result; if (bit_len == 0) result= null_ptr; @@ -10000,7 +10000,7 @@ void Column_definition::create_length_to_internal_length_bit() } else { - pack_length= length / 8; + pack_length= (uint) length / 8; /* We need one extra byte to store the bits we save among the null bits */ key_length= pack_length + MY_TEST(length & 7); } @@ -10010,7 +10010,7 @@ void Column_definition::create_length_to_internal_length_bit() void Column_definition::create_length_to_internal_length_newdecimal() { key_length= pack_length= - my_decimal_get_binary_size(my_decimal_length_to_precision(length, + my_decimal_get_binary_size(my_decimal_length_to_precision((uint) length, decimals, flags & UNSIGNED_FLAG), @@ -10117,9 +10117,9 @@ bool Column_definition::fix_attributes_decimal() my_error(ER_M_BIGGER_THAN_D, MYF(0), field_name.str); return true; } - length= my_decimal_precision_to_length(length, decimals, + length= my_decimal_precision_to_length((uint) length, decimals, flags & UNSIGNED_FLAG); - pack_length= my_decimal_get_binary_size(length, decimals); + pack_length= my_decimal_get_binary_size((uint) length, decimals); return false; } @@ -10128,7 +10128,7 @@ bool Column_definition::fix_attributes_bit() { if (!length) length= 1; - pack_length= (length + 7) / 8; + pack_length= ((uint) length + 7) / 8; return check_length(ER_TOO_BIG_DISPLAYWIDTH, MAX_BIT_FIELD_LENGTH); } @@ -10226,7 +10226,7 @@ bool Column_definition::check(THD *thd) DBUG_RETURN(true); /* Remember the value of length */ - char_length= length; + char_length= (uint)length; /* Set NO_DEFAULT_VALUE_FLAG if this field doesn't have a default value and @@ -10651,7 +10651,7 @@ Column_definition::Column_definition(THD *thd, Field *old_field, interval_list.empty(); // prepare_interval_field() needs this - char_length= length; + char_length= (uint)length; /* Copy the default (constant/function) from the column object orig_field, if @@ -10994,7 +10994,7 @@ bool Field::save_in_field_default_value(bool view_error_processing) { my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, ER_THD(thd, ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); - return -1; + return true; } if (view_error_processing) @@ -11013,13 +11013,13 @@ bool Field::save_in_field_default_value(bool view_error_processing) ER_THD(thd, ER_NO_DEFAULT_FOR_FIELD), field_name.str); } - return 1; + return true; } set_default(); return !is_null() && validate_value_in_record_with_warn(thd, table->record[0]) && - thd->is_error() ? 
-1 : 0; + thd->is_error(); } diff --git a/sql/field.h b/sql/field.h index 061232ea1ef..bf115b38816 100644 --- a/sql/field.h +++ b/sql/field.h @@ -4071,15 +4071,16 @@ public: } void create_length_to_internal_length_simple() { - key_length= pack_length= type_handler()->calc_pack_length(length); + key_length= pack_length= type_handler()->calc_pack_length((uint32) length); } void create_length_to_internal_length_string() { length*= charset->mbmaxlen; if (real_field_type() == MYSQL_TYPE_VARCHAR && compression_method()) length++; - key_length= length; - pack_length= type_handler()->calc_pack_length(length); + DBUG_ASSERT(length <= UINT_MAX32); + key_length= (uint) length; + pack_length= type_handler()->calc_pack_length((uint32) length); } void create_length_to_internal_length_typelib() { diff --git a/sql/filesort.cc b/sql/filesort.cc index 04431a2bccc..d2e167ce7bf 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -140,7 +140,8 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, table_map first_table_bit) { int error; - size_t memory_available= thd->variables.sortbuff_size; + DBUG_ASSERT(thd->variables.sortbuff_size <= SIZE_T_MAX); + size_t memory_available= (size_t)thd->variables.sortbuff_size; uint maxbuffer; BUFFPEK *buffpek; ha_rows num_rows= HA_POS_ERROR; @@ -1805,7 +1806,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, if (flag == 0) { if (my_b_write(to_file, (uchar*) buffpek->key, - (rec_length*buffpek->mem_count))) + (size_t)(rec_length*buffpek->mem_count))) { error= 1; goto err; /* purecov: inspected */ } diff --git a/sql/gen_lex_token.cc b/sql/gen_lex_token.cc index 91b91f49e37..ebd966d9301 100644 --- a/sql/gen_lex_token.cc +++ b/sql/gen_lex_token.cc @@ -77,7 +77,7 @@ void set_token(int tok, const char *str) } compiled_token_array[tok].m_token_string= str; - compiled_token_array[tok].m_token_length= strlen(str); + compiled_token_array[tok].m_token_length= (int)strlen(str); compiled_token_array[tok].m_append_space= true; compiled_token_array[tok].m_start_expr= false; } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 810df2f0ac1..effabc3a3c0 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -2506,7 +2506,7 @@ register_query_cache_dependant_tables(THD *thd, part= i * num_subparts + j; /* we store the end \0 as part of the key */ end= strmov(engine_pos, sub_elem->partition_name); - length= end - engine_key; + length= (uint)(end - engine_key); /* Copy the suffix also to query cache key */ memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end)); if (reg_query_cache_dependant_table(thd, engine_key, length, @@ -2522,7 +2522,7 @@ register_query_cache_dependant_tables(THD *thd, else { char *end= engine_pos+1; // copy end \0 - uint length= end - engine_key; + uint length= (uint)(end - engine_key); /* Copy the suffix also to query cache key */ memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end)); if (reg_query_cache_dependant_table(thd, engine_key, length, @@ -4802,8 +4802,8 @@ int ha_partition::rnd_init(bool scan) } /* Now we see what the index of our first important partition is */ - DBUG_PRINT("info", ("m_part_info->read_partitions: 0x%lx", - (long) m_part_info->read_partitions.bitmap)); + DBUG_PRINT("info", ("m_part_info->read_partitions: %p", + m_part_info->read_partitions.bitmap)); part_id= bitmap_get_first_set(&(m_part_info->read_partitions)); DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id)); @@ -6737,7 +6737,7 @@ int ha_partition::info(uint flag) /* Get variables if not already done */ if (!(flag 
& HA_STATUS_VARIABLE) || !bitmap_is_set(&(m_part_info->read_partitions), - (file_array - m_file))) + (uint)(file_array - m_file))) file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag); if (file->stats.records > max_records) { @@ -7704,7 +7704,7 @@ ha_rows ha_partition::estimate_rows_upper_bound() do { - if (bitmap_is_set(&(m_part_info->read_partitions), (file - m_file))) + if (bitmap_is_set(&(m_part_info->read_partitions), (uint)(file - m_file))) { rows= (*file)->estimate_rows_upper_bound(); if (rows == HA_POS_ERROR) diff --git a/sql/handler.cc b/sql/handler.cc index bcb68ca0695..7609ed047a0 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -259,7 +259,7 @@ handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc, { handler *file; DBUG_ENTER("get_new_handler"); - DBUG_PRINT("enter", ("alloc: 0x%lx", (long) alloc)); + DBUG_PRINT("enter", ("alloc: %p", alloc)); if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create) { @@ -1376,7 +1376,7 @@ int ha_commit_trans(THD *thd, bool all) uint rw_ha_count= ha_check_and_coalesce_trx_read_only(thd, ha_info, all); /* rw_trans is TRUE when we in a transaction changing data */ bool rw_trans= is_real_trans && - (rw_ha_count > !thd->is_current_stmt_binlog_disabled()); + (rw_ha_count > (thd->is_current_stmt_binlog_disabled()?0U:1U)); MDL_request mdl_request; DBUG_PRINT("info", ("is_real_trans: %d rw_trans: %d rw_ha_count: %d", is_real_trans, rw_trans, rw_ha_count)); @@ -3209,8 +3209,8 @@ void handler::column_bitmaps_signal() { DBUG_ENTER("column_bitmaps_signal"); if (table) - DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", - (long) table->read_set, (long) table->write_set)); + DBUG_PRINT("info", ("read_set: %p write_set: %p", + table->read_set, table->write_set)); DBUG_VOID_RETURN; } @@ -5586,9 +5586,9 @@ TYPELIB *ha_known_exts(void) } -static bool stat_print(THD *thd, const char *type, uint type_len, - const char *file, uint file_len, - const char *status, uint status_len) +static bool stat_print(THD *thd, const char *type, size_t type_len, + const char *file, size_t file_len, + const char *status, size_t status_len) { Protocol *protocol= thd->protocol; protocol->prepare_for_resend(); @@ -5770,9 +5770,9 @@ bool handler::check_table_binlog_row_based_internal(bool binlog_row) static int write_locked_table_maps(THD *thd) { DBUG_ENTER("write_locked_table_maps"); - DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx " - "thd->extra_lock: 0x%lx", - (long) thd, (long) thd->lock, (long) thd->extra_lock)); + DBUG_PRINT("enter", ("thd:%p thd->lock:%p " + "thd->extra_lock: %p", + thd, thd->lock, thd->extra_lock)); DBUG_PRINT("debug", ("get_binlog_table_maps(): %d", thd->get_binlog_table_maps())); diff --git a/sql/handler.h b/sql/handler.h index 5e0adfca072..f681040db39 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -789,9 +789,9 @@ struct TABLE_SHARE; struct HA_CREATE_INFO; struct st_foreign_key_info; typedef struct st_foreign_key_info FOREIGN_KEY_INFO; -typedef bool (stat_print_fn)(THD *thd, const char *type, uint type_len, - const char *file, uint file_len, - const char *status, uint status_len); +typedef bool (stat_print_fn)(THD *thd, const char *type, size_t type_len, + const char *file, size_t file_len, + const char *status, size_t status_len); enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX }; extern st_plugin_int *hton2plugin[MAX_HA]; diff --git a/sql/item.cc b/sql/item.cc index 54426c761c8..ad17cd5811c 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1069,6 +1069,17 @@ bool 
Item::check_type_can_return_int(const char *opname) const } +bool Item::check_type_can_return_decimal(const char *opname) const +{ + const Type_handler *handler= type_handler(); + if (handler->can_return_decimal()) + return false; + my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), + handler->name().ptr(), opname); + return true; +} + + bool Item::check_type_can_return_real(const char *opname) const { const Type_handler *handler= type_handler(); @@ -1080,6 +1091,50 @@ bool Item::check_type_can_return_real(const char *opname) const } +bool Item::check_type_can_return_date(const char *opname) const +{ + const Type_handler *handler= type_handler(); + if (handler->can_return_date()) + return false; + my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), + handler->name().ptr(), opname); + return true; +} + + +bool Item::check_type_can_return_time(const char *opname) const +{ + const Type_handler *handler= type_handler(); + if (handler->can_return_time()) + return false; + my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), + handler->name().ptr(), opname); + return true; +} + + +bool Item::check_type_can_return_str(const char *opname) const +{ + const Type_handler *handler= type_handler(); + if (handler->can_return_str()) + return false; + my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), + handler->name().ptr(), opname); + return true; +} + + +bool Item::check_type_can_return_text(const char *opname) const +{ + const Type_handler *handler= type_handler(); + if (handler->can_return_text()) + return false; + my_error(ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION, MYF(0), + handler->name().ptr(), opname); + return true; +} + + bool Item::check_type_scalar(const char *opname) const { const Type_handler *handler= type_handler(); @@ -1109,7 +1164,7 @@ void Item::set_name(THD *thd, const char *str, uint length, CHARSET_INFO *cs) if (!cs->ctype || cs->mbminlen > 1) { str+= cs->cset->scan(cs, str, str + length, MY_SEQ_SPACES); - length-= str - str_start; + length-= (uint)(str - str_start); } else { @@ -6398,8 +6453,8 @@ void Item_field::save_org_in_field(Field *to, fast_field_copier fast_field_copier_func) { DBUG_ENTER("Item_field::save_org_in_field"); - DBUG_PRINT("enter", ("setup: 0x%lx data: 0x%lx", - (ulong) to, (ulong) fast_field_copier_func)); + DBUG_PRINT("enter", ("setup: %p data: %p", + to, fast_field_copier_func)); if (fast_field_copier_func) { if (field->is_null()) diff --git a/sql/item.h b/sql/item.h index ddaf44770ab..0e7db0d6575 100644 --- a/sql/item.h +++ b/sql/item.h @@ -89,6 +89,7 @@ public: const char *dbug_print_item(Item *item); +class Virtual_tmp_table; class sp_head; class Protocol; struct TABLE_LIST; @@ -659,7 +660,8 @@ protected: return value; } bool get_date_with_conversion_from_item(Item *item, - MYSQL_TIME *ltime, uint fuzzydate) + MYSQL_TIME *ltime, + ulonglong fuzzydate) { DBUG_ASSERT(fixed == 1); return (null_value= item->get_date_with_conversion(ltime, fuzzydate)); } @@ -1385,14 +1387,14 @@ public: virtual longlong val_datetime_packed() { MYSQL_TIME ltime; - uint fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES; + ulonglong fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES; return get_date_with_conversion(&ltime, fuzzydate) ? 
0 : pack_time(&ltime); } // Get a TIME value in numeric packed format for comparison virtual longlong val_time_packed() { MYSQL_TIME ltime; - uint fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES | TIME_TIME_ONLY; + ulonglong fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES | TIME_TIME_ONLY; return get_date(&ltime, fuzzydate) ? 0 : pack_time(&ltime); } // Get a temporal value in packed DATE/DATETIME or TIME format @@ -1710,7 +1712,12 @@ public: bool check_type_or_binary(const char *opname, const Type_handler *handler) const; bool check_type_general_purpose_string(const char *opname) const; bool check_type_can_return_int(const char *opname) const; + bool check_type_can_return_decimal(const char *opname) const; bool check_type_can_return_real(const char *opname) const; + bool check_type_can_return_str(const char *opname) const; + bool check_type_can_return_text(const char *opname) const; + bool check_type_can_return_date(const char *opname) const; + bool check_type_can_return_time(const char *opname) const; // It is not row => null inside is impossible virtual bool null_inside() { return 0; } // used in row subselects to get value of elements @@ -2088,16 +2095,12 @@ public: class Item_spvar_args: public Item_args { - TABLE *m_table; + Virtual_tmp_table *m_table; public: Item_spvar_args():Item_args(), m_table(NULL) { } ~Item_spvar_args(); bool row_create_items(THD *thd, List<Spvar_definition> *list); - Field *get_row_field(uint i) const - { - DBUG_ASSERT(m_table); - return m_table->field[i]; - } + Field *get_row_field(uint i) const; }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 3ec382b0541..0d57066c2d2 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2325,7 +2325,7 @@ Item_func_ifnull::str_op(String *str) } -bool Item_func_ifnull::date_op(MYSQL_TIME *ltime, uint fuzzydate) +bool Item_func_ifnull::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) { DBUG_ASSERT(fixed == 1); if (!args[0]->get_date_with_conversion(ltime, fuzzydate & ~TIME_FUZZY_DATES)) @@ -2798,7 +2798,7 @@ Item_func_nullif::decimal_op(my_decimal * decimal_value) bool -Item_func_nullif::date_op(MYSQL_TIME *ltime, uint fuzzydate) +Item_func_nullif::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) { DBUG_ASSERT(fixed == 1); if (!compare()) @@ -2938,7 +2938,7 @@ my_decimal *Item_func_case::decimal_op(my_decimal *decimal_value) } -bool Item_func_case::date_op(MYSQL_TIME *ltime, uint fuzzydate) +bool Item_func_case::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) { DBUG_ASSERT(fixed == 1); Item *item= find_item(); @@ -3349,7 +3349,7 @@ double Item_func_coalesce::real_op() } -bool Item_func_coalesce::date_op(MYSQL_TIME *ltime,uint fuzzydate) +bool Item_func_coalesce::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) { DBUG_ASSERT(fixed == 1); for (uint i= 0; i < arg_count; i++) @@ -3601,7 +3601,7 @@ uchar *in_row::get_value(Item *item) void in_row::set(uint pos, Item *item) { DBUG_ENTER("in_row::set"); - DBUG_PRINT("enter", ("pos: %u item: 0x%lx", pos, (ulong) item)); + DBUG_PRINT("enter", ("pos: %u item: %p", pos,item)); ((cmp_item_row*) base)[pos].store_value_by_template(current_thd, &tmp, item); DBUG_VOID_RETURN; } @@ -3842,7 +3842,7 @@ cmp_item* cmp_item_row::make_same() cmp_item_row::~cmp_item_row() { DBUG_ENTER("~cmp_item_row"); - DBUG_PRINT("enter",("this: 0x%lx", (long) this)); + DBUG_PRINT("enter",("this: %p", this)); if (comparators) { for (uint i= 0; i < n; i++) @@ -5374,7 +5374,7 @@ void Regexp_processor_pcre::set_recursion_limit(THD *thd) DBUG_ASSERT(thd == current_thd); stack_used= 
available_stack_size(thd->thread_stack, &stack_used); m_pcre_extra.match_limit_recursion= - (my_thread_stack_size - STACK_MIN_SIZE - stack_used)/my_pcre_frame_size; + (ulong)((my_thread_stack_size - STACK_MIN_SIZE - stack_used)/my_pcre_frame_size); } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 0756899f47c..f0862608463 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -930,6 +930,8 @@ public: class Item_func_strcmp :public Item_long_func { + bool check_arguments() const + { return check_argument_types_can_return_str(0, 2); } String value1, value2; DTCollation cmp_collation; public: @@ -993,7 +995,7 @@ public: longlong int_op(); String *str_op(String *); my_decimal *decimal_op(my_decimal *); - bool date_op(MYSQL_TIME *ltime,uint fuzzydate); + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate); void fix_length_and_dec() { if (!aggregate_for_result(func_name(), args, arg_count, true)) @@ -1065,7 +1067,7 @@ public: longlong int_op(); String *str_op(String *str); my_decimal *decimal_op(my_decimal *); - bool date_op(MYSQL_TIME *ltime,uint fuzzydate); + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate); void fix_length_and_dec() { Item_func_case_abbreviation2::fix_length_and_dec2(args); @@ -1097,7 +1099,7 @@ public: :Item_func_case_abbreviation2(thd, a, b, c) { } - bool date_op(MYSQL_TIME *ltime, uint fuzzydate) + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) { return get_date_with_conversion_from_item(find_item(), ltime, fuzzydate); } @@ -1210,7 +1212,7 @@ public: Item_func_hybrid_field_type::cleanup(); arg_count= 2; // See the comment to the constructor } - bool date_op(MYSQL_TIME *ltime, uint fuzzydate); + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate); double real_op(); longlong int_op(); String *str_op(String *str); @@ -2099,7 +2101,7 @@ public: longlong int_op(); String *str_op(String *); my_decimal *decimal_op(my_decimal *); - bool date_op(MYSQL_TIME *ltime, uint fuzzydate); + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate); bool fix_fields(THD *thd, Item **ref); table_map not_null_tables() const { return 0; } const char *func_name() const { return "case"; } @@ -2829,6 +2831,11 @@ public: */ class Item_func_regexp_instr :public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_can_return_str(func_name()) || + args[1]->check_type_can_return_text(func_name()); + } Regexp_processor_pcre re; DTCollation cmp_collation; public: diff --git a/sql/item_create.cc b/sql/item_create.cc index 32f47ae5b23..6edeade0244 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -3302,7 +3302,7 @@ Create_udf_func Create_udf_func::s_singleton; Item* Create_udf_func::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list) { - udf_func *udf= find_udf(name->str, name->length); + udf_func *udf= find_udf(name->str, (uint) name->length); DBUG_ASSERT(udf); return create(thd, udf, item_list); } diff --git a/sql/item_func.cc b/sql/item_func.cc index 2af7338b3c6..28690ff2bee 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -218,6 +218,58 @@ bool Item_func::check_argument_types_can_return_real(uint start, } +bool Item_func::check_argument_types_can_return_text(uint start, + uint end) const +{ + for (uint i= start; i < end ; i++) + { + DBUG_ASSERT(i < arg_count); + if (args[i]->check_type_can_return_text(func_name())) + return true; + } + return false; +} + + +bool Item_func::check_argument_types_can_return_str(uint start, + uint end) const +{ + for (uint i= start; i < end ; i++) + { + DBUG_ASSERT(i < arg_count); + if 
(args[i]->check_type_can_return_str(func_name())) + return true; + } + return false; +} + + +bool Item_func::check_argument_types_can_return_date(uint start, + uint end) const +{ + for (uint i= start; i < end ; i++) + { + DBUG_ASSERT(i < arg_count); + if (args[i]->check_type_can_return_date(func_name())) + return true; + } + return false; +} + + +bool Item_func::check_argument_types_can_return_time(uint start, + uint end) const +{ + for (uint i= start; i < end ; i++) + { + DBUG_ASSERT(i < arg_count); + if (args[i]->check_type_can_return_time(func_name())) + return true; + } + return false; +} + + bool Item_func::check_argument_types_scalar(uint start, uint end) const { for (uint i= start; i < end; i++) diff --git a/sql/item_func.h b/sql/item_func.h index 04045eefafb..de213df0fc5 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -48,6 +48,10 @@ protected: uint start, uint end) const; bool check_argument_types_can_return_int(uint start, uint end) const; bool check_argument_types_can_return_real(uint start, uint end) const; + bool check_argument_types_can_return_str(uint start, uint end) const; + bool check_argument_types_can_return_text(uint start, uint end) const; + bool check_argument_types_can_return_date(uint start, uint end) const; + bool check_argument_types_can_return_time(uint start, uint end) const; public: table_map not_null_tables_cache; @@ -485,8 +489,8 @@ public: bool get_date_from_date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) { return date_op(ltime, - fuzzydate | - (field_type() == MYSQL_TYPE_TIME ? TIME_TIME_ONLY : 0)); + (fuzzydate | + (field_type() == MYSQL_TYPE_TIME ? TIME_TIME_ONLY : 0))); } // Value methods that involve conversion @@ -606,7 +610,7 @@ public: field type is a temporal type. @return The result of the operation. 
*/ - virtual bool date_op(MYSQL_TIME *res, uint fuzzy_date)= 0; + virtual bool date_op(MYSQL_TIME *res, ulonglong fuzzy_date)= 0; }; @@ -665,7 +669,11 @@ public: Item_func_hybrid_field_type(thd, list) { } String *str_op(String *str) { DBUG_ASSERT(0); return 0; } - bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; } + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) + { + DBUG_ASSERT(0); + return true; + } }; @@ -1178,6 +1186,8 @@ public: class Item_dec_func :public Item_real_func { + bool check_arguments() const + { return check_argument_types_can_return_real(0, arg_count); } public: Item_dec_func(THD *thd, Item *a): Item_real_func(thd, a) {} Item_dec_func(THD *thd, Item *a, Item *b): Item_real_func(thd, a, b) {} @@ -1403,6 +1413,8 @@ class Item_func_rand :public Item_real_func { struct my_rnd_struct *rand; bool first_eval; // TRUE if val_real() is called 1st time + bool check_arguments() const + { return check_argument_types_can_return_int(0, arg_count); } public: Item_func_rand(THD *thd, Item *a): Item_real_func(thd, a), rand(0), first_eval(TRUE) {} @@ -1426,6 +1438,8 @@ private: class Item_func_sign :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_real(func_name()); } public: Item_func_sign(THD *thd, Item *a): Item_long_func(thd, a) {} const char *func_name() const { return "sign"; } @@ -1441,6 +1455,8 @@ class Item_func_units :public Item_real_func { char *name; double mul,add; + bool check_arguments() const + { return check_argument_types_can_return_real(0, arg_count); } public: Item_func_units(THD *thd, char *name_arg, Item *a, double mul_arg, double add_arg): @@ -1590,14 +1606,23 @@ public: }; -class Item_func_octet_length :public Item_long_func +class Item_long_func_length: public Item_long_func +{ + bool check_arguments() const + { return args[0]->check_type_can_return_str(func_name()); } +public: + Item_long_func_length(THD *thd, Item *a): Item_long_func(thd, a) {} + void fix_length_and_dec() { max_length=10; } +}; + + +class Item_func_octet_length :public Item_long_func_length { String value; public: - Item_func_octet_length(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_octet_length(THD *thd, Item *a): Item_long_func_length(thd, a) {} longlong val_int(); const char *func_name() const { return "octet_length"; } - void fix_length_and_dec() { max_length=10; } Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy<Item_func_octet_length>(thd, mem_root, this); } }; @@ -1617,20 +1642,21 @@ public: { return get_item_copy<Item_func_bit_length>(thd, mem_root, this); } }; -class Item_func_char_length :public Item_long_func +class Item_func_char_length :public Item_long_func_length { String value; public: - Item_func_char_length(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_char_length(THD *thd, Item *a): Item_long_func_length(thd, a) {} longlong val_int(); const char *func_name() const { return "char_length"; } - void fix_length_and_dec() { max_length=10; } Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy<Item_func_char_length>(thd, mem_root, this); } }; class Item_func_coercibility :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_str(func_name()); } public: Item_func_coercibility(THD *thd, Item *a): Item_long_func(thd, a) {} longlong val_int(); @@ -1657,6 +1683,11 @@ public: */ class Item_func_locate :public Item_long_func { + bool check_arguments() const + { + return 
check_argument_types_can_return_str(0, 2) || + (arg_count > 2 && args[2]->check_type_can_return_int(func_name())); + } String value1,value2; DTCollation cmp_collation; public: @@ -1694,6 +1725,8 @@ public: class Item_func_ascii :public Item_long_func { + bool check_arguments() const + { return check_argument_types_can_return_str(0, arg_count); } String value; public: Item_func_ascii(THD *thd, Item *a): Item_long_func(thd, a) {} @@ -1706,6 +1739,8 @@ public: class Item_func_ord :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_str(func_name()); } String value; public: Item_func_ord(THD *thd, Item *a): Item_long_func(thd, a) {} @@ -1718,6 +1753,8 @@ public: class Item_func_find_in_set :public Item_long_func { + bool check_arguments() const + { return check_argument_types_can_return_str(0, 2); } String value,value2; uint enum_value; ulonglong enum_bit; @@ -1736,6 +1773,8 @@ public: class Item_func_bit: public Item_longlong_func { + bool check_arguments() const + { return check_argument_types_can_return_int(0, arg_count); } public: Item_func_bit(THD *thd, Item *a, Item *b): Item_longlong_func(thd, a, b) {} Item_func_bit(THD *thd, Item *a): Item_longlong_func(thd, a) {} @@ -1772,6 +1811,8 @@ public: class Item_func_bit_count :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_int(func_name()); } public: Item_func_bit_count(THD *thd, Item *a): Item_long_func(thd, a) {} longlong val_int(); @@ -1822,6 +1863,8 @@ public: class Item_func_last_insert_id :public Item_longlong_func { + bool check_arguments() const + { return check_argument_types_can_return_int(0, arg_count); } public: Item_func_last_insert_id(THD *thd): Item_longlong_func(thd) {} Item_func_last_insert_id(THD *thd, Item *a): Item_longlong_func(thd, a) {} @@ -1845,6 +1888,11 @@ public: class Item_func_benchmark :public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_can_return_int(func_name()) || + args[1]->check_type_scalar(func_name()); + } public: Item_func_benchmark(THD *thd, Item *count_expr, Item *expr): Item_long_func(thd, count_expr, expr) @@ -1867,6 +1915,8 @@ void item_func_sleep_free(void); class Item_func_sleep :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_real(func_name()); } public: Item_func_sleep(THD *thd, Item *a): Item_long_func(thd, a) {} void fix_length_and_dec() { fix_char_length(1); } @@ -2147,6 +2197,11 @@ void mysql_ull_set_explicit_lock_duration(THD *thd); class Item_func_get_lock :public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_general_purpose_string(func_name()) || + args[1]->check_type_can_return_real(func_name()); + } String value; public: Item_func_get_lock(THD *thd, Item *a, Item *b) :Item_long_func(thd, a, b) {} @@ -2169,6 +2224,8 @@ class Item_func_get_lock :public Item_long_func class Item_func_release_lock :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_general_purpose_string(func_name()); } String value; public: Item_func_release_lock(THD *thd, Item *a): Item_long_func(thd, a) {} @@ -2193,6 +2250,14 @@ public: class Item_master_pos_wait :public Item_longlong_func { + bool check_arguments() const + { + return + args[0]->check_type_general_purpose_string(func_name()) || + args[1]->check_type_can_return_int(func_name()) || + (arg_count > 2 && args[2]->check_type_can_return_int(func_name())) || + (arg_count > 3 && 
args[3]->check_type_general_purpose_string(func_name())); + } String value; public: Item_master_pos_wait(THD *thd, Item *a, Item *b) @@ -2215,6 +2280,11 @@ public: class Item_master_gtid_wait :public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_general_purpose_string(func_name()) || + (arg_count > 1 && args[1]->check_type_can_return_real(func_name())); + } String value; public: Item_master_gtid_wait(THD *thd, Item *a) @@ -2319,12 +2389,6 @@ public: bool update(); bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec(); - table_map used_tables() const - { - return used_tables_cache | RAND_TABLE_BIT; - } - bool const_item() const { return 0; } - bool is_expensive() { return 1; } void print(String *str, enum_query_type query_type); enum precedence precedence() const { return ASSIGN_PRECEDENCE; } void print_as_stmt(String *str, enum_query_type query_type); @@ -2578,6 +2642,8 @@ public: class Item_func_is_free_lock :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_general_purpose_string(func_name()); } String value; public: Item_func_is_free_lock(THD *thd, Item *a): Item_long_func(thd, a) {} @@ -2594,6 +2660,8 @@ public: class Item_func_is_used_lock :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_general_purpose_string(func_name()); } String value; public: Item_func_is_used_lock(THD *thd, Item *a): Item_long_func(thd, a) {} diff --git a/sql/item_inetfunc.cc b/sql/item_inetfunc.cc index c4cb9b2bae2..c434953ad43 100644 --- a/sql/item_inetfunc.cc +++ b/sql/item_inetfunc.cc @@ -211,13 +211,13 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer) IPv4-part differently on different platforms. */ -static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) +static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_address) { if (str_length < 7) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): " "invalid IPv4 address: too short.", - str_length, str)); + (int) str_length, str)); return false; } @@ -225,7 +225,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): " "invalid IPv4 address: too long.", - str_length, str)); + (int) str_length, str)); return false; } @@ -236,7 +236,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) int dot_count= 0; char c= 0; - while (((p - str) < str_length) && *p) + while (((p - str) < (int)str_length) && *p) { c= *p++; @@ -248,7 +248,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " "too many characters in a group.", - str_length, str)); + (int) str_length, str)); return false; } @@ -258,7 +258,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " "invalid byte value.", - str_length, str)); + (int) str_length, str)); return false; } } @@ -268,7 +268,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " "too few characters in a group.", - str_length, str)); + (int) str_length, str)); return false; } @@ -281,7 +281,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) if (dot_count > 3) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " - "too many dots.", str_length, 
str)); + "too many dots.", (int) str_length, str)); return false; } } @@ -289,7 +289,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " "invalid character at pos %d.", - str_length, str, (int) (p - str))); + (int) str_length, str, (int) (p - str))); return false; } } @@ -297,7 +297,7 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) if (c == '.') { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " - "ending at '.'.", str_length, str)); + "ending at '.'.", (int) str_length, str)); return false; } @@ -305,14 +305,14 @@ static bool str_to_ipv4(const char *str, int str_length, in_addr *ipv4_address) { DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: " "too few groups.", - str_length, str)); + (int) str_length, str)); return false; } ipv4_bytes[3]= (unsigned char) byte_value; DBUG_PRINT("info", ("str_to_ipv4(%.*s): valid IPv4 address: %d.%d.%d.%d", - str_length, str, + (int) str_length, str, ipv4_bytes[0], ipv4_bytes[1], ipv4_bytes[2], ipv4_bytes[3])); return true; @@ -493,7 +493,7 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address) return false; } - int bytes_to_move= dst - gap_ptr; + int bytes_to_move= (int)(dst - gap_ptr); for (int i= 1; i <= bytes_to_move; ++i) { diff --git a/sql/item_inetfunc.h b/sql/item_inetfunc.h index 33586c29175..13ce003a374 100644 --- a/sql/item_inetfunc.h +++ b/sql/item_inetfunc.h @@ -26,6 +26,8 @@ class Item_func_inet_aton : public Item_longlong_func { + bool check_arguments() const + { return check_argument_types_can_return_text(0, arg_count); } public: Item_func_inet_aton(THD *thd, Item *a): Item_longlong_func(thd, a) {} longlong val_int(); diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index 1542b42b1fb..f804f83f122 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -48,7 +48,7 @@ static bool eq_ascii_string(const CHARSET_INFO *cs, } -static bool append_simple(String *s, const char *a, uint a_len) +static bool append_simple(String *s, const char *a, size_t a_len) { if (!s->realloc_with_extra_if_needed(s->length() + a_len)) { @@ -60,7 +60,7 @@ static bool append_simple(String *s, const char *a, uint a_len) } -static inline bool append_simple(String *s, const uchar *a, uint a_len) +static inline bool append_simple(String *s, const uchar *a, size_t a_len) { return append_simple(s, (const char *) a, a_len); } @@ -255,7 +255,7 @@ void report_json_error_ex(String *js, json_engine_t *je, Sql_condition::enum_warning_level lv) { THD *thd= current_thd; - int position= (const char *) je->s.c_str - js->ptr(); + int position= (int)((const char *) je->s.c_str - js->ptr()); uint code; n_param++; @@ -312,7 +312,7 @@ static void report_path_error_ex(String *ps, json_path_t *p, Sql_condition::enum_warning_level lv) { THD *thd= current_thd; - int position= (const char *) p->s.c_str - ps->ptr() + 1; + int position= (int)((const char *) p->s.c_str - ps->ptr() + 1); uint code; n_param++; @@ -539,7 +539,7 @@ bool Item_func_json_query::check_and_get_value(json_engine_t *je, String *res, return true; } - res->set((const char *) je->value, je->s.c_str - value, je->s.cs); + res->set((const char *) je->value, (uint32)(je->s.c_str - value), je->s.cs); return false; } @@ -742,7 +742,8 @@ String *Item_func_json_extract::read_json(String *str, json_path_t p; const uchar *value; int not_first_value= 0; - uint n_arg, v_len; + uint n_arg; + size_t v_len; int 
possible_multiple_values; if ((null_value= args[0]->null_value)) @@ -1530,7 +1531,8 @@ String *Item_func_json_array_append::val_str(String *str) { json_engine_t je; String *js= args[0]->val_json(&tmp_js); - uint n_arg, n_path, str_rest_len; + uint n_arg, n_path; + size_t str_rest_len; const uchar *ar_end; DBUG_ASSERT(fixed == 1); @@ -2863,7 +2865,7 @@ skip_search: } while (json_read_keyname_chr(&je) == 0); if (je.s.error) goto err_return; - key_len= key_end - key_start; + key_len= (int)(key_end - key_start); if (!check_key_in_list(str, key_start, key_len)) { @@ -3137,7 +3139,7 @@ String *Item_func_json_format::val_str(String *str) { if (arg_count > 1) { - tab_size= args[1]->val_int(); + tab_size= (int)args[1]->val_int(); if (args[1]->null_value) { null_value= 1; diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index 7da72dc7f89..77e7588be25 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -295,6 +295,12 @@ public: class Item_func_json_length: public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_can_return_text(func_name()) || + (arg_count > 1 && + args[1]->check_type_general_purpose_string(func_name())); + } protected: json_path_with_flags path; String tmp_js; @@ -312,6 +318,8 @@ public: class Item_func_json_depth: public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_text(func_name()); } protected: String tmp_js; public: diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index cc3c421f185..d9b375e7be1 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -257,7 +257,7 @@ String *Item_func_sha2::val_str_ascii(String *str) str->realloc((uint) digest_length*2 + 1); /* Each byte as two nybbles */ /* Convert the large number to a string-hex representation. */ - array_to_hex((char *) str->ptr(), digest_buf, digest_length); + array_to_hex((char *) str->ptr(), digest_buf, (uint)digest_length); /* We poked raw bytes in. We must inform the the String of its length. */ str->length((uint) digest_length*2); /* Each byte as two nybbles */ @@ -272,7 +272,7 @@ void Item_func_sha2::fix_length_and_dec() maybe_null= 1; max_length = 0; - int sha_variant= args[1]->const_item() ? args[1]->val_int() : 512; + int sha_variant= (int)(args[1]->const_item() ? 
args[1]->val_int() : 512); switch (sha_variant) { case 0: // SHA-256 is the default @@ -3796,12 +3796,12 @@ String *Item_func_like_range::val_str(String *str) if (!res || args[0]->null_value || args[1]->null_value || nbytes < 0 || nbytes > MAX_BLOB_WIDTH || - min_str.alloc(nbytes) || max_str.alloc(nbytes)) + min_str.alloc((size_t)nbytes) || max_str.alloc((size_t)nbytes)) goto err; null_value=0; if (cs->coll->like_range(cs, res->ptr(), res->length(), - '\\', '_', '%', nbytes, + '\\', '_', '%', (size_t)nbytes, (char*) min_str.ptr(), (char*) max_str.ptr(), &min_len, &max_len)) goto err; @@ -3878,7 +3878,7 @@ String *Item_load_file::val_str(String *str) if ((file= mysql_file_open(key_file_loadfile, file_name->ptr(), O_RDONLY, MYF(0))) < 0) goto err; - if (mysql_file_read(file, (uchar*) tmp_value.ptr(), stat_info.st_size, + if (mysql_file_read(file, (uchar*) tmp_value.ptr(), (size_t)stat_info.st_size, MYF(MY_NABP))) { mysql_file_close(file, MYF(0)); @@ -4104,7 +4104,7 @@ String *Item_func_quote::val_str(String *str) if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) goto toolong; to+= mblen; - new_length= to - str->ptr(); + new_length= (uint)(to - str->ptr()); goto ret; } diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index d73bd0b76d7..1c35588f884 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -1451,6 +1451,8 @@ public: class Item_func_crc32 :public Item_long_func { + bool check_arguments() const + { return args[0]->check_type_can_return_str(func_name()); } String value; public: Item_func_crc32(THD *thd, Item *a): Item_long_func(thd, a) @@ -1462,11 +1464,12 @@ public: { return get_item_copy<Item_func_crc32>(thd, mem_root, this); } }; -class Item_func_uncompressed_length : public Item_long_func +class Item_func_uncompressed_length : public Item_long_func_length { String value; public: - Item_func_uncompressed_length(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_uncompressed_length(THD *thd, Item *a) + :Item_long_func_length(thd, a) {} const char *func_name() const{return "uncompressed_length";} void fix_length_and_dec() { max_length=10; maybe_null= true; } longlong val_int(); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 55196b451de..84e1aae1ad5 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -58,7 +58,7 @@ Item_subselect::Item_subselect(THD *thd_arg): changed(0), is_correlated(FALSE), with_recursive_reference(0) { DBUG_ENTER("Item_subselect::Item_subselect"); - DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this)); + DBUG_PRINT("enter", ("this: %p", this)); sortbuffer.str= 0; #ifndef DBUG_OFF @@ -84,8 +84,8 @@ void Item_subselect::init(st_select_lex *select_lex, */ DBUG_ENTER("Item_subselect::init"); - DBUG_PRINT("enter", ("select_lex: 0x%lx this: 0x%lx", - (ulong) select_lex, (ulong) this)); + DBUG_PRINT("enter", ("select_lex: %p this: %p", + select_lex, this)); unit= select_lex->master_unit(); if (unit->item) @@ -130,7 +130,7 @@ void Item_subselect::init(st_select_lex *select_lex, /* The subquery is an expression cache candidate */ upper->expr_cache_may_be_used[upper->parsing_place]= TRUE; } - DBUG_PRINT("info", ("engine: 0x%lx", (ulong)engine)); + DBUG_PRINT("info", ("engine: %p", engine)); DBUG_VOID_RETURN; } @@ -215,7 +215,7 @@ void Item_allany_subselect::cleanup() Item_subselect::~Item_subselect() { DBUG_ENTER("Item_subselect::~Item_subselect"); - DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this)); + DBUG_PRINT("enter", ("this: %p", this)); if (own_engine) delete engine; else diff --git a/sql/item_sum.cc 
b/sql/item_sum.cc index 755ed7d302b..d6e42efd11c 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -36,9 +36,9 @@ used in Item_sum_* */ -ulonglong Item_sum::ram_limitation(THD *thd) +size_t Item_sum::ram_limitation(THD *thd) { - return MY_MIN(thd->variables.tmp_memory_table_size, + return (size_t)MY_MIN(thd->variables.tmp_memory_table_size, thd->variables.max_heap_table_size); } diff --git a/sql/item_sum.h b/sql/item_sum.h index a160d0ee522..7845ed3318f 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -382,7 +382,7 @@ protected: */ Item **orig_args, *tmp_orig_args[2]; - static ulonglong ram_limitation(THD *thd); + static size_t ram_limitation(THD *thd); public: diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index f1f69d0723b..d00120018b8 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -715,7 +715,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, { const char *end=str+length; uint i; - long msec_length= 0; + int msec_length= 0; while (str != end && !my_isdigit(cs,*str)) str++; @@ -726,7 +726,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, const char *start= str; for (value= 0; str != end && my_isdigit(cs, *str); str++) value= value*10 + *str - '0'; - msec_length= 6 - (str - start); + msec_length= 6 - (int)(str - start); values[i]= value; while (str != end && !my_isdigit(cs,*str)) str++; @@ -1062,7 +1062,7 @@ longlong Item_func_week::val_int() if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE)) return 0; if (arg_count > 1) - week_format= args[1]->val_int(); + week_format= (uint)args[1]->val_int(); else week_format= current_thd->variables.default_week_format; return (longlong) calc_week(&ltime, week_mode(week_format), &year); @@ -2457,7 +2457,7 @@ String *Item_char_typecast::copy(String *str, CHARSET_INFO *strcs) null_value= 1; // EOM return 0; } - check_truncation_with_warn(str, copier.source_end_pos() - str->ptr()); + check_truncation_with_warn(str, (uint)(copier.source_end_pos() - str->ptr())); return &tmp_value; } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 20ecb4774b3..94794c6789d 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -33,8 +33,31 @@ enum date_time_format_types bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval); + +class Item_long_func_date_field: public Item_long_func +{ + bool check_arguments() const + { return args[0]->check_type_can_return_date(func_name()); } +public: + Item_long_func_date_field(THD *thd, Item *a) + :Item_long_func(thd, a) { } +}; + + +class Item_long_func_time_field: public Item_long_func +{ + bool check_arguments() const + { return args[0]->check_type_can_return_time(func_name()); } +public: + Item_long_func_time_field(THD *thd, Item *a) + :Item_long_func(thd, a) { } +}; + + class Item_func_period_add :public Item_long_func { + bool check_arguments() const + { return check_argument_types_can_return_int(0, 2); } public: Item_func_period_add(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} longlong val_int(); @@ -50,6 +73,8 @@ public: class Item_func_period_diff :public Item_long_func { + bool check_arguments() const + { return check_argument_types_can_return_int(0, 2); } public: Item_func_period_diff(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} longlong val_int(); @@ -64,10 +89,10 @@ public: }; -class Item_func_to_days :public Item_long_func +class Item_func_to_days :public Item_long_func_date_field { public: - Item_func_to_days(THD *thd, Item *a): Item_long_func(thd, 
a) {} + Item_func_to_days(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} longlong val_int(); const char *func_name() const { return "to_days"; } void fix_length_and_dec() @@ -91,6 +116,8 @@ public: class Item_func_to_seconds :public Item_longlong_func { + bool check_arguments() const + { return check_argument_types_can_return_date(0, arg_count); } public: Item_func_to_seconds(THD *thd, Item *a): Item_longlong_func(thd, a) {} longlong val_int(); @@ -115,10 +142,10 @@ public: }; -class Item_func_dayofmonth :public Item_long_func +class Item_func_dayofmonth :public Item_long_func_date_field { public: - Item_func_dayofmonth(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_dayofmonth(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} longlong val_int(); const char *func_name() const { return "dayofmonth"; } void fix_length_and_dec() @@ -195,10 +222,10 @@ public: }; -class Item_func_dayofyear :public Item_long_func +class Item_func_dayofyear :public Item_long_func_date_field { public: - Item_func_dayofyear(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_dayofyear(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} longlong val_int(); const char *func_name() const { return "dayofyear"; } void fix_length_and_dec() @@ -218,10 +245,10 @@ public: }; -class Item_func_hour :public Item_long_func +class Item_func_hour :public Item_long_func_time_field { public: - Item_func_hour(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_hour(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} longlong val_int(); const char *func_name() const { return "hour"; } void fix_length_and_dec() @@ -241,10 +268,10 @@ public: }; -class Item_func_minute :public Item_long_func +class Item_func_minute :public Item_long_func_time_field { public: - Item_func_minute(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_minute(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} longlong val_int(); const char *func_name() const { return "minute"; } void fix_length_and_dec() @@ -264,10 +291,10 @@ public: }; -class Item_func_quarter :public Item_long_func +class Item_func_quarter :public Item_long_func_date_field { public: - Item_func_quarter(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_quarter(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} longlong val_int(); const char *func_name() const { return "quarter"; } void fix_length_and_dec() @@ -287,10 +314,10 @@ public: }; -class Item_func_second :public Item_long_func +class Item_func_second :public Item_long_func_time_field { public: - Item_func_second(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_second(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} longlong val_int(); const char *func_name() const { return "second"; } void fix_length_and_dec() @@ -312,6 +339,11 @@ public: class Item_func_week :public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_can_return_date(func_name()) || + (arg_count > 1 && args[1]->check_type_can_return_int(func_name())); + } public: Item_func_week(THD *thd, Item *a): Item_long_func(thd, a) {} Item_func_week(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} @@ -339,8 +371,14 @@ public: class Item_func_yearweek :public Item_long_func { + bool check_arguments() const + { + return args[0]->check_type_can_return_date(func_name()) || + args[1]->check_type_can_return_int(func_name()); + } public: - Item_func_yearweek(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} + Item_func_yearweek(THD *thd, Item *a, Item 
*b) + :Item_long_func(thd, a, b) {} longlong val_int(); const char *func_name() const { return "yearweek"; } void fix_length_and_dec() @@ -360,10 +398,10 @@ public: }; -class Item_func_year :public Item_long_func +class Item_func_year :public Item_long_func_date_field { public: - Item_func_year(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_year(THD *thd, Item *a): Item_long_func_date_field(thd, a) {} longlong val_int(); const char *func_name() const { return "year"; } enum_monotonicity_info get_monotonicity_info() const; @@ -455,7 +493,11 @@ public: } double real_op() { DBUG_ASSERT(0); return 0; } String *str_op(String *str) { DBUG_ASSERT(0); return 0; } - bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; } + bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate) + { + DBUG_ASSERT(0); + return true; + } }; @@ -780,6 +822,8 @@ public: class Item_func_from_days :public Item_datefunc { + bool check_arguments() const + { return args[0]->check_type_can_return_int(func_name()); } public: Item_func_from_days(THD *thd, Item *a): Item_datefunc(thd, a) {} const char *func_name() const { return "from_days"; } @@ -836,6 +880,8 @@ public: class Item_func_from_unixtime :public Item_datetimefunc { + bool check_arguments() const + { return args[0]->check_type_can_return_decimal(func_name()); } Time_zone *tz; public: Item_func_from_unixtime(THD *thd, Item *a): Item_datetimefunc(thd, a) {} @@ -863,6 +909,11 @@ class Time_zone; */ class Item_func_convert_tz :public Item_datetimefunc { + bool check_arguments() const + { + return args[0]->check_type_can_return_date(func_name()) || + check_argument_types_can_return_text(1, arg_count); + } /* If time zone parameters are constants we are caching objects that represent them (we use separate from_tz_cached/to_tz_cached members @@ -889,6 +940,8 @@ class Item_func_convert_tz :public Item_datetimefunc class Item_func_sec_to_time :public Item_timefunc { + bool check_arguments() const + { return args[0]->check_type_can_return_decimal(func_name()); } public: Item_func_sec_to_time(THD *thd, Item *item): Item_timefunc(thd, item) {} bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); @@ -1128,6 +1181,8 @@ public: class Item_func_makedate :public Item_datefunc { + bool check_arguments() const + { return check_argument_types_can_return_int(0, arg_count); } public: Item_func_makedate(THD *thd, Item *a, Item *b): Item_datefunc(thd, a, b) {} @@ -1157,6 +1212,8 @@ public: class Item_func_timediff :public Item_timefunc { + bool check_arguments() const + { return check_argument_types_can_return_time(0, arg_count); } public: Item_func_timediff(THD *thd, Item *a, Item *b): Item_timefunc(thd, a, b) {} const char *func_name() const { return "timediff"; } @@ -1173,6 +1230,11 @@ public: class Item_func_maketime :public Item_timefunc { + bool check_arguments() const + { + return check_argument_types_can_return_int(0, 2) || + args[2]->check_type_can_return_decimal(func_name()); + } public: Item_func_maketime(THD *thd, Item *a, Item *b, Item *c): Item_timefunc(thd, a, b, c) @@ -1189,10 +1251,10 @@ public: }; -class Item_func_microsecond :public Item_long_func +class Item_func_microsecond :public Item_long_func_time_field { public: - Item_func_microsecond(THD *thd, Item *a): Item_long_func(thd, a) {} + Item_func_microsecond(THD *thd, Item *a): Item_long_func_time_field(thd, a) {} longlong val_int(); const char *func_name() const { return "microsecond"; } void fix_length_and_dec() @@ -1214,6 +1276,8 @@ public: class Item_func_timestamp_diff :public 
Item_longlong_func { + bool check_arguments() const + { return check_argument_types_can_return_date(0, arg_count); } const interval_type int_type; public: Item_func_timestamp_diff(THD *thd, Item *a, Item *b, interval_type type_arg): @@ -1279,6 +1343,8 @@ public: class Item_func_last_day :public Item_datefunc { + bool check_arguments() const + { return args[0]->check_type_can_return_date(func_name()); } public: Item_func_last_day(THD *thd, Item *a): Item_datefunc(thd, a) {} const char *func_name() const { return "last_day"; } diff --git a/sql/key.cc b/sql/key.cc index 9e44bc2e6b0..3ee083e560f 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -374,7 +374,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, { const char *tmp_end= tmp.ptr() + tmp.length(); while (tmp_end > tmp.ptr() && !*--tmp_end) ; - tmp.length(tmp_end - tmp.ptr() + 1); + tmp.length((uint32)(tmp_end - tmp.ptr() + 1)); } if (cs->mbmaxlen > 1 && prefix_key) { diff --git a/sql/lock.cc b/sql/lock.cc index a45b93a0260..6fa68786b93 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -827,7 +827,7 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags) we may allocate too much, but better safe than memory overrun. And in the FLUSH case, the memory is released quickly anyway. */ - sql_lock->lock_count= locks - locks_buf; + sql_lock->lock_count= (uint)(locks - locks_buf); DBUG_ASSERT(sql_lock->lock_count <= lock_count); DBUG_PRINT("info", ("sql_lock->table_count %d sql_lock->lock_count %d", sql_lock->table_count, sql_lock->lock_count)); diff --git a/sql/log.cc b/sql/log.cc index 948bd7844bd..450f677d363 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1328,7 +1328,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length, } /* fill in user_host value: the format is "%s[%s] @ %s [%s]" */ - user_host_len= (strxnmov(user_host_buff, MAX_USER_HOST_SIZE, + user_host_len= (uint)(strxnmov(user_host_buff, MAX_USER_HOST_SIZE, sctx->priv_user, "[", sctx->user ? sctx->user : (thd->slave_thread ? "SQL_SLAVE" : ""), "] @ ", sctx->host ? 
sctx->host : "", " [", @@ -5310,7 +5310,7 @@ bool MYSQL_BIN_LOG::write_event_buffer(uchar* buf, uint len) if (!ebuf) goto err; - crypto.set_iv(iv, my_b_append_tell(&log_file)); + crypto.set_iv(iv, (uint32)my_b_append_tell(&log_file)); /* we want to encrypt everything, excluding the event length: @@ -5537,9 +5537,9 @@ binlog_cache_mngr *THD::binlog_setup_trx_data() cache_mngr= (binlog_cache_mngr*) my_malloc(sizeof(binlog_cache_mngr), MYF(MY_ZEROFILL)); if (!cache_mngr || open_cached_file(&cache_mngr->stmt_cache.cache_log, mysql_tmpdir, - LOG_PREFIX, binlog_stmt_cache_size, MYF(MY_WME)) || + LOG_PREFIX, (size_t)binlog_stmt_cache_size, MYF(MY_WME)) || open_cached_file(&cache_mngr->trx_cache.cache_log, mysql_tmpdir, - LOG_PREFIX, binlog_cache_size, MYF(MY_WME))) + LOG_PREFIX, (size_t)binlog_cache_size, MYF(MY_WME))) { my_free(cache_mngr); DBUG_RETURN(0); // Didn't manage to set it up @@ -5668,8 +5668,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional, { int error; DBUG_ENTER("THD::binlog_write_table_map"); - DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)", - (long) table, table->s->table_name.str, + DBUG_PRINT("enter", ("table: %p (%s: #%lu)", + table, table->s->table_name.str, table->s->table_map_id)); /* Ensure that all events in a GTID group are in the same cache */ @@ -5818,7 +5818,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, { DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)"); DBUG_ASSERT(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()); - DBUG_PRINT("enter", ("event: 0x%lx", (long) event)); + DBUG_PRINT("enter", ("event: %p", event)); int error= 0; binlog_cache_mngr *const cache_mngr= @@ -5829,7 +5829,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd, binlog_cache_data *cache_data= cache_mngr->get_binlog_cache_data(use_trans_cache(thd, is_transactional)); - DBUG_PRINT("info", ("cache_mngr->pending(): 0x%lx", (long) cache_data->pending())); + DBUG_PRINT("info", ("cache_mngr->pending(): %p", cache_data->pending())); if (Rows_log_event* pending= cache_data->pending()) { diff --git a/sql/log_event.cc b/sql/log_event.cc index 73987a562fa..21cc26905a9 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -838,19 +838,19 @@ query_event_uncompress(const Format_description_log_event *description_event, if (end <= tmp) return 1; - int32 comp_len = len - (tmp - src) - - (contain_checksum ? BINLOG_CHECKSUM_LEN : 0); + int32 comp_len = (int32)(len - (tmp - src) - + (contain_checksum ? BINLOG_CHECKSUM_LEN : 0)); uint32 un_len = binlog_get_uncompress_len(tmp); // bad event if (comp_len < 0 || un_len == 0) return 1; - *newlen = (tmp - src) + un_len; + *newlen = (ulong)(tmp - src) + un_len; if(contain_checksum) *newlen += BINLOG_CHECKSUM_LEN; - uint32 alloc_size = ALIGN_SIZE(*newlen); + uint32 alloc_size = (uint32)ALIGN_SIZE(*newlen); char *new_dst = NULL; @@ -963,17 +963,17 @@ row_log_event_uncompress(const Format_description_log_event *description_event, if (un_len == 0) return 1; - long comp_len = len - (tmp - src) - - (contain_checksum ? BINLOG_CHECKSUM_LEN : 0); + int32 comp_len = (int32)(len - (tmp - src) - + (contain_checksum ? 
BINLOG_CHECKSUM_LEN : 0)); //bad event if (comp_len <=0) return 1; - *newlen = (tmp - src) + un_len; + *newlen = ulong(tmp - src) + un_len; if(contain_checksum) *newlen += BINLOG_CHECKSUM_LEN; - uint32 alloc_size = ALIGN_SIZE(*newlen); + size_t alloc_size = ALIGN_SIZE(*newlen); *is_malloc = false; if (alloc_size <= buf_size) @@ -1626,7 +1626,7 @@ int Log_event_writer::write_header(uchar *pos, size_t len) if (ctx) { uchar iv[BINLOG_IV_LENGTH]; - crypto->set_iv(iv, my_b_safe_tell(file)); + crypto->set_iv(iv, (uint32)my_b_safe_tell(file)); if (encryption_ctx_init(ctx, crypto->key, crypto->key_length, iv, sizeof(iv), ENCRYPTION_FLAG_ENCRYPT | ENCRYPTION_FLAG_NOPAD, ENCRYPTION_KEY_SYSTEM_DATA, crypto->key_version)) @@ -2427,9 +2427,9 @@ void Log_event::print_header(IO_CACHE* file, if (checksum_alg != BINLOG_CHECKSUM_ALG_OFF && checksum_alg != BINLOG_CHECKSUM_ALG_UNDEF) { - char checksum_buf[BINLOG_CHECKSUM_LEN * 2 + 4]; // to fit to "0x%lx " + char checksum_buf[BINLOG_CHECKSUM_LEN * 2 + 4]; // to fit to "%p " size_t const bytes_written= - my_snprintf(checksum_buf, sizeof(checksum_buf), "0x%08lx ", (ulong) crc); + my_snprintf(checksum_buf, sizeof(checksum_buf), "0x%08x ", crc); my_b_printf(file, "%s ", get_type(&binlog_checksum_typelib, checksum_alg)); my_b_printf(file, checksum_buf, bytes_written); } @@ -3968,7 +3968,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, db(thd_arg->db), q_len((uint32) query_length), thread_id(thd_arg->thread_id), /* save the original thread id; we already know the server id */ - slave_proxy_id(thd_arg->variables.pseudo_thread_id), + slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id), flags2_inited(1), sql_mode_inited(1), charset_inited(1), sql_mode(thd_arg->variables.sql_mode), auto_increment_increment(thd_arg->variables.auto_increment_increment), @@ -4172,7 +4172,7 @@ get_str_len_and_pointer(const Log_event::Byte **src, if (length > 0) { if (*src + length >= end) - return *src + length - end + 1; // Number of bytes missing + return (int)(*src + length - end + 1); // Number of bytes missing *dst= (char *)*src + 1; // Will be copied later } *len= length; @@ -4273,7 +4273,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, data_len = event_len - (common_header_len + post_header_len); buf+= common_header_len; - slave_proxy_id= thread_id = uint4korr(buf + Q_THREAD_ID_OFFSET); + thread_id = slave_proxy_id = uint4korr(buf + Q_THREAD_ID_OFFSET); exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET); db_len = (uchar)buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars error_code = uint2korr(buf + Q_ERR_CODE_OFFSET); @@ -4347,8 +4347,8 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, break; } case Q_CATALOG_NZ_CODE: - DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos: 0x%lx; end: 0x%lx", - (ulong) pos, (ulong) end)); + DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos:%p; end:%p", + pos, end)); if (get_str_len_and_pointer(&pos, &catalog, &catalog_len, end)) { DBUG_PRINT("info", ("query= 0")); @@ -6490,7 +6490,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, thd_arg->thread_specific_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0, using_trans), thread_id(thd_arg->thread_id), - slave_proxy_id(thd_arg->variables.pseudo_thread_id), + slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id), num_fields(0),fields(0), field_lens(0),field_block_len(0), table_name(table_name_arg ? 
table_name_arg : ""), @@ -6614,7 +6614,7 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, char* buf_end = (char*)buf + event_len; /* this is the beginning of the post-header */ const char* data_head = buf + description_event->common_header_len; - slave_proxy_id= thread_id= uint4korr(data_head + L_THREAD_ID_OFFSET); + thread_id= slave_proxy_id= uint4korr(data_head + L_THREAD_ID_OFFSET); exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET); skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET); table_name_len = (uint)data_head[L_TBL_LEN_OFFSET]; @@ -8680,7 +8680,7 @@ User_var_log_event(const char* buf, uint event_len, Old events will not have this extra byte, thence, we keep the flags set to UNDEF_F. */ - uint bytes_read= ((val + val_len) - buf_start); + size_t bytes_read= ((val + val_len) - buf_start); #ifdef DBUG_ASSERT_EXISTS bool old_pre_checksum_fd= description_event->is_version_before_checksum( &description_event->server_version_split); @@ -10389,7 +10389,7 @@ void Rows_log_event::uncompress_buf() if (new_buf) { if(!binlog_buf_uncompress((char *)m_rows_buf, (char *)new_buf, - m_rows_cur - m_rows_buf, &un_len)) + (uint32)(m_rows_cur - m_rows_buf), &un_len)) { my_free(m_rows_buf); m_rows_buf = new_buf; @@ -10425,9 +10425,9 @@ int Rows_log_event::get_data_size() uchar *end= net_store_length(buf, m_width); DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", - return 6 + no_bytes_in_map(&m_cols) + (end - buf) + + return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) + (general_type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) + - (m_rows_cur - m_rows_buf);); + m_rows_cur - m_rows_buf);); int data_size= 0; Log_event_type type = get_type_code(); @@ -10463,7 +10463,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length) would save binlog space. TODO */ DBUG_ENTER("Rows_log_event::do_add_row_data"); - DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data, + DBUG_PRINT("enter", ("row_data:%p length: %lu", row_data, (ulong) length)); /* @@ -10843,8 +10843,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) table= m_table= rgi->m_table_map.get_table(m_table_id); - DBUG_PRINT("debug", ("m_table: 0x%lx, m_table_id: %lu%s", - (ulong) m_table, m_table_id, + DBUG_PRINT("debug", ("m_table:%p, m_table_id: %lu%s", + m_table, m_table_id, table && master_had_triggers ? " (master had triggers)" : "")); if (table) @@ -10964,8 +10964,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) m_curr_row_end. 
*/ - DBUG_PRINT("info", ("curr_row: 0x%lu; curr_row_end: 0x%lu; rows_end: 0x%lu", - (ulong) m_curr_row, (ulong) m_curr_row_end, (ulong) m_rows_end)); + DBUG_PRINT("info", ("curr_row: %p; curr_row_end: %p; rows_end:%p", + m_curr_row, m_curr_row_end, m_rows_end)); if (!m_curr_row_end && !error) error= unpack_current_row(rgi); diff --git a/sql/log_event.h b/sql/log_event.h index dbacb18528d..c8f3241cb3d 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -5127,7 +5127,7 @@ static inline bool copy_event_cache_to_string_and_reinit(IO_CACHE *cache, LEX_ST String tmp; reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE); - if (tmp.append(cache, cache->end_of_file)) + if (tmp.append(cache, (uint32)cache->end_of_file)) goto err; reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE); diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index a7f491671c1..2c079a34d56 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -364,10 +364,10 @@ copy_extra_record_fields(TABLE *table, my_ptrdiff_t master_fields) { DBUG_ENTER("copy_extra_record_fields(table, master_reclen, master_fields)"); - DBUG_PRINT("info", ("Copying to 0x%lx " + DBUG_PRINT("info", ("Copying to %p " "from field %lu at offset %lu " "to field %d at offset %lu", - (long) table->record[0], + table->record[0], (ulong) master_fields, (ulong) master_reclength, table->s->fields, table->s->reclength)); /* @@ -625,8 +625,8 @@ replace_record(THD *thd, TABLE *table, static int find_and_fetch_row(TABLE *table, uchar *key) { DBUG_ENTER("find_and_fetch_row(TABLE *table, uchar *key, uchar *record)"); - DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx", - (long) table, (long) key, (long) table->record[1])); + DBUG_PRINT("enter", ("table: %p, key: %p record: %p", + table, key, table->record[1])); DBUG_ASSERT(table->in_use != NULL); @@ -1254,8 +1254,8 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len, const uchar* const ptr_rows_data= (const uchar*) ptr_after_width; size_t const data_size= event_len - (ptr_rows_data - (const uchar *) buf); - DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu", - m_table_id, m_flags, m_width, (ulong) data_size)); + DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %zu", + m_table_id, m_flags, m_width, data_size)); DBUG_DUMP("rows_data", (uchar*) ptr_rows_data, data_size); m_rows_buf= (uchar*) my_malloc(data_size, MYF(MY_WME)); @@ -1290,8 +1290,8 @@ int Old_rows_log_event::get_data_size() uchar *end= net_store_length(buf, (m_width + 7) / 8); DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", - return 6 + no_bytes_in_map(&m_cols) + (end - buf) + - (m_rows_cur - m_rows_buf);); + return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) + + m_rows_cur - m_rows_buf);); int data_size= ROWS_HEADER_LEN; data_size+= no_bytes_in_map(&m_cols); data_size+= (uint) (end - buf); @@ -1310,8 +1310,8 @@ int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length) would save binlog space. TODO */ DBUG_ENTER("Old_rows_log_event::do_add_row_data"); - DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data, - (ulong) length)); + DBUG_PRINT("enter", ("row_data: %p length: %zu",row_data, + length)); /* Don't print debug messages when running valgrind since they can trigger false warnings. 
@@ -1599,8 +1599,8 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) */ DBUG_PRINT("info", ("error: %d", error)); - DBUG_PRINT("info", ("curr_row: 0x%lu; curr_row_end: 0x%lu; rows_end: 0x%lu", - (ulong) m_curr_row, (ulong) m_curr_row_end, (ulong) m_rows_end)); + DBUG_PRINT("info", ("curr_row: %p; curr_row_end:%p; rows_end: %p", + m_curr_row, m_curr_row_end, m_rows_end)); if (!m_curr_row_end && !error) unpack_current_row(rgi); diff --git a/sql/mf_iocache_encr.cc b/sql/mf_iocache_encr.cc index ae314d826a0..546e0fe03a0 100644 --- a/sql/mf_iocache_encr.cc +++ b/sql/mf_iocache_encr.cc @@ -57,7 +57,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count) if (info->seek_not_done) { - size_t wpos; + my_off_t wpos; pos_offset= pos_in_file % info->buffer_length; pos_in_file-= pos_offset; @@ -92,7 +92,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count) DBUG_RETURN(1); } - elength= wlength - (ebuffer - wbuffer); + elength= wlength - (uint)(ebuffer - wbuffer); set_iv(iv, pos_in_file, crypt_data->inbuf_counter); if (encryption_crypt(ebuffer, elength, info->buffer, &length, @@ -106,7 +106,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count) DBUG_ASSERT(length <= info->buffer_length); - copied= MY_MIN(Count, length - pos_offset); + copied= MY_MIN(Count, (size_t)(length - pos_offset)); memcpy(Buffer, info->buffer + pos_offset, copied); Count-= copied; @@ -120,7 +120,7 @@ static int my_b_encr_read(IO_CACHE *info, uchar *Buffer, size_t Count) if (wlength < crypt_data->block_length && pos_in_file < info->end_of_file) { - info->error= pos_in_file - old_pos_in_file; + info->error= (int)(pos_in_file - old_pos_in_file); DBUG_RETURN(1); } } while (Count); @@ -184,7 +184,7 @@ static int my_b_encr_write(IO_CACHE *info, const uchar *Buffer, size_t Count) my_errno= 1; DBUG_RETURN(info->error= -1); } - wlength= elength + ebuffer - wbuffer; + wlength= elength + (uint)(ebuffer - wbuffer); if (length == info->buffer_length) { diff --git a/sql/mysqld.cc b/sql/mysqld.cc index e1cdee8298f..8f9a62de4c6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1593,8 +1593,8 @@ static void close_connections(void) /* kill connection thread */ #if !defined(__WIN__) - DBUG_PRINT("quit", ("waiting for select thread: 0x%lx", - (ulong) select_thread)); + DBUG_PRINT("quit", ("waiting for select thread: %lu", + (ulong)select_thread)); mysql_mutex_lock(&LOCK_start_thread); while (select_thread_in_use) @@ -2897,7 +2897,7 @@ void signal_thd_deleted() void unlink_thd(THD *thd) { DBUG_ENTER("unlink_thd"); - DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); + DBUG_PRINT("enter", ("thd: %p", thd)); /* Do not decrement when its wsrep system thread. 
wsrep_applier is set for @@ -4880,7 +4880,7 @@ static void init_ssl() opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher, &error, opt_ssl_crl, opt_ssl_crlpath); - DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd)); + DBUG_PRINT("info",("ssl_acceptor_fd: %p", ssl_acceptor_fd)); if (!ssl_acceptor_fd) { sql_print_warning("Failed to setup SSL"); @@ -5031,7 +5031,8 @@ static int init_server_components() global_system_variables.query_cache_type= 1; } query_cache_init(); - query_cache_resize(query_cache_size); + DBUG_ASSERT(query_cache_size < ULONG_MAX); + query_cache_resize((ulong)query_cache_size); my_rnd_init(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2); setup_fpu(); init_thr_lock(); @@ -5878,7 +5879,7 @@ int mysqld_main(int argc, char **argv) ulonglong new_thread_stack_size; new_thread_stack_size= my_setstacksize(&connection_attrib, - my_thread_stack_size); + (size_t)my_thread_stack_size); if (new_thread_stack_size != my_thread_stack_size) SYSVAR_AUTOSIZE(my_thread_stack_size, new_thread_stack_size); @@ -7935,9 +7936,9 @@ static int show_table_definitions(THD *thd, SHOW_VAR *var, char *buff, static int show_flush_commands(THD *thd, SHOW_VAR *var, char *buff, enum enum_var_type scope) { - var->type= SHOW_LONG; + var->type= SHOW_LONGLONG; var->value= buff; - *((long *) buff)= (long) tdc_refresh_version(); + *((longlong *) buff)= (longlong)tdc_refresh_version(); return 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 01aff0a9e7d..404c5a1f6d3 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1321,7 +1321,7 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() file->ha_end_keyread(); if (free_file) { - DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file, + DBUG_PRINT("info", ("Freeing separate handler %p (free: %d)", file, free_file)); file->ha_external_lock(current_thd, F_UNLCK); file->ha_close(); @@ -1483,7 +1483,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler, in_ror_merged_scan= 1; if (reuse_handler) { - DBUG_PRINT("info", ("Reusing handler 0x%lx", (long) file)); + DBUG_PRINT("info", ("Reusing handler %p", file)); if (init()) { DBUG_RETURN(1); @@ -2827,7 +2827,7 @@ double records_in_column_ranges(PARAM *param, uint idx, /* Handle cases when we don't have a valid non-empty list of range */ if (!tree) - return HA_POS_ERROR; + return DBL_MAX; if (tree->type == SEL_ARG::IMPOSSIBLE) return (0L); @@ -2847,9 +2847,9 @@ double records_in_column_ranges(PARAM *param, uint idx, max_endp= range.end_key.length? 
&range.end_key : NULL; rows= get_column_range_cardinality(field, min_endp, max_endp, range.range_flag); - if (HA_POS_ERROR == rows) + if (DBL_MAX == rows) { - total_rows= HA_POS_ERROR; + total_rows= DBL_MAX; break; } total_rows += rows; @@ -3083,7 +3083,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond) else { rows= records_in_column_ranges(¶m, idx, key); - if (rows != HA_POS_ERROR) + if (rows != DBL_MAX) key->field->cond_selectivity= rows/table_records; } } @@ -4756,7 +4756,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, unique_calc_buff_size= Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records, param->table->file->ref_length, - param->thd->variables.sortbuff_size); + (size_t)param->thd->variables.sortbuff_size); if (param->imerge_cost_buff_size < unique_calc_buff_size) { if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root, @@ -4768,7 +4768,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, imerge_cost += Unique::get_use_cost(param->imerge_cost_buff, (uint)non_cpk_scan_records, param->table->file->ref_length, - param->thd->variables.sortbuff_size, + (size_t)param->thd->variables.sortbuff_size, TIME_FOR_COMPARE_ROWID, FALSE, NULL); DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)", @@ -5021,7 +5021,7 @@ typedef struct st_common_index_intersect_info PARAM *param; /* context info for range optimizations */ uint key_size; /* size of a ROWID element stored in Unique object */ uint compare_factor; /* 1/compare - cost to compare two ROWIDs */ - ulonglong max_memory_size; /* maximum space allowed for Unique objects */ + size_t max_memory_size; /* maximum space allowed for Unique objects */ ha_rows table_cardinality; /* estimate of the number of records in table */ double cutoff_cost; /* discard index intersects with greater costs */ INDEX_SCAN_INFO *cpk_scan; /* clustered primary key used in intersection */ @@ -5216,7 +5216,7 @@ bool prepare_search_best_index_intersect(PARAM *param, common->param= param; common->key_size= table->file->ref_length; common->compare_factor= TIME_FOR_COMPARE_ROWID; - common->max_memory_size= param->thd->variables.sortbuff_size; + common->max_memory_size= (size_t)param->thd->variables.sortbuff_size; common->cutoff_cost= cutoff_cost; common->cpk_scan= NULL; common->table_cardinality= @@ -5652,7 +5652,7 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr, uint *buff_elems= common_info->buff_elems; uint key_size= common_info->key_size; uint compare_factor= common_info->compare_factor; - ulonglong max_memory_size= common_info->max_memory_size; + size_t max_memory_size= common_info->max_memory_size; records_sent_to_unique+= ext_index_scan_records; cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size, @@ -10239,7 +10239,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) ulong count=count_key_part_usage(root,pos->next_key_part); if (count > pos->next_key_part->use_count) { - sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu " + sql_print_information("Use_count: Wrong count for key at %p, %lu " "should be %lu", (long unsigned int)pos, pos->next_key_part->use_count, count); return; @@ -10248,7 +10248,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) } } if (e_count != elements) - sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx", + sql_print_warning("Wrong use count: %u (should be %u) for tree at %p", e_count, elements, (long unsigned int) this); } #endif 
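
Two patterns dominate the opt_range.cc hunks above. First, records_in_column_ranges() and its callers switch the "unknown cardinality" sentinel from HA_POS_ERROR to DBL_MAX, presumably because the function computes in double and comparing a double against the ulonglong HA_POS_ERROR constant goes through a lossy implicit conversion. Second, and throughout the rest of this diff, DBUG_PRINT format strings stop casting pointers to (ulong) for "0x%lx" and pass them straight to "%p" instead. The sketch below is standalone and illustrative, not code from the tree; it shows why the old form is unsafe: on LLP64 targets such as 64-bit Windows, unsigned long is 32 bits, so the cast drops the upper half of the address.

  #include <cstdio>
  #include <cstdint>

  int main()
  {
    int object= 42;
    int *ptr= &object;

    /* Old style: cast the pointer to an integer and format it by hand.
       On LLP64 ABIs sizeof(unsigned long) == 4, so the high 32 bits of the
       address are lost before they ever reach the format string. */
    printf("old: 0x%lx\n", (unsigned long) ptr);

    /* New style: "%p" takes the pointer as-is (via void *), with no cast,
       no truncation and no format/conversion warnings on any platform. */
    printf("new: %p\n", (void *) ptr);

    /* If an integer representation is genuinely needed, uintptr_t is the
       lossless choice. */
    printf("as integer: 0x%llx\n", (unsigned long long) (uintptr_t) ptr);
    return 0;
  }
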
@@ -10934,7 +10934,7 @@ int read_keys_and_merge_scans(THD *thd, unique= new Unique(refpos_order_cmp, (void *)file, file->ref_length, - thd->variables.sortbuff_size, + (size_t)thd->variables.sortbuff_size, intersection ? quick_selects.elements : 0); if (!unique) goto err; @@ -14634,7 +14634,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, if (!tmp.length()) tmp.append(STRING_WITH_LEN("(empty)")); - DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, + DBUG_PRINT("info", ("SEL_TREE: %p (%s) scans: %s", tree, msg, tmp.c_ptr_safe())); DBUG_VOID_RETURN; diff --git a/sql/parse_file.cc b/sql/parse_file.cc index 1cc40e3e39b..ebb08a23009 100644 --- a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -258,9 +258,9 @@ sql_create_definition_file(const LEX_CSTRING *dir, int path_end; File_option *param; DBUG_ENTER("sql_create_definition_file"); - DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx", + DBUG_PRINT("enter", ("Dir: %s, file: %s, base %p", dir ? dir->str : "", - file_name->str, (ulong) base)); + file_name->str, base)); if (dir) { @@ -437,7 +437,7 @@ sql_parse_prepare(const LEX_CSTRING *file_name, MEM_ROOT *mem_root, DBUG_RETURN(0); } - if ((len= mysql_file_read(file, (uchar *)buff, stat_info.st_size, + if ((len= mysql_file_read(file, (uchar *)buff, (size_t)stat_info.st_size, MYF(MY_WME))) == MY_FILE_ERROR) { mysql_file_close(file, MYF(MY_WME)); diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index 8af919990d0..d09b7aee31c 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -239,7 +239,7 @@ Rpl_filter::db_ok_with_wild_table(const char *db) int len; end= strmov(hash_key, db); *end++= '.'; - len= end - hash_key ; + len= (int)(end - hash_key); if (wild_do_table_inited && find_wild(&wild_do_table, hash_key, len)) { DBUG_PRINT("return",("1")); diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 65044daecc8..4a6e813d73b 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -1698,7 +1698,7 @@ rpl_parallel_thread::get_qev_common(Log_event *ev, ulonglong event_size) } qev->typ= rpl_parallel_thread::queued_event::QUEUED_EVENT; qev->ev= ev; - qev->event_size= event_size; + qev->event_size= (size_t)event_size; qev->next= NULL; return qev; } diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index 01d9da52d4e..9ff5004414c 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -105,10 +105,10 @@ pack_row(TABLE *table, MY_BITMAP const* cols, #endif pack_ptr= field->pack(pack_ptr, field->ptr + offset, field->max_data_length()); - DBUG_PRINT("debug", ("field: %s; real_type: %d, pack_ptr: 0x%lx;" - " pack_ptr':0x%lx; bytes: %d", + DBUG_PRINT("debug", ("field: %s; real_type: %d, pack_ptr: %p;" + " pack_ptr':%p; bytes: %d", field->field_name.str, field->real_type(), - (ulong) old_pack_ptr, (ulong) pack_ptr, + old_pack_ptr,pack_ptr, (int) (pack_ptr - old_pack_ptr))); DBUG_DUMP("packed_data", old_pack_ptr, pack_ptr - old_pack_ptr); } @@ -322,9 +322,9 @@ unpack_row(rpl_group_info *rgi, pack_ptr= f->unpack(f->ptr, pack_ptr, row_end, metadata); DBUG_PRINT("debug", ("field: %s; metadata: 0x%x;" - " pack_ptr: 0x%lx; pack_ptr': 0x%lx; bytes: %d", + " pack_ptr: %p; pack_ptr': %p; bytes: %d", f->field_name.str, metadata, - (ulong) old_pack_ptr, (ulong) pack_ptr, + old_pack_ptr, pack_ptr, (int) (pack_ptr - old_pack_ptr))); if (!pack_ptr) { @@ -336,11 +336,11 @@ unpack_row(rpl_group_info *rgi, Galera Node throws "Could not read field" error and drops out of cluster */ WSREP_WARN("ROW event unpack field: %s metadata: 0x%x;" - " pack_ptr: 0x%lx; 
conv_table %p conv_field %p table %s" - " row_end: 0x%lx", + " pack_ptr: %p; conv_table %p conv_field %p table %s" + " row_end: %p", f->field_name.str, metadata, - (ulong) old_pack_ptr, conv_table, conv_field, - (table_found) ? "found" : "not found", (ulong)row_end + old_pack_ptr, conv_table, conv_field, + (table_found) ? "found" : "not found", row_end ); } diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 8dff4146909..efb256fbe11 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -236,7 +236,7 @@ a file name for --relay-log-index option", opt_relaylog_index_name); mysql_mutex_lock(log_lock); if (relay_log.open_index_file(buf_relaylog_index_name, ln, TRUE) || relay_log.open(ln, LOG_BIN, 0, 0, SEQ_READ_APPEND, - max_relay_log_size, 1, TRUE)) + (ulong)max_relay_log_size, 1, TRUE)) { mysql_mutex_unlock(log_lock); mysql_mutex_unlock(&data_lock); @@ -1177,7 +1177,7 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, } mysql_mutex_lock(rli->relay_log.get_log_lock()); if (rli->relay_log.open(ln, LOG_BIN, 0, 0, SEQ_READ_APPEND, - (rli->max_relay_log_size ? rli->max_relay_log_size : + (ulong)(rli->max_relay_log_size ? rli->max_relay_log_size : max_binlog_size), 1, TRUE)) { sql_print_error("Unable to purge relay log files. Failed to open relay " @@ -1560,9 +1560,9 @@ scan_one_gtid_slave_pos_table(THD *thd, HASH *hash, DYNAMIC_ARRAY *array, goto end; } } - domain_id= (ulonglong)table->field[0]->val_int(); + domain_id= (uint32)table->field[0]->val_int(); sub_id= (ulonglong)table->field[1]->val_int(); - server_id= (ulonglong)table->field[2]->val_int(); + server_id= (uint32)table->field[2]->val_int(); seq_no= (ulonglong)table->field[3]->val_int(); DBUG_PRINT("info", ("Read slave state row: %u-%u-%lu sub_id=%lu\n", (unsigned)domain_id, (unsigned)server_id, diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc index 1f4aa45a101..80d093722f7 100644 --- a/sql/rpl_tblmap.cc +++ b/sql/rpl_tblmap.cc @@ -66,8 +66,8 @@ TABLE* table_mapping::get_table(ulong table_id) entry *e= find_entry(table_id); if (e) { - DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)", - table_id, (long) e->table, + DBUG_PRINT("info", ("tid %lu -> table %p (%s)", + table_id, e->table, MAYBE_TABLE_NAME(e->table))); DBUG_RETURN(e->table); } @@ -105,9 +105,9 @@ int table_mapping::expand() int table_mapping::set_table(ulong table_id, TABLE* table) { DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)"); - DBUG_PRINT("enter", ("table_id: %lu table: 0x%lx (%s)", + DBUG_PRINT("enter", ("table_id: %lu table: %p (%s)", table_id, - (long) table, MAYBE_TABLE_NAME(table))); + table, MAYBE_TABLE_NAME(table))); entry *e= find_entry(table_id); if (e == 0) { @@ -133,8 +133,8 @@ int table_mapping::set_table(ulong table_id, TABLE* table) DBUG_RETURN(ERR_MEMORY_ALLOCATION); } - DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)", - table_id, (long) e->table, + DBUG_PRINT("info", ("tid %lu -> table %p (%s)", + table_id, e->table, MAYBE_TABLE_NAME(e->table))); DBUG_RETURN(0); // All OK } diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index 8d64fb29332..ff9780a9531 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -967,7 +967,7 @@ bool Current_schema_tracker::update(THD *thd, set_var *) bool Current_schema_tracker::store(THD *thd, String *buf) { - ulonglong db_length, length; + size_t db_length, length; /* Protocol made (by unknown reasons) redundant: @@ -1320,16 +1320,16 @@ bool Transaction_state_tracker::store(THD *thd, String *buf) } { - ulonglong length= buf->length() - start; + size_t length= buf->length() - 
start; uchar *place= (uchar *)(buf->ptr() + (start - 2)); DBUG_ASSERT(length < 249); // in fact < 110 DBUG_ASSERT(start >= 3); DBUG_ASSERT((place - 1)[0] == SESSION_TRACK_TRANSACTION_CHARACTERISTICS); /* Length of the overall entity. */ - place[0]= length + 1; + place[0]= (uchar)length + 1; /* Transaction characteristics (length-encoded string). */ - place[1]= length; + place[1]= (uchar)length; } } diff --git a/sql/slave.cc b/sql/slave.cc index 5177ac0d1b4..46932e36e95 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3682,11 +3682,11 @@ sql_delay_event(Log_event *ev, THD *thd, rpl_group_info *rgi) "ev->when= %lu " "rli->mi->clock_diff_with_master= %lu " "now= %ld " - "sql_delay_end= %lu " + "sql_delay_end= %llu " "nap_time= %ld", sql_delay, (long)ev->when, rli->mi->clock_diff_with_master, - (long)now, sql_delay_end, (long)nap_time)); + (long)now, (ulonglong)sql_delay_end, (long)nap_time)); if (sql_delay_end > now) { @@ -4743,12 +4743,12 @@ Stopping slave I/O thread due to out-of-memory error from master"); lastchecktime = currenttime; if(tokenamount < network_read_len) { - ulonglong micro_time = 1000*1000 * (network_read_len - tokenamount) / speed_limit_in_bytes ; - ulonglong second_time = micro_time / (1000 * 1000); - micro_time = micro_time % (1000 * 1000); + ulonglong duration =1000ULL*1000 * (network_read_len - tokenamount) / speed_limit_in_bytes; + time_t second_time = (time_t)(duration / (1000 * 1000)); + uint micro_time = duration % (1000 * 1000); // at least sleep 1000 micro second - my_sleep(micro_time > 1000 ? micro_time : 1000); + my_sleep(MY_MAX(micro_time,1000)); /* If it sleep more than one second, diff --git a/sql/sp.cc b/sql/sp.cc index 8f4acd40f10..e2884f49d99 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -720,8 +720,8 @@ Sp_handler::db_find_and_cache_routine(THD *thd, if (rc == SP_OK) { sp_cache_insert(get_cache(thd), *sp); - DBUG_PRINT("info", ("added new: 0x%lx, level: %lu, flags %x", - (ulong) sp[0], sp[0]->m_recursion_level, + DBUG_PRINT("info", ("added new: %p, level: %lu, flags %x", + sp[0], sp[0]->m_recursion_level, sp[0]->m_flags)); } return rc; @@ -1775,11 +1775,11 @@ Sp_handler::sp_clone_and_link_routine(THD *thd, String retstr(64); retstr.set_charset(sp->get_creation_ctx()->get_client_cs()); - DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp)); + DBUG_PRINT("info", ("found: %p", sp)); if (sp->m_first_free_instance) { - DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x", - (ulong)sp->m_first_free_instance, + DBUG_PRINT("info", ("first free: %p level: %lu flags %x", + sp->m_first_free_instance, sp->m_first_free_instance->m_recursion_level, sp->m_first_free_instance->m_flags)); DBUG_ASSERT(!(sp->m_first_free_instance->m_flags & sp_head::IS_INVOKED)); @@ -1819,8 +1819,8 @@ Sp_handler::sp_clone_and_link_routine(THD *thd, new_sp->m_recursion_level= level; new_sp->m_first_instance= sp; sp->m_last_cached_sp= sp->m_first_free_instance= new_sp; - DBUG_PRINT("info", ("added level: 0x%lx, level: %lu, flags %x", - (ulong)new_sp, new_sp->m_recursion_level, + DBUG_PRINT("info", ("added level: %p, level: %lu, flags %x", + new_sp, new_sp->m_recursion_level, new_sp->m_flags)); DBUG_RETURN(new_sp); } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index ebfee30ecd3..0bc269c414c 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -515,7 +515,7 @@ sp_head::operator new(size_t size) throw() if (sp == NULL) DBUG_RETURN(NULL); sp->main_mem_root= own_root; - DBUG_PRINT("info", ("mem_root 0x%lx", (ulong) &sp->mem_root)); + DBUG_PRINT("info", ("mem_root %p", &sp->mem_root)); 
DBUG_RETURN(sp); } @@ -532,8 +532,8 @@ sp_head::operator delete(void *ptr, size_t size) throw() /* Make a copy of main_mem_root as free_root will free the sp */ own_root= sp->main_mem_root; - DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx", - (ulong) &sp->mem_root, (ulong) &own_root)); + DBUG_PRINT("info", ("mem_root %p moved to %p", + &sp->mem_root, &own_root)); free_root(&own_root, MYF(0)); DBUG_VOID_RETURN; @@ -1027,9 +1027,9 @@ sp_head::execute(THD *thd, bool merge_da_on_success) if (m_next_cached_sp) { DBUG_PRINT("info", - ("first free for 0x%lx ++: 0x%lx->0x%lx level: %lu flags %x", - (ulong)m_first_instance, (ulong) this, - (ulong) m_next_cached_sp, + ("first free for %p ++: %p->%p level: %lu flags %x", + m_first_instance, this, + m_next_cached_sp, m_next_cached_sp->m_recursion_level, m_next_cached_sp->m_flags)); } @@ -1333,10 +1333,10 @@ sp_head::execute(THD *thd, bool merge_da_on_success) } m_flags&= ~IS_INVOKED; DBUG_PRINT("info", - ("first free for 0x%lx --: 0x%lx->0x%lx, level: %lu, flags %x", - (ulong) m_first_instance, - (ulong) m_first_instance->m_first_free_instance, - (ulong) this, m_recursion_level, m_flags)); + ("first free for %p --: %p->%p, level: %lu, flags %x", + m_first_instance, + m_first_instance->m_first_free_instance, + this, m_recursion_level, m_flags)); /* Check that we have one of following: @@ -2324,8 +2324,8 @@ sp_head::backpatch(sp_label *lab) { if (bp->lab == lab) { - DBUG_PRINT("info", ("backpatch: (m_ip %d, label 0x%lx <%s>) to dest %d", - bp->instr->m_ip, (ulong) lab, lab->name.str, dest)); + DBUG_PRINT("info", ("backpatch: (m_ip %d, label %p <%s>) to dest %d", + bp->instr->m_ip, lab, lab->name.str, dest)); bp->instr->backpatch(dest, lab->ctx); } } @@ -2355,8 +2355,8 @@ sp_head::backpatch_goto(THD *thd, sp_label *lab,sp_label *lab_begin_block) if (bp->instr_type == GOTO) { DBUG_PRINT("info", - ("backpatch_goto: (m_ip %d, label 0x%lx <%s>) to dest %d", - bp->instr->m_ip, (ulong) lab, lab->name.str, dest)); + ("backpatch_goto: (m_ip %d, label %p <%s>) to dest %d", + bp->instr->m_ip, lab, lab->name.str, dest)); bp->instr->backpatch(dest, lab->ctx); // Jump resolved, remove from the list li.remove(); @@ -2504,8 +2504,8 @@ sp_head::reset_thd_mem_root(THD *thd) DBUG_ENTER("sp_head::reset_thd_mem_root"); m_thd_root= thd->mem_root; thd->mem_root= &main_mem_root; - DBUG_PRINT("info", ("mem_root 0x%lx moved to thd mem root 0x%lx", - (ulong) &mem_root, (ulong) &thd->mem_root)); + DBUG_PRINT("info", ("mem_root %p moved to thd mem root %p", + &mem_root, &thd->mem_root)); free_list= thd->free_list; // Keep the old list thd->free_list= NULL; // Start a new one m_thd= thd; @@ -2535,8 +2535,8 @@ sp_head::restore_thd_mem_root(THD *thd) set_query_arena(thd); // Get new free_list and mem_root state= STMT_INITIALIZED_FOR_SP; - DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", - (ulong) &mem_root, (ulong) &thd->mem_root)); + DBUG_PRINT("info", ("mem_root %p returned from thd mem root %p", + &mem_root, &thd->mem_root)); thd->free_list= flist; // Restore the old one thd->mem_root= m_thd_root; m_thd= NULL; diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h index 215ebbe5f77..9c879099410 100644 --- a/sql/sp_pcontext.h +++ b/sql/sp_pcontext.h @@ -433,11 +433,11 @@ public: /// @return the current number of variables used in the parent contexts /// (from the root), including this context. 
uint current_var_count() const - { return m_var_offset + m_vars.elements(); } + { return m_var_offset + (uint)m_vars.elements(); } /// @return the number of variables in this context alone. uint context_var_count() const - { return m_vars.elements(); } + { return (uint)m_vars.elements(); } /// return the i-th variable on the current context sp_variable *get_context_variable(uint i) const @@ -678,10 +678,10 @@ public: { return m_cursors.elements(); } uint max_cursor_index() const - { return m_max_cursor_index + m_cursors.elements(); } + { return m_max_cursor_index + (uint)m_cursors.elements(); } uint current_cursor_count() const - { return m_cursor_offset + m_cursors.elements(); } + { return m_cursor_offset + (uint)m_cursors.elements(); } void set_for_loop(const Lex_for_loop_st &for_loop) { diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index b96ded8cf80..70ab5a576b3 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -51,9 +51,7 @@ sp_rcontext::sp_rcontext(const sp_pcontext *root_parsing_ctx, sp_rcontext::~sp_rcontext() { - if (m_var_table) - free_blobs(m_var_table); - + delete m_var_table; // Leave m_handlers, m_handler_call_stack, m_var_items, m_cstack // and m_case_expr_holders untouched. // They are allocated in mem roots and will be freed accordingly. @@ -375,10 +373,16 @@ bool Item_spvar_args::row_create_items(THD *thd, List<Spvar_definition> *list) } +Field *Item_spvar_args::get_row_field(uint i) const +{ + DBUG_ASSERT(m_table); + return m_table->field[i]; +} + + Item_spvar_args::~Item_spvar_args() { - if (m_table) - free_blobs(m_table); + delete m_table; } diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h index 1b1329c17f0..66df0359d28 100644 --- a/sql/sp_rcontext.h +++ b/sql/sp_rcontext.h @@ -34,6 +34,7 @@ class sp_instr_cpush; class Query_arena; class sp_head; class Item_cache; +class Virtual_tmp_table; /* @@ -363,7 +364,7 @@ private: const sp_pcontext *m_root_parsing_ctx; /// Virtual table for storing SP-variables. - TABLE *m_var_table; + Virtual_tmp_table *m_var_table; /// Collection of Item_field proxies, each of them points to the /// corresponding field in m_var_table. 
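
The other change repeated across these files is the addition of explicit narrowing casts wherever a pointer difference or a size_t/ulonglong value is stored into a narrower integer: (uint)(end - hash_key) in rpl_filter.cc, (size_t)thd->variables.sortbuff_size in opt_range.cc, (uchar)length in session_tracker.cc, and (uint)(passwd - user - 1) in the sql_acl.cc hunk that follows. Pointer subtraction yields ptrdiff_t, which is 64 bits wide on the usual 64-bit ABIs, so these assignments warn under conversion checking even when the value is known to be small; the explicit cast silences the warning and records the assumption that the value fits. A minimal illustration follows; the function name and buffer are made up for the example, not taken from the tree.

  #include <cstddef>
  #include <cassert>

  /* Mirrors the (uint)(end - start) idiom used in the diff: the 64-bit
     ptrdiff_t result is checked against the assumed invariant and then
     narrowed explicitly instead of through an implicit (and warning-prone)
     conversion. */
  static unsigned int span_length(const char *start, const char *end)
  {
    ptrdiff_t diff= end - start;                 /* 64-bit on LP64/LLP64 */
    assert(diff >= 0 && diff <= 0xFFFFFFFF);     /* invariant the cast relies on */
    return (unsigned int) diff;                  /* explicit narrowing */
  }

  int main()
  {
    char key[]= "test.t1";
    return span_length(key, key + sizeof(key) - 1) == 7 ? 0 : 1;
  }

The sql_acl.cc change below goes one step further for data coming off the wire: the length-encoded password length is decoded into a wide ulonglong and rejected with packet_error if it exceeds the packet size before being narrowed to size_t, so a malformed length cannot be truncated into a plausible small value.
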
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index e61940ea82d..ae7dc336720 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -12359,7 +12359,7 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio) static bool read_client_connect_attrs(char **ptr, char *end, CHARSET_INFO *from_cs) { - size_t length; + ulonglong length; char *ptr_save= *ptr; /* not enough bytes to hold the length */ @@ -12381,10 +12381,10 @@ read_client_connect_attrs(char **ptr, char *end, CHARSET_INFO *from_cs) return true; #ifdef HAVE_PSI_THREAD_INTERFACE - if (PSI_THREAD_CALL(set_thread_connect_attrs)(*ptr, length, from_cs) && + if (PSI_THREAD_CALL(set_thread_connect_attrs)(*ptr, (size_t)length, from_cs) && current_thd->variables.log_warnings) - sql_print_warning("Connection attributes of length %lu were truncated", - (unsigned long) length); + sql_print_warning("Connection attributes of length %llu were truncated", + length); #endif return false; } @@ -12641,7 +12641,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, char *user= end; char *passwd= strend(user)+1; - uint user_len= passwd - user - 1, db_len; + uint user_len= (uint)(passwd - user - 1), db_len; char *db= passwd; char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 uint dummy_errors; @@ -12656,15 +12656,22 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, Cast *passwd to an unsigned char, so that it doesn't extend the sign for *passwd > 127 and become 2**32-127+ after casting to uint. */ - uint passwd_len; + ulonglong len; + size_t passwd_len; + if (!(thd->client_capabilities & CLIENT_SECURE_CONNECTION)) - passwd_len= strlen(passwd); + len= strlen(passwd); else if (!(thd->client_capabilities & CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA)) - passwd_len= (uchar)(*passwd++); + len= (uchar)(*passwd++); else - passwd_len= safe_net_field_length_ll((uchar**)&passwd, + { + len= safe_net_field_length_ll((uchar**)&passwd, net->read_pos + pkt_len - (uchar*)passwd); - + if (len > pkt_len) + return packet_error; + } + + passwd_len= (size_t)len; db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ? db + passwd_len + 1 : 0; diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index ce12b73a9e2..c81ca438bcc 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -590,7 +590,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, } } #endif - DBUG_PRINT("admin", ("table: 0x%lx", (long) table->table)); + DBUG_PRINT("admin", ("table: %p", table->table)); if (prepare_func) { diff --git a/sql/sql_audit.h b/sql/sql_audit.h index 4979375559b..7a11f5aa113 100644 --- a/sql/sql_audit.h +++ b/sql/sql_audit.h @@ -60,15 +60,26 @@ static inline void mysql_audit_notify(THD *thd, uint event_class, #endif extern void mysql_audit_release(THD *thd); +static inline unsigned int strlen_uint(const char *s) +{ + return (uint)strlen(s); +} + +static inline unsigned int safe_strlen_uint(const char *s) +{ + return (uint)safe_strlen(s); +} + #define MAX_USER_HOST_SIZE 512 static inline uint make_user_name(THD *thd, char *buf) { const Security_context *sctx= thd->security_ctx; - return strxnmov(buf, MAX_USER_HOST_SIZE, + char *end= strxnmov(buf, MAX_USER_HOST_SIZE, sctx->priv_user[0] ? sctx->priv_user : "", "[", sctx->user ? sctx->user : "", "] @ ", sctx->host ? sctx->host : "", " [", - sctx->ip ? sctx->ip : "", "]", NullS) - buf; + sctx->ip ? 
sctx->ip : "", "]", NullS); + return (uint)(end-buf); } /** @@ -110,7 +121,7 @@ void mysql_audit_general_log(THD *thd, time_t time, event.general_thread_id= (unsigned long)thd->thread_id; event.general_charset= thd->variables.character_set_client; event.database= thd->db; - event.database_length= thd->db_length; + event.database_length= (unsigned int)thd->db_length; event.query_id= thd->query_id; } else @@ -152,7 +163,7 @@ void mysql_audit_general(THD *thd, uint event_subtype, event.general_error_code= error_code; event.general_time= my_time(0); event.general_command= msg; - event.general_command_length= safe_strlen(msg); + event.general_command_length= safe_strlen_uint(msg); if (thd) { @@ -164,7 +175,7 @@ void mysql_audit_general(THD *thd, uint event_subtype, event.general_charset= thd->query_string.charset(); event.general_rows= thd->get_stmt_da()->current_row_for_warning(); event.database= thd->db; - event.database_length= thd->db_length; + event.database_length= (uint)thd->db_length; event.query_id= thd->query_id; } else @@ -199,19 +210,19 @@ void mysql_audit_notify_connection_connect(THD *thd) thd->get_stmt_da()->sql_errno() : 0; event.thread_id= (unsigned long)thd->thread_id; event.user= sctx->user; - event.user_length= safe_strlen(sctx->user); + event.user_length= safe_strlen_uint(sctx->user); event.priv_user= sctx->priv_user; - event.priv_user_length= strlen(sctx->priv_user); + event.priv_user_length= strlen_uint(sctx->priv_user); event.external_user= sctx->external_user; - event.external_user_length= safe_strlen(sctx->external_user); + event.external_user_length= safe_strlen_uint(sctx->external_user); event.proxy_user= sctx->proxy_user; - event.proxy_user_length= strlen(sctx->proxy_user); + event.proxy_user_length= strlen_uint(sctx->proxy_user); event.host= sctx->host; - event.host_length= safe_strlen(sctx->host); + event.host_length= safe_strlen_uint(sctx->host); event.ip= sctx->ip; - event.ip_length= safe_strlen(sctx->ip); + event.ip_length= safe_strlen_uint(sctx->ip); event.database= thd->db; - event.database_length= safe_strlen(thd->db); + event.database_length= safe_strlen_uint(thd->db); mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event); } @@ -229,19 +240,19 @@ void mysql_audit_notify_connection_disconnect(THD *thd, int errcode) event.status= errcode; event.thread_id= (unsigned long)thd->thread_id; event.user= sctx->user; - event.user_length= safe_strlen(sctx->user); + event.user_length= safe_strlen_uint(sctx->user); event.priv_user= sctx->priv_user; - event.priv_user_length= strlen(sctx->priv_user); + event.priv_user_length= strlen_uint(sctx->priv_user); event.external_user= sctx->external_user; - event.external_user_length= safe_strlen(sctx->external_user); + event.external_user_length= safe_strlen_uint(sctx->external_user); event.proxy_user= sctx->proxy_user; - event.proxy_user_length= strlen(sctx->proxy_user); + event.proxy_user_length= strlen_uint(sctx->proxy_user); event.host= sctx->host; - event.host_length= safe_strlen(sctx->host); + event.host_length= safe_strlen_uint(sctx->host); event.ip= sctx->ip; - event.ip_length= safe_strlen(sctx->ip) ; + event.ip_length= safe_strlen_uint(sctx->ip) ; event.database= thd->db; - event.database_length= safe_strlen(thd->db); + event.database_length= safe_strlen_uint(thd->db); mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event); } @@ -260,19 +271,19 @@ void mysql_audit_notify_connection_change_user(THD *thd) thd->get_stmt_da()->sql_errno() : 0; event.thread_id= (unsigned long)thd->thread_id; event.user= 
sctx->user; - event.user_length= safe_strlen(sctx->user); + event.user_length= safe_strlen_uint(sctx->user); event.priv_user= sctx->priv_user; - event.priv_user_length= strlen(sctx->priv_user); + event.priv_user_length= strlen_uint(sctx->priv_user); event.external_user= sctx->external_user; - event.external_user_length= safe_strlen(sctx->external_user); + event.external_user_length= safe_strlen_uint(sctx->external_user); event.proxy_user= sctx->proxy_user; - event.proxy_user_length= strlen(sctx->proxy_user); + event.proxy_user_length= strlen_uint(sctx->proxy_user); event.host= sctx->host; - event.host_length= safe_strlen(sctx->host); + event.host_length= safe_strlen_uint(sctx->host); event.ip= sctx->ip; - event.ip_length= safe_strlen(sctx->ip); + event.ip_length= safe_strlen_uint(sctx->ip); event.database= thd->db; - event.database_length= safe_strlen(thd->db); + event.database_length= safe_strlen_uint(thd->db); mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, &event); } @@ -297,9 +308,9 @@ void mysql_audit_external_lock(THD *thd, TABLE_SHARE *share, int lock) event.host= sctx->host; event.ip= sctx->ip; event.database= share->db.str; - event.database_length= share->db.length; + event.database_length= (unsigned int)share->db.length; event.table= share->table_name.str; - event.table_length= share->table_name.length; + event.table_length= (unsigned int)share->table_name.length; event.new_database= 0; event.new_database_length= 0; event.new_table= 0; @@ -331,9 +342,9 @@ void mysql_audit_create_table(TABLE *table) event.host= sctx->host; event.ip= sctx->ip; event.database= share->db.str; - event.database_length= share->db.length; + event.database_length= (unsigned int)share->db.length; event.table= share->table_name.str; - event.table_length= share->table_name.length; + event.table_length= (unsigned int)share->table_name.length; event.new_database= 0; event.new_database_length= 0; event.new_table= 0; @@ -363,9 +374,9 @@ void mysql_audit_drop_table(THD *thd, TABLE_LIST *table) event.host= sctx->host; event.ip= sctx->ip; event.database= table->db; - event.database_length= table->db_length; + event.database_length= (unsigned int)table->db_length; event.table= table->table_name; - event.table_length= table->table_name_length; + event.table_length= (unsigned int)table->table_name_length; event.new_database= 0; event.new_database_length= 0; event.new_table= 0; @@ -396,13 +407,13 @@ void mysql_audit_rename_table(THD *thd, const char *old_db, const char *old_tb, event.host= sctx->host; event.ip= sctx->ip; event.database= old_db; - event.database_length= strlen(old_db); + event.database_length= strlen_uint(old_db); event.table= old_tb; - event.table_length= strlen(old_tb); + event.table_length= strlen_uint(old_tb); event.new_database= new_db; - event.new_database_length= strlen(new_db); + event.new_database_length= strlen_uint(new_db); event.new_table= new_tb; - event.new_table_length= strlen(new_tb); + event.new_table_length= strlen_uint(new_tb); event.query_id= thd->query_id; mysql_audit_notify(thd, MYSQL_AUDIT_TABLE_CLASS, &event); @@ -428,9 +439,9 @@ void mysql_audit_alter_table(THD *thd, TABLE_LIST *table) event.host= sctx->host; event.ip= sctx->ip; event.database= table->db; - event.database_length= table->db_length; + event.database_length= (unsigned int)table->db_length; event.table= table->table_name; - event.table_length= table->table_name_length; + event.table_length= (unsigned int)table->table_name_length; event.new_database= 0; event.new_database_length= 0; event.new_table= 0; diff 
--git a/sql/sql_base.cc b/sql/sql_base.cc index 93dd6239749..e59007d3e5a 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -325,7 +325,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild) struct close_cached_tables_arg { - ulong refresh_version; + tdc_version_t refresh_version; TDC_element *element; }; @@ -351,7 +351,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, { bool result= FALSE; struct timespec abstime; - ulong refresh_version; + tdc_version_t refresh_version; DBUG_ENTER("close_cached_tables"); DBUG_ASSERT(thd || (!wait_for_refresh && !tables)); @@ -716,8 +716,8 @@ void close_thread_tables(THD *thd) #ifdef EXTRA_DEBUG DBUG_PRINT("tcache", ("open tables:")); for (table= thd->open_tables; table; table= table->next) - DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str, - table->s->table_name.str, (long) table)); + DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str, + table->s->table_name.str, table)); #endif #if defined(ENABLED_DEBUG_SYNC) @@ -857,8 +857,8 @@ void close_thread_table(THD *thd, TABLE **table_ptr) { TABLE *table= *table_ptr; DBUG_ENTER("close_thread_table"); - DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str, - table->s->table_name.str, (long) table)); + DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str, + table->s->table_name.str, table)); DBUG_ASSERT(!table->file->keyread_enabled()); DBUG_ASSERT(!table->file || table->file->inited == handler::NONE); @@ -1192,8 +1192,8 @@ bool wait_while_table_is_used(THD *thd, TABLE *table, { DBUG_ENTER("wait_while_table_is_used"); DBUG_ASSERT(!table->s->tmp_table); - DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu", - table->s->table_name.str, (ulong) table->s, + DBUG_PRINT("enter", ("table: '%s' share: %p db_stat: %u version: %lld", + table->s->table_name.str, table->s, table->db_stat, table->s->tdc->version)); if (thd->mdl_context.upgrade_shared_lock( @@ -1812,7 +1812,7 @@ retry_share: { if (share->tdc->flushed) { - DBUG_PRINT("info", ("Found old share version: %lu current: %lu", + DBUG_PRINT("info", ("Found old share version: %lld current: %lld", share->tdc->version, tdc_refresh_version())); /* We already have an MDL lock. 
But we have encountered an old @@ -5244,8 +5244,8 @@ find_field_in_view(THD *thd, TABLE_LIST *table_list, { DBUG_ENTER("find_field_in_view"); DBUG_PRINT("enter", - ("view: '%s', field name: '%s', item name: '%s', ref 0x%lx", - table_list->alias, name, item_name, (ulong) ref)); + ("view: '%s', field name: '%s', item name: '%s', ref %p", + table_list->alias, name, item_name, ref)); Field_iterator_view field_it; field_it.set(table_list); Query_arena *arena= 0, backup; @@ -5329,8 +5329,8 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name, Field *UNINIT_VAR(found_field); Query_arena *UNINIT_VAR(arena), backup; DBUG_ENTER("find_field_in_natural_join"); - DBUG_PRINT("enter", ("field name: '%s', ref 0x%lx", - name, (ulong) ref)); + DBUG_PRINT("enter", ("field name: '%s', ref %p", + name, ref)); DBUG_ASSERT(table_ref->is_natural_join && table_ref->join_columns); DBUG_ASSERT(*actual_table == NULL); @@ -5556,8 +5556,8 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, DBUG_ASSERT(name); DBUG_ASSERT(item_name); DBUG_PRINT("enter", - ("table: '%s' field name: '%s' item name: '%s' ref 0x%lx", - table_list->alias, name, item_name, (ulong) ref)); + ("table: '%s' field name: '%s' item name: '%s' ref %p", + table_list->alias, name, item_name, ref)); /* Check that the table and database that qualify the current field name @@ -7456,7 +7456,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, bool found; char name_buff[SAFE_NAME_LEN+1]; DBUG_ENTER("insert_fields"); - DBUG_PRINT("arena", ("stmt arena: 0x%lx", (ulong)thd->stmt_arena)); + DBUG_PRINT("arena", ("stmt arena: %p",thd->stmt_arena)); if (db_name && lower_case_table_names) { diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index d9d057938eb..aa1a1cd6941 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -192,8 +192,8 @@ void mysql_client_binlog_statement(THD* thd) since it will read from unassigned memory. 
*/ DBUG_PRINT("info", - ("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)", - bytes_decoded, (long) strptr, (long) endptr, *endptr, + ("bytes_decoded: %d strptr: %p endptr: %p ('%c':%d)", + bytes_decoded, strptr, endptr, *endptr, *endptr)); #endif diff --git a/sql/sql_bootstrap.cc b/sql/sql_bootstrap.cc index 533459365af..ce7d7a9fc93 100644 --- a/sql/sql_bootstrap.cc +++ b/sql/sql_bootstrap.cc @@ -24,8 +24,8 @@ int read_bootstrap_query(char *query, int *query_length, { char line_buffer[MAX_BOOTSTRAP_LINE_SIZE]; const char *line; - int len; - int query_len= 0; + size_t len; + size_t query_len= 0; int fgets_error= 0; *error= 0; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index ed5a8f84127..e09b1788441 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -355,27 +355,27 @@ const uchar *query_state_map; #endif #if defined(EXTRA_DEBUG) && !defined(DBUG_OFF) -#define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock 0x%lx",(ulong)(M))); \ +#define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock %p",(M))); \ if (!mysql_rwlock_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")); \ else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); } -#define RW_RLOCK(M) {DBUG_PRINT("lock", ("rwlock rlock 0x%lx", (ulong)(M))); \ +#define RW_RLOCK(M) {DBUG_PRINT("lock", ("rwlock rlock %p",(M))); \ if (!mysql_rwlock_rdlock(M)) DBUG_PRINT("lock", ("rwlock rlock ok")); \ else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); } -#define RW_UNLOCK(M) {DBUG_PRINT("lock", ("rwlock unlock 0x%lx",(ulong)(M))); \ +#define RW_UNLOCK(M) {DBUG_PRINT("lock", ("rwlock unlock %p",(M))); \ if (!mysql_rwlock_unlock(M)) DBUG_PRINT("lock", ("rwlock unlock ok")); \ else DBUG_PRINT("lock", ("rwlock unlock FAILED %d", errno)); } -#define BLOCK_LOCK_WR(B) {DBUG_PRINT("lock", ("%d LOCK_WR 0x%lx",\ - __LINE__,(ulong)(B))); \ +#define BLOCK_LOCK_WR(B) {DBUG_PRINT("lock", ("%d LOCK_WR %p",\ + __LINE__,(B))); \ B->query()->lock_writing();} -#define BLOCK_LOCK_RD(B) {DBUG_PRINT("lock", ("%d LOCK_RD 0x%lx",\ - __LINE__,(ulong)(B))); \ +#define BLOCK_LOCK_RD(B) {DBUG_PRINT("lock", ("%d LOCK_RD %p",\ + __LINE__,(B))); \ B->query()->lock_reading();} #define BLOCK_UNLOCK_WR(B) { \ - DBUG_PRINT("lock", ("%d UNLOCK_WR 0x%lx",\ - __LINE__,(ulong)(B)));B->query()->unlock_writing();} + DBUG_PRINT("lock", ("%d UNLOCK_WR %p",\ + __LINE__,(B)));B->query()->unlock_writing();} #define BLOCK_UNLOCK_RD(B) { \ - DBUG_PRINT("lock", ("%d UNLOCK_RD 0x%lx",\ - __LINE__,(ulong)(B)));B->query()->unlock_reading();} + DBUG_PRINT("lock", ("%d UNLOCK_RD %p",\ + __LINE__,(B)));B->query()->unlock_reading();} #define DUMP(C) DBUG_EXECUTE("qcache", {\ (C)->cache_dump(); (C)->queries_dump();(C)->tables_dump();}) #else @@ -646,7 +646,7 @@ bool Query_cache::try_lock(THD *thd, Cache_try_lock_mode mode) else if (mode == TIMEOUT) { struct timespec waittime; - set_timespec_nsec(waittime,(ulong)(50000000L)); /* Wait for 50 msec */ + set_timespec_nsec(waittime,50000000UL); /* Wait for 50 msec */ int res= mysql_cond_timedwait(&COND_cache_status_changed, &structure_guard_mutex, &waittime); if (res == ETIMEDOUT) @@ -820,7 +820,7 @@ inline Query_cache_block * Query_cache_block_table::block() void Query_cache_block::init(ulong block_length) { DBUG_ENTER("Query_cache_block::init"); - DBUG_PRINT("qcache", ("init block: 0x%lx length: %lu", (ulong) this, + DBUG_PRINT("qcache", ("init block: %p length: %lu", this, block_length)); length = block_length; used = 0; @@ -832,8 +832,8 @@ void Query_cache_block::init(ulong block_length) void Query_cache_block::destroy() { 
DBUG_ENTER("Query_cache_block::destroy"); - DBUG_PRINT("qcache", ("destroy block 0x%lx, type %d", - (ulong) this, type)); + DBUG_PRINT("qcache", ("destroy block %p, type %d", + this, type)); type = INCOMPLETE; DBUG_VOID_RETURN; } @@ -935,7 +935,7 @@ bool Query_cache_query::try_lock_writing() DBUG_PRINT("info", ("can't lock rwlock")); DBUG_RETURN(0); } - DBUG_PRINT("info", ("rwlock 0x%lx locked", (ulong) &lock)); + DBUG_PRINT("info", ("rwlock %p locked", &lock)); DBUG_RETURN(1); } @@ -964,9 +964,9 @@ void Query_cache_query::init_n_lock() res=0; wri = 0; len = 0; ready= 0; hit_count = 0; mysql_rwlock_init(key_rwlock_query_cache_query_lock, &lock); lock_writing(); - DBUG_PRINT("qcache", ("inited & locked query for block 0x%lx", - (long) (((uchar*) this) - - ALIGN_SIZE(sizeof(Query_cache_block))))); + DBUG_PRINT("qcache", ("inited & locked query for block %p", + (uchar*) this - + ALIGN_SIZE(sizeof(Query_cache_block)))); DBUG_VOID_RETURN; } @@ -974,9 +974,9 @@ void Query_cache_query::init_n_lock() void Query_cache_query::unlock_n_destroy() { DBUG_ENTER("Query_cache_query::unlock_n_destroy"); - DBUG_PRINT("qcache", ("destroyed & unlocked query for block 0x%lx", - (long) (((uchar*) this) - - ALIGN_SIZE(sizeof(Query_cache_block))))); + DBUG_PRINT("qcache", ("destroyed & unlocked query for block %p", + (uchar*) this - + ALIGN_SIZE(sizeof(Query_cache_block)))); /* The following call is not needed on system where one can destroy an active semaphore @@ -1109,7 +1109,7 @@ Query_cache::insert(THD *thd, Query_cache_tls *query_cache_tls, { DBUG_PRINT("warning", ("Can't append data")); header->result(result); - DBUG_PRINT("qcache", ("free query 0x%lx", (ulong) query_block)); + DBUG_PRINT("qcache", ("free query %p", query_block)); // The following call will remove the lock on query_block query_cache.free_query(query_block); query_cache.refused++; @@ -1445,8 +1445,8 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) flags.default_week_format= thd->variables.default_week_format; DBUG_PRINT("qcache", ("\ long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \ -CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ -sql mode: 0x%llx, sort len: %lu, conncat len: %lu, div_precision: %lu, \ +CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \ +sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %lu, \ def_week_frmt: %lu, in_trans: %d, autocommit: %d", (int)flags.client_long_flag, (int)flags.client_protocol_41, @@ -1457,8 +1457,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", flags.character_set_client_num, flags.character_set_results_num, flags.collation_connection_num, - (ulong) flags.limit, - (ulong) flags.time_zone, + (ulonglong)flags.limit, + flags.time_zone, flags.sql_mode, flags.max_sort_length, flags.group_concat_max_len, @@ -1521,7 +1521,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", /* Check if another thread is processing the same query? 
*/ Query_cache_block *competitor = (Query_cache_block *) my_hash_search(&queries, (uchar*) query, tot_length); - DBUG_PRINT("qcache", ("competitor 0x%lx", (ulong) competitor)); + DBUG_PRINT("qcache", ("competitor %p", competitor)); if (competitor == 0) { /* Query is not in cache and no one is working with it; Store it */ @@ -1531,8 +1531,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", Query_cache_block::QUERY, local_tables); if (query_block != 0) { - DBUG_PRINT("qcache", ("query block 0x%lx allocated, %lu", - (ulong) query_block, query_block->used)); + DBUG_PRINT("qcache", ("query block %p allocated, %lu", + query_block, query_block->used)); Query_cache_query *header = query_block->query(); header->init_n_lock(); @@ -1943,8 +1943,8 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length) flags.lc_time_names= thd->variables.lc_time_names; DBUG_PRINT("qcache", ("\ long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \ -CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ -sql mode: 0x%llx, sort len: %lu, conncat len: %lu, div_precision: %lu, \ +CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \ +sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %lu, \ def_week_frmt: %lu, in_trans: %d, autocommit: %d", (int)flags.client_long_flag, (int)flags.client_protocol_41, @@ -1955,8 +1955,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", flags.character_set_client_num, flags.character_set_results_num, flags.collation_connection_num, - (ulong) flags.limit, - (ulong) flags.time_zone, + (ulonglong) flags.limit, + flags.time_zone, flags.sql_mode, flags.max_sort_length, flags.group_concat_max_len, @@ -1983,7 +1983,7 @@ lookup: DBUG_PRINT("qcache", ("No query in query hash or no results")); goto err_unlock; } - DBUG_PRINT("qcache", ("Query in query hash 0x%lx", (ulong)query_block)); + DBUG_PRINT("qcache", ("Query in query hash %p",query_block)); #ifdef WITH_WSREP if (once_more && WSREP_CLIENT(thd) && wsrep_must_sync_wait(thd)) @@ -2014,7 +2014,7 @@ lookup: BLOCK_UNLOCK_RD(query_block); goto err_unlock; } - DBUG_PRINT("qcache", ("Query have result 0x%lx", (ulong) query)); + DBUG_PRINT("qcache", ("Query have result %p", query)); if (thd->in_multi_stmt_transaction_mode() && (query->tables_type() & HA_CACHE_TBL_TRANSACT)) @@ -2111,9 +2111,9 @@ lookup: if (engine_data != table->engine_data()) { DBUG_PRINT("qcache", - ("Handler require invalidation queries of %.*s %lu-%lu", + ("Handler require invalidation queries of %.*s %llu-%llu", qcache_se_key_len, qcache_se_key_name, - (ulong) engine_data, (ulong) table->engine_data())); + engine_data, table->engine_data())); invalidate_table_internal(thd, (uchar *) table->db(), table->key_length()); @@ -2152,9 +2152,9 @@ lookup: THD_STAGE_INFO(thd, stage_sending_cached_result_to_client); do { - DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)", + DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)", result_block->length, result_block->used, - (ulong) (result_block->headers_len()+ + (uint) (result_block->headers_len()+ ALIGN_SIZE(sizeof(Query_cache_result))))); Query_cache_result *result = result_block->result(); @@ -2192,7 +2192,7 @@ lookup: thd->get_stmt_da()->disable_status(); BLOCK_UNLOCK_RD(query_block); - MYSQL_QUERY_CACHE_HIT(thd->query(), (ulong) thd->limit_found_rows); + MYSQL_QUERY_CACHE_HIT(thd->query(), thd->limit_found_rows); DBUG_RETURN(1); // Result sent to client err_unlock: @@ -2899,8 +2899,8 @@ my_bool 
Query_cache::free_old_query() void Query_cache::free_query_internal(Query_cache_block *query_block) { DBUG_ENTER("Query_cache::free_query_internal"); - DBUG_PRINT("qcache", ("free query 0x%lx %lu bytes result", - (ulong) query_block, + DBUG_PRINT("qcache", ("free query %p %lu bytes result", + query_block, query_block->query()->length() )); queries_in_cache--; @@ -2969,8 +2969,8 @@ void Query_cache::free_query_internal(Query_cache_block *query_block) void Query_cache::free_query(Query_cache_block *query_block) { DBUG_ENTER("Query_cache::free_query"); - DBUG_PRINT("qcache", ("free query 0x%lx %lu bytes result", - (ulong) query_block, + DBUG_PRINT("qcache", ("free query %p %lu bytes result", + query_block, query_block->query()->length() )); my_hash_delete(&queries,(uchar *) query_block); @@ -3017,8 +3017,8 @@ Query_cache::append_result_data(Query_cache_block **current_block, Query_cache_block *query_block) { DBUG_ENTER("Query_cache::append_result_data"); - DBUG_PRINT("qcache", ("append %lu bytes to 0x%lx query", - data_len, (long) query_block)); + DBUG_PRINT("qcache", ("append %lu bytes to %p query", + data_len, query_block)); if (query_block->query()->add(data_len) > query_cache_limit) { @@ -3035,8 +3035,8 @@ Query_cache::append_result_data(Query_cache_block **current_block, } Query_cache_block *last_block = (*current_block)->prev; - DBUG_PRINT("qcache", ("lastblock 0x%lx len %lu used %lu", - (ulong) last_block, last_block->length, + DBUG_PRINT("qcache", ("lastblock %p len %lu used %lu", + last_block, last_block->length, last_block->used)); my_bool success = 1; ulong last_block_free_space= last_block->length - last_block->used; @@ -3081,8 +3081,8 @@ Query_cache::append_result_data(Query_cache_block **current_block, if (success && last_block_free_space > 0) { ulong to_copy = MY_MIN(data_len,last_block_free_space); - DBUG_PRINT("qcache", ("use free space %lub at block 0x%lx to copy %lub", - last_block_free_space, (ulong)last_block, to_copy)); + DBUG_PRINT("qcache", ("use free space %lub at block %p to copy %lub", + last_block_free_space,last_block, to_copy)); memcpy((uchar*) last_block + last_block->used, data, to_copy); last_block->used+=to_copy; } @@ -3123,8 +3123,8 @@ my_bool Query_cache::write_result_data(Query_cache_block **result_block, { block->type = type; ulong length = block->used - headers_len; - DBUG_PRINT("qcache", ("write %lu byte in block 0x%lx",length, - (ulong)block)); + DBUG_PRINT("qcache", ("write %lu byte in block %p",length, + block)); memcpy((uchar*) block+headers_len, rest, length); rest += length; block = block->next; @@ -3387,12 +3387,12 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used, else { DBUG_PRINT("qcache", - ("table: %s db: %s openinfo: 0x%lx keylen: %lu key: 0x%lx", + ("table: %s db: %s openinfo: %p keylen: %zu key: %p", tables_used->table->s->table_name.str, tables_used->table->s->table_cache_key.str, - (ulong) tables_used->table, - (ulong) tables_used->table->s->table_cache_key.length, - (ulong) tables_used->table->s->table_cache_key.str)); + tables_used->table, + tables_used->table->s->table_cache_key.length, + tables_used->table->s->table_cache_key.str)); if (!insert_table(thd, tables_used->table->s->table_cache_key.length, tables_used->table->s->table_cache_key.str, @@ -3429,8 +3429,8 @@ my_bool Query_cache::register_all_tables(THD *thd, TABLE_COUNTER_TYPE tables_arg) { TABLE_COUNTER_TYPE n; - DBUG_PRINT("qcache", ("register tables block 0x%lx, n %d, header %x", - (ulong) block, (int) tables_arg, + DBUG_PRINT("qcache", 
("register tables block %p, n %d, header %x", + block, (int) tables_arg, (int) ALIGN_SIZE(sizeof(Query_cache_block)))); Query_cache_block_table *block_table = block->table(0); @@ -3469,8 +3469,8 @@ Query_cache::insert_table(THD *thd, uint key_len, const char *key, my_bool hash) { DBUG_ENTER("Query_cache::insert_table"); - DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", - (ulong)node, key_len)); + DBUG_PRINT("qcache", ("insert table node %p, len %d", + node, key_len)); Query_cache_block *table_block= (hash ? @@ -3481,11 +3481,11 @@ Query_cache::insert_table(THD *thd, uint key_len, const char *key, table_block->table()->engine_data() != engine_data) { DBUG_PRINT("qcache", - ("Handler require invalidation queries of %s.%s %lu-%lu", + ("Handler require invalidation queries of %s.%s %llu-%llu", table_block->table()->db(), table_block->table()->table(), - (ulong) engine_data, - (ulong) table_block->table()->engine_data())); + engine_data, + table_block->table()->engine_data())); /* as far as we delete all queries with this table, table block will be deleted, too @@ -3500,8 +3500,8 @@ Query_cache::insert_table(THD *thd, uint key_len, const char *key, if (table_block == 0) { - DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)", - (ulong) key, (int) key_len)); + DBUG_PRINT("qcache", ("new table block from %p (%u)", + key, (int) key_len)); table_block= write_block_data(key_len, (uchar*) key, ALIGN_SIZE(sizeof(Query_cache_table)), Query_cache_block::TABLE, 1); @@ -3710,7 +3710,7 @@ Query_cache::get_free_block(ulong len, my_bool not_less, ulong min) if (block != 0) exclude_from_free_memory_list(block); - DBUG_PRINT("qcache",("getting block 0x%lx", (ulong) block)); + DBUG_PRINT("qcache",("getting block %p", block)); DBUG_RETURN(block); } @@ -3721,9 +3721,9 @@ void Query_cache::free_memory_block(Query_cache_block *block) block->used=0; block->type= Query_cache_block::FREE; // mark block as free in any case DBUG_PRINT("qcache", - ("first_block 0x%lx, block 0x%lx, pnext 0x%lx pprev 0x%lx", - (ulong) first_block, (ulong) block, (ulong) block->pnext, - (ulong) block->pprev)); + ("first_block %p, block %p, pnext %p pprev %p", + first_block, block, block->pnext, + block->pprev)); if (block->pnext != first_block && block->pnext->is_free()) block = join_free_blocks(block, block->pnext); @@ -3755,8 +3755,8 @@ void Query_cache::split_block(Query_cache_block *block, ulong len) else free_memory_block(new_block); - DBUG_PRINT("qcache", ("split 0x%lx (%lu) new 0x%lx", - (ulong) block, len, (ulong) new_block)); + DBUG_PRINT("qcache", ("split %p (%lu) new %p", + block, len, new_block)); DBUG_VOID_RETURN; } @@ -3768,9 +3768,9 @@ Query_cache::join_free_blocks(Query_cache_block *first_block_arg, Query_cache_block *second_block; DBUG_ENTER("Query_cache::join_free_blocks"); DBUG_PRINT("qcache", - ("join first 0x%lx, pnext 0x%lx, in list 0x%lx", - (ulong) first_block_arg, (ulong) first_block_arg->pnext, - (ulong) block_in_list)); + ("join first %p, pnext %p, in list %p", + first_block_arg, first_block_arg->pnext, + block_in_list)); exclude_from_free_memory_list(block_in_list); second_block = first_block_arg->pnext; @@ -3792,7 +3792,7 @@ my_bool Query_cache::append_next_free_block(Query_cache_block *block, { Query_cache_block *next_block = block->pnext; DBUG_ENTER("Query_cache::append_next_free_block"); - DBUG_PRINT("enter", ("block 0x%lx, add_size %lu", (ulong) block, + DBUG_PRINT("enter", ("block %p, add_size %lu", block, add_size)); if (next_block != first_block && next_block->is_free()) @@ -3824,8 +3824,8 @@ 
void Query_cache::exclude_from_free_memory_list(Query_cache_block *free_block) bin->number--; free_memory-=free_block->length; free_memory_blocks--; - DBUG_PRINT("qcache",("exclude block 0x%lx, bin 0x%lx", (ulong) free_block, - (ulong) bin)); + DBUG_PRINT("qcache",("exclude block %p, bin %p", free_block, + bin)); DBUG_VOID_RETURN; } @@ -3842,8 +3842,8 @@ void Query_cache::insert_into_free_memory_list(Query_cache_block *free_block) free_block->data()); *bin_ptr = bins+idx; (*bin_ptr)->number++; - DBUG_PRINT("qcache",("insert block 0x%lx, bin[%d] 0x%lx", - (ulong) free_block, idx, (ulong) *bin_ptr)); + DBUG_PRINT("qcache",("insert block %p, bin[%d] %p", + free_block, idx, *bin_ptr)); DBUG_VOID_RETURN; } @@ -3940,7 +3940,7 @@ Query_cache::double_linked_list_simple_include(Query_cache_block *point, list_pointer) { DBUG_ENTER("Query_cache::double_linked_list_simple_include"); - DBUG_PRINT("qcache", ("including block 0x%lx", (ulong) point)); + DBUG_PRINT("qcache", ("including block %p", point)); if (*list_pointer == 0) *list_pointer=point->next=point->prev=point; else @@ -3959,8 +3959,8 @@ Query_cache::double_linked_list_exclude(Query_cache_block *point, Query_cache_block **list_pointer) { DBUG_ENTER("Query_cache::double_linked_list_exclude"); - DBUG_PRINT("qcache", ("excluding block 0x%lx, list 0x%lx", - (ulong) point, (ulong) list_pointer)); + DBUG_PRINT("qcache", ("excluding block %p, list %p", + point, list_pointer)); if (point->next == point) *list_pointer = 0; // empty list else @@ -4249,7 +4249,7 @@ my_bool Query_cache::move_by_type(uchar **border, switch (block->type) { case Query_cache_block::FREE: { - DBUG_PRINT("qcache", ("block 0x%lx FREE", (ulong) block)); + DBUG_PRINT("qcache", ("block %p FREE", block)); if (*border == 0) { *border = (uchar *) block; @@ -4268,7 +4268,7 @@ my_bool Query_cache::move_by_type(uchar **border, case Query_cache_block::TABLE: { HASH_SEARCH_STATE record_idx; - DBUG_PRINT("qcache", ("block 0x%lx TABLE", (ulong) block)); + DBUG_PRINT("qcache", ("block %p TABLE", block)); if (*border == 0) break; ulong len = block->length, used = block->used; @@ -4304,9 +4304,9 @@ my_bool Query_cache::move_by_type(uchar **border, nlist_root->prev = tprev; tprev->next = nlist_root; DBUG_PRINT("qcache", - ("list_root: 0x%lx tnext 0x%lx tprev 0x%lx tprev->next 0x%lx tnext->prev 0x%lx", - (ulong) list_root, (ulong) tnext, (ulong) tprev, - (ulong)tprev->next, (ulong)tnext->prev)); + ("list_root: %p tnext %p tprev %p tprev->next %p tnext->prev %p", + list_root, tnext, tprev, + tprev->next,tnext->prev)); /* Go through all queries that uses this table and change them to point to the new table object @@ -4321,14 +4321,14 @@ my_bool Query_cache::move_by_type(uchar **border, /* Fix hash to point at moved block */ my_hash_replace(&tables, &record_idx, (uchar*) new_block); - DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx", - len, (ulong) new_block, (ulong) *border)); + DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p", + len, new_block, *border)); break; } case Query_cache_block::QUERY: { HASH_SEARCH_STATE record_idx; - DBUG_PRINT("qcache", ("block 0x%lx QUERY", (ulong) block)); + DBUG_PRINT("qcache", ("block %p QUERY", block)); if (*border == 0) break; BLOCK_LOCK_WR(block); @@ -4414,8 +4414,8 @@ my_bool Query_cache::move_by_type(uchar **border, } /* Fix hash to point at moved block */ my_hash_replace(&queries, &record_idx, (uchar*) new_block); - DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx", - len, (ulong) new_block, (ulong) *border)); + 
DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p", + len, new_block, *border)); break; } case Query_cache_block::RES_INCOMPLETE: @@ -4423,7 +4423,7 @@ my_bool Query_cache::move_by_type(uchar **border, case Query_cache_block::RES_CONT: case Query_cache_block::RESULT: { - DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block, + DBUG_PRINT("qcache", ("block %p RES* (%d)", block, (int) block->type)); if (*border == 0) break; @@ -4463,13 +4463,13 @@ my_bool Query_cache::move_by_type(uchar **border, new_block->length -= free_space; } BLOCK_UNLOCK_WR(query_block); - DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx", - len, (ulong) new_block, (ulong) *border)); + DBUG_PRINT("qcache", ("moved %lu bytes to %p, new gap at %p", + len, new_block, *border)); break; } default: - DBUG_PRINT("error", ("unexpected block type %d, block 0x%lx", - (int)block->type, (ulong) block)); + DBUG_PRINT("error", ("unexpected block type %d, block %p", + (int)block->type, block)); ok = 0; } DBUG_RETURN(ok); @@ -4670,16 +4670,16 @@ void Query_cache::bins_dump() DBUG_PRINT("qcache", ("-------------------------")); for (i=0; i < mem_bin_num; i++) { - DBUG_PRINT("qcache", ("%10lu %3d 0x%lx", bins[i].size, bins[i].number, - (ulong)&(bins[i]))); + DBUG_PRINT("qcache", ("%10lu %3d %p", bins[i].size, bins[i].number, + &(bins[i]))); if (bins[i].free_blocks) { Query_cache_block *block = bins[i].free_blocks; do{ - DBUG_PRINT("qcache", ("\\-- %lu 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", - block->length, (ulong)block, - (ulong)block->next, (ulong)block->prev, - (ulong)block->pnext, (ulong)block->pprev)); + DBUG_PRINT("qcache", ("\\-- %lu %p %p %p %p %p", + block->length,block, + block->next,block->prev, + block->pnext,block->pprev)); block = block->next; } while ( block != bins[i].free_blocks ); } @@ -4703,11 +4703,11 @@ void Query_cache::cache_dump() do { DBUG_PRINT("qcache", - ("%10lu %10lu %1d %2d 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", + ("%10lu %10lu %1d %2d %p %p %p %p %p", i->length, i->used, (int)i->type, - i->n_tables, (ulong)i, - (ulong)i->next, (ulong)i->prev, (ulong)i->pnext, - (ulong)i->pprev)); + i->n_tables,i, + i->next,i->prev,i->pnext, + i->pprev)); i = i->pnext; } while ( i != first_block ); DBUG_PRINT("qcache", ("-------------------------------------")); @@ -4737,15 +4737,15 @@ void Query_cache::queries_dump() Query_cache_query_flags flags; memcpy(&flags, str+len, QUERY_CACHE_FLAGS_SIZE); str[len]= 0; // make zero ending DB name - DBUG_PRINT("qcache", ("F: %u C: %u L: %lu T: '%s' (%lu) '%s' '%s'", + DBUG_PRINT("qcache", ("F: %u C: %u L: %llu T: '%s' (%zu) '%s' '%s'", flags.client_long_flag, flags.character_set_client_num, - (ulong)flags.limit, + flags.limit, flags.time_zone->get_name()->ptr(), - (ulong) len, str, strend(str)+1)); - DBUG_PRINT("qcache", ("-b- 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", (ulong) block, - (ulong) block->next, (ulong) block->prev, - (ulong)block->pnext, (ulong)block->pprev)); + len, str, strend(str)+1)); + DBUG_PRINT("qcache", ("-b- %p %p %p %p %p", block, + block->next, block->prev, + block->pnext,block->pprev)); memcpy(str + len, &flags, QUERY_CACHE_FLAGS_SIZE); // restore flags for (TABLE_COUNTER_TYPE t= 0; t < block->n_tables; t++) { @@ -4759,14 +4759,14 @@ void Query_cache::queries_dump() Query_cache_block *result_beg = result_block; do { - DBUG_PRINT("qcache", ("-r- %u %lu/%lu 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", + DBUG_PRINT("qcache", ("-r- %u %lu/%lu %p %p %p %p %p", (uint) result_block->type, result_block->length, result_block->used, - (ulong) result_block, - (ulong) 
result_block->next, - (ulong) result_block->prev, - (ulong) result_block->pnext, - (ulong) result_block->pprev)); + result_block, + result_block->next, + result_block->prev, + result_block->pnext, + result_block->pprev)); result_block = result_block->next; } while ( result_block != result_beg ); } @@ -4845,14 +4845,14 @@ my_bool Query_cache::check_integrity(bool locked) if (!block) break; - DBUG_PRINT("qcache", ("block 0x%lx, type %u...", - (ulong) block, (uint) block->type)); + DBUG_PRINT("qcache", ("block %p, type %u...", + block, (uint) block->type)); // Check allignment - if ((((long)block) % (long) ALIGN_SIZE(1)) != - (((long)first_block) % (long)ALIGN_SIZE(1))) + if ((((size_t)block) % ALIGN_SIZE(1)) != + (((size_t)first_block) % ALIGN_SIZE(1))) { DBUG_PRINT("error", - ("block 0x%lx do not aligned by %d", (ulong) block, + ("block %p do not aligned by %d", block, (int) ALIGN_SIZE(1))); result = 1; } @@ -4863,10 +4863,10 @@ my_bool Query_cache::check_integrity(bool locked) ((uchar*)first_block) + query_cache_size) { DBUG_PRINT("error", - ("block 0x%lx, type %u, ended at 0x%lx, but cache ended at 0x%lx", - (ulong) block, (uint) block->type, - (ulong) (((uchar*)block) + block->length), - (ulong) (((uchar*)first_block) + query_cache_size))); + ("block %p, type %u, ended at %p, but cache ended at %p", + block, (uint) block->type, + (((uchar*)block) + block->length), + (((uchar*)first_block) + query_cache_size))); result = 1; } } @@ -4874,10 +4874,10 @@ my_bool Query_cache::check_integrity(bool locked) if (((uchar*)block) + block->length != ((uchar*)block->pnext)) { DBUG_PRINT("error", - ("block 0x%lx, type %u, ended at 0x%lx, but next block beginning at 0x%lx", - (ulong) block, (uint) block->type, - (ulong) (((uchar*)block) + block->length), - (ulong) ((uchar*)block->pnext))); + ("block %p, type %u, ended at %p, but next block beginning at %p", + block, (uint) block->type, + (((uchar*)block) + block->length), + ((uchar*)block->pnext))); } if (block->type == Query_cache_block::FREE) free+= block->length; @@ -4893,11 +4893,11 @@ my_bool Query_cache::check_integrity(bool locked) ((uchar*)bin) >= ((uchar*)first_block)) { DBUG_PRINT("error", - ("free block 0x%lx have bin pointer 0x%lx beyaond of bins array bounds [0x%lx,0x%lx]", - (ulong) block, - (ulong) bin, - (ulong) bins, - (ulong) first_block)); + ("free block %p have bin pointer %p beyaond of bins array bounds [%p,%p]", + block, + bin, + bins, + first_block)); result = 1; } else @@ -4944,11 +4944,11 @@ my_bool Query_cache::check_integrity(bool locked) ((uchar*)query_block) >= (((uchar*)first_block) + query_cache_size)) { DBUG_PRINT("error", - ("result block 0x%lx have query block pointer 0x%lx beyaond of block pool bounds [0x%lx,0x%lx]", - (ulong) block, - (ulong) query_block, - (ulong) first_block, - (ulong) (((uchar*)first_block) + query_cache_size))); + ("result block %p have query block pointer %p beyaond of block pool bounds [%p,%p]", + block, + query_block, + first_block, + (((uchar*)first_block) + query_cache_size))); result = 1; } else @@ -4964,8 +4964,8 @@ my_bool Query_cache::check_integrity(bool locked) break; } default: - DBUG_PRINT("error", ("block 0x%lx have incorrect type %u", - (long) block, block->type)); + DBUG_PRINT("error", ("block %p have incorrect type %u", + block, block->type)); result = 1; } @@ -4993,15 +4993,15 @@ my_bool Query_cache::check_integrity(bool locked) { do { - DBUG_PRINT("qcache", ("block 0x%lx, type %u...", - (ulong) block, (uint) block->type)); + DBUG_PRINT("qcache", ("block %p, type %u...", + 
block, (uint) block->type)); size_t length; uchar *key = query_cache_query_get_key((uchar*) block, &length, 0); uchar* val = my_hash_search(&queries, key, length); if (((uchar*)block) != val) { - DBUG_PRINT("error", ("block 0x%lx found in queries hash like 0x%lx", - (ulong) block, (ulong) val)); + DBUG_PRINT("error", ("block %p found in queries hash like %p", + block, val)); } if (in_blocks(block)) result = 1; @@ -5011,8 +5011,8 @@ my_bool Query_cache::check_integrity(bool locked) Query_cache_block * result_block = results; do { - DBUG_PRINT("qcache", ("block 0x%lx, type %u...", - (ulong) block, (uint) block->type)); + DBUG_PRINT("qcache", ("block %p, type %u...", + block, (uint) block->type)); if (in_blocks(result_block)) result = 1; @@ -5028,15 +5028,15 @@ my_bool Query_cache::check_integrity(bool locked) { do { - DBUG_PRINT("qcache", ("block 0x%lx, type %u...", - (ulong) block, (uint) block->type)); + DBUG_PRINT("qcache", ("block %p, type %u...", + block, (uint) block->type)); size_t length; uchar *key = query_cache_table_get_key((uchar*) block, &length, 0); uchar* val = my_hash_search(&tables, key, length); if (((uchar*)block) != val) { - DBUG_PRINT("error", ("block 0x%lx found in tables hash like 0x%lx", - (ulong) block, (ulong) val)); + DBUG_PRINT("error", ("block %p found in tables hash like %p", + block, val)); } if (in_blocks(block)) @@ -5053,8 +5053,8 @@ my_bool Query_cache::check_integrity(bool locked) uint count = 0; do { - DBUG_PRINT("qcache", ("block 0x%lx, type %u...", - (ulong) block, (uint) block->type)); + DBUG_PRINT("qcache", ("block %p, type %u...", + block, (uint) block->type)); if (in_blocks(block)) result = 1; @@ -5086,13 +5086,13 @@ my_bool Query_cache::in_blocks(Query_cache_block * point) if (block->pprev->pnext != block) { DBUG_PRINT("error", - ("block 0x%lx in physical list is incorrect linked, prev block 0x%lx refered as next to 0x%lx (check from 0x%lx)", - (ulong) block, (ulong) block->pprev, - (ulong) block->pprev->pnext, - (ulong) point)); + ("block %p in physical list is incorrect linked, prev block %p refered as next to %p (check from %p)", + block, block->pprev, + block->pprev->pnext, + point)); //back trace for (; block != point; block = block->pnext) - DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block)); + DBUG_PRINT("error", ("back trace %p", block)); result = 1; goto err1; } @@ -5101,8 +5101,8 @@ my_bool Query_cache::in_blocks(Query_cache_block * point) if (block != first_block) { DBUG_PRINT("error", - ("block 0x%lx (0x%lx<-->0x%lx) not owned by pysical list", - (ulong) block, (ulong) block->pprev, (ulong )block->pnext)); + ("block %p (%p<-->%p) not owned by pysical list", + block, block->pprev, block->pnext)); return 1; } @@ -5114,13 +5114,13 @@ err1: if (block->pnext->pprev != block) { DBUG_PRINT("error", - ("block 0x%lx in physicel list is incorrect linked, next block 0x%lx refered as prev to 0x%lx (check from 0x%lx)", - (ulong) block, (ulong) block->pnext, - (ulong) block->pnext->pprev, - (ulong) point)); + ("block %p in physicel list is incorrect linked, next block %p refered as prev to %p (check from %p)", + block, block->pnext, + block->pnext->pprev, + point)); //back trace for (; block != point; block = block->pprev) - DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block)); + DBUG_PRINT("error", ("back trace %p", block)); result = 1; goto err2; } @@ -5143,13 +5143,13 @@ my_bool Query_cache::in_list(Query_cache_block * root, if (block->prev->next != block) { DBUG_PRINT("error", - ("block 0x%lx in list '%s' 0x%lx is incorrect linked, prev 
block 0x%lx refered as next to 0x%lx (check from 0x%lx)", - (ulong) block, name, (ulong) root, (ulong) block->prev, - (ulong) block->prev->next, - (ulong) point)); + ("block %p in list '%s' %p is incorrect linked, prev block %p refered as next to %p (check from %p)", + block, name, root, block->prev, + block->prev->next, + point)); //back trace for (; block != point; block = block->next) - DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block)); + DBUG_PRINT("error", ("back trace %p", block)); result = 1; goto err1; } @@ -5158,10 +5158,10 @@ my_bool Query_cache::in_list(Query_cache_block * root, if (block != root) { DBUG_PRINT("error", - ("block 0x%lx (0x%lx<-->0x%lx) not owned by list '%s' 0x%lx", - (ulong) block, - (ulong) block->prev, (ulong) block->next, - name, (ulong) root)); + ("block %p (%p<-->%p) not owned by list '%s' %p", + block, + block->prev, block->next, + name, root)); return 1; } err1: @@ -5172,13 +5172,13 @@ err1: if (block->next->prev != block) { DBUG_PRINT("error", - ("block 0x%lx in list '%s' 0x%lx is incorrect linked, next block 0x%lx refered as prev to 0x%lx (check from 0x%lx)", - (ulong) block, name, (ulong) root, (ulong) block->next, - (ulong) block->next->prev, - (ulong) point)); + ("block %p in list '%s' %p is incorrect linked, next block %p refered as prev to %p (check from %p)", + block, name, root, block->next, + block->next->prev, + point)); //back trace for (; block != point; block = block->prev) - DBUG_PRINT("error", ("back trace 0x%lx", (ulong) block)); + DBUG_PRINT("error", ("back trace %p", block)); result = 1; goto err2; } @@ -5191,13 +5191,13 @@ err2: void dump_node(Query_cache_block_table * node, const char * call, const char * descr) { - DBUG_PRINT("qcache", ("%s: %s: node: 0x%lx", call, descr, (ulong) node)); - DBUG_PRINT("qcache", ("%s: %s: node block: 0x%lx", - call, descr, (ulong) node->block())); - DBUG_PRINT("qcache", ("%s: %s: next: 0x%lx", call, descr, - (ulong) node->next)); - DBUG_PRINT("qcache", ("%s: %s: prev: 0x%lx", call, descr, - (ulong) node->prev)); + DBUG_PRINT("qcache", ("%s: %s: node: %p", call, descr, node)); + DBUG_PRINT("qcache", ("%s: %s: node block: %p", + call, descr, node->block())); + DBUG_PRINT("qcache", ("%s: %s: next: %p", call, descr, + node->next)); + DBUG_PRINT("qcache", ("%s: %s: prev: %p", call, descr, + node->prev)); } my_bool Query_cache::in_table_list(Query_cache_block_table * root, @@ -5214,17 +5214,17 @@ my_bool Query_cache::in_table_list(Query_cache_block_table * root, if (table->prev->next != table) { DBUG_PRINT("error", - ("table 0x%lx(0x%lx) in list '%s' 0x%lx(0x%lx) is incorrect linked, prev table 0x%lx(0x%lx) refered as next to 0x%lx(0x%lx) (check from 0x%lx(0x%lx))", - (ulong) table, (ulong) table->block(), name, - (ulong) root, (ulong) root->block(), - (ulong) table->prev, (ulong) table->prev->block(), - (ulong) table->prev->next, - (ulong) table->prev->next->block(), - (ulong) point, (ulong) point->block())); + ("table %p(%p) in list '%s' %p(%p) is incorrect linked, prev table %p(%p) refered as next to %p(%p) (check from %p(%p))", + table, table->block(), name, + root, root->block(), + table->prev, table->prev->block(), + table->prev->next, + table->prev->next->block(), + point, point->block())); //back trace for (; table != point; table = table->next) - DBUG_PRINT("error", ("back trace 0x%lx(0x%lx)", - (ulong) table, (ulong) table->block())); + DBUG_PRINT("error", ("back trace %p(%p)", + table, table->block())); result = 1; goto err1; } @@ -5233,11 +5233,11 @@ my_bool 
Query_cache::in_table_list(Query_cache_block_table * root, if (table != root) { DBUG_PRINT("error", - ("table 0x%lx(0x%lx) (0x%lx(0x%lx)<-->0x%lx(0x%lx)) not owned by list '%s' 0x%lx(0x%lx)", - (ulong) table, (ulong) table->block(), - (ulong) table->prev, (ulong) table->prev->block(), - (ulong) table->next, (ulong) table->next->block(), - name, (ulong) root, (ulong) root->block())); + ("table %p(%p) (%p(%p)<-->%p(%p)) not owned by list '%s' %p(%p)", + table, table->block(), + table->prev, table->prev->block(), + table->next, table->next->block(), + name, root, root->block())); return 1; } err1: @@ -5249,17 +5249,17 @@ err1: if (table->next->prev != table) { DBUG_PRINT("error", - ("table 0x%lx(0x%lx) in list '%s' 0x%lx(0x%lx) is incorrect linked, next table 0x%lx(0x%lx) refered as prev to 0x%lx(0x%lx) (check from 0x%lx(0x%lx))", - (ulong) table, (ulong) table->block(), - name, (ulong) root, (ulong) root->block(), - (ulong) table->next, (ulong) table->next->block(), - (ulong) table->next->prev, - (ulong) table->next->prev->block(), - (ulong) point, (ulong) point->block())); + ("table %p(%p) in list '%s' %p(%p) is incorrect linked, next table %p(%p) refered as prev to %p(%p) (check from %p(%p))", + table, table->block(), + name, root, root->block(), + table->next, table->next->block(), + table->next->prev, + table->next->prev->block(), + point, point->block())); //back trace for (; table != point; table = table->prev) - DBUG_PRINT("error", ("back trace 0x%lx(0x%lx)", - (ulong) table, (ulong) table->block())); + DBUG_PRINT("error", ("back trace %p(%p)", + table, table->block())); result = 1; goto err2; } diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 383bff305d9..a6592e9b782 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -560,8 +560,8 @@ struct Query_cache_query_flags ha_rows limit; Time_zone *time_zone; sql_mode_t sql_mode; - ulong max_sort_length; - ulong group_concat_max_len; + ulonglong max_sort_length; + ulonglong group_concat_max_len; ulong default_week_format; ulong div_precision_increment; MY_LOCALE *lc_time_names; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 2a66427a26f..5c8a378eacd 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -569,8 +569,8 @@ char *thd_get_error_context_description(THD *thd, char *buffer, const char *proc_info= thd->proc_info; len= my_snprintf(header, sizeof(header), - "MySQL thread id %lu, OS thread handle %p, query id %lu", - (ulong) thd->thread_id, (void*) thd->real_id, (ulong) thd->query_id); + "MySQL thread id %lu, OS thread handle %lu, query id %lu", + (ulong) thd->thread_id, (ulong) thd->real_id, (ulong) thd->query_id); str.length(0); str.append(header, len); @@ -936,7 +936,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) by adding the address of the stack. 
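The Query_cache debugging hunks above all make the same mechanical change: a pointer is no longer squeezed through a (ulong) cast so it can be printed with 0x%lx, it is handed to the %p conversion directly. On LLP64 targets such as 64-bit Windows, long is only 32 bits wide while pointers are 64, so the old form truncated addresses and triggered conversion warnings. A minimal standalone sketch of the two styles (illustrative only; printf stands in for the DBUG_PRINT macro and the variable names are made up):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
      int obj;
      int *block= &obj;
      /* Old pattern: truncates the pointer wherever sizeof(long) < sizeof(void*). */
      printf("block 0x%lx\n", (unsigned long) block);
      /* New pattern: %p takes the pointer itself (via void*) and is portable. */
      printf("block %p\n", (void *) block);
      /* Where an integer value is genuinely needed, size_t keeps the full width. */
      printf("block as integer %zu\n", (size_t) block);
      return 0;
    }
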
*/ tmp= (ulong) (my_rnd(&sql_rand) * 0xffffffff); - my_rnd_init(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id); + my_rnd_init(&rand, tmp + (ulong)((size_t) &rand), tmp + (ulong) ::global_query_id); substitute_null_with_insert_id = FALSE; lock_info.mysql_thd= (void *)this; @@ -1177,13 +1177,13 @@ Sql_condition* THD::raise_condition(uint sql_errno, } extern "C" -void *thd_alloc(MYSQL_THD thd, unsigned int size) +void *thd_alloc(MYSQL_THD thd, size_t size) { return thd->alloc(size); } extern "C" -void *thd_calloc(MYSQL_THD thd, unsigned int size) +void *thd_calloc(MYSQL_THD thd, size_t size) { return thd->calloc(size); } @@ -1195,14 +1195,14 @@ char *thd_strdup(MYSQL_THD thd, const char *str) } extern "C" -char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size) +char *thd_strmake(MYSQL_THD thd, const char *str, size_t size) { return thd->strmake(str, size); } extern "C" LEX_CSTRING *thd_make_lex_string(THD *thd, LEX_CSTRING *lex_str, - const char *str, unsigned int size, + const char *str, size_t size, int allocate_lex_string) { return allocate_lex_string ? thd->make_clex_string(str, size) @@ -1210,7 +1210,7 @@ LEX_CSTRING *thd_make_lex_string(THD *thd, LEX_CSTRING *lex_str, } extern "C" -void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size) +void *thd_memdup(MYSQL_THD thd, const void* str, size_t size) { return thd->memdup(str, size); } @@ -3634,7 +3634,7 @@ void Query_arena::free_items() { next= free_list->next; DBUG_ASSERT(free_list != next); - DBUG_PRINT("info", ("free item: 0x%lx", (ulong) free_list)); + DBUG_PRINT("info", ("free item: %p", free_list)); free_list->delete_self(); } /* Postcondition: free_list is 0 */ @@ -4103,7 +4103,7 @@ int select_materialize_with_stats::send_data(List<Item> &items) void TMP_TABLE_PARAM::init() { DBUG_ENTER("TMP_TABLE_PARAM::init"); - DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this)); + DBUG_PRINT("enter", ("this: %p", this)); field_count= sum_func_count= func_count= hidden_field_count= 0; group_parts= group_length= group_null_parts= 0; quick_group= 1; @@ -5403,9 +5403,9 @@ void THD::inc_status_sort_range() void THD::inc_status_sort_rows(ha_rows count) { - statistic_add(status_var.filesort_rows_, count, &LOCK_status); + statistic_add(status_var.filesort_rows_, (ulong)count, &LOCK_status); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, count); + PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, (ulong)count); #endif } diff --git a/sql/sql_class.h b/sql/sql_class.h index 7d4a34b6613..bc72fc25a18 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3949,7 +3949,7 @@ public: mysql_mutex_unlock(&LOCK_thd_data); #ifdef HAVE_PSI_THREAD_INTERFACE if (result) - PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len); + PSI_THREAD_CALL(set_thread_db)(new_db, (int) new_db_len); #endif return result; } @@ -3974,7 +3974,7 @@ public: db_length= new_db_len; mysql_mutex_unlock(&LOCK_thd_data); #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len); + PSI_THREAD_CALL(set_thread_db)(new_db, (int) new_db_len); #endif } } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 3a161ce6d31..32ff44d9343 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -45,7 +45,7 @@ // end_read_record #include "sql_partition.h" // make_used_partitions_str -#define MEM_STRIP_BUF_SIZE thd->variables.sortbuff_size +#define MEM_STRIP_BUF_SIZE ((size_t) thd->variables.sortbuff_size) /* @brief diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 
b857dc8d2ec..2df3af03af5 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -641,7 +641,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) SELECT_LEX_UNIT *unit= derived->get_unit(); DBUG_ENTER("mysql_derived_prepare"); bool res= FALSE; - DBUG_PRINT("enter", ("unit 0x%lx", (ulong) unit)); + DBUG_PRINT("enter", ("unit %p", unit)); if (!unit) DBUG_RETURN(FALSE); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 4ae66dcd32f..e592a873ef0 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1472,8 +1472,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, bool res= 0; table_map map= 0; DBUG_ENTER("mysql_prepare_insert"); - DBUG_PRINT("enter", ("table_list: 0x%lx table: 0x%lx view: %d", - (ulong)table_list, (ulong)table, + DBUG_PRINT("enter", ("table_list: %p table: %p view: %d", + table_list, table, (int)insert_into_view)); /* INSERT should have a SELECT or VALUES clause */ DBUG_ASSERT (!select_insert || !values); diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index 5b3d46fc747..9a7b8f2af21 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -696,7 +696,7 @@ void JOIN_CACHE::set_constants() pack_length_with_blob_ptrs= pack_length + blobs*sizeof(uchar *); min_buff_size= 0; min_records= 1; - buff_size= MY_MAX(join->thd->variables.join_buff_size, + buff_size= (size_t)MY_MAX(join->thd->variables.join_buff_size, get_min_join_buffer_size()); size_of_rec_ofs= offset_size(buff_size); size_of_rec_len= blobs ? size_of_rec_ofs : offset_size(len); @@ -841,7 +841,7 @@ ulong JOIN_CACHE::get_max_join_buffer_size(bool optimize_buff_size) len+= get_max_key_addon_space_per_record() + avg_aux_buffer_incr; space_per_record= len; - size_t limit_sz= join->thd->variables.join_buff_size; + size_t limit_sz= (size_t)join->thd->variables.join_buff_size; if (join_tab->join_buffer_size_limit) set_if_smaller(limit_sz, join_tab->join_buffer_size_limit); if (!optimize_buff_size) @@ -3829,7 +3829,7 @@ uint JOIN_TAB_SCAN_MRR::aux_buffer_incr(ulong recno) set_if_bigger(rec_per_key, 1); if (recno == 1) incr= ref->key_length + tab->file->ref_length; - incr+= tab->file->stats.mrr_length_per_rec * rec_per_key; + incr+= (uint)(tab->file->stats.mrr_length_per_rec * rec_per_key); return incr; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 3bd72314714..b8d51a5783f 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -391,7 +391,7 @@ void Lex_input_stream::body_utf8_append(const char *ptr, if (m_cpp_utf8_processed_ptr >= ptr) return; - int bytes_to_copy= ptr - m_cpp_utf8_processed_ptr; + size_t bytes_to_copy= ptr - m_cpp_utf8_processed_ptr; memcpy(m_body_utf8_ptr, m_cpp_utf8_processed_ptr, bytes_to_copy); m_body_utf8_ptr += bytes_to_copy; @@ -778,7 +778,7 @@ void LEX::start(THD *thd_arg) void lex_end(LEX *lex) { DBUG_ENTER("lex_end"); - DBUG_PRINT("enter", ("lex: 0x%lx", (long) lex)); + DBUG_PRINT("enter", ("lex: %p", lex)); lex_end_stage1(lex); lex_end_stage2(lex); @@ -2656,7 +2656,7 @@ bool st_select_lex::add_gorder_to_list(THD *thd, Item *item, bool asc) bool st_select_lex::add_item_to_list(THD *thd, Item *item) { DBUG_ENTER("st_select_lex::add_item_to_list"); - DBUG_PRINT("info", ("Item: 0x%lx", (long) item)); + DBUG_PRINT("info", ("Item: %p", item)); DBUG_RETURN(item_list.push_back(item, thd->mem_root)); } @@ -4763,9 +4763,9 @@ bool LEX::set_arena_for_set_stmt(Query_arena *backup) Query_arena_memroot(mem_root_for_set_stmt, Query_arena::STMT_INITIALIZED))) DBUG_RETURN(1); - DBUG_PRINT("info", ("mem_root: 0x%lx arena: 0x%lx", - (ulong) 
mem_root_for_set_stmt, - (ulong) arena_for_set_stmt)); + DBUG_PRINT("info", ("mem_root: %p arena: %p", + mem_root_for_set_stmt, + arena_for_set_stmt)); thd->set_n_backup_active_arena(arena_for_set_stmt, backup); DBUG_RETURN(0); } @@ -4776,9 +4776,9 @@ void LEX::reset_arena_for_set_stmt(Query_arena *backup) DBUG_ENTER("LEX::reset_arena_for_set_stmt"); DBUG_ASSERT(arena_for_set_stmt); thd->restore_active_arena(arena_for_set_stmt, backup); - DBUG_PRINT("info", ("mem_root: 0x%lx arena: 0x%lx", - (ulong) arena_for_set_stmt->mem_root, - (ulong) arena_for_set_stmt)); + DBUG_PRINT("info", ("mem_root: %p arena: %p", + arena_for_set_stmt->mem_root, + arena_for_set_stmt)); DBUG_VOID_RETURN; } @@ -4788,9 +4788,9 @@ void LEX::free_arena_for_set_stmt() DBUG_ENTER("LEX::free_arena_for_set_stmt"); if (!arena_for_set_stmt) return; - DBUG_PRINT("info", ("mem_root: 0x%lx arena: 0x%lx", - (ulong) arena_for_set_stmt->mem_root, - (ulong) arena_for_set_stmt)); + DBUG_PRINT("info", ("mem_root: %p arena: %p", + arena_for_set_stmt->mem_root, + arena_for_set_stmt)); arena_for_set_stmt->free_items(); delete(arena_for_set_stmt); free_root(mem_root_for_set_stmt, MYF(MY_KEEP_PREALLOC)); diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc index eeaf888cc09..f787d39b774 100644 --- a/sql/sql_manager.cc +++ b/sql/sql_manager.cc @@ -159,7 +159,7 @@ void stop_handle_manager() if (manager_thread_in_use) { mysql_mutex_lock(&LOCK_manager); - DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: 0x%lx", + DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: %lu", (ulong)manager_thread)); mysql_cond_signal(&COND_manager); mysql_mutex_unlock(&LOCK_manager); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 82794367b74..0c68615f911 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1482,10 +1482,10 @@ uint maria_multi_check(THD *thd, char *packet, uint packet_length) { char *packet_start= packet; size_t subpacket_length= net_field_length((uchar **)&packet_start); - uint length_length= packet_start - packet; + size_t length_length= packet_start - packet; // length of command + 3 bytes where that length was stored - DBUG_PRINT("info", ("sub-packet length: %ld + %d command: %x", - (ulong)subpacket_length, length_length, + DBUG_PRINT("info", ("sub-packet length: %zu + %zu command: %x", + subpacket_length, length_length, packet_start[3])); if (subpacket_length == 0 || @@ -1939,7 +1939,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, (The packet is guaranteed to end with an end zero) */ arg_end= strend(packet); - uint arg_length= arg_end - packet; + uint arg_length= (uint)(arg_end - packet); /* Check given table name length. 
*/ if (packet_length - arg_length > NAME_LEN + 1 || arg_length > SAFE_NAME_LEN) @@ -2178,7 +2178,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, length= my_snprintf(buff, buff_len - 1, "Uptime: %lu Threads: %d Questions: %lu " - "Slow queries: %lu Opens: %lu Flush tables: %lu " + "Slow queries: %lu Opens: %lu Flush tables: %lld " "Open tables: %u Queries per second avg: %u.%03u", uptime, (int) thread_count, (ulong) thd->query_id, @@ -2267,7 +2267,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, char *packet_start= packet; /* We have to store next length because it will be destroyed by '\0' */ size_t next_subpacket_length= net_field_length((uchar **)&packet_start); - uint next_length_length= packet_start - packet; + size_t next_length_length= packet_start - packet; unsigned char *readbuff= net->buff; if (net_allocate_new_packet(net, thd, MYF(0))) @@ -2282,7 +2282,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, { current_com++; size_t subpacket_length= next_subpacket_length + next_length_length; - uint length_length= next_length_length; + size_t length_length= next_length_length; if (subpacket_length < packet_length) { packet_start= packet + subpacket_length; @@ -4555,6 +4555,7 @@ end_with_restore_list: else res= 0; + unit->set_limit(select_lex); res= mysql_multi_update_prepare(thd); #ifdef HAVE_REPLICATION @@ -7690,7 +7691,7 @@ void create_select_for_variable(THD *thd, LEX_CSTRING *var_name) if ((var= get_system_var(thd, OPT_SESSION, var_name, &null_clex_str))) { end= strxmov(buff, "@@session.", var_name->str, NullS); - var->set_name(thd, buff, end-buff, system_charset_info); + var->set_name(thd, buff, (uint)(end-buff), system_charset_info); add_item_to_list(thd, var); } DBUG_VOID_RETURN; @@ -9083,7 +9084,6 @@ Item * all_any_subquery_creator(THD *thd, Item *left_expr, bool multi_update_precheck(THD *thd, TABLE_LIST *tables) { - const char *msg= 0; TABLE_LIST *table; LEX *lex= thd->lex; SELECT_LEX *select_lex= &lex->select_lex; @@ -9139,15 +9139,6 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables) } } - if (select_lex->order_list.elements) - msg= "ORDER BY"; - else if (select_lex->select_limit) - msg= "LIMIT"; - if (msg) - { - my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", msg); - DBUG_RETURN(TRUE); - } DBUG_RETURN(FALSE); } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 9fc67272bfa..3fc49dc42ca 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -4884,7 +4884,7 @@ that are reorganised. 
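A related cleanup runs through the sql_lex.cc and sql_parse.cc hunks above: a length obtained as the difference of two pointers has type ptrdiff_t, so it is now either stored in a size_t or narrowed with an explicit (uint)/(int) cast instead of being assigned silently to a 32-bit integer, and size_t values are printed with %zu. A small self-contained sketch of the preferred form (the function and names are made up for illustration, they are not part of the patch):

    #include <stdio.h>
    #include <string.h>

    /* Length of the first word of a query, computed as a pointer difference. */
    static size_t first_word_len(const char *query)
    {
      const char *end= strchr(query, ' ');
      if (end == NULL)
        end= query + strlen(query);
      return (size_t)(end - query);   /* ptrdiff_t -> size_t, no silent narrowing */
    }

    int main(void)
    {
      printf("first word length: %zu\n", first_word_len("SELECT 1"));
      return 0;
    }
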
{ if (!alt_part_info->use_default_partitions) { - DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info)); + DBUG_PRINT("info", ("part_info: %p", tab_part_info)); tab_part_info->use_default_partitions= FALSE; } tab_part_info->use_default_num_partitions= FALSE; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 00bacad8195..a4e6546e38a 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -775,7 +775,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_CSTRING *dl, int report) if (global_system_variables.log_warnings > 2) { struct link_map *lm = (struct link_map*) plugin_dl.handle; - sql_print_information("Loaded '%s' with offset 0x%lx", dl->str, lm->l_addr); + sql_print_information("Loaded '%s' with offset 0x%zx", dl->str, (size_t)lm->l_addr); } #endif @@ -983,8 +983,8 @@ static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref rc) *plugin= pi; #endif pi->ref_count++; - DBUG_PRINT("lock",("thd: 0x%lx plugin: \"%s\" LOCK ref_count: %d", - (long) current_thd, pi->name.str, pi->ref_count)); + DBUG_PRINT("lock",("thd: %p plugin: \"%s\" LOCK ref_count: %d", + current_thd, pi->name.str, pi->ref_count)); if (lex) insert_dynamic(&lex->plugins, (uchar*)&plugin); @@ -1365,8 +1365,8 @@ static void intern_plugin_unlock(LEX *lex, plugin_ref plugin) DBUG_ASSERT(pi->ref_count); pi->ref_count--; - DBUG_PRINT("lock",("thd: 0x%lx plugin: \"%s\" UNLOCK ref_count: %d", - (long) current_thd, pi->name.str, pi->ref_count)); + DBUG_PRINT("lock",("thd: %p plugin: \"%s\" UNLOCK ref_count: %d", + current_thd, pi->name.str, pi->ref_count)); if (pi->state == PLUGIN_IS_DELETED && !pi->ref_count) reap_needed= true; @@ -3280,8 +3280,8 @@ static void plugin_vars_free_values(sys_var *vars) { /* Free the string from global_system_variables. */ char **valptr= (char**) piv->real_value_ptr(NULL, OPT_GLOBAL); - DBUG_PRINT("plugin", ("freeing value for: '%s' addr: 0x%lx", - var->name.str, (long) valptr)); + DBUG_PRINT("plugin", ("freeing value for: '%s' addr: %p", + var->name.str, valptr)); my_free(*valptr); *valptr= NULL; } @@ -3344,14 +3344,14 @@ uchar* sys_var_pluginvar::real_value_ptr(THD *thd, enum_var_type type) { switch (plugin_var->flags & PLUGIN_VAR_TYPEMASK) { case PLUGIN_VAR_BOOL: - thd->sys_var_tmp.my_bool_value= option.def_value; + thd->sys_var_tmp.my_bool_value= (my_bool)option.def_value; return (uchar*) &thd->sys_var_tmp.my_bool_value; case PLUGIN_VAR_INT: - thd->sys_var_tmp.int_value= option.def_value; + thd->sys_var_tmp.int_value= (int)option.def_value; return (uchar*) &thd->sys_var_tmp.int_value; case PLUGIN_VAR_LONG: case PLUGIN_VAR_ENUM: - thd->sys_var_tmp.long_value= option.def_value; + thd->sys_var_tmp.long_value= (long)option.def_value; return (uchar*) &thd->sys_var_tmp.long_value; case PLUGIN_VAR_LONGLONG: case PLUGIN_VAR_SET: diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index b81d1f7542b..7fc6141bcea 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1400,6 +1400,7 @@ static int mysql_test_update(Prepared_statement *stmt, int res; THD *thd= stmt->thd; uint table_count= 0; + TABLE_LIST *update_source_table; SELECT_LEX *select= &stmt->lex->select_lex; #ifndef NO_EMBEDDED_ACCESS_CHECKS uint want_privilege; @@ -1413,9 +1414,11 @@ static int mysql_test_update(Prepared_statement *stmt, if (mysql_handle_derived(thd->lex, DT_INIT)) goto error; - if (table_list->is_multitable()) + if (((update_source_table= unique_table(thd, table_list, + table_list->next_global, 0)) || + table_list->is_multitable())) { - DBUG_ASSERT(table_list->view != 0); + DBUG_ASSERT(update_source_table || 
table_list->view != 0); DBUG_PRINT("info", ("Switch to multi-update")); /* pass counter value */ thd->lex->table_count= table_count; @@ -3227,7 +3230,7 @@ void mysql_sql_stmt_execute(THD *thd) DBUG_VOID_RETURN; } - DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt)); + DBUG_PRINT("info",("stmt: %p", stmt)); if (lex->prepared_stmt_params_fix_fields(thd)) DBUG_VOID_RETURN; @@ -3754,8 +3757,8 @@ void Prepared_statement::setup_set_params() Prepared_statement::~Prepared_statement() { DBUG_ENTER("Prepared_statement::~Prepared_statement"); - DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx", - (long) this, (long) cursor)); + DBUG_PRINT("enter",("stmt: %p cursor: %p", + this, cursor)); delete cursor; /* We have to call free on the items even if cleanup is called as some items, @@ -3782,7 +3785,7 @@ Query_arena::Type Prepared_statement::type() const void Prepared_statement::cleanup_stmt() { DBUG_ENTER("Prepared_statement::cleanup_stmt"); - DBUG_PRINT("enter",("stmt: 0x%lx", (long) this)); + DBUG_PRINT("enter",("stmt: %p", this)); lex->restore_set_statement_var(); cleanup_items(free_list); thd->cleanup_after_query(); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ba3d2e7da2f..394492f6a1d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1990,7 +1990,8 @@ int JOIN::optimize_stage2() FORCE INDEX FOR ORDER BY can be used to prevent join buffering when sorting on the first table. */ - if (!stable || !stable->force_index_order) + if (!stable || (!stable->force_index_order && + !map2table[stable->tablenr]->keep_current_rowid)) { if (group_list) simple_group= 0; @@ -7873,7 +7874,7 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s, */ if (!is_hash_join_key_no(key)) { - table_map quick_key_map= (table_map(1) << table->quick_key_parts[key]) - 1; + key_part_map quick_key_map= (key_part_map(1) << table->quick_key_parts[key]) - 1; if (table->quick_rows[key] && !(quick_key_map & ~table->const_key_parts[key])) { @@ -10603,8 +10604,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) tmp_cond= new (thd->mem_root) Item_func_trig_cond(thd, tmp_cond, &first_inner_tab-> not_null_compl); - DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx", - (ulong) tmp_cond)); + DBUG_PRINT("info", ("Item_func_trig_cond %p", + tmp_cond)); if (tmp_cond) tmp_cond->quick_fix_field(); /* Add the predicate to other pushed down predicates */ @@ -10612,8 +10613,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) *sel_cond_ref= !(*sel_cond_ref) ? 
tmp_cond : new (thd->mem_root) Item_cond_and(thd, *sel_cond_ref, tmp_cond); - DBUG_PRINT("info", ("Item_cond_and 0x%lx", - (ulong)(*sel_cond_ref))); + DBUG_PRINT("info", ("Item_cond_and %p", + (*sel_cond_ref))); if (!(*sel_cond_ref)) DBUG_RETURN(1); (*sel_cond_ref)->quick_fix_field(); @@ -20340,9 +20341,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), Item *item= *group->item; if (group->fast_field_copier_setup != group->field) { - DBUG_PRINT("info", ("new setup 0x%lx -> 0x%lx", - (ulong)group->fast_field_copier_setup, - (ulong)group->field)); + DBUG_PRINT("info", ("new setup %p -> %p", + group->fast_field_copier_setup, + group->field)); group->fast_field_copier_setup= group->field; group->fast_field_copier_func= item->setup_fast_field_copier(group->field); @@ -24976,8 +24977,8 @@ int JOIN::save_explain_data_intern(Explain_query *output, JOIN *join= this; /* Legacy: this code used to be a non-member function */ int cur_error= 0; DBUG_ENTER("JOIN::save_explain_data_intern"); - DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s", - (ulong)join->select_lex, join->select_lex->type, + DBUG_PRINT("info", ("Select %p, type %s, message %s", + join->select_lex, join->select_lex->type, message ? message : "NULL")); DBUG_ASSERT(have_query_plan == QEP_AVAILABLE); /* fake_select_lex is created/printed by Explain_union */ diff --git a/sql/sql_select.h b/sql/sql_select.h index 9f91733ae9c..b6b8deb99f5 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -2013,7 +2013,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, class Virtual_tmp_table: public TABLE { /** - Destruct collected fields. This method is called on errors only, + Destruct collected fields. This method can be called on errors, when we could not make the virtual temporary table completely, e.g. when some of the fields could not be created or added. 
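The Virtual_tmp_table hunk just below adds a field[i]->free() call in front of each delete, so memory owned by a field is released before the object itself is destroyed. A generic sketch of that teardown order with toy classes (stand-ins only, not the server's Field hierarchy):

    struct Element
    {
      char *buffer;
      Element() : buffer(new char[16]) {}
      void free_resources() { delete[] buffer; buffer= 0; }
    };

    static void destruct_elements(Element **elems, unsigned count)
    {
      for (unsigned i= 0; i < count; i++)
      {
        elems[i]->free_resources();   // release owned resources first ...
        delete elems[i];              // ... then destroy the object itself
      }
    }

Keeping the buffer release in a separate free_resources() step rather than in the destructor is the usual pattern when such objects may be arena-allocated and their destructors are not guaranteed to run; the hunk only adds the missing call before the delete.
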
@@ -2024,7 +2024,10 @@ class Virtual_tmp_table: public TABLE void destruct_fields() { for (uint i= 0; i < s->fields; i++) + { + field[i]->free(); delete field[i]; // to invoke the field destructor + } s->fields= 0; // safety } @@ -2144,7 +2147,7 @@ public: TABLE object ready for read and write in case of success */ -inline TABLE * +inline Virtual_tmp_table * create_virtual_tmp_table(THD *thd, List<Spvar_definition> &field_list) { Virtual_tmp_table *table; diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index c070c8e5549..30b2e11139b 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -350,8 +350,8 @@ get_server_from_table_to_cache(TABLE *table) DBUG_PRINT("info", ("server->socket %s", server->socket)); if (my_hash_insert(&servers_cache, (uchar*) server)) { - DBUG_PRINT("info", ("had a problem inserting server %s at %lx", - server->server_name, (long unsigned int) server)); + DBUG_PRINT("info", ("had a problem inserting server %s at %p", + server->server_name, server)); // error handling needed here DBUG_RETURN(TRUE); } @@ -431,13 +431,13 @@ insert_server_record_into_cache(FOREIGN_SERVER *server) We succeded in insertion of the server to the table, now insert the server to the cache */ - DBUG_PRINT("info", ("inserting server %s at %lx, length %d", - server->server_name, (long unsigned int) server, + DBUG_PRINT("info", ("inserting server %s at %p, length %d", + server->server_name, server, server->server_name_length)); if (my_hash_insert(&servers_cache, (uchar*) server)) { - DBUG_PRINT("info", ("had a problem inserting server %s at %lx", - server->server_name, (long unsigned int) server)); + DBUG_PRINT("info", ("had a problem inserting server %s at %p", + server->server_name, server)); // error handling needed here error= 1; } @@ -804,8 +804,8 @@ int update_server_record_in_cache(FOREIGN_SERVER *existing, */ if (my_hash_insert(&servers_cache, (uchar*)altered)) { - DBUG_PRINT("info", ("had a problem inserting server %s at %lx", - altered->server_name, (long unsigned int) altered)); + DBUG_PRINT("info", ("had a problem inserting server %s at %p", + altered->server_name,altered)); error= ER_OUT_OF_RESOURCES; } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 68ded844938..de928fe9e85 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2527,7 +2527,7 @@ public: size_t size __attribute__((unused))) { TRASH(ptr, size); } - ulong thread_id; + my_thread_id thread_id; uint32 os_thread_id; ulonglong start_time; uint command; @@ -2701,7 +2701,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) while ((thd_info=thread_infos.get())) { protocol->prepare_for_resend(); - protocol->store((ulonglong) thd_info->thread_id); + protocol->store(thd_info->thread_id); protocol->store(thd_info->user, system_charset_info); protocol->store(thd_info->host, system_charset_info); protocol->store(thd_info->db, system_charset_info); @@ -6563,7 +6563,7 @@ static bool store_trigger(THD *thd, Trigger *trigger, { table->field[16]->set_notnull(); thd->variables.time_zone->gmt_sec_to_TIME(&timestamp, - trigger->create_time/100); + (my_time_t)(trigger->create_time/100)); /* timestamp is with 6 digits */ timestamp.second_part= (trigger->create_time % 100) * 10000; ((Field_temporal_with_date*) table->field[16])->store_time_dec(&timestamp, @@ -9590,7 +9590,7 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger) { MYSQL_TIME timestamp; thd->variables.time_zone->gmt_sec_to_TIME(&timestamp, - trigger->create_time/100); + (my_time_t)(trigger->create_time/100)); timestamp.second_part= 
(trigger->create_time % 100) * 10000; p->store(&timestamp, 2); } diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 3e023767807..0048e525cad 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -3757,7 +3757,10 @@ double get_column_avg_frequency(Field * field) using the statistical data from the table column_stats. @retval - The required estimate of the rows in the column range + - The required estimate of the rows in the column range + - If there is some kind of error, this function should return DBL_MAX (and + not HA_POS_ERROR as that is an integer constant). + */ double get_column_range_cardinality(Field *field, diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 2c35abe3ff0..70ddf7b1241 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -31,9 +31,9 @@ ** String functions *****************************************************************************/ -bool String::real_alloc(uint32 length) +bool String::real_alloc(size_t length) { - uint32 arg_length= ALIGN_SIZE(length + 1); + size_t arg_length= ALIGN_SIZE(length + 1); DBUG_ASSERT(arg_length > length); if (arg_length <= length) return TRUE; /* Overflow */ @@ -45,7 +45,8 @@ bool String::real_alloc(uint32 length) (thread_specific ? MY_THREAD_SPECIFIC : 0))))) return TRUE; - Alloced_length=arg_length; + DBUG_ASSERT(length < UINT_MAX32); + Alloced_length=(uint32) arg_length; alloced=1; } Ptr[0]=0; @@ -80,7 +81,7 @@ bool String::real_alloc(uint32 length) @retval true An error occurred when attempting to allocate memory. */ -bool String::realloc_raw(uint32 alloc_length) +bool String::realloc_raw(size_t alloc_length) { if (Alloced_length <= alloc_length) { @@ -112,7 +113,8 @@ bool String::realloc_raw(uint32 alloc_length) else return TRUE; // Signal error Ptr= new_ptr; - Alloced_length= len; + DBUG_ASSERT(len < UINT_MAX32); + Alloced_length= (uint32)len; } return FALSE; } @@ -194,7 +196,7 @@ bool String::set_real(double num,uint decimals, CHARSET_INFO *cs) if (decimals >= FLOATING_POINT_DECIMALS) { len= my_gcvt(num, MY_GCVT_ARG_DOUBLE, sizeof(buff) - 1, buff, NULL); - return copy(buff, len, &my_charset_latin1, cs, &dummy_errors); + return copy(buff, (uint)len, &my_charset_latin1, cs, &dummy_errors); } len= my_fcvt(num, decimals, buff, NULL); return copy(buff, (uint32) len, &my_charset_latin1, cs, @@ -234,10 +236,11 @@ bool String::copy(const String &str) return FALSE; } -bool String::copy(const char *str,uint32 arg_length, CHARSET_INFO *cs) +bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs) { if (alloc(arg_length)) return TRUE; + DBUG_ASSERT(arg_length < UINT_MAX32); if ((str_length=arg_length)) memcpy(Ptr,str,arg_length); Ptr[arg_length]=0; @@ -494,8 +497,10 @@ bool String::append(const String &s) Append an ASCII string to the a string of the current character set */ -bool String::append(const char *s,uint32 arg_length) +bool String::append(const char *s,size_t size) { + DBUG_ASSERT(size <= UINT_MAX32); + uint32 arg_length= (uint32) size; if (!arg_length) return FALSE; @@ -539,7 +544,7 @@ bool String::append_longlong(longlong val) if (realloc(str_length+MAX_BIGINT_WIDTH+2)) return TRUE; char *end= (char*) longlong10_to_str(val, (char*) Ptr + str_length, -10); - str_length= end - Ptr; + str_length= (uint32)(end - Ptr); return FALSE; } @@ -549,7 +554,7 @@ bool String::append_ulonglong(ulonglong val) if (realloc(str_length+MAX_BIGINT_WIDTH+2)) return TRUE; char *end= (char*) longlong10_to_str(val, (char*) Ptr + str_length, 10); - str_length= (uint32) (end - Ptr); 
return FALSE; } @@ -558,7 +563,7 @@ bool String::append_ulonglong(ulonglong val) with character set recoding */ -bool String::append(const char *s,uint32 arg_length, CHARSET_INFO *cs) +bool String::append(const char *s, uint arg_length, CHARSET_INFO *cs) { uint32 offset; @@ -645,7 +650,7 @@ bool String::append_with_prefill(const char *s,uint32 arg_length, uint32 String::numchars() const { - return str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length); + return (uint32) str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length); } int String::charpos(longlong i,uint32 offset) @@ -774,7 +779,7 @@ void String::qs_append(const char *str, uint32 len) void String::qs_append(double d) { char *buff = Ptr + str_length; - str_length+= my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, buff, + str_length+= (uint32) my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, buff, NULL); } @@ -1078,10 +1083,10 @@ String_copier::well_formed_copy(CHARSET_INFO *to_cs, my_charset_same(from_cs, to_cs)) { m_cannot_convert_error_pos= NULL; - return to_cs->cset->copy_fix(to_cs, to, to_length, from, from_length, + return (uint) to_cs->cset->copy_fix(to_cs, to, to_length, from, from_length, nchars, this); } - return my_convert_fix(to_cs, to, to_length, from_cs, from, from_length, + return (uint) my_convert_fix(to_cs, to, to_length, from_cs, from, from_length, nchars, this, this); } @@ -1216,5 +1221,5 @@ uint convert_to_printable(char *to, size_t to_len, memcpy(dots, STRING_WITH_LEN("...\0")); else *t= '\0'; - return t - to; + return (uint) (t - to); } diff --git a/sql/sql_string.h b/sql/sql_string.h index af3765443c5..c88c58b1b40 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -357,22 +357,22 @@ public: Ptr=0; str_length=0; /* Safety */ } - inline bool alloc(uint32 arg_length) + inline bool alloc(size_t arg_length) { if (arg_length < Alloced_length) return 0; return real_alloc(arg_length); } - bool real_alloc(uint32 arg_length); // Empties old string - bool realloc_raw(uint32 arg_length); - bool realloc(uint32 arg_length) + bool real_alloc(size_t arg_length); // Empties old string + bool realloc_raw(size_t arg_length); + bool realloc(size_t arg_length) { if (realloc_raw(arg_length)) return TRUE; Ptr[arg_length]=0; // This make other funcs shorter return FALSE; } - bool realloc_with_extra(uint32 arg_length) + bool realloc_with_extra(size_t arg_length) { if (extra_alloc < 4096) extra_alloc= extra_alloc*2+128; @@ -381,7 +381,7 @@ public: Ptr[arg_length]=0; // This make other funcs shorter return FALSE; } - bool realloc_with_extra_if_needed(uint32 arg_length) + bool realloc_with_extra_if_needed(size_t arg_length) { if (arg_length < Alloced_length) { @@ -391,7 +391,7 @@ public: return realloc_with_extra(arg_length); } // Shrink the buffer, but only if it is allocated on the heap. 
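The sql_string.cc hunks above and the sql_string.h declarations that follow widen the allocation and append entry points from uint32 to size_t while the internal str_length/Alloced_length members stay 32-bit, so every narrowing becomes an explicit cast guarded by an assertion instead of an implicit truncation. A condensed sketch of that idea using a toy buffer type (not the actual String class):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct toy_buf
    {
      char     *ptr;
      uint32_t  length;     /* internal counters stay 32-bit ... */
      uint32_t  alloced;
    };

    /* ... but the public entry point takes size_t, so 64-bit callers are not
       silently truncated; the narrowing happens once, explicitly and guarded. */
    static int toy_append(struct toy_buf *b, const char *data, size_t data_len)
    {
      assert(data_len <= UINT32_MAX - b->length);   /* guard the narrowing */
      if (b->length + data_len > b->alloced)
        return 1;                      /* caller is expected to grow the buffer first */
      memcpy(b->ptr + b->length, data, data_len);
      b->length+= (uint32_t) data_len;
      return 0;
    }
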
- inline void shrink(uint32 arg_length) + inline void shrink(size_t arg_length) { if (!is_alloced()) return; @@ -408,7 +408,7 @@ public: else { Ptr=new_ptr; - Alloced_length=arg_length; + Alloced_length=(uint32)arg_length; } } } @@ -431,7 +431,7 @@ public: bool copy(); // Alloc string if not alloced bool copy(const String &s); // Allocate new string - bool copy(const char *s,uint32 arg_length, CHARSET_INFO *cs); // Allocate new string + bool copy(const char *s,size_t arg_length, CHARSET_INFO *cs); // Allocate new string static bool needs_conversion(uint32 arg_length, CHARSET_INFO *cs_from, CHARSET_INFO *cs_to, uint32 *offset); @@ -484,8 +484,8 @@ public: DBUG_ASSERT(ls.length < UINT_MAX32); return append(ls.str, (uint32) ls.length); } - bool append(const char *s, uint32 arg_length); - bool append(const char *s, uint32 arg_length, CHARSET_INFO *cs); + bool append(const char *s, size_t size); + bool append(const char *s, uint arg_length, CHARSET_INFO *cs); bool append_ulonglong(ulonglong val); bool append_longlong(longlong val); bool append(IO_CACHE* file, uint32 arg_length); @@ -568,10 +568,11 @@ public: float8store(Ptr + str_length, *d); str_length += 8; } - void q_append(const char *data, uint32 data_len) + void q_append(const char *data, size_t data_len) { memcpy(Ptr + str_length, data, data_len); - str_length += data_len; + DBUG_ASSERT(str_length <= UINT_MAX32 - data_len); + str_length += (uint)data_len; } void q_append(const LEX_CSTRING *ls) { diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9c746e470f7..724389592e3 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2275,9 +2275,9 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, size_t db_length= table->db_length; handlerton *table_type= 0; - DBUG_PRINT("table", ("table_l: '%s'.'%s' table: 0x%lx s: 0x%lx", - table->db, table->table_name, (long) table->table, - table->table ? (long) table->table->s : (long) -1)); + DBUG_PRINT("table", ("table_l: '%s'.'%s' table: %p s: %p", + table->db, table->table_name, table->table, + table->table ? table->table->s : NULL)); /* If we are in locked tables mode and are dropping a temporary table, @@ -2526,8 +2526,8 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, mysql_audit_drop_table(thd, table); } - DBUG_PRINT("table", ("table: 0x%lx s: 0x%lx", (long) table->table, - table->table ? (long) table->table->s : (long) -1)); + DBUG_PRINT("table", ("table: %p s: %p", table->table, + table->table ? 
table->table->s : NULL)); } DEBUG_SYNC(thd, "rm_table_no_locks_before_binlog"); thd->thread_specific_used|= (trans_tmp_table_deleted || @@ -4229,7 +4229,7 @@ bool Column_definition::prepare_blob_field(THD *thd) real_field_type() == FIELD_TYPE_MEDIUM_BLOB) { /* The user has given a length to the blob column */ - set_handler(Type_handler::blob_type_handler(length)); + set_handler(Type_handler::blob_type_handler((uint) length)); pack_length= type_handler()->calc_pack_length(0); } length= 0; diff --git a/sql/sql_test.cc b/sql/sql_test.cc index b130cd9ebe2..ab12278ef8c 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -433,7 +433,7 @@ static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data, if (table && table->s->tmp_table == NO_TMP_TABLE) { TABLE_LOCK_INFO table_lock_info; - table_lock_info.thread_id= table->in_use->thread_id; + table_lock_info.thread_id= (ulong)table->in_use->thread_id; memcpy(table_lock_info.table_name, table->s->table_cache_key.str, table->s->table_cache_key.length); table_lock_info.table_name[strlen(table_lock_info.table_name)]='.'; diff --git a/sql/sql_time.h b/sql/sql_time.h index 91a026512c1..1832e4501ed 100644 --- a/sql/sql_time.h +++ b/sql/sql_time.h @@ -193,7 +193,7 @@ inline bool parse_date_time_format(timestamp_type format_type, { return parse_date_time_format(format_type, date_time_format->format.str, - date_time_format->format.length, + (uint) date_time_format->format.length, date_time_format); } diff --git a/sql/sql_type.h b/sql/sql_type.h index 525a05b71e6..0354adfde1e 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -704,7 +704,12 @@ public: } virtual bool is_scalar_type() const { return true; } virtual bool can_return_int() const { return true; } + virtual bool can_return_decimal() const { return true; } virtual bool can_return_real() const { return true; } + virtual bool can_return_str() const { return true; } + virtual bool can_return_text() const { return true; } + virtual bool can_return_date() const { return true; } + virtual bool can_return_time() const { return true; } virtual bool is_general_purpose_string_type() const { return false; } virtual uint Item_time_precision(Item *item) const; virtual uint Item_datetime_precision(Item *item) const; @@ -1001,7 +1006,12 @@ public: const Name name() const { return m_name_row; } bool is_scalar_type() const { return false; } bool can_return_int() const { return false; } + bool can_return_decimal() const { return false; } bool can_return_real() const { return false; } + bool can_return_str() const { return false; } + bool can_return_text() const { return false; } + bool can_return_date() const { return false; } + bool can_return_time() const { return false; } enum_field_types field_type() const { DBUG_ASSERT(0); @@ -2689,7 +2699,11 @@ public: TABLE *table) const; bool can_return_int() const { return false; } + bool can_return_decimal() const { return false; } bool can_return_real() const { return false; } + bool can_return_text() const { return false; } + bool can_return_date() const { return false; } + bool can_return_time() const { return false; } bool is_traditional_type() const { return false; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 3f39765b531..d188be8bc97 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -278,6 +278,7 @@ int mysql_update(THD *thd, killed_state killed_status= NOT_KILLED; Update_plan query_plan(thd->mem_root); Explain_update *explain; + TABLE_LIST *update_source_table; query_plan.index= MAX_KEY; query_plan.using_filesort= FALSE; 
DBUG_ENTER("mysql_update"); @@ -290,9 +291,11 @@ int mysql_update(THD *thd, if (mysql_handle_derived(thd->lex, DT_INIT)) DBUG_RETURN(1); - if (table_list->is_multitable()) + if (((update_source_table=unique_table(thd, table_list, + table_list->next_global, 0)) || + table_list->is_multitable())) { - DBUG_ASSERT(table_list->view != 0); + DBUG_ASSERT(update_source_table || table_list->view != 0); DBUG_PRINT("info", ("Switch to multi-update")); /* pass counter value */ thd->lex->table_count= table_count; @@ -1540,16 +1543,6 @@ int mysql_multi_update_prepare(THD *thd) } DBUG_PRINT("info", ("table: %s want_privilege: %u", tl->alias, (uint) table->grant.want_privilege)); - if (tl->lock_type != TL_READ && - tl->lock_type != TL_READ_NO_INSERT) - { - TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, tl, table_list, 0))) - { - update_non_unique_table_error(table_list, "UPDATE", duplicate); - DBUG_RETURN(TRUE); - } - } } /* Set exclude_from_table_unique_test value back to FALSE. It is needed for @@ -1595,10 +1588,9 @@ bool mysql_multi_update(THD *thd, List<Item> total_list; res= mysql_select(thd, - table_list, select_lex->with_wild, - total_list, - conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, - (ORDER *)NULL, + table_list, select_lex->with_wild, total_list, conds, + select_lex->order_list.elements, select_lex->order_list.first, + (ORDER *)NULL, (Item *) NULL, (ORDER *)NULL, options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | OPTION_SETUP_TABLES_DONE, *result, unit, select_lex); @@ -1857,6 +1849,8 @@ static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab, TABLE *table= join_tab->table; if (unique_table(thd, table_ref, all_tables, 0)) return 0; + if (join_tab->join->order) // FIXME this is probably too strong + return 0; switch (join_tab->type) { case JT_SYSTEM: case JT_CONST: @@ -1934,6 +1928,7 @@ multi_update::initialize_tables(JOIN *join) } } table->prepare_for_position(); + join->map2table[table->tablenr]->keep_current_rowid= true; /* enable uncacheable flag if we update a view with check option @@ -2001,6 +1996,7 @@ loop_end: that we need a position to be read first. */ tbl->prepare_for_position(); + join->map2table[tbl->tablenr]->keep_current_rowid= true; Field_string *field= new Field_string(tbl->file->ref_length, 0, &field_name, diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 26941e9d6e7..fd6ecbbdec9 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1156,7 +1156,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, bool result, view_is_mergeable; TABLE_LIST *UNINIT_VAR(view_main_select_tables); DBUG_ENTER("mysql_make_view"); - DBUG_PRINT("info", ("table: 0x%lx (%s)", (ulong) table, table->table_name)); + DBUG_PRINT("info", ("table: %p (%s)", table, table->table_name)); if (table->required_type == TABLE_TYPE_NORMAL) { diff --git a/sql/sql_window.cc b/sql/sql_window.cc index 18bab30d2fd..d22fff9d486 100644 --- a/sql/sql_window.cc +++ b/sql/sql_window.cc @@ -1827,7 +1827,7 @@ public: private: void move_cursor_if_possible() { - int rows_difference= n_rows - n_rows_behind; + longlong rows_difference= n_rows - n_rows_behind; if (rows_difference > 0) /* We still have to wait. 
*/ return; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a801251f5df..277d67ca014 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -16483,7 +16483,7 @@ view_select: opt_with_clause query_expression_body_view view_check_option { LEX *lex= Lex; - uint len= YYLIP->get_cpp_ptr() - lex->create_view->select.str; + size_t len= YYLIP->get_cpp_ptr() - lex->create_view->select.str; uint not_used; void *create_view_select= thd->memdup(lex->create_view->select.str, len); lex->create_view->select.length= len; diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index c3a84fcc482..ad8b6697ce7 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -16686,7 +16686,7 @@ view_select: opt_with_clause query_expression_body_view view_check_option { LEX *lex= Lex; - uint len= YYLIP->get_cpp_ptr() - lex->create_view->select.str; + size_t len= YYLIP->get_cpp_ptr() - lex->create_view->select.str; uint not_used; void *create_view_select= thd->memdup(lex->create_view->select.str, len); lex->create_view->select.length= len; diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 43a2698c4be..f4c8fcd517f 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -158,7 +158,7 @@ uint find_type2(const TYPELIB *typelib, const char *x, uint length, int pos; const char *j; DBUG_ENTER("find_type2"); - DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, (long) typelib)); + DBUG_PRINT("enter",("x: '%.*s' lib: %p", length, x, typelib)); if (!typelib->count) { diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 6ae9a7e8f2e..d16ae7f85a3 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -2783,7 +2783,7 @@ static Sys_var_enum Sys_thread_handling( #ifdef HAVE_QUERY_CACHE static bool fix_query_cache_size(sys_var *self, THD *thd, enum_var_type type) { - ulong new_cache_size= query_cache.resize(query_cache_size); + ulong new_cache_size= query_cache.resize((ulong)query_cache_size); /* Note: query_cache_size is a global variable reflecting the requested cache size. 
See also query_cache_size_arg @@ -4835,7 +4835,7 @@ static Sys_var_multi_source_ulonglong Sys_slave_skip_counter( static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi) { mi->rli.max_relay_log_size= thd->variables.max_relay_log_size; - mi->rli.relay_log.set_max_size(mi->rli.max_relay_log_size); + mi->rli.relay_log.set_max_size((ulong)mi->rli.max_relay_log_size); return false; } @@ -5487,7 +5487,7 @@ static Sys_var_ulong Sys_rowid_merge_buff_size( "rowid_merge_buff_size", "The size of the buffers used [NOT] IN evaluation via partial matching", SESSION_VAR(rowid_merge_buff_size), CMD_LINE(REQUIRED_ARG), - VALID_RANGE(0, ((ulonglong)~(intptr)0)/2), DEFAULT(8*1024*1024), + VALID_RANGE(0, LONG_MAX), DEFAULT(8*1024*1024), BLOCK_SIZE(1)); static Sys_var_mybool Sys_userstat( diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic index f3ce4467a5f..da6d902a167 100644 --- a/sql/sys_vars.ic +++ b/sql/sys_vars.ic @@ -234,25 +234,25 @@ typedef Sys_var_integer<long, GET_LONG, SHOW_SLONG> Sys_var_long; template<> uchar *Sys_var_int::default_value_ptr(THD *thd) { - thd->sys_var_tmp.int_value= option.def_value; + thd->sys_var_tmp.int_value= (int)option.def_value; return (uchar*) &thd->sys_var_tmp.int_value; } template<> uchar *Sys_var_uint::default_value_ptr(THD *thd) { - thd->sys_var_tmp.uint_value= option.def_value; + thd->sys_var_tmp.uint_value= (uint)option.def_value; return (uchar*) &thd->sys_var_tmp.uint_value; } template<> uchar *Sys_var_long::default_value_ptr(THD *thd) { - thd->sys_var_tmp.long_value= option.def_value; + thd->sys_var_tmp.long_value= (long)option.def_value; return (uchar*) &thd->sys_var_tmp.long_value; } template<> uchar *Sys_var_ulong::default_value_ptr(THD *thd) { - thd->sys_var_tmp.ulong_value= option.def_value; + thd->sys_var_tmp.ulong_value= (ulong)option.def_value; return (uchar*) &thd->sys_var_tmp.ulong_value; } @@ -369,7 +369,7 @@ public: uchar *global_value_ptr(THD *thd, const LEX_CSTRING *base) { return valptr(thd, global_var(ulong)); } uchar *default_value_ptr(THD *thd) - { return valptr(thd, option.def_value); } + { return valptr(thd, (ulong)option.def_value); } }; /** @@ -416,7 +416,7 @@ public: { var->save_result.ulonglong_value= option.def_value; } uchar *default_value_ptr(THD *thd) { - thd->sys_var_tmp.my_bool_value= option.def_value; + thd->sys_var_tmp.my_bool_value=(my_bool) option.def_value; return (uchar*) &thd->sys_var_tmp.my_bool_value; } }; diff --git a/sql/table.cc b/sql/table.cc index 7dd93d5107d..70adb869e0a 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -338,7 +338,7 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name, */ do { - share->table_map_id= my_atomic_add64_explicit(&last_table_id, 1, + share->table_map_id=(ulong) my_atomic_add64_explicit(&last_table_id, 1, MY_MEMORY_ORDER_RELAXED); } while (unlikely(share->table_map_id == ~0UL)); } @@ -3013,8 +3013,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, Field **field_ptr; uint8 save_context_analysis_only= thd->lex->context_analysis_only; DBUG_ENTER("open_table_from_share"); - DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str, - share->table_name.str, (long) outparam)); + DBUG_PRINT("enter",("name: '%s.%s' form: %p", share->db.str, + share->table_name.str, outparam)); thd->lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_VIEW; // not a view @@ -3425,7 +3425,7 @@ int closefrm(register TABLE *table) { int error=0; DBUG_ENTER("closefrm"); - DBUG_PRINT("enter", ("table: 0x%lx", (long) table)); + DBUG_PRINT("enter", ("table: 
%p", table)); if (table->db_stat) error=table->file->ha_close(); diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 0cbc06ff81e..6067ecb059d 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -68,7 +68,7 @@ I_P_List <TDC_element, I_P_List_null_counter, I_P_List_fast_push_back<TDC_element> > unused_shares; -static int64 tdc_version; /* Increments on each reload */ +static tdc_version_t tdc_version; /* Increments on each reload */ static bool tdc_inited; @@ -913,8 +913,8 @@ retry: } end: - DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u", - (ulong) share, share->tdc->ref_count)); + DBUG_PRINT("exit", ("share: %p ref_count: %u", + share, share->tdc->ref_count)); if (flags & GTS_NOLOCK) { tdc_release_share(share); @@ -945,8 +945,8 @@ void tdc_release_share(TABLE_SHARE *share) mysql_mutex_lock(&share->tdc->LOCK_table_share); DBUG_PRINT("enter", - ("share: 0x%lx table: %s.%s ref_count: %u version: %lu", - (ulong) share, share->db.str, share->table_name.str, + ("share: %p table: %s.%s ref_count: %u version: %lld", + share, share->db.str, share->table_name.str, share->tdc->ref_count, share->tdc->version)); DBUG_ASSERT(share->tdc->ref_count); @@ -1181,8 +1181,7 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, */ int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name, - ulong wait_timeout, uint deadlock_weight, - ulong refresh_version) + ulong wait_timeout, uint deadlock_weight, tdc_version_t refresh_version) { TDC_element *element; @@ -1201,16 +1200,16 @@ int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name, } -ulong tdc_refresh_version(void) +tdc_version_t tdc_refresh_version(void) { - return my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED); + return (tdc_version_t)my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED); } -ulong tdc_increment_refresh_version(void) +tdc_version_t tdc_increment_refresh_version(void) { - ulong v= my_atomic_add64_explicit(&tdc_version, 1, MY_MEMORY_ORDER_RELAXED); - DBUG_PRINT("tcache", ("incremented global refresh_version to: %lu", v)); + tdc_version_t v= (tdc_version_t)my_atomic_add64_explicit(&tdc_version, 1, MY_MEMORY_ORDER_RELAXED); + DBUG_PRINT("tcache", ("incremented global refresh_version to: %lld", v)); return v + 1; } diff --git a/sql/table_cache.h b/sql/table_cache.h index cac69296cc2..2e5bb3428dc 100644 --- a/sql/table_cache.h +++ b/sql/table_cache.h @@ -26,12 +26,14 @@ struct Share_free_tables char pad[CPU_LEVEL1_DCACHE_LINESIZE]; }; +typedef int64 tdc_version_t; +#define TDC_VERSION_MAX INT_MAX64 struct TDC_element { uchar m_key[NAME_LEN + 1 + NAME_LEN + 1]; uint m_key_length; - ulong version; + tdc_version_t version; bool flushed; TABLE_SHARE *share; @@ -84,12 +86,14 @@ extern void tdc_release_share(TABLE_SHARE *share); extern bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, const char *db, const char *table_name, bool kill_delayed_threads); + + extern int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name, ulong wait_timeout, uint deadlock_weight, - ulong refresh_version= ULONG_MAX); -extern ulong tdc_refresh_version(void); -extern ulong tdc_increment_refresh_version(void); + tdc_version_t refresh_version= TDC_VERSION_MAX); +extern tdc_version_t tdc_refresh_version(void); +extern tdc_version_t tdc_increment_refresh_version(void); extern int tdc_iterate(THD *thd, my_hash_walk_action action, void *argument, bool no_dups= false); diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc 
index b191693073c..2c3cd0fe24e 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -1133,8 +1133,8 @@ TABLE *THD::open_temporary_table(TMP_TABLE_SHARE *share, thread_safe_increment32(&slave_open_temp_tables); } - DBUG_PRINT("tmptable", ("Opened table: '%s'.'%s' 0x%lx", table->s->db.str, - table->s->table_name.str, (long) table)); + DBUG_PRINT("tmptable", ("Opened table: '%s'.'%s'%p", table->s->db.str, + table->s->table_name.str, table)); DBUG_RETURN(table); } @@ -1227,9 +1227,9 @@ void THD::close_temporary_table(TABLE *table) { DBUG_ENTER("THD::close_temporary_table"); - DBUG_PRINT("tmptable", ("closing table: '%s'.'%s' 0x%lx alias: '%s'", + DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'%p alias: '%s'", table->s->db.str, table->s->table_name.str, - (long) table, table->alias.c_ptr())); + table, table->alias.c_ptr())); closefrm(table); my_free(table); diff --git a/sql/threadpool_generic.cc b/sql/threadpool_generic.cc index 7365e99dca6..3fdaff0504f 100644 --- a/sql/threadpool_generic.cc +++ b/sql/threadpool_generic.cc @@ -55,6 +55,9 @@ typedef OVERLAPPED_ENTRY native_event; #error threadpool is not available on this platform #endif +#ifdef _MSC_VER +#pragma warning (disable : 4312) +#endif static void io_poll_close(int fd) { @@ -447,10 +450,11 @@ static void* native_event_get_userdata(native_event *event) #elif defined(HAVE_IOCP) + static int io_poll_create() { HANDLE h= CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0); - return (int)h; + return PtrToInt(h); } @@ -476,7 +480,7 @@ int io_poll_start_read(int pollfd, int fd, void *, void *opt) static int io_poll_associate_fd(int pollfd, int fd, void *data, void *opt) { - HANDLE h= CreateIoCompletionPort((HANDLE)fd, (HANDLE)pollfd, (ULONG_PTR)data, 0); + HANDLE h= CreateIoCompletionPort(IntToPtr(fd), IntToPtr(pollfd), (ULONG_PTR)data, 0); if (!h) return -1; return io_poll_start_read(pollfd,fd, 0, opt); @@ -504,7 +508,6 @@ static void* native_event_get_userdata(native_event *event) { return (void *)event->lpCompletionKey; } - #endif @@ -1483,7 +1486,7 @@ static int change_group(TP_connection_generic *c, thread_group_t *new_group) { int ret= 0; - int fd= mysql_socket_getfd(c->thd->net.vio->mysql_socket); + int fd= (int)mysql_socket_getfd(c->thd->net.vio->mysql_socket); DBUG_ASSERT(c->thread_group == old_group); @@ -1511,7 +1514,7 @@ static int change_group(TP_connection_generic *c, int TP_connection_generic::start_io() { - int fd= mysql_socket_getfd(thd->net.vio->mysql_socket); + int fd= (int)mysql_socket_getfd(thd->net.vio->mysql_socket); #ifndef HAVE_IOCP /* diff --git a/sql/udf_example.c b/sql/udf_example.c index ce39b3461b3..cc9a703373c 100644 --- a/sql/udf_example.c +++ b/sql/udf_example.c @@ -1095,7 +1095,7 @@ my_bool is_const_init(UDF_INIT *initid, UDF_ARGS *args, char *message) strmov(message, "IS_CONST accepts only one argument"); return 1; } - initid->ptr= (char*)((args->args[0] != NULL) ? 1UL : 0); + initid->ptr= (char*)((args->args[0] != NULL) ? 
(size_t)1 : (size_t)0); return 0; } diff --git a/sql/uniques.cc b/sql/uniques.cc index e7dc40a5a5f..894e959cace 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -79,7 +79,7 @@ int unique_intersect_write_to_ptrs(uchar* key, element_count count, Unique *uniq Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, - uint size_arg, ulonglong max_in_memory_size_arg, + uint size_arg, size_t max_in_memory_size_arg, uint min_dupl_count_arg) :max_in_memory_size(max_in_memory_size_arg), size(size_arg), @@ -91,7 +91,7 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, if (min_dupl_count_arg) full_size+= sizeof(element_count); with_counters= MY_TEST(min_dupl_count_arg); - init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, + init_tree(&tree, (max_in_memory_size / 16), 0, size, comp_func, NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC)); /* If the following fail's the next add will also fail */ my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16, @@ -306,7 +306,7 @@ static double get_merge_many_buffs_cost(uint *buffer, */ double Unique::get_use_cost(uint *buffer, size_t nkeys, uint key_size, - ulonglong max_in_memory_size, + size_t max_in_memory_size, uint compare_factor, bool intersect_fl, bool *in_memory) { diff --git a/sql/uniques.h b/sql/uniques.h index 0210e879788..efc79953bb6 100644 --- a/sql/uniques.h +++ b/sql/uniques.h @@ -30,7 +30,7 @@ class Unique :public Sql_alloc { DYNAMIC_ARRAY file_ptrs; ulong max_elements; - ulonglong max_in_memory_size; + size_t max_in_memory_size; IO_CACHE file; TREE tree; ulong filtered_out_elems; @@ -46,7 +46,7 @@ public: ulong elements; SORT_INFO sort; Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg, - uint size_arg, ulonglong max_in_memory_size_arg, + uint size_arg, size_t max_in_memory_size_arg, uint min_dupl_count_arg= 0); ~Unique(); ulong elements_in_tree() { return tree.elements_in_tree; } @@ -72,12 +72,12 @@ public: } static double get_use_cost(uint *buffer, size_t nkeys, uint key_size, - ulonglong max_in_memory_size, uint compare_factor, + size_t max_in_memory_size, uint compare_factor, bool intersect_fl, bool *in_memory); inline static int get_cost_calc_buff_size(size_t nkeys, uint key_size, - ulonglong max_in_memory_size) + size_t max_in_memory_size) { - register ulonglong max_elems_in_tree= + register size_t max_elems_in_tree= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size); return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree)); } @@ -86,7 +86,7 @@ public: bool walk(TABLE *table, tree_walk_action action, void *walk_action_arg); uint get_size() const { return size; } - ulonglong get_max_in_memory_size() const { return max_in_memory_size; } + size_t get_max_in_memory_size() const { return max_in_memory_size; } friend int unique_write_to_file(uchar* key, element_count count, Unique *unique); friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique); diff --git a/sql/unireg.cc b/sql/unireg.cc index cfcd0851f81..49724a3f47e 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -452,9 +452,9 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, int2store(pos+6, key->block_size); pos+=8; key_parts+=key->user_defined_key_parts; - DBUG_PRINT("loop", ("flags: %lu key_parts: %d key_part: 0x%lx", + DBUG_PRINT("loop", ("flags: %lu key_parts: %d key_part: %p", key->flags, key->user_defined_key_parts, - (long) key->key_part)); + key->key_part)); for (key_part=key->key_part,key_part_end=key_part+key->user_defined_key_parts ; key_part != key_part_end ; 
key_part++) @@ -620,7 +620,7 @@ static bool pack_header(THD *thd, uchar *forminfo, field->field_name.str)) DBUG_RETURN(1); - totlength+= field->length; + totlength+= (size_t)field->length; com_length+= field->comment.length; /* We mark first TIMESTAMP field with NOW() in DEFAULT or ON UPDATE @@ -950,7 +950,7 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, /* regfield don't have to be deleted as it's allocated on THD::mem_root */ Field *regfield= make_field(&share, thd->mem_root, buff+field->offset + data_offset, - field->length, + (uint32)field->length, null_pos + null_count / 8, null_count & 7, field->pack_flag, diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 15c5c6a603e..e478058c7fd 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -410,7 +410,7 @@ unsigned int ha_archive::pack_row_v1(uchar *record) pos+= length; } } - DBUG_RETURN(pos - record_buffer->buffer); + DBUG_RETURN((int)(pos - record_buffer->buffer)); } /* diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 29bccc4afeb..2733479a4e6 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1564,7 +1564,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) n = strlen(s); if (IsJson(args, i)) - j = strchr(s, '_') - s + 1; + j = (int)(strchr(s, '_') - s + 1); if (j && n > j) { s += j; diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index d63674e2e36..dbf90fb5599 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -534,7 +534,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) if (trace) htrc("PlugSubAlloc: %s\n", g->Message); - throw 1234; + abort(); } /* endif size OS32 code */ /*********************************************************************/ diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index d1e2ae69608..b9c97bd2f40 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -124,8 +124,8 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) DBUG_RETURN(true); } // endif server - DBUG_PRINT("info", ("get_server_by_name returned server at %lx", - (size_t) server)); + DBUG_PRINT("info", ("get_server_by_name returned server at %p", + server)); // TODO: We need to examine which of these can really be NULL Hostname = PlugDup(g, server->host); @@ -681,7 +681,7 @@ bool TDBMYSQL::MakeCommand(PGLOBAL g) strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { - Query->Set(Qrystr, p - qrystr); + Query->Set(Qrystr, (uint)(p - qrystr)); if (qtd && *(p-1) == ' ') { Query->Append('`'); diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 059113e2fa5..e6fbceb4af2 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -614,8 +614,8 @@ int get_connection(MEM_ROOT *mem_root, FEDERATED_SHARE *share) error_num=1; goto error; } - DBUG_PRINT("info", ("get_server_by_name returned server at %lx", - (long unsigned int) server)); + DBUG_PRINT("info", ("get_server_by_name returned server at %p", + server)); /* Most of these should never be empty strings, error handling will @@ -716,15 +716,15 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, share->port= 0; share->socket= 0; - DBUG_PRINT("info", ("share at %lx", (long unsigned int) share)); + DBUG_PRINT("info", ("share at %p", share)); DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length)); 
DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length, table->s->connect_string.str)); share->connection_string= strmake_root(mem_root, table->s->connect_string.str, table->s->connect_string.length); - DBUG_PRINT("info",("parse_url alloced share->connection_string %lx", - (long unsigned int) share->connection_string)); + DBUG_PRINT("info",("parse_url alloced share->connection_string %p", + share->connection_string)); DBUG_PRINT("info",("share->connection_string %s",share->connection_string)); /* @@ -737,9 +737,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, DBUG_PRINT("info", ("share->connection_string %s internal format \ - share->connection_string %lx", + share->connection_string %p", share->connection_string, - (long unsigned int) share->connection_string)); + share->connection_string)); /* ok, so we do a little parsing, but not completely! */ share->parsed= FALSE; @@ -793,8 +793,8 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table, // Add a null for later termination of table name share->connection_string[table->s->connect_string.length]= 0; share->scheme= share->connection_string; - DBUG_PRINT("info",("parse_url alloced share->scheme %lx", - (long unsigned int) share->scheme)); + DBUG_PRINT("info",("parse_url alloced share->scheme %p", + share->scheme)); /* remove addition of null terminator and store length diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 63290e5feda..a761c635305 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -553,8 +553,8 @@ int get_connection(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share) error_num=1; goto error; } - DBUG_PRINT("info", ("get_server_by_name returned server at %lx", - (long unsigned int) server)); + DBUG_PRINT("info", ("get_server_by_name returned server at %p", + server)); /* Most of these should never be empty strings, error handling will @@ -655,15 +655,15 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, share->port= 0; share->socket= 0; - DBUG_PRINT("info", ("share at %lx", (long unsigned int) share)); + DBUG_PRINT("info", ("share at %p", share)); DBUG_PRINT("info", ("Length: %u", (uint) table_s->connect_string.length)); DBUG_PRINT("info", ("String: '%.*s'", (int) table_s->connect_string.length, table_s->connect_string.str)); share->connection_string= strmake_root(mem_root, table_s->connect_string.str, table_s->connect_string.length); - DBUG_PRINT("info",("parse_url alloced share->connection_string %lx", - (long unsigned int) share->connection_string)); + DBUG_PRINT("info",("parse_url alloced share->connection_string %p", + share->connection_string)); DBUG_PRINT("info",("share->connection_string: %s",share->connection_string)); /* @@ -676,9 +676,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, DBUG_PRINT("info", ("share->connection_string: %s internal format " - "share->connection_string: %lx", + "share->connection_string: %p", share->connection_string, - (ulong) share->connection_string)); + share->connection_string)); /* ok, so we do a little parsing, but not completely! 
*/ share->parsed= FALSE; @@ -731,8 +731,8 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share, // Add a null for later termination of table name share->connection_string[table_s->connect_string.length]= 0; share->scheme= share->connection_string; - DBUG_PRINT("info",("parse_url alloced share->scheme: %lx", - (ulong) share->scheme)); + DBUG_PRINT("info",("parse_url alloced share->scheme: %p", + share->scheme)); /* Remove addition of null terminator and store length diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index d10edb52340..a75efb7fb62 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -215,7 +215,7 @@ void ha_heap::update_key_stats() else { ha_rows hash_buckets= file->s->keydef[i].hash_buckets; - ha_rows no_records= hash_buckets ? (file->s->records/hash_buckets) : 2; + ulong no_records= hash_buckets ? (ulong)(file->s->records/hash_buckets) : 2; if (no_records < 2) no_records= 2; key->rec_per_key[key->user_defined_key_parts-1]= no_records; diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index d5491e48114..0a9a1a68458 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -162,6 +162,7 @@ IF(NOT TARGET innobase) RETURN() ENDIF() +INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/compile_flags.cmake) # A GCC bug causes crash when compiling these files on ARM64 with -O1+ # Compile them with -O0 as a workaround. IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") @@ -169,7 +170,6 @@ IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) IF(GCC_VERSION VERSION_LESS 5.2) - INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/compile_flags.cmake) ADD_COMPILE_FLAGS( btr/btr0btr.cc btr/btr0cur.cc @@ -179,5 +179,10 @@ IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") ) ENDIF() ENDIF() +IF(MSVC) + # silence "switch statement contains 'default' but no 'case' label + # on generated file. 
+ TARGET_COMPILE_OPTIONS(innobase PRIVATE "/wd4065") +ENDIF() ADD_SUBDIRECTORY(${CMAKE_SOURCE_DIR}/extra/mariabackup ${CMAKE_BINARY_DIR}/extra/mariabackup) diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 63eac83337f..8295c0573cf 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -5055,7 +5055,6 @@ btr_cur_pessimistic_delete( ulint n_reserved = 0; bool success; ibool ret = FALSE; - ulint level; mem_heap_t* heap; ulint* offsets; #ifdef UNIV_DEBUG @@ -5113,6 +5112,10 @@ btr_cur_pessimistic_delete( #endif /* UNIV_ZIP_DEBUG */ } + if (flags == 0) { + lock_update_delete(block, rec); + } + if (UNIV_UNLIKELY(page_get_n_recs(page) < 2) && UNIV_UNLIKELY(dict_index_get_page(index) != block->page.id.page_no())) { @@ -5127,13 +5130,7 @@ btr_cur_pessimistic_delete( goto return_after_reservations; } - if (flags == 0) { - lock_update_delete(block, rec); - } - - level = btr_page_get_level(page, mtr); - - if (level == 0) { + if (page_is_leaf(page)) { btr_search_update_hash_on_delete(cursor); } else if (UNIV_UNLIKELY(page_rec_is_first(rec, page))) { rec_t* next_rec = page_rec_get_next(rec); @@ -5188,6 +5185,7 @@ btr_cur_pessimistic_delete( on a page, we have to change the parent node pointer so that it is equal to the new leftmost node pointer on the page */ + ulint level = btr_page_get_level(page, mtr); btr_node_ptr_delete(index, block, mtr); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 1c67a5a0dfc..0a4d4d276e9 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -790,7 +790,7 @@ buf_page_is_checksum_valid_none( && srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_NONE) { fprintf(log_file, "page::%llu; none checksum: calculated" - " = " ULINTPF "; recorded checksum_field1 = " ULINTPF + " = %lu; recorded checksum_field1 = " ULINTPF " recorded checksum_field2 = " ULINTPF "\n", cur_page_num, BUF_NO_CHECKSUM_MAGIC, checksum_field1, checksum_field2); diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 3c2b697b0f4..f7ea768f5c1 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -292,8 +292,8 @@ buf_read_ahead_random( node != NULL; node = UT_LIST_GET_NEXT(chain, node)) { - size += os_file_get_size(node->handle) - / page_size.physical(); + size += ulint(os_file_get_size(node->handle) + / page_size.physical()); } ut_ad(size == space->size); diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc index 710ef122882..50a55172c59 100644 --- a/storage/innobase/dict/dict0boot.cc +++ b/storage/innobase/dict/dict0boot.cc @@ -79,7 +79,9 @@ dict_hdr_get_new_id( mtr_start(&mtr); if (table) { - dict_disable_redo_if_temporary(table, &mtr); + if (table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } } else if (disable_redo) { /* In non-read-only mode we need to ensure that space-id header page is written to disk else if page is removed from buffer @@ -87,8 +89,8 @@ dict_hdr_get_new_id( to another tablespace. This is not a case with read-only mode as there is no new object that is created except temporary tablespace. */ - mtr_set_log_mode(&mtr, - (srv_read_only_mode ? MTR_LOG_NONE : MTR_LOG_NO_REDO)); + mtr.set_log_mode(srv_read_only_mode + ? 
MTR_LOG_NONE : MTR_LOG_NO_REDO); } /* Server started and let's say space-id = x diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index 6429408c80d..16d57bb67f5 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2550,7 +2551,6 @@ replacing what was there previously. @param[in] flags Tablespace flags @param[in] path Tablespace path @param[in] trx Transaction -@param[in] commit If true, commit the transaction @return error code or DB_SUCCESS */ dberr_t dict_replace_tablespace_in_dictionary( @@ -2558,8 +2558,7 @@ dict_replace_tablespace_in_dictionary( const char* name, ulint flags, const char* path, - trx_t* trx, - bool commit) + trx_t* trx) { if (!srv_sys_tablespaces_open) { /* Startup procedure is not yet ready for updates. */ @@ -2608,11 +2607,6 @@ dict_replace_tablespace_in_dictionary( return(error); } - if (commit) { - trx->op_info = "committing tablespace and datafile definition"; - trx_commit(trx); - } - trx->op_info = ""; return(error); diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index a36aabd1810..7627fdac0d5 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -1274,31 +1274,6 @@ dict_table_add_system_columns( #endif } -/** Mark if table has big rows. -@param[in,out] table table handler */ -void -dict_table_set_big_rows( - dict_table_t* table) -{ - ulint row_len = 0; - for (ulint i = 0; i < table->n_def; i++) { - ulint col_len = dict_col_get_max_size( - dict_table_get_nth_col(table, i)); - - row_len += col_len; - - /* If we have a single unbounded field, or several gigantic - fields, mark the maximum row size as BIG_ROW_SIZE. */ - if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) { - row_len = BIG_ROW_SIZE; - - break; - } - } - - table->big_rows = (row_len >= BIG_ROW_SIZE) ? TRUE : FALSE; -} - /**********************************************************************//** Adds a table object to the dictionary cache. */ void @@ -1321,8 +1296,6 @@ dict_table_add_to_cache( fold = ut_fold_string(table->name.m_name); id_fold = ut_fold_ull(table->id); - dict_table_set_big_rows(table); - /* Look for a table with the same name: error if such exists */ { dict_table_t* table2; diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 15a7693eec7..532d7ace740 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -990,7 +990,7 @@ dict_replace_tablespace_and_filepath( SYS_DATAFILES. Assume the record is also missing in SYS_TABLESPACES. Insert records into them both. 
*/ err = dict_replace_tablespace_in_dictionary( - space_id, name, fsp_flags, filepath, trx, false); + space_id, name, fsp_flags, filepath, trx); trx_commit_for_mysql(trx); trx->dict_operation_lock_mode = 0; diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 17e66b3d99c..99fb115ea2c 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -887,7 +887,6 @@ dict_stats_update_transient_for_index( ulint size; mtr_start(&mtr); - dict_disable_redo_if_temporary(index->table, &mtr); mtr_s_lock(dict_index_get_lock(index), &mtr); diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index 5e7e5a2f21b..08a832a4cd5 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -1324,7 +1324,7 @@ fil_crypt_realloc_iops( if (10 * state->cnt_waited > state->batch) { /* if we waited more than 10% re-estimate max_iops */ ulint avg_wait_time_us = - state->sum_waited_us / state->cnt_waited; + ulint(state->sum_waited_us / state->cnt_waited); if (avg_wait_time_us == 0) { avg_wait_time_us = 1; // prevent division by zero @@ -1669,7 +1669,7 @@ fil_crypt_get_page_throttle_func( /* average page load */ ulint add_sleeptime_ms = 0; - ulint avg_wait_time_us = state->sum_waited_us / state->cnt_waited; + ulint avg_wait_time_us =ulint(state->sum_waited_us / state->cnt_waited); ulint alloc_wait_us = 1000000 / state->allocated_iops; if (avg_wait_time_us < alloc_wait_us) { diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index f34619663ad..2897d5f9be8 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -1062,32 +1062,23 @@ fil_space_extend_must_retry( const ulint page_size = pageSize.physical(); #ifdef _WIN32 - /* Logically or physically extend the file with zero bytes, - depending on whether it is sparse. */ - - /* FIXME: Call DeviceIoControl(node->handle, FSCTL_SET_SPARSE, ...) - when opening a file when FSP_FLAGS_HAS_PAGE_COMPRESSION(). */ - { - FILE_END_OF_FILE_INFO feof; - /* fil_read_first_page() expects UNIV_PAGE_SIZE bytes. - fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes. - Do not shrink short ROW_FORMAT=COMPRESSED files. */ - feof.EndOfFile.QuadPart = std::max( + os_offset_t new_file_size = + std::max( os_offset_t(size - file_start_page_no) * page_size, - os_offset_t(FIL_IBD_FILE_INITIAL_SIZE - * UNIV_PAGE_SIZE)); - *success = SetFileInformationByHandle(node->handle, - FileEndOfFileInfo, - &feof, sizeof feof); - if (!*success) { - ib::error() << "extending file '" << node->name - << "' from " - << os_offset_t(node->size) * page_size - << " to " << feof.EndOfFile.QuadPart - << " bytes failed with " << GetLastError(); - } else { - last_page_no = size; - } + os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE)); + + /* os_file_change_size_win32() handles both compressed(sparse) + and normal files correctly. 
+ It allocates physical storage for normal files and "virtual" + storage for sparse ones.*/ + *success = os_file_change_size_win32(node->name, + node->handle, new_file_size); + + if (*success) { + last_page_no = size; + } else { + ib::error() << "extending file '" << node->name + << " to size " << new_file_size << " failed"; } #else /* We will logically extend the file with ftruncate() if @@ -1232,7 +1223,8 @@ fil_space_extend_must_retry( default: ut_ad(space->purpose == FIL_TYPE_TABLESPACE || space->purpose == FIL_TYPE_IMPORT); - if (space->purpose == FIL_TYPE_TABLESPACE) { + if (space->purpose == FIL_TYPE_TABLESPACE + && !space->is_being_truncated) { fil_flush_low(space); } return(false); @@ -3843,7 +3835,19 @@ fil_ibd_create( return(DB_ERROR); } - success= false; + bool punch_hole = false; + +#ifdef _WIN32 + + if (FSP_FLAGS_HAS_PAGE_COMPRESSION(flags)) { + punch_hole = os_file_set_sparse_win32(file); + } + + success = os_file_change_size_win32(path, file, size * UNIV_PAGE_SIZE); + +#else + + success= false; #ifdef HAVE_POSIX_FALLOCATE /* Extend the file using posix_fallocate(). This is required by @@ -3882,7 +3886,7 @@ fil_ibd_create( be lost after this call, if it succeeds. In this case the file should be full of NULs. */ - bool punch_hole = os_is_sparse_file_supported(path, file); + punch_hole = os_is_sparse_file_supported(file); if (punch_hole) { @@ -3894,6 +3898,7 @@ fil_ibd_create( punch_hole = false; } } +#endif ulint block_size = os_file_get_block_size(file, path); diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index f2a7b7bd291..2dc9abd2fca 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -100,6 +100,9 @@ fil_compress_page( int comp_level = int(level); ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; ulint write_size = 0; +#if HAVE_LZO + lzo_uint write_size_lzo = write_size; +#endif /* Cache to avoid change during function execution */ ulint comp_method = innodb_compression_algorithm; bool allocated = false; @@ -182,7 +185,9 @@ fil_compress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE); + buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE); + + write_size = write_size_lzo; if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { goto err_exit; @@ -516,8 +521,11 @@ fil_decompress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: { ulint olen = 0; + lzo_uint olen_lzo = olen; err = lzo1x_decompress((const unsigned char *)buf+header_len, - actual_size,(unsigned char *)in_buf, &olen, NULL); + actual_size,(unsigned char *)in_buf, &olen_lzo, NULL); + + olen = olen_lzo; if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { len = olen; diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index dd8de511b21..09012ad4101 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1784,7 +1784,7 @@ fts_create_one_common_table( FTS_CONFIG_TABLE_VALUE_COL_LEN); } - error = row_create_table_for_mysql(new_table, trx, false, + error = row_create_table_for_mysql(new_table, trx, FIL_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY); if (error == DB_SUCCESS) { @@ -2001,7 +2001,7 @@ fts_create_one_index_table( (DATA_MTYPE_MAX << 16) | DATA_UNSIGNED | DATA_NOT_NULL, FTS_INDEX_ILIST_LEN); - error = row_create_table_for_mysql(new_table, trx, false, + error = row_create_table_for_mysql(new_table, trx, 
FIL_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY); if (error == DB_SUCCESS) { diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index cfe8038c7cb..1cb10cfb557 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -49,9 +49,6 @@ this program; if not, write to the Free Software Foundation, Inc., #include <sql_class.h> #include <sql_show.h> #include <sql_table.h> -#include <sql_tablespace.h> -// MySQL 5.7 Header */ -// #include <sql_thd_internal_api.h> #include <table_cache.h> #include <my_check_opt.h> #include <my_bitmap.h> @@ -146,12 +143,6 @@ TABLE *get_purge_table(THD *thd); #include <string> #include <sstream> -/* for ha_innopart, Native InnoDB Partitioning. */ -/* JAN: TODO: MySQL 5.7 Native InnoDB Partitioning */ -#ifdef HAVE_HA_INNOPART_H -#include "ha_innopart.h" -#endif - #include <mysql/plugin.h> #include <mysql/service_wsrep.h> @@ -1303,16 +1294,6 @@ innobase_release_savepoint( static void innobase_checkpoint_request(handlerton *hton, void *cookie); -/************************************************************************//** -Function for constructing an InnoDB table handler instance. */ -static -handler* -innobase_create_handler( -/*====================*/ - handlerton* hton, /*!< in/out: handlerton for InnoDB */ - TABLE_SHARE* table, - MEM_ROOT* mem_root); - /** @brief Initialize the default value of innodb_commit_concurrency. Once InnoDB is running, the innodb_commit_concurrency must not change @@ -1560,26 +1541,6 @@ innobase_create_handler( TABLE_SHARE* table, MEM_ROOT* mem_root) { -#ifdef MYSQL_INNODB_PARTITIONING - /* If the table: - 1) have type InnoDB (not the generic partition handlerton) - 2) have partitioning defined - Then return the native partitioning handler ha_innopart - else return normal ha_innobase handler. */ - if (table - && table->db_type() == innodb_hton_ptr // 1) - && table->partition_info_str // 2) - && table->partition_info_str_len) { // 2) - ha_innopart* file = new (mem_root) ha_innopart(hton, table); - if (file && file->init_partitioning(mem_root)) - { - delete file; - return(NULL); - } - return(file); - } -#endif - return(new (mem_root) ha_innobase(hton, table)); } @@ -2105,7 +2066,7 @@ convert_error_code_to_mysql( bool comp = !!(flags & DICT_TF_COMPACT); ulint free_space = page_get_free_space_of_empty(comp) / 2; - if (free_space >= (comp ? COMPRESSED_REC_MAX_DATA_SIZE : + if (free_space >= ulint(comp ? COMPRESSED_REC_MAX_DATA_SIZE : REDUNDANT_REC_MAX_DATA_SIZE)) { free_space = (comp ? COMPRESSED_REC_MAX_DATA_SIZE : REDUNDANT_REC_MAX_DATA_SIZE) - 1; @@ -2859,48 +2820,6 @@ check_trx_exists( return(trx); } -#ifdef MYSQL_REPLACE_TRX_IN_THD -/** InnoDB transaction object that is currently associated with THD is -replaced with that of the 2nd argument. The previous value is -returned through the 3rd argument's buffer, unless it's NULL. When -the buffer is not provided (value NULL) that should mean the caller -restores previously saved association so the current trx has to be -additionally freed from all association with MYSQL. 
- -@param[in,out] thd MySQL thread handle -@param[in] new_trx_arg replacement trx_t -@param[in,out] ptr_trx_arg pointer to a buffer to store old trx_t */ -static -void -innodb_replace_trx_in_thd( - THD* thd, - void* new_trx_arg, - void** ptr_trx_arg) -{ - trx_t*& trx = thd_to_trx(thd); - - ut_ad(new_trx_arg == NULL - || (((trx_t*) new_trx_arg)->mysql_thd == thd - && !((trx_t*) new_trx_arg)->is_recovered)); - - if (ptr_trx_arg) { - *ptr_trx_arg = trx; - - ut_ad(trx == NULL - || (trx->mysql_thd == thd && !trx->is_recovered)); - - } else if (trx->state == TRX_STATE_NOT_STARTED) { - ut_ad(thd == trx->mysql_thd); - trx_free_for_mysql(trx); - } else { - ut_ad(thd == trx->mysql_thd); - ut_ad(trx_state_eq(trx, TRX_STATE_PREPARED)); - trx_disconnect_prepared(trx); - } - trx = static_cast<trx_t*>(new_trx_arg); -} -#endif /* MYSQL_REPLACE_TRX_IN_THD */ - /************************************************************************* Gets current trx. */ trx_t* @@ -3759,11 +3678,6 @@ innobase_init( innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; -#ifdef MYSQL_REPLACE_TRX_IN_THD - innobase_hton->replace_native_transaction_in_thd = - innodb_replace_trx_in_thd; -#endif - #ifdef WITH_WSREP innobase_hton->abort_transaction=wsrep_abort_transaction; innobase_hton->set_checkpoint=innobase_wsrep_set_checkpoint; @@ -4394,7 +4308,7 @@ innobase_commit_low( } trx->will_lock = 0; #ifdef WITH_WSREP - if (wsrep_on(thd)) { thd_proc_info(thd, tmp); } + if (thd && wsrep_on(thd)) { thd_proc_info(thd, tmp); } #endif /* WITH_WSREP */ } @@ -5285,19 +5199,6 @@ ha_innobase::table_flags() const THD* thd = ha_thd(); handler::Table_flags flags = m_int_table_flags; - /* If querying the table flags when no table_share is given, - then we must check if the table to be created/checked is partitioned. - */ - if (table_share == NULL) { - /* JAN: TODO: MySQL 5.7 Partitioning && thd_get_work_part_info(thd) != NULL) { */ - /* Currently ha_innopart does not support - all InnoDB features such as GEOMETRY, FULLTEXT etc. */ - /* JAN: TODO: MySQL 5.7 - flags &= ~(HA_INNOPART_DISABLED_TABLE_FLAGS); - } - */ - } - /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. 
*/ @@ -11621,9 +11522,9 @@ err_col: } else { if (err == DB_SUCCESS) { err = row_create_table_for_mysql( - table, m_trx, false, + table, m_trx, (fil_encryption_t)options->encryption, - options->encryption_key_id); + (uint32_t)options->encryption_key_id); } DBUG_EXECUTE_IF("ib_crash_during_create_for_encryption", @@ -15187,7 +15088,7 @@ ha_innobase::update_table_comment( { uint length = (uint) strlen(comment); char* str=0; - long flen; + size_t flen; std::string fk_str; /* We do not know if MySQL can call this function before calling @@ -15215,9 +15116,7 @@ ha_innobase::update_table_comment( flen = fk_str.length(); - if (flen < 0) { - flen = 0; - } else if (length + flen + 3 > 64000) { + if (length + flen + 3 > 64000) { flen = 64000 - 3 - length; } /* allocate buffer for the full string */ @@ -15306,7 +15205,7 @@ get_foreign_key_info( ptr = dict_remove_db_name(foreign->id); f_key_info.foreign_id = thd_make_lex_string( - thd, 0, ptr, (uint) strlen(ptr), 1); + thd, 0, ptr, strlen(ptr), 1); /* Name format: database name, '/', table name, '\0' */ @@ -15318,13 +15217,13 @@ get_foreign_key_info( len = filename_to_tablename(tmp_buff, name_buff, sizeof(name_buff)); f_key_info.referenced_db = thd_make_lex_string( - thd, 0, name_buff, static_cast<unsigned int>(len), 1); + thd, 0, name_buff, len, 1); /* Referenced (parent) table name */ ptr = dict_remove_db_name(foreign->referenced_table_name); len = filename_to_tablename(ptr, name_buff, sizeof(name_buff)); f_key_info.referenced_table = thd_make_lex_string( - thd, 0, name_buff, static_cast<unsigned int>(len), 1); + thd, 0, name_buff, len, 1); /* Dependent (child) database name */ len = dict_get_db_name_len(foreign->foreign_table_name); @@ -15334,22 +15233,22 @@ get_foreign_key_info( len = filename_to_tablename(tmp_buff, name_buff, sizeof(name_buff)); f_key_info.foreign_db = thd_make_lex_string( - thd, 0, name_buff, static_cast<unsigned int>(len), 1); + thd, 0, name_buff, len, 1); /* Dependent (child) table name */ ptr = dict_remove_db_name(foreign->foreign_table_name); len = filename_to_tablename(ptr, name_buff, sizeof(name_buff)); f_key_info.foreign_table = thd_make_lex_string( - thd, 0, name_buff, static_cast<unsigned int>(len), 1); + thd, 0, name_buff, len, 1); do { ptr = foreign->foreign_col_names[i]; name = thd_make_lex_string(thd, name, ptr, - (uint) strlen(ptr), 1); + strlen(ptr), 1); f_key_info.foreign_fields.push_back(name); ptr = foreign->referenced_col_names[i]; name = thd_make_lex_string(thd, name, ptr, - (uint) strlen(ptr), 1); + strlen(ptr), 1); f_key_info.referenced_fields.push_back(name); } while (++i < foreign->n_fields); @@ -15406,7 +15305,7 @@ get_foreign_key_info( thd, f_key_info.referenced_key_name, foreign->referenced_index->name, - (uint) strlen(foreign->referenced_index->name), + strlen(foreign->referenced_index->name), 1); } else { referenced_key_name = NULL; @@ -21467,8 +21366,8 @@ innobase_find_mysql_table_for_vc( char dbname[MAX_DATABASE_NAME_LEN + 1]; char tbname[MAX_TABLE_NAME_LEN + 1]; char* name = table->name.m_name; - uint dbnamelen = dict_get_db_name_len(name); - uint tbnamelen = strlen(name) - dbnamelen - 1; + uint dbnamelen = (uint) dict_get_db_name_len(name); + uint tbnamelen = (uint) strlen(name) - dbnamelen - 1; char t_dbname[MAX_DATABASE_NAME_LEN + 1]; char t_tbname[MAX_TABLE_NAME_LEN + 1]; @@ -21483,7 +21382,6 @@ innobase_find_mysql_table_for_vc( if (is_part != NULL) { *is_part = '\0'; - tbnamelen = is_part - tbname; } dbnamelen = filename_to_tablename(dbname, t_dbname, @@ -21993,7 +21891,7 @@ 
innobase_convert_to_filename_charset( return(static_cast<uint>(strconvert( cs_from, from, strlen(from), - cs_to, to, static_cast<size_t>(len), &errors))); + cs_to, to, static_cast<uint>(len), &errors))); } /********************************************************************** @@ -22012,7 +21910,7 @@ innobase_convert_to_system_charset( return(static_cast<uint>(strconvert( cs1, from, strlen(from), - cs2, to, static_cast<size_t>(len), errors))); + cs2, to, static_cast<uint>(len), errors))); } /********************************************************************** diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index bd241f6db82..f4edd6131cf 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -63,11 +63,6 @@ static const char *MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN= "INPLACE ADD or DROP of virtual columns cannot be " "combined with other ALTER TABLE actions"; -/* For supporting Native InnoDB Partitioning. */ -/* JAN: TODO: MySQL 5.7 -#include "partition_info.h" -#include "ha_innopart.h" -*/ /** Operations for creating secondary indexes (no rebuild needed) */ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_CREATE = Alter_inplace_info::ADD_INDEX @@ -326,7 +321,7 @@ my_error_innodb( bool comp = !!(flags & DICT_TF_COMPACT); ulint free_space = page_get_free_space_of_empty(comp) / 2; - if (free_space >= (comp ? COMPRESSED_REC_MAX_DATA_SIZE : + if (free_space >= ulint(comp ? COMPRESSED_REC_MAX_DATA_SIZE : REDUNDANT_REC_MAX_DATA_SIZE)) { free_space = (comp ? COMPRESSED_REC_MAX_DATA_SIZE : REDUNDANT_REC_MAX_DATA_SIZE) - 1; @@ -414,12 +409,10 @@ innobase_need_rebuild( == Alter_inplace_info::CHANGE_CREATE_OPTION && !(ha_alter_info->create_info->used_fields & (HA_CREATE_USED_ROW_FORMAT - | HA_CREATE_USED_KEY_BLOCK_SIZE))) { - // JAN: TODO: MySQL 5.7 - // | HA_CREATE_USED_TABLESPACE))) { + | HA_CREATE_USED_KEY_BLOCK_SIZE))) { /* Any other CHANGE_CREATE_OPTION than changing - ROW_FORMAT, KEY_BLOCK_SIZE or TABLESPACE can be done - without rebuilding the table. */ + ROW_FORMAT or KEY_BLOCK_SIZE can be done without + rebuilding the table. */ return(false); } @@ -4691,7 +4684,7 @@ prepare_inplace_alter_table_dict( } error = row_create_table_for_mysql( - ctx->new_table, ctx->trx, false, mode, key_id); + ctx->new_table, ctx->trx, mode, key_id); switch (error) { dict_table_t* temp_table; @@ -9061,373 +9054,6 @@ foreign_fail: DBUG_RETURN(false); } - -/** Helper class for in-place alter, see handler.h */ -class ha_innopart_inplace_ctx : public inplace_alter_handler_ctx -{ -/* Only used locally in this file, so have everything public for -conveniance. */ -public: - /** Total number of partitions. */ - uint m_tot_parts; - /** Array of inplace contexts for all partitions. */ - inplace_alter_handler_ctx** ctx_array; - /** Array of prebuilt for all partitions. */ - row_prebuilt_t** prebuilt_array; - - ha_innopart_inplace_ctx(THD *thd, uint tot_parts) - : inplace_alter_handler_ctx(), - m_tot_parts(tot_parts), - ctx_array(), - prebuilt_array() - {} - - ~ha_innopart_inplace_ctx() - { - if (ctx_array) { - for (uint i = 0; i < m_tot_parts; i++) { - delete ctx_array[i]; - } - ut_free(ctx_array); - } - if (prebuilt_array) { - /* First entry is the original prebuilt! */ - for (uint i = 1; i < m_tot_parts; i++) { - /* Don't close the tables. 
*/ - prebuilt_array[i]->table = NULL; - row_prebuilt_free(prebuilt_array[i], false); - } - ut_free(prebuilt_array); - } - } -}; - -#ifdef MYSQL_INNODB_PARTITIONING - -/** Check if supported inplace alter table. -@param[in] altered_table Altered MySQL table. -@param[in] ha_alter_info Information about inplace operations to do. -@return Lock level, not supported or error */ -enum_alter_inplace_result -ha_innopart::check_if_supported_inplace_alter( - TABLE* altered_table, - Alter_inplace_info* ha_alter_info) -{ - DBUG_ENTER("ha_innopart::check_if_supported_inplace_alter"); - DBUG_ASSERT(ha_alter_info->handler_ctx == NULL); - - /* Not supporting these for partitioned tables yet! */ - - /* FK not yet supported. */ - if (ha_alter_info->handler_flags - & (Alter_inplace_info::ADD_FOREIGN_KEY - | Alter_inplace_info::DROP_FOREIGN_KEY)) { - - ha_alter_info->unsupported_reason = innobase_get_err_msg( - ER_FOREIGN_KEY_ON_PARTITIONED); - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } - /* FTS not yet supported either. */ - if ((ha_alter_info->handler_flags - & Alter_inplace_info::ADD_INDEX)) { - - for (uint i = 0; i < ha_alter_info->index_add_count; i++) { - const KEY* key = - &ha_alter_info->key_info_buffer[ - ha_alter_info->index_add_buffer[i]]; - if (key->flags & HA_FULLTEXT) { - DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK - & ~(HA_FULLTEXT - | HA_PACK_KEY - | HA_GENERATED_KEY - | HA_BINARY_PACK_KEY))); - ha_alter_info->unsupported_reason = - innobase_get_err_msg( - ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING); - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } - } - } - /* We cannot allow INPLACE to change order of KEY partitioning fields! */ - if ((ha_alter_info->handler_flags - & Alter_inplace_info::ALTER_STORED_COLUMN_ORDER) - && !m_part_info->same_key_column_order( - &ha_alter_info->alter_info->create_list)) { - - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } - - /* Cannot allow INPLACE for drop and create PRIMARY KEY if partition is - on Primary Key - PARTITION BY KEY() */ - if ((ha_alter_info->handler_flags - & (Alter_inplace_info::ADD_PK_INDEX - | Alter_inplace_info::DROP_PK_INDEX))) { - - /* Check partition by key(). */ - if ((m_part_info->part_type == HASH_PARTITION) - && m_part_info->list_of_part_fields - && m_part_info->part_field_list.is_empty()) { - - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } - - /* Check sub-partition by key(). */ - if ((m_part_info->subpart_type == HASH_PARTITION) - && m_part_info->list_of_subpart_fields - && m_part_info->subpart_field_list.is_empty()) { - - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } - } - - /* Check for PK and UNIQUE should already be done when creating the - new table metadata. - (fix_partition_info/check_primary_key+check_unique_key) */ - - set_partition(0); - DBUG_RETURN(ha_innobase::check_if_supported_inplace_alter(altered_table, - ha_alter_info)); -} - -/** Prepare inplace alter table. -Allows InnoDB to update internal structures with concurrent -writes blocked (provided that check_if_supported_inplace_alter() -did not return HA_ALTER_INPLACE_NO_LOCK). -This will be invoked before inplace_alter_table(). -@param[in] altered_table TABLE object for new version of table. -@param[in] ha_alter_info Structure describing changes to be done -by ALTER TABLE and holding data used during in-place alter. -@retval true Failure. -@retval false Success. 
*/ -bool -ha_innopart::prepare_inplace_alter_table( - TABLE* altered_table, - Alter_inplace_info* ha_alter_info) -{ - THD* thd; - ha_innopart_inplace_ctx* ctx_parts; - bool res = true; - DBUG_ENTER("ha_innopart::prepare_inplace_alter_table"); - DBUG_ASSERT(ha_alter_info->handler_ctx == NULL); - - thd = ha_thd(); - - /* Clean up all ins/upd nodes. */ - clear_ins_upd_nodes(); - /* Based on Sql_alloc class, return NULL for new on failure. */ - ctx_parts = new ha_innopart_inplace_ctx(thd, m_tot_parts); - if (!ctx_parts) { - DBUG_RETURN(HA_ALTER_ERROR); - } - - uint ctx_array_size = sizeof(inplace_alter_handler_ctx*) - * (m_tot_parts + 1); - ctx_parts->ctx_array = - static_cast<inplace_alter_handler_ctx**>( - ut_malloc(ctx_array_size, - mem_key_partitioning)); - if (!ctx_parts->ctx_array) { - DBUG_RETURN(HA_ALTER_ERROR); - } - - /* Set all to NULL, including the terminating one. */ - memset(ctx_parts->ctx_array, 0, ctx_array_size); - - ctx_parts->prebuilt_array = static_cast<row_prebuilt_t**>( - ut_malloc(sizeof(row_prebuilt_t*) - * m_tot_parts, - mem_key_partitioning)); - if (!ctx_parts->prebuilt_array) { - DBUG_RETURN(HA_ALTER_ERROR); - } - /* For the first partition use the current prebuilt. */ - ctx_parts->prebuilt_array[0] = m_prebuilt; - /* Create new prebuilt for the rest of the partitions. - It is needed for the current implementation of - ha_innobase::commit_inplace_alter_table(). */ - for (uint i = 1; i < m_tot_parts; i++) { - row_prebuilt_t* tmp_prebuilt; - tmp_prebuilt = row_create_prebuilt( - m_part_share->get_table_part(i), - table_share->reclength); - /* Use same trx as original prebuilt. */ - tmp_prebuilt->trx = m_prebuilt->trx; - ctx_parts->prebuilt_array[i] = tmp_prebuilt; - } - - const char* save_tablespace = - ha_alter_info->create_info->tablespace; - - const char* save_data_file_name = - ha_alter_info->create_info->data_file_name; - - for (uint i = 0; i < m_tot_parts; i++) { - m_prebuilt = ctx_parts->prebuilt_array[i]; - m_prebuilt_ptr = ctx_parts->prebuilt_array + i; - ha_alter_info->handler_ctx = ctx_parts->ctx_array[i]; - set_partition(i); - - /* Set the tablespace and data_file_name value of the - alter_info to the tablespace value and data_file_name - value that was existing for the partition originally, - so that for ALTER TABLE the tablespace clause in create - option is ignored for existing partitions, and later - set it back to its old value */ - - ha_alter_info->create_info->tablespace = - m_prebuilt->table->tablespace; - ha_alter_info->create_info->data_file_name = - m_prebuilt->table->data_dir_path; - - res = ha_innobase::prepare_inplace_alter_table(altered_table, - ha_alter_info); - update_partition(i); - ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx; - if (res) { - break; - } - } - m_prebuilt = ctx_parts->prebuilt_array[0]; - m_prebuilt_ptr = &m_prebuilt; - ha_alter_info->handler_ctx = ctx_parts; - ha_alter_info->group_commit_ctx = ctx_parts->ctx_array; - ha_alter_info->create_info->tablespace = save_tablespace; - ha_alter_info->create_info->data_file_name = save_data_file_name; - DBUG_RETURN(res); -} - -/** Inplace alter table. -Alter the table structure in-place with operations -specified using Alter_inplace_info. -The level of concurrency allowed during this operation depends -on the return value from check_if_supported_inplace_alter(). -@param[in] altered_table TABLE object for new version of table. -@param[in] ha_alter_info Structure describing changes to be done -by ALTER TABLE and holding data used during in-place alter. -@retval true Failure. 
-@retval false Success. */ -bool -ha_innopart::inplace_alter_table( - TABLE* altered_table, - Alter_inplace_info* ha_alter_info) -{ - bool res = true; - ha_innopart_inplace_ctx* ctx_parts; - - ctx_parts = static_cast<ha_innopart_inplace_ctx*>( - ha_alter_info->handler_ctx); - for (uint i = 0; i < m_tot_parts; i++) { - m_prebuilt = ctx_parts->prebuilt_array[i]; - ha_alter_info->handler_ctx = ctx_parts->ctx_array[i]; - set_partition(i); - res = ha_innobase::inplace_alter_table(altered_table, - ha_alter_info); - ut_ad(ctx_parts->ctx_array[i] == ha_alter_info->handler_ctx); - ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx; - if (res) { - break; - } - } - m_prebuilt = ctx_parts->prebuilt_array[0]; - ha_alter_info->handler_ctx = ctx_parts; - return(res); -} - -/** Commit or rollback inplace alter table. -Commit or rollback the changes made during -prepare_inplace_alter_table() and inplace_alter_table() inside -the storage engine. Note that the allowed level of concurrency -during this operation will be the same as for -inplace_alter_table() and thus might be higher than during -prepare_inplace_alter_table(). (E.g concurrent writes were -blocked during prepare, but might not be during commit). -@param[in] altered_table TABLE object for new version of table. -@param[in] ha_alter_info Structure describing changes to be done -by ALTER TABLE and holding data used during in-place alter. -@param[in] commit true => Commit, false => Rollback. -@retval true Failure. -@retval false Success. */ -bool -ha_innopart::commit_inplace_alter_table( - TABLE* altered_table, - Alter_inplace_info* ha_alter_info, - bool commit) -{ - bool res = false; - ha_innopart_inplace_ctx* ctx_parts; - - ctx_parts = static_cast<ha_innopart_inplace_ctx*>( - ha_alter_info->handler_ctx); - ut_ad(ctx_parts); - ut_ad(ctx_parts->prebuilt_array); - ut_ad(ctx_parts->prebuilt_array[0] == m_prebuilt); - if (commit) { - /* Commit is done through first partition (group commit). */ - ut_ad(ha_alter_info->group_commit_ctx == ctx_parts->ctx_array); - ha_alter_info->handler_ctx = ctx_parts->ctx_array[0]; - set_partition(0); - res = ha_innobase::commit_inplace_alter_table(altered_table, - ha_alter_info, - commit); - ut_ad(res || !ha_alter_info->group_commit_ctx); - goto end; - } - /* Rollback is done for each partition. */ - for (uint i = 0; i < m_tot_parts; i++) { - m_prebuilt = ctx_parts->prebuilt_array[i]; - ha_alter_info->handler_ctx = ctx_parts->ctx_array[i]; - set_partition(i); - if (ha_innobase::commit_inplace_alter_table(altered_table, - ha_alter_info, commit)) { - res = true; - } - ut_ad(ctx_parts->ctx_array[i] == ha_alter_info->handler_ctx); - ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx; - } -end: - /* Move the ownership of the new tables back to - the m_part_share. */ - ha_innobase_inplace_ctx* ctx; - for (uint i = 0; i < m_tot_parts; i++) { - /* TODO: Fix to only use one prebuilt (i.e. make inplace - alter partition aware instead of using multiple prebuilt - copies... */ - ctx = static_cast<ha_innobase_inplace_ctx*>( - ctx_parts->ctx_array[i]); - if (ctx) { - m_part_share->set_table_part(i, ctx->prebuilt->table); - ctx->prebuilt->table = NULL; - ctx_parts->prebuilt_array[i] = ctx->prebuilt; - } - } - /* The above juggling of prebuilt must be reset here. */ - m_prebuilt = ctx_parts->prebuilt_array[0]; - m_prebuilt->table = m_part_share->get_table_part(0); - ha_alter_info->handler_ctx = ctx_parts; - return(res); -} - -/** Notify the storage engine that the table structure (.frm) has -been updated. 
- -ha_partition allows inplace operations that also upgrades the engine -if it supports partitioning natively. So if this is the case then -we will remove the .par file since it is not used with ha_innopart -(we use the internal data dictionary instead). */ -void -ha_innopart::notify_table_changed() -{ - char tmp_par_path[FN_REFLEN + 1]; - strxnmov(tmp_par_path, FN_REFLEN, table->s->normalized_path.str, - ".par", NullS); - - if (my_access(tmp_par_path, W_OK) == 0) - { - my_delete(tmp_par_path, MYF(0)); - } -} -#endif /* MYSQL_INNODB_PARTITIONING */ - /** @param thd the session @param start_value the lower bound diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h index 76eb48d1e7e..86a4bcf23a3 100644 --- a/storage/innobase/include/dict0crea.h +++ b/storage/innobase/include/dict0crea.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -242,7 +243,6 @@ replacing what was there previously. @param[in] flags Tablespace flags @param[in] path Tablespace path @param[in] trx Transaction -@param[in] commit If true, commit the transaction @return error code or DB_SUCCESS */ dberr_t dict_replace_tablespace_in_dictionary( @@ -250,8 +250,7 @@ dict_replace_tablespace_in_dictionary( const char* name, ulint flags, const char* path, - trx_t* trx, - bool commit); + trx_t* trx); /** Delete records from SYS_TABLESPACES and SYS_DATAFILES associated with a particular tablespace ID. diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index 57b625f317c..736419f9dd7 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -386,13 +386,6 @@ dict_table_add_system_columns( dict_table_t* table, /*!< in/out: table */ mem_heap_t* heap) /*!< in: temporary heap */ MY_ATTRIBUTE((nonnull)); - -/** Mark if table has big rows. -@param[in,out] table table handler */ -void -dict_table_set_big_rows( - dict_table_t* table) - MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Adds a table object to the dictionary cache. */ void @@ -1943,24 +1936,7 @@ dict_table_is_discarded( const dict_table_t* table) /*!< in: table to check */ MY_ATTRIBUTE((warn_unused_result)); -/********************************************************************//** -Check if it is a temporary table. -@return true if temporary table flag is set. */ -UNIV_INLINE -bool -dict_table_is_temporary( -/*====================*/ - const dict_table_t* table) /*!< in: table to check */ - MY_ATTRIBUTE((warn_unused_result)); - -/********************************************************************//** -Turn-off redo-logging if temporary table. 
*/ -UNIV_INLINE -void -dict_disable_redo_if_temporary( -/*===========================*/ - const dict_table_t* table, /*!< in: table to check */ - mtr_t* mtr); /*!< out: mini-transaction */ +#define dict_table_is_temporary(table) (table)->is_temporary() /*********************************************************************//** This function should be called whenever a page is successfully diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index f04f8faa686..134a4d63066 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -1495,32 +1495,6 @@ dict_table_is_discarded( return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_DISCARDED)); } -/********************************************************************//** -Check if it is a temporary table. -@return true if temporary table flag is set. */ -UNIV_INLINE -bool -dict_table_is_temporary( -/*====================*/ - const dict_table_t* table) /*!< in: table to check */ -{ - return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY)); -} - -/********************************************************************//** -Turn-off redo-logging if temporary table. */ -UNIV_INLINE -void -dict_disable_redo_if_temporary( -/*===========================*/ - const dict_table_t* table, /*!< in: table to check */ - mtr_t* mtr) /*!< out: mini-transaction */ -{ - if (dict_table_is_temporary(table)) { - mtr_set_log_mode(mtr, MTR_LOG_NO_REDO); - } -} - /** Check if the table is found is a file_per_table tablespace. This test does not use table flags2 since some REDUNDANT tables in the system tablespace may have garbage in the MIX_LEN field where flags2 is diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 034729e1595..5c285ef215d 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -1315,6 +1315,12 @@ struct dict_table_t { { return !(~flags & DICT_TF_MASK_NO_ROLLBACK); } + /** @return whether this is a temporary table */ + bool is_temporary() const + { + return flags2 & DICT_TF2_TEMPORARY; + } + /** @return whether this table is readable @retval true normally @retval false if this is a single-table tablespace @@ -1497,10 +1503,6 @@ struct dict_table_t { /*!< set of foreign key constraints which refer to this table */ dict_foreign_set referenced_set; - /** TRUE if the maximum length of a single row exceeds BIG_ROW_SIZE. - Initialized in dict_table_add_to_cache(). */ - unsigned big_rows:1; - /** Statistics for query optimization. @{ */ /** Creation state of 'stats_latch'. 
*/ diff --git a/storage/innobase/include/fsp0pagecompress.ic b/storage/innobase/include/fsp0pagecompress.ic index f4b95162b2a..d1f2ea45fbd 100644 --- a/storage/innobase/include/fsp0pagecompress.ic +++ b/storage/innobase/include/fsp0pagecompress.ic @@ -82,7 +82,7 @@ UNIV_INLINE const char* fil_get_compression_alg_name( /*=========================*/ - ulint comp_alg) /*!<in: compression algorithm number */ + ib_uint64_t comp_alg) /*!<in: compression algorithm number */ { switch(comp_alg) { case PAGE_UNCOMPRESSED: diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 459304fc712..5b97b4b3a88 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -39,10 +39,8 @@ class THD; // JAN: TODO missing features: #undef MYSQL_FT_INIT_EXT -#undef MYSQL_INNODB_PARTITIONING #undef MYSQL_PFS #undef MYSQL_RENAME_INDEX -#undef MYSQL_REPLACE_TRX_IN_THD #undef MYSQL_STORE_FTS_DOC_ID /*******************************************************************//** diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index b3de1bf27f2..60b07f2fe72 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -190,7 +190,7 @@ lock_update_merge_left( const buf_block_t* right_block); /*!< in: merged index page which will be discarded */ /*************************************************************//** -Updates the lock table when a page is splited and merged to +Updates the lock table when a page is split and merged to two pages. */ UNIV_INTERN void @@ -1073,16 +1073,9 @@ std::string lock_get_info( const lock_t*); -/*************************************************************//** -Updates the lock table when a page is split and merged to -two pages. */ -UNIV_INTERN -void -lock_update_split_and_merge( - const buf_block_t* left_block, /*!< in: left page to which merged */ - const rec_t* orig_pred, /*!< in: original predecessor of - supremum on the left page before merge*/ - const buf_block_t* right_block);/*!< in: right page from which merged */ +/*******************************************************************//** +@return whether wsrep_on is true on trx->mysql_thd*/ +#define wsrep_on_trx(trx) ((trx)->mysql_thd && wsrep_on((trx)->mysql_thd)) #endif /* WITH_WSREP */ diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index c06a61ef1be..4cd8d79bb41 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -636,6 +636,9 @@ os_file_create_simple_no_error_handling_func( bool* success) MY_ATTRIBUTE((warn_unused_result)); +#ifdef _WIN32 +#define os_file_set_nocache(fd, file_name, operation_name) do{}while(0) +#else /** Tries to disable OS caching on an opened file descriptor. @param[in] fd file descriptor to alter @param[in] file_name file name, used in the diagnostic message @@ -644,9 +647,10 @@ os_file_create_simple_no_error_handling_func( void os_file_set_nocache( /*================*/ - os_file_t fd, /*!< in: file descriptor to alter */ + int fd, /*!< in: file descriptor to alter */ const char* file_name, const char* operation_name); +#endif /** NOTE! Use the corresponding macro os_file_create(), not directly this function! @@ -1563,20 +1567,48 @@ innobase_mysql_tmpfile( void os_file_set_umask(ulint umask); +#ifdef _WIN32 + +/** +Make file sparse, on Windows. 
+ +@param[in] file file handle +@return true on success, false on error */ +bool os_file_set_sparse_win32(os_file_t file); + +/** +Changes file size on Windows + +If file is extended, following happens the bytes between +old and new EOF are zeros. + +If file is sparse, "virtual" block is added at the end of +allocated area. + +If file is normal, file system allocates storage. + +@param[in] pathname file path +@param[in] file file handle +@param[in] size size to preserve in bytes +@return true if success */ +bool +os_file_change_size_win32( + const char* pathname, + os_file_t file, + os_offset_t size); + +#endif /*_WIN32 */ + /** Check if the file system supports sparse files. Warning: On POSIX systems we try and punch a hole from offset 0 to the system configured page size. This should only be called on an empty file. -Note: On Windows we use the name and on Unices we use the file handle. - -@param[in] name File name @param[in] fh File handle for the file - if opened @return true if the file system supports sparse files */ bool os_is_sparse_file_supported( - const char* path, os_file_t fh) MY_ATTRIBUTE((warn_unused_result)); diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index 2b000603929..8d3752974a6 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -352,7 +352,6 @@ row_create_table_for_mysql( (will be freed, or on DB_SUCCESS added to the data dictionary cache) */ trx_t* trx, /*!< in/out: transaction */ - bool commit, /*!< in: if true, commit the transaction */ fil_encryption_t mode, /*!< in: encryption mode */ uint32_t key_id) /*!< in: encryption key_id */ MY_ATTRIBUTE((warn_unused_result)); diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h index 359d243a0cd..ec7995dd096 100644 --- a/storage/innobase/include/row0upd.h +++ b/storage/innobase/include/row0upd.h @@ -87,8 +87,7 @@ upd_field_set_field_no( upd_field_t* upd_field, /*!< in: update vector field */ ulint field_no, /*!< in: field number in a clustered index */ - dict_index_t* index, /*!< in: index */ - trx_t* trx); /*!< in: transaction */ + dict_index_t* index); /** set field number to a update vector field, marks this field is updated @param[in,out] upd_field update vector field diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic index 18c72309930..11271d6e9af 100644 --- a/storage/innobase/include/row0upd.ic +++ b/storage/innobase/include/row0upd.ic @@ -95,22 +95,10 @@ upd_field_set_field_no( upd_field_t* upd_field, /*!< in: update vector field */ ulint field_no, /*!< in: field number in a clustered index */ - dict_index_t* index, /*!< in: index */ - trx_t* trx) /*!< in: transaction */ + dict_index_t* index) /*!< in: index */ { upd_field->field_no = unsigned(field_no); upd_field->orig_len = 0; - - if (UNIV_UNLIKELY(field_no >= dict_index_get_n_fields(index))) { - ib::error() - << " trying to access field " << field_no - << " in " << index->name - << " of table " << index->table->name - << " which contains only " << index->n_fields - << " fields"; - ut_ad(0); - } - dict_col_copy_type(dict_index_get_nth_col(index, field_no), dfield_get_type(&upd_field->new_val)); } diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index e79124fa983..194fa50e2fe 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -1188,10 +1188,16 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter { 
compile_time_assert(!atomic || sizeof(Type) == sizeof(lint)); if (atomic) { - /* Silence MSVS warnings when instantiating - this template with atomic=false. */ +#ifdef _MSC_VER +// Suppress type conversion/ possible loss of data warning +#pragma warning (push) +#pragma warning (disable : 4244) +#endif return Type(my_atomic_addlint(reinterpret_cast<lint*> (&m_counter), i)); +#ifdef _MSC_VER +#pragma warning (pop) +#endif } else { return m_counter += i; } diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h index 97f23b11790..3dc35c7fda8 100644 --- a/storage/innobase/include/trx0rec.h +++ b/storage/innobase/include/trx0rec.h @@ -136,7 +136,6 @@ trx_undo_update_rec_get_update( trx_id_t trx_id, /*!< in: transaction id from this undorecord */ roll_ptr_t roll_ptr,/*!< in: roll pointer from this undo record */ ulint info_bits,/*!< in: info bits from this undo record */ - trx_t* trx, /*!< in: transaction */ mem_heap_t* heap, /*!< in: memory heap from which the memory needed is allocated */ upd_t** upd); /*!< out, own: update vector */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 90755445d95..8f96fd577e7 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1455,7 +1455,7 @@ lock_rec_other_has_conflicting( if (lock_rec_has_to_wait(true, trx, mode, lock, is_supremum)) { #ifdef WITH_WSREP - if (wsrep_on(trx->mysql_thd)) { + if (wsrep_on_trx(trx)) { trx_mutex_enter(lock->trx); wsrep_kill_victim((trx_t *)trx, (lock_t *)lock); trx_mutex_exit(lock->trx); @@ -1985,8 +1985,7 @@ RecLock::create( } #ifdef WITH_WSREP - if (c_lock && - wsrep_on(trx->mysql_thd) && + if (c_lock && wsrep_on_trx(trx) && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_t *hash = (lock_t *)c_lock->hash; lock_t *prev = NULL; diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc index 0954ad9430a..0ed55558dc4 100644 --- a/storage/innobase/lock/lock0wait.cc +++ b/storage/innobase/lock/lock0wait.cc @@ -189,8 +189,7 @@ wsrep_is_BF_lock_timeout( /*====================*/ trx_t* trx) /* in: trx to check for lock priority */ { - if (wsrep_on(trx->mysql_thd) && - wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (wsrep_on_trx(trx) && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { fprintf(stderr, "WSREP: BF lock wait long\n"); srv_print_innodb_monitor = TRUE; srv_print_innodb_lock_monitor = TRUE; @@ -198,7 +197,7 @@ wsrep_is_BF_lock_timeout( return TRUE; } return FALSE; - } +} #endif /* WITH_WSREP */ /***************************************************************//** @@ -399,7 +398,7 @@ lock_wait_suspend_thread( if (lock_wait_timeout < 100000000 && wait_time > (double) lock_wait_timeout #ifdef WITH_WSREP - && (!wsrep_on(trx->mysql_thd) || + && (!wsrep_on_trx(trx) || (!wsrep_is_BF_lock_timeout(trx) && trx->error_state != DB_DEADLOCK)) #endif /* WITH_WSREP */ && !trx_is_high_priority(trx)) { diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 27770aed0eb..0fd98350579 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -858,7 +858,8 @@ os_file_get_block_size( &tmp); if (!result) { - if (GetLastError() == ERROR_INVALID_FUNCTION) { + DWORD err = GetLastError(); + if (err == ERROR_INVALID_FUNCTION || err == ERROR_NOT_SUPPORTED) { // Don't report error, it is driver's fault, not ours or users. // We handle this with fallback. Report wit info message, just once. 
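Illustration (not part of the patch): the #pragma warning(push)/(disable)/(pop) sandwich in the simple_counter::add() hunk above is the MSVC-only way to silence one diagnostic for a narrow region while leaving it enabled elsewhere; C4244 is the "possible loss of data" conversion warning the added comment refers to. A stand-alone sketch of the same pattern:

#ifdef _MSC_VER
#pragma warning (push)
#pragma warning (disable : 4244)	/* conversion from 'long long' to 'long', possible loss of data */
#endif
static long add_narrowing(long long total, long delta)
{
	return total + delta;	/* would raise C4244 on MSVC without the pragma */
}
#ifdef _MSC_VER
#pragma warning (pop)
#endif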
static bool write_info = true; @@ -3779,6 +3780,7 @@ os_file_get_last_error_low( return(OS_FILE_ERROR_MAX + err); } + /** NOTE! Use the corresponding macro os_file_create_simple(), not directly this function! A simple function to open or create a file. @@ -3897,15 +3899,6 @@ os_file_create_simple_func( retry = false; *success = true; - - DWORD temp; - - /* This is a best effort use case, if it fails then - we will find out when we try and punch the hole. */ - - os_win32_device_io_control( - file, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, - &temp); } } while (retry); @@ -4298,13 +4291,6 @@ os_file_create_func( /* Bind the file handle to completion port */ ut_a(CreateIoCompletionPort(file, completion_port, 0, 0)); } - DWORD temp; - - /* This is a best effort use case, if it fails then - we will find out when we try and punch the hole. */ - os_win32_device_io_control( - file, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, - &temp); } } while (retry); @@ -4752,16 +4738,36 @@ os_file_get_status_win32( return(DB_SUCCESS); } -/** Truncates a file to a specified size in bytes. -Do nothing if the size to preserve is greater or equal to the current -size of the file. +/** +Sets a sparse flag on Windows file. +@param[in] file file handle +@return true on success, false on error +*/ +bool os_file_set_sparse_win32(os_file_t file) +{ + + DWORD temp; + return os_win32_device_io_control(file, FSCTL_SET_SPARSE, 0, 0, 0, 0,&temp); +} + + +/** +Change file size on Windows. + +If file is extended, the bytes between old and new EOF +are zeros. + +If file is sparse, "virtual" block is added at the end of +allocated area. + +If file is normal, file system allocates storage. + @param[in] pathname file path -@param[in] file file to be truncated +@param[in] file file handle @param[in] size size to preserve in bytes @return true if success */ -static bool -os_file_truncate_win32( +os_file_change_size_win32( const char* pathname, os_file_t file, os_offset_t size) @@ -5254,6 +5260,7 @@ os_file_handle_error_no_exit( name, operation, false, on_error_silent)); } +#ifndef _WIN32 /** Tries to disable OS caching on an opened file descriptor. @param[in] fd file descriptor to alter @param[in] file_name file name, used in the diagnostic message @@ -5261,7 +5268,7 @@ os_file_handle_error_no_exit( message */ void os_file_set_nocache( - os_file_t fd MY_ATTRIBUTE((unused)), + int fd MY_ATTRIBUTE((unused)), const char* file_name MY_ATTRIBUTE((unused)), const char* operation_name MY_ATTRIBUTE((unused))) { @@ -5310,6 +5317,8 @@ short_warning: #endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */ } +#endif /* _WIN32 */ + /** Write the specified number of zeros to a newly created file. @param[in] name name of the file or path as a null-terminated string @@ -5324,6 +5333,9 @@ os_file_set_size( os_offset_t size, bool read_only) { +#ifdef _WIN32 + return os_file_change_size_win32(name, file, size); +#endif /* Write up to 1 megabyte at a time. */ ulint buf_size = ut_min( static_cast<ulint>(64), @@ -5413,7 +5425,7 @@ os_file_truncate( } #ifdef _WIN32 - return(os_file_truncate_win32(pathname, file, size)); + return(os_file_change_size_win32(pathname, file, size)); #else /* _WIN32 */ return(os_file_truncate_posix(pathname, file, size)); #endif /* _WIN32 */ @@ -5553,14 +5565,10 @@ IORequest::punch_hole(os_file_t fh, os_offset_t off, ulint len) Warning: On POSIX systems we try and punch a hole from offset 0 to the system configured page size. This should only be called on an empty file. 
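Illustration (not part of the patch): on POSIX the probe this comment describes is commonly an attempted hole punch on the empty file; if the filesystem refuses it, sparse-file support is absent. A rough Linux-only sketch of such a probe, not lifted from InnoDB:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/types.h>

/* Try to punch a hole at offset 0 of an empty file; success means the
   filesystem can represent sparse files. */
static bool punch_hole_probe(int fd, off_t page_size)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 0, page_size) == 0;
}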
- -Note: On Windows we use the name and on Unices we use the file handle. - -@param[in] name File name @param[in] fh File handle for the file - if opened @return true if the file system supports sparse files */ bool -os_is_sparse_file_supported(const char* path, os_file_t fh) +os_is_sparse_file_supported(os_file_t fh) { /* In this debugging mode, we act as if punch hole is supported, then we skip any calls to actually punch a hole. In this way, @@ -5570,7 +5578,13 @@ os_is_sparse_file_supported(const char* path, os_file_t fh) ); #ifdef _WIN32 - return(os_is_sparse_file_supported_win32(path)); + BY_HANDLE_FILE_INFORMATION info; + if (GetFileInformationByHandle(fh,&info)) { + if (info.dwFileAttributes != INVALID_FILE_ATTRIBUTES) { + return (info.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) != 0; + } + } + return false; #else dberr_t err; @@ -7531,9 +7545,9 @@ AIO::to_file(FILE* file) const fprintf(file, "%s IO for %s (offset=" UINT64PF - ", size=" ULINTPF ")\n", + ", size=%lu)\n", slot.type.is_read() ? "read" : "write", - slot.name, slot.offset, slot.len); + slot.name, slot.offset, (unsigned long)(slot.len)); } } diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc index 21325cac12a..56ca037f247 100644 --- a/storage/innobase/pars/pars0pars.cc +++ b/storage/innobase/pars/pars0pars.cc @@ -1180,7 +1180,7 @@ pars_process_assign_list( upd_field_set_field_no(upd_field, dict_index_get_nth_col_pos( clust_index, col_sym->col_no, NULL), - clust_index, NULL); + clust_index); upd_field->exp = assign_node->val; if (!dict_col_get_fixed_size( diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index a8258fa2c0f..9b9d19ae960 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2487,9 +2487,12 @@ row_ins_index_entry_big_rec( DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern_latch"); - mtr_start(&mtr); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + mtr.start(); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } btr_pcur_open(index, entry, PAGE_CUR_LE, BTR_MODIFY_TREE, &pcur, &mtr); @@ -2508,7 +2511,7 @@ row_ins_index_entry_big_rec( index, offsets); } - mtr_commit(&mtr); + mtr.commit(); btr_pcur_close(&pcur); diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 0cf34d8fb38..fa466d09d30 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -1094,8 +1094,8 @@ row_merge_read( const bool success = os_file_read_no_error_handling_int_fd( request, fd, buf, ofs, srv_sort_buf_size); - /* For encrypted tables, decrypt data after reading and copy data */ - if (log_tmp_is_encrypted()) { + /* If encryption is enabled decrypt buffer */ + if (success && log_tmp_is_encrypted()) { if (!log_tmp_block_decrypt(buf, srv_sort_buf_size, crypt_buf, ofs, space)) { return (FALSE); @@ -4092,7 +4092,7 @@ row_merge_file_create( if (merge_file->fd >= 0) { if (srv_disable_sort_file_cache) { - os_file_set_nocache((os_file_t)merge_file->fd, + os_file_set_nocache(merge_file->fd, "row0merge.cc", "sort"); } } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 583acdc482b..eb1c253be1c 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -2370,7 +2370,6 @@ row_create_table_for_mysql( (will be freed, or on DB_SUCCESS added to the data dictionary cache) */ trx_t* trx, /*!< in/out: transaction 
*/ - bool commit, /*!< in: if true, commit the transaction */ fil_encryption_t mode, /*!< in: encryption mode */ uint32_t key_id) /*!< in: encryption key_id */ { @@ -2400,10 +2399,6 @@ err_exit: #endif /* !DBUG_OFF */ dict_mem_table_free(table); - if (commit) { - trx_commit_for_mysql(trx); - } - trx->op_info = ""; return(DB_ERROR); @@ -2448,7 +2443,7 @@ err_exit: err = dict_replace_tablespace_in_dictionary( table->space, table->name.m_name, fil_space_get_flags(table->space), - path, trx, commit); + path, trx); ut_free(path); @@ -2474,10 +2469,6 @@ err_exit: DICT_ERR_IGNORE_NONE)) { dict_table_close_and_drop(trx, table); - - if (commit) { - trx_commit_for_mysql(trx); - } } else { dict_mem_table_free(table); } diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index ba6c1282eaa..b041917d398 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -984,7 +984,6 @@ err_exit: ptr = trx_undo_update_rec_get_update(ptr, clust_index, type, node->trx_id, roll_ptr, info_bits, - thr_get_trx(thr), node->heap, &(node->update)); /* Read to the partial row the fields that occur in indexes */ diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 1b045a23fe9..08cce51a503 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -1657,8 +1657,7 @@ table_loop: #ifdef BTR_CUR_HASH_ADAPT if (consistent_read && plan->unique_search && !plan->pcur_is_open - && !plan->must_get_clust - && !plan->table->big_rows) { + && !plan->must_get_clust) { if (!search_latch_locked) { btr_search_s_lock(index); @@ -2085,8 +2084,7 @@ skip_lock: ut_ad(plan->pcur.latch_mode == BTR_SEARCH_LEAF); if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT) - || plan->unique_search || plan->no_prefetch - || plan->table->big_rows) { + || plan->unique_search || plan->no_prefetch) { /* No prefetch in operation: go to the next table */ diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc index 60a9f49b576..b50b4e94cfb 100644 --- a/storage/innobase/row/row0uins.cc +++ b/storage/innobase/row/row0uins.cc @@ -76,9 +76,12 @@ row_undo_ins_remove_clust_rec( ut_ad(dict_index_is_clust(index)); ut_ad(node->trx->in_rollback); - mtr_start(&mtr); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + mtr.start(); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } /* This is similar to row_undo_mod_clust(). The DDL thread may already have copied this row from the log to the new table. 
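Illustration (not part of the patch): the row0ins.cc and row0uins.cc hunks above show the replacement that recurs through the insert, undo and update paths: instead of calling the removed dict_disable_redo_if_temporary() after mtr_start(), each call site now starts the mini-transaction and branches on dict_table_t::is_temporary(). A sketch of the resulting idiom; the helper function is hypothetical, the patch repeats the branch inline at every call site:

#include "mtr0mtr.h"	/* mtr_t -- InnoDB-internal header */
#include "dict0dict.h"	/* dict_index_t, dict_table_t */

static void
start_mtr_for_index(const dict_index_t* index, mtr_t* mtr)
{
	mtr->start();
	if (index->table->is_temporary()) {
		/* Changes to temporary tables are never redo-logged. */
		mtr->set_log_mode(MTR_LOG_NO_REDO);
	} else {
		/* Tag the mtr with the tablespace it is going to modify. */
		mtr->set_named_space(index->space);
	}
}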
@@ -125,9 +128,9 @@ row_undo_ins_remove_clust_rec( dict_drop_index_tree( btr_pcur_get_rec(&node->pcur), &(node->pcur), &mtr); - mtr_commit(&mtr); + mtr.commit(); - mtr_start(&mtr); + mtr.start(); success = btr_pcur_restore_position( BTR_MODIFY_LEAF, &node->pcur, &mtr); @@ -142,9 +145,12 @@ row_undo_ins_remove_clust_rec( btr_pcur_commit_specify_mtr(&node->pcur, &mtr); retry: /* If did not succeed, try pessimistic descent to tree */ - mtr_start(&mtr); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + mtr.start(); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } success = btr_pcur_restore_position( BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE, diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index 1ed25e7076a..049b1048724 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -272,9 +272,12 @@ row_undo_mod_clust( pcur = &node->pcur; index = btr_cur_get_index(btr_pcur_get_btr_cur(pcur)); - mtr_start(&mtr); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + mtr.start(); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } online = dict_index_is_online_ddl(index); if (online) { @@ -304,8 +307,11 @@ row_undo_mod_clust( descent down the index tree */ mtr_start_trx(&mtr, thr_get_trx(thr)); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } err = row_undo_mod_clust_low( node, &offsets, &offsets_heap, @@ -363,8 +369,11 @@ row_undo_mod_clust( if (err == DB_SUCCESS && node->rec_type == TRX_UNDO_UPD_DEL_REC) { mtr_start_trx(&mtr, thr_get_trx(thr)); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } /* It is not necessary to call row_log_table, because the record is delete-marked and would thus @@ -378,8 +387,11 @@ row_undo_mod_clust( pessimistic descent down the index tree */ mtr_start_trx(&mtr, thr_get_trx(thr)); - mtr.set_named_space(index->space); - dict_disable_redo_if_temporary(index->table, &mtr); + if (index->table->is_temporary()) { + mtr.set_log_mode(MTR_LOG_NO_REDO); + } else { + mtr.set_named_space(index->space); + } err = row_undo_mod_remove_clust_low( node, &mtr, @@ -1159,7 +1171,7 @@ close_table: node->heap); ptr = trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id, - roll_ptr, info_bits, node->trx, + roll_ptr, info_bits, node->heap, &(node->update)); node->new_trx_id = trx_id; node->cmpl_info = cmpl_info; diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc index 0679b29b6ca..8826ebdd0cb 100644 --- a/storage/innobase/row/row0undo.cc +++ b/storage/innobase/row/row0undo.cc @@ -172,7 +172,6 @@ row_undo_search_clust_to_pcur( rec_offs_init(offsets_); mtr_start(&mtr); - dict_disable_redo_if_temporary(node->table, &mtr); clust_index = dict_table_get_first_index(node->table); diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index 1444f6f7c7d..2eceef14025 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -464,8 +464,8 @@ inline bool wsrep_must_process_fk(const upd_node_t* 
node, const trx_t* trx) { - if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE || - !wsrep_on(trx->mysql_thd)) { + if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE + || !wsrep_on_trx(trx)) { return false; } @@ -1006,7 +1006,7 @@ row_upd_build_sec_rec_difference_binary( dfield_copy(&(upd_field->new_val), dfield); - upd_field_set_field_no(upd_field, i, index, NULL); + upd_field_set_field_no(upd_field, i, index); n_diff++; } @@ -1103,7 +1103,7 @@ row_upd_build_difference_binary( dfield_copy(&(upd_field->new_val), dfield); - upd_field_set_field_no(upd_field, i, index, trx); + upd_field_set_field_no(upd_field, i, index); n_diff++; } diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index d87ffeea21f..8c7922bcaf8 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1891,9 +1891,6 @@ innobase_start_or_create_for_mysql() return(srv_init_abort(DB_ERROR)); } - compile_time_assert(ulonglong(ULINT_MAX) * UNIV_PAGE_SIZE_MIN - >= 512ULL << 30); - os_normalize_path(srv_data_home); /* Check if the data files exist or not. */ diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc index 5a8e1e23546..3e874bbeed3 100644 --- a/storage/innobase/trx/trx0rec.cc +++ b/storage/innobase/trx/trx0rec.cc @@ -1445,7 +1445,6 @@ trx_undo_update_rec_get_update( trx_id_t trx_id, /*!< in: transaction id from this undo record */ roll_ptr_t roll_ptr,/*!< in: roll pointer from this undo record */ ulint info_bits,/*!< in: info bits from this undo record */ - trx_t* trx, /*!< in: transaction */ mem_heap_t* heap, /*!< in: memory heap from which the memory needed is allocated */ upd_t** upd) /*!< out, own: update vector */ @@ -1481,7 +1480,7 @@ trx_undo_update_rec_get_update( upd_field_set_field_no(upd_field, dict_index_get_sys_col_pos(index, DATA_TRX_ID), - index, trx); + index); dfield_set_data(&(upd_field->new_val), buf, DATA_TRX_ID_LEN); upd_field = upd_get_nth_field(update, n_fields + 1); @@ -1492,7 +1491,7 @@ trx_undo_update_rec_get_update( upd_field_set_field_no( upd_field, dict_index_get_sys_col_pos(index, DATA_ROLL_PTR), - index, trx); + index); dfield_set_data(&(upd_field->new_val), buf, DATA_ROLL_PTR_LEN); /* Store then the updated ordinary columns to the update vector */ @@ -1553,7 +1552,7 @@ trx_undo_update_rec_get_update( upd_field_set_v_field_no( upd_field, field_no, index); } else { - upd_field_set_field_no(upd_field, field_no, index, trx); + upd_field_set_field_no(upd_field, field_no, index); } ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len); @@ -2228,7 +2227,7 @@ trx_undo_prev_version_build( ptr = trx_undo_update_rec_get_update(ptr, index, type, trx_id, roll_ptr, info_bits, - NULL, heap, &update); + heap, &update); ut_a(ptr); if (row_upd_changes_field_size_or_external(index, offsets, update)) { diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index 7393a24b72c..7ad80c3cd1f 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -834,6 +834,15 @@ error::~error() sql_print_error("InnoDB: %s", m_oss.str().c_str()); } +#ifdef _MSC_VER +/* disable warning + "ib::fatal::~fatal': destructor never returns, potential memory leak" + on Windows. 
+*/ +#pragma warning (push) +#pragma warning (disable : 4722) +#endif + ATTRIBUTE_NORETURN fatal::~fatal() { @@ -841,6 +850,10 @@ fatal::~fatal() abort(); } +#ifdef _MSC_VER +#pragma warning (pop) +#endif + error_or_warn::~error_or_warn() { if (m_error) { diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 4c078b11e4d..321837bd425 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -405,7 +405,7 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type, { THD *thd= (THD *) param->thd; Protocol *protocol= thd->protocol; - uint length, msg_length; + size_t length, msg_length; char msgbuf[MYSQL_ERRMSG_SIZE]; char name[NAME_LEN * 2 + 2]; @@ -442,10 +442,10 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type, push_warning). */ protocol->prepare_for_resend(); - protocol->store(name, length, system_charset_info); + protocol->store(name, (uint)length, system_charset_info); protocol->store(param->op_name, system_charset_info); protocol->store(msg_type, system_charset_info); - protocol->store(msgbuf, msg_length, system_charset_info); + protocol->store(msgbuf, (uint)msg_length, system_charset_info); if (protocol->write()) sql_print_error("Failed on my_net_write, writing to stderr instead: %s.%s: %s\n", param->db_name, param->table_name, msgbuf); @@ -620,8 +620,8 @@ static int table2maria(TABLE *table_arg, data_file_type row_type, } } } - DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d", - (long) found, recpos, minpos, length)); + DBUG_PRINT("loop", ("found: %p recpos: %d minpos: %d length: %d", + found, recpos, minpos, length)); if (!found) break; @@ -3444,7 +3444,7 @@ bool maria_show_status(handlerton *hton, { char *file; const char *status; - uint length, status_len; + size_t length, status_len; MY_STAT stat_buff, *stat; const char error[]= "can't stat"; char object[SHOW_MSG_LEN]; @@ -3472,8 +3472,8 @@ bool maria_show_status(handlerton *hton, status= needed; status_len= sizeof(needed) - 1; } - length= my_snprintf(object, SHOW_MSG_LEN, "Size %12lu ; %s", - (ulong) stat->st_size, file); + length= my_snprintf(object, SHOW_MSG_LEN, "Size %12llu ; %s", + (ulonglong) stat->st_size, file); } print(thd, engine_name->str, engine_name->length, diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c index 977dd888365..4b0f03c66ed 100644 --- a/storage/maria/ma_bitmap.c +++ b/storage/maria/ma_bitmap.c @@ -864,8 +864,8 @@ static void _ma_print_bitmap_changes(MARIA_FILE_BITMAP *bitmap) end= bitmap->map + bitmap->used_size; DBUG_LOCK_FILE; - fprintf(DBUG_FILE,"\nBitmap page changes at page: %lu bitmap: 0x%lx\n", - (ulong) bitmap->page, (long) bitmap->map); + fprintf(DBUG_FILE,"\nBitmap page changes at page: %lu bitmap: %p\n", + (ulong) bitmap->page, bitmap->map); page= (ulong) bitmap->page+1; for (pos= bitmap->map, org_pos= bitmap->map + bitmap->block_size ; diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c index 6432d55a8d3..fec8bf2d72d 100644 --- a/storage/maria/ma_blockrec.c +++ b/storage/maria/ma_blockrec.c @@ -7540,7 +7540,7 @@ void _ma_print_block_info(MARIA_SHARE *share, uchar *buff) { LSN lsn= lsn_korr(buff); - printf("LSN: %lu,0x%lx type: %u dir_entries: %u dir_free: %u empty_space: %u\n", + printf("LSN:" LSN_FMT " type: %u dir_entries: %u dir_free: %u empty_space: %u\n", LSN_IN_PARTS(lsn), (uint)buff[PAGE_TYPE_OFFSET], (uint)buff[DIR_COUNT_OFFSET], diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 4303bf66652..2f638fb1065 100644 --- 
a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -772,7 +772,7 @@ static void maria_collect_stats_nonulls_first(HA_KEYSEG *keyseg, ulonglong *notnull, const uchar *key) { - uint first_null, kp; + size_t first_null, kp; first_null= ha_find_null(keyseg, key) - keyseg; /* All prefix tuples that don't include keypart_{first_null} are not-null @@ -814,7 +814,7 @@ int maria_collect_stats_nonulls_next(HA_KEYSEG *keyseg, ulonglong *notnull, const uchar *last_key) { uint diffs[2]; - uint first_null_seg, kp; + size_t first_null_seg, kp; HA_KEYSEG *seg; /* @@ -2519,8 +2519,8 @@ static int maria_drop_all_indexes(HA_CHECK *param, MARIA_HA *info, DBUG_PRINT("repair", ("creating missing indexes")); for (i= 0; i < share->base.keys; i++) { - DBUG_PRINT("repair", ("index #: %u key_root: 0x%lx active: %d", - i, (long) state->key_root[i], + DBUG_PRINT("repair", ("index #: %u key_root:%lld active: %d", + i, state->key_root[i], maria_is_key_active(state->key_map, i))); if ((state->key_root[i] != HA_OFFSET_ERROR) && !maria_is_key_active(state->key_map, i)) @@ -4477,8 +4477,8 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, */ sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache : new_data_cache); - DBUG_PRINT("io_cache_share", ("thread: %u read_cache: 0x%lx", - i, (long) &sort_param[i].read_cache)); + DBUG_PRINT("io_cache_share", ("thread: %u read_cache: %p", + i, &sort_param[i].read_cache)); /* two approaches: the same amount of memory for each thread @@ -5671,7 +5671,7 @@ static int sort_maria_ft_key_write(MARIA_SORT_PARAM *sort_param, key_block++; sort_info->key_block=key_block; sort_param->keyinfo= &share->ft2_keyinfo; - ft_buf->count=(ft_buf->buf - p)/val_len; + ft_buf->count=(uint)(ft_buf->buf - p)/val_len; /* flushing buffer to second-level tree */ for (error=0; !error && p < ft_buf->buf; p+= val_len) diff --git a/storage/maria/ma_checkpoint.c b/storage/maria/ma_checkpoint.c index 1cf07852623..adfa53ee24b 100644 --- a/storage/maria/ma_checkpoint.c +++ b/storage/maria/ma_checkpoint.c @@ -170,7 +170,7 @@ static int really_execute_checkpoint(void) "Horizon" is a lower bound of the LSN of the next log record. */ checkpoint_start_log_horizon= translog_get_horizon(); - DBUG_PRINT("info",("checkpoint_start_log_horizon (%lu,0x%lx)", + DBUG_PRINT("info",("checkpoint_start_log_horizon " LSN_FMT, LSN_IN_PARTS(checkpoint_start_log_horizon))); lsn_store(checkpoint_start_log_horizon_char, checkpoint_start_log_horizon); @@ -333,10 +333,11 @@ int ma_checkpoint_init(ulong interval) else if (interval > 0) { compile_time_assert(sizeof(void *) >= sizeof(ulong)); + size_t intv= interval; if ((res= mysql_thread_create(key_thread_checkpoint, &checkpoint_control.thread, NULL, ma_checkpoint_background, - (void*) interval))) + (void*) intv))) checkpoint_control.killed= TRUE; } else @@ -375,7 +376,7 @@ static void flush_all_tables(int what_to_flush) MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET| MA_STATE_INFO_WRITE_LOCK); DBUG_PRINT("maria_flush_states", - ("is_of_horizon: LSN (%lu,0x%lx)", + ("is_of_horizon: LSN " LSN_FMT, LSN_IN_PARTS(info->s->state.is_of_horizon))); break; case 2: @@ -546,8 +547,8 @@ pthread_handler_t ma_checkpoint_background(void *arg) right after "case 0", thus having 'dfile' unset. So the thread cares only about the interval's value when it started. 
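Illustration (not part of the patch): the two ma_checkpoint.c hunks around this point pass the checkpoint interval through the thread-start void* argument. The value is first widened to size_t, which matches pointer width on LLP64 Windows where ulong is only 32 bits, so the (void*) cast no longer truncates or warns, and the thread body reads it back with the matching (size_t) cast. A stand-alone sketch of that round trip, shown with plain pthreads rather than mysql_thread_create:

#include <pthread.h>
#include <stdio.h>

static void* checkpoint_background(void* arg)
{
	/* Recover the integer smuggled through the pointer argument. */
	const size_t interval = (size_t) arg;
	printf("checkpoint interval: %zu seconds\n", interval);
	return NULL;
}

int main()
{
	unsigned long interval = 30;
	size_t intv = interval;	/* widen before the pointer cast */
	pthread_t thread;
	pthread_create(&thread, NULL, checkpoint_background, (void*) intv);
	pthread_join(thread, NULL);
	return 0;
}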
*/ - const ulong interval= (ulong)arg; - uint sleeps, sleep_time; + const size_t interval= (size_t)arg; + size_t sleeps, sleep_time; TRANSLOG_ADDRESS log_horizon_at_last_checkpoint= translog_get_horizon(); ulonglong pagecache_flushes_at_last_checkpoint= diff --git a/storage/maria/ma_close.c b/storage/maria/ma_close.c index d89a69f02ab..882e9f585f1 100644 --- a/storage/maria/ma_close.c +++ b/storage/maria/ma_close.c @@ -30,9 +30,9 @@ int maria_close(register MARIA_HA *info) MARIA_SHARE *share= info->s; my_bool internal_table= share->internal_table; DBUG_ENTER("maria_close"); - DBUG_PRINT("enter",("name: '%s' base: 0x%lx reopen: %u locks: %u", + DBUG_PRINT("enter",("name: '%s' base: %p reopen: %u locks: %u", share->open_file_name.str, - (long) info, (uint) share->reopen, + info, (uint) share->reopen, (uint) share->tot_locks)); /* Check that we have unlocked key delete-links properly */ diff --git a/storage/maria/ma_commit.c b/storage/maria/ma_commit.c index 358f564d3f1..68435a45c0a 100644 --- a/storage/maria/ma_commit.c +++ b/storage/maria/ma_commit.c @@ -121,7 +121,7 @@ int maria_begin(MARIA_HA *info) if (unlikely(!trn)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - DBUG_PRINT("info", ("TRN set to 0x%lx", (ulong) trn)); + DBUG_PRINT("info", ("TRN set to %p", trn)); _ma_set_trn_for_table(info, trn); } DBUG_RETURN(0); diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c index b46cf7e1765..1ccb67d5698 100644 --- a/storage/maria/ma_control_file.c +++ b/storage/maria/ma_control_file.c @@ -146,6 +146,8 @@ static CONTROL_FILE_ERROR create_control_file(const char *name, { uint32 sum; uchar buffer[CF_CREATE_TIME_TOTAL_SIZE]; + ulong rnd1,rnd2; + DBUG_ENTER("maria_create_control_file"); if ((control_file_fd= mysql_file_create(key_file_control, name, 0, @@ -157,7 +159,9 @@ static CONTROL_FILE_ERROR create_control_file(const char *name, cf_changeable_size= CF_CHANGEABLE_TOTAL_SIZE; /* Create unique uuid for the control file */ - my_uuid_init((ulong) &buffer, (ulong) &maria_uuid); + my_random_bytes((uchar *)&rnd1, sizeof (rnd1)); + my_random_bytes((uchar *)&rnd2, sizeof (rnd2)); + my_uuid_init(rnd1, rnd2); my_uuid(maria_uuid); /* Prepare and write the file header */ diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c index 624c8148f49..058b675d6a7 100644 --- a/storage/maria/ma_create.c +++ b/storage/maria/ma_create.c @@ -33,6 +33,27 @@ static int compare_columns(MARIA_COLUMNDEF **a, MARIA_COLUMNDEF **b); + +static ulonglong update_tot_length(ulonglong tot_length, ulonglong max_rows, uint length) +{ + ulonglong tot_length_part; + + if (tot_length == ULONGLONG_MAX) + return ULONGLONG_MAX; + + tot_length_part= (max_rows/(ulong) ((maria_block_size - + MAX_KEYPAGE_HEADER_SIZE - KEYPAGE_CHECKSUM_SIZE)/ + (length*2))); + if (tot_length_part >= ULONGLONG_MAX / maria_block_size) + return ULONGLONG_MAX; + + if (tot_length > ULONGLONG_MAX - tot_length_part * maria_block_size) + return ULONGLONG_MAX; + + return tot_length + tot_length_part * maria_block_size; +} + + /* Old options is used when recreating database, from maria_chk */ @@ -57,7 +78,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, char kfilename[FN_REFLEN], klinkname[FN_REFLEN], *klinkname_ptr; char dfilename[FN_REFLEN], dlinkname[FN_REFLEN], *dlinkname_ptr= 0; ulong pack_reclength; - ulonglong tot_length,max_rows, tmp, tot_length_part; + ulonglong tot_length,max_rows, tmp; enum en_fieldtype type; enum data_file_type org_datafile_type= datafile_type; MARIA_SHARE share; @@ -661,23 +682,7 @@ int 
maria_create(const char *name, enum data_file_type datafile_type, if (length > max_key_length) max_key_length= length; - if (tot_length == ULLONG_MAX) - continue; - - tot_length_part= (max_rows/(ulong) (((uint) maria_block_size - - MAX_KEYPAGE_HEADER_SIZE - - KEYPAGE_CHECKSUM_SIZE)/ - (length*2))); - if (tot_length_part >= (ULLONG_MAX / maria_block_size + - ULLONG_MAX % maria_block_size)) - tot_length= ULLONG_MAX; - else - { - if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size) - tot_length= ULLONG_MAX; - else - tot_length+= tot_length_part * maria_block_size; - } + tot_length= update_tot_length(tot_length, max_rows, length); } unique_key_parts=0; @@ -687,23 +692,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, unique_key_parts+=uniquedef->keysegs; share.state.key_root[keys+i]= HA_OFFSET_ERROR; - if (tot_length == ULLONG_MAX) - continue; - ulonglong tot_length_part= (max_rows/(ulong) (((uint) maria_block_size - - MAX_KEYPAGE_HEADER_SIZE - - KEYPAGE_CHECKSUM_SIZE) / - ((MARIA_UNIQUE_HASH_LENGTH + pointer)*2))); - - if (tot_length_part >= (ULLONG_MAX / maria_block_size + - ULLONG_MAX % maria_block_size)) - tot_length= ULLONG_MAX; - else - { - if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size) - tot_length= ULLONG_MAX; - else - tot_length+= tot_length_part * maria_block_size; - } + tot_length= update_tot_length(tot_length, max_rows, MARIA_UNIQUE_HASH_LENGTH + pointer); } keys+=uniques; /* Each unique has 1 key */ key_segs+=uniques; /* Each unique has 1 key seg */ diff --git a/storage/maria/ma_delete.c b/storage/maria/ma_delete.c index 7921ab59a8f..c5a2378dc2b 100644 --- a/storage/maria/ma_delete.c +++ b/storage/maria/ma_delete.c @@ -559,9 +559,9 @@ static int del(MARIA_HA *info, MARIA_KEY *key, MARIA_KEY ret_key; MARIA_PAGE next_page; DBUG_ENTER("del"); - DBUG_PRINT("enter",("leaf_page: %lu keypos: 0x%lx", + DBUG_PRINT("enter",("leaf_page: %lu keypos: %p", (ulong) (leaf_page->pos / share->block_size), - (ulong) keypos)); + keypos)); DBUG_DUMP("leaf_buff", leaf_page->buff, leaf_page->size); page_flag= leaf_page->flag; @@ -773,9 +773,9 @@ static int underflow(MARIA_HA *info, MARIA_KEYDEF *keyinfo, MARIA_KEY tmp_key, anc_key, leaf_key; MARIA_PAGE next_page; DBUG_ENTER("underflow"); - DBUG_PRINT("enter",("leaf_page: %lu keypos: 0x%lx", + DBUG_PRINT("enter",("leaf_page: %lu keypos: %p", (ulong) (leaf_page->pos / share->block_size), - (ulong) keypos)); + keypos)); DBUG_DUMP("anc_buff", anc_page->buff, anc_page->size); DBUG_DUMP("leaf_buff", leaf_page->buff, leaf_page->size); @@ -918,8 +918,8 @@ static int underflow(MARIA_HA *info, MARIA_KEYDEF *keyinfo, anc_end_pos= anc_buff + new_anc_length; - DBUG_PRINT("test",("anc_buff: 0x%lx anc_end_pos: 0x%lx", - (long) anc_buff, (long) anc_end_pos)); + DBUG_PRINT("test",("anc_buff:%p anc_end_pos:%p", + anc_buff, anc_end_pos)); if (!first_key && !_ma_get_last_key(&anc_key, anc_page, keypos)) goto err; @@ -1308,8 +1308,8 @@ static uint remove_key(MARIA_KEYDEF *keyinfo, uint page_flag, uint nod_flag, int s_length; uchar *start; DBUG_ENTER("remove_key"); - DBUG_PRINT("enter", ("keypos: 0x%lx page_end: 0x%lx", - (long) keypos, (long) page_end)); + DBUG_PRINT("enter", ("keypos:%p page_end: %p", + keypos, page_end)); start= s_temp->key_pos= keypos; s_temp->changed_length= 0; diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c index 7f34b73089f..e5c108a18c6 100644 --- a/storage/maria/ma_dynrec.c +++ b/storage/maria/ma_dynrec.c @@ -1343,8 +1343,8 @@ ulong _ma_rec_unpack(register MARIA_HA *info, 
register uchar *to, uchar *from, err: _ma_set_fatal_error(info->s, HA_ERR_WRONG_IN_RECORD); - DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx", - (long) to, (long) to_end, (long) from, (long) from_end)); + DBUG_PRINT("error",("to_end: %p -> %p from_end: %p -> %p", + to, to_end, from, from_end)); DBUG_DUMP("from", info->rec_buff, info->s->base.min_pack_length); DBUG_RETURN(MY_FILE_ERROR); } /* _ma_rec_unpack */ diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c index 89693f45dca..6f3e17ed80d 100644 --- a/storage/maria/ma_key.c +++ b/storage/maria/ma_key.c @@ -318,7 +318,7 @@ MARIA_KEY *_ma_make_key(MARIA_HA *info, MARIA_KEY *int_key, uint keynr, key+= length; } _ma_dpointer(info->s, key, filepos); - int_key->data_length= (key - int_key->data); + int_key->data_length= (uint)(key - int_key->data); int_key->ref_length= info->s->rec_reflength; int_key->flag= 0; if (_ma_have_versioning(info) && trid) @@ -449,7 +449,7 @@ MARIA_KEY *_ma_pack_key(register MARIA_HA *info, MARIA_KEY *int_key, /* set flag to SEARCH_PART_KEY if we are not using all key parts */ int_key->flag= keyseg->type ? SEARCH_PART_KEY : 0; int_key->ref_length= 0; - int_key->data_length= (key - int_key->data); + int_key->data_length= (uint)(key - int_key->data); DBUG_PRINT("exit", ("length: %u", int_key->data_length)); DBUG_RETURN(int_key); diff --git a/storage/maria/ma_keycache.c b/storage/maria/ma_keycache.c index c3083445aee..39459c486fd 100644 --- a/storage/maria/ma_keycache.c +++ b/storage/maria/ma_keycache.c @@ -54,8 +54,8 @@ int maria_assign_to_pagecache(MARIA_HA *info, MARIA_SHARE* share= info->s; DBUG_ENTER("maria_assign_to_pagecache"); DBUG_PRINT("enter", - ("old_pagecache_handle: 0x%lx new_pagecache_handle: 0x%lx", - (long) share->pagecache, (long) pagecache)); + ("old_pagecache_handle:%p new_pagecache_handle:%p", + share->pagecache, pagecache)); /* Skip operation if we didn't change key cache. This can happen if we diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index 94a5c9f1e71..7f0ac64afd1 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -909,7 +909,7 @@ char *translog_filename_by_fileno(uint32 file_no, char *path) length= (uint) (int10_to_str(file_no, buff, 10) - buff); strmov(end - length +1, buff); - DBUG_PRINT("info", ("Path: '%s' path: 0x%lx", path, (ulong) path)); + DBUG_PRINT("info", ("Path: '%s' path: %p", path, path)); DBUG_RETURN(path); } @@ -1013,8 +1013,8 @@ static TRANSLOG_FILE *get_logfile_by_number(uint32 file_no) file= *dynamic_element(&log_descriptor.open_files, log_descriptor.max_file - file_no, TRANSLOG_FILE **); mysql_rwlock_unlock(&log_descriptor.open_files_lock); - DBUG_PRINT("info", ("File 0x%lx File no: %lu, File handler: %d", - (ulong)file, (ulong)file_no, + DBUG_PRINT("info", ("File %p File no: %u, File handler: %d", + file, file_no, (file ? 
file->handler.file : -1))); DBUG_ASSERT(!file || file->number == file_no); DBUG_RETURN(file); @@ -1127,7 +1127,7 @@ static my_bool translog_max_lsn_to_header(File file, LSN lsn) my_bool rc; DBUG_ENTER("translog_max_lsn_to_header"); DBUG_PRINT("enter", ("File descriptor: %ld " - "lsn: (%lu,0x%lx)", + "lsn: " LSN_FMT, (long) file, LSN_IN_PARTS(lsn))); @@ -1200,7 +1200,7 @@ my_bool translog_read_file_header(LOGHANDLER_FILE_INFO *desc, File file) translog_interpret_file_header(desc, page_buff); DBUG_PRINT("info", ("timestamp: %llu aria ver: %lu mysql ver: %lu " "server id %lu page size %lu file number %lu " - "max lsn: (%lu,0x%lx)", + "max lsn: " LSN_FMT, (ulonglong) desc->timestamp, (ulong) desc->maria_version, (ulong) desc->mysql_version, @@ -1229,7 +1229,7 @@ static my_bool translog_set_lsn_for_files(uint32 from_file, uint32 to_file, { uint32 file; DBUG_ENTER("translog_set_lsn_for_files"); - DBUG_PRINT("enter", ("From: %lu to: %lu lsn: (%lu,0x%lx) locked: %d", + DBUG_PRINT("enter", ("From: %lu to: %lu lsn: " LSN_FMT " locked: %d", (ulong) from_file, (ulong) to_file, LSN_IN_PARTS(lsn), is_locked)); @@ -1458,7 +1458,7 @@ LSN translog_get_file_max_lsn_stored(uint32 file) info.max_lsn= LSN_ERROR; } - DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", LSN_IN_PARTS(info.max_lsn))); + DBUG_PRINT("info", ("Max lsn: " LSN_FMT, LSN_IN_PARTS(info.max_lsn))); DBUG_RETURN(info.max_lsn); } } @@ -1482,8 +1482,8 @@ static my_bool translog_buffer_init(struct st_translog_buffer *buffer, int num) buffer->pre_force_close_horizon= buffer->prev_last_lsn= buffer->last_lsn= LSN_IMPOSSIBLE; - DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: 0x%lx", - (ulong) buffer)); + DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: %p", + buffer)); buffer->buffer_no= (uint8) num; /* This Buffer File */ @@ -1684,8 +1684,8 @@ static void translog_buffer_lock(struct st_translog_buffer *buffer) { DBUG_ENTER("translog_buffer_lock"); DBUG_PRINT("enter", - ("Lock buffer #%u: (0x%lx)", (uint) buffer->buffer_no, - (ulong) buffer)); + ("Lock buffer #%u: %p", buffer->buffer_no, + buffer)); mysql_mutex_lock(&buffer->mutex); DBUG_VOID_RETURN; } @@ -1706,8 +1706,8 @@ static void translog_buffer_lock(struct st_translog_buffer *buffer) static void translog_buffer_unlock(struct st_translog_buffer *buffer) { DBUG_ENTER("translog_buffer_unlock"); - DBUG_PRINT("enter", ("Unlock buffer... #%u (0x%lx)", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("enter", ("Unlock buffer... 
#%u (%p)", + (uint) buffer->buffer_no, buffer)); mysql_mutex_unlock(&buffer->mutex); DBUG_VOID_RETURN; @@ -1751,7 +1751,7 @@ static void translog_new_page_header(TRANSLOG_ADDRESS *horizon, if (log_descriptor.flags & TRANSLOG_PAGE_CRC) { #ifndef DBUG_OFF - DBUG_PRINT("info", ("write 0x11223344 CRC to (%lu,0x%lx)", + DBUG_PRINT("info", ("write 0x11223344 CRC to " LSN_FMT, LSN_IN_PARTS(*horizon))); /* This will be overwritten by real CRC; This is just for debugging */ int4store(ptr, 0x11223344); @@ -1770,16 +1770,16 @@ static void translog_new_page_header(TRANSLOG_ADDRESS *horizon, ptr+= TRANSLOG_PAGE_SIZE / DISK_DRIVE_SECTOR_SIZE; } { - uint len= (ptr - cursor->ptr); + size_t len= (ptr - cursor->ptr); (*horizon)+= len; /* increasing the offset part of the address */ - cursor->current_page_fill= len; + cursor->current_page_fill= (uint16)len; if (!cursor->chaser) - cursor->buffer->size+= len; + cursor->buffer->size+= (translog_size_t)len; } cursor->ptr= ptr; - DBUG_PRINT("info", ("NewP buffer #%u: 0x%lx chaser: %d Size: %lu (%lu) " - "Horizon: (%lu,0x%lx)", - (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer, + DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu) " + "Horizon: " LSN_FMT, + (uint) cursor->buffer->buffer_no, cursor->buffer, cursor->chaser, (ulong) cursor->buffer->size, (ulong) (cursor->ptr - cursor->buffer->buffer), LSN_IN_PARTS(*horizon))); @@ -1882,17 +1882,17 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon, uint16 left= TRANSLOG_PAGE_SIZE - cursor->current_page_fill; uchar *page= cursor->ptr - cursor->current_page_fill; DBUG_ENTER("translog_finish_page"); - DBUG_PRINT("enter", ("Buffer: #%u 0x%lx " - "Buffer addr: (%lu,0x%lx) " - "Page addr: (%lu,0x%lx) " - "size:%lu (%lu) Pg:%u left:%u", - (uint) cursor->buffer_no, (ulong) cursor->buffer, + DBUG_PRINT("enter", ("Buffer: #%u %p " + "Buffer addr: " LSN_FMT " " + "Page addr: " LSN_FMT " " + "size:%u (%u) Pg:%u left:%u", + (uint) cursor->buffer_no, cursor->buffer, LSN_IN_PARTS(cursor->buffer->offset), - (ulong) LSN_FILE_NO(*horizon), - (ulong) (LSN_OFFSET(*horizon) - + (uint)LSN_FILE_NO(*horizon), + (uint)(LSN_OFFSET(*horizon) - cursor->current_page_fill), - (ulong) cursor->buffer->size, - (ulong) (cursor->ptr -cursor->buffer->buffer), + (uint) cursor->buffer->size, + (uint) (cursor->ptr -cursor->buffer->buffer), (uint) cursor->current_page_fill, (uint) left)); DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset)); translog_check_cursor(cursor); @@ -1914,10 +1914,10 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon, cursor->buffer->size+= left; /* We are finishing the page so reset the counter */ cursor->current_page_fill= 0; - DBUG_PRINT("info", ("Finish Page buffer #%u: 0x%lx " + DBUG_PRINT("info", ("Finish Page buffer #%u: %p " "chaser: %d Size: %lu (%lu)", (uint) cursor->buffer->buffer_no, - (ulong) cursor->buffer, cursor->chaser, + cursor->buffer, cursor->chaser, (ulong) cursor->buffer->size, (ulong) (cursor->ptr - cursor->buffer->buffer))); translog_check_cursor(cursor); @@ -1956,9 +1956,9 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon, static void translog_wait_for_closing(struct st_translog_buffer *buffer) { DBUG_ENTER("translog_wait_for_closing"); - DBUG_PRINT("enter", ("Buffer #%u 0x%lx copies in progress: %u " + DBUG_PRINT("enter", ("Buffer #%u %p copies in progress: %u " "is closing %u File: %d size: %lu", - (uint) buffer->buffer_no, (ulong) buffer, + (uint) buffer->buffer_no, buffer, (uint) buffer->copy_to_buffer_in_progress, (uint) 
buffer->is_closing_buffer, (buffer->file ? buffer->file->handler.file : -1), @@ -1967,12 +1967,12 @@ static void translog_wait_for_closing(struct st_translog_buffer *buffer) while (buffer->is_closing_buffer) { - DBUG_PRINT("info", ("wait for writers... buffer: #%u 0x%lx", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("wait for writers... buffer: #%u %p", + (uint) buffer->buffer_no, buffer)); DBUG_ASSERT(buffer->file != NULL); mysql_cond_wait(&buffer->waiting_filling_buffer, &buffer->mutex); - DBUG_PRINT("info", ("wait for writers done buffer: #%u 0x%lx", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("wait for writers done buffer: #%u %p", + (uint) buffer->buffer_no, buffer)); } DBUG_VOID_RETURN; @@ -1988,9 +1988,9 @@ static void translog_wait_for_closing(struct st_translog_buffer *buffer) static void translog_wait_for_writers(struct st_translog_buffer *buffer) { DBUG_ENTER("translog_wait_for_writers"); - DBUG_PRINT("enter", ("Buffer #%u 0x%lx copies in progress: %u " + DBUG_PRINT("enter", ("Buffer #%u %p copies in progress: %u " "is closing %u File: %d size: %lu", - (uint) buffer->buffer_no, (ulong) buffer, + (uint) buffer->buffer_no, buffer, (uint) buffer->copy_to_buffer_in_progress, (uint) buffer->is_closing_buffer, (buffer->file ? buffer->file->handler.file : -1), @@ -1999,12 +1999,12 @@ static void translog_wait_for_writers(struct st_translog_buffer *buffer) while (buffer->copy_to_buffer_in_progress) { - DBUG_PRINT("info", ("wait for writers... buffer: #%u 0x%lx", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("wait for writers... buffer: #%u %p", + (uint) buffer->buffer_no, buffer)); DBUG_ASSERT(buffer->file != NULL); mysql_cond_wait(&buffer->waiting_filling_buffer, &buffer->mutex); - DBUG_PRINT("info", ("wait for writers done buffer: #%u 0x%lx", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("wait for writers done buffer: #%u %p", + (uint) buffer->buffer_no, buffer)); } DBUG_VOID_RETURN; @@ -2029,9 +2029,9 @@ static void translog_wait_for_buffer_free(struct st_translog_buffer *buffer) TRANSLOG_FILE *file= buffer->file; uint8 ver= buffer->ver; DBUG_ENTER("translog_wait_for_buffer_free"); - DBUG_PRINT("enter", ("Buffer #%u 0x%lx copies in progress: %u " + DBUG_PRINT("enter", ("Buffer #%u %p copies in progress: %u " "is closing %u File: %d size: %lu", - (uint) buffer->buffer_no, (ulong) buffer, + (uint) buffer->buffer_no, buffer, (uint) buffer->copy_to_buffer_in_progress, (uint) buffer->is_closing_buffer, (buffer->file ? buffer->file->handler.file : -1), @@ -2044,11 +2044,11 @@ static void translog_wait_for_buffer_free(struct st_translog_buffer *buffer) while (buffer->file != NULL) { - DBUG_PRINT("info", ("wait for writers... buffer: #%u 0x%lx", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("wait for writers... buffer: #%u %p", + (uint) buffer->buffer_no, buffer)); mysql_cond_wait(&buffer->waiting_filling_buffer, &buffer->mutex); - DBUG_PRINT("info", ("wait for writers done. buffer: #%u 0x%lx", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("wait for writers done. 
buffer: #%u %p", + (uint) buffer->buffer_no, buffer)); } DBUG_ASSERT(buffer->copy_to_buffer_in_progress == 0); DBUG_VOID_RETURN; @@ -2096,15 +2096,15 @@ static void translog_start_buffer(struct st_translog_buffer *buffer, { DBUG_ENTER("translog_start_buffer"); DBUG_PRINT("enter", - ("Assign buffer: #%u (0x%lx) offset: 0x%lx(%lu)", - (uint) buffer->buffer_no, (ulong) buffer, - (ulong) LSN_OFFSET(log_descriptor.horizon), - (ulong) LSN_OFFSET(log_descriptor.horizon))); + ("Assign buffer: #%u (%p) offset: 0x%x(%u)", + (uint) buffer->buffer_no, buffer, + (uint) LSN_OFFSET(log_descriptor.horizon), + (uint) LSN_OFFSET(log_descriptor.horizon))); DBUG_ASSERT(buffer_no == buffer->buffer_no); buffer->pre_force_close_horizon= buffer->prev_last_lsn= buffer->last_lsn= LSN_IMPOSSIBLE; - DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: 0x%lx", - (ulong) buffer)); + DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: %p", + buffer)); buffer->offset= log_descriptor.horizon; buffer->next_buffer_offset= LSN_IMPOSSIBLE; buffer->file= get_current_logfile(); @@ -2112,11 +2112,11 @@ static void translog_start_buffer(struct st_translog_buffer *buffer, buffer->size= 0; buffer->skipped_data= 0; translog_cursor_init(cursor, buffer, buffer_no); - DBUG_PRINT("info", ("file: #%ld (%d) init cursor #%u: 0x%lx " + DBUG_PRINT("info", ("file: #%ld (%d) init cursor #%u: %p " "chaser: %d Size: %lu (%lu)", (long) (buffer->file ? buffer->file->number : 0), (buffer->file ? buffer->file->handler.file : -1), - (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer, + (uint) cursor->buffer->buffer_no, cursor->buffer, cursor->chaser, (ulong) cursor->buffer->size, (ulong) (cursor->ptr - cursor->buffer->buffer))); translog_check_cursor(cursor); @@ -2153,7 +2153,7 @@ static my_bool translog_buffer_next(TRANSLOG_ADDRESS *horizon, my_bool chasing= cursor->chaser; DBUG_ENTER("translog_buffer_next"); - DBUG_PRINT("info", ("horizon: (%lu,0x%lx) chasing: %d", + DBUG_PRINT("info", ("horizon: " LSN_FMT " chasing: %d", LSN_IN_PARTS(log_descriptor.horizon), chasing)); DBUG_ASSERT(cmp_translog_addr(log_descriptor.horizon, *horizon) >= 0); @@ -2205,9 +2205,9 @@ static my_bool translog_buffer_next(TRANSLOG_ADDRESS *horizon, BUFFER_MAX_LSN(log_descriptor.buffers + old_buffer_no); } log_descriptor.buffers[old_buffer_no].next_buffer_offset= new_buffer->offset; - DBUG_PRINT("info", ("prev_last_lsn set to (%lu,0x%lx) buffer: 0x%lx", + DBUG_PRINT("info", ("prev_last_lsn set to " LSN_FMT " buffer:%p", LSN_IN_PARTS(new_buffer->prev_last_lsn), - (ulong) new_buffer)); + new_buffer)); translog_new_page_header(horizon, cursor); DBUG_RETURN(0); } @@ -2230,9 +2230,9 @@ static void translog_set_sent_to_disk(struct st_translog_buffer *buffer) DBUG_ENTER("translog_set_sent_to_disk"); mysql_mutex_lock(&log_descriptor.sent_to_disk_lock); - DBUG_PRINT("enter", ("lsn: (%lu,0x%lx) in_buffers: (%lu,0x%lx) " - "in_buffers_only: (%lu,0x%lx) start: (%lu,0x%lx) " - "sent_to_disk: (%lu,0x%lx)", + DBUG_PRINT("enter", ("lsn: " LSN_FMT " in_buffers: " LSN_FMT " " + "in_buffers_only: " LSN_FMT " start: " LSN_FMT " " + "sent_to_disk: " LSN_FMT, LSN_IN_PARTS(lsn), LSN_IN_PARTS(in_buffers), LSN_IN_PARTS(log_descriptor.log_start), @@ -2270,8 +2270,8 @@ static void translog_set_only_in_buffers(TRANSLOG_ADDRESS in_buffers) { DBUG_ENTER("translog_set_only_in_buffers"); mysql_mutex_lock(&log_descriptor.sent_to_disk_lock); - DBUG_PRINT("enter", ("in_buffers: (%lu,0x%lx) " - "in_buffers_only: (%lu,0x%lx)", + DBUG_PRINT("enter", ("in_buffers: " LSN_FMT " " + 
"in_buffers_only: " LSN_FMT, LSN_IN_PARTS(in_buffers), LSN_IN_PARTS(log_descriptor.in_buffers_only))); /* LSN_IMPOSSIBLE == 0 => it will work for very first time */ @@ -2325,7 +2325,7 @@ static LSN translog_get_sent_to_disk() DBUG_ENTER("translog_get_sent_to_disk"); mysql_mutex_lock(&log_descriptor.sent_to_disk_lock); lsn= log_descriptor.sent_to_disk; - DBUG_PRINT("info", ("sent to disk up to (%lu,0x%lx)", LSN_IN_PARTS(lsn))); + DBUG_PRINT("info", ("sent to disk up to " LSN_FMT, LSN_IN_PARTS(lsn))); mysql_mutex_unlock(&log_descriptor.sent_to_disk_lock); DBUG_RETURN(lsn); } @@ -2534,9 +2534,9 @@ my_bool translog_prev_buffer_flush_wait(struct st_translog_buffer *buffer) TRANSLOG_FILE *file= buffer->file; uint8 ver= buffer->ver; DBUG_ENTER("translog_prev_buffer_flush_wait"); - DBUG_PRINT("enter", ("buffer: 0x%lx #%u offset: (%lu,0x%lx) " - "prev sent: (%lu,0x%lx) prev offset: (%lu,0x%lx)", - (ulong) buffer, (uint) buffer->buffer_no, + DBUG_PRINT("enter", ("buffer: %p #%u offset: " LSN_FMT " " + "prev sent: " LSN_FMT " prev offset: " LSN_FMT, + buffer, (uint) buffer->buffer_no, LSN_IN_PARTS(buffer->offset), LSN_IN_PARTS(buffer->prev_sent_to_disk), LSN_IN_PARTS(buffer->prev_buffer_offset))); @@ -2575,8 +2575,8 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer) uint skipped_data; DBUG_ENTER("translog_buffer_flush"); DBUG_PRINT("enter", - ("Buffer: #%u 0x%lx file: %d offset: (%lu,0x%lx) size: %lu", - (uint) buffer->buffer_no, (ulong) buffer, + ("Buffer: #%u %p file: %d offset: " LSN_FMT " size: %lu", + (uint) buffer->buffer_no, buffer, buffer->file->handler.file, LSN_IN_PARTS(buffer->offset), (ulong) buffer->size)); @@ -2615,11 +2615,11 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer) #ifndef DBUG_OFF TRANSLOG_ADDRESS addr= (buffer->offset + i); #endif - DBUG_PRINT("info", ("send log form %lu till %lu address: (%lu,0x%lx) " - "page #: %lu buffer size: %lu buffer: 0x%lx", + DBUG_PRINT("info", ("send log form %lu till %lu address: " LSN_FMT " " + "page #: %lu buffer size: %lu buffer: %p", (ulong) i, (ulong) (i + TRANSLOG_PAGE_SIZE), LSN_IN_PARTS(addr), (ulong) pg, (ulong) buffer->size, - (ulong) buffer)); + buffer)); DBUG_ASSERT(log_descriptor.pagecache->block_size == TRANSLOG_PAGE_SIZE); DBUG_ASSERT(i + TRANSLOG_PAGE_SIZE <= buffer->size); if (translog_status != TRANSLOG_OK && translog_status != TRANSLOG_SHUTDOWN) @@ -2636,9 +2636,9 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer) TRANSLOG_PAGE_SIZE - skipped_data)) { DBUG_PRINT("error", - ("Can't write page (%lu,0x%lx) to pagecache, error: %d", - (ulong) buffer->file->number, - (ulong) (LSN_OFFSET(buffer->offset)+ i), + ("Can't write page " LSN_FMT " to pagecache, error: %d", + buffer->file->number, + (uint)(LSN_OFFSET(buffer->offset)+ i), my_errno)); translog_stop_writing(); DBUG_RETURN(1); @@ -2651,10 +2651,10 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer) LSN_OFFSET(buffer->offset) + buffer->skipped_data, log_write_flags)) { - DBUG_PRINT("error", ("Can't write buffer (%lu,0x%lx) size %lu " + DBUG_PRINT("error", ("Can't write buffer " LSN_FMT " size %lu " "to the disk (%d)", - (ulong) file->handler.file, - (ulong) LSN_OFFSET(buffer->offset), + (uint) file->handler.file, + (uint) LSN_OFFSET(buffer->offset), (ulong) buffer->size, errno)); translog_stop_writing(); DBUG_RETURN(1); @@ -2874,10 +2874,10 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args) if ((pgcache_page_no_t) uint3korr(page) != page_no || (uint32) 
uint3korr(page + 3) != data->number) { - DBUG_PRINT("error", ("Page (%lu,0x%lx): " + DBUG_PRINT("error", ("Page " LSN_FMT ": " "page address written in the page is incorrect: " "File %lu instead of %lu or page %lu instead of %lu", - (ulong) data->number, (ulong) offset, + (uint)data->number, (uint)offset, (ulong) uint3korr(page + 3), (ulong) data->number, (ulong) uint3korr(page), (ulong) page_no)); @@ -2888,9 +2888,9 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args) if (flags & ~(TRANSLOG_PAGE_CRC | TRANSLOG_SECTOR_PROTECTION | TRANSLOG_RECORD_CRC)) { - DBUG_PRINT("error", ("Page (%lu,0x%lx): " + DBUG_PRINT("error", ("Page " LSN_FMT ": " "Garbage in the page flags field detected : %x", - (ulong) data->number, (ulong) offset, + (uint) data->number, (uint) offset, (uint) flags)); DBUG_RETURN(1); } @@ -2902,9 +2902,9 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args) this_page_page_overhead); if (crc != uint4korr(page_pos)) { - DBUG_PRINT("error", ("Page (%lu,0x%lx): " + DBUG_PRINT("error", ("Page " LSN_FMT ": " "CRC mismatch: calculated: %lx on the page %lx", - (ulong) data->number, (ulong) offset, + (uint) data->number, (uint) offset, (ulong) crc, (ulong) uint4korr(page_pos))); DBUG_RETURN(1); } @@ -2985,10 +2985,10 @@ static uchar *translog_get_page(TRANSLOG_VALIDATOR_DATA *data, uchar *buffer, uint32 file_no= LSN_FILE_NO(addr); TRANSLOG_FILE *file; DBUG_ENTER("translog_get_page"); - DBUG_PRINT("enter", ("File: %lu Offset: %lu(0x%lx)", - (ulong) file_no, - (ulong) LSN_OFFSET(addr), - (ulong) LSN_OFFSET(addr))); + DBUG_PRINT("enter", ("File: %u Offset: %u(0x%x)", + file_no, + (uint) LSN_OFFSET(addr), + (uint) LSN_OFFSET(addr))); /* it is really page address */ DBUG_ASSERT(LSN_OFFSET(addr) % TRANSLOG_PAGE_SIZE == 0); @@ -2998,7 +2998,7 @@ static uchar *translog_get_page(TRANSLOG_VALIDATOR_DATA *data, uchar *buffer, restart: in_buffers= translog_only_in_buffers(); - DBUG_PRINT("info", ("in_buffers: (%lu,0x%lx)", + DBUG_PRINT("info", ("in_buffers: " LSN_FMT, LSN_IN_PARTS(in_buffers))); if (in_buffers != LSN_IMPOSSIBLE && cmp_translog_addr(addr, in_buffers) >= 0) @@ -3168,9 +3168,9 @@ restart: PAGECACHE_LOCK_READ : PAGECACHE_LOCK_LEFT_UNLOCKED), direct_link); - DBUG_PRINT("info", ("Direct link is assigned to : 0x%lx * 0x%lx", - (ulong) direct_link, - (ulong)(direct_link ? *direct_link : NULL))); + DBUG_PRINT("info", ("Direct link is assigned to : %p * %p", + direct_link, + (direct_link ? 
*direct_link : NULL))); data->was_recovered= file->was_recovered; DBUG_RETURN(buffer); } @@ -3186,8 +3186,8 @@ restart: static void translog_free_link(PAGECACHE_BLOCK_LINK *direct_link) { DBUG_ENTER("translog_free_link"); - DBUG_PRINT("info", ("Direct link: 0x%lx", - (ulong) direct_link)); + DBUG_PRINT("info", ("Direct link: %p", + direct_link)); if (direct_link) pagecache_unlock_by_link(log_descriptor.pagecache, direct_link, PAGECACHE_LOCK_READ_UNLOCK, PAGECACHE_UNPIN, @@ -3380,8 +3380,8 @@ static my_bool translog_truncate_log(TRANSLOG_ADDRESS addr) uchar page_buff[TRANSLOG_PAGE_SIZE]; DBUG_ENTER("translog_truncate_log"); /* TODO: write warning to the client */ - DBUG_PRINT("warning", ("removing all records from (%lu,0x%lx) " - "till (%lu,0x%lx)", + DBUG_PRINT("warning", ("removing all records from " LSN_FMT " " + "till " LSN_FMT, LSN_IN_PARTS(addr), LSN_IN_PARTS(log_descriptor.horizon))); DBUG_ASSERT(cmp_translog_addr(addr, log_descriptor.horizon) < 0); @@ -3680,8 +3680,8 @@ my_bool translog_init_with_table(const char *directory, { if (translog_buffer_init(log_descriptor.buffers + i, i)) goto err; - DBUG_PRINT("info", ("translog_buffer buffer #%u: 0x%lx", - i, (ulong) log_descriptor.buffers + i)); + DBUG_PRINT("info", ("translog_buffer buffer #%u:%p", + i, log_descriptor.buffers + i)); } /* @@ -3929,9 +3929,9 @@ my_bool translog_init_with_table(const char *directory, log_descriptor.horizon= LSN_REPLACE_OFFSET(log_descriptor.horizon, (chunk_offset + LSN_OFFSET(last_valid_page))); - DBUG_PRINT("info", ("Move Page #%u: 0x%lx chaser: %d Size: %lu (%lu)", + DBUG_PRINT("info", ("Move Page #%u: %p chaser: %d Size: %lu (%lu)", (uint) log_descriptor.bc.buffer_no, - (ulong) log_descriptor.bc.buffer, + log_descriptor.bc.buffer, log_descriptor.bc.chaser, (ulong) log_descriptor.bc.buffer->size, (ulong) (log_descriptor.bc.ptr - log_descriptor.bc. @@ -4095,7 +4095,7 @@ my_bool translog_init_with_table(const char *directory, There is no harm in leaving it "as-is". */ log_descriptor.previous_flush_horizon= log_descriptor.horizon; - DBUG_PRINT("info", ("previous_flush_horizon: (%lu,0x%lx)", + DBUG_PRINT("info", ("previous_flush_horizon: " LSN_FMT, LSN_IN_PARTS(log_descriptor. 
previous_flush_horizon))); DBUG_RETURN(0); @@ -4123,7 +4123,7 @@ my_bool translog_init_with_table(const char *directory, translog_size_t rec_len; int len; uchar buffer[1]; - DBUG_PRINT("info", ("going to check the last found record (%lu,0x%lx)", + DBUG_PRINT("info", ("going to check the last found record " LSN_FMT, LSN_IN_PARTS(last_lsn))); len= @@ -4132,7 +4132,7 @@ my_bool translog_init_with_table(const char *directory, len == RECHEADER_READ_EOF)) { DBUG_PRINT("error", ("unexpected end of log or record during " - "reading record header: (%lu,0x%lx) len: %d", + "reading record header: " LSN_FMT " len: %d", LSN_IN_PARTS(last_lsn), len)); if (readonly) log_descriptor.log_start= log_descriptor.horizon= last_lsn; @@ -4156,7 +4156,7 @@ my_bool translog_init_with_table(const char *directory, if (rec_len != 1) { DBUG_PRINT("error", ("unexpected end of log or record during " - "reading record body: (%lu,0x%lx) len: %d", + "reading record body: " LSN_FMT " len: %d", LSN_IN_PARTS(rec.lsn), len)); if (readonly) @@ -4174,7 +4174,7 @@ my_bool translog_init_with_table(const char *directory, } } log_descriptor.previous_flush_horizon= log_descriptor.horizon; - DBUG_PRINT("info", ("previous_flush_horizon: (%lu,0x%lx)", + DBUG_PRINT("info", ("previous_flush_horizon: " LSN_FMT, LSN_IN_PARTS(log_descriptor.previous_flush_horizon))); DBUG_RETURN(0); err: @@ -4193,8 +4193,8 @@ static void translog_buffer_destroy(struct st_translog_buffer *buffer) { DBUG_ENTER("translog_buffer_destroy"); DBUG_PRINT("enter", - ("Buffer #%u: 0x%lx file: %d offset: (%lu,0x%lx) size: %lu", - (uint) buffer->buffer_no, (ulong) buffer, + ("Buffer #%u: %p file: %d offset: " LSN_FMT " size: %lu", + (uint) buffer->buffer_no, buffer, (buffer->file ? buffer->file->handler.file : -1), LSN_IN_PARTS(buffer->offset), (ulong) buffer->size)); @@ -4212,7 +4212,7 @@ static void translog_buffer_destroy(struct st_translog_buffer *buffer) translog_buffer_flush(buffer); translog_buffer_unlock(buffer); } - DBUG_PRINT("info", ("Destroy mutex: 0x%lx", (ulong) &buffer->mutex)); + DBUG_PRINT("info", ("Destroy mutex: %p", &buffer->mutex)); mysql_mutex_destroy(&buffer->mutex); mysql_cond_destroy(&buffer->waiting_filling_buffer); DBUG_VOID_RETURN; @@ -4322,15 +4322,15 @@ static my_bool translog_page_next(TRANSLOG_ADDRESS *horizon, TRANSLOG_PAGE_SIZE))) DBUG_RETURN(1); *prev_buffer= buffer; - DBUG_PRINT("info", ("Buffer #%u (0x%lu): have to be flushed", - (uint) buffer->buffer_no, (ulong) buffer)); + DBUG_PRINT("info", ("Buffer #%u (%p): have to be flushed", + (uint) buffer->buffer_no, buffer)); } else { - DBUG_PRINT("info", ("Use the same buffer #%u (0x%lu): " + DBUG_PRINT("info", ("Use the same buffer #%u (%p): " "Buffer Size: %lu (%lu)", (uint) buffer->buffer_no, - (ulong) buffer, + buffer, (ulong) cursor->buffer->size, (ulong) (cursor->ptr - cursor->buffer->buffer))); translog_finish_page(horizon, cursor); @@ -4374,9 +4374,9 @@ static my_bool translog_write_data_on_page(TRANSLOG_ADDRESS *horizon, cursor->current_page_fill+= length; if (!cursor->chaser) cursor->buffer->size+= length; - DBUG_PRINT("info", ("Write data buffer #%u: 0x%lx " + DBUG_PRINT("info", ("Write data buffer #%u: %p " "chaser: %d Size: %lu (%lu)", - (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer, + (uint) cursor->buffer->buffer_no, cursor->buffer, cursor->chaser, (ulong) cursor->buffer->size, (ulong) (cursor->ptr - cursor->buffer->buffer))); translog_check_cursor(cursor); @@ -4429,9 +4429,9 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon, DBUG_ASSERT(cur 
< parts->elements); part= parts->parts + cur; buff= part->str; - DBUG_PRINT("info", ("Part: %u Length: %lu left: %lu buff: 0x%lx", + DBUG_PRINT("info", ("Part: %u Length: %lu left: %lu buff: %p", (uint) (cur + 1), (ulong) part->length, (ulong) left, - (ulong) buff)); + buff)); if (part->length > left) { @@ -4448,8 +4448,8 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon, cur++; DBUG_PRINT("info", ("moved to next part (len: %lu)", (ulong) len)); } - DBUG_PRINT("info", ("copy: 0x%lx <- 0x%lx %u", - (ulong) cursor->ptr, (ulong)buff, (uint)len)); + DBUG_PRINT("info", ("copy: %p <- %p %u", + cursor->ptr, buff, len)); if (likely(len)) { memcpy(cursor->ptr, buff, len); @@ -4458,9 +4458,9 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon, } } while (left); - DBUG_PRINT("info", ("Horizon: (%lu,0x%lx) Length %lu(0x%lx)", + DBUG_PRINT("info", ("Horizon: " LSN_FMT " Length %u(0x%x)", LSN_IN_PARTS(*horizon), - (ulong) length, (ulong) length)); + length, length)); parts->current= cur; (*horizon)+= length; /* offset increasing */ cursor->current_page_fill+= length; @@ -4470,14 +4470,14 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon, We do not not updating parts->total_record_length here because it is need only before writing record to have total length */ - DBUG_PRINT("info", ("Write parts buffer #%u: 0x%lx " + DBUG_PRINT("info", ("Write parts buffer #%u: %p " "chaser: %d Size: %lu (%lu) " - "Horizon: (%lu,0x%lx) buff offset: 0x%lx", - (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer, + "Horizon: " LSN_FMT " buff offset: 0x%x", + (uint) cursor->buffer->buffer_no, cursor->buffer, cursor->chaser, (ulong) cursor->buffer->size, (ulong) (cursor->ptr - cursor->buffer->buffer), LSN_IN_PARTS(*horizon), - (ulong) (LSN_OFFSET(cursor->buffer->offset) + + (uint) (LSN_OFFSET(cursor->buffer->offset) + cursor->buffer->size))); translog_check_cursor(cursor); @@ -4535,8 +4535,8 @@ translog_buffer_increase_writers(struct st_translog_buffer *buffer) DBUG_ENTER("translog_buffer_increase_writers"); translog_buffer_lock_assert_owner(buffer); buffer->copy_to_buffer_in_progress++; - DBUG_PRINT("info", ("copy_to_buffer_in_progress. Buffer #%u 0x%lx progress: %d", - (uint) buffer->buffer_no, (ulong) buffer, + DBUG_PRINT("info", ("copy_to_buffer_in_progress. Buffer #%u %p progress: %d", + (uint) buffer->buffer_no, buffer, buffer->copy_to_buffer_in_progress)); DBUG_VOID_RETURN; } @@ -4556,8 +4556,8 @@ static void translog_buffer_decrease_writers(struct st_translog_buffer *buffer) translog_buffer_lock_assert_owner(buffer); buffer->copy_to_buffer_in_progress--; DBUG_PRINT("info", - ("copy_to_buffer_in_progress. Buffer #%u 0x%lx progress: %d", - (uint) buffer->buffer_no, (ulong) buffer, + ("copy_to_buffer_in_progress. 
Buffer #%u %p progress: %d", + (uint) buffer->buffer_no, buffer, buffer->copy_to_buffer_in_progress)); if (buffer->copy_to_buffer_in_progress == 0) mysql_cond_broadcast(&buffer->waiting_filling_buffer); @@ -4701,7 +4701,7 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) pages * TRANSLOG_PAGE_SIZE + last_page_offset); translog_size_t buffer_end_offset, file_end_offset, min_offset; DBUG_ENTER("translog_advance_pointer"); - DBUG_PRINT("enter", ("Pointer: (%lu, 0x%lx) + %u + %u pages + %u + %u", + DBUG_PRINT("enter", ("Pointer: " LSN_FMT " + %u + %u pages + %u + %u", LSN_IN_PARTS(log_descriptor.horizon), (uint) (TRANSLOG_PAGE_SIZE - log_descriptor.bc.current_page_fill), @@ -4751,20 +4751,20 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) file_end_offset= (TRANSLOG_PAGE_SIZE - log_descriptor.bc.current_page_fill); } - DBUG_PRINT("info", ("offset: %lu buffer_end_offs: %lu, " - "file_end_offs: %lu", - (ulong) offset, (ulong) buffer_end_offset, - (ulong) file_end_offset)); - DBUG_PRINT("info", ("Buff #%u %u (0x%lx) offset 0x%lx + size 0x%lx = " - "0x%lx (0x%lx)", - (uint) log_descriptor.bc.buffer->buffer_no, - (uint) log_descriptor.bc.buffer_no, - (ulong) log_descriptor.bc.buffer, - (ulong) LSN_OFFSET(log_descriptor.bc.buffer->offset), - (ulong) log_descriptor.bc.buffer->size, - (ulong) (LSN_OFFSET(log_descriptor.bc.buffer->offset) + + DBUG_PRINT("info", ("offset: %u buffer_end_offs: %u, " + "file_end_offs: %u", + offset, buffer_end_offset, + file_end_offset)); + DBUG_PRINT("info", ("Buff #%u %u (%p) offset 0x%x + size 0x%x = " + "0x%x (0x%x)", + log_descriptor.bc.buffer->buffer_no, + log_descriptor.bc.buffer_no, + log_descriptor.bc.buffer, + (uint) LSN_OFFSET(log_descriptor.bc.buffer->offset), + log_descriptor.bc.buffer->size, + (uint) (LSN_OFFSET(log_descriptor.bc.buffer->offset) + log_descriptor.bc.buffer->size), - (ulong) LSN_OFFSET(log_descriptor.horizon))); + (uint) LSN_OFFSET(log_descriptor.horizon))); DBUG_ASSERT(LSN_OFFSET(log_descriptor.bc.buffer->offset) + log_descriptor.bc.buffer->size == LSN_OFFSET(log_descriptor.horizon)); @@ -4795,9 +4795,9 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) /* TODO: check is it ptr or size enough */ log_descriptor.bc.buffer->size+= min_offset; log_descriptor.bc.ptr+= min_offset; - DBUG_PRINT("info", ("NewP buffer #%u: 0x%lx chaser: %d Size: %lu (%lu)", + DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu)", (uint) log_descriptor.bc.buffer->buffer_no, - (ulong) log_descriptor.bc.buffer, + log_descriptor.bc.buffer, log_descriptor.bc.chaser, (ulong) log_descriptor.bc.buffer->size, (ulong) (log_descriptor.bc.ptr -log_descriptor.bc. 
@@ -4841,10 +4841,10 @@ end: translog_buffer_increase_writers(log_descriptor.bc.buffer); log_descriptor.horizon+= offset; /* offset increasing */ log_descriptor.bc.current_page_fill= last_page_offset; - DBUG_PRINT("info", ("NewP buffer #%u: 0x%lx chaser: %d Size: %lu (%lu) " + DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu) " "offset: %u last page: %u", (uint) log_descriptor.bc.buffer->buffer_no, - (ulong) log_descriptor.bc.buffer, + log_descriptor.bc.buffer, log_descriptor.bc.chaser, (ulong) log_descriptor.bc.buffer->size, (ulong) (log_descriptor.bc.ptr - @@ -4852,7 +4852,7 @@ end: buffer), (uint) offset, (uint) last_page_offset)); DBUG_PRINT("info", - ("pointer moved to: (%lu, 0x%lx)", + ("pointer moved to: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon))); translog_check_cursor(&log_descriptor.bc); log_descriptor.bc.protected= 0; @@ -4892,7 +4892,7 @@ static uint translog_get_current_page_rest() static uint translog_get_current_buffer_rest() { - return ((log_descriptor.bc.buffer->buffer + TRANSLOG_WRITE_BUFFER - + return (uint)((log_descriptor.bc.buffer->buffer + TRANSLOG_WRITE_BUFFER - log_descriptor.bc.ptr) / TRANSLOG_PAGE_SIZE); } @@ -4939,7 +4939,7 @@ static inline void set_lsn(LSN *lsn, LSN value) *lsn= value; /* we generate LSN so something is not flushed in log */ log_descriptor.is_everything_flushed= 0; - DBUG_PRINT("info", ("new LSN appeared: (%lu,0x%lx)", LSN_IN_PARTS(value))); + DBUG_PRINT("info", ("new LSN appeared: " LSN_FMT, LSN_IN_PARTS(value))); DBUG_VOID_RETURN; } @@ -5030,9 +5030,9 @@ translog_write_variable_record_1group(LSN *lsn, rc|= translog_advance_pointer((int)(full_pages + additional_chunk3_page), (record_rest ? record_rest + 3 : 0)); log_descriptor.bc.buffer->last_lsn= *lsn; - DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx", + DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p", LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn), - (ulong) log_descriptor.bc.buffer)); + log_descriptor.bc.buffer)); translog_unlock(); @@ -5055,7 +5055,7 @@ translog_write_variable_record_1group(LSN *lsn, /* fill the pages */ translog_write_parts_on_page(&horizon, &cursor, first_page, parts); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)", + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon))); @@ -5064,7 +5064,7 @@ translog_write_variable_record_1group(LSN *lsn, if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor)) DBUG_RETURN(1); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)", + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon))); } @@ -5076,7 +5076,7 @@ translog_write_variable_record_1group(LSN *lsn, page_capacity_chunk_2 - 2, &horizon, &cursor)) DBUG_RETURN(1); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)", + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon))); DBUG_ASSERT(cursor.current_page_fill == TRANSLOG_PAGE_SIZE); @@ -5086,11 +5086,11 @@ translog_write_variable_record_1group(LSN *lsn, record_rest, &horizon, &cursor)) DBUG_RETURN(1); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)", - (ulong) LSN_FILE_NO(log_descriptor.horizon), - (ulong) LSN_OFFSET(log_descriptor.horizon), - (ulong) LSN_FILE_NO(horizon), - (ulong) LSN_OFFSET(horizon))); + DBUG_PRINT("info", ("absolute horizon: 
" LSN_FMT " local: " LSN_FMT, + (uint) LSN_FILE_NO(log_descriptor.horizon), + (uint) LSN_OFFSET(log_descriptor.horizon), + (uint) LSN_FILE_NO(horizon), + (uint) LSN_OFFSET(horizon))); translog_buffer_lock(cursor.buffer); translog_buffer_decrease_writers(cursor.buffer); @@ -5156,9 +5156,9 @@ translog_write_variable_record_1chunk(LSN *lsn, &log_descriptor.bc, parts->total_record_length, parts); log_descriptor.bc.buffer->last_lsn= *lsn; - DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx", + DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p", LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn), - (ulong) log_descriptor.bc.buffer)); + log_descriptor.bc.buffer)); translog_unlock(); /* @@ -5202,9 +5202,9 @@ static uchar *translog_put_LSN_diff(LSN base_lsn, LSN lsn, uchar *dst) { uint64 diff; DBUG_ENTER("translog_put_LSN_diff"); - DBUG_PRINT("enter", ("Base: (%lu,0x%lx) val: (%lu,0x%lx) dst: 0x%lx", + DBUG_PRINT("enter", ("Base: " LSN_FMT " val: " LSN_FMT " dst:%p", LSN_IN_PARTS(base_lsn), LSN_IN_PARTS(lsn), - (ulong) dst)); + dst)); DBUG_ASSERT(base_lsn > lsn); diff= base_lsn - lsn; DBUG_PRINT("info", ("Diff: 0x%llx", (ulonglong) diff)); @@ -5248,7 +5248,7 @@ static uchar *translog_put_LSN_diff(LSN base_lsn, LSN lsn, uchar *dst) dst[1]= 1; lsn_store(dst + 2, lsn); } - DBUG_PRINT("info", ("new dst: 0x%lx", (ulong) dst)); + DBUG_PRINT("info", ("new dst:%p", dst)); DBUG_RETURN(dst); } @@ -5287,8 +5287,8 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst) uint32 file_no, rec_offset; uint8 code; DBUG_ENTER("translog_get_LSN_from_diff"); - DBUG_PRINT("enter", ("Base: (%lu,0x%lx) src: 0x%lx dst 0x%lx", - LSN_IN_PARTS(base_lsn), (ulong) src, (ulong) dst)); + DBUG_PRINT("enter", ("Base: " LSN_FMT " src:%p dst %p", + LSN_IN_PARTS(base_lsn), src, dst)); first_byte= *((uint8*) src); code= first_byte >> 6; /* Length is in 2 most significant bits */ first_byte&= 0x3F; @@ -5305,8 +5305,8 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst) in real life) */ memcpy(dst, src + 1, LSN_STORE_SIZE); - DBUG_PRINT("info", ("Special case of full LSN, new src: 0x%lx", - (ulong) (src + 1 + LSN_STORE_SIZE))); + DBUG_PRINT("info", ("Special case of full LSN, new src:%p", + src + 1 + LSN_STORE_SIZE)); DBUG_RETURN(src + 1 + LSN_STORE_SIZE); } rec_offset= LSN_OFFSET(base_lsn) - ((first_byte << 8) + *((uint8*)src)); @@ -5341,7 +5341,7 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst) lsn= MAKE_LSN(file_no, rec_offset); src+= code + 1; lsn_store(dst, lsn); - DBUG_PRINT("info", ("new src: 0x%lx", (ulong) src)); + DBUG_PRINT("info", ("new src:%p", src)); DBUG_RETURN(src); } @@ -5375,7 +5375,7 @@ static void translog_relative_LSN_encode(struct st_translog_parts *parts, { uint copied= part->length; LEX_CUSTRING *next_part; - DBUG_PRINT("info", ("Using buffer: 0x%lx", (ulong) compressed_LSNs)); + DBUG_PRINT("info", ("Using buffer:%p", compressed_LSNs)); memcpy(buffer, part->str, part->length); next_part= parts->parts + parts->current + 1; do @@ -5592,7 +5592,7 @@ translog_write_variable_record_mgroup(LSN *lsn, translog_write_data_on_page(&horizon, &cursor, 1, chunk2_header); translog_write_parts_on_page(&horizon, &cursor, first_page - 1, parts); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) " + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " " "Left %lu", LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon), @@ -5604,8 +5604,8 @@ 
translog_write_variable_record_mgroup(LSN *lsn, if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor)) goto err; - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) " - "local: (%lu,0x%lx) " + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " " + "local: " LSN_FMT " " "Left: %lu", LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon), @@ -5763,7 +5763,7 @@ translog_write_variable_record_mgroup(LSN *lsn, DBUG_PRINT("info", ("chunk 2 to finish first page")); translog_write_data_on_page(&horizon, &cursor, 1, chunk2_header); translog_write_parts_on_page(&horizon, &cursor, first_page - 1, parts); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) " + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " " "Left: %lu", LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon), @@ -5780,7 +5780,7 @@ translog_write_variable_record_mgroup(LSN *lsn, int2store(chunk3_header + 1, chunk3_size); translog_write_data_on_page(&horizon, &cursor, 3, chunk3_header); translog_write_parts_on_page(&horizon, &cursor, chunk3_size, parts); - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) " + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " " "Left: %lu", LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon), @@ -5798,7 +5798,7 @@ translog_write_variable_record_mgroup(LSN *lsn, if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor)) goto err; - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) " + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " " "Left: %lu", LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon), @@ -5812,7 +5812,7 @@ translog_write_variable_record_mgroup(LSN *lsn, chunk3_size, &horizon, &cursor)) goto err; - DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)", + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon))); @@ -5847,9 +5847,9 @@ translog_write_variable_record_mgroup(LSN *lsn, translog_lock(); set_lsn(lsn, horizon); buffer_of_last_lsn->last_lsn= *lsn; - DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx", + DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p", LSN_IN_PARTS(buffer_of_last_lsn->last_lsn), - (ulong) buffer_of_last_lsn)); + buffer_of_last_lsn)); if (log_record_type_descriptor[type].inwrite_hook && (*log_record_type_descriptor[type].inwrite_hook) (type, trn, tbl_info, @@ -5977,7 +5977,7 @@ static my_bool translog_write_variable_record(LSN *lsn, DBUG_ENTER("translog_write_variable_record"); translog_lock(); - DBUG_PRINT("info", ("horizon: (%lu,0x%lx)", + DBUG_PRINT("info", ("horizon: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon))); page_rest= TRANSLOG_PAGE_SIZE - log_descriptor.bc.current_page_fill; DBUG_PRINT("info", ("header length: %u page_rest: %u", @@ -6093,7 +6093,7 @@ static my_bool translog_write_fixed_record(LSN *lsn, log_record_type_descriptor[type].fixed_length)); translog_lock(); - DBUG_PRINT("info", ("horizon: (%lu,0x%lx)", + DBUG_PRINT("info", ("horizon: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon))); DBUG_ASSERT(log_descriptor.bc.current_page_fill <= TRANSLOG_PAGE_SIZE); @@ -6157,9 +6157,9 @@ static my_bool translog_write_fixed_record(LSN *lsn, parts->total_record_length, parts); log_descriptor.bc.buffer->last_lsn= *lsn; - DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx", + DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p", 
LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn), - (ulong) log_descriptor.bc.buffer)); + log_descriptor.bc.buffer)); err: translog_unlock(); @@ -6349,7 +6349,7 @@ my_bool translog_write_record(LSN *lsn, } } - DBUG_PRINT("info", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(*lsn))); + DBUG_PRINT("info", ("LSN: " LSN_FMT, LSN_IN_PARTS(*lsn))); DBUG_RETURN(rc); } @@ -6564,8 +6564,8 @@ my_bool translog_scanner_init(LSN lsn, my_bool use_direct) { DBUG_ENTER("translog_scanner_init"); - DBUG_PRINT("enter", ("Scanner: 0x%lx LSN: (%lu,0x%lx)", - (ulong) scanner, LSN_IN_PARTS(lsn))); + DBUG_PRINT("enter", ("Scanner: %p LSN: " LSN_FMT, + scanner, LSN_IN_PARTS(lsn))); DBUG_ASSERT(translog_status == TRANSLOG_OK || translog_status == TRANSLOG_READONLY); @@ -6576,7 +6576,7 @@ my_bool translog_scanner_init(LSN lsn, scanner->direct_link= NULL; scanner->horizon= translog_get_horizon(); - DBUG_PRINT("info", ("horizon: (%lu,0x%lx)", LSN_IN_PARTS(scanner->horizon))); + DBUG_PRINT("info", ("horizon: " LSN_FMT, LSN_IN_PARTS(scanner->horizon))); /* lsn < horizon */ DBUG_ASSERT(lsn <= scanner->horizon); @@ -6602,7 +6602,7 @@ my_bool translog_scanner_init(LSN lsn, void translog_destroy_scanner(TRANSLOG_SCANNER_DATA *scanner) { DBUG_ENTER("translog_destroy_scanner"); - DBUG_PRINT("enter", ("Scanner: 0x%lx", (ulong)scanner)); + DBUG_PRINT("enter", ("Scanner: %p", scanner)); translog_free_link(scanner->direct_link); DBUG_VOID_RETURN; } @@ -6624,11 +6624,11 @@ static my_bool translog_scanner_eol(TRANSLOG_SCANNER_DATA *scanner) { DBUG_ENTER("translog_scanner_eol"); DBUG_PRINT("enter", - ("Horizon: (%lu, 0x%lx) Current: (%lu, 0x%lx+0x%x=0x%lx)", + ("Horizon: " LSN_FMT " Current: (%u, 0x%x+0x%x=0x%x)", LSN_IN_PARTS(scanner->horizon), LSN_IN_PARTS(scanner->page_addr), (uint) scanner->page_offset, - (ulong) (LSN_OFFSET(scanner->page_addr) + scanner->page_offset))); + (uint) (LSN_OFFSET(scanner->page_addr) + scanner->page_offset))); if (scanner->horizon > (scanner->page_addr + scanner->page_offset)) { @@ -6733,7 +6733,7 @@ translog_get_next_chunk(TRANSLOG_SCANNER_DATA *scanner) translog_free_link(scanner->direct_link); if (translog_scanner_eof(scanner)) { - DBUG_PRINT("info", ("horizon: (%lu,0x%lx) pageaddr: (%lu,0x%lx)", + DBUG_PRINT("info", ("horizon: " LSN_FMT " pageaddr: " LSN_FMT, LSN_IN_PARTS(scanner->horizon), LSN_IN_PARTS(scanner->page_addr))); /* if it is log end it have to be caught before */ @@ -6834,7 +6834,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset, src+= (2 + 2); page_rest= (uint16) (TRANSLOG_PAGE_SIZE - (src - page)); curr= 0; - header_to_skip= src - (page + page_offset); + header_to_skip= (uint) (src - (page + page_offset)); buff->chunk0_pages= 0; for (;;) @@ -6852,7 +6852,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset, DBUG_ASSERT(curr < buff->groups_no); buff->groups[curr].addr= lsn_korr(src + i * (7 + 1)); buff->groups[curr].num= src[i * (7 + 1) + 7]; - DBUG_PRINT("info", ("group #%u (%lu,0x%lx) chunks: %u", + DBUG_PRINT("info", ("group #%u " LSN_FMT " chunks: %u", curr, LSN_IN_PARTS(buff->groups[curr].addr), (uint) buff->groups[curr].num)); @@ -6874,7 +6874,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset, buff->chunk0_data_addr+= (header_to_skip + read_length * (7 + 1)); } buff->chunk0_data_len= chunk_len - 2 - read_length * (7 + 1); - DBUG_PRINT("info", ("Data address: (%lu,0x%lx) len: %u", + DBUG_PRINT("info", ("Data address: " LSN_FMT " len: %u", LSN_IN_PARTS(buff->chunk0_data_addr), buff->chunk0_data_len)); break; @@ 
-6997,7 +6997,7 @@ int translog_read_record_header_from_buffer(uchar *page, translog_status == TRANSLOG_READONLY); buff->type= (page[page_offset] & TRANSLOG_REC_TYPE); buff->short_trid= uint2korr(page + page_offset + 1); - DBUG_PRINT("info", ("Type %u, Short TrID %u, LSN (%lu,0x%lx)", + DBUG_PRINT("info", ("Type %u, Short TrID %u, LSN " LSN_FMT, (uint) buff->type, (uint)buff->short_trid, LSN_IN_PARTS(buff->lsn))); /* Read required bytes from the header and call hook */ @@ -7046,7 +7046,7 @@ int translog_read_record_header(LSN lsn, TRANSLOG_HEADER_BUFFER *buff) TRANSLOG_ADDRESS addr; TRANSLOG_VALIDATOR_DATA data; DBUG_ENTER("translog_read_record_header"); - DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn))); + DBUG_PRINT("enter", ("LSN: " LSN_FMT, LSN_IN_PARTS(lsn))); DBUG_ASSERT(LSN_OFFSET(lsn) % TRANSLOG_PAGE_SIZE != 0); DBUG_ASSERT(translog_status == TRANSLOG_OK || translog_status == TRANSLOG_READONLY); @@ -7091,8 +7091,8 @@ int translog_read_record_header_scan(TRANSLOG_SCANNER_DATA *scanner, { translog_size_t res; DBUG_ENTER("translog_read_record_header_scan"); - DBUG_PRINT("enter", ("Scanner: Cur: (%lu,0x%lx) Hrz: (%lu,0x%lx) " - "Lst: (%lu,0x%lx) Offset: %u(%x) fixed %d", + DBUG_PRINT("enter", ("Scanner: Cur: " LSN_FMT " Hrz: " LSN_FMT " " + "Lst: " LSN_FMT " Offset: %u(%x) fixed %d", LSN_IN_PARTS(scanner->page_addr), LSN_IN_PARTS(scanner->horizon), LSN_IN_PARTS(scanner->last_file_page), @@ -7137,9 +7137,9 @@ int translog_read_next_record_header(TRANSLOG_SCANNER_DATA *scanner, DBUG_ENTER("translog_read_next_record_header"); buff->groups_no= 0; /* to be sure that we will free it right */ - DBUG_PRINT("enter", ("scanner: 0x%lx", (ulong) scanner)); - DBUG_PRINT("info", ("Scanner: Cur: (%lu,0x%lx) Hrz: (%lu,0x%lx) " - "Lst: (%lu,0x%lx) Offset: %u(%x) fixed: %d", + DBUG_PRINT("enter", ("scanner: %p", scanner)); + DBUG_PRINT("info", ("Scanner: Cur: " LSN_FMT " Hrz: " LSN_FMT " " + "Lst: " LSN_FMT " Offset: %u(%x) fixed: %d", LSN_IN_PARTS(scanner->page_addr), LSN_IN_PARTS(scanner->horizon), LSN_IN_PARTS(scanner->last_file_page), @@ -7159,7 +7159,7 @@ int translog_read_next_record_header(TRANSLOG_SCANNER_DATA *scanner, buff->lsn= LSN_IMPOSSIBLE; DBUG_RETURN(RECHEADER_READ_EOF); } - DBUG_PRINT("info", ("Page: (%lu,0x%lx) offset: %lu byte: %x", + DBUG_PRINT("info", ("Page: " LSN_FMT " offset: %lu byte: %x", LSN_IN_PARTS(scanner->page_addr), (ulong) scanner->page_offset, (uint) scanner->page[scanner->page_offset])); @@ -7368,8 +7368,8 @@ translog_size_t translog_read_record(LSN lsn, DBUG_RETURN(0); } DBUG_PRINT("info", ("Offset: %lu length: %lu " - "Scanner: Cur: (%lu,0x%lx) Hrz: (%lu,0x%lx) " - "Lst: (%lu,0x%lx) Offset: %u(%x) fixed: %d", + "Scanner: Cur: " LSN_FMT " Hrz: " LSN_FMT " " + "Lst: " LSN_FMT " Offset: %u(%x) fixed: %d", (ulong) offset, (ulong) length, LSN_IN_PARTS(data->scanner.page_addr), LSN_IN_PARTS(data->scanner.horizon), @@ -7458,15 +7458,15 @@ static void translog_force_current_buffer_to_finish() uint16 UNINIT_VAR(current_page_fill), write_counter, previous_offset; DBUG_ENTER("translog_force_current_buffer_to_finish"); - DBUG_PRINT("enter", ("Buffer #%u 0x%lx " - "Buffer addr: (%lu,0x%lx) " - "Page addr: (%lu,0x%lx) " + DBUG_PRINT("enter", ("Buffer #%u %p " + "Buffer addr: " LSN_FMT " " + "Page addr: " LSN_FMT " " "size: %lu (%lu) Pg: %u left: %u in progress %u", (uint) old_buffer_no, - (ulong) old_buffer, + old_buffer, LSN_IN_PARTS(old_buffer->offset), - (ulong) LSN_FILE_NO(log_descriptor.horizon), - (ulong) (LSN_OFFSET(log_descriptor.horizon) - + 
LSN_FILE_NO(log_descriptor.horizon), + (uint)(LSN_OFFSET(log_descriptor.horizon) - log_descriptor.bc.current_page_fill), (ulong) old_buffer->size, (ulong) (log_descriptor.bc.ptr -log_descriptor.bc. @@ -7500,10 +7500,10 @@ static void translog_force_current_buffer_to_finish() memset(log_descriptor.bc.ptr, TRANSLOG_FILLER, left); old_buffer->size+= left; - DBUG_PRINT("info", ("Finish Page buffer #%u: 0x%lx " + DBUG_PRINT("info", ("Finish Page buffer #%u: %p " "Size: %lu", (uint) old_buffer->buffer_no, - (ulong) old_buffer, + old_buffer, (ulong) old_buffer->size)); DBUG_ASSERT(old_buffer->buffer_no == log_descriptor.bc.buffer_no); @@ -7537,9 +7537,9 @@ static void translog_force_current_buffer_to_finish() log_descriptor.bc.write_counter= write_counter; log_descriptor.bc.previous_offset= previous_offset; new_buffer->prev_last_lsn= BUFFER_MAX_LSN(old_buffer); - DBUG_PRINT("info", ("prev_last_lsn set to (%lu,0x%lx) buffer: 0x%lx", + DBUG_PRINT("info", ("prev_last_lsn set to " LSN_FMT " buffer: %p", LSN_IN_PARTS(new_buffer->prev_last_lsn), - (ulong) new_buffer)); + new_buffer)); /* Advances this log pointer, increases writers and let other threads to @@ -7575,8 +7575,8 @@ static void translog_force_current_buffer_to_finish() */ DBUG_ASSERT(!old_buffer->is_closing_buffer); old_buffer->is_closing_buffer= 1; /* Other flushes will wait */ - DBUG_PRINT("enter", ("Buffer #%u 0x%lx is_closing_buffer set", - (uint) old_buffer->buffer_no, (ulong) old_buffer)); + DBUG_PRINT("enter", ("Buffer #%u %p is_closing_buffer set", + (uint) old_buffer->buffer_no, old_buffer)); translog_wait_for_writers(old_buffer); #ifndef DBUG_OFF /* We blocked flushing this buffer so the buffer should not changed */ @@ -7606,12 +7606,12 @@ static void translog_force_current_buffer_to_finish() uint32 crc= translog_crc(data + log_descriptor.page_overhead, TRANSLOG_PAGE_SIZE - log_descriptor.page_overhead); - DBUG_PRINT("info", ("CRC: 0x%lx", (ulong) crc)); + DBUG_PRINT("info", ("CRC: 0x%x", crc)); int4store(data + 3 + 3 + 1, crc); } old_buffer->is_closing_buffer= 0; - DBUG_PRINT("enter", ("Buffer #%u 0x%lx is_closing_buffer cleared", - (uint) old_buffer->buffer_no, (ulong) old_buffer)); + DBUG_PRINT("enter", ("Buffer #%u %p is_closing_buffer cleared", + (uint) old_buffer->buffer_no, old_buffer)); mysql_cond_broadcast(&old_buffer->waiting_filling_buffer); if (left) @@ -7650,7 +7650,7 @@ static void translog_force_current_buffer_to_finish() void translog_flush_wait_for_end(LSN lsn) { DBUG_ENTER("translog_flush_wait_for_end"); - DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn))); + DBUG_PRINT("enter", ("LSN: " LSN_FMT, LSN_IN_PARTS(lsn))); mysql_mutex_assert_owner(&log_descriptor.log_flush_lock); while (cmp_translog_addr(log_descriptor.flushed, lsn) < 0) mysql_cond_wait(&log_descriptor.log_flush_cond, @@ -7670,7 +7670,7 @@ void translog_flush_set_new_goal_and_wait(TRANSLOG_ADDRESS lsn) { int flush_no= log_descriptor.flush_no; DBUG_ENTER("translog_flush_set_new_goal_and_wait"); - DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn))); + DBUG_PRINT("enter", ("LSN: " LSN_FMT, LSN_IN_PARTS(lsn))); mysql_mutex_assert_owner(&log_descriptor.log_flush_lock); if (cmp_translog_addr(lsn, log_descriptor.next_pass_max_lsn) > 0) { @@ -7776,7 +7776,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, start_buffer_no= i; DBUG_PRINT("info", - ("start from: %u current: %u prev last lsn: (%lu,0x%lx)", + ("start from: %u current: %u prev last lsn: " LSN_FMT, (uint) start_buffer_no, (uint) log_descriptor.bc.buffer_no, 
LSN_IN_PARTS(log_descriptor.bc.buffer->prev_last_lsn))); @@ -7790,7 +7790,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, { struct st_translog_buffer *buffer= log_descriptor.bc.buffer; *lsn= log_descriptor.bc.buffer->last_lsn; /* fix lsn if it was horizon */ - DBUG_PRINT("info", ("LSN to flush fixed to last lsn: (%lu,0x%lx)", + DBUG_PRINT("info", ("LSN to flush fixed to last lsn: " LSN_FMT, LSN_IN_PARTS(*lsn))); last_buffer_no= log_descriptor.bc.buffer_no; log_descriptor.is_everything_flushed= 1; @@ -7817,7 +7817,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE); /* fix lsn if it was horizon */ *lsn= log_descriptor.bc.buffer->prev_last_lsn; - DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: (%lu,0x%lx)", + DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: " LSN_FMT, LSN_IN_PARTS(*lsn))); last_buffer_no= ((log_descriptor.bc.buffer_no + TRANSLOG_BUFFERS_NO -1) % TRANSLOG_BUFFERS_NO); @@ -7836,10 +7836,10 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, { struct st_translog_buffer *buffer= log_descriptor.buffers + i; translog_buffer_lock(buffer); - DBUG_PRINT("info", ("Check buffer: 0x%lx #: %u " - "prev last LSN: (%lu,0x%lx) " - "last LSN: (%lu,0x%lx) status: %s", - (ulong)(buffer), + DBUG_PRINT("info", ("Check buffer:%p #: %u " + "prev last LSN: " LSN_FMT " " + "last LSN: " LSN_FMT " status: %s", + buffer, (uint) i, LSN_IN_PARTS(buffer->prev_last_lsn), LSN_IN_PARTS(buffer->last_lsn), @@ -7853,7 +7853,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, buffer->pre_force_close_horizon : buffer->offset + buffer->size); /* pre_force_close_horizon is reset during new buffer start */ - DBUG_PRINT("info", ("flush_horizon: (%lu,0x%lx)", + DBUG_PRINT("info", ("flush_horizon: " LSN_FMT, LSN_IN_PARTS(*flush_horizon))); DBUG_ASSERT(*flush_horizon <= log_descriptor.horizon); @@ -7922,12 +7922,12 @@ my_bool translog_flush(TRANSLOG_ADDRESS lsn) my_bool rc= 0; my_bool hgroup_commit_at_start; DBUG_ENTER("translog_flush"); - DBUG_PRINT("enter", ("Flush up to LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn))); + DBUG_PRINT("enter", ("Flush up to LSN: " LSN_FMT, LSN_IN_PARTS(lsn))); DBUG_ASSERT(translog_status == TRANSLOG_OK || translog_status == TRANSLOG_READONLY); mysql_mutex_lock(&log_descriptor.log_flush_lock); - DBUG_PRINT("info", ("Everything is flushed up to (%lu,0x%lx)", + DBUG_PRINT("info", ("Everything is flushed up to " LSN_FMT, LSN_IN_PARTS(log_descriptor.flushed))); if (cmp_translog_addr(log_descriptor.flushed, lsn) >= 0) { @@ -7956,7 +7956,7 @@ my_bool translog_flush(TRANSLOG_ADDRESS lsn) } log_descriptor.flush_in_progress= 1; flush_horizon= log_descriptor.previous_flush_horizon; - DBUG_PRINT("info", ("flush_in_progress is set, flush_horizon: (%lu,0x%lx)", + DBUG_PRINT("info", ("flush_in_progress is set, flush_horizon: " LSN_FMT, LSN_IN_PARTS(flush_horizon))); mysql_mutex_unlock(&log_descriptor.log_flush_lock); @@ -8019,7 +8019,7 @@ retest: log_descriptor.next_pass_max_lsn= LSN_IMPOSSIBLE; /* prevent other thread from continue */ log_descriptor.max_lsn_requester= pthread_self(); - DBUG_PRINT("info", ("flush took next goal: (%lu,0x%lx)", + DBUG_PRINT("info", ("flush took next goal: " LSN_FMT, LSN_IN_PARTS(lsn))); mysql_mutex_unlock(&log_descriptor.log_flush_lock); @@ -8129,7 +8129,7 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn) } i= 1; /* scan the whole array */ } while (id == 0); - DBUG_PRINT("info", ("id_to_share: 0x%lx -> %u", (ulong)share, id)); + DBUG_PRINT("info", 
("id_to_share: %p -> %u", share, id)); fileid_store(log_data, id); log_array[TRANSLOG_INTERNAL_PARTS + 0].str= log_data; log_array[TRANSLOG_INTERNAL_PARTS + 0].length= sizeof(log_data); @@ -8180,8 +8180,8 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn) void translog_deassign_id_from_share(MARIA_SHARE *share) { - DBUG_PRINT("info", ("id_to_share: 0x%lx id %u -> 0", - (ulong)share, share->id)); + DBUG_PRINT("info", ("id_to_share: %p id %u -> 0", + share, share->id)); /* We don't need any mutex as we are called only when closing the last instance of the table or at the end of REPAIR: no writes can be @@ -8376,7 +8376,7 @@ LSN translog_first_lsn_in_log() uint16 chunk_offset; uchar *page; DBUG_ENTER("translog_first_lsn_in_log"); - DBUG_PRINT("info", ("Horizon: (%lu,0x%lx)", LSN_IN_PARTS(horizon))); + DBUG_PRINT("info", ("Horizon: " LSN_FMT, LSN_IN_PARTS(horizon))); DBUG_ASSERT(translog_status == TRANSLOG_OK || translog_status == TRANSLOG_READONLY); @@ -8415,7 +8415,7 @@ LSN translog_first_theoretical_lsn() uchar *page; TRANSLOG_VALIDATOR_DATA data; DBUG_ENTER("translog_first_theoretical_lsn"); - DBUG_PRINT("info", ("Horizon: (%lu,0x%lx)", LSN_IN_PARTS(addr))); + DBUG_PRINT("info", ("Horizon: " LSN_FMT, LSN_IN_PARTS(addr))); DBUG_ASSERT(translog_status == TRANSLOG_OK || translog_status == TRANSLOG_READONLY); @@ -8455,7 +8455,7 @@ my_bool translog_purge(TRANSLOG_ADDRESS low) TRANSLOG_ADDRESS horizon= translog_get_horizon(); int rc= 0; DBUG_ENTER("translog_purge"); - DBUG_PRINT("enter", ("low: (%lu,0x%lx)", LSN_IN_PARTS(low))); + DBUG_PRINT("enter", ("low: " LSN_FMT, LSN_IN_PARTS(low))); DBUG_ASSERT(translog_status == TRANSLOG_OK || translog_status == TRANSLOG_READONLY); @@ -8880,7 +8880,7 @@ static void dump_header_page(uchar *buff) printf(" WARNING: page size is not equal compiled in one %lu!!!\n", (ulong) TRANSLOG_PAGE_SIZE); printf(" File number %lu\n" - " Max lsn: (%lu,0x%lx)\n", + " Max lsn: " LSN_FMT "\n", desc.file_number, LSN_IN_PARTS(desc.max_lsn)); } @@ -8976,7 +8976,7 @@ static uchar *dump_chunk(uchar *buffer, uchar *ptr) { TRANSLOG_ADDRESS gpr_addr= lsn_korr(hdr_ptr); uint pages= hdr_ptr[LSN_STORE_SIZE]; - printf (" Group +#%u: (%lu,0x%lx) pages: %u\n", + printf (" Group +#%u: " LSN_FMT " pages: %u\n", (uint) i, LSN_IN_PARTS(gpr_addr), pages); } } @@ -9110,8 +9110,8 @@ static void dump_datapage(uchar *buffer, File handler) ptr= buffer + header_len; while (ptr && ptr < buffer + TRANSLOG_PAGE_SIZE) { - printf(" Chunk (%lu,0x%lx):\n", - (ulong)file, (ulong) offset + (ptr - buffer)); + printf(" Chunk %d %lld:\n", + file,((longlong) (ptr - buffer)+ offset)); ptr= dump_chunk(buffer, ptr); } } diff --git a/storage/maria/ma_loghandler_lsn.h b/storage/maria/ma_loghandler_lsn.h index 69481761e80..113b57914c5 100644 --- a/storage/maria/ma_loghandler_lsn.h +++ b/storage/maria/ma_loghandler_lsn.h @@ -47,7 +47,8 @@ typedef TRANSLOG_ADDRESS LSN; #define LSN_FILE_NO_PART(L) ((L) & ((int64)0xFFFFFF00000000LL)) /* Parts of LSN for printing */ -#define LSN_IN_PARTS(L) (ulong)LSN_FILE_NO(L),(ulong)LSN_OFFSET(L) +#define LSN_IN_PARTS(L) (uint)LSN_FILE_NO(L),(uint)LSN_OFFSET(L) +#define LSN_FMT "(%u,0x%x)" /* Gets record offset of a LSN/log address */ #define LSN_OFFSET(L) (ulong) ((L) & 0xFFFFFFFFL) diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index c1fdd52357f..5a0c81d3e3e 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -1383,7 +1383,7 @@ uint _ma_state_info_write(MARIA_SHARE *share, uint pWrite) is too new). Recovery does it by itself. 
*/ share->state.is_of_horizon= translog_get_horizon(); - DBUG_PRINT("info", ("is_of_horizon set to LSN (%lu,0x%lx)", + DBUG_PRINT("info", ("is_of_horizon set to LSN " LSN_FMT, LSN_IN_PARTS(share->state.is_of_horizon))); } res= _ma_state_info_write_sub(share->kfile.file, &share->state, pWrite); diff --git a/storage/maria/ma_page.c b/storage/maria/ma_page.c index d6acc0520c1..ccaba3b7a33 100644 --- a/storage/maria/ma_page.c +++ b/storage/maria/ma_page.c @@ -544,8 +544,8 @@ my_bool _ma_compact_keypage(MARIA_PAGE *ma_page, TrID min_read_from) { if (!(page= (*ma_page->keyinfo->skip_key)(&key, 0, 0, page))) { - DBUG_PRINT("error",("Couldn't find last key: page_pos: 0x%lx", - (long) page)); + DBUG_PRINT("error",("Couldn't find last key: page_pos: %p", + page)); _ma_set_fatal_error(share, HA_ERR_CRASHED); DBUG_RETURN(1); } diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index 9449c730a04..8ed97d6eae4 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -97,10 +97,10 @@ #define PCBLOCK_INFO(B) \ DBUG_PRINT("info", \ - ("block: 0x%lx fd: %lu page: %lu status: 0x%x " \ - "hshL: 0x%lx requests: %u/%u wrlocks: %u rdlocks: %u " \ + ("block: %p fd: %lu page: %lu status: 0x%x " \ + "hshL: %p requests: %u/%u wrlocks: %u rdlocks: %u " \ "rdlocks_q: %u pins: %u type: %s", \ - (ulong)(B), \ + (B), \ (ulong)((B)->hash_link ? \ (B)->hash_link->file.file : \ 0), \ @@ -108,7 +108,7 @@ (B)->hash_link->pageno : \ 0), \ (uint) (B)->status, \ - (ulong)(B)->hash_link, \ + (B)->hash_link, \ (uint) (B)->requests, \ (uint)((B)->hash_link ? \ (B)->hash_link->requests : \ @@ -659,9 +659,9 @@ static my_bool pagecache_fwrite(PAGECACHE *pagecache, /* Todo: Integrate this with write_callback so we have only one callback */ if ((*filedesc->flush_log_callback)(&args)) DBUG_RETURN(1); - DBUG_PRINT("info", ("pre_write_hook: 0x%lx data: 0x%lx", - (ulong) filedesc->pre_write_hook, - (ulong) filedesc->callback_data)); + DBUG_PRINT("info", ("pre_write_hook:%p data: %p", + filedesc->pre_write_hook, + filedesc->callback_data)); if ((*filedesc->pre_write_hook)(&args)) { DBUG_PRINT("error", ("write callback problem")); @@ -2789,7 +2789,7 @@ static void check_and_set_lsn(PAGECACHE *pagecache, */ DBUG_ASSERT((block->type == PAGECACHE_LSN_PAGE) || maria_in_recovery); old= lsn_korr(block->buffer); - DBUG_PRINT("info", ("old lsn: (%lu, 0x%lx) new lsn: (%lu, 0x%lx)", + DBUG_PRINT("info", ("old lsn: " LSN_FMT " new lsn: " LSN_FMT, LSN_IN_PARTS(old), LSN_IN_PARTS(lsn))); if (cmp_translog_addr(lsn, old) > 0) { @@ -3832,8 +3832,8 @@ restart: block= page_link->block; if (block->status & (PCBLOCK_REASSIGNED | PCBLOCK_IN_SWITCH)) { - DBUG_PRINT("info", ("Block 0x%0lx already is %s", - (ulong) block, + DBUG_PRINT("info", ("Block %p already is %s", + block, ((block->status & PCBLOCK_REASSIGNED) ? "reassigned" : "in switch"))); PCBLOCK_INFO(block); diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index b1d2378870f..256cb2b45b2 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -558,7 +558,7 @@ static void display_record_position(const LOG_DESC *log_desc, form a group, so we indent below the group's end record */ tprint(tracef, - "%sRec#%u LSN (%lu,0x%lx) short_trid %u %s(num_type:%u) len %lu\n", + "%sRec#%u LSN " LSN_FMT " short_trid %u %s(num_type:%u) len %lu\n", number ? 
"" : " ", number, LSN_IN_PARTS(rec->lsn), rec->short_trid, log_desc->name, rec->type, (ulong)rec->record_length); @@ -617,7 +617,7 @@ prototype_redo_exec_hook(LONG_TRANSACTION_ID) llstr(long_trid, llbuf); eprint(tracef, "Found an old transaction long_trid %s short_trid %u" " with same short id as this new transaction, and has neither" - " committed nor rollback (undo_lsn: (%lu,0x%lx))", + " committed nor rollback (undo_lsn: " LSN_FMT ")", llbuf, sid, LSN_IN_PARTS(ulsn)); goto err; } @@ -640,7 +640,7 @@ static void new_transaction(uint16 sid, TrID long_id, LSN undo_lsn, all_active_trans[sid].long_trid= long_id; llstr(long_id, llbuf); tprint(tracef, "Transaction long_trid %s short_trid %u starts," - " undo_lsn (%lu,0x%lx) first_undo_lsn (%lu,0x%lx)\n", + " undo_lsn " LSN_FMT " first_undo_lsn " LSN_FMT "\n", llbuf, sid, LSN_IN_PARTS(undo_lsn), LSN_IN_PARTS(first_undo_lsn)); all_active_trans[sid].undo_lsn= undo_lsn; all_active_trans[sid].first_undo_lsn= first_undo_lsn; @@ -833,7 +833,7 @@ prototype_redo_exec_hook(REDO_CREATE_TABLE) } if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0) { - tprint(tracef, "Table '%s' has create_rename_lsn (%lu,0x%lx) more " + tprint(tracef, "Table '%s' has create_rename_lsn " LSN_FMT " more " "recent than record, ignoring creation", name, LSN_IN_PARTS(share->state.create_rename_lsn)); error= 0; @@ -1009,7 +1009,7 @@ prototype_redo_exec_hook(REDO_RENAME_TABLE) } if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0) { - tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than" + tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than" " record, ignoring renaming", LSN_IN_PARTS(share->state.create_rename_lsn)); error= 0; @@ -1064,7 +1064,7 @@ prototype_redo_exec_hook(REDO_RENAME_TABLE) } if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0) { - tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than" + tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than" " record, ignoring renaming", LSN_IN_PARTS(share->state.create_rename_lsn)); /* @@ -1233,7 +1233,7 @@ prototype_redo_exec_hook(REDO_DROP_TABLE) } if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0) { - tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than" + tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than" " record, ignoring removal", LSN_IN_PARTS(share->state.create_rename_lsn)); error= 0; @@ -1403,8 +1403,8 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id) } if (cmp_translog_addr(lsn_of_file_id, share->state.create_rename_lsn) <= 0) { - tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than" - " LOGREC_FILE_ID's LSN (%lu,0x%lx), ignoring open request", + tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than" + " LOGREC_FILE_ID's LSN " LSN_FMT ", ignoring open request", LSN_IN_PARTS(share->state.create_rename_lsn), LSN_IN_PARTS(lsn_of_file_id)); recovery_warnings++; @@ -1873,7 +1873,7 @@ prototype_redo_exec_hook(UNDO_ROW_INSERT) share= info->s; if (cmp_translog_addr(rec->lsn, share->state.is_of_horizon) >= 0) { - tprint(tracef, " state has LSN (%lu,0x%lx) older than record, updating" + tprint(tracef, " state has LSN " LSN_FMT " older than record, updating" " rows' count\n", LSN_IN_PARTS(share->state.is_of_horizon)); share->state.state.records++; if (share->calc_checksum) @@ -2136,7 +2136,7 @@ prototype_redo_exec_hook(CLR_END) if (info == NULL) DBUG_RETURN(0); share= info->s; - tprint(tracef, " CLR_END was 
about %s, undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " CLR_END was about %s, undo_lsn now LSN " LSN_FMT "\n", log_desc->name, LSN_IN_PARTS(previous_undo_lsn)); enlarge_buffer(rec); @@ -2296,7 +2296,7 @@ prototype_undo_exec_hook(UNDO_ROW_INSERT) info->trn= 0; /* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */ tprint(tracef, " rows' count %lu\n", (ulong)info->s->state.state.records); - tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n", LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2335,7 +2335,7 @@ prototype_undo_exec_hook(UNDO_ROW_DELETE) rec->record_length - (LSN_STORE_SIZE + FILEID_STORE_SIZE)); info->trn= 0; - tprint(tracef, " rows' count %lu\n undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " rows' count %lu\n undo_lsn now LSN " LSN_FMT "\n", (ulong)share->state.state.records, LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2374,7 +2374,7 @@ prototype_undo_exec_hook(UNDO_ROW_UPDATE) rec->record_length - (LSN_STORE_SIZE + FILEID_STORE_SIZE)); info->trn= 0; - tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n", LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2415,7 +2415,7 @@ prototype_undo_exec_hook(UNDO_KEY_INSERT) FILEID_STORE_SIZE); info->trn= 0; /* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */ - tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n", LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2456,7 +2456,7 @@ prototype_undo_exec_hook(UNDO_KEY_DELETE) FILEID_STORE_SIZE, FALSE); info->trn= 0; /* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */ - tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n", LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2497,7 +2497,7 @@ prototype_undo_exec_hook(UNDO_KEY_DELETE_WITH_ROOT) FILEID_STORE_SIZE, TRUE); info->trn= 0; /* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */ - tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n", LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2525,7 +2525,7 @@ prototype_undo_exec_hook(UNDO_BULK_INSERT) error= _ma_apply_undo_bulk_insert(info, previous_undo_lsn); info->trn= 0; /* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */ - tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n", + tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n", LSN_IN_PARTS(trn->undo_lsn)); return error; } @@ -2663,7 +2663,7 @@ static int run_redo_phase(LSN lsn, LSN lsn_end, enum maria_apply_log_way apply) if (lsn_end != LSN_IMPOSSIBLE && rec2.lsn >= lsn_end) { tprint(tracef, - "lsn_end reached at (%lu,0x%lx). " + "lsn_end reached at " LSN_FMT ". " "Skipping rest of redo entries", LSN_IN_PARTS(rec2.lsn)); translog_destroy_scanner(&scanner); @@ -2818,7 +2818,7 @@ static uint end_of_redo_phase(my_bool prepare_for_undo_phase) TRN *trn; if (gslsn != LSN_IMPOSSIBLE) { - tprint(tracef, "Group at LSN (%lu,0x%lx) short_trid %u incomplete\n", + tprint(tracef, "Group at LSN " LSN_FMT " short_trid %u incomplete\n", LSN_IN_PARTS(gslsn), sid); all_active_trans[sid].group_start_lsn= LSN_IMPOSSIBLE; } @@ -3109,7 +3109,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const table was). 
*/ DBUG_ASSERT(cmp_translog_addr(rec->lsn, checkpoint_start) < 0); - tprint(tracef, ", table's LOGREC_FILE_ID has LSN (%lu,0x%lx) more recent" + tprint(tracef, ", table's LOGREC_FILE_ID has LSN " LSN_FMT " more recent" " than record, skipping record", LSN_IN_PARTS(share->lsn_of_file_id)); return NULL; @@ -3117,7 +3117,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const if (cmp_translog_addr(rec->lsn, share->state.skip_redo_lsn) <= 0) { /* probably a bulk insert repair */ - tprint(tracef, ", has skip_redo_lsn (%lu,0x%lx) more recent than" + tprint(tracef, ", has skip_redo_lsn " LSN_FMT " more recent than" " record, skipping record\n", LSN_IN_PARTS(share->state.skip_redo_lsn)); return NULL; @@ -3176,7 +3176,7 @@ static MARIA_HA *get_MARIA_HA_from_UNDO_record(const if (cmp_translog_addr(rec->lsn, share->lsn_of_file_id) <= 0) { - tprint(tracef, ", table's LOGREC_FILE_ID has LSN (%lu,0x%lx) more recent" + tprint(tracef, ", table's LOGREC_FILE_ID has LSN " LSN_FMT " more recent" " than record, skipping record", LSN_IN_PARTS(share->lsn_of_file_id)); return NULL; @@ -3185,7 +3185,7 @@ static MARIA_HA *get_MARIA_HA_from_UNDO_record(const cmp_translog_addr(rec->lsn, share->state.skip_redo_lsn) <= 0) { /* probably a bulk insert repair */ - tprint(tracef, ", has skip_redo_lsn (%lu,0x%lx) more recent than" + tprint(tracef, ", has skip_redo_lsn " LSN_FMT " more recent than" " record, skipping record\n", LSN_IN_PARTS(share->state.skip_redo_lsn)); return NULL; @@ -3220,12 +3220,12 @@ static LSN parse_checkpoint_record(LSN lsn) LSN minimum_rec_lsn_of_active_transactions, minimum_rec_lsn_of_dirty_pages; struct st_dirty_page *next_dirty_page_in_pool; - tprint(tracef, "Loading data from checkpoint record at LSN (%lu,0x%lx)\n", + tprint(tracef, "Loading data from checkpoint record at LSN " LSN_FMT "\n", LSN_IN_PARTS(lsn)); if ((len= translog_read_record_header(lsn, &rec)) == RECHEADER_READ_ERROR || rec.type != LOGREC_CHECKPOINT) { - eprint(tracef, "Cannot find checkpoint record at LSN (%lu,0x%lx)", + eprint(tracef, "Cannot find checkpoint record at LSN " LSN_FMT, LSN_IN_PARTS(lsn)); return LSN_ERROR; } @@ -3243,7 +3243,7 @@ static LSN parse_checkpoint_record(LSN lsn) ptr= log_record_buffer.str; start_address= lsn_korr(ptr); ptr+= LSN_STORE_SIZE; - tprint(tracef, "Checkpoint record has start_horizon at (%lu,0x%lx)\n", + tprint(tracef, "Checkpoint record has start_horizon at " LSN_FMT "\n", LSN_IN_PARTS(start_address)); /* transactions */ @@ -3261,7 +3261,7 @@ static LSN parse_checkpoint_record(LSN lsn) takes to write one or a few rows, roughly). 
*/ tprint(tracef, "Checkpoint record has min_rec_lsn of active transactions" - " at (%lu,0x%lx)\n", + " at " LSN_FMT "\n", LSN_IN_PARTS(minimum_rec_lsn_of_active_transactions)); set_if_smaller(start_address, minimum_rec_lsn_of_active_transactions); @@ -3349,7 +3349,7 @@ static LSN parse_checkpoint_record(LSN lsn) page_id, rec_lsn, next_dirty_page_in_pool++)) return LSN_ERROR; if (maria_recovery_verbose) - tprint(tracef, "%8u %8u %12lu %lu,0x%lx\n", (uint) table_id, + tprint(tracef, "%8u %8u %12lu " LSN_FMT "\n", (uint) table_id, (uint) is_index, (ulong) page_id, LSN_IN_PARTS(rec_lsn)); set_if_smaller(minimum_rec_lsn_of_dirty_pages, rec_lsn); } @@ -3372,7 +3372,7 @@ static LSN parse_checkpoint_record(LSN lsn) start_address= checkpoint_start= translog_next_LSN(start_address, LSN_IMPOSSIBLE); tprint(tracef, "Checkpoint record start_horizon now adjusted to" - " LSN (%lu,0x%lx)\n", LSN_IN_PARTS(start_address)); + " LSN " LSN_FMT "\n", LSN_IN_PARTS(start_address)); if (checkpoint_start == LSN_IMPOSSIBLE) { /* @@ -3383,10 +3383,10 @@ static LSN parse_checkpoint_record(LSN lsn) } /* now, where the REDO phase should start reading log: */ tprint(tracef, "Checkpoint has min_rec_lsn of dirty pages at" - " LSN (%lu,0x%lx)\n", LSN_IN_PARTS(minimum_rec_lsn_of_dirty_pages)); + " LSN " LSN_FMT "\n", LSN_IN_PARTS(minimum_rec_lsn_of_dirty_pages)); set_if_smaller(start_address, minimum_rec_lsn_of_dirty_pages); DBUG_PRINT("info", - ("checkpoint_start: (%lu,0x%lx) start_address: (%lu,0x%lx)", + ("checkpoint_start: " LSN_FMT " start_address: " LSN_FMT, LSN_IN_PARTS(checkpoint_start), LSN_IN_PARTS(start_address))); return start_address; } diff --git a/storage/maria/ma_rkey.c b/storage/maria/ma_rkey.c index 58e47089ce9..c98cdfe15ef 100644 --- a/storage/maria/ma_rkey.c +++ b/storage/maria/ma_rkey.c @@ -36,8 +36,8 @@ int maria_rkey(MARIA_HA *info, uchar *buf, int inx, const uchar *key_data, MARIA_KEY key; ICP_RESULT icp_res= ICP_MATCH; DBUG_ENTER("maria_rkey"); - DBUG_PRINT("enter", ("base: 0x%lx buf: 0x%lx inx: %d search_flag: %d", - (long) info, (long) buf, inx, search_flag)); + DBUG_PRINT("enter", ("base:%p buf:%p inx: %d search_flag: %d", + info, buf, inx, search_flag)); if ((inx = _ma_check_index(info,inx)) < 0) DBUG_RETURN(my_errno); diff --git a/storage/maria/ma_rt_index.c b/storage/maria/ma_rt_index.c index c92045eb245..4c18bff7c70 100644 --- a/storage/maria/ma_rt_index.c +++ b/storage/maria/ma_rt_index.c @@ -105,7 +105,7 @@ static int maria_rtree_find_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo, level + 1))) { case 0: /* found - exit from recursion */ - *saved_key= k - page_buf; + *saved_key= (uint) (k - page_buf); goto ok; case 1: /* not found - continue searching */ info->maria_rtree_recursion_depth= level; @@ -140,7 +140,7 @@ static int maria_rtree_find_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo, memcpy(info->last_key.data, k, info->last_key.data_length + info->last_key.ref_length); info->maria_rtree_recursion_depth= level; - *saved_key= last - page_buf; + *saved_key= (uint) (last - page_buf); if (after_key < last) { @@ -366,7 +366,7 @@ static int maria_rtree_get_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo, _ma_kpos(nod_flag, k), level + 1))) { case 0: /* found - exit from recursion */ - *saved_key= k - page.buff; + *saved_key= (uint) (k - page.buff); goto ok; case 1: /* not found - continue searching */ info->maria_rtree_recursion_depth= level; @@ -398,7 +398,7 @@ static int maria_rtree_get_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo, info->last_key.data_length + info->last_key.ref_length); 
info->maria_rtree_recursion_depth= level; - *saved_key= k - page.buff; + *saved_key= (uint) (k - page.buff); if (after_key < last) { diff --git a/storage/maria/ma_rt_key.c b/storage/maria/ma_rt_key.c index 488137ff159..500e484cf02 100644 --- a/storage/maria/ma_rt_key.c +++ b/storage/maria/ma_rt_key.c @@ -58,7 +58,7 @@ int maria_rtree_add_key(const MARIA_KEY *key, MARIA_PAGE *page, page->size+= tot_key_length; page_store_size(share, page); if (share->now_transactional && - _ma_log_add(page, key_pos - page->buff, + _ma_log_add(page, (uint)(key_pos - page->buff), key_pos, tot_key_length, tot_key_length, 0, KEY_OP_DEBUG_LOG_ADD_1)) DBUG_RETURN(-1); diff --git a/storage/maria/ma_rt_split.c b/storage/maria/ma_rt_split.c index c26c0277e4f..898cb574f63 100644 --- a/storage/maria/ma_rt_split.c +++ b/storage/maria/ma_rt_split.c @@ -308,7 +308,7 @@ static my_bool _ma_log_rt_split(MARIA_PAGE *page, uint translog_parts, extra_length= 0; my_off_t page_pos; DBUG_ENTER("_ma_log_rt_split"); - DBUG_PRINT("enter", ("page: %lu", (ulong) page)); + DBUG_PRINT("enter", ("page: %p", page)); DBUG_ASSERT(share->now_transactional); page_pos= page->pos / share->block_size; @@ -477,11 +477,11 @@ int maria_rtree_split_page(const MARIA_KEY *key, MARIA_PAGE *page, memcpy(to_with_nod_flag, cur_key_with_nod_flag, full_length); if (log_this_change) { - uint to_with_nod_flag_offs= to_with_nod_flag - page->buff; + size_t to_with_nod_flag_offs= to_with_nod_flag - page->buff; if (likely(cur_key != key->data)) { /* this memcpy() is internal to the page (source in the page) */ - uint cur_key_with_nod_flag_offs= cur_key_with_nod_flag - page->buff; + size_t cur_key_with_nod_flag_offs= cur_key_with_nod_flag - page->buff; int2store(log_internal_copy_ptr, to_with_nod_flag_offs); log_internal_copy_ptr+= 2; int2store(log_internal_copy_ptr, cur_key_with_nod_flag_offs); @@ -526,8 +526,8 @@ int maria_rtree_split_page(const MARIA_KEY *key, MARIA_PAGE *page, ( /* log change to split page */ _ma_log_rt_split(page, key->data - nod_flag, full_length, log_internal_copy, - log_internal_copy_ptr - log_internal_copy, - log_key_copy, org_length - page->size) || + (uint)(log_internal_copy_ptr - log_internal_copy), + log_key_copy, (uint)(org_length - page->size)) || /* and to new page */ _ma_log_new(&new_page, 0))) err_code= -1; diff --git a/storage/maria/ma_search.c b/storage/maria/ma_search.c index 951850b16a4..089e3fabdb2 100644 --- a/storage/maria/ma_search.c +++ b/storage/maria/ma_search.c @@ -380,8 +380,8 @@ int _ma_seq_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page, { _ma_set_fatal_error(share, HA_ERR_CRASHED); DBUG_PRINT("error", - ("Found wrong key: length: %u page: 0x%lx end: 0x%lx", - length, (long) page, (long) end)); + ("Found wrong key: length: %u page: %p end: %p", + length, page, end)); DBUG_RETURN(MARIA_FOUND_WRONG_KEY); } if ((flag= ha_key_cmp(keyinfo->seg, t_buff, key->data, @@ -389,15 +389,15 @@ int _ma_seq_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page, comp_flag | tmp_key.flag, not_used)) >= 0) break; - DBUG_PRINT("loop_extra",("page: 0x%lx key: '%s' flag: %d", - (long) page, t_buff, flag)); + DBUG_PRINT("loop_extra",("page:%p key: '%s' flag: %d", + page, t_buff, flag)); memcpy(buff,t_buff,length); *ret_pos=page; } if (flag == 0) memcpy(buff,t_buff,length); /* Result is first key */ *last_key= page == end; - DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos)); + DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos)); DBUG_RETURN(flag); } /* _ma_seq_search */ @@ -555,8 +555,8 @@ int 
_ma_prefix_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page, { _ma_set_fatal_error(share, HA_ERR_CRASHED); DBUG_PRINT("error", - ("Found wrong key: length: %u page: 0x%lx end: %lx", - length, (long) page, (long) end)); + ("Found wrong key: length: %u page: %p end: %p", + length, page, end)); DBUG_RETURN(MARIA_FOUND_WRONG_KEY); } @@ -692,7 +692,7 @@ int _ma_prefix_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page, *last_key= page == end; - DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos)); + DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos)); DBUG_RETURN(flag); } /* _ma_prefix_search */ @@ -1047,8 +1047,8 @@ uint _ma_get_pack_key(MARIA_KEY *int_key, uint page_flag, if (length > keyseg->length) { DBUG_PRINT("error", - ("Found too long null packed key: %u of %u at 0x%lx", - length, keyseg->length, (long) *page_pos)); + ("Found too long null packed key: %u of %u at %p", + length, keyseg->length, *page_pos)); DBUG_DUMP("key", *page_pos, 16); _ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED); return 0; @@ -1104,8 +1104,8 @@ uint _ma_get_pack_key(MARIA_KEY *int_key, uint page_flag, } if (length > (uint) keyseg->length) { - DBUG_PRINT("error",("Found too long packed key: %u of %u at 0x%lx", - length, keyseg->length, (long) *page_pos)); + DBUG_PRINT("error",("Found too long packed key: %u of %u at %p", + length, keyseg->length, *page_pos)); DBUG_DUMP("key", *page_pos, 16); _ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED); return 0; /* Error */ @@ -1134,7 +1134,7 @@ uint _ma_get_pack_key(MARIA_KEY *int_key, uint page_flag, page+=length; } - int_key->data_length= (key - int_key->data); + int_key->data_length= (uint)(key - int_key->data); int_key->flag= 0; length= keyseg->length; if (page_flag & KEYPAGE_FLAG_HAS_TRANSID) @@ -1263,8 +1263,8 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag, if (length > keyinfo->maxlength) { DBUG_PRINT("error", - ("Found too long binary packed key: %u of %u at 0x%lx", - length, keyinfo->maxlength, (long) *page_pos)); + ("Found too long binary packed key: %u of %u at %p", + length, keyinfo->maxlength, *page_pos)); DBUG_DUMP("key", *page_pos, 16); _ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED); DBUG_RETURN(0); /* Wrong key */ @@ -1325,8 +1325,8 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag, from=page; from_end=page_end; } DBUG_ASSERT((int) length >= 0); - DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u", - (long) key, (long) from, length)); + DBUG_PRINT("info",("key: %p from: %p length: %u", + key, from, length)); memmove(key, from, (size_t) length); key+=length; from+=length; @@ -1336,7 +1336,7 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag, If we have mixed key blocks with data pointer and key block pointer, we have to copy both. 
*/ - int_key->data_length= (key - int_key->data); + int_key->data_length= (uint)(key - int_key->data); int_key->ref_length= length= keyseg->length; int_key->flag= 0; if ((tmp=(uint) (from_end-from)) <= length) @@ -1452,7 +1452,7 @@ uchar *_ma_get_key(MARIA_KEY *key, MARIA_PAGE *ma_page, uchar *keypos) } } } - DBUG_PRINT("exit",("page: 0x%lx length: %u", (long) page, + DBUG_PRINT("exit",("page: %p length: %u", page, key->data_length + key->ref_length)); DBUG_RETURN(page); } /* _ma_get_key */ @@ -1522,8 +1522,8 @@ uchar *_ma_get_last_key(MARIA_KEY *key, MARIA_PAGE *ma_page, uchar *endpos) uchar *lastpos, *page; MARIA_KEYDEF *keyinfo= key->keyinfo; DBUG_ENTER("_ma_get_last_key"); - DBUG_PRINT("enter",("page: 0x%lx endpos: 0x%lx", (long) ma_page->buff, - (long) endpos)); + DBUG_PRINT("enter",("page: %p endpos: %p", ma_page->buff, + endpos)); page_flag= ma_page->flag; nod_flag= ma_page->node; @@ -1548,14 +1548,14 @@ uchar *_ma_get_last_key(MARIA_KEY *key, MARIA_PAGE *ma_page, uchar *endpos) lastpos= page; if (!(*keyinfo->get_key)(key, page_flag, nod_flag, &page)) { - DBUG_PRINT("error",("Couldn't find last key: page: 0x%lx", - (long) page)); + DBUG_PRINT("error",("Couldn't find last key: page: %p", + page)); _ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED); DBUG_RETURN(0); } } } - DBUG_PRINT("exit",("lastpos: 0x%lx length: %u", (ulong) lastpos, + DBUG_PRINT("exit",("lastpos: %p length: %u", lastpos, key->data_length + key->ref_length)); DBUG_RETURN(lastpos); } /* _ma_get_last_key */ @@ -1654,9 +1654,9 @@ int _ma_search_next(register MARIA_HA *info, MARIA_KEY *key, MARIA_KEY tmp_key; MARIA_PAGE page; DBUG_ENTER("_ma_search_next"); - DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos: 0x%lx page_changed %d keyread_buff_used: %d", + DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos:%p page_changed %d keyread_buff_used: %d", nextflag, (ulong) info->cur_row.lastpos, - (ulong) info->int_keypos, + info->int_keypos, info->page_changed, info->keyread_buff_used)); DBUG_EXECUTE("key", _ma_print_key(DBUG_FILE, key);); @@ -2142,8 +2142,8 @@ _ma_calc_var_pack_key_length(const MARIA_KEY *int_key, uint nod_flag, ref_length=0; next_length_pack=0; } - DBUG_PRINT("test",("length: %d next_key: 0x%lx", length, - (long) next_key)); + DBUG_PRINT("test",("length: %d next_key: %p", length, + next_key)); { uint tmp_length; diff --git a/storage/maria/ma_servicethread.c b/storage/maria/ma_servicethread.c index e495b15eef2..99ae36689de 100644 --- a/storage/maria/ma_servicethread.c +++ b/storage/maria/ma_servicethread.c @@ -31,7 +31,7 @@ int ma_service_thread_control_init(MA_SERVICE_THREAD_CONTROL *control) { int res= 0; DBUG_ENTER("ma_service_thread_control_init"); - DBUG_PRINT("init", ("control 0x%lx", (ulong) control)); + DBUG_PRINT("init", ("control %p", control)); control->inited= TRUE; control->killed= FALSE; res= (mysql_mutex_init(key_SERVICE_THREAD_CONTROL_lock, @@ -57,7 +57,7 @@ int ma_service_thread_control_init(MA_SERVICE_THREAD_CONTROL *control) void ma_service_thread_control_end(MA_SERVICE_THREAD_CONTROL *control) { DBUG_ENTER("ma_service_thread_control_end"); - DBUG_PRINT("init", ("control 0x%lx", (ulong) control)); + DBUG_PRINT("init", ("control %p", control)); DBUG_ASSERT(control->inited); mysql_mutex_lock(control->LOCK_control); if (!control->killed) @@ -95,7 +95,7 @@ my_bool my_service_thread_sleep(MA_SERVICE_THREAD_CONTROL *control, struct timespec abstime; my_bool res= FALSE; DBUG_ENTER("my_service_thread_sleep"); - DBUG_PRINT("init", ("control 0x%lx", (ulong) control)); + 
DBUG_PRINT("init", ("control %p", control)); mysql_mutex_lock(control->LOCK_control); if (control->killed) { diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c index 6e106976b70..024b72fff2e 100644 --- a/storage/maria/ma_sort.c +++ b/storage/maria/ma_sort.c @@ -500,10 +500,10 @@ static my_bool _ma_thr_find_all_keys_exec(MARIA_SORT_PARAM* sort_param) (BUFFPEK *) alloc_dynamic(&sort_param->buffpek), &sort_param->tempfile)) goto err; - sort_param->keys= (sort_param->buffpek.elements - 1) * (keys - 1) + idx; + sort_param->keys= (uint)((sort_param->buffpek.elements - 1) * (keys - 1) + idx); } else - sort_param->keys= idx; + sort_param->keys= (uint)idx; DBUG_RETURN(FALSE); @@ -627,7 +627,7 @@ int _ma_thr_write_keys(MARIA_SORT_PARAM *sort_param) uint maxbuffer=sinfo->buffpek.elements-1; if (!mergebuf) { - length=param->sort_buffer_length; + length=(size_t)param->sort_buffer_length; while (length >= MIN_SORT_MEMORY) { if ((mergebuf= my_malloc((size_t) length, MYF(0)))) @@ -919,13 +919,13 @@ static my_off_t read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, uint sort_length) { register ha_keys count; - my_off_t length; + size_t length; if ((count= (ha_keys) MY_MIN((ha_rows) buffpek->max_keys, (ha_rows) buffpek->count))) { if (my_b_pread(fromfile, (uchar*) buffpek->base, - (length= sort_length * count), buffpek->file_pos)) + (length= sort_length * (size_t)count), buffpek->file_pos)) return(HA_OFFSET_ERROR); /* purecov: inspected */ buffpek->key=buffpek->base; buffpek->file_pos+= length; /* New filepos */ diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c index 817e1b69ddf..fc79bbfac91 100644 --- a/storage/maria/ma_state.c +++ b/storage/maria/ma_state.c @@ -86,8 +86,8 @@ my_bool _ma_setup_live_state(MARIA_HA *info) mysql_mutex_lock(&share->intern_lock); share->in_trans++; - DBUG_PRINT("info", ("share: 0x%lx in_trans: %d", - (ulong) share, share->in_trans)); + DBUG_PRINT("info", ("share: %p in_trans: %d", + share, share->in_trans)); history= share->state_history; @@ -524,8 +524,8 @@ my_bool _ma_trnman_end_trans_hook(TRN *trn, my_bool commit, /* Remove not visible states */ share->state_history= _ma_remove_not_visible_states(history, 0, 1); } - DBUG_PRINT("info", ("share: 0x%lx in_trans: %d", - (ulong) share, share->in_trans)); + DBUG_PRINT("info", ("share: %p in_trans: %d", + share, share->in_trans)); } } share->in_trans--; diff --git a/storage/maria/ma_write.c b/storage/maria/ma_write.c index aad555a85a1..ff68b4bb9a2 100644 --- a/storage/maria/ma_write.c +++ b/storage/maria/ma_write.c @@ -787,7 +787,7 @@ int _ma_insert(register MARIA_HA *info, MARIA_KEY *key, MARIA_SHARE *share= info->s; MARIA_KEYDEF *keyinfo= key->keyinfo; DBUG_ENTER("_ma_insert"); - DBUG_PRINT("enter",("key_pos: 0x%lx", (ulong) key_pos)); + DBUG_PRINT("enter",("key_pos:%p", key_pos)); DBUG_EXECUTE("key", _ma_print_key(DBUG_FILE, key);); /* @@ -813,8 +813,8 @@ int _ma_insert(register MARIA_HA *info, MARIA_KEY *key, { DBUG_PRINT("test",("t_length: %d ref_len: %d", t_length,s_temp.ref_length)); - DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: 0x%lx", - s_temp.n_ref_length, s_temp.n_length, (long) s_temp.key)); + DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: %p", + s_temp.n_ref_length, s_temp.n_length, s_temp.key)); } #endif if (t_length > 0) @@ -1128,8 +1128,8 @@ uchar *_ma_find_half_pos(MARIA_KEY *key, MARIA_PAGE *ma_page, DBUG_RETURN(0); } while (page < end); *after_key= page; - DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx half: 0x%lx", - (long) lastpos, (long) page, (long) end)); + 
DBUG_PRINT("exit",("returns: %p page: %p half: %p", + lastpos, page, end)); DBUG_RETURN(lastpos); } /* _ma_find_half_pos */ @@ -1211,8 +1211,8 @@ static uchar *_ma_find_last_pos(MARIA_KEY *int_key, MARIA_PAGE *ma_page, } while (page < end); *after_key=lastpos; - DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx end: 0x%lx", - (long) prevpos,(long) page,(long) end)); + DBUG_PRINT("exit",("returns: %p page: %p end: %p", + prevpos,page,end)); DBUG_RETURN(prevpos); } /* _ma_find_last_pos */ diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c index fc3d3492252..cb8b374691e 100644 --- a/storage/maria/maria_chk.c +++ b/storage/maria/maria_chk.c @@ -1526,8 +1526,8 @@ static void descript(HA_CHECK *param, register MARIA_HA *info, char *name) } if (share->base.born_transactional) { - printf("LSNs: create_rename (%lu,0x%lx)," - " state_horizon (%lu,0x%lx), skip_redo (%lu,0x%lx)\n", + printf("LSNs: create_rename " LSN_FMT "," + " state_horizon " LSN_FMT ", skip_redo " LSN_FMT "\n", LSN_IN_PARTS(share->state.create_rename_lsn), LSN_IN_PARTS(share->state.is_of_horizon), LSN_IN_PARTS(share->state.skip_redo_lsn)); diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c index a0724b2199b..2c24c125f36 100644 --- a/storage/maria/maria_read_log.c +++ b/storage/maria/maria_read_log.c @@ -70,7 +70,7 @@ int main(int argc, char **argv) fprintf(stderr, "Can't find any log\n"); goto err; } - if (init_pagecache(maria_pagecache, opt_page_buffer_size, 0, 0, + if (init_pagecache(maria_pagecache, (size_t)opt_page_buffer_size, 0, 0, maria_block_size, 0, MY_WME) == 0) { fprintf(stderr, "Got error in init_pagecache() (errno: %d)\n", errno); @@ -109,11 +109,11 @@ int main(int argc, char **argv) last_checkpoint_lsn != LSN_IMPOSSIBLE) { lsn= LSN_IMPOSSIBLE; /* LSN set in maria_apply_log() */ - fprintf(stdout, "Starting from checkpoint (%lu,0x%lx)\n", + fprintf(stdout, "Starting from checkpoint " LSN_FMT "\n", LSN_IN_PARTS(last_checkpoint_lsn)); } else - fprintf(stdout, "The transaction log starts from lsn (%lu,0x%lx)\n", + fprintf(stdout, "The transaction log starts from lsn " LSN_FMT "\n", LSN_IN_PARTS(lsn)); if (opt_start_from_lsn) @@ -125,7 +125,7 @@ int main(int argc, char **argv) goto err; } lsn= (LSN) opt_start_from_lsn; - fprintf(stdout, "Starting reading log from lsn (%lu,0x%lx)\n", + fprintf(stdout, "Starting reading log from lsn " LSN_FMT "\n", LSN_IN_PARTS(lsn)); } diff --git a/storage/maria/unittest/ma_test_loghandler-t.c b/storage/maria/unittest/ma_test_loghandler-t.c index aa8615e9b77..02922ed9331 100644 --- a/storage/maria/unittest/ma_test_loghandler-t.c +++ b/storage/maria/unittest/ma_test_loghandler-t.c @@ -114,7 +114,7 @@ static my_bool check_content(uchar *ptr, ulong length) void read_ok(TRANSLOG_HEADER_BUFFER *rec) { - ok(1, "read record type: %u LSN: (%lu,0x%lx)", + ok(1, "read record type: %u LSN: " LSN_FMT, rec->type, LSN_IN_PARTS(rec->lsn)); } @@ -399,7 +399,7 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE " "data read(0)\n" "type %u, strid %u, len %u, i: %u, 4: %u 5: %u, " - "lsn(%lu,0x%lx)\n", + "lsn" LSN_FMT "\n", (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, (uint) uint4korr(rec.header), (uint) rec.header[4], (uint) rec.header[5], @@ -444,8 +444,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_1LSN_EXAMPLE " "data read(%d) " "type: %u strid: %u len: %u" - "ref: (%lu,0x%lx) (%lu,0x%lx) " - "lsn(%lu,0x%lx)\n", + "ref: 
" LSN_FMT " " LSN_FMT " " + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, LSN_IN_PARTS(ref), LSN_IN_PARTS(lsn), @@ -475,9 +475,9 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_2LSN_EXAMPLE " "data read(%d) " - "type %u, strid %u, len %u, ref1(%lu,0x%lx), " - "ref2(%lu,0x%lx) %x%x%x%x%x%x%x%x%x " - "lsn(%lu,0x%lx)\n", + "type %u, strid %u, len %u, ref1" LSN_FMT ", " + "ref2" LSN_FMT " %x%x%x%x%x%x%x%x%x " + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, LSN_IN_PARTS(ref1), LSN_IN_PARTS(ref2), @@ -522,7 +522,7 @@ int main(int argc __attribute__((unused)), char *argv[]) "data read(%d)" "type %u (%d), strid %u (%d), len %lu, %lu + 7 (%d), " "hdr len: %u (%d), " - "ref(%lu,0x%lx), lsn(%lu,0x%lx) (%d), content: %d\n", + "ref" LSN_FMT ", lsn" LSN_FMT " (%d), content: %d\n", i, (uint) rec.type, rec.type != LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE, (uint) rec.short_trid, @@ -541,7 +541,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE " - "in whole rec read lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); goto err; } @@ -565,8 +565,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE " "data read(%d) " "type %u, strid %u, len %lu != %lu + 14, hdr len: %d, " - "ref1(%lu,0x%lx), ref2(%lu,0x%lx), " - "lsn(%lu,0x%lx)\n", + "ref1" LSN_FMT ", ref2" LSN_FMT ", " + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (ulong) rec.record_length, (ulong) rec_len, len, LSN_IN_PARTS(ref1), LSN_IN_PARTS(ref2), @@ -577,7 +577,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE " - "in whole rec read lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); goto err; } @@ -606,7 +606,7 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE " "data read(%d)\n" "type %u, strid %u, len %u, i: %u, 4: %u 5: %u " - "lsn(%lu,0x%lx)\n", + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, (uint) uint4korr(rec.header), (uint) rec.header[4], @@ -629,7 +629,7 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE " "data read(%d) " "type %u, strid %u, len %lu != %lu, hdr len: %d, " - "lsn(%lu,0x%lx)\n", + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (ulong) rec.record_length, (ulong) rec_len, len, LSN_IN_PARTS(rec.lsn)); @@ -639,7 +639,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE " - "in whole rec read lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); goto err; } diff --git a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c index 9306be3958e..87f0cf6c72c 100644 --- a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c @@ -102,7 +102,7 @@ int main(int argc __attribute__((unused)), char *argv[]) first_lsn= translog_first_lsn_in_log(); if (first_lsn != LSN_IMPOSSIBLE) { - fprintf(stderr, "Incorrect first lsn response (%lu,0x%lx).", + fprintf(stderr, "Incorrect first lsn 
response " LSN_FMT ".", LSN_IN_PARTS(first_lsn)); translog_destroy(); exit(1); @@ -140,8 +140,8 @@ int main(int argc __attribute__((unused)), char *argv[]) first_lsn= translog_first_lsn_in_log(); if (first_lsn != theor_lsn) { - fprintf(stderr, "Incorrect first lsn: (%lu,0x%lx) " - " theoretical first: (%lu,0x%lx)\n", + fprintf(stderr, "Incorrect first lsn: " LSN_FMT " " + " theoretical first: " LSN_FMT "\n", LSN_IN_PARTS(first_lsn), LSN_IN_PARTS(theor_lsn)); translog_destroy(); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c index 9ff391b3814..6cb2fcb55e5 100644 --- a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c @@ -94,7 +94,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } if (max_lsn != LSN_IMPOSSIBLE) { - fprintf(stderr, "Incorrect first lsn response (%lu,0x%lx).", + fprintf(stderr, "Incorrect first lsn response " LSN_FMT ".", LSN_IN_PARTS(max_lsn)); translog_destroy(); exit(1); @@ -138,8 +138,8 @@ int main(int argc __attribute__((unused)), char *argv[]) } if (max_lsn != last_lsn) { - fprintf(stderr, "Incorrect max lsn: (%lu,0x%lx) " - " last lsn on first file: (%lu,0x%lx)\n", + fprintf(stderr, "Incorrect max lsn: " LSN_FMT " " + " last lsn on first file: " LSN_FMT "\n", LSN_IN_PARTS(max_lsn), LSN_IN_PARTS(last_lsn)); translog_destroy(); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c index 39cff18a3ab..69bc2f70f8c 100644 --- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c @@ -485,7 +485,7 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE " "data read(0)\n" "type %u, strid %u, len %u, i: %u, 4: %u 5: %u, " - "lsn(0x%lu,0x%lx)\n", + LSN_FMT "\n", (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, (uint)uint4korr(rec.header), (uint) rec.header[4], (uint) rec.header[5], @@ -533,7 +533,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_1LSN_EXAMPLE " "data read(%d)" - "type %u, strid %u, len %u, ref(%lu,0x%lx), lsn(%lu,0x%lx)\n", + "type %u, strid %u, len %u, ref" LSN_FMT ", lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, LSN_IN_PARTS(ref), LSN_IN_PARTS(rec.lsn)); @@ -563,9 +563,9 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_2LSN_EXAMPLE " "data read(%d) " - "type %u, strid %u, len %u, ref1(%lu,0x%lx), " - "ref2(%lu,0x%lx) %x%x%x%x%x%x%x%x%x " - "lsn(%lu,0x%lx)\n", + "type %u, strid %u, len %u, ref1" LSN_FMT ", " + "ref2" LSN_FMT " %x%x%x%x%x%x%x%x%x " + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, LSN_IN_PARTS(ref1), LSN_IN_PARTS(ref2), @@ -611,7 +611,7 @@ int main(int argc __attribute__((unused)), char *argv[]) "data read(%d)" "type %u (%d), strid %u (%d), len %lu, %lu + 7 (%d), " "hdr len: %d (%d), " - "ref(%lu,0x%lx), lsn(%lu,0x%lx) (%d), content: %d\n", + "ref" LSN_FMT ", lsn" LSN_FMT " (%d), content: %d\n", i, (uint) rec.type, rec.type !=LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE, (uint) rec.short_trid, @@ -631,7 +631,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE " - "in whole rec read 
lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); translog_free_record_header(&rec); goto err; @@ -655,8 +655,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE " " data read(%d) " "type %u, strid %u, len %lu != %lu + 14, hdr len: %d, " - "ref1(%lu,0x%lx), ref2(%lu,0x%lx), " - "lsn(%lu,0x%lx)\n", + "ref1" LSN_FMT ", ref2" LSN_FMT ", " + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (ulong) rec.record_length, (ulong) rec_len, len, @@ -669,7 +669,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE " - "in whole rec read lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); translog_free_record_header(&rec); goto err; @@ -701,7 +701,7 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE " "data read(%d)\n" "type %u, strid %u, len %u, i: %u, 4: %u 5: %u " - "lsn(%lu,0x%lx)\n", + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length, (uint)uint4korr(rec.header), (uint) rec.header[4], @@ -725,7 +725,7 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE " "data read(%d) " "type %u, strid %u, len %lu != %lu, hdr len: %d, " - "lsn(%lu,0x%lx)\n", + "lsn" LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (ulong) rec.record_length, (ulong) rec_len, len, LSN_IN_PARTS(rec.lsn)); @@ -736,7 +736,7 @@ int main(int argc __attribute__((unused)), char *argv[]) { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE " - "in whole rec read lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); translog_free_record_header(&rec); goto err; diff --git a/storage/maria/unittest/ma_test_loghandler_multithread-t.c b/storage/maria/unittest/ma_test_loghandler_multithread-t.c index ff843937fcd..8b834527dd0 100644 --- a/storage/maria/unittest/ma_test_loghandler_multithread-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multithread-t.c @@ -486,7 +486,7 @@ int main(int argc __attribute__((unused)), fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE " "data read(%d)\n" "type %u, strid %u %u, len %u, i: %u %u, " - "lsn(%lu,0x%lx) (%lu,0x%lx)\n", + "lsn" LSN_FMT " " LSN_FMT "\n", i, (uint) rec.type, (uint) rec.short_trid, (uint) uint2korr(rec.header), (uint) rec.record_length, @@ -510,7 +510,7 @@ int main(int argc __attribute__((unused)), "data read(%d) " "thread: %d, iteration %d, stage %d\n" "type %u (%d), len %d, length %lu %lu (%d) " - "lsn(%lu,0x%lx) (%lu,0x%lx)\n", + "lsn" LSN_FMT " " LSN_FMT "\n", i, (uint) rec.short_trid, index, stage, (uint) rec.type, (rec.type != LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE), @@ -526,7 +526,7 @@ int main(int argc __attribute__((unused)), { fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE " - "in whole rec read lsn(%lu,0x%lx)\n", + "in whole rec read lsn" LSN_FMT "\n", LSN_IN_PARTS(rec.lsn)); translog_free_record_header(&rec); goto err; diff --git a/storage/maria/unittest/ma_test_loghandler_noflush-t.c b/storage/maria/unittest/ma_test_loghandler_noflush-t.c index 9555cc0842f..2be6dc1da3c 100644 --- a/storage/maria/unittest/ma_test_loghandler_noflush-t.c +++ b/storage/maria/unittest/ma_test_loghandler_noflush-t.c @@ -116,7 +116,7 @@ int main(int argc __attribute__((unused)), char *argv[]) "data read(0)\n" "type: %u (%d) strid: %u 
(%d) len: %u (%d) i: %u (%d), " "4: %u (%d) 5: %u (%d) " - "lsn(%lu,0x%lx) (%d)\n", + "lsn" LSN_FMT " (%d)\n", (uint) rec.type, (rec.type !=LOGREC_FIXED_RECORD_0LSN_EXAMPLE), (uint) rec.short_trid, (rec.short_trid != 0), (uint) rec.record_length, (rec.record_length != 6), diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index cd69f3cd8ee..62c400b1c07 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -341,8 +341,8 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, } } } - DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d", - (long) found, recpos, minpos, length)); + DBUG_PRINT("loop", ("found: %p recpos: %d minpos: %d length: %d", + found, recpos, minpos, length)); if (recpos != minpos) { /* reserve space for null bits */ diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index cec4f04810e..9e60ae41e14 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -2915,8 +2915,8 @@ int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info, */ sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache : new_data_cache); - DBUG_PRINT("io_cache_share", ("thread: %u read_cache: 0x%lx", - i, (long) &sort_param[i].read_cache)); + DBUG_PRINT("io_cache_share", ("thread: %u read_cache: %p", + i, &sort_param[i].read_cache)); /* two approaches: the same amount of memory for each thread @@ -3967,7 +3967,7 @@ static int sort_ft_key_write(MI_SORT_PARAM *sort_param, const void *a) key_block++; sort_info->key_block=key_block; sort_param->keyinfo=& sort_info->info->s->ft2_keyinfo; - ft_buf->count=((uchar*) ft_buf->buf - p)/val_len; + ft_buf->count=(int)((uchar*) ft_buf->buf - p)/val_len; /* flushing buffer to second-level tree */ for (error=0; !error && p < (uchar*) ft_buf->buf; p+= val_len) diff --git a/storage/myisam/mi_close.c b/storage/myisam/mi_close.c index 3681821697f..d68110ead6d 100644 --- a/storage/myisam/mi_close.c +++ b/storage/myisam/mi_close.c @@ -27,8 +27,8 @@ int mi_close(register MI_INFO *info) int error=0,flag; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_close"); - DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u", - (long) info, (uint) share->reopen, + DBUG_PRINT("enter",("base: %p reopen: %u locks: %u", + info, (uint) share->reopen, (uint) share->tot_locks)); if (info->open_list.data) diff --git a/storage/myisam/mi_delete.c b/storage/myisam/mi_delete.c index 99185844b72..92b368d92dc 100644 --- a/storage/myisam/mi_delete.c +++ b/storage/myisam/mi_delete.c @@ -410,8 +410,8 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key, MYISAM_SHARE *share=info->s; MI_KEY_PARAM s_temp; DBUG_ENTER("del"); - DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", (long) leaf_page, - (ulong) keypos)); + DBUG_PRINT("enter",("leaf_page: %lld keypos: %p", leaf_page, + keypos)); DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff)); endpos=leaf_buff+mi_getint(leaf_buff); @@ -516,8 +516,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, MI_KEY_PARAM s_temp; MYISAM_SHARE *share=info->s; DBUG_ENTER("underflow"); - DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx",(long) leaf_page, - (ulong) keypos)); + DBUG_PRINT("enter",("leaf_page: %lld keypos: %p",leaf_page, + keypos)); DBUG_DUMP("anc_buff",(uchar*) anc_buff,mi_getint(anc_buff)); DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff)); @@ -597,8 +597,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo, else { /* Page is full */ 
endpos=anc_buff+anc_length; - DBUG_PRINT("test",("anc_buff: 0x%lx endpos: 0x%lx", - (long) anc_buff, (long) endpos)); + DBUG_PRINT("test",("anc_buff: %p endpos: %p", + anc_buff, endpos)); if (keypos != anc_buff+2+key_reflength && !_mi_get_last_key(info,keyinfo,anc_buff,anc_key,keypos,&length)) goto err; @@ -776,7 +776,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag, int s_length; uchar *start; DBUG_ENTER("remove_key"); - DBUG_PRINT("enter",("keypos: 0x%lx page_end: 0x%lx",(long) keypos, (long) page_end)); + DBUG_PRINT("enter",("keypos: %p page_end: %p",keypos, page_end)); start=keypos; if (!(keyinfo->flag & diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c index 5241d72014c..c4baca3d89f 100644 --- a/storage/myisam/mi_dynrec.c +++ b/storage/myisam/mi_dynrec.c @@ -1344,8 +1344,8 @@ ulong _mi_rec_unpack(register MI_INFO *info, register uchar *to, uchar *from, err: my_errno= HA_ERR_WRONG_IN_RECORD; - DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx", - (long) to, (long) to_end, (long) from, (long) from_end)); + DBUG_PRINT("error",("to_end: %p -> %p from_end: %p -> %p", + to, to_end, from, from_end)); DBUG_DUMP("from",(uchar*) info->rec_buff,info->s->base.min_pack_length); DBUG_RETURN(MY_FILE_ERROR); } /* _mi_rec_unpack */ diff --git a/storage/myisam/mi_preload.c b/storage/myisam/mi_preload.c index 1a2d5aac94f..e0d23e0fca0 100644 --- a/storage/myisam/mi_preload.c +++ b/storage/myisam/mi_preload.c @@ -41,7 +41,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) { uint i; - ulong length, block_length= 0; + size_t length, block_length= 0; uchar *buff= NULL; MYISAM_SHARE* share= info->s; uint keys= share->state.header.keys; @@ -68,7 +68,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) } } else - block_length= share->key_cache->param_block_size; + block_length= (size_t)share->key_cache->param_block_size; length= info->preload_buff_size/block_length * block_length; set_if_bigger(length, block_length); @@ -84,7 +84,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves) { /* Read the next block of index file into the preload buffer */ if ((my_off_t) length > (key_file_length-pos)) - length= (ulong) (key_file_length-pos); + length= (size_t) (key_file_length-pos); if (mysql_file_pread(share->kfile, (uchar*) buff, length, pos, MYF(MY_FAE|MY_FNABP))) goto err; diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c index b70922b0505..1dddb8b49ad 100644 --- a/storage/myisam/mi_rkey.c +++ b/storage/myisam/mi_rkey.c @@ -32,8 +32,8 @@ int mi_rkey(MI_INFO *info, uchar *buf, int inx, const uchar *key, uint pack_key_length, use_key_length, nextflag; ICP_RESULT res= ICP_NO_MATCH; DBUG_ENTER("mi_rkey"); - DBUG_PRINT("enter", ("base: 0x%lx buf: 0x%lx inx: %d search_flag: %d", - (long) info, (long) buf, inx, search_flag)); + DBUG_PRINT("enter", ("base: %p buf: %p inx: %d search_flag: %d", + info, buf, inx, search_flag)); if ((inx = _mi_check_index(info,inx)) < 0) DBUG_RETURN(my_errno); diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c index 60a34c641ad..79ed846ce7e 100644 --- a/storage/myisam/mi_search.c +++ b/storage/myisam/mi_search.c @@ -267,8 +267,8 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, mi_print_error(info->s, HA_ERR_CRASHED); my_errno=HA_ERR_CRASHED; DBUG_PRINT("error", - ("Found wrong key: length: %u page: 0x%lx end: 0x%lx", - length, (long) page, (long) end)); + ("Found wrong key: length: %u page: %p end: %p", + 
length, page, end)); DBUG_RETURN(MI_FOUND_WRONG_KEY); } if ((flag=ha_key_cmp(keyinfo->seg,t_buff,key,key_len,comp_flag, @@ -284,7 +284,7 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, if (flag == 0) memcpy(buff,t_buff,length); /* Result is first key */ *last_key= page == end; - DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos)); + DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos)); DBUG_RETURN(flag); } /* _mi_seq_search */ @@ -419,8 +419,8 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, mi_print_error(info->s, HA_ERR_CRASHED); my_errno=HA_ERR_CRASHED; DBUG_PRINT("error", - ("Found wrong key: length: %u page: 0x%lx end: %lx", - length, (long) page, (long) end)); + ("Found wrong key: length: %u page: %p end: %p", + length, page, end)); DBUG_RETURN(MI_FOUND_WRONG_KEY); } @@ -554,7 +554,7 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, *last_key= page == end; - DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos)); + DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos)); DBUG_RETURN(flag); } /* _mi_prefix_search */ @@ -816,8 +816,8 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, if (length > keyseg->length) { DBUG_PRINT("error", - ("Found too long null packed key: %u of %u at 0x%lx", - length, keyseg->length, (long) *page_pos)); + ("Found too long null packed key: %u of %u at %p", + length, keyseg->length, *page_pos)); DBUG_DUMP("key", *page_pos, 16); mi_print_error(keyinfo->share, HA_ERR_CRASHED); my_errno=HA_ERR_CRASHED; @@ -873,8 +873,8 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, } if (length > (uint) keyseg->length) { - DBUG_PRINT("error",("Found too long packed key: %u of %u at 0x%lx", - length, keyseg->length, (long) *page_pos)); + DBUG_PRINT("error",("Found too long packed key: %u of %u at %p", + length, keyseg->length, *page_pos)); DBUG_DUMP("key", *page_pos, 16); mi_print_error(keyinfo->share, HA_ERR_CRASHED); my_errno=HA_ERR_CRASHED; @@ -945,8 +945,8 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, if (length > keyinfo->maxlength) { DBUG_PRINT("error", - ("Found too long binary packed key: %u of %u at 0x%lx", - length, keyinfo->maxlength, (long) *page_pos)); + ("Found too long binary packed key: %u of %u at %p", + length, keyinfo->maxlength, *page_pos)); DBUG_DUMP("key", *page_pos, 16); goto crashed; /* Wrong key */ } @@ -1003,8 +1003,8 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag, length-=tmp; from=page; from_end=page_end; } - DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u", - (long) key, (long) from, length)); + DBUG_PRINT("info",("key: %p from: %p length: %u", + key, from, length)); memmove((uchar*) key, (uchar*) from, (size_t) length); key+=length; from+=length; @@ -1077,7 +1077,7 @@ uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, } } } - DBUG_PRINT("exit",("page: 0x%lx length: %u", (long) page, + DBUG_PRINT("exit",("page: %p length: %u", page, *return_key_length)); DBUG_RETURN(page); } /* _mi_get_key */ @@ -1130,8 +1130,8 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, uint nod_flag; uchar *lastpos; DBUG_ENTER("_mi_get_last_key"); - DBUG_PRINT("enter",("page: 0x%lx endpos: 0x%lx", (long) page, - (long) endpos)); + DBUG_PRINT("enter",("page:%p endpos: %p", page, + endpos)); nod_flag=mi_test_if_nod(page); if (! 
(keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY))) @@ -1151,15 +1151,15 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page, *return_key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&page,lastkey); if (*return_key_length == 0) { - DBUG_PRINT("error",("Couldn't find last key: page: 0x%lx", - (long) page)); + DBUG_PRINT("error",("Couldn't find last key: page: %p", + page)); mi_print_error(info->s, HA_ERR_CRASHED); my_errno=HA_ERR_CRASHED; DBUG_RETURN(0); } } } - DBUG_PRINT("exit",("lastpos: 0x%lx length: %u", (long) lastpos, + DBUG_PRINT("exit",("lastpos: %p length: %u", lastpos, *return_key_length)); DBUG_RETURN(lastpos); } /* _mi_get_last_key */ @@ -1245,9 +1245,9 @@ int _mi_search_next(register MI_INFO *info, register MI_KEYDEF *keyinfo, uint nod_flag; uchar lastkey[HA_MAX_KEY_BUFF]; DBUG_ENTER("_mi_search_next"); - DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos: %lu", - nextflag, (ulong) info->lastpos, - (ulong) info->int_keypos)); + DBUG_PRINT("enter",("nextflag: %u lastpos: %llu int_keypos: %p", + nextflag, info->lastpos, + info->int_keypos)); DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,key_length);); /* Force full read if we are at last key or if we are not on a leaf @@ -1697,8 +1697,8 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key, ref_length=0; next_length_pack=0; } - DBUG_PRINT("test",("length: %d next_key: 0x%lx", length, - (long) next_key)); + DBUG_PRINT("test",("length: %d next_key: %p", length, + next_key)); { uint tmp_length; diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index 6603eb789a1..81af85acfc2 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -471,7 +471,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *endpos, *prev_key; MI_KEY_PARAM s_temp; DBUG_ENTER("_mi_insert"); - DBUG_PRINT("enter",("key_pos: 0x%lx", (long) key_pos)); + DBUG_PRINT("enter",("key_pos: %p", key_pos)); DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,USE_WHOLE_KEY);); nod_flag=mi_test_if_nod(anc_buff); @@ -492,8 +492,8 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo, { DBUG_PRINT("test",("t_length: %d ref_len: %d", t_length,s_temp.ref_length)); - DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: 0x%lx", - s_temp.n_ref_length,s_temp.n_length, (long) s_temp.key)); + DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: %p", + s_temp.n_ref_length,s_temp.n_length, s_temp.key)); } #endif if (t_length > 0) @@ -693,8 +693,8 @@ uchar *_mi_find_half_pos(uint nod_flag, MI_KEYDEF *keyinfo, uchar *page, } while (page < end); *return_key_length=length; *after_key=page; - DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx half: 0x%lx", - (long) lastpos, (long) page, (long) end)); + DBUG_PRINT("exit",("returns: %p page: %p half: %p", + lastpos, page, end)); DBUG_RETURN(lastpos); } /* _mi_find_half_pos */ @@ -750,8 +750,8 @@ static uchar *_mi_find_last_pos(MI_KEYDEF *keyinfo, uchar *page, *return_key_length=last_length; *after_key=lastpos; - DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx end: 0x%lx", - (long) prevpos,(long) page,(long) end)); + DBUG_PRINT("exit",("returns: %p page: %p end: %p", + prevpos, page, end)); DBUG_RETURN(prevpos); } /* _mi_find_last_pos */ diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index c31ac8cd0d4..2e36c364453 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -28,7 +28,7 @@ static uint decode_bits; static char **default_argv; static 
const char *load_default_groups[]= { "myisamchk", 0 }; -static const char *set_collation_name, *opt_tmpdir; +static char *set_collation_name, *opt_tmpdir; static CHARSET_INFO *set_collation; static long opt_myisam_block_size; static long opt_key_cache_block_size; @@ -1116,7 +1116,7 @@ static int myisamchk(HA_CHECK *param, char * filename) { if (param->testflag & (T_EXTEND | T_MEDIUM)) (void) init_key_cache(dflt_key_cache,opt_key_cache_block_size, - param->use_buffers, 0, 0, 0, 0); + (size_t)param->use_buffers, 0, 0, 0, 0); (void) init_io_cache(¶m->read_cache,datafile, (uint) param->read_buffer_length, READ_CACHE, diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index dbcd7a0cb3b..0cf8305463b 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -359,7 +359,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)), uint test_if_locked_arg) { DBUG_ENTER("ha_myisammrg::open"); - DBUG_PRINT("myrg", ("name: '%s' table: 0x%lx", name, (long) table)); + DBUG_PRINT("myrg", ("name: '%s' table: %p", name, table)); DBUG_PRINT("myrg", ("test_if_locked_arg: %u", test_if_locked_arg)); /* Must not be used when table is open. */ @@ -413,8 +413,8 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)), DBUG_RETURN(my_errno ? my_errno : -1); /* purecov: end */ } - DBUG_PRINT("myrg", ("MYRG_INFO: 0x%lx child tables: %u", - (long) file, file->tables)); + DBUG_PRINT("myrg", ("MYRG_INFO: %p child tables: %u", + file, file->tables)); DBUG_RETURN(0); } @@ -440,8 +440,8 @@ int ha_myisammrg::add_children_list(void) List_iterator_fast<Mrg_child_def> it(child_def_list); Mrg_child_def *mrg_child_def; DBUG_ENTER("ha_myisammrg::add_children_list"); - DBUG_PRINT("myrg", ("table: '%s'.'%s' 0x%lx", this->table->s->db.str, - this->table->s->table_name.str, (long) this->table)); + DBUG_PRINT("myrg", ("table: '%s'.'%s' %p", this->table->s->db.str, + this->table->s->table_name.str, this->table)); /* Must call this with open table. */ DBUG_ASSERT(this->file); @@ -699,12 +699,12 @@ extern "C" MI_INFO *myisammrg_attach_children_callback(void *callback_param) if ((child->file->ht->db_type != DB_TYPE_MYISAM) || !(myisam= ((ha_myisam*) child->file)->file_ptr())) { - DBUG_PRINT("error", ("no MyISAM handle for child table: '%s'.'%s' 0x%lx", + DBUG_PRINT("error", ("no MyISAM handle for child table: '%s'.'%s' %p", child->s->db.str, child->s->table_name.str, - (long) child)); + child)); } - DBUG_PRINT("myrg", ("MyISAM handle: 0x%lx", (long) myisam)); + DBUG_PRINT("myrg", ("MyISAM handle: %p", myisam)); end: @@ -810,8 +810,8 @@ int ha_myisammrg::attach_children(void) int error; Mrg_attach_children_callback_param param(parent_l, this->children_l, child_def_list); DBUG_ENTER("ha_myisammrg::attach_children"); - DBUG_PRINT("myrg", ("table: '%s'.'%s' 0x%lx", table->s->db.str, - table->s->table_name.str, (long) table)); + DBUG_PRINT("myrg", ("table: '%s'.'%s' %p", table->s->db.str, + table->s->table_name.str, table)); DBUG_PRINT("myrg", ("test_if_locked: %u", this->test_if_locked)); /* Must call this with open table. 
*/ @@ -1684,7 +1684,7 @@ uint ha_myisammrg::count_query_cache_dependant_tables(uint8 *tables_type) (*tables_type)|= HA_CACHE_TBL_NONTRANSACT; but it has no effect because HA_CACHE_TBL_NONTRANSACT is 0 */ - return (file->end_table - file->open_tables); + return (uint)(file->end_table - file->open_tables); } diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc index c411025d148..7440df6a487 100644 --- a/storage/perfschema/pfs.cc +++ b/storage/perfschema/pfs.cc @@ -1265,7 +1265,7 @@ static int build_prefix(const LEX_CSTRING *prefix, const char *category, out_ptr+= len; *out_ptr= '/'; out_ptr++; - *output_length= out_ptr - output; + *output_length= (int)(out_ptr - output); return 0; } @@ -1942,7 +1942,7 @@ static void set_thread_id_v1(PSI_thread *thread, ulonglong processlist_id) PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread); if (unlikely(pfs == NULL)) return; - pfs->m_processlist_id= processlist_id; + pfs->m_processlist_id= (ulong)processlist_id; } /** @@ -5121,7 +5121,7 @@ static void set_socket_info_v1(PSI_socket *socket, /** Set socket descriptor */ if (fd != NULL) - pfs->m_fd= *fd; + pfs->m_fd= (uint)*fd; /** Set raw socket address and length */ if (likely(addr != NULL && addr_len > 0)) diff --git a/storage/perfschema/pfs_account.cc b/storage/perfschema/pfs_account.cc index 4e3a6d8d1d3..60467a764e3 100644 --- a/storage/perfschema/pfs_account.cc +++ b/storage/perfschema/pfs_account.cc @@ -202,7 +202,7 @@ static void set_account_key(PFS_account_key *key, } ptr[0]= 0; ptr++; - key->m_key_length= ptr - &key->m_hash_key[0]; + key->m_key_length= (uint)(ptr - &key->m_hash_key[0]); } PFS_account * diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc index f3c6edcef59..2dfad937e87 100644 --- a/storage/perfschema/pfs_engine_table.cc +++ b/storage/perfschema/pfs_engine_table.cc @@ -1359,7 +1359,7 @@ bool pfs_show_status(handlerton *hton, THD *thd, break; } - buflen= longlong10_to_str(size, buf, 10) - buf; + buflen= (uint)(longlong10_to_str(size, buf, 10) - buf); if (print(thd, PERFORMANCE_SCHEMA_str.str, PERFORMANCE_SCHEMA_str.length, name, strlen(name), diff --git a/storage/perfschema/pfs_host.cc b/storage/perfschema/pfs_host.cc index 7da34a6d5f6..15db3e80a94 100644 --- a/storage/perfschema/pfs_host.cc +++ b/storage/perfschema/pfs_host.cc @@ -190,7 +190,7 @@ static void set_host_key(PFS_host_key *key, } ptr[0]= 0; ptr++; - key->m_key_length= ptr - &key->m_hash_key[0]; + key->m_key_length= (uint)(ptr - &key->m_hash_key[0]); } PFS_host *find_or_create_host(PFS_thread *thread, diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc index a9ea6faf212..9cb2c68dbaf 100644 --- a/storage/perfschema/pfs_instr.cc +++ b/storage/perfschema/pfs_instr.cc @@ -952,7 +952,7 @@ PFS_thread* create_thread(PFS_thread_class *klass, const void *identity, pfs->m_thread_internal_id= PFS_atomic::add_u64(&thread_internal_id_counter, 1); pfs->m_parent_thread_internal_id= 0; - pfs->m_processlist_id= processlist_id; + pfs->m_processlist_id= (ulong)processlist_id; pfs->m_event_id= 1; pfs->m_stmt_lock.set_allocated(); pfs->m_session_lock.set_allocated(); @@ -1604,7 +1604,7 @@ PFS_socket* create_socket(PFS_socket_class *klass, const my_socket *fd, uint addr_len_used= addr_len; if (fd != NULL) - fd_used= *fd; + fd_used= (int)*fd; if (addr_len_used > sizeof(sockaddr_storage)) addr_len_used= sizeof(sockaddr_storage); diff --git a/storage/perfschema/pfs_instr_class.cc b/storage/perfschema/pfs_instr_class.cc index 8f2fdc2594e..647702c0d8f 100644 --- 
a/storage/perfschema/pfs_instr_class.cc +++ b/storage/perfschema/pfs_instr_class.cc @@ -449,7 +449,7 @@ static void set_table_share_key(PFS_table_share_key *key, ptr+= table_name_length; ptr[0]= 0; ptr++; - key->m_key_length= ptr - &key->m_hash_key[0]; + key->m_key_length= (uint)(ptr - &key->m_hash_key[0]); if (lower_case_table_names) { diff --git a/storage/perfschema/pfs_setup_actor.cc b/storage/perfschema/pfs_setup_actor.cc index f12d70840c1..40534d65da0 100644 --- a/storage/perfschema/pfs_setup_actor.cc +++ b/storage/perfschema/pfs_setup_actor.cc @@ -152,7 +152,7 @@ static void set_setup_actor_key(PFS_setup_actor_key *key, ptr+= role_length; ptr[0]= 0; ptr++; - key->m_key_length= ptr - &key->m_hash_key[0]; + key->m_key_length= (uint)(ptr - &key->m_hash_key[0]); } int insert_setup_actor(const String *user, const String *host, const String *role) diff --git a/storage/perfschema/pfs_setup_object.cc b/storage/perfschema/pfs_setup_object.cc index 809fe8edd24..9fca9a6b945 100644 --- a/storage/perfschema/pfs_setup_object.cc +++ b/storage/perfschema/pfs_setup_object.cc @@ -145,7 +145,7 @@ static void set_setup_object_key(PFS_setup_object_key *key, ptr+= object_length; ptr[0]= 0; ptr++; - key->m_key_length= ptr - &key->m_hash_key[0]; + key->m_key_length= (uint)(ptr - &key->m_hash_key[0]); } int insert_setup_object(enum_object_type object_type, const String *schema, diff --git a/storage/perfschema/pfs_user.cc b/storage/perfschema/pfs_user.cc index 528457fe017..d2266e1f7ef 100644 --- a/storage/perfschema/pfs_user.cc +++ b/storage/perfschema/pfs_user.cc @@ -190,7 +190,7 @@ static void set_user_key(PFS_user_key *key, } ptr[0]= 0; ptr++; - key->m_key_length= ptr - &key->m_hash_key[0]; + key->m_key_length= (uint)(ptr - &key->m_hash_key[0]); } PFS_user * diff --git a/storage/perfschema/table_events_waits.cc b/storage/perfschema/table_events_waits.cc index cd32d81519e..01f8cd5e6b5 100644 --- a/storage/perfschema/table_events_waits.cc +++ b/storage/perfschema/table_events_waits.cc @@ -279,7 +279,7 @@ int table_events_waits_common::make_socket_object_columns(volatile PFS_events_wa safe_socket->m_addr_len); /* Convert port number to a string (length includes ':') */ - int port_len= int10_to_str(port, (port_str+1), 10) - port_str + 1; + int port_len= (int)(int10_to_str(port, (port_str+1), 10) - port_str + 1); /* OBJECT NAME */ m_row.m_object_name_length= ip_length + port_len; diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 3b01c883d92..a7d0373be2a 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -3239,14 +3239,14 @@ static bool rocksdb_show_status(handlerton *const hton, THD *const thd, // NB! We're replacing hyphens with underscores in output to better match // the existing naming convention. 
if (rdb->GetIntProperty("rocksdb.is-write-stopped", &v)) { - snprintf(buf, sizeof(buf), "rocksdb.is_write_stopped COUNT : %lu\n", v); + snprintf(buf, sizeof(buf), "rocksdb.is_write_stopped COUNT : %llu\n", (ulonglong)v); str.append(buf); } if (rdb->GetIntProperty("rocksdb.actual-delayed-write-rate", &v)) { snprintf(buf, sizeof(buf), "rocksdb.actual_delayed_write_rate " - "COUNT : %lu\n", - v); + "COUNT : %llu\n", + (ulonglong)v); str.append(buf); } @@ -4178,7 +4178,7 @@ std::vector<std::string> rdb_get_open_table_names(void) { } std::vector<std::string> Rdb_open_tables_map::get_table_names(void) const { - ulong i; + size_t i; const Rdb_table_handler *table_handler; std::vector<std::string> names; diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc index 75a9a182fab..da3a3d94354 100644 --- a/storage/rocksdb/rdb_sst_info.cc +++ b/storage/rocksdb/rdb_sst_info.cc @@ -104,9 +104,11 @@ Rdb_sst_file_ordered::Rdb_sst_file::put(const rocksdb::Slice &key, const rocksdb::Slice &value) { DBUG_ASSERT(m_sst_file_writer != nullptr); +#ifdef __GNUC__ // Add the specified key/value to the sst file writer #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif return m_sst_file_writer->Add(key, value); } diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index db61e91d320..d5773de8593 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -1736,7 +1736,7 @@ bool CSphSEQuery::ParseField ( char * sField ) } } else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,... { - char * sName = NULL; + sName = NULL; int iType = 0; CSphSEQuery::Override_t * pOverride = NULL; @@ -1791,7 +1791,7 @@ bool CSphSEQuery::ParseField ( char * sField ) if (!( sRest = strchr ( sRest, ':' ) )) break; *sRest++ = '\0'; if (!( sRest - sId )) break; - char * sValue = sRest; + sValue = sRest; if ( ( sRest = strchr ( sRest, ',' ) )!=NULL ) *sRest++ = '\0'; if ( !*sValue ) @@ -2210,7 +2210,7 @@ int ha_sphinx::Connect ( const char * sHost, ushort uPort ) } char sError[512]; - int iSocket = socket ( iDomain, SOCK_STREAM, 0 ); + int iSocket = (int) socket ( iDomain, SOCK_STREAM, 0 ); if ( iSocket<0 ) { @@ -2671,7 +2671,7 @@ bool ha_sphinx::UnpackStats ( CSphSEStats * pStats ) assert ( pStats ); char * pCurSave = m_pCur; - for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT + for ( uint m=0; m<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); m++ ) // NOLINT { m_pCur += m_bId64 ? 12 : 8; // skip id+weight for ( uint32 i=0; i<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT @@ -3156,7 +3156,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) } } - af->store ( sBuf, pCur-sBuf, &my_charset_bin ); + af->store ( sBuf, uint(pCur-sBuf), &my_charset_bin ); } break; @@ -3383,39 +3383,39 @@ ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * ) // currently provided for doing that. // // Called from handle.cc by ha_create_table(). 
-int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) +int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * ) { SPH_ENTER_METHOD(); char sError[256]; CSphSEShare tInfo; - if ( !ParseUrl ( &tInfo, table, true ) ) + if ( !ParseUrl ( &tInfo, table_arg, true ) ) SPH_RET(-1); // check SphinxAPI table for ( ; !tInfo.m_bSphinxQL; ) { // check system fields (count and types) - if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS ) + if ( table_arg->s->fields<SPHINXSE_SYSTEM_COLUMNS ) { my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns", name, SPHINXSE_SYSTEM_COLUMNS ); break; } - if ( !IsIDField ( table->field[0] ) ) + if ( !IsIDField ( table_arg->field[0] ) ) { my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name ); break; } - if ( !IsIntegerFieldType ( table->field[1]->type() ) ) + if ( !IsIntegerFieldType ( table_arg->field[1]->type() ) ) { my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name ); break; } - enum_field_types f2 = table->field[2]->type(); + enum_field_types f2 = table_arg->field[2]->type(); if ( f2!=MYSQL_TYPE_VARCHAR && f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB ) { @@ -3425,25 +3425,25 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) // check attributes int i; - for ( i=3; i<(int)table->s->fields; i++ ) + for ( i=3; i<(int)table_arg->s->fields; i++ ) { - enum_field_types eType = table->field[i]->type(); + enum_field_types eType = table_arg->field[i]->type(); if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT ) { my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float", - name, i+1, table->field[i]->field_name.str ); + name, i+1, table_arg->field[i]->field_name.str ); break; } } - if ( i!=(int)table->s->fields ) + if ( i!=(int)table_arg->s->fields ) break; // check index if ( - table->s->keys!=1 || - table->key_info[0].user_defined_key_parts!=1 || - strcasecmp ( table->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) ) + table_arg->s->keys!=1 || + table_arg->key_info[0].user_defined_key_parts!=1 || + strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) ) { my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column", name, table->field[2]->field_name.str ); @@ -3461,13 +3461,13 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) sError[0] = '\0'; // check that 1st column is id, is of int type, and has an index - if ( strcmp ( table->field[0]->field_name.str, "id" ) ) + if ( strcmp ( table_arg->field[0]->field_name.str, "id" ) ) { my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name ); break; } - if ( !IsIDField ( table->field[0] ) ) + if ( !IsIDField ( table_arg->field[0] ) ) { my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name ); break; @@ -3475,22 +3475,22 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) // check index if ( - table->s->keys!=1 || - table->key_info[0].user_defined_key_parts!=1 || - strcasecmp ( table->key_info[0].key_part[0].field->field_name.str, "id" ) ) + table_arg->s->keys!=1 || + table_arg->key_info[0].user_defined_key_parts!=1 || + 
strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, "id" ) ) { my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name ); break; } // check column types - for ( int i=1; i<(int)table->s->fields; i++ ) + for ( int i=1; i<(int)table_arg->s->fields; i++ ) { - enum_field_types eType = table->field[i]->type(); + enum_field_types eType = table_arg->field[i]->type(); if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT ) { my_snprintf ( sError, sizeof(sError), "%s: column %d(%s) is of unsupported type (use int/bigint/timestamp/varchar/float)", - name, i+1, table->field[i]->field_name.str ); + name, i+1, table_arg->field[i]->field_name.str ); break; } } @@ -3505,7 +3505,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * ) if ( sError[0] ) { my_error ( ER_CANT_CREATE_TABLE, MYF(0), - table->s->db.str, table->s->table_name, sError ); + table_arg->s->db.str, table_arg->s->table_name, sError ); SPH_RET(-1); } diff --git a/storage/sphinx/snippets_udf.cc b/storage/sphinx/snippets_udf.cc index 2015483d347..90fde5d9c63 100644 --- a/storage/sphinx/snippets_udf.cc +++ b/storage/sphinx/snippets_udf.cc @@ -449,7 +449,7 @@ int CSphUrl::Connect() char * pError = NULL; do { - iSocket = socket ( iDomain, SOCK_STREAM, 0 ); + iSocket = (int)socket ( iDomain, SOCK_STREAM, 0 ); if ( iSocket==-1 ) { pError = "Failed to create client socket"; @@ -641,7 +641,7 @@ struct CSphSnippets } #define STRING CHECK_TYPE(STRING_RESULT) -#define INT CHECK_TYPE(INT_RESULT); int iValue = *(long long *)pArgs->args[i] +#define INT CHECK_TYPE(INT_RESULT); int iValue =(int)*(long long *)pArgs->args[i] my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage ) { diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index daf2a6fe213..a38e90f373f 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -7794,7 +7794,7 @@ int ha_spider::cmp_ref( *field; field++ ) { - if ((ret = (*field)->cmp_binary_offset(ptr_diff))) + if ((ret = (*field)->cmp_binary_offset((uint)ptr_diff))) { DBUG_PRINT("info",("spider different at %s", (*field)->field_name.str)); diff --git a/storage/spider/hs_client/config.cpp b/storage/spider/hs_client/config.cpp index b546230ca03..3bf0f3e5bdf 100644 --- a/storage/spider/hs_client/config.cpp +++ b/storage/spider/hs_client/config.cpp @@ -263,8 +263,8 @@ parse_args(int argc, char **argv, config& conf) } if (!(param = new conf_param())) continue; - uint32 key_len = eq - arg; - uint32 val_len = strlen(eq + 1); + uint32 key_len = (uint32)(eq - arg); + uint32 val_len = (uint32)(strlen(eq + 1)); if ( param->key.reserve(key_len + 1) || param->val.reserve(val_len + 1) diff --git a/storage/spider/hs_client/hstcpcli.cpp b/storage/spider/hs_client/hstcpcli.cpp index fed87803f9c..60da87b9f20 100644 --- a/storage/spider/hs_client/hstcpcli.cpp +++ b/storage/spider/hs_client/hstcpcli.cpp @@ -497,7 +497,7 @@ hstcpcli::response_recv(size_t& num_flds_r) char *const err_begin = start; read_token(start, finish); char *const err_end = start; - String e = String(err_begin, err_end - err_begin, &my_charset_bin); + String e = String(err_begin, (uint32)(err_end - err_begin), &my_charset_bin); if (!e.length()) { e = String("unknown_error", &my_charset_bin); } diff --git a/storage/spider/hs_client/socket.cpp b/storage/spider/hs_client/socket.cpp index c61b39d140f..0717acf0da1 100644 --- a/storage/spider/hs_client/socket.cpp +++ 
b/storage/spider/hs_client/socket.cpp @@ -223,7 +223,7 @@ socket_set_options(auto_file& fd, const socket_args& args, String& err_r) int socket_open(auto_file& fd, const socket_args& args, String& err_r) { - fd.reset(socket(args.family, args.socktype, args.protocol)); + fd.reset((int)socket(args.family, args.socktype, args.protocol)); if (fd.get() < 0) { return errno_string("socket", errno, err_r); } @@ -253,7 +253,7 @@ socket_connect(auto_file& fd, const socket_args& args, String& err_r) int socket_bind(auto_file& fd, const socket_args& args, String& err_r) { - fd.reset(socket(args.family, args.socktype, args.protocol)); + fd.reset((int)socket(args.family, args.socktype, args.protocol)); if (fd.get() < 0) { return errno_string("socket", errno, err_r); } @@ -300,7 +300,7 @@ int socket_accept(int listen_fd, auto_file& fd, const socket_args& args, sockaddr_storage& addr_r, socklen_t& addrlen_r, String& err_r) { - fd.reset(accept(listen_fd, reinterpret_cast<sockaddr *>(&addr_r), + fd.reset((int)accept(listen_fd, reinterpret_cast<sockaddr *>(&addr_r), &addrlen_r)); if (fd.get() < 0) { return errno_string("accept", errno, err_r); diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc index cef289b427d..54af725bb64 100644 --- a/storage/spider/spd_sys_table.cc +++ b/storage/spider/spd_sys_table.cc @@ -678,13 +678,13 @@ void spider_store_tables_name( } table->field[0]->store( ptr_db, - ptr_diff_table - 1, + (uint)(ptr_diff_table - 1), system_charset_info); DBUG_PRINT("info",("spider field[0]->null_bit = %d", table->field[0]->null_bit)); table->field[1]->store( ptr_table, - name_length - ptr_diff_db - ptr_diff_table, + (uint)(name_length - ptr_diff_db - ptr_diff_table), system_charset_info); DBUG_PRINT("info",("spider field[1]->null_bit = %d", table->field[1]->null_bit)); diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c index 6438273bc4b..9e476c23202 100644 --- a/strings/ctype-mb.c +++ b/strings/ctype-mb.c @@ -637,7 +637,7 @@ size_t my_strnxfrm_mb_internal(CHARSET_INFO *cs, uchar *dst, uchar *de, else { /* Multi-byte character */ - int len= (dst + chlen <= de) ? chlen : de - dst; + size_t len= (dst + chlen <= de) ? chlen : de - dst; memcpy(dst, src, len); dst+= len; src+= len; diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c index 99f26731851..cf73f117f0f 100644 --- a/strings/ctype-simple.c +++ b/strings/ctype-simple.c @@ -1703,7 +1703,7 @@ my_strntoull10rnd_8bit(CHARSET_INFO *cs __attribute__((unused)), /* Unknown character, exit the loop */ break; } - shift= dot ? dot - str : 0; /* Right shift */ + shift= dot ? 
(int)(dot - str) : 0; /* Right shift */ addon= 0; exp: /* [ E [ <sign> ] <unsigned integer> ] */ @@ -2019,7 +2019,7 @@ my_strxfrm_pad_desc_and_reverse(CHARSET_INFO *cs, my_strxfrm_desc_and_reverse(str, frmend, flags, level); if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && frmend < strend) { - uint fill_length= strend - frmend; + size_t fill_length= strend - frmend; cs->cset->fill(cs, (char*) frmend, fill_length, cs->pad_char); frmend= strend; } @@ -2041,7 +2041,7 @@ my_strxfrm_pad_desc_and_reverse_nopad(CHARSET_INFO *cs, my_strxfrm_desc_and_reverse(str, frmend, flags, level); if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && frmend < strend) { - uint fill_length= strend - frmend; + size_t fill_length= strend - frmend; memset(frmend, 0x00, fill_length); frmend= strend; } diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index 3913ab054db..43fdca6efc1 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -1051,7 +1051,7 @@ static void my_fill_mb2(CHARSET_INFO *cs, char *s, size_t slen, int fill) { char buf[10], *last; - int buflen, remainder; + size_t buflen, remainder; DBUG_ASSERT((slen % 2) == 0); @@ -1066,7 +1066,7 @@ my_fill_mb2(CHARSET_INFO *cs, char *s, size_t slen, int fill) for (last= s + slen - buflen; s <= last; s+= buflen) { /* Enough space for the characer */ - memcpy(s, buf, (size_t) buflen); + memcpy(s, buf, buflen); } /* diff --git a/strings/dtoa.c b/strings/dtoa.c index a16ec93d3eb..0da850bd1ca 100644 --- a/strings/dtoa.c +++ b/strings/dtoa.c @@ -106,7 +106,7 @@ size_t my_fcvt(double x, int precision, char *to, my_bool *error) } src= res; - len= end - src; + len= (int)(end - src); if (sign) *dst++= '-'; @@ -238,7 +238,7 @@ size_t my_gcvt(double x, my_gcvt_arg_type type, int width, char *to, *error= FALSE; src= res; - len= end - res; + len= (int)(end - res); /* Number of digits in the exponent from the 'e' conversion. @@ -330,7 +330,7 @@ size_t my_gcvt(double x, my_gcvt_arg_type type, int width, char *to, dtoa_free(res, buf, sizeof(buf)); res= dtoa(x, 5, width - decpt, &decpt, &sign, &end, buf, sizeof(buf)); src= res; - len= end - res; + len= (int)(end - res); } if (len == 0) @@ -396,7 +396,7 @@ size_t my_gcvt(double x, my_gcvt_arg_type type, int width, char *to, dtoa_free(res, buf, sizeof(buf)); res= dtoa(x, 4, width, &decpt, &sign, &end, buf, sizeof(buf)); src= res; - len= end - res; + len= (int)(end - res); if (--decpt < 0) decpt= -decpt; } diff --git a/strings/json_lib.c b/strings/json_lib.c index f8898809465..176aa22c1e4 100644 --- a/strings/json_lib.c +++ b/strings/json_lib.c @@ -399,7 +399,7 @@ static int read_strn(json_engine_t *j) return 1; j->state= j->stack[j->stack_p]; - j->value_len= (j->s.c_str - j->value) - 1; + j->value_len= (int)(j->s.c_str - j->value) - 1; return 0; } @@ -537,7 +537,7 @@ static int read_num(json_engine_t *j) if (skip_num_constant(j) == 0) { j->value_type= JSON_VALUE_NUMBER; - j->value_len= j->s.c_str - j->value_begin; + j->value_len= (int)(j->s.c_str - j->value_begin); return 0; } return 1; @@ -1494,7 +1494,7 @@ int json_append_ascii(CHARSET_INFO *json_cs, return c_len; } - return json - json_start; + return (int)(json - json_start); } @@ -1530,7 +1530,7 @@ int json_unescape(CHARSET_INFO *json_cs, return -1; } - return s.error==JE_EOS ? res - res_b : -1; + return s.error==JE_EOS ? 
(int)(res - res_b) : -1; } @@ -1645,7 +1645,7 @@ int json_escape(CHARSET_INFO *str_cs, } } - return json - json_start; + return (int)(json - json_start); } diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c index 50ecf6c0860..bf06e2b502b 100644 --- a/tests/mysql_client_fw.c +++ b/tests/mysql_client_fw.c @@ -977,7 +977,7 @@ const char *query_arg) fetch->handle= mysql_stmt_init(mysql); - rc= mysql_stmt_prepare(fetch->handle, fetch->query, strlen(fetch->query)); + rc= mysql_stmt_prepare(fetch->handle, fetch->query, (ulong)strlen(fetch->query)); check_execute(fetch->handle, rc); /* @@ -1087,7 +1087,7 @@ enum fetch_type fetch_type) for (fetch= fetch_array; fetch < fetch_array + query_count; ++fetch) { /* Init will exit(1) in case of error */ - stmt_fetch_init(fetch, fetch - fetch_array, + stmt_fetch_init(fetch, (uint)(fetch - fetch_array), query_list[fetch - fetch_array]); } @@ -1187,19 +1187,19 @@ static char **defaults_argv; static struct my_option client_test_long_options[] = { - {"basedir", 'b', "Basedir for tests.", &opt_basedir, - &opt_basedir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"basedir", 'b', "Basedir for tests.",(void *)&opt_basedir, + (void *)&opt_basedir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", 'C', - "Directory for character set files.", &charsets_dir, - &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Directory for character set files.", (void *)&charsets_dir, + (void *)&charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"count", 't', "Number of times test to be executed", &opt_count_read, &opt_count_read, 0, GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0}, {"database", 'D', "Database to use", &opt_db, &opt_db, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"do-not-drop-database", 'd', "Do not drop database while disconnecting", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"debug", '#', "Output debug log", &default_dbug_option, - &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"debug", '#', "Output debug log", (void *)&default_dbug_option, + (void *)&default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host", &opt_host, &opt_host, @@ -1235,8 +1235,8 @@ static struct my_option client_test_long_options[] = {"user", 'u', "User for login if not current user", &opt_user, &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"vardir", 'v', "Data dir for tests.", &opt_vardir, - &opt_vardir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"vardir", 'v', "Data dir for tests.", (void *)&opt_vardir, + (void *)&opt_vardir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"non-blocking-api", 'n', "Use the non-blocking client API for communication.", &non_blocking_api_enabled, &non_blocking_api_enabled, 0, diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index ae371745c97..9ef85b42d5c 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -11824,7 +11824,7 @@ static void test_bug5194() } *query_ptr= '\0'; - rc= mysql_stmt_prepare(stmt, query, query_ptr - query); + rc= mysql_stmt_prepare(stmt, query, (ulong)(query_ptr - query)); if (rc && nrows * COLUMN_COUNT > uint16_max) { if (!opt_silent) @@ -15806,7 +15806,7 @@ static void test_bug21206() for (fetch= fetch_array; fetch < fetch_array + cursor_count; ++fetch) { /* Init will exit(1) in case of error */ - stmt_fetch_init(fetch, fetch - fetch_array, query); + stmt_fetch_init(fetch, 
(uint)(fetch - fetch_array), query); } for (fetch= fetch_array; fetch < fetch_array + cursor_count; ++fetch) @@ -16008,7 +16008,7 @@ static void test_bug21635() rc= mysql_query(mysql, "INSERT INTO t1 VALUES (1)"); myquery(rc); - rc= mysql_real_query(mysql, query, query_end - query); + rc= mysql_real_query(mysql, query, (ulong)(query_end - query)); myquery(rc); result= mysql_use_result(mysql); diff --git a/vio/vio.c b/vio/vio.c index 2c5f37a75a4..34ca9ed872d 100644 --- a/vio/vio.c +++ b/vio/vio.c @@ -80,7 +80,7 @@ static void vio_init(Vio *vio, enum enum_vio_type type, my_socket sd, uint flags) { DBUG_ENTER("vio_init"); - DBUG_PRINT("enter", ("type: %d sd: %d flags: %d", type, sd, flags)); + DBUG_PRINT("enter", ("type: %d sd: %d flags: %d", type, (int)sd, flags)); #ifndef HAVE_VIO_READ_BUFF flags&= ~VIO_BUFFERED_READ; @@ -249,7 +249,7 @@ Vio *mysql_socket_vio_new(MYSQL_SOCKET mysql_socket, enum enum_vio_type type, ui Vio *vio; my_socket sd= mysql_socket_getfd(mysql_socket); DBUG_ENTER("mysql_socket_vio_new"); - DBUG_PRINT("enter", ("sd: %d", sd)); + DBUG_PRINT("enter", ("sd: %d", (int)sd)); if ((vio = (Vio*) my_malloc(sizeof(*vio),MYF(MY_WME)))) { vio_init(vio, type, sd, flags); @@ -266,7 +266,7 @@ Vio *vio_new(my_socket sd, enum enum_vio_type type, uint flags) Vio *vio; MYSQL_SOCKET mysql_socket= MYSQL_INVALID_SOCKET; DBUG_ENTER("vio_new"); - DBUG_PRINT("enter", ("sd: %d", sd)); + DBUG_PRINT("enter", ("sd: %d", (int)sd)); mysql_socket_setfd(&mysql_socket, sd); vio = mysql_socket_vio_new(mysql_socket, type, flags); diff --git a/vio/viosocket.c b/vio/viosocket.c index cc16c3698c0..f983fe20fe7 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -145,9 +145,9 @@ size_t vio_read(Vio *vio, uchar *buf, size_t size) ssize_t ret; int flags= 0; DBUG_ENTER("vio_read"); - DBUG_PRINT("enter", ("sd: %d buf: %p size: %d", - mysql_socket_getfd(vio->mysql_socket), buf, - (int) size)); + DBUG_PRINT("enter", ("sd: %d buf: %p size: %zu", + (int)mysql_socket_getfd(vio->mysql_socket), buf, + size)); /* Ensure nobody uses vio_read_buff and vio_read simultaneously. */ DBUG_ASSERT(vio->read_end == vio->read_pos); @@ -212,9 +212,9 @@ size_t vio_read_buff(Vio *vio, uchar* buf, size_t size) size_t rc; #define VIO_UNBUFFERED_READ_MIN_SIZE 2048 DBUG_ENTER("vio_read_buff"); - DBUG_PRINT("enter", ("sd: %d buf: %p size: %d", - mysql_socket_getfd(vio->mysql_socket), - buf, (int) size)); + DBUG_PRINT("enter", ("sd: %d buf: %p size:%zu", + (int)mysql_socket_getfd(vio->mysql_socket), + buf, size)); if (vio->read_pos < vio->read_end) { @@ -259,9 +259,9 @@ size_t vio_write(Vio *vio, const uchar* buf, size_t size) ssize_t ret; int flags= 0; DBUG_ENTER("vio_write"); - DBUG_PRINT("enter", ("sd: %d buf: %p size: %d", - mysql_socket_getfd(vio->mysql_socket), buf, - (int) size)); + DBUG_PRINT("enter", ("sd: %d buf: %p size: %zu", + (int)mysql_socket_getfd(vio->mysql_socket), buf, + size)); /* If timeout is enabled, do not block. 
*/ if (vio->write_timeout >= 0) @@ -509,7 +509,7 @@ int vio_keepalive(Vio* vio, my_bool set_keep_alive) uint opt = 0; DBUG_ENTER("vio_keepalive"); DBUG_PRINT("enter", ("sd: %d set_keep_alive: %d", - mysql_socket_getfd(vio->mysql_socket), + (int)mysql_socket_getfd(vio->mysql_socket), (int)set_keep_alive)); if (vio->type != VIO_TYPE_NAMEDPIPE && vio->type != VIO_TYPE_SHARED_MEMORY) @@ -563,7 +563,7 @@ int vio_close(Vio *vio) { int r=0; DBUG_ENTER("vio_close"); - DBUG_PRINT("enter", ("sd: %d", mysql_socket_getfd(vio->mysql_socket))); + DBUG_PRINT("enter", ("sd: %d", (int)mysql_socket_getfd(vio->mysql_socket))); if (vio->type != VIO_CLOSED) { diff --git a/vio/viossl.c b/vio/viossl.c index 0bc2c263336..e7cc85ea539 100644 --- a/vio/viossl.c +++ b/vio/viossl.c @@ -136,8 +136,8 @@ size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size) int ret; SSL *ssl= vio->ssl_arg; DBUG_ENTER("vio_ssl_read"); - DBUG_PRINT("enter", ("sd: %d buf: %p size: %d ssl: %p", - mysql_socket_getfd(vio->mysql_socket), buf, (int) size, + DBUG_PRINT("enter", ("sd: %d buf: %p size: %zu ssl: %p", + (int)mysql_socket_getfd(vio->mysql_socket), buf, size, vio->ssl_arg)); if (vio->async_context && vio->async_context->active) @@ -168,9 +168,9 @@ size_t vio_ssl_write(Vio *vio, const uchar *buf, size_t size) int ret; SSL *ssl= vio->ssl_arg; DBUG_ENTER("vio_ssl_write"); - DBUG_PRINT("enter", ("sd: %d buf: %p size: %d", - mysql_socket_getfd(vio->mysql_socket), - buf, (int) size)); + DBUG_PRINT("enter", ("sd: %d buf: %p size: %zu", + (int)mysql_socket_getfd(vio->mysql_socket), + buf, size)); if (vio->async_context && vio->async_context->active) ret= my_ssl_write_async(vio->async_context, (SSL *)vio->ssl_arg, buf, @@ -319,8 +319,8 @@ static int ssl_do(struct st_VioSSLFd *ptr, Vio *vio, long timeout, my_bool was_blocking; my_socket sd= mysql_socket_getfd(vio->mysql_socket); DBUG_ENTER("ssl_do"); - DBUG_PRINT("enter", ("ptr: 0x%lx, sd: %d ctx: 0x%lx", - (long) ptr, sd, (long) ptr->ssl_context)); + DBUG_PRINT("enter", ("ptr: %p, sd: %d ctx: %p", + ptr, (int)sd, ptr->ssl_context)); /* Set socket to blocking if not already set */ vio_blocking(vio, 1, &was_blocking); @@ -332,7 +332,7 @@ static int ssl_do(struct st_VioSSLFd *ptr, Vio *vio, long timeout, vio_blocking(vio, was_blocking, &unused); DBUG_RETURN(1); } - DBUG_PRINT("info", ("ssl: 0x%lx timeout: %ld", (long) ssl, timeout)); + DBUG_PRINT("info", ("ssl: %p timeout: %ld", ssl, timeout)); SSL_clear(ssl); SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout); SSL_set_fd(ssl, sd); diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c index 71ef2879464..6358b976e16 100644 --- a/vio/viosslfactories.c +++ b/vio/viosslfactories.c @@ -100,8 +100,8 @@ vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file, enum enum_ssl_init_error* error) { DBUG_ENTER("vio_set_cert_stuff"); - DBUG_PRINT("enter", ("ctx: 0x%lx cert_file: %s key_file: %s", - (long) ctx, cert_file, key_file)); + DBUG_PRINT("enter", ("ctx: %p cert_file: %s key_file: %s", + ctx, cert_file, key_file)); if (!cert_file && key_file) cert_file= key_file; |
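/*
 * Editorial note, not part of the patch: the vio/ hunks in this section make two
 * recurring DBUG_PRINT fixes.  size_t arguments are now printed with "%zu"
 * instead of being squeezed through "(int) ... %d", and pointers are printed
 * with "%p" instead of "(long) ... 0x%lx", which drops the upper bits wherever
 * long is narrower than a pointer (for example 64-bit Windows).  A tiny
 * stand-alone sketch of both idioms, using plain printf and dummy values:
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
  size_t size= 16384;
  void *buf= &size;

  /* %zu matches size_t on every platform; %p is the portable pointer format */
  printf("buf: %p size: %zu\n", buf, size);
  return 0;
}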